firmware/br-ext-chip-fullhan/board/fh8852v100/kernel/patches/00_fh8852v100_kernel-3.0.8....

141673 lines
3.8 MiB
Raw Blame History

This file contains invisible Unicode characters!

This file contains invisible Unicode characters that may be processed differently from what appears below. If your use case is intentional and legitimate, you can safely ignore this warning. Use the Escape button to reveal hidden characters.

This file contains ambiguous Unicode characters that may be confused with others in your current locale. If your use case is intentional and legitimate, you can safely ignore this warning. Use the Escape button to highlight these characters.

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 91c84cbe..28ac5c82 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -858,6 +858,20 @@ config ARCH_OMAP
help
Support for TI's OMAP platform (OMAP1/2/3/4).
+config ARCH_FULLHAN
+ bool "FullHan"
+ select GENERIC_CLOCKEVENTS
+# select GENERIC_TIME
+ select HAVE_SCHED_CLOCK
+ select ARCH_REQUIRE_GPIOLIB
+# select ZONE_DMA
+ select CLKDEV_LOOKUP
+ select GENERIC_ALLOCATOR
+# select GENERIC_IRQ_CHIP
+ select HAVE_SYSCALL_TRACEPOINTS
+ help
+ Support for FullHan's FH platform.
+
config PLAT_SPEAR
bool "ST SPEAr"
select ARM_AMBA
@@ -900,6 +914,8 @@ source "arch/arm/mach-dove/Kconfig"
source "arch/arm/mach-ep93xx/Kconfig"
+source "arch/arm/mach-fh/Kconfig"
+
source "arch/arm/mach-footbridge/Kconfig"
source "arch/arm/mach-gemini/Kconfig"
@@ -1577,7 +1593,8 @@ config LEDS
ARCH_OMAP || ARCH_P720T || ARCH_PXA_IDP || \
ARCH_SA1100 || ARCH_SHARK || ARCH_VERSATILE || \
ARCH_AT91 || ARCH_DAVINCI || \
- ARCH_KS8695 || MACH_RD88F5182 || ARCH_REALVIEW
+ ARCH_KS8695 || MACH_RD88F5182 || ARCH_REALVIEW || \
+ ARCH_FULLHAN
help
If you say Y here, the LEDs on your machine will be used
to provide useful information about your current system status.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index f5b2b390..20ef496c 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -139,6 +139,7 @@ machine-$(CONFIG_ARCH_DAVINCI) := davinci
machine-$(CONFIG_ARCH_DOVE) := dove
machine-$(CONFIG_ARCH_EBSA110) := ebsa110
machine-$(CONFIG_ARCH_EP93XX) := ep93xx
+machine-$(CONFIG_ARCH_FULLHAN) := fh
machine-$(CONFIG_ARCH_GEMINI) := gemini
machine-$(CONFIG_ARCH_H720X) := h720x
machine-$(CONFIG_ARCH_INTEGRATOR) := integrator
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index 832d3723..0bcea2c1 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -169,6 +169,23 @@ asmlinkage void __div0(void)
extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x));
+#ifdef CONFIG_TEST_BOOT_TIME
+#define SET_TIMING_GPIO(port, level) \
+ do { \
+ unsigned char *p_gpio = (unsigned char *) 0xf0300000; \
+ int data = *(p_gpio + 0x0004); \
+ data |= 1 << (port); \
+ *(p_gpio + 0x0004) = data; \
+ data = *(p_gpio); \
+ if ((level) == 0) \
+ data &= ~(1 << (port)); \
+ else \
+ data |= 1 << (port); \
+ *(p_gpio) = data; \
+ } while (0)
+#else
+#define SET_TIMING_GPIO(port, level)
+#endif
void
decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
@@ -182,6 +199,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
free_mem_end_ptr = free_mem_ptr_end_p;
__machine_arch_type = arch_id;
+ SET_TIMING_GPIO(4, 1);
+
arch_decomp_setup();
putstr("Uncompressing Linux...");
diff --git a/arch/arm/configs/fh8633_defconfig b/arch/arm/configs/fh8633_defconfig
new file mode 100755
index 00000000..ad8b0adf
--- /dev/null
+++ b/arch/arm/configs/fh8633_defconfig
@@ -0,0 +1,1932 @@
+#
+# Automatically generated make config: don't edit
+# Linux/arm 3.0.8 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_HAVE_SCHED_CLOCK=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+CONFIG_IRQ_WORK=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HAVE_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_SHOW=y
+# CONFIG_SPARSE_IRQ is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_PREEMPT_RCU=y
+# CONFIG_TINY_RCU is not set
+# CONFIG_TINY_PREEMPT_RCU is not set
+CONFIG_PREEMPT_RCU=y
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_BOOST is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_CGROUPS is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="usr/rootfs.cpio.gz"
+CONFIG_INITRAMFS_ROOT_UID=0
+CONFIG_INITRAMFS_ROOT_GID=0
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_INITRAMFS_COMPRESSION_NONE=y
+# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS4 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+CONFIG_ARCH_FULLHAN=y
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+CONFIG_CPU_FH8833=y
+
+#
+# FullHan Implementations
+#
+
+#
+# FullHan Core Type
+#
+# CONFIG_ARCH_FH8810 is not set
+CONFIG_ARCH_FH8833=y
+# CONFIG_ARCH_FH8830 is not set
+# CONFIG_ARCH_WUDANG is not set
+
+#
+# FullHan Board Type
+#
+# CONFIG_USE_PTS_AS_CLOCKSOURCE is not set
+# CONFIG_FH_SIMPLE_TIMER is not set
+CONFIG_MACH_FH8833=y
+CONFIG_MACH_FH8833_QFN56=y
+# CONFIG_MACH_FH_NAND is not set
+# CONFIG_JLINK_DEBUG is not set
+
+#
+# System MMU
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_V6=y
+CONFIG_CPU_32v6=y
+CONFIG_CPU_ABRT_EV6=y
+CONFIG_CPU_PABRT_V6=y
+CONFIG_CPU_CACHE_V6=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V6=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+CONFIG_CPU_USE_DOMAINS=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
+CONFIG_ARM_ERRATA_411920=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_HZ=100
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_HW_PERF_EVENTS=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NEED_PER_CPU_KM=y
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+
+#
+# Boot options
+#
+# CONFIG_USE_OF is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_VFP=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_XTABLES is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV6 is not set
+# CONFIG_IP6_NF_QUEUE is not set
+# CONFIG_IP6_NF_IPTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+
+#
+# Classification
+#
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_EMATCH is not set
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=y
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_RFKILL_GPIO is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/mdev"
+CONFIG_DEVTMPFS=y
+# CONFIG_DEVTMPFS_MOUNT is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+CONFIG_MTD_M25P80=y
+CONFIG_M25PXX_USE_FAST_READ=y
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_FH_I2S is not set
+# CONFIG_FH_DW_I2S is not set
+# CONFIG_FH_ACW is not set
+# CONFIG_FH_PWM is not set
+CONFIG_FH_PWM_NUM=8
+# CONFIG_FH_SADC is not set
+CONFIG_FH_FIRMWARE_LOADER=m
+CONFIG_FH_EFUSE=y
+CONFIG_FH_CLK_MISC=y
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_ISCSI_BOOT_SYSFS is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_TARGET_CORE is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+CONFIG_MII=y
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+CONFIG_FH_GMAC=y
+CONFIG_FH_GMAC_DA=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_FTMAC100 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+CONFIG_BCMDHD=m
+CONFIG_BCMDHD_FW_PATH="/bcmdhd/fw_bcmdhd.bin"
+CONFIG_BCMDHD_NVRAM_PATH="/bcmdhd/nvram_ap6181.txt"
+CONFIG_BCMDHD_CONFIG_PATH="/bcmdhd/config.txt"
+CONFIG_BCMDHD_OOB=y
+# CONFIG_BCMDHD_SDIO_IRQ is not set
+# CONFIG_RTL8189FS_SD is not set
+# CONFIG_MRVL8801 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTL8192SE is not set
+# CONFIG_RTL8192CU is not set
+# CONFIG_WL1251 is not set
+# CONFIG_WL12XX_MENU is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+# CONFIG_RTL8189ES is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_WAN is not set
+
+#
+# CAIF transport drivers
+#
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_MPPE is not set
+# CONFIG_PPPOE is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+# CONFIG_VT_CONSOLE is not set
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX3107 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+CONFIG_SERIAL_FH=y
+CONFIG_SERIAL_FH_CONSOLE=y
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+# CONFIG_I2C_SMBUS is not set
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_FH_INTERRUPT=y
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+CONFIG_SPI_FH=y
+CONFIG_SPI_FH_SLAVE=y
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_BASIC_MMIO is not set
+# CONFIG_GPIO_IT8761E is not set
+CONFIG_GPIO_FH=y
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+CONFIG_FH_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+CONFIG_MFD_SUPPORT=y
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13XXX is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_FH_OTG=y
+# CONFIG_USB_S3C_OTG_HOST is not set
+# CONFIG_USB_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+# CONFIG_USB_UAS is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_DWC2 is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ULPI is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_DW is not set
+CONFIG_MMC_FH=y
+CONFIG_MMC_FH_IDMAC=y
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+# CONFIG_LEDS_CLASS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+# CONFIG_NFC_DEVICES is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_FH=y
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+# CONFIG_DW_DMAC is not set
+CONFIG_FH_DMAC=y
+CONFIG_FH_DMAC_MISC=y
+# CONFIG_TIMB_DMA is not set
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_PWM=y
+CONFIG_PWM_FULLHAN=y
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_YAFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_ROOT_NFS is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFS_USE_NEW_IDMAPPER is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+# CONFIG_TEST_BOOT_TIME is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_FRAME_POINTER=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_CPU_STALL_VERBOSE=y
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API=y
+# CONFIG_CRYPTO_USER_API_HASH is not set
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_HW=y
+CONFIG_FH_AES=y
+# CONFIG_FH_AES_SELF_TEST is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+CONFIG_CRC_T10DIF=m
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+# CONFIG_XZ_DEC is not set
+# CONFIG_XZ_DEC_BCJ is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+CONFIG_AVERAGE=y
diff --git a/arch/arm/configs/fh8852_defconfig b/arch/arm/configs/fh8852_defconfig
new file mode 100644
index 00000000..d1bae5f8
--- /dev/null
+++ b/arch/arm/configs/fh8852_defconfig
@@ -0,0 +1,1703 @@
+#
+# Automatically generated make config: don't edit
+# Linux/arm 3.0.8 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_HAVE_SCHED_CLOCK=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HAVE_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_SHOW=y
+# CONFIG_SPARSE_IRQ is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TINY_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_CGROUPS is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="usr/rootfs.cpio.gz"
+CONFIG_INITRAMFS_ROOT_UID=0
+CONFIG_INITRAMFS_ROOT_GID=0
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_INITRAMFS_COMPRESSION_NONE=y
+# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS4 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+CONFIG_ARCH_FULLHAN=y
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+# CONFIG_GPIO_PCA953X is not set
+CONFIG_CPU_FH8856=y
+
+#
+# FullHan Implementations
+#
+
+#
+# FullHan Core Type
+#
+# CONFIG_ARCH_FH8810 is not set
+# CONFIG_ARCH_FH8833 is not set
+# CONFIG_ARCH_FH8830 is not set
+CONFIG_ARCH_FH8856=y
+# CONFIG_ARCH_WUDANG is not set
+
+#
+# FullHan Board Type
+#
+# CONFIG_USE_PTS_AS_CLOCKSOURCE is not set
+# CONFIG_FH_SIMPLE_TIMER is not set
+# CONFIG_MACH_FH8856 is not set
+CONFIG_MACH_FH8852=y
+# CONFIG_MACH_FH_NAND is not set
+# CONFIG_JLINK_DEBUG is not set
+
+#
+# System MMU
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_V6=y
+CONFIG_CPU_32v6=y
+CONFIG_CPU_ABRT_EV6=y
+CONFIG_CPU_PABRT_V6=y
+CONFIG_CPU_CACHE_V6=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V6=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+CONFIG_CPU_USE_DOMAINS=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
+CONFIG_ARM_ERRATA_411920=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NEED_PER_CPU_KM=y
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+
+#
+# Boot options
+#
+# CONFIG_USE_OF is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_VFP=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_XTABLES is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV6 is not set
+# CONFIG_IP6_NF_QUEUE is not set
+# CONFIG_IP6_NF_IPTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+
+#
+# Classification
+#
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_EMATCH is not set
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=y
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_RFKILL_GPIO is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/mdev"
+CONFIG_DEVTMPFS=y
+# CONFIG_DEVTMPFS_MOUNT is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+CONFIG_MTD_M25P80=y
+CONFIG_M25PXX_USE_FAST_READ=y
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_FH_DW_I2S is not set
+# CONFIG_FH_ACW is not set
+# CONFIG_FH_PWM is not set
+CONFIG_FH_PWM_NUM=8
+CONFIG_FH_PINCTRL=y
+CONFIG_FH_SADC_V1=y
+CONFIG_FH_SADC_V11=y
+CONFIG_FH_EFUSE=y
+CONFIG_FH_CLK_MISC=y
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+CONFIG_MII=y
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+CONFIG_FH_GMAC=y
+CONFIG_FH_GMAC_DA=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_FTMAC100 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTL8192SE is not set
+# CONFIG_RTL8192CU is not set
+# CONFIG_WL1251 is not set
+# CONFIG_WL12XX_MENU is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_WAN is not set
+
+#
+# CAIF transport drivers
+#
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_MPPE is not set
+# CONFIG_PPPOE is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+# CONFIG_VT_CONSOLE is not set
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX3107 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+CONFIG_SERIAL_FH=y
+CONFIG_SERIAL_FH_CONSOLE=y
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+# CONFIG_I2C_SMBUS is not set
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_FH_INTERRUPT=y
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+CONFIG_SPI_FH=y
+CONFIG_SPI_FH_SLAVE=y
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_BASIC_MMIO is not set
+# CONFIG_GPIO_IT8761E is not set
+CONFIG_GPIO_FH=y
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+CONFIG_FH_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_FH_OTG=y
+CONFIG_FH_HOST_ONLY=y
+# CONFIG_FH_DEVICE_ONLY is not set
+# CONFIG_USB_S3C_OTG_HOST is not set
+# CONFIG_USB_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ULPI is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_DW is not set
+CONFIG_MMC_FH=y
+CONFIG_MMC_FH_IDMAC=y
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_NFC_DEVICES is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_FH_V2=y
+CONFIG_USE_TSENSOR=y
+# CONFIG_USE_TSENSOR_OFFSET is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+# CONFIG_DW_DMAC is not set
+CONFIG_FH_DMAC=y
+CONFIG_FH_DMAC_MISC=y
+# CONFIG_TIMB_DMA is not set
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_PWM=y
+CONFIG_PWM_FULLHAN=y
+CONFIG_PWM_FULLHAN_V20=y
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_YAFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_ROOT_NFS is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFS_USE_NEW_IDMAPPER is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+# CONFIG_TEST_BOOT_TIME is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_LKDTM is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API=y
+# CONFIG_CRYPTO_USER_API_HASH is not set
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_HW=y
+CONFIG_FH_AES=y
+# CONFIG_FH_AES_SELF_TEST is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+CONFIG_CRC_T10DIF=m
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+# CONFIG_XZ_DEC is not set
+# CONFIG_XZ_DEC_BCJ is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+CONFIG_AVERAGE=y
diff --git a/arch/arm/configs/fh8856_defconfig b/arch/arm/configs/fh8856_defconfig
new file mode 100644
index 00000000..f5f61f9d
--- /dev/null
+++ b/arch/arm/configs/fh8856_defconfig
@@ -0,0 +1,1705 @@
+#
+# Automatically generated make config: don't edit
+# Linux/arm 3.0.8 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_HAVE_SCHED_CLOCK=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HAVE_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_SHOW=y
+# CONFIG_SPARSE_IRQ is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TINY_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_CGROUPS is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="usr/rootfs.cpio.gz"
+CONFIG_INITRAMFS_ROOT_UID=0
+CONFIG_INITRAMFS_ROOT_GID=0
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_INITRAMFS_COMPRESSION_NONE=y
+# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS4 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+CONFIG_ARCH_FULLHAN=y
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+# CONFIG_GPIO_PCA953X is not set
+CONFIG_CPU_FH8856=y
+
+#
+# FullHan Implementations
+#
+
+#
+# FullHan Core Type
+#
+# CONFIG_ARCH_FH8810 is not set
+# CONFIG_ARCH_FH8833 is not set
+# CONFIG_ARCH_FH8830 is not set
+CONFIG_ARCH_FH8856=y
+# CONFIG_ARCH_FHZY2 is not set
+# CONFIG_ARCH_WUDANG is not set
+
+#
+# FullHan Board Type
+#
+# CONFIG_USE_PTS_AS_CLOCKSOURCE is not set
+# CONFIG_FH_SIMPLE_TIMER is not set
+CONFIG_MACH_FH8856=y
+# CONFIG_MACH_FH8852 is not set
+# CONFIG_MACH_FH_NAND is not set
+# CONFIG_JLINK_DEBUG is not set
+
+#
+# System MMU
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_V6=y
+CONFIG_CPU_32v6=y
+CONFIG_CPU_ABRT_EV6=y
+CONFIG_CPU_PABRT_V6=y
+CONFIG_CPU_CACHE_V6=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V6=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+CONFIG_CPU_USE_DOMAINS=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
+CONFIG_ARM_ERRATA_411920=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NEED_PER_CPU_KM=y
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+
+#
+# Boot options
+#
+# CONFIG_USE_OF is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_VFP=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_XTABLES is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV6 is not set
+# CONFIG_IP6_NF_QUEUE is not set
+# CONFIG_IP6_NF_IPTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+
+#
+# Classification
+#
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_EMATCH is not set
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=y
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_RFKILL_GPIO is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/mdev"
+CONFIG_DEVTMPFS=y
+# CONFIG_DEVTMPFS_MOUNT is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+CONFIG_MTD_M25P80=y
+CONFIG_M25PXX_USE_FAST_READ=y
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_FH_DW_I2S is not set
+# CONFIG_FH_ACW is not set
+# CONFIG_FH_PWM is not set
+CONFIG_FH_PWM_NUM=8
+CONFIG_FH_PINCTRL=y
+CONFIG_FH_SADC_V1=y
+CONFIG_FH_SADC_V11=y
+CONFIG_FH_EFUSE=y
+CONFIG_FH_CLK_MISC=y
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+CONFIG_MII=y
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+CONFIG_FH_GMAC=y
+CONFIG_FH_GMAC_DA=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_FTMAC100 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTL8192SE is not set
+# CONFIG_RTL8192CU is not set
+# CONFIG_WL1251 is not set
+# CONFIG_WL12XX_MENU is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_WAN is not set
+
+#
+# CAIF transport drivers
+#
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_MPPE is not set
+# CONFIG_PPPOE is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+# CONFIG_VT_CONSOLE is not set
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX3107 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+CONFIG_SERIAL_FH=y
+CONFIG_SERIAL_FH_CONSOLE=y
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+# CONFIG_I2C_SMBUS is not set
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_FH_INTERRUPT=y
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+CONFIG_SPI_FH=y
+CONFIG_SPI_FH_SLAVE=y
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_BASIC_MMIO is not set
+# CONFIG_GPIO_IT8761E is not set
+CONFIG_GPIO_FH=y
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+CONFIG_FH_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_FH_OTG=y
+# CONFIG_FH_OTG is not set
+CONFIG_FH_HOST_ONLY=y
+# CONFIG_FH_DEVICE_ONLY is not set
+# CONFIG_USB_S3C_OTG_HOST is not set
+# CONFIG_USB_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ULPI is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_DW is not set
+CONFIG_MMC_FH=y
+CONFIG_MMC_FH_IDMAC=y
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_NFC_DEVICES is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_FH_V2=y
+CONFIG_USE_TSENSOR=y
+# CONFIG_USE_TSENSOR_OFFSET is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+# CONFIG_DW_DMAC is not set
+CONFIG_FH_DMAC=y
+CONFIG_FH_DMAC_MISC=y
+# CONFIG_TIMB_DMA is not set
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_PWM=y
+CONFIG_PWM_FULLHAN=y
+CONFIG_PWM_FULLHAN_V20=y
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_YAFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_ROOT_NFS is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFS_USE_NEW_IDMAPPER is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+# CONFIG_TEST_BOOT_TIME is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_LKDTM is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API=y
+# CONFIG_CRYPTO_USER_API_HASH is not set
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_HW=y
+CONFIG_FH_AES=y
+# CONFIG_FH_AES_SELF_TEST is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+CONFIG_CRC_T10DIF=m
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+# CONFIG_XZ_DEC is not set
+# CONFIG_XZ_DEC_BCJ is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+CONFIG_AVERAGE=y
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index ee2ad8ae..3aefe527 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -143,6 +143,12 @@ struct tag_memclk {
__u32 fmemclk;
};
+#define ATAG_PHYMODE 0x41000601
+
+struct tag_phymode {
+ u32 phymode;
+};
+
struct tag {
struct tag_header hdr;
union {
@@ -165,6 +171,11 @@ struct tag {
* DC21285 specific
*/
struct tag_memclk memclk;
+
+ /*
+ * Fullhan specific
+ */
+ struct tag_phymode phymode;
} u;
};
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6b1e0ad9..aece734b 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -61,7 +61,11 @@ __after_proc_init:
* the CPU init function.
*/
#ifdef CONFIG_ALIGNMENT_TRAP
+#if (__GNUC__ >= 5 && __LINUX_ARM_ARCH__ >= 6)
+ bic r0, r0, #CR_A
+#else
orr r0, r0, #CR_A
+#endif
#else
bic r0, r0, #CR_A
#endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 278c1b0e..d2da8a78 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -226,6 +226,7 @@ __create_page_tables:
* This allows debug messages to be output
* via a serial console before paging_init.
*/
+#ifndef CONFIG_JLINK_DEBUG
addruart r7, r3
mov r3, r3, lsr #20
@@ -243,6 +244,7 @@ __create_page_tables:
add r3, r3, #1 << 20
teq r0, r6
bne 1b
+#endif
#else /* CONFIG_DEBUG_ICEDCC */
/* we don't need any serial debugging mappings for ICEDCC */
@@ -349,7 +351,11 @@ __secondary_data:
*/
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
+#if (__GNUC__ >= 5 && __LINUX_ARM_ARCH__ >= 6)
+ bic r0, r0, #CR_A
+#else
orr r0, r0, #CR_A
+#endif
#else
bic r0, r0, #CR_A
#endif
@@ -362,6 +368,13 @@ __enable_mmu:
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
+ /*
+ * add by fullhan
+ */
+#ifdef CONFIG_JLINK_DEBUG
+ mov r4, #0x10000000
+#endif
+
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
old mode 100644
new mode 100755
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
old mode 100644
new mode 100755
diff --git a/arch/arm/mach-fh/Kconfig b/arch/arm/mach-fh/Kconfig
new file mode 100644
index 00000000..1861aa25
--- /dev/null
+++ b/arch/arm/mach-fh/Kconfig
@@ -0,0 +1,202 @@
+if ARCH_FULLHAN
+
+config CPU_FH8810
+ select CPU_V6
+ bool
+
+config CPU_WUDANG
+ select CPU_V6
+ bool
+
+config CPU_FH8830
+ select CPU_V6
+ bool
+
+config CPU_FH8833
+ select CPU_V6
+ bool
+
+config CPU_FH8856
+ select CPU_V6
+ bool
+
+config CPU_FH8626V100
+ select CPU_V6
+ bool
+
+config CPU_ZY2
+ select CPU_V6
+ bool
+
+menu "FullHan Implementations"
+
+comment "FullHan Core Type"
+
+choice
+ prompt "Select Fullhan Chip:"
+ default ARCH_FH8833
+
+config ARCH_FH8810
+ bool "FullHan FH8810 based system"
+ select CPU_FH8810
+ select USE_PTS_AS_CLOCKSOURCE
+
+config ARCH_FH8833
+ bool "FullHan FH8833 based system"
+ select CPU_FH8833
+
+config ARCH_FH8830
+ bool "FullHan FH8830 based system"
+ select CPU_FH8830
+
+config ARCH_FH8856
+ bool "FullHan FH8856 based system"
+ select CPU_FH8856
+
+config ARCH_FH8626V100
+ bool "FullHan FH8626V100 based system"
+ select CPU_FH8626V100
+
+
+config ARCH_WUDANG
+ bool "FullHan Wudang based system"
+ select CPU_WUDANG
+
+config ARCH_ZY2
+ bool "FullHan ZY2 based system"
+ select CPU_ZY2
+
+
+endchoice
+
+comment "FullHan Board Type"
+
+config USE_PTS_AS_CLOCKSOURCE
+ bool "use pts as clock source"
+ default n
+
+config FH_SIMPLE_TIMER
+ bool "use fh self-defined simple timer"
+ default n
+
+config MACH_FH8810
+ bool "FullHan FH8810 board"
+ default n
+ depends on ARCH_FH8810
+
+config MACH_FH8830_FPGA
+ bool "FullHan fh8830 fpga board"
+ default n
+ depends on ARCH_FH8830
+ select MISC_DEVICES
+ select I2C
+ help
+ Configure this option to specify whether the board used
+ for development is an FH8830 FPGA
+
+config MACH_FH8833
+ bool "FullHan FH8833 board"
+ default y
+ depends on ARCH_FH8833
+ select MISC_DEVICES
+ select I2C
+ help
+ Configure this option to specify whether the board used
+ for development is FH8833
+
+
+config MACH_FH8830
+ bool "FullHan FH8830 board"
+ default y
+ depends on ARCH_FH8830
+ select MISC_DEVICES
+ select I2C
+ help
+ Configure this option to specify whether the board used
+ for development is FH8830
+
+config MACH_FH8856
+ bool "FullHan FH8856 board"
+ default y
+ depends on ARCH_FH8856
+ select MISC_DEVICES
+ select I2C
+ help
+ Configure this option to specify whether the board used
+ for development is FH8856
+
+config MACH_FH8852
+ bool "FullHan FH8852 board"
+ default n
+ depends on ARCH_FH8856
+ select MISC_DEVICES
+ select I2C
+ help
+ Configure this option to specify whether the board used
+ for development is FH8852
+config MACH_FH8626V100
+ bool "FullHan FH8626V100 board"
+ default y
+ depends on ARCH_FH8626V100
+ select MISC_DEVICES
+ select I2C
+ help
+ Configure this option to specify whether the board used
+ for development is FH8626V100
+
+config MACH_WUDANG
+ bool "FullHan Wudang board"
+ default n
+ depends on ARCH_WUDANG
+ select MISC_DEVICES
+ select I2C
+ help
+ Configure this option to specify whether the board used
+ for development is a Wudang EVM
+
+config MACH_HIK
+ bool "HIK FH8810 board"
+ default n
+ depends on ARCH_FH8810
+ select MISC_DEVICES
+ select I2C
+ help
+ Configure this option to specify whether the board used
+ for development is a HIK EVM
+
+config MACH_FH8830_QFN
+ bool "FH8830 QFN"
+ default n
+ depends on ARCH_FH8830
+ help
+ FH8830 QFN
+
+config MACH_FH8833_QFN56
+ bool "FH8833 QFN56"
+ default n
+ depends on ARCH_FH8833
+ help
+ FH8833 QFN56
+
+config MACH_ZY2
+ bool "FullHan ZY2 board"
+ default y
+ depends on ARCH_ZY2
+ select MISC_DEVICES
+ select I2C
+ help
+ Configure this option to specify the whether the board used
+ for development is ZY2
+
+config MACH_FH_NAND
+ bool "USE NAND FLASH"
+ default n
+ help
+ use NAND FLASH
+
+config JLINK_DEBUG
+ bool "Use jlink to debug kernel."
+
+endmenu
+
+endif
diff --git a/arch/arm/mach-fh/Makefile b/arch/arm/mach-fh/Makefile
new file mode 100644
index 00000000..7354c1a4
--- /dev/null
+++ b/arch/arm/mach-fh/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the linux kernel.
+#
+#
+
+# Common objects
+obj-y := time.o \
+ sram.o irq.o pmu.o pm.o sram.o fh_common.o fh_chipid.o
+# clk config
+obj-$(CONFIG_ARCH_FH8856) += clock.o
+# Chip specific
+obj-$(CONFIG_ARCH_FH8856) += fh8856.o
+# Board specific
+obj-$(CONFIG_ARCH_FH8856) += board-fh8856.o pinctrl.o
+obj-$(CONFIG_FH_SIMPLE_TIMER) += fh_simple_timer.o
+
+# Power Management
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_SUSPEND) += pm.o sleep.o
diff --git a/arch/arm/mach-fh/Makefile.boot b/arch/arm/mach-fh/Makefile.boot
new file mode 100644
index 00000000..f05489f0
--- /dev/null
+++ b/arch/arm/mach-fh/Makefile.boot
@@ -0,0 +1,4 @@
+ zreladdr-y := 0xA0008000
+params_phys-y := 0xA0000100
+initrd_phys-y := 0xA0800000
+
diff --git a/arch/arm/mach-fh/board-fh8856.c b/arch/arm/mach-fh/board-fh8856.c
new file mode 100644
index 00000000..fabbf378
--- /dev/null
+++ b/arch/arm/mach-fh/board-fh8856.c
@@ -0,0 +1,1258 @@
+/*
+ * Fullhan FH8856 board support
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/clk.h>
+#include <linux/i2c/at24.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/phy.h>
+#include <linux/dma-mapping.h>
+#include <linux/spi/eeprom.h>
+#include <linux/delay.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/pmu.h>
+
+#include <mach/system.h>
+#include <mach/chip.h>
+#include <mach/iomux.h>
+#include <mach/irqs.h>
+#include <mach/pmu.h>
+#include <mach/fh_dmac.h>
+#include <mach/fh_gmac.h>
+#include <mach/gpio.h>
+#include <mach/spi.h>
+#include <mach/clock.h>
+#include <mach/fh_rtc_v2.h>
+#include <mach/pinctrl.h>
+#include <mach/fh_wdt.h>
+#include <mach/fhmci.h>
+#include <mach/board_config.h>
+#include <mach/fh_efuse_plat.h>
+#include <crypto/if_alg.h>
+#include <mach/pmu.h>
+#include <mach/fh_usb.h>
+#include <mach/fh_i2s.h>
+
+
+static struct map_desc fh8856_io_desc[] = {
+ {
+ .virtual = VA_RAM_REG_BASE,
+ .pfn = __phys_to_pfn(RAM_BASE),
+ .length = SZ_16K,
+ .type = MT_MEMORY,
+ },
+ {
+ .virtual = VA_DDRC_REG_BASE,
+ .pfn = __phys_to_pfn(DDRC_REG_BASE),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = VA_INTC_REG_BASE,
+ .pfn = __phys_to_pfn(INTC_REG_BASE),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = VA_TIMER_REG_BASE,
+ .pfn = __phys_to_pfn(TIMER_REG_BASE),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = VA_PMU_REG_BASE,
+ .pfn = __phys_to_pfn(PMU_REG_BASE),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = VA_UART0_REG_BASE,
+ .pfn = __phys_to_pfn(UART0_REG_BASE),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = VA_UART1_REG_BASE,
+ .pfn = __phys_to_pfn(UART1_REG_BASE),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ },
+};
+
+
+static struct resource fh_gpio0_resources[] = {
+ {
+ .start = GPIO0_REG_BASE,
+ .end = GPIO0_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+
+ {
+ .start = GPIO0_IRQ,
+ .end = GPIO0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource fh_gpio1_resources[] = {
+ {
+ .start = GPIO1_REG_BASE,
+ .end = GPIO1_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+
+ {
+ .start = GPIO1_IRQ,
+ .end = GPIO1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+
+static struct resource fh_uart0_resources[] = {
+ {
+ .start = (UART0_REG_BASE),
+ .end = (UART0_REG_BASE) + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+
+ {
+ .start = UART0_IRQ,
+ .end = UART0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource fh_uart1_resources[] = {
+ {
+ .start = (UART1_REG_BASE),
+ .end = (UART1_REG_BASE) + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+
+ {
+ .start = UART1_IRQ,
+ .end = UART1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource fh_dma_resources[] = {
+ {
+ .start = (DMAC_REG_BASE),
+ .end = (DMAC_REG_BASE) + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+
+ {
+ .start = DMAC0_IRQ,
+ .end = DMAC0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+static struct resource fh_i2c_resources_0[] = {
+ {
+ .start = I2C0_REG_BASE,
+ .end = I2C0_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+
+ {
+ .start = I2C0_IRQ,
+ .end = I2C0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+static struct resource fh_i2c_resources_1[] = {
+ {
+ .start = I2C1_REG_BASE,
+ .end = I2C1_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+
+ {
+ .start = I2C1_IRQ,
+ .end = I2C1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource fh_sdc0_resources[] = {
+ {
+ .start = SDC0_REG_BASE,
+ .end = SDC0_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = SDC0_IRQ,
+ .end = SDC0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+static struct resource fh_sdc1_resources[] = {
+ {
+ .start = SDC1_REG_BASE,
+ .end = SDC1_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = SDC1_IRQ,
+ .end = SDC1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+static struct resource fh_wdt_resources[] = {
+ {
+ .start = WDT_REG_BASE,
+ .end = WDT_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = WDT_IRQ,
+ .end = WDT_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource fh_spi0_resources[] = {
+ {
+ .start = SPI0_REG_BASE,
+ .end = SPI0_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "fh spi0 mem",
+ },
+ {
+ .start = SPI0_IRQ,
+ .end = SPI0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ .name = "fh spi0 irq",
+ },
+};
+
+static struct resource fh_spi1_resources[] = {
+ {
+ .start = SPI1_REG_BASE,
+ .end = SPI1_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "fh spi1 mem",
+ },
+ {
+ .start = SPI1_IRQ,
+ .end = SPI1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ .name = "fh spi1 irq",
+ },
+};
+
+static struct resource fh_spi2_resources[] = {
+ {
+ .start = SPI2_REG_BASE,
+ .end = SPI2_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "fh spi2 mem",
+ },
+ {
+ .start = SPI2_IRQ,
+ .end = SPI2_IRQ,
+ .flags = IORESOURCE_IRQ,
+ .name = "fh spi2 irq",
+ },
+};
+
+
+
+static struct resource fh_gmac_resources[] = {
+ {
+ .start = GMAC_REG_BASE,
+ .end = GMAC_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+
+ {
+ .start = GMAC_IRQ,
+ .end = GMAC_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource fh_pwm_resources[] = {
+ {
+ .start = PWM_REG_BASE,
+ .end = PWM_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = PWM_IRQ,
+ .end = PWM_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource fh_sadc_resources[] = {
+ {
+ .start = SADC_REG_BASE,
+ .end = SADC_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "fh sadc mem",
+ },
+ {
+ .start = SADC_IRQ,
+ .end = SADC_IRQ,
+ .flags = IORESOURCE_IRQ,
+ .name = "fh sadc irq",
+ },
+};
+
+static struct resource fh_aes_resources[] = {
+ {
+ .start = AES_REG_BASE,
+ .end = AES_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "fh aes mem",
+ },
+ {
+ .start = AES_IRQ,
+ .end = AES_IRQ,
+ .flags = IORESOURCE_IRQ,
+ .name = "fh aes irq",
+ },
+};
+
+static struct resource fh_rtc_resources[] = {
+ {
+ .start = RTC_REG_BASE,
+ .end = RTC_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = RTC_IRQ,
+ .end = RTC_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+
+static struct resource fh_efuse_resources[] = {
+ {
+ .start = EFUSE_REG_BASE,
+ .end = EFUSE_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+
+};
+
+static void fh_gmac_early_init(struct fh_gmac_platform_data *plat_data)
+{
+}
+
+static void fh_gmac_plat_init(struct fh_gmac_platform_data *plat_data)
+{
+ u32 reg;
+
+ if (plat_data->interface == PHY_INTERFACE_MODE_RMII) {
+ reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
+ reg |= 0x7000000;
+ fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
+
+ fh_pmu_set_reg(REG_PMU_SWRST_AHB_CTRL, 0xfffdffff);
+ while (fh_pmu_get_reg(REG_PMU_SWRST_AHB_CTRL) != 0xffffffff)
+ ;
+
+ } else if (plat_data->interface == PHY_INTERFACE_MODE_MII) {
+ reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
+ reg &= ~(0x7000000);
+ reg |= 0x1000000;
+ fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
+
+ fh_pmu_set_reg(REG_PMU_SWRST_AHB_CTRL, 0xfffdffff);
+ while (fh_pmu_get_reg(REG_PMU_SWRST_AHB_CTRL) != 0xffffffff)
+ ;
+ }
+}
+
+static void fh_set_rmii_speed(int speed)
+{
+ u32 reg;
+
+ if (speed == gmac_speed_10m) {
+ reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
+ reg &= ~(0x1000000);
+ fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
+ } else {
+ reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
+ reg |= 0x1000000;
+ fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
+ }
+}
+
+static void fh_phy_reset(void)
+{
+ /*
+ * RXDV must be low during phy reset
+ */
+
+ fh_pmu_set_reg(0xe8, 0x01101030);
+
+ gpio_request(CONFIG_GPIO_EMACPHY_RESET, "phy_reset");
+ gpio_request(CONFIG_GPIO_EMACPHY_RXDV, "phy_rxdv");
+
+ gpio_direction_output(CONFIG_GPIO_EMACPHY_RESET, 0);
+ mdelay(10);
+ gpio_direction_output(CONFIG_GPIO_EMACPHY_RESET, 1);
+ msleep(150);
+
+ gpio_free(CONFIG_GPIO_EMACPHY_RESET);
+ gpio_free(CONFIG_GPIO_EMACPHY_RXDV);
+
+ fh_pmu_set_reg(0xe8, 0x00101030);
+
+}
+
+static struct fh_gmac_platform_data fh_gmac_data = {
+ .early_init = fh_gmac_early_init,
+ .plat_init = fh_gmac_plat_init,
+ .set_rmii_speed = fh_set_rmii_speed,
+ .phy_reset = fh_phy_reset,
+ .phyid = -1,
+};
+
+static const char *const fh_gpio0_names[] = {
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3",
+ "GPIO4", "GPIO5", "GPIO6", "GPIO7",
+ "GPIO8", "GPIO9", "GPIO10", "GPIO11",
+ "GPIO12", "GPIO13", "GPIO14", "GPIO15",
+ "GPIO16", "GPIO17", "GPIO18", "GPIO19",
+ "GPIO20", "GPIO21", "GPIO22", "GPIO23",
+ "GPIO24", "GPIO25", "GPIO26", "GPIO27",
+ "GPIO28", "GPIO29", "GPIO30", "GPIO31",
+};
+
+static const char *const fh_gpio1_names[] = {
+ "GPIO32", "GPIO33", "GPIO34", "GPIO35",
+ "GPIO36", "GPIO37", "GPIO38", "GPIO39",
+ "GPIO40", "GPIO41", "GPIO42", "GPIO43",
+ "GPIO44", "GPIO45", "GPIO46", "GPIO47",
+ "GPIO48", "GPIO49", "GPIO50", "GPIO51",
+ "GPIO52", "GPIO53", "GPIO54", "GPIO55",
+ "GPIO56", "GPIO57", "GPIO58", "GPIO59",
+ "GPIO60", "GPIO61", "GPIO62", "GPIO63",
+};
+
+static struct fh_gpio_chip fh_gpio0_chip = {
+ .chip = {
+ .owner = THIS_MODULE,
+ .label = "FH_GPIO0",
+ .base = 0,
+ .ngpio = 32,
+ .names = fh_gpio0_names,
+ },
+};
+
+static struct fh_gpio_chip fh_gpio1_chip = {
+ .chip = {
+ .owner = THIS_MODULE,
+ .label = "FH_GPIO1",
+ .base = 32,
+ .ngpio = 32,
+ .names = fh_gpio1_names,
+ },
+};
+
+
+static void fh_wdt_pause(void)
+{
+ unsigned int reg;
+
+ reg = fh_pmu_get_reg(REG_PMU_WDT_CTRL);
+ reg |= 0x100;
+ fh_pmu_set_reg(REG_PMU_WDT_CTRL, reg);
+
+ printk(KERN_INFO "wdt pause\n");
+}
+
+static void fh_wdt_resume(void)
+{
+ unsigned int reg;
+
+ reg = fh_pmu_get_reg(REG_PMU_WDT_CTRL);
+ reg &= ~(0x100);
+ fh_pmu_set_reg(REG_PMU_WDT_CTRL, reg);
+}
+
+static irqreturn_t fh_wdt_intr(void *pri)
+{
+ struct fh_wdt_t *fh_wdt = (struct fh_wdt_t *)pri;
+ struct fh_wdt_platform_data *plat_data =
+ (struct fh_wdt_platform_data *)fh_wdt->plat_data;
+ struct fh_wdt_platform_reset *priv =
+ (struct fh_wdt_platform_reset *)plat_data->plat_info;
+ unsigned int spi0_cs_pin = priv->spi0_cs_pin;
+ unsigned int spi0_rst_bit = priv->spi0_rst_bit;
+ unsigned int sd0_rst_bit = priv->sd0_rst_bit;
+ unsigned int uart0_rst_bit = priv->uart0_rst_bit;
+
+ gpio_request(spi0_cs_pin, "spi0_cs0");
+ gpio_direction_output(spi0_cs_pin, 1);
+
+ fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, ~BIT(spi0_rst_bit));
+ fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, ~BIT(uart0_rst_bit));
+ fh_pmu_set_reg(REG_PMU_SWRST_AHB_CTRL, ~BIT(sd0_rst_bit));
+
+ fh_pmu_stop();
+
+ /* Dead Loop! Prevent Other IRQ's Reentrancy!*/
+ while (1)
+ ;
+ return IRQ_HANDLED;
+}
+
+static int fh_buswd(u32 slot_id)
+{
+ return 4;
+}
+
+static int sd_init(unsigned int slot_id, void *data, void *v)
+{
+ u32 reg;
+
+ reg = slot_id ? 0xfffffffd : 0xfffffffb;
+ fh_pmu_set_reg(REG_PMU_SWRST_AHB_CTRL, reg);
+ while (fh_pmu_get_reg(REG_PMU_SWRST_AHB_CTRL) != 0xffffffff)
+ ;
+ return 0;
+}
+
+static unsigned int __maybe_unused
+fh_mci_sys_card_detect_fixed(struct fhmci_host *host)
+{
+ return 0;
+}
+
+static unsigned int __maybe_unused
+fh_mci_sys_read_only_fixed(struct fhmci_host *host)
+{
+ return 0;
+}
+
+struct fh_mci_board fh_mci = {
+ .init = sd_init,
+#ifdef CONFIG_SD_CD_FIXED
+ .get_cd = fh_mci_sys_card_detect_fixed,
+#endif
+ .num_slots = 1,
+ .bus_hz = 50000000,
+ .detect_delay_ms = 200,
+ .get_bus_wd = fh_buswd,
+ .caps = MMC_CAP_4_BIT_DATA
+ | MMC_CAP_SD_HIGHSPEED
+ | MMC_CAP_MMC_HIGHSPEED
+ | MMC_CAP_NEEDS_POLL
+ /* | MMC_CAP_SDIO_IRQ */,
+};
+
+struct fh_mci_board fh_mci_sd = {
+ .init = sd_init,
+ .num_slots = 1,
+ .bus_hz = 50000000,
+ .detect_delay_ms = 200,
+ .get_bus_wd = fh_buswd,
+ .caps = MMC_CAP_4_BIT_DATA
+ | MMC_CAP_SD_HIGHSPEED
+ | MMC_CAP_MMC_HIGHSPEED
+ | MMC_CAP_NEEDS_POLL
+ /* | MMC_CAP_SDIO_IRQ */,
+};
+
+static struct fh_dma_platform_data fh_dma_data = {
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+ .nr_channels = 8,
+};
+
+static struct at24_platform_data at24c02 = {
+ .byte_len = SZ_2K / 8,
+ .page_size = 8,
+ .flags = AT24_FLAG_TAKE8ADDR,
+};
+
+static struct platform_device fh_gmac_device = {
+ .name = "fh_gmac",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_gmac_resources),
+ .resource = fh_gmac_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &fh_gmac_data,
+ },
+};
+
+static struct platform_device fh_gpio0_device = {
+ .name = GPIO_NAME,
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_gpio0_resources),
+ .resource = fh_gpio0_resources,
+ .dev = {
+ .platform_data = &fh_gpio0_chip,
+ },
+};
+
+static struct platform_device fh_gpio1_device = {
+ .name = GPIO_NAME,
+ .id = 1,
+ .num_resources = ARRAY_SIZE(fh_gpio1_resources),
+ .resource = fh_gpio1_resources,
+ .dev = {
+ .platform_data = &fh_gpio1_chip,
+ },
+};
+
+struct platform_device fh_sd0_device = {
+ .name = "fh_mci",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_sdc0_resources),
+ .resource = fh_sdc0_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &fh_mci_sd,
+ }
+};
+
+struct platform_device fh_sd1_device = {
+ .name = "fh_mci",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(fh_sdc1_resources),
+ .resource = fh_sdc1_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &fh_mci,
+ }
+};
+
+static struct platform_device fh_uart0_device = {
+ .name = "ttyS",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_uart0_resources),
+ .resource = fh_uart0_resources,
+};
+
+static struct platform_device fh_uart1_device = {
+ .name = "ttyS",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(fh_uart1_resources),
+ .resource = fh_uart1_resources,
+};
+
+static struct platform_device fh_dma_device = {
+ .name = "fh_dmac",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_dma_resources),
+ .resource = fh_dma_resources,
+ .dev.platform_data = &fh_dma_data,
+};
+
+static struct platform_device fh_i2c0_device = {
+ .name = "fh_i2c",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_i2c_resources_0),
+ .resource = fh_i2c_resources_0,
+};
+
+static struct platform_device fh_i2c1_device = {
+ .name = "fh_i2c",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(fh_i2c_resources_1),
+ .resource = fh_i2c_resources_1,
+};
+
+static struct i2c_board_info __initdata fh_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("24c02", 0x50),
+ .platform_data = &at24c02,
+ },
+ {
+ I2C_BOARD_INFO("pcf8563", 0x51)
+ }
+};
+
+static struct resource fh_i2s_resources[] = {
+ {
+ .start = I2S_REG_BASE,
+ .end = I2S_REG_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = I2S0_IRQ,
+ .end = I2S0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int i2s_clk_config(int div_val)
+{
+ int reg;
+
+ if (div_val & 0xffff0000)
+ return -EINVAL;
+
+ reg = fh_pmu_get_reg(REG_PMU_CLK_DIV6); /*config I2S clk div*/
+ reg &= ~0x3fff;
+ reg |= div_val;
+ fh_pmu_set_reg(REG_PMU_CLK_DIV6, reg);
+
+ /* i2s_clk switch to PLLVCO */
+ reg = fh_pmu_get_reg(REG_PAD_PWR_SEL);
+ reg |= 1<<6;
+ fh_pmu_set_reg(REG_PAD_PWR_SEL, reg);
+
+ return 0;
+}
+
+static struct fh_i2s_platform_data fh_i2s_data = {
+ .dma_capture_channel = 2,
+ .dma_playback_channel = 3,
+ .dma_master = 0,
+ .clk_config = i2s_clk_config,
+};
+
+static struct platform_device fh_i2s_device = {
+ .name = "fh_audio",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_i2s_resources),
+ .resource = fh_i2s_resources,
+ .dev = {
+ .platform_data = &fh_i2s_data,
+ },
+};
+
+
+#define FH_SPI0_CS0 (54)
+#define FH_SPI0_CS1 (55)
+
+#define FH_SPI1_CS0 (56)
+#define FH_SPI1_CS1 (57)
+
+#define SPI0_FIFO_DEPTH (128)
+#define SPI0_CLK_IN (100000000)
+#define SPI0_MAX_SLAVE_NO (2)
+#define SPI0_DMA_RX_CHANNEL (0)
+#define SPI0_DMA_TX_CHANNEL (1)
+
+#define SPI1_FIFO_DEPTH (64)
+#define SPI1_CLK_IN (100000000)
+#define SPI1_MAX_SLAVE_NO (2)
+#define SPI1_DMA_RX_CHANNEL (2)
+#define SPI1_DMA_TX_CHANNEL (3)
+
+#define SPI2_FIFO_DEPTH (64)
+#define SPI2_CLK_IN (100000000)
+/* SPI_TRANSFER_USE_DMA */
+
+static struct fh_spi_platform_data fh_spi0_data = {
+ .apb_clock_in = SPI0_CLK_IN,
+ .fifo_len = SPI0_FIFO_DEPTH,
+ .slave_max_num = SPI0_MAX_SLAVE_NO,
+ .cs_data[0].GPIO_Pin = FH_SPI0_CS0,
+ .cs_data[0].name = "spi0_cs0",
+ .cs_data[1].GPIO_Pin = FH_SPI0_CS1,
+ .cs_data[1].name = "spi0_cs1",
+ .dma_transfer_enable = 0,
+ .rx_handshake_num = 2,
+ .tx_handshake_num = 3,
+ .rx_dma_channel = SPI0_DMA_RX_CHANNEL,
+ .tx_dma_channel = SPI0_DMA_TX_CHANNEL,
+ .clk_name = "spi0_clk",
+ .max_speed_support = 50000000,
+ .spidma_xfer_mode = TX_RX_MODE,
+ .data_reg_offset = 0,
+ /*dma use inc mode could move data by burst mode...
+ or move data use single mode with low efficient*/
+ .data_increase_support = 0,
+ .data_field_size = 0,
+ .ctl_wire_support = ONE_WIRE_SUPPORT | DUAL_WIRE_SUPPORT |\
+ MULTI_WIRE_SUPPORT,
+ .swap_support = SWAP_SUPPORT,
+
+};
+
+static struct fh_spi_platform_data fh_spi1_data = {
+ .apb_clock_in = SPI1_CLK_IN,
+ .fifo_len = SPI1_FIFO_DEPTH,
+ .slave_max_num = SPI1_MAX_SLAVE_NO,
+ .cs_data[0].GPIO_Pin = FH_SPI1_CS0,
+ .cs_data[0].name = "spi1_cs0",
+ .cs_data[1].GPIO_Pin = FH_SPI1_CS1,
+ .cs_data[1].name = "spi1_cs1",
+ .dma_transfer_enable = 0,
+ .rx_handshake_num = 4,
+ .tx_handshake_num = 5,
+ .rx_dma_channel = SPI1_DMA_RX_CHANNEL,
+ .tx_dma_channel = SPI1_DMA_TX_CHANNEL,
+ .clk_name = "spi1_clk",
+ .max_speed_support = 25000000,
+ .data_reg_offset = 0x60,
+ .data_increase_support = 0,
+};
+
+static struct fh_spi_platform_data fh_spi2_data = {
+ .apb_clock_in = SPI2_CLK_IN,
+ .fifo_len = SPI2_FIFO_DEPTH,
+ .dma_transfer_enable = 0,
+ .rx_handshake_num = 12,
+ .tx_handshake_num = 13,
+ .clk_name = "spi2_clk",
+};
+
+static struct fh_rtc_platform_data fh_rtc_data = {
+ .clock_in = 32768,
+ .dev_name = "rtc",
+ .clk_name = "rtc_clk",
+ .base_year = 2000,
+ .base_month = 1,
+ .base_day = 1,
+ .sadc_channel = -1,
+};
+
+
+static struct fh_wdt_platform_reset fh_plat_rst_info = {
+ .spi0_cs_pin = FH_SPI0_CS0,
+ .spi0_rst_bit = SPI0_RSTN_BIT,
+ .sd0_rst_bit = SDC0_HRSTN_BIT,
+ .uart0_rst_bit = UART0_RSTN_BIT,
+};
+
+static struct fh_wdt_platform_data fh_wdt_data = {
+ .pause = fh_wdt_pause,
+ .resume = fh_wdt_resume,
+ .intr = fh_wdt_intr,
+ .plat_info = &fh_plat_rst_info,
+};
+
+
+struct platform_device fh_wdt_device = {
+ .name = "fh_wdt",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_wdt_resources),
+ .resource = fh_wdt_resources,
+ .dev = {
+ .platform_data = &fh_wdt_data,
+ }
+};
+
+
+static struct platform_device fh_spi0_device = {
+ .name = "fh_spi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_spi0_resources),
+ .resource = fh_spi0_resources,
+ .dev = {
+ .platform_data = &fh_spi0_data,
+ },
+};
+
+static struct platform_device fh_spi1_device = {
+ .name = "fh_spi",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(fh_spi1_resources),
+ .resource = fh_spi1_resources,
+ .dev = {
+ .platform_data = &fh_spi1_data,
+ },
+};
+
+
+static struct platform_device fh_spi2_device = {
+ .name = "fh_spi_slave",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_spi2_resources),
+ .resource = fh_spi2_resources,
+ .dev = {
+ .platform_data = &fh_spi2_data,
+ },
+};
+
+static struct platform_device fh_pwm_device = {
+ .name = "fh_pwm",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_pwm_resources),
+ .resource = fh_pwm_resources,
+
+};
+
+static struct platform_device fh_pinctrl_device = {
+ .name = "fh_pinctrl",
+ .id = 0,
+};
+
+static struct platform_device fh_sadc_device = {
+ .name = "fh_sadc",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_sadc_resources),
+ .resource = fh_sadc_resources,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+
+static struct platform_device fh_aes_device = {
+ .name = "fh_aes",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_aes_resources),
+ .resource = fh_aes_resources,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+
+static struct platform_device fh_rtc_device = {
+ .name = "fh_rtc",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_rtc_resources),
+ .resource = fh_rtc_resources,
+ .dev = {
+ .platform_data = &fh_rtc_data,
+ },
+};
+
+struct fh_efuse_platform_data fh_efuse_plat_data = {
+ .efuse_support_flag = CRYPTO_CPU_SET_KEY |
+ CRYPTO_EX_MEM_SET_KEY |
+ CRYPTO_EX_MEM_SWITCH_KEY |
+ CRYPTO_EX_MEM_4_ENTRY_1_KEY |
+ CRYPTO_EX_MEM_INDEP_POWER,
+};
+static struct platform_device fh_efuse_device = {
+ .name = "fh_efuse",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fh_efuse_resources),
+ .resource = fh_efuse_resources,
+ .dev = {
+ .platform_data = &fh_efuse_plat_data,
+ },
+};
+
+
+/*
+ * fh8856 usb board config
+ * add 2016/12/20
+ *
+ */
+#define S3C64XX_PA_USBHOST USBC_REG_BASE
+#define IRQ_UHOST USBC_IRQ
+#define S3C_PA_OTG S3C64XX_PA_USBHOST
+#define IRQ_OTG IRQ_UHOST
+#define S3C64XX_SZ_USBHOST SZ_1M
+#define S3C_SZ_OTG SZ_1M
+
+static void fh_usb_utmi_rst(void)
+{
+ uint32_t pmu_reg = 0;
+
+ pmu_reg = fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL);
+ pmu_reg &= ~(0x2);
+ fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, pmu_reg);
+ pmu_reg = fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL);
+ mdelay(1);
+
+ pmu_reg |= 0x2;
+ fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, pmu_reg);
+ pmu_reg = fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL);
+ msleep(20);
+}
+
+static void fh_usb_phy_rst(void)
+{
+ uint32_t pmu_reg = 0;
+
+ pmu_reg = fh_pmu_get_reg(REG_PMU_USB_SYS);
+ pmu_reg |= (0x11);
+ fh_pmu_set_reg(REG_PMU_USB_SYS, pmu_reg);
+ mdelay(1);
+
+ pmu_reg = fh_pmu_get_reg(REG_PMU_USB_SYS);
+ pmu_reg &= (~0x11);
+ fh_pmu_set_reg(REG_PMU_USB_SYS, pmu_reg);
+}
+
+/*1:normal mode 0:sleep mode*/
+
+static void fh_usb_resume(void)
+{
+ uint32_t pmu_reg = 0;
+
+ pmu_reg = fh_pmu_get_reg(REG_PMU_USB_SYS);
+ pmu_reg |= (0x1<<24);
+ fh_pmu_set_reg(REG_PMU_USB_SYS, pmu_reg);
+ mdelay(1);
+}
+
+/*usb vbus power on*/
+static void fh_usb_pwr_on(void)
+{
+ uint32_t pwron_gpio = 0;
+ uint32_t pmu_reg = 0;
+
+ pmu_reg = fh_pmu_get_reg(REG_PMU_USB_SYS1);
+ pmu_reg &= (~(1<<10));
+ fh_pmu_set_reg(REG_PMU_USB_SYS1, pmu_reg);
+ pwron_gpio = 2;
+
+#if defined(CONFIG_FH_HOST_ONLY)
+ gpio_request(pwron_gpio, "usb_pwren");
+ gpio_direction_output(pwron_gpio, 1);
+ mdelay(1);
+ gpio_free(pwron_gpio);
+#endif
+
+#if defined(CONFIG_FH_DEVICE_ONLY)
+ gpio_request(pwron_gpio, "usb_pwren");
+ gpio_direction_output(pwron_gpio, 0);
+ mdelay(1);
+ gpio_free(pwron_gpio);
+#endif
+}
+
+struct fh_usb_platform_data fh_usb_plat_data = {
+ .utmi_rst = fh_usb_utmi_rst,
+ .phy_rst = fh_usb_phy_rst,
+ .power_on = fh_usb_pwr_on,
+ .hcd_resume = fh_usb_resume,
+ .grxfsiz_pwron_val = 0xA00,
+ .gnptxfsiz_pwron_val = 0xA000A00,
+};
+
+
+static struct resource s3c_usb_otghcd_resource[] = {
+ {
+ .start = S3C_PA_OTG,
+ .end = S3C_PA_OTG + S3C_SZ_OTG - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_OTG,
+ .end = IRQ_OTG,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static u64 fh_usb_otghcd_dmamask = 0xffffffffUL;
+struct platform_device fh_device_usb_otghcd = {
+ .name = "fh_otg",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s3c_usb_otghcd_resource),
+ .resource = s3c_usb_otghcd_resource,
+ .dev = {
+ .dma_mask = &fh_usb_otghcd_dmamask,
+ .coherent_dma_mask = 0xffffffffUL,
+ .platform_data = &fh_usb_plat_data,
+ }
+};
+
+static struct platform_device *fh8856_devices[] __initdata = {
+ &fh_gmac_device,
+ &fh_uart0_device,
+ &fh_uart1_device,
+ &fh_dma_device,
+ &fh_i2c0_device,
+ &fh_i2c1_device,
+ &fh_sd0_device,
+ &fh_sd1_device,
+ &fh_spi0_device,
+ &fh_spi1_device,
+ &fh_spi2_device,
+ &fh_gpio0_device,
+ &fh_gpio1_device,
+ &fh_wdt_device,
+ &fh_pwm_device,
+ &fh_pinctrl_device,
+ &fh_sadc_device,
+ &fh_aes_device,
+ &fh_rtc_device,
+ &fh_device_usb_otghcd,
+ &fh_efuse_device,
+ &fh_i2s_device,
+};
+
+static struct mtd_partition fh_sf_parts[] = {
+ {
+ /* head & Ramboot */
+ .name = "bootstrap",
+ .offset = 0,
+ .size = SZ_256K,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ /* Ramboot & U-Boot environment */
+ .name = "uboot-env",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_64K,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ /* U-Boot */
+ .name = "uboot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_256K,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ .name = "kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_4M,
+ .mask_flags = 0,
+ }, {
+ .name = "rootfs",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_8M,
+ .mask_flags = 0,
+ }, {
+ .name = "app",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0,
+ }
+ /* mtdparts=
+ * spi_flash:256k(bootstrap),
+ * 64k(u-boot-env),
+	 * 256k(u-boot),4M(kernel),
+ * 8M(rootfs),
+ * -(app) */
+ /* two blocks with bad block table (and mirror) at the end */
+};
+#ifdef CONFIG_MACH_FH_NAND
+static struct mtd_partition fh_sf_nand_parts[] = {
+ {
+ /* head & Ramboot */
+ .name = "bootstrap",
+ .offset = 0,
+ .size = SZ_256K,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ .name = "uboot-env",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_256K,
+ .mask_flags = MTD_WRITEABLE,
+ }, {
+ .name = "uboot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_256K,
+ .mask_flags = MTD_WRITEABLE,
+ }, {
+ .name = "kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_4M,
+ .mask_flags = 0,
+ }, {
+ .name = "rootfs",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_8M,
+ .mask_flags = 0,
+ }, {
+ .name = "app",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0,
+ }
+ /* mtdparts=
+	 * spi0.0:256k(bootstrap),
+	 * 256k(u-boot-env),
+	 * 256k(u-boot),
+ * 4M(kernel),
+ * 8M(rootfs),
+ * -(app) */
+ /* two blocks with bad block table (and mirror) at the end */
+};
+#endif
+
+static struct flash_platform_data fh_flash_platform_data = {
+ .name = "spi_flash",
+ .parts = fh_sf_parts,
+ .nr_parts = ARRAY_SIZE(fh_sf_parts),
+};
+
+#ifdef CONFIG_MACH_FH_NAND
+static struct flash_platform_data fh_nandflash_platform_data = {
+ .name = "spi_nandflash",
+ .parts = fh_sf_nand_parts,
+ .nr_parts = ARRAY_SIZE(fh_sf_nand_parts),
+};
+#endif
+
+static struct spi_board_info fh_spi_devices[] = {
+#ifdef CONFIG_MACH_FH_NAND
+ {
+ .modalias = "spi-nand",
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 50000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &fh_nandflash_platform_data,
+ },
+#endif
+ {
+ .modalias = "m25p80",
+ .bus_num = 0,
+ .chip_select = 0,
+ .mode = SPI_MODE_3,
+ .max_speed_hz = 50000000,
+ .platform_data = &fh_flash_platform_data,
+ },
+
+};
+static void __init fh8856_map_io(void)
+{
+ iotable_init(fh8856_io_desc, ARRAY_SIZE(fh8856_io_desc));
+}
+
+
+static __init void fh8856_board_init(void)
+{
+ printk(KERN_INFO "%s board init\n", fh_get_chipname());
+ platform_add_devices(fh8856_devices, ARRAY_SIZE(fh8856_devices));
+ i2c_register_board_info(1, fh_i2c_devices, ARRAY_SIZE(fh_i2c_devices));
+ spi_register_board_info(fh_spi_devices, ARRAY_SIZE(fh_spi_devices));
+ fh_clk_procfs_init();
+ fh_pmu_init();
+}
+
+static void __init fh8856_init_early(void)
+{
+ fh_clk_init();
+ fh_pinctrl_init(VA_PMU_REG_BASE + 0x80);
+}
+
+MACHINE_START(FH8856, "FH8856")
+ .boot_params = DDR_BASE + 0x100,
+ .map_io = fh8856_map_io,
+ .init_irq = fh_intc_init,
+ .timer = &fh_timer,
+ .init_machine = fh8856_board_init,
+ .init_early = fh8856_init_early,
+MACHINE_END
+
+MACHINE_START(FH8852, "FH8852")
+ .boot_params = DDR_BASE + 0x100,
+ .map_io = fh8856_map_io,
+ .init_irq = fh_intc_init,
+ .timer = &fh_timer,
+ .init_machine = fh8856_board_init,
+ .init_early = fh8856_init_early,
+MACHINE_END
diff --git a/arch/arm/mach-fh/clock.c b/arch/arm/mach-fh/clock.c
new file mode 100644
index 00000000..2e8a162b
--- /dev/null
+++ b/arch/arm/mach-fh/clock.c
@@ -0,0 +1,790 @@
+/*
+ * Clock and PLL control for FH devices
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <asm/bitops.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <mach/hardware.h>
+#include <asm/uaccess.h>
+#include <linux/miscdevice.h>
+#include <mach/clock.h>
+#include <mach/clock.h>
+#include <linux/platform_device.h>
+#include <mach/pmu.h>
+
+#define PROC_FILE "driver/clock"
+
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+static DEFINE_SPINLOCK(clocks_lock);
+
+struct proc_dir_entry *proc_file;
+
+
+#define DIVVCO_ONE_DEVISION 0x0
+#define DIVVCO_TWO_DEVISION 0x8
+#define DIVVCO_FOUR_DEVISION 0xc
+#define DIVVCO_EIGHT_DEVISION 0xd
+#define DIVVCO_SIXTEEN_DEVISION 0xe
+#define DIVVCO_THIRTYTWO_DEVISION 0xf
+
+
+/*#define FH_CLK_DEBUG*/
+
+#if defined(FH_CLK_DEBUG)
+ #define PRINT_CLK_DBG(fmt, args...) \
+ do { \
+ printk("FH_CLK_DEBUG: "); \
+ printk(fmt, ##args); \
+ } while (0)
+#else
+ #define PRINT_CLK_DBG(fmt, args...) \
+ do { \
+ } while (0)
+#endif
+
+
+void clk_set_clk_sel(unsigned int reg)
+{
+ fh_pmu_set_reg(REG_PMU_CLK_SEL, reg);
+}
+EXPORT_SYMBOL(clk_set_clk_sel);
+
+unsigned int clk_get_clk_sel(void)
+{
+ return fh_pmu_get_reg(REG_PMU_CLK_SEL);
+}
+EXPORT_SYMBOL(clk_get_clk_sel);
+
+#ifdef FH_CLOCK_DEBUG
+static void __clk_sel_ddr_clk(int source)
+{
+ unsigned int clk_sel;
+ int shift = 24;
+ clk_sel = clk_get_clk_sel();
+ clk_sel &= ~(0x1 << shift);
+ clk_sel |= (source & 0x1) << shift;
+ clk_set_clk_sel(clk_sel);
+}
+
+static void __clk_sel_pix_clk(int source)
+{
+ unsigned int clk_sel;
+ int shift = 4;
+ clk_sel = clk_get_clk_sel();
+ clk_sel &= ~(0x3 << shift);
+ clk_sel |= (source & 0x3) << shift;
+ clk_set_clk_sel(clk_sel);
+}
+
+static void __clk_sel_ac_clk(int source)
+{
+ unsigned int clk_sel;
+ int shift = 0;
+ clk_sel = clk_get_clk_sel();
+ clk_sel &= ~(0x1 << shift);
+ clk_sel |= (source & 0x1) << shift;
+ clk_set_clk_sel(clk_sel);
+}
+#endif
+
+
+static void fh_clk_enable(struct clk *clk)
+{
+ unsigned int reg;
+
+ if (clk->flag & CLOCK_NOGATE) {
+ PRINT_CLK_DBG("%s, %s has no gate register\n", __func__, clk->name);
+ return;
+ }
+
+ reg = fh_pmu_get_reg(clk->en_reg_offset);
+ PRINT_CLK_DBG("%s, clk: %s, reg: 0x%x\n", __func__, clk->name, reg);
+ reg &= ~(clk->en_reg_mask);
+ fh_pmu_set_reg(clk->en_reg_offset, reg);
+ PRINT_CLK_DBG("%s, clk: %s, after mask: 0x%x\n", __func__, clk->name, reg);
+}
+
+static void fh_clk_disable(struct clk *clk)
+{
+ unsigned int reg;
+
+ if (clk->flag & CLOCK_NOGATE) {
+ PRINT_CLK_DBG("%s, %s has no gate register\n", __func__, clk->name);
+ return;
+ }
+
+ reg = fh_pmu_get_reg(clk->en_reg_offset);
+ reg |= clk->en_reg_mask;
+ fh_pmu_set_reg(clk->en_reg_offset, reg);
+
+ PRINT_CLK_DBG("%s, clk: %s, reg: 0x%x\n", __func__, clk->name, reg);
+}
+
+static int fh_clk_get_sel(struct clk *clk)
+{
+ unsigned int reg, shift;
+ int ret;
+
+ if (!(clk->flag & CLOCK_MULTI_PARENT))
+ return 0;
+
+ shift = ffs(clk->sel_reg_mask) - 1;
+ reg = fh_pmu_get_reg(clk->sel_reg_offset);
+ reg &= clk->sel_reg_mask;
+ ret = reg >> shift;
+ PRINT_CLK_DBG("%s, clk: %s, sel: %d\n", __func__, clk->name, ret);
+
+ return ret;
+}
+
+static void fh_clk_set_sel(struct clk *clk, int sel)
+{
+ unsigned int reg, shift;
+
+ if (!(clk->flag & CLOCK_MULTI_PARENT)) {
+ PRINT_CLK_DBG("%s, clk: %s has only one parent\n", __func__, clk->name);
+ return;
+ }
+
+ clk->select = sel;
+ shift = ffs(clk->sel_reg_mask) - 1;
+ reg = fh_pmu_get_reg(clk->sel_reg_offset);
+ reg &= ~(clk->sel_reg_mask);
+ reg |= (sel << shift);
+ fh_pmu_set_reg(clk->sel_reg_offset, reg);
+ PRINT_CLK_DBG("%s, clk: %s, select: %d, reg: 0x%x\n", __func__, clk->name, sel,
+ reg);
+}
+
+static unsigned long fh_clk_get_pll_rate(struct clk *clk)
+{
+ unsigned int reg, m, n, od, no = 1, i;
+
+ reg = fh_pmu_get_reg(clk->div_reg_offset);
+ m = reg & 0xff;
+ n = (reg >> 8) & 0xf;
+ od = (reg >> 16) & 0x3;
+
+ for(i=0; i<od; i++)
+ no *= 2;
+
+ clk->frequency = OSC_FREQUENCY * m / n / no;
+
+ return clk->frequency;
+}
+
+static unsigned long fh_clk_get_pll_p_rate(struct clk *clk)
+{
+ unsigned int reg, m, n, p, r = 1;
+ unsigned int clk_vco, divvcop = 1, shift;
+
+ reg = fh_pmu_get_reg(clk->div_reg_offset);
+
+ m = reg & 0x7f;
+ n = (reg >> 8) & 0x1f;
+ p = (reg >> 16) & 0x3f;
+ r = (reg >> 24) & 0x3f;
+
+ /*pll databook*/
+ if (m<4)
+ m=128+m;
+
+ if (m==0xb)
+ m=0xa;
+
+ shift = ffs(clk->en_reg_mask)-1;
+ reg = fh_pmu_get_reg(clk->en_reg_offset);
+
+ switch ((reg&clk->en_reg_mask)>>shift){
+ case DIVVCO_ONE_DEVISION:
+ divvcop=1;
+ break;
+
+ case DIVVCO_TWO_DEVISION:
+ divvcop=2;
+ break;
+
+ case DIVVCO_FOUR_DEVISION:
+ divvcop=4;
+ break;
+
+ case DIVVCO_EIGHT_DEVISION:
+ divvcop=8;
+ break;
+
+ case DIVVCO_SIXTEEN_DEVISION:
+ divvcop=16;
+ break;
+
+ case DIVVCO_THIRTYTWO_DEVISION:
+ divvcop=32;
+ break;
+ default:
+ printk("divvcop error:%x\n",divvcop);
+ }
+
+ clk_vco = OSC_FREQUENCY * m / (n+1);
+ clk->frequency = clk_vco/ (p+1)/divvcop;
+ return clk->frequency;
+}
+
+static unsigned long fh_clk_get_pll_r_rate(struct clk *clk)
+{	/* PLL "R" output rate: OSC * M / (N+1) / (R+1) / vco-divider */
+	unsigned int reg, m, n, p, r = 1;
+	unsigned int clk_vco, divvcor = 1, shift;
+
+	reg = fh_pmu_get_reg(clk->div_reg_offset);
+
+	m = reg & 0x7f;
+	n = (reg >> 8) & 0x1f;
+	p = (reg >> 16) & 0x3f;
+	r = (reg >> 24) & 0x3f;
+
+	/*pll databook*/
+	if (m < 4)
+		m = 128 + m;
+
+	if (m == 0xb)
+		m = 0xa;
+
+	shift = ffs(clk->en_reg_mask) - 1;
+	reg = fh_pmu_get_reg(clk->en_reg_offset);
+
+	switch ((reg & clk->en_reg_mask) >> shift) {
+	case DIVVCO_ONE_DEVISION:
+		divvcor = 1;
+		break;
+
+	case DIVVCO_TWO_DEVISION:
+		divvcor = 2;
+		break;
+
+	case DIVVCO_FOUR_DEVISION:
+		divvcor = 4;
+		break;
+
+	case DIVVCO_EIGHT_DEVISION:
+		divvcor = 8;
+		break;
+
+	case DIVVCO_SIXTEEN_DEVISION:
+		divvcor = 16;
+		break;
+
+	case DIVVCO_THIRTYTWO_DEVISION:
+		divvcor = 32;
+		break;
+	default:
+		printk("divvcor error:%x\n", (reg & clk->en_reg_mask) >> shift); /* was "divvcop" and printed divvcor, always 1 here */
+	}
+
+
+	clk_vco = OSC_FREQUENCY * m / (n + 1);
+	clk->frequency = clk_vco / (r + 1) / divvcor;
+	return clk->frequency;
+}
+
+static int fh_clk_get_div(struct clk *clk)
+{
+ unsigned int reg, shift;
+ int ret;
+
+ if (clk->flag & (CLOCK_NODIV | CLOCK_FIXED))
+ return 0;
+
+ shift = ffs(clk->div_reg_mask) - 1;
+ reg = fh_pmu_get_reg(clk->div_reg_offset);
+ PRINT_CLK_DBG("%s, clk: %s, reg: 0x%x\n", __func__, clk->name, reg);
+ reg &= clk->div_reg_mask;
+ PRINT_CLK_DBG("%s, clk: %s, shift: %d, after mask: 0x%x\n", __func__, clk->name,
+ shift, reg);
+ ret = reg >> shift;
+ PRINT_CLK_DBG("%s, clk: %s, div: %d\n", __func__, clk->name, ret);
+ PRINT_CLK_DBG("%s, clk: %s, div_mask: 0x%x, div_offset: 0x%x\n",
+ __func__, clk->name, clk->div_reg_mask, clk->div_reg_offset);
+
+ return ret;
+}
+
+static void fh_clk_set_div(struct clk *clk, int div)
+{
+ unsigned int reg, shift;
+
+ if (clk->flag & CLOCK_NODIV) {
+ PRINT_CLK_DBG("%s, clk: %s has no divide\n", __func__, clk->name);
+ return;
+ }
+
+ shift = ffs(clk->div_reg_mask) - 1;
+
+ if(div > clk->div_reg_mask >> shift) {
+ pr_err("%s, clk: %s, curr div %d is too big, max is %d\n",
+ __func__, clk->name, div, clk->div_reg_mask >> shift);
+ return;
+ }
+
+ clk->divide = div;
+
+ reg = fh_pmu_get_reg(clk->div_reg_offset);
+ PRINT_CLK_DBG("%s, clk: %s, reg: 0x%x\n", __func__, clk->name, reg);
+ reg &= ~(clk->div_reg_mask);
+ reg |= (div << shift);
+ PRINT_CLK_DBG("%s, clk: %s, shift: %d, after mask: 0x%x\n", __func__, clk->name,
+ shift, reg);
+ fh_pmu_set_reg(clk->div_reg_offset, reg);
+ PRINT_CLK_DBG("%s, clk: %s, div: %d, reg: 0x%x\n", __func__, clk->name, div,
+ reg);
+ PRINT_CLK_DBG("%s, clk: %s, div_mask: 0x%x, div_offset: 0x%x\n",
+ __func__, clk->name, clk->div_reg_mask, clk->div_reg_offset);
+
+}
+
+unsigned long fh_clk_get_rate(struct clk *clk)
+{
+ if (clk->flag & CLOCK_FIXED) {
+ PRINT_CLK_DBG("%s, clk: %s is fixed clock, rate: %lu\n", __func__, clk->name,
+ clk->frequency);
+ return clk->frequency;
+ }
+
+ if (clk->flag & CLOCK_PLL) {
+ PRINT_CLK_DBG("%s, clk: %s is a PLL clock\n", __func__, clk->name);
+ return fh_clk_get_pll_rate(clk);
+ }
+
+ if (clk->flag & CLOCK_PLL_P) {
+ PRINT_CLK_DBG("%s, clk: %s is a PLL clock\n", __func__, clk->name);
+ return fh_clk_get_pll_p_rate(clk);
+ }
+
+ if (clk->flag & CLOCK_PLL_R) {
+ PRINT_CLK_DBG("%s, clk: %s is a PLL clock\n", __func__, clk->name);
+ return fh_clk_get_pll_r_rate(clk);
+ }
+
+ if (clk->flag & CLOCK_CIS && (0 == fh_clk_get_sel(clk))) {
+ clk->frequency = OSC_FREQUENCY;
+ return clk->frequency;
+ }
+
+ clk->select = fh_clk_get_sel(clk);
+ clk->divide = fh_clk_get_div(clk) + 1;
+
+ if (clk->select > CLOCK_MAX_PARENT) {
+ pr_err("ERROR, %s, clk: %s, select is not correct, clk->select: %d\n", __func__,
+ clk->name, clk->select);
+ return 0;
+ }
+
+ if (!clk->parent[clk->select]) {
+ pr_err("ERROR, %s, clk: %s has no parent and is not a fixed clock\n", __func__,
+ clk->name);
+ return 0;
+ }
+
+ clk->frequency = clk->parent[clk->select]->frequency / clk->prediv;
+ clk->frequency /= clk->divide;
+
+ PRINT_CLK_DBG("%s, clk: %s, rate: %lu\n", __func__, clk->name, clk->frequency);
+
+ return clk->frequency;
+}
+
+void fh_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ if (clk->flag & CLOCK_FIXED) {
+ pr_err("%s, clk: %s is fixed clock, rate: %lu\n", __func__, clk->name,
+ clk->frequency);
+ return;
+ }
+
+ if (clk->flag & (CLOCK_PLL | CLOCK_PLL_R | CLOCK_PLL_P)) {
+ pr_err("%s, clk: %s is a PLL clock, changing frequency is not recommended\n",
+ __func__, clk->name);
+ return;
+ }
+ if (clk->select > CLOCK_MAX_PARENT) {
+ pr_err("ERROR, %s, clk: %s, select is not correct, clk->select: %d\n", __func__,
+ clk->name, clk->select);
+ return;
+ }
+
+ if (!clk->parent[clk->select]) {
+ pr_err("ERROR, %s, clk: %s has no parent and is not a fixed clock\n", __func__,
+ clk->name);
+ return;
+ }
+ if (clk->flag & CLOCK_CIS) {
+
+ if (rate == OSC_FREQUENCY) {
+ fh_clk_set_sel(clk, 0);
+ clk->frequency = rate;
+ return;
+ } else
+ fh_clk_set_sel(clk, 1);
+ }
+
+ clk->frequency = clk->parent[clk->select]->frequency / clk->prediv;
+ clk->divide = clk->frequency / rate;
+ PRINT_CLK_DBG("%s, clk: %s, set rate: %lu, divide: %d\n", __func__, clk->name,
+ rate, clk->divide);
+ fh_clk_set_div(clk, clk->divide - 1);
+
+ clk->frequency = rate;
+
+ PRINT_CLK_DBG("%s, clk: %s, rate: %lu\n", __func__, clk->name, clk->frequency);
+}
+
+void fh_clk_reset(struct clk *clk)
+{
+ unsigned int reg;
+
+ if (clk->flag & CLOCK_NORESET) {
+ pr_err("%s, clk: %s has no reset\n", __func__, clk->name);
+ return;
+ }
+
+ reg = 0xffffffff & ~(clk->rst_reg_mask);
+
+ fh_pmu_set_reg(clk->rst_reg_offset, reg);
+ while (fh_pmu_get_reg(clk->rst_reg_offset) != 0xffffffff) {
+
+ }
+ PRINT_CLK_DBG("%s, clk: %s has been reset\n", __func__, clk->name);
+}
+
+int clk_enable(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ if(clk->flag & (CLOCK_PLL_R | CLOCK_PLL_P))
+ fh_clk_disable(clk);
+ else
+ fh_clk_enable(clk);
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (clk == NULL || IS_ERR(clk))
+ return;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ if (clk->flag & (CLOCK_PLL_R | CLOCK_PLL_P))
+ fh_clk_enable(clk);
+ else
+ fh_clk_disable(clk);
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ unsigned long flags, rate;
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+
+ rate = fh_clk_get_rate(clk);
+
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ return rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long flags, real_rate;
+	int ret = -EINVAL;
+
+	if (clk == NULL || IS_ERR(clk))
+		return ret;
+
+	spin_lock_irqsave(&clocks_lock, flags);
+	fh_clk_set_rate(clk, rate);
+	real_rate = fh_clk_get_rate(clk); /* NOT clk_get_rate(): it takes clocks_lock again and would self-deadlock */
+	if (rate != real_rate)
+		printk("WARN: set clk %s to %ld, but get %ld\n", clk->name, rate, real_rate);
+
+	spin_unlock_irqrestore(&clocks_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+void clk_reset(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (clk == NULL || IS_ERR(clk))
+ return;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ fh_clk_reset(clk);
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+EXPORT_SYMBOL(clk_reset);
+
+void clk_change_parent(struct clk *clk, int select)
+{
+ unsigned long flags;
+
+ if (clk == NULL || IS_ERR(clk))
+ return;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ fh_clk_set_sel(clk, select);
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+EXPORT_SYMBOL(clk_change_parent);
+
+int clk_register(struct clk *clk)
+{
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ if (WARN(clk->parent[clk->select] && !clk->parent[clk->select]->frequency,
+ "CLK: %s parent %s has no rate!\n",
+ clk->name, clk->parent[clk->select]->name))
+ return -EINVAL;
+
+ clk_get_rate(clk);
+
+ PRINT_CLK_DBG("clk: %s has been registered, div: %d, sel: %d\n",
+ clk->name, clk->divide, clk->select);
+
+ mutex_lock(&clocks_mutex);
+ list_add_tail(&clk->list, &clocks);
+ mutex_unlock(&clocks_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_register);
+
+void clk_unregister(struct clk *clk)
+{
+ if (clk == NULL || IS_ERR(clk))
+ return;
+
+ mutex_lock(&clocks_mutex);
+ list_del(&clk->list);
+ mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clk_unregister);
+
+
+static void del_char(char* str, char ch)
+{
+ char *p = str;
+ char *q = str;
+ while (*q) {
+ if (*q != ch) {
+ *p++ = *q;
+ }
+ q++;
+ }
+ *p = '\0';
+}
+
+static ssize_t fh_clk_proc_write(struct file *filp, const char *buf, size_t len, loff_t *off)
+{	/* proc interface, input format: "<clk name>,<enable|disable>,<rate>" */
+	int i, ret;
+	char message[64] = {0};
+	char * const delim = ",";
+	char *cur = message;
+	char *param_str[4];
+	unsigned int param[4];
+	struct clk *clk;
+
+	len = (len > sizeof(message) - 1) ? sizeof(message) - 1 : len; /* keep a NUL so strsep() cannot run past the buffer */
+
+	if (copy_from_user(message, buf, len))
+		return -EFAULT;
+
+	for (i = 0; i < 3; i++) {
+		param_str[i] = strsep(&cur, delim);
+		if (!param_str[i]) {
+			pr_err("%s: ERROR: parameter[%d] is empty\n", __func__, i);
+			pr_err("[clk name], [enable/disable], [clk rate]\n");
+			return -EINVAL;
+		} else {
+			del_char(param_str[i], ' ');
+			del_char(param_str[i], '\n');
+		}
+	}
+
+	clk = clk_get(NULL, param_str[0]);
+	if (!clk) {
+		pr_err("%s: ERROR: clk %s is not found\n", __func__, param_str[0]);
+		pr_err("[clk name], [enable/disable], [clk rate]\n");
+		return -EINVAL;
+	}
+
+	param[2] = (u32)simple_strtoul(param_str[2], &cur, 10);
+	if (cur == param_str[2]) { /* unsigned value is never < 0; verify digits were parsed instead */
+		pr_err("ERROR: parameter[2] is incorrect\n");
+		return -EINVAL;
+	}
+
+	ret = clk_set_rate(clk, param[2]);
+	if (ret) {
+		pr_err("set clk rate failed, ret=%d\n", ret); /* was "failed\n, ret=%d\n": stray newline split the message */
+	}
+
+	if (!strcmp(param_str[1], "enable")) {
+		clk_enable(clk);
+		printk("clk %s enabled\n", param_str[0]);
+	}
+	else if (!strcmp(param_str[1], "disable")) {
+		clk_disable(clk);
+		printk(KERN_ERR "clk %s disabled\n", param_str[0]);
+	} else {
+		pr_err("%s: ERROR: parameter[1]:%s is incorrect\n",
+		       __func__, param_str[1]);
+		pr_err("[clk name], [enable/disable], [clk rate]\n");
+		return -EINVAL;
+	}
+
+	return len;
+}
+
+
+static void *v_seq_start(struct seq_file *s, loff_t *pos)
+{
+ static unsigned long counter = 0;
+ if (*pos == 0)
+ return &counter;
+ else {
+ *pos = 0;
+ return NULL;
+ }
+}
+
+static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void v_seq_stop(struct seq_file *s, void *v)
+{
+
+}
+
+static int v_seq_show(struct seq_file *sfile, void *v)
+{
+
+ struct clk_lookup *clock_lookup;
+ struct clk *clk;
+ unsigned long rate;
+ unsigned int reg;
+ char gate[10] = {0};
+ seq_printf(sfile, "\nPLL Information: \n");
+ for (clock_lookup = fh_clks; clock_lookup->clk; clock_lookup++) {
+ clk = clock_lookup->clk;
+
+ if (clk->flag & CLOCK_HIDE)
+ continue;
+
+ rate = clk_get_rate(clk);
+ if (!(clk->flag & CLOCK_NOGATE)) {
+ reg = fh_pmu_get_reg(clk->en_reg_offset);
+ reg &= clk->en_reg_mask;
+ if (reg) {
+ if (clk->flag & (CLOCK_PLL_P|CLOCK_PLL_R))
+ strncpy(gate, "enable", sizeof(gate));
+ else
+ strncpy(gate, "disable", sizeof(gate));
+ } else {
+ if (clk->flag & (CLOCK_PLL_P|CLOCK_PLL_R))
+ strncpy(gate, "disable", sizeof(gate));
+ else
+ strncpy(gate, "enable", sizeof(gate));
+ }
+ } else {
+ strncpy(gate, "nogate", sizeof(gate));
+ }
+ seq_printf(sfile, "\t%-20s \t%9luHZ \t%-10s\n",
+ clk->name, rate, gate);
+}
+ return 0;
+}
+
+static const struct seq_operations fh_clk_seq_ops = {
+ .start = v_seq_start,
+ .next = v_seq_next,
+ .stop = v_seq_stop,
+ .show = v_seq_show
+};
+
+static int fh_clk_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fh_clk_seq_ops);
+}
+
+
+static struct file_operations fh_clk_proc_ops = {
+ .owner = THIS_MODULE,
+ .open = fh_clk_proc_open,
+ .read = seq_read,
+ .write = fh_clk_proc_write,
+ .release = seq_release,
+};
+
+int __init fh_clk_procfs_init(void)
+{
+ proc_file = create_proc_entry(PROC_FILE, 0644, NULL);
+ if (proc_file)
+ proc_file->proc_fops = &fh_clk_proc_ops;
+ else
+ pr_err("clock, create proc fs failed\n");
+
+ return 0;
+}
+
+int __init fh_clk_init(void)
+{
+ struct clk_lookup *clock_lookup;
+ struct clk *clk;
+ size_t num_clocks = 0;
+
+ for (clock_lookup = fh_clks; clock_lookup->clk; clock_lookup++) {
+ clk = clock_lookup->clk;
+ num_clocks++;
+ clk_register(clk);
+ if (clk->def_rate)
+ clk_set_rate(clk, clk->def_rate);
+ }
+ clkdev_add_table(fh_clks, num_clocks);
+ return 0;
+}
diff --git a/arch/arm/mach-fh/fh8856.c b/arch/arm/mach-fh/fh8856.c
new file mode 100644
index 00000000..a6ac7c92
--- /dev/null
+++ b/arch/arm/mach-fh/fh8856.c
@@ -0,0 +1,597 @@
+/*
+ * Fullhan FH8856 board support
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+
+#include <asm/mach/map.h>
+
+#include <mach/chip.h>
+#include <mach/irqs.h>
+#include <mach/timex.h>
+#include <mach/pmu.h>
+#include <mach/clock.h>
+#include <mach/board_config.h>
+
+/*
+ * external oscillator
+ * fixed to 24M
+ */
+static struct clk osc_clk = {
+ .name = "osc_clk",
+ .frequency = OSC_FREQUENCY,
+ .flag = CLOCK_FIXED,
+};
+
+/*
+ * phase-locked-loop device,
+ * generates a higher frequency clock
+ * from the external oscillator reference
+ */
+static struct clk pll0p_clk = {
+ .name = "pll0p_clk",
+ .flag = CLOCK_PLL_P,
+ .parent = {&osc_clk},
+ .div_reg_offset = REG_PMU_PLL0,
+ .en_reg_offset = REG_PMU_PLL_CTRL,
+ .en_reg_mask = 0xf00,
+};
+
+static struct clk pll0r_clk = {
+ .name = "pll0r_clk",
+ .flag = CLOCK_PLL_R,
+ .parent = {&osc_clk},
+ .div_reg_offset = REG_PMU_PLL0,
+ .en_reg_offset = REG_PMU_PLL_CTRL,
+ .en_reg_mask = 0xf000,
+};
+
+#if 0
+static struct clk pll1p_clk = {
+ .name = "pll1p_clk",
+ .flag = CLOCK_PLL_P,
+ .parent = {&osc_clk},
+ .div_reg_offset = REG_PMU_PLL1,
+ .en_reg_offset = REG_PMU_PLL_CTRL,
+ .en_reg_mask = 0xf000000,
+};
+#endif
+
+static struct clk pll1r_clk = {
+ .name = "pll1r_clk",
+ .flag = CLOCK_PLL_R,
+ .parent = {&osc_clk},
+ .div_reg_offset = REG_PMU_PLL1,
+ .en_reg_offset = REG_PMU_PLL_CTRL,
+ .en_reg_mask = 0xf0000000,
+};
+
+static struct clk pll2p_clk = {
+ .name = "pll2p_clk",
+ .flag = CLOCK_PLL_P,
+ .parent = {&osc_clk},
+ .div_reg_offset = REG_PMU_PLL2,
+ .en_reg_offset = REG_PMU_PLL2_CTRL,
+ .en_reg_mask = 0xf00,
+};
+
+#if 0
+static struct clk pll2r_clk = {
+ .name = "pll2r_clk",
+ .flag = CLOCK_PLL_R,
+ .parent = {&osc_clk},
+ .div_reg_offset = REG_PMU_PLL2,
+ .en_reg_offset = REG_PMU_PLL2_CTRL,
+ .en_reg_mask = 0xf000,
+};
+#endif
+/*
+ * pll0r
+ */
+static struct clk arm_clk = {
+ .name = "arm_clk",
+ .flag = CLOCK_NOGATE,
+ .parent = {&pll0r_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV0,
+ .div_reg_mask = 0xf,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x1,
+};
+
+static struct clk ahb_clk = {
+ .name = "ahb_clk",
+ .flag = CLOCK_NORESET|CLOCK_NOGATE,
+ .parent = {&pll0r_clk},
+ .prediv = 2,
+ .div_reg_offset = REG_PMU_CLK_DIV0,
+ .div_reg_mask = 0xf0000,
+};
+static struct clk arc_clk = {
+ .name = "arc_clk",
+ .flag = 0,
+ .parent = {&pll0r_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV0,
+ .div_reg_mask = 0xf0000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x4,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x400000,
+};
+
+static struct clk pae_arc_clk = {
+ .name = "pae_arc_clk",
+ .flag = 0,
+ .parent = {&pll0r_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV0,
+ .div_reg_mask = 0xf0000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x8,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x400000,
+};
+
+static struct clk cis_clk_out = {
+ .name = "cis_clk_out",
+ .flag = CLOCK_NORESET,
+ .parent = {&pll0r_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV1,
+ .div_reg_mask = 0xff0000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x800000,
+};
+
+/*
+ * pll1r
+ */
+static struct clk ddr_clk = {
+ .name = "ddr_clk",
+ .parent = {&pll1r_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV1,
+ .div_reg_mask = 0xf,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x40,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x8,
+};
+/*
+ * pll0p
+ */
+
+static struct clk isp_aclk = {
+ .name = "isp_aclk",
+ .flag = CLOCK_NORESET,
+ .parent = {&pll0p_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV0,
+ .div_reg_mask = 0xf00,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x1,
+ .def_rate = CONFIG_ISP_CLK_RATE,
+};
+
+static struct clk jpeg_clk = {
+ .name = "jpeg_clk",
+ .flag = CLOCK_NORESET|CLOCK_NODIV,
+ .parent = {&isp_aclk},
+ .prediv = 1,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x1,
+};
+
+static struct clk vpu_clk = {
+ .name = "vpu_clk",
+ .flag = CLOCK_NORESET|CLOCK_NODIV,
+ .parent = {&isp_aclk},
+ .prediv = 1,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x1,
+};
+
+static struct clk bgm_clk = {
+ .name = "bgm_clk",
+ .flag = CLOCK_NORESET,
+ .parent = {&pll0p_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV0,
+ .div_reg_mask = 0xf00,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x40000,
+};
+
+static struct clk mipi_dphy_clk = {
+ .name = "mipi_dphy_clk",
+ .flag = CLOCK_NORESET|CLOCK_NODIV,
+ .parent = {&osc_clk},
+ .prediv = 1,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x100000,
+};
+
+static struct clk pix_clk = {
+ .name = "pix_clk",
+ .flag = CLOCK_NORESET,
+ .parent = {&pll0p_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV2,
+ .div_reg_mask = 0xf000000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x400000,
+};
+
+static struct clk pae_clk = {
+ .name = "pae_clk",
+ .flag = CLOCK_NORESET|CLOCK_MULTI_PARENT|CLOCK_HIDE,
+ .parent = {&pll0p_clk, &pll2p_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV0,
+ .div_reg_mask = 0x7000000,
+ .sel_reg_offset = REG_PMU_CLK_SEL,
+ .sel_reg_mask = 0x2,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x10,
+ .def_rate = CONFIG_PAE_CLK_RATE,
+};
+
+static struct clk ddrc_a1clk = {
+ .name = "ddrc_a1clk",
+ .flag = CLOCK_NORESET|CLOCK_NODIV,
+ .parent = {&pae_clk},
+ .prediv = 1,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x20,
+};
+
+/*
+ * pll2p
+ */
+
+static struct clk hevc_aclk = {
+ .name = "hevc_aclk",
+ .flag = CLOCK_MULTI_PARENT|CLOCK_HIDE,
+ .parent = {&pll0p_clk, &pll2p_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV7,
+ .div_reg_mask = 0x700,
+ .sel_reg_offset = REG_PMU_CLK_SEL,
+ .sel_reg_mask = 0x1,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x20000000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x1000000,
+ .def_rate = CONFIG_HEVC_CLK_RATE,
+};
+
+static struct clk hevc_bclk = {
+ .name = "hevc_bclk",
+ .flag = CLOCK_MULTI_PARENT|CLOCK_HIDE,
+ .parent = {&pll0p_clk , &pll2p_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV7,
+ .div_reg_mask = 0x70000,
+ .sel_reg_offset = REG_PMU_CLK_SEL,
+ .sel_reg_mask = 0x1,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x40000000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x2000000,
+ .def_rate = CONFIG_HEVC_CLK_RATE,
+};
+
+static struct clk hevc_cclk = {
+ .name = "hevc_cclk",
+ .flag = CLOCK_MULTI_PARENT|CLOCK_HIDE,
+ .parent = {&pll0p_clk, &pll2p_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV7,
+ .div_reg_mask = 0x7000000,
+ .sel_reg_offset = REG_PMU_CLK_SEL,
+ .sel_reg_mask = 0x1,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x80000000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x4000000,
+ .def_rate = CONFIG_HEVC_CLK_RATE,
+};
+
+
+static struct clk pll0_div12_dw_clk = {
+ .name = "pll0_div12_dw_clk",
+ .flag = CLOCK_NORESET|CLOCK_NOGATE,
+ .parent = {&pll0p_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV5,
+ .div_reg_mask = 0xf000000,
+};
+
+static struct clk sdc0_clk = {
+ .name = "sdc0_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 2,
+ .div_reg_offset = REG_PMU_CLK_DIV3,
+ .div_reg_mask = 0xf00,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x200,
+ .rst_reg_offset = REG_PMU_SWRST_AHB_CTRL,
+ .rst_reg_mask = 0x4,
+};
+
+static struct clk sdc1_clk = {
+ .name = "sdc1_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 2,
+ .div_reg_offset = REG_PMU_CLK_DIV3,
+ .div_reg_mask = 0xf000000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x400,
+ .rst_reg_offset = REG_PMU_SWRST_AHB_CTRL,
+ .rst_reg_mask = 0x2,
+};
+
+static struct clk spi0_clk = {
+ .name = "spi0_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV3,
+ .div_reg_mask = 0xff,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x80,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x100,
+};
+
+static struct clk spi1_clk = {
+ .name = "spi1_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV3,
+ .div_reg_mask = 0xff0000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x100,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x200,
+};
+
+static struct clk spi2_clk = {
+ .name = "spi2_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV3,
+ .div_reg_mask = 0xf000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x2,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x100000,
+};
+
+static struct clk eth_clk = {
+ .name = "eth_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV6,
+ .div_reg_mask = 0xf000000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x12000000,
+ .rst_reg_offset = REG_PMU_SWRST_AHB_CTRL,
+ .rst_reg_mask = 0x20000,
+};
+
+static struct clk i2c0_clk = {
+ .name = "i2c0_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV4,
+ .div_reg_mask = 0x3f0000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x1000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x400,
+};
+
+static struct clk i2c1_clk = {
+ .name = "i2c1_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV4,
+ .div_reg_mask = 0x3f000000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x8000000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x800,
+};
+
+static struct clk pwm_clk = {
+ .name = "pwm_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV5,
+ .div_reg_mask = 0xff,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x10000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x80,
+ .def_rate = 50000000,
+};
+
+static struct clk uart0_clk = {
+ .name = "uart0_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV4,
+ .div_reg_mask = 0x1f,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x2000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x4000,
+ .def_rate = 16666666,
+};
+
+static struct clk uart1_clk = {
+ .name = "uart1_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV4,
+ .div_reg_mask = 0x1f00,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x4000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x8000,
+ .def_rate = 16666666,
+};
+
+static struct clk pts_clk = {
+ .name = "pts_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV2,
+ .div_reg_mask = 0x1ff,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x80000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x20000,
+ .def_rate = 1000000,
+};
+
+static struct clk efuse_clk = {
+ .name = "efuse_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV1,
+ .div_reg_mask = 0x3f000000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x200000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x800000,
+};
+
+static struct clk tmr0_clk = {
+ .name = "tmr0_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV5,
+ .div_reg_mask = 0xff0000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x20000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x40000,
+};
+
+static struct clk sadc_clk = {
+ .name = "sadc_clk",
+ .parent = {&pll0_div12_dw_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV6,
+ .div_reg_mask = 0x7f0000,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x4000000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x10000,
+};
+
+static struct clk ac_clk = {
+ .name = "ac_clk",
+ .parent = {&osc_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV6,
+ .div_reg_mask = 0x3f,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x800,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x1000,
+};
+
+static struct clk i2s_clk = {
+ .name = "i2s_clk",
+ .parent = {&ac_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV6,
+ .div_reg_mask = 0x3f00,
+ .en_reg_offset = REG_PMU_CLK_GATE,
+ .en_reg_mask = 0x1000000,
+ .rst_reg_offset = REG_PMU_SWRST_MAIN_CTRL,
+ .rst_reg_mask = 0x2000,
+};
+
+static struct clk wdt_clk = {
+ .name = "wdt_clk",
+ .flag = CLOCK_NOGATE,
+ .parent = {&ahb_clk},
+ .prediv = 1,
+ .div_reg_offset = REG_PMU_CLK_DIV5,
+ .div_reg_mask = 0xff00,
+ .rst_reg_offset = REG_PMU_SWRST_APB_CTRL,
+ .rst_reg_mask = 0x100000,
+ .def_rate = 1000000,
+};
+
+struct clk_lookup fh_clks[] = {
+ CLK(NULL, "osc_clk", &osc_clk),
+ CLK(NULL, "pll0p_clk", &pll0p_clk),
+ CLK(NULL, "pll0r_clk", &pll0r_clk),
+ CLK(NULL, "pll1r_clk", &pll1r_clk),
+ CLK(NULL, "pll2p_clk", &pll2p_clk),
+
+ CLK(NULL, "arm_clk", &arm_clk),
+ CLK(NULL, "arc_clk", &arc_clk),
+ CLK(NULL, "pae_arc_clk", &pae_arc_clk),
+ CLK(NULL, "ahb_clk", &ahb_clk),
+
+ CLK(NULL, "ddr_clk", &ddr_clk),
+ CLK(NULL, "isp_aclk", &isp_aclk),
+ CLK(NULL, "jpeg_clk", &jpeg_clk),
+ CLK(NULL, "vpu_clk", &vpu_clk),
+ CLK(NULL, "pae_clk", &pae_clk),
+ CLK(NULL, "bgm_clk", &bgm_clk),
+ CLK(NULL, "mipi_dphy_clk", &mipi_dphy_clk),
+ CLK(NULL, "ddrc_a1clk", &ddrc_a1clk),
+
+ CLK(NULL, "hevc_aclk", &hevc_aclk),
+ CLK(NULL, "hevc_bclk", &hevc_bclk),
+ CLK(NULL, "hevc_cclk", &hevc_cclk),
+ CLK(NULL, "pll0_div12_dw_clk", &pll0_div12_dw_clk),
+
+ CLK(NULL, "cis_clk_out", &cis_clk_out),
+ CLK(NULL, "pix_clk", &pix_clk),
+ CLK(NULL, "pts_clk", &pts_clk),
+
+ CLK(NULL, "spi0_clk", &spi0_clk),
+ CLK(NULL, "spi1_clk", &spi1_clk),
+ CLK(NULL, "spi2_clk", &spi2_clk),
+ CLK(NULL, "sdc0_clk", &sdc0_clk),
+ CLK(NULL, "sdc1_clk", &sdc1_clk),
+ CLK(NULL, "uart0_clk", &uart0_clk),
+ CLK(NULL, "uart1_clk", &uart1_clk),
+ CLK(NULL, "i2c0_clk", &i2c0_clk),
+ CLK(NULL, "i2c1_clk", &i2c1_clk),
+ CLK(NULL, "pwm_clk", &pwm_clk),
+ CLK(NULL, "wdt_clk", &wdt_clk),
+ CLK(NULL, "tmr0_clk", &tmr0_clk),
+ CLK(NULL, "ac_clk", &ac_clk),
+ CLK(NULL, "i2s_clk", &i2s_clk),
+ CLK(NULL, "sadc_clk", &sadc_clk),
+ CLK(NULL, "eth_clk", &eth_clk),
+ CLK(NULL, "efuse_clk", &efuse_clk),
+
+ CLK(NULL, NULL, NULL),
+};
+EXPORT_SYMBOL(fh_clks);
diff --git a/arch/arm/mach-fh/fh_chipid.c b/arch/arm/mach-fh/fh_chipid.c
new file mode 100644
index 00000000..5f2693c2
--- /dev/null
+++ b/arch/arm/mach-fh/fh_chipid.c
@@ -0,0 +1,216 @@
+/**
+ * Copyright (c) 2015-2019 Shanghai Fullhan Microelectronics Co., Ltd.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ *
+ * Change Logs:
+ * Date Author Notes
+ * 2019-08-20 wangyl add license Apache-2.0
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <mach/pmu.h>
+#include <mach/fh_chipid.h>
+
+#define CHIP_INFO(__plat_id, __chip_id, __chip_mask, chip, size) \
+ { \
+ ._plat_id = __plat_id, \
+ ._chip_id = __chip_id, \
+ ._chip_mask = __chip_mask, \
+ .chip_id = FH_CHIP_##chip, \
+ .ddr_size = size, \
+ .chip_name = #chip, \
+ },
+
+static struct fh_chip_info chip_infos[] = {
+ CHIP_INFO(0x46488302, 0x37, 0x3F, FH8632, 512)
+ CHIP_INFO(0x46488302, 0x07, 0x3F, FH8632v2, 512)
+ CHIP_INFO(0x17092901, 0xC, 0xF, FH8852, 512)
+ CHIP_INFO(0x17092901, 0xD, 0xF, FH8856, 1024)
+ CHIP_INFO(0x18112301, 0x3, 0x3, FH8626V100, 512)
+};
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+
+void fh_get_chipid(unsigned int *plat_id, unsigned int *chip_id)
+{
+ unsigned int _plat_id = 0;
+
+ _plat_id = fh_pmu_get_reg(REG_PMU_CHIP_ID);
+ if (plat_id != NULL)
+ *plat_id = _plat_id;
+
+ if (chip_id != NULL)
+ {
+ switch (_plat_id)
+ {
+ case 0x46488302:
+ *chip_id = fh_pmu_get_reg(REG_PMU_SPC_IO_STATUS);
+ break;
+ default:
+ *chip_id = fh_pmu_get_reg(REG_PMU_IP_VER);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(fh_get_chipid);
+
+#define FH_GET_CHIP_ID(plat_id, chip_id) \
+	unsigned int plat_id = 0;\
+	unsigned int chip_id = 0;\
+	fh_get_chipid(&plat_id, &chip_id)
+
+
+struct fh_chip_info *fh_get_chip_info(void)
+{
+ static struct fh_chip_info *chip_info = NULL;
+ struct fh_chip_info *info = NULL;
+	unsigned int plat_id = 0;
+	unsigned int chip_id = 0;
+ int i = 0;
+
+ if (chip_info != NULL)
+ return chip_info;
+
+ fh_get_chipid(&plat_id, &chip_id);
+
+ for (i = 0; i < ARRAY_SIZE(chip_infos); i++)
+ {
+ info = &chip_infos[i];
+ if (plat_id == info->_plat_id && (chip_id & info->_chip_mask) == info->_chip_id)
+ {
+ chip_info = info;
+ return info;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(fh_get_chip_info);
+
+unsigned int fh_get_ddrsize_mbit(void)
+{
+ struct fh_chip_info *info = fh_get_chip_info();
+
+ if (info)
+ return info->ddr_size;
+ return 0;
+}
+EXPORT_SYMBOL(fh_get_ddrsize_mbit);
+
+char *fh_get_chipname(void)
+{
+ struct fh_chip_info *info = fh_get_chip_info();
+
+ if (info)
+ return info->chip_name;
+ return "UNKNOWN";
+}
+EXPORT_SYMBOL(fh_get_chipname);
+
+#define DEFINE_FUNC_FH_IS(name, chip) \
+unsigned int fh_is_##name(void) \
+{ \
+ struct fh_chip_info *info = fh_get_chip_info(); \
+ \
+ if (info) \
+ return info->chip_id == FH_CHIP_##chip; \
+ return 0; \
+} \
+EXPORT_SYMBOL(fh_is_##name)
+
+unsigned int fh_is_8632(void)
+{
+ struct fh_chip_info *info = fh_get_chip_info();
+
+ if (info)
+ return (info->chip_id == FH_CHIP_FH8632 || \
+ info->chip_id == FH_CHIP_FH8632v2);
+ return 0;
+}
+EXPORT_SYMBOL(fh_is_8632);
+
+DEFINE_FUNC_FH_IS(8830, FH8830);
+DEFINE_FUNC_FH_IS(8852, FH8852);
+DEFINE_FUNC_FH_IS(8856, FH8856);
+DEFINE_FUNC_FH_IS(8626v100, FH8626V100);
+
+static void *v_seq_start(struct seq_file *s, loff_t *pos)
+{
+ static unsigned long counter;
+ if (*pos == 0)
+ return &counter;
+ else {
+ *pos = 0;
+ return NULL;
+ }
+}
+
+static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void v_seq_stop(struct seq_file *s, void *v)
+{
+
+}
+
+static int v_seq_show(struct seq_file *sfile, void *v)
+{
+ unsigned int plat_id = 0;
+ unsigned int chip_id = 0;
+ fh_get_chipid(&plat_id, &chip_id);
+
+ seq_printf(sfile, "chip_name\t: %s\n", fh_get_chipname());
+	seq_printf(sfile, "ddr_size\t: %uMbit\n", fh_get_ddrsize_mbit());
+ seq_printf(sfile, "plat_id\t\t: 0x%x\npkg_id\t\t: 0x%x\n",
+ plat_id, chip_id);
+ return 0;
+}
+
+static const struct seq_operations chipid_seq_ops = {
+ .start = v_seq_start,
+ .next = v_seq_next,
+ .stop = v_seq_stop,
+ .show = v_seq_show
+};
+
+static int fh_chipid_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &chipid_seq_ops);
+}
+
+static const struct file_operations fh_chipid_proc_ops = {
+ .owner = THIS_MODULE,
+ .open = fh_chipid_proc_open,
+ .read = seq_read,
+};
+
+#define FH_CHIPID_PROC_FILE "driver/chip"
+static struct proc_dir_entry *chipid_proc_file;
+
+int fh_chipid_init(void)
+{
+ chipid_proc_file = proc_create(FH_CHIPID_PROC_FILE, 0644, NULL,
+ &fh_chipid_proc_ops);
+
+ if (!chipid_proc_file) {
+ pr_err("%s: ERROR: %s proc file create failed",
+ __func__, "CHIP ID");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
diff --git a/arch/arm/mach-fh/fh_common.c b/arch/arm/mach-fh/fh_common.c
new file mode 100644
index 00000000..c40d706a
--- /dev/null
+++ b/arch/arm/mach-fh/fh_common.c
@@ -0,0 +1,33 @@
+/*
+ * fh_common.c
+ *
+ * Copyright (C) 2018 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <mach/system.h>
+
+/**
+ * fh_setscheduler - change the scheduling policy and/or RT priority of `current` thread.
+ * @policy: new policy. e.g. SCHED_RR
+ * @priority: new priority. e.g. MAX_USER_RT_PRIO-12
+ * @param: structure containing the new RT priority.
+ *
+ * NOTE that the task may be already dead.
+ */
+void fh_setscheduler(int policy, int priority)
+{
+ struct sched_param param;
+ param.sched_priority = priority;
+ sched_setscheduler(current, policy, &param);
+}
+EXPORT_SYMBOL(fh_setscheduler);
diff --git a/arch/arm/mach-fh/fh_simple_timer.c b/arch/arm/mach-fh/fh_simple_timer.c
new file mode 100644
index 00000000..ecd7a4d6
--- /dev/null
+++ b/arch/arm/mach-fh/fh_simple_timer.c
@@ -0,0 +1,237 @@
+#include <linux/module.h>
+#include <mach/fh_simple_timer.h>
+
+//#define FH_TIMER_DEBUG
+#ifdef FH_TIMER_DEBUG
+#define PRINT_DBG(fmt,args...) printk(fmt,##args)
+#else
+#define PRINT_DBG(fmt,args...) do{} while(0)
+#endif
+
+enum SIMPLE_TIMER_WORKMODE {
+ SIMPLE_TIMER_SEQ,
+ SIMPLE_TIMER_PERIOD
+};
+
+struct simple_time_base
+{
+ struct timerqueue_head simple_timer_queue;
+ int state;
+ int workmode;
+};
+struct fh_simple_timer periodic_timer;
+
+
+struct simple_time_base base;
+
+static void fh_timer_enable(void)
+{
+ SET_REG(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x3);
+}
+
+static void fh_timer_disable(void)
+{
+ SET_REG(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x0);
+}
+
+static void fh_timer_clearirq(void)
+{
+ GET_REG(VTIMER(REG_TIMER_EOI_REG(SIMPLE_TIMER_BASE)));
+}
+
+void fh_simple_timer_set_next(long cycles)
+{
+ long curr_val;
+ int sync_cnt = 0;
+
+
+	PRINT_DBG("cycles: %ld\n", cycles);
+
+ if (cycles < 0) {
+		pr_err("ERROR: cycles is invalid: %ld\n", cycles);
+ fh_timer_clearirq();
+ fh_timer_disable();
+ base.state = SIMPLE_TIMER_ERROR;
+ return;
+ }
+
+ SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x00, 0x1);
+ /* zy/ticket/100 : update apb Timer LOADCNT */
+	/* CURRENTVALUE couldn't be started from a new LOADCOUNT */
+ /* cause is timer clk 1M hz and apb is 150M hz */
+ /* check current cnt for it is disabled */
+ while (GET_REG(VTIMER(REG_TIMER_CUR_VAL(1))) != 0) {
+ sync_cnt++;
+ if (sync_cnt >= 50) {
+ /* typical cnt is 5 when in 1M timer clk */
+ /* so here use 50 to check whether it is err */
+ pr_err("timer problem,can't disable");
+ }
+ }
+ SET_REG(VTIMER(REG_TIMER_LOADCNT(SIMPLE_TIMER_BASE)), cycles);
+ SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x01, 0x1);
+#ifdef CONFIG_USE_PTS_AS_CLOCKSOURCE
+#if defined(CONFIG_ARCH_FH8632) || defined(CONFIG_ARCH_FH8833) || defined(CONFIG_ARCH_FH8810)
+ curr_val = GET_REG(VTIMER(REG_TIMER_CUR_VAL(SIMPLE_TIMER_BASE))) ;
+ if (curr_val > 0x80000000) { ///0xffff0000)
+		panic("timer curr %ld, want cycles %ld\n", curr_val, cycles);
+
+ SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x01, 0x1);
+ SET_REG(VTIMER(REG_TIMER_LOADCNT(SIMPLE_TIMER_BASE)), cycles);
+
+ //pmu reset
+ fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfffbffff);
+ while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff) {
+
+ }
+ }
+
+ fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfffbffff);
+ while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff) {
+
+ }
+#endif
+#endif
+
+}
+
+int fh_simple_timer_create(struct fh_simple_timer* new)
+{
+ timerqueue_init(&new->node);
+ new->node.expires = new->it_value;
+ timerqueue_add(&base.simple_timer_queue, &new->node);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fh_simple_timer_create);
+
+int fh_timer_start(void)
+{
+ struct fh_simple_timer *timer = NULL;
+ struct timerqueue_node *node;
+
+ if (base.state == SIMPLE_TIMER_START)
+ return 0;
+
+
+ node = timerqueue_getnext(&base.simple_timer_queue);
+
+ if(node == NULL)
+ {
+ pr_err("ERROR: timequeue is empty\n");
+ return -1;
+ }
+ base.workmode = SIMPLE_TIMER_SEQ;
+ timer = container_of(node, struct fh_simple_timer, node);
+
+ base.state = SIMPLE_TIMER_START;
+ fh_timer_enable();
+ fh_simple_timer_set_next(ktime_to_us(ktime_sub(timer->it_value, timer->it_delay)));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fh_timer_start);
+
+int fh_simple_timer_interrupt_seq(void)
+{
+ ktime_t diff;
+ struct fh_simple_timer *curr = NULL, *next = NULL;
+ struct timerqueue_node *node;
+
+ node = timerqueue_getnext(&base.simple_timer_queue);
+
+ if(node == NULL)
+ {
+ pr_err("ERROR: timequeue is empty\n");
+ fh_timer_clearirq();
+ fh_timer_disable();
+ base.state = SIMPLE_TIMER_ERROR;
+ return -1;
+ }
+
+ curr = container_of(node, struct fh_simple_timer, node);
+
+ timerqueue_del(&base.simple_timer_queue, &curr->node);
+
+ curr->function(curr->param);
+
+ node = timerqueue_getnext(&base.simple_timer_queue);
+
+ if(node == NULL)
+ {
+ PRINT_DBG("finished all timers, close device\n");
+ fh_timer_clearirq();
+ fh_timer_disable();
+ base.state = SIMPLE_TIMER_STOP;
+ return 0;
+ }
+
+ next = container_of(node, struct fh_simple_timer, node);
+
+	PRINT_DBG("sec: %ld, nsec: %ld\n",
+		ktime_to_timespec(next->it_value).tv_sec,
+		ktime_to_timespec(next->it_value).tv_nsec);
+
+ diff = ktime_sub(next->it_value, curr->it_value);
+
+ fh_simple_timer_set_next(ktime_to_us(ktime_sub(diff, next->it_delay)));
+ fh_timer_clearirq();
+ return 0;
+}
+int fh_simple_timer_interrupt_period(void)
+{
+
+ periodic_timer.function(periodic_timer.param);
+ fh_timer_clearirq();
+ return 0;
+}
+
+int fh_simple_timer_interrupt(void)
+{
+ if (base.workmode == SIMPLE_TIMER_SEQ)
+ return fh_simple_timer_interrupt_seq();
+ else
+ return fh_simple_timer_interrupt_period();
+}
+
+
+int fh_simple_timer_init(void)
+{
+ base.state = SIMPLE_TIMER_STOP;
+ timerqueue_init_head(&base.simple_timer_queue);
+ memset(&periodic_timer, 0, sizeof(periodic_timer));
+ fh_timer_disable();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fh_simple_timer_init);
+
+
+int fh_simple_timer_periodic_start(struct fh_simple_timer *tim)
+{
+
+ if (base.state == SIMPLE_TIMER_START)
+ return 0;
+
+ if (tim == NULL)
+ return 0;
+
+ periodic_timer = *tim;
+
+
+
+ base.state = SIMPLE_TIMER_START;
+ base.workmode = SIMPLE_TIMER_PERIOD;
+ fh_timer_enable();
+ fh_simple_timer_set_next(ktime_to_us(ktime_sub(periodic_timer.it_value,
+ periodic_timer.it_delay)));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fh_simple_timer_periodic_start);
+int fh_simple_timer_periodic_stop(void)
+{
+ fh_timer_disable();
+ base.state = SIMPLE_TIMER_STOP;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fh_simple_timer_periodic_stop);
+
diff --git a/arch/arm/mach-fh/include/mach/board_config.fh8852.appboard b/arch/arm/mach-fh/include/mach/board_config.fh8852.appboard
new file mode 100644
index 00000000..d63cd9b2
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/board_config.fh8852.appboard
@@ -0,0 +1,45 @@
+/*
+ * board_config.h
+ *
+ * Created on: Jan 9, 2017
+ * Author: duobao
+ */
+
+#ifndef BOARD_CONFIG_H_
+#define BOARD_CONFIG_H_
+
+/*
+ * GPIO0 -> IRCUT_ON
+ * GPIO1 -> IRCUT_OFF
+ * GPIO2 -> USB_PWREN
+ * GPIO3 -> SD1_PWREN/WIFI_REG_ON
+ * GPIO7 -> IR
+ * GPIO11 -> EMAC PHY Reset
+ * GPIO12 -> CIS_CLK
+ * GPIO13 -> CIS_RSTN
+ * GPIO14 -> CIS_PDN
+ */
+
+#define CONFIG_GPIO_EMACPHY_RESET 11
+#define CONFIG_GPIO_EMACPHY_RXDV 41
+
+#define CONFIG_GPIO_USB_PWREN 2
+
+#define CONFIG_ISP_CLK_RATE 150000000
+#define CONFIG_HEVC_CLK_RATE 200000000
+#define CONFIG_PAE_CLK_RATE 240000000
+
+#define FH_BOARD_8852
+#define CONFIG_PINCTRL_SELECT \
+ "I2C0", "MIPI", "RMII", "SD0_NO_WP", "SSI0_4BIT",\
+ "UART0", "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO7",\
+ "GPIO11", "GPIO13", "GPIO14",\
+ \
+	/* Pads that are not routed out default to GPIO */ \
+ "GPIO4", "GPIO5", "GPIO6", "GPIO8", "GPIO9", "GPIO10",\
+ "GPIO19", "GPIO20", "GPIO21", "GPIO22", "GPIO23", "GPIO24",\
+ "GPIO25", "GPIO26", "GPIO27", "GPIO28", "GPIO53",\
+ "GPIO55"
+
+
+#endif /* BOARD_CONFIG_H_ */
diff --git a/arch/arm/mach-fh/include/mach/board_config.fh8856.testboard b/arch/arm/mach-fh/include/mach/board_config.fh8856.testboard
new file mode 100644
index 00000000..1cd8c86c
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/board_config.fh8856.testboard
@@ -0,0 +1,47 @@
+/*
+ * board_config.h
+ *
+ * Created on: Jan 9, 2017
+ * Author: duobao
+ */
+
+#ifndef BOARD_CONFIG_H_
+#define BOARD_CONFIG_H_
+
+/*
+ * GPIO0 -> IRCUT_ON
+ * GPIO1 -> IRCUT_OFF
+ * GPIO2 -> USB_PWREN
+ * GPIO11 -> EMAC PHY Reset
+ * GPIO12 -> CIS_CLK
+ * GPIO13 -> CIS_RSTN
+ * GPIO14 -> CIS_PDN
+ * GPIO19 -> SD1_PWREN/WIFI_REG_ON
+ * GPIO20 -> AK7755 Reset
+ * GPIO24 -> LED0
+ * GPIO25 -> LED1
+ * GPIO26 -> Reset Configs
+ * GPIO27 -> AK7755 PowerDown
+ * GPIO28 -> IR
+ * GPIO53 -> USB_PWREN/SD0_PWREN
+ * GPIO55 -> SD1 WIFI Interrupt
+ */
+
+#define CONFIG_GPIO_EMACPHY_RESET 11
+#define CONFIG_GPIO_EMACPHY_RXDV 41
+
+#define CONFIG_GPIO_USB_PWREN 2
+
+#define CONFIG_ISP_CLK_RATE 200000000
+#define CONFIG_HEVC_CLK_RATE 300000000
+#define CONFIG_PAE_CLK_RATE 400000000
+
+#define FH_BOARD_8856
+#define CONFIG_PINCTRL_SELECT \
+ "I2C0", "I2C1", "MIPI", "RMII", "SD0_NO_WP", \
+ "SD1_NO_WP", "SSI0_4BIT", "UART0", "GPIO0", "GPIO1", \
+ "GPIO2", "GPIO3", "GPIO11", "GPIO13", "GPIO14", \
+ "GPIO19", "GPIO20", "GPIO24", "GPIO25", "GPIO26", \
+ "GPIO27", "GPIO28", "GPIO53", "GPIO55"
+
+#endif /* BOARD_CONFIG_H_ */
diff --git a/arch/arm/mach-fh/include/mach/board_config.h b/arch/arm/mach-fh/include/mach/board_config.h
new file mode 100644
index 00000000..1cd8c86c
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/board_config.h
@@ -0,0 +1,47 @@
+/*
+ * board_config.h
+ *
+ * Created on: Jan 9, 2017
+ * Author: duobao
+ */
+
+#ifndef BOARD_CONFIG_H_
+#define BOARD_CONFIG_H_
+
+/*
+ * GPIO0 -> IRCUT_ON
+ * GPIO1 -> IRCUT_OFF
+ * GPIO2 -> USB_PWREN
+ * GPIO11 -> EMAC PHY Reset
+ * GPIO12 -> CIS_CLK
+ * GPIO13 -> CIS_RSTN
+ * GPIO14 -> CIS_PDN
+ * GPIO19 -> SD1_PWREN/WIFI_REG_ON
+ * GPIO20 -> AK7755 Reset
+ * GPIO24 -> LED0
+ * GPIO25 -> LED1
+ * GPIO26 -> Reset Configs
+ * GPIO27 -> AK7755 PowerDown
+ * GPIO28 -> IR
+ * GPIO53 -> USB_PWREN/SD0_PWREN
+ * GPIO55 -> SD1 WIFI Interrupt
+ */
+
+#define CONFIG_GPIO_EMACPHY_RESET 11
+#define CONFIG_GPIO_EMACPHY_RXDV 41
+
+#define CONFIG_GPIO_USB_PWREN 2
+
+#define CONFIG_ISP_CLK_RATE 200000000
+#define CONFIG_HEVC_CLK_RATE 300000000
+#define CONFIG_PAE_CLK_RATE 400000000
+
+#define FH_BOARD_8856
+#define CONFIG_PINCTRL_SELECT \
+ "I2C0", "I2C1", "MIPI", "RMII", "SD0_NO_WP", \
+ "SD1_NO_WP", "SSI0_4BIT", "UART0", "GPIO0", "GPIO1", \
+ "GPIO2", "GPIO3", "GPIO11", "GPIO13", "GPIO14", \
+ "GPIO19", "GPIO20", "GPIO24", "GPIO25", "GPIO26", \
+ "GPIO27", "GPIO28", "GPIO53", "GPIO55"
+
+#endif /* BOARD_CONFIG_H_ */
diff --git a/arch/arm/mach-fh/include/mach/chip.h b/arch/arm/mach-fh/include/mach/chip.h
new file mode 100644
index 00000000..78198934
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/chip.h
@@ -0,0 +1,20 @@
+/*****************************************************************************
+*
+* chip.h
+*
+* Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
+* All Rights Reserved. Confidential.
+*
+* File Description:
+* Chip definition. Include the base address of each module, memory
+* address, memory size
+*
+* Modification History:
+*
+******************************************************************************/
+#ifndef _CHIP_H_
+#define _CHIP_H_
+
+#include <mach/fh8856.h>
+
+#endif
diff --git a/arch/arm/mach-fh/include/mach/clkdev.h b/arch/arm/mach-fh/include/mach/clkdev.h
new file mode 100644
index 00000000..14a50488
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/clkdev.h
@@ -0,0 +1,15 @@
+#ifndef __MACH_CLKDEV_H
+#define __MACH_CLKDEV_H
+
+struct clk;
+
+static inline int __clk_get(struct clk *clk)
+{
+ return 1;
+}
+
+static inline void __clk_put(struct clk *clk)
+{
+}
+
+#endif
diff --git a/arch/arm/mach-fh/include/mach/clock.h b/arch/arm/mach-fh/include/mach/clock.h
new file mode 100644
index 00000000..018b81c4
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/clock.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
+ * All Rights Reserved. Confidential.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_FH_CLOCK_H
+#define __ASM_ARCH_FH_CLOCK_H
+
+#include <linux/list.h>
+#include <linux/clkdev.h>
+
+#define CLOCK_MAX_PARENT 4
+
+#define OSC_FREQUENCY (24000000)
+
+#define CLOCK_FIXED (1<<0)
+#define CLOCK_NOGATE (1<<1)
+#define CLOCK_NODIV (1<<2)
+#define CLOCK_NORESET (1<<3)
+#define CLOCK_MULTI_PARENT (1<<4)
+#define CLOCK_PLL (1<<5)
+#define CLOCK_PLL_P (1<<6)
+#define CLOCK_PLL_R (1<<7)
+#define CLOCK_HIDE (1<<8)
+#define CLOCK_CIS (1<<9)
+
+
+
+#define CLK_IOCTL_MAGIC 'c'
+#define ENABLE_CLK _IOWR(CLK_IOCTL_MAGIC, 0, unsigned int)
+#define DISABLE_CLK _IOWR(CLK_IOCTL_MAGIC, 1, unsigned int)
+#define SET_CLK_RATE _IOWR(CLK_IOCTL_MAGIC, 2, unsigned int)
+#define GET_CLK_RATE _IOWR(CLK_IOCTL_MAGIC, 3, unsigned int)
+#define SET_PMU _IOWR(CLK_IOCTL_MAGIC, 4, unsigned int)
+#define GET_PMU _IOWR(CLK_IOCTL_MAGIC, 5, unsigned int)
+
+#define CLK_IOCTL_MAXNR 8
+
+
+#define CLK(dev, con, ck) \
+ { \
+ .dev_id = dev, \
+ .con_id = con, \
+ .clk = ck, \
+ }
+
+struct clk_usr {
+ char *name;
+ unsigned long frequency;
+};
+
+
+struct clk {
+ struct list_head list;
+ const char *name;
+ unsigned long frequency;
+ unsigned int flag;
+ int select;
+ struct clk *parent[CLOCK_MAX_PARENT];
+ int prediv;
+ int divide;
+ unsigned int div_reg_offset;
+ unsigned int div_reg_mask;
+ unsigned int en_reg_offset;
+ unsigned int en_reg_mask;
+ unsigned int rst_reg_offset;
+ unsigned int rst_reg_mask;
+ unsigned int sel_reg_offset;
+ unsigned int sel_reg_mask;
+ unsigned int def_rate;
+};
+
+extern int clk_register(struct clk *clk);
+extern void clk_unregister(struct clk *clk);
+
+void clk_set_clk_sel(unsigned int reg);
+unsigned int clk_get_clk_sel(void);
+
+int fh_clk_init(void);
+int fh_clk_procfs_init(void);
+int fh_clk_misc_init(void);
+
+extern struct clk_lookup fh_clks[];
+
+#endif
diff --git a/arch/arm/mach-fh/include/mach/ddrc.h b/arch/arm/mach-fh/include/mach/ddrc.h
new file mode 100644
index 00000000..b6cdb5b7
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/ddrc.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
+ * All Rights Reserved. Confidential.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef DDRC_H_
+#define DDRC_H_
+
+#define OFFSET_DENAL_CTL_31 (0x007c)
+#define OFFSET_DENAL_CTL_57 (0x00e4)
+#define OFFSET_DENAL_CTL_97 (0x0184)
+
+#define DDRC_CONTROLLER_BUSY (1 << 24)
+#define DDRC_CKE_STATUS (1 << 8)
+
+#define DDRC_LP_CMD_SELFREFRESH (10 << 8)
+#define DDRC_LP_CMD_EXITLOWPOWER (1 << 8)
+
+#define DDRC_LPI_SR_WAKEUP_TIME (3 << 24)
+#define DDRC_CKSRX_DELAY (1 << 0)
+
+#endif /* DDRC_H_ */
diff --git a/arch/arm/mach-fh/include/mach/debug-macro.S b/arch/arm/mach-fh/include/mach/debug-macro.S
new file mode 100644
index 00000000..3f542607
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/debug-macro.S
@@ -0,0 +1,50 @@
+/* linux/arch/arm/mach-fh/include/mach/debug-macro.S
+ *
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* pull in the relevant register and map files. */
+
+ /* note, for the boot process to work we have to keep the UART
+ * virtual address aligned to a 1MiB boundary for the L1
+ * mapping the head code makes. We keep the UART virtual address
+ * aligned and add in the offset when we load the value here.
+ */
+
+
+#include <linux/serial_reg.h>
+#include <mach/hardware.h>
+#include <asm/memory.h>
+#include <mach/chip.h>
+
+#include <mach/io.h>
+
+ .macro addruart, rp, rv
+ ldr \rp, =CONSOLE_REG_BASE
+ ldr \rv, =VA_CONSOLE_REG_BASE
+ .endm
+
+ .macro senduart,data,addr
+ strb \data, [\addr, #(0x00)] @ Write to Transmitter Holding Register
+ .endm
+
+ .macro waituart,data,addr
+1001: ldr \data, [\addr, #(0x14)] @ Read Status Register
+ tst \data, #(0x40) @ when TX FIFO Full, then wait
+ beq 1001b
+ .endm
+
+ .macro busyuart,data,addr
+@ stmfd r13!, {r4}
+1002:
+ ldr \data, [\addr, #(0x14)]
+ tst \data, #(0x40)
+ beq 1002b
+ .endm
+
+
+
diff --git a/arch/arm/mach-fh/include/mach/entry-macro.S b/arch/arm/mach-fh/include/mach/entry-macro.S
new file mode 100644
index 00000000..6eea8639
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/entry-macro.S
@@ -0,0 +1,31 @@
+#include <mach/io.h>
+#include <mach/irqs.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =VA_INTC_REG_BASE
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ @ check low interrupts
+ ldr \irqstat, [\base, #0x30]
+ mov \irqnr, #31
+ ands \irqstat, \irqstat, #0xffffffff
+
+ @ if no low interrupts set, check high interrupts
+ ldreq \irqstat, [\base, #0x34]
+ moveq \irqnr, #63
+ andeqs \irqstat, \irqstat, #0xffffffff
+
+ @ find first active interrupt source
+ clzne \irqstat, \irqstat
+ subne \irqnr, \irqnr, \irqstat
+ .endm
+
+ .macro irq_prio_table
+ .endm
diff --git a/arch/arm/mach-fh/include/mach/fh8852_iopad.h b/arch/arm/mach-fh/include/mach/fh8852_iopad.h
new file mode 100644
index 00000000..9642e79f
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh8852_iopad.h
@@ -0,0 +1,572 @@
+#include "pinctrl.h"
+#include "pinctrl_osdep.h"
+#include "board_config.h"
+
+/* PINCTRL_FUNC */
+PINCTRL_FUNC(GPIO19, 0, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_TX, 0, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO20, 1, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_RX, 1, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO21, 2, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(I2C1_SDA, 2, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO25, 3, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_TXD, 3, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_TXD, 3, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO26, 4, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_RXD, 4, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_RXD, 4, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO27, 5, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_CLK, 5, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_CLK, 5, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO28, 6, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 6, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_CSN, 6, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(USB_DBG_CLK, 6, FUNC3, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI0_D2, 7, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO50, 7, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C1_SDA, 7, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_TX, 7, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI0_D3, 8, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO51, 8, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C1_SCL, 8, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_RX, 8, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO22, 9, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(I2C1_SCL, 9, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO23, 10, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(AC_MCLK, 10, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO24, 11, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(USB_PWREN, 11, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(MAC_RMII_CLK, 15, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO15, 15, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CLK, 15, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM0, 15, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CLK, 15, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_CLK, 15, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_REF_CLK, 16, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(MAC_MDC, 17, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO34, 17, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_WP, 17, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(RTC_CLK, 17, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(AC_MCLK, 17, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(USB_PWREN, 17, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_MDIO, 18, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO17, 18, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(ARM_JTAG_TDO, 18, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM7, 18, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_RXD_0, 22, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO16, 22, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_0, 22, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM1, 22, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_RXD, 22, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_RXD, 22, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_RXD_1, 23, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO38, 23, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_1, 23, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM2, 23, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 23, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_CSN, 23, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_RXDV, 26, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO41, 26, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CD, 26, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM3, 26, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_TXD_0, 28, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO42, 28, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_2, 28, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM4, 28, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_TXD_1, 29, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO43, 29, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_3, 29, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM5, 29, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 29, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_CSN, 29, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_TXEN, 32, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO46, 32, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CMD_RSP, 32, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM6, 32, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_TXD, 32, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_TXD, 32, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(ARM_JTAG_TRSTN, 35, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO0, 35, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_TXD, 35, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_TXD, 35, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(AC_I2S_DO, 35, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(DW_I2S_DO, 35, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(ACIP_ADDAT, 35, FUNC6, PUPD_UP, 3);
+PINCTRL_FUNC(ARM_JTAG_TMS, 36, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO1, 36, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_RXD, 36, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_RXD, 36, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(AC_I2S_DI, 36, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(DW_I2S_DI, 36, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(ARM_JTAG_TCK, 37, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO2, 37, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_CLK, 37, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_CLK, 37, FUNC3, PUPD_DOWN, 3);
+PINCTRL_FUNC(AC_I2S_CLK, 37, FUNC4, PUPD_DOWN, 3);
+PINCTRL_FUNC(DW_I2S_CLK, 37, FUNC5, PUPD_DOWN, 3);
+PINCTRL_FUNC(ACIP_BCLK, 37, FUNC6, PUPD_DOWN, 3);
+PINCTRL_FUNC(ARM_JTAG_TDI, 38, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO3, 38, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 38, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_CSN, 38, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(AC_I2S_WS, 38, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(DW_I2S_WS, 38, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(ACIP_ADLRC, 38, FUNC6, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO4, 39, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CLK, 39, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM0, 39, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO5, 40, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_0, 40, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM1, 40, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO6, 41, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_1, 41, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM2, 41, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO7, 42, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CD, 42, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM3, 42, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO8, 43, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_2, 43, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM4, 43, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO9, 44, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_3, 44, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM5, 44, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO10, 45, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CMD_RSP, 45, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM6, 45, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO11, 46, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_WP, 46, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM7, 46, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM0, 47, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO12, 47, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(CIS_CLK, 47, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM1, 48, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO13, 48, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM2, 49, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO14, 49, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(UART0_RX, 50, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO48, 50, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(UART0_TX, 51, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO49, 51, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C0_SCL, 52, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO57, 52, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(I2C0_SDA, 53, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO56, 53, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(SSI0_CLK, 56, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(SSI0_TXD, 57, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SSI0_CSN_0, 58, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO54, 58, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI0_CSN_1, 59, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO55, 59, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI0_RXD, 60, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_CD, 61, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO52, 61, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(ARC_JTAG_TRSTN, 61, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PAE_JTAG_TRSTN, 61, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_WP, 62, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO53, 62, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_CLK, 63, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO63, 63, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(ARC_JTAG_TMS, 63, FUNC2, PUPD_NONE, 3);
+PINCTRL_FUNC(PAE_JTAG_TMS, 63, FUNC3, PUPD_NONE, 3);
+PINCTRL_FUNC(SSI1_CLK, 63, FUNC4, PUPD_NONE, 3);
+PINCTRL_FUNC(SD0_CMD_RSP, 64, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO29, 64, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(ARC_JTAG_TCK, 64, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PAE_JTAG_TCK, 64, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_TXD, 64, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_DATA_0, 65, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO62, 65, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(ARC_JTAG_TDI, 65, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PAE_JTAG_TDI, 65, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_RXD, 65, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_DATA_1, 66, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO61, 66, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(USB_PWREN, 66, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(AC_MCLK, 66, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 66, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_DATA_2, 67, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO60, 67, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C1_SDA, 67, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_TX, 67, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(ARC_JTAG_TDO, 67, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_DATA_3, 68, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO18, 68, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C1_SCL, 68, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_RX, 68, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(PAE_JTAG_TDO, 68, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 68, FUNC5, PUPD_UP, 3);
+
+
+/* PINCTRL_MUX */
+
+PINCTRL_MUX(ACIP_ADDAT, 0, &PAD35_ACIP_ADDAT);
+PINCTRL_MUX(ACIP_ADLRC, 0, &PAD38_ACIP_ADLRC);
+PINCTRL_MUX(ACIP_BCLK, 0, &PAD37_ACIP_BCLK);
+
+PINCTRL_MUX(AC_I2S_CLK, 0, &PAD37_AC_I2S_CLK);
+PINCTRL_MUX(AC_I2S_DI, 0, &PAD36_AC_I2S_DI);
+PINCTRL_MUX(AC_I2S_DO, 0, &PAD35_AC_I2S_DO);
+PINCTRL_MUX(AC_I2S_WS, 0, &PAD38_AC_I2S_WS);
+PINCTRL_MUX(AC_MCLK, 2, &PAD10_AC_MCLK, &PAD17_AC_MCLK, &PAD66_AC_MCLK);
+
+PINCTRL_MUX(ARC_JTAG_TCK, 0, &PAD64_ARC_JTAG_TCK);
+PINCTRL_MUX(ARC_JTAG_TDI, 0, &PAD65_ARC_JTAG_TDI);
+PINCTRL_MUX(ARC_JTAG_TDO, 0, &PAD67_ARC_JTAG_TDO);
+PINCTRL_MUX(ARC_JTAG_TMS, 0, &PAD63_ARC_JTAG_TMS);
+PINCTRL_MUX(ARC_JTAG_TRSTN, 0, &PAD61_ARC_JTAG_TRSTN);
+
+PINCTRL_MUX(ARM_JTAG_TCK, 0, &PAD37_ARM_JTAG_TCK);
+PINCTRL_MUX(ARM_JTAG_TDI, 0, &PAD38_ARM_JTAG_TDI);
+PINCTRL_MUX(ARM_JTAG_TDO, 0, &PAD18_ARM_JTAG_TDO);
+PINCTRL_MUX(ARM_JTAG_TMS, 0, &PAD36_ARM_JTAG_TMS);
+PINCTRL_MUX(ARM_JTAG_TRSTN, 0, &PAD35_ARM_JTAG_TRSTN);
+
+PINCTRL_MUX(CIS_CLK, 0, &PAD47_CIS_CLK);
+
+PINCTRL_MUX(DW_I2S_CLK, 0, &PAD37_DW_I2S_CLK);
+PINCTRL_MUX(DW_I2S_DI, 0, &PAD36_DW_I2S_DI);
+PINCTRL_MUX(DW_I2S_DO, 0, &PAD35_DW_I2S_DO);
+PINCTRL_MUX(DW_I2S_WS, 0, &PAD38_DW_I2S_WS);
+
+PINCTRL_MUX(I2C0_SCL, 0, &PAD52_I2C0_SCL);
+PINCTRL_MUX(I2C0_SDA, 0, &PAD53_I2C0_SDA);
+
+PINCTRL_MUX(I2C1_SCL, 2, &PAD8_I2C1_SCL, &PAD9_I2C1_SCL, &PAD68_I2C1_SCL);
+PINCTRL_MUX(I2C1_SDA, 2, &PAD2_I2C1_SDA, &PAD7_I2C1_SDA, &PAD67_I2C1_SDA);
+
+PINCTRL_MUX(MAC_MDC, 0, &PAD17_MAC_MDC);
+PINCTRL_MUX(MAC_MDIO, 0, &PAD18_MAC_MDIO);
+PINCTRL_MUX(MAC_REF_CLK, 0, &PAD16_MAC_REF_CLK);
+PINCTRL_MUX(MAC_RMII_CLK, 0, &PAD15_MAC_RMII_CLK);
+PINCTRL_MUX(MAC_RXDV, 0, &PAD26_MAC_RXDV);
+PINCTRL_MUX(MAC_RXD_0, 0, &PAD22_MAC_RXD_0);
+PINCTRL_MUX(MAC_RXD_1, 0, &PAD23_MAC_RXD_1);
+PINCTRL_MUX(MAC_TXD_0, 0, &PAD28_MAC_TXD_0);
+PINCTRL_MUX(MAC_TXD_1, 0, &PAD29_MAC_TXD_1);
+PINCTRL_MUX(MAC_TXEN, 0, &PAD32_MAC_TXEN);
+
+PINCTRL_MUX(PAE_JTAG_TCK, 0, &PAD64_PAE_JTAG_TCK);
+PINCTRL_MUX(PAE_JTAG_TDI, 0, &PAD65_PAE_JTAG_TDI);
+PINCTRL_MUX(PAE_JTAG_TDO, 0, &PAD68_PAE_JTAG_TDO);
+PINCTRL_MUX(PAE_JTAG_TMS, 0, &PAD63_PAE_JTAG_TMS);
+PINCTRL_MUX(PAE_JTAG_TRSTN, 0, &PAD61_PAE_JTAG_TRSTN);
+
+PINCTRL_MUX(PWM0, 0, &PAD15_PWM0, &PAD39_PWM0, &PAD47_PWM0);
+PINCTRL_MUX(PWM1, 0, &PAD22_PWM1, &PAD40_PWM1, &PAD48_PWM1);
+PINCTRL_MUX(PWM2, 0, &PAD23_PWM2, &PAD41_PWM2, &PAD49_PWM2);
+PINCTRL_MUX(PWM3, 0, &PAD26_PWM3, &PAD42_PWM3);
+PINCTRL_MUX(PWM4, 0, &PAD28_PWM4, &PAD43_PWM4);
+PINCTRL_MUX(PWM5, 0, &PAD29_PWM5, &PAD44_PWM5);
+PINCTRL_MUX(PWM6, 0, &PAD32_PWM6, &PAD45_PWM6);
+PINCTRL_MUX(PWM7, 0, &PAD18_PWM7, &PAD46_PWM7);
+
+PINCTRL_MUX(RTC_CLK, 0, &PAD17_RTC_CLK);
+
+PINCTRL_MUX(SD0_CD, 0, &PAD61_SD0_CD);
+PINCTRL_MUX(SD0_CLK, 0, &PAD63_SD0_CLK);
+PINCTRL_MUX(SD0_CMD_RSP, 0, &PAD64_SD0_CMD_RSP);
+PINCTRL_MUX(SD0_DATA_0, 0, &PAD65_SD0_DATA_0);
+PINCTRL_MUX(SD0_DATA_1, 0, &PAD66_SD0_DATA_1);
+PINCTRL_MUX(SD0_DATA_2, 0, &PAD67_SD0_DATA_2);
+PINCTRL_MUX(SD0_DATA_3, 0, &PAD68_SD0_DATA_3);
+PINCTRL_MUX(SD0_WP, 0, &PAD62_SD0_WP);
+
+PINCTRL_MUX(SD1_CD, 0, &PAD26_SD1_CD, &PAD42_SD1_CD);
+PINCTRL_MUX(SD1_CLK, 0, &PAD15_SD1_CLK, &PAD39_SD1_CLK);
+PINCTRL_MUX(SD1_CMD_RSP, 0, &PAD32_SD1_CMD_RSP, &PAD45_SD1_CMD_RSP);
+PINCTRL_MUX(SD1_DATA_0, 0, &PAD22_SD1_DATA_0, &PAD40_SD1_DATA_0);
+PINCTRL_MUX(SD1_DATA_1, 0, &PAD23_SD1_DATA_1, &PAD41_SD1_DATA_1);
+PINCTRL_MUX(SD1_DATA_2, 0, &PAD28_SD1_DATA_2, &PAD43_SD1_DATA_2);
+PINCTRL_MUX(SD1_DATA_3, 0, &PAD29_SD1_DATA_3, &PAD44_SD1_DATA_3);
+PINCTRL_MUX(SD1_WP, 0, &PAD17_SD1_WP, &PAD46_SD1_WP);
+
+PINCTRL_MUX(SSI0_CLK, 0, &PAD56_SSI0_CLK);
+PINCTRL_MUX(SSI0_CSN_0, 0, &PAD58_SSI0_CSN_0);
+PINCTRL_MUX(SSI0_CSN_1, 0, &PAD59_SSI0_CSN_1);
+PINCTRL_MUX(SSI0_D2, 0, &PAD7_SSI0_D2);
+PINCTRL_MUX(SSI0_D3, 0, &PAD8_SSI0_D3);
+PINCTRL_MUX(SSI0_RXD, 0, &PAD60_SSI0_RXD);
+PINCTRL_MUX(SSI0_TXD, 0, &PAD57_SSI0_TXD);
+
+PINCTRL_MUX(SSI1_CLK, 2, &PAD5_SSI1_CLK, &PAD15_SSI1_CLK, &PAD37_SSI1_CLK,
+ &PAD63_SSI1_CLK);
+PINCTRL_MUX(SSI1_CSN_0, 3, &PAD6_SSI1_CSN_0, &PAD23_SSI1_CSN_0,
+ &PAD29_SSI1_CSN_0, &PAD38_SSI1_CSN_0, &PAD66_SSI1_CSN_0,
+ &PAD68_SSI1_CSN_0);
+PINCTRL_MUX(SSI1_RXD, 2, &PAD4_SSI1_RXD, &PAD22_SSI1_RXD, &PAD36_SSI1_RXD,
+ &PAD65_SSI1_RXD);
+PINCTRL_MUX(SSI1_TXD, 2, &PAD3_SSI1_TXD, &PAD32_SSI1_TXD, &PAD35_SSI1_TXD,
+ &PAD64_SSI1_TXD);
+
+PINCTRL_MUX(SSI2_CLK, 2, &PAD5_SSI2_CLK, &PAD15_SSI2_CLK, &PAD37_SSI2_CLK);
+PINCTRL_MUX(SSI2_CSN, 3, &PAD6_SSI2_CSN, &PAD23_SSI2_CSN, &PAD29_SSI2_CSN,
+ &PAD38_SSI2_CSN);
+PINCTRL_MUX(SSI2_RXD, 2, &PAD4_SSI2_RXD, &PAD22_SSI2_RXD, &PAD36_SSI2_RXD);
+PINCTRL_MUX(SSI2_TXD, 2, &PAD3_SSI2_TXD, &PAD32_SSI2_TXD, &PAD35_SSI2_TXD);
+
+PINCTRL_MUX(UART0_RX, 0, &PAD50_UART0_RX);
+PINCTRL_MUX(UART0_TX, 0, &PAD51_UART0_TX);
+
+PINCTRL_MUX(UART1_RX, 1, &PAD1_UART1_RX, &PAD8_UART1_RX, &PAD68_UART1_RX);
+PINCTRL_MUX(UART1_TX, 1, &PAD0_UART1_TX, &PAD7_UART1_TX, &PAD67_UART1_TX);
+
+PINCTRL_MUX(USB_DBG_CLK, 0, &PAD6_USB_DBG_CLK);
+PINCTRL_MUX(USB_PWREN, 1, &PAD11_USB_PWREN, &PAD17_USB_PWREN, &PAD66_USB_PWREN);
+
+PINCTRL_MUX(GPIO0, 0, &PAD35_GPIO0);
+PINCTRL_MUX(GPIO1, 0, &PAD36_GPIO1);
+PINCTRL_MUX(GPIO2, 0, &PAD37_GPIO2);
+PINCTRL_MUX(GPIO3, 0, &PAD38_GPIO3);
+PINCTRL_MUX(GPIO4, 0, &PAD39_GPIO4);
+PINCTRL_MUX(GPIO5, 0, &PAD40_GPIO5);
+PINCTRL_MUX(GPIO6, 0, &PAD41_GPIO6);
+PINCTRL_MUX(GPIO7, 0, &PAD42_GPIO7);
+PINCTRL_MUX(GPIO8, 0, &PAD43_GPIO8);
+PINCTRL_MUX(GPIO9, 0, &PAD44_GPIO9);
+PINCTRL_MUX(GPIO10, 0, &PAD45_GPIO10);
+PINCTRL_MUX(GPIO11, 0, &PAD46_GPIO11);
+PINCTRL_MUX(GPIO12, 0, &PAD47_GPIO12);
+PINCTRL_MUX(GPIO13, 0, &PAD48_GPIO13);
+PINCTRL_MUX(GPIO14, 0, &PAD49_GPIO14);
+PINCTRL_MUX(GPIO15, 0, &PAD15_GPIO15);
+PINCTRL_MUX(GPIO16, 0, &PAD22_GPIO16);
+PINCTRL_MUX(GPIO17, 0, &PAD18_GPIO17);
+PINCTRL_MUX(GPIO18, 0, &PAD68_GPIO18);
+PINCTRL_MUX(GPIO19, 0, &PAD0_GPIO19);
+PINCTRL_MUX(GPIO20, 0, &PAD1_GPIO20);
+PINCTRL_MUX(GPIO21, 0, &PAD2_GPIO21);
+PINCTRL_MUX(GPIO22, 0, &PAD9_GPIO22);
+PINCTRL_MUX(GPIO23, 0, &PAD10_GPIO23);
+PINCTRL_MUX(GPIO24, 0, &PAD11_GPIO24);
+PINCTRL_MUX(GPIO25, 0, &PAD3_GPIO25);
+PINCTRL_MUX(GPIO26, 0, &PAD4_GPIO26);
+PINCTRL_MUX(GPIO27, 0, &PAD5_GPIO27);
+PINCTRL_MUX(GPIO28, 0, &PAD6_GPIO28);
+PINCTRL_MUX(GPIO29, 0, &PAD64_GPIO29);
+PINCTRL_MUX(GPIO34, 0, &PAD17_GPIO34);
+PINCTRL_MUX(GPIO38, 0, &PAD23_GPIO38);
+PINCTRL_MUX(GPIO41, 0, &PAD26_GPIO41);
+PINCTRL_MUX(GPIO42, 0, &PAD28_GPIO42);
+PINCTRL_MUX(GPIO43, 0, &PAD29_GPIO43);
+PINCTRL_MUX(GPIO46, 0, &PAD32_GPIO46);
+PINCTRL_MUX(GPIO48, 0, &PAD50_GPIO48);
+PINCTRL_MUX(GPIO49, 0, &PAD51_GPIO49);
+PINCTRL_MUX(GPIO50, 0, &PAD7_GPIO50);
+PINCTRL_MUX(GPIO51, 0, &PAD8_GPIO51);
+PINCTRL_MUX(GPIO52, 0, &PAD61_GPIO52);
+PINCTRL_MUX(GPIO53, 0, &PAD62_GPIO53);
+PINCTRL_MUX(GPIO54, 0, &PAD58_GPIO54);
+PINCTRL_MUX(GPIO55, 0, &PAD59_GPIO55);
+PINCTRL_MUX(GPIO56, 0, &PAD53_GPIO56);
+PINCTRL_MUX(GPIO57, 0, &PAD52_GPIO57);
+PINCTRL_MUX(GPIO60, 0, &PAD67_GPIO60);
+PINCTRL_MUX(GPIO61, 0, &PAD66_GPIO61);
+PINCTRL_MUX(GPIO62, 0, &PAD65_GPIO62);
+PINCTRL_MUX(GPIO63, 0, &PAD63_GPIO63);
+
+PINCTRL_MUX(SD1_CLK_RMII, 0, &PAD15_SD1_CLK, &PAD39_SD1_CLK);
+PINCTRL_MUX(SD1_CD_RMII, 0, &PAD26_SD1_CD, &PAD42_SD1_CD);
+PINCTRL_MUX(SD1_CMD_RSP_RMII, 0, &PAD32_SD1_CMD_RSP, &PAD45_SD1_CMD_RSP);
+PINCTRL_MUX(SD1_WP_RMII, 0, &PAD17_SD1_WP, &PAD46_SD1_WP);
+PINCTRL_MUX(SD1_DATA_0_RMII, 0, &PAD22_SD1_DATA_0, &PAD40_SD1_DATA_0);
+PINCTRL_MUX(SD1_DATA_1_RMII, 0, &PAD23_SD1_DATA_1, &PAD41_SD1_DATA_1);
+PINCTRL_MUX(SD1_DATA_2_RMII, 0, &PAD28_SD1_DATA_2, &PAD43_SD1_DATA_2);
+PINCTRL_MUX(SD1_DATA_3_RMII, 0, &PAD29_SD1_DATA_3, &PAD44_SD1_DATA_3);
+
+/* PINCTRL_DEVICE */
+PINCTRL_DEVICE(ACI2S, 5, &MUX_AC_I2S_CLK, &MUX_AC_I2S_DI, &MUX_AC_I2S_DO,
+ &MUX_AC_I2S_WS, &MUX_AC_MCLK);
+PINCTRL_DEVICE(ACIP, 3, &MUX_ACIP_ADDAT, &MUX_ACIP_ADLRC, &MUX_ACIP_BCLK);
+PINCTRL_DEVICE(ARCJTAG, 5, &MUX_ARC_JTAG_TCK, &MUX_ARC_JTAG_TDI,
+ &MUX_ARC_JTAG_TDO, &MUX_ARC_JTAG_TMS, &MUX_ARC_JTAG_TRSTN);
+PINCTRL_DEVICE(ARMJTAG, 5, &MUX_ARM_JTAG_TCK, &MUX_ARM_JTAG_TDI,
+ &MUX_ARM_JTAG_TDO, &MUX_ARM_JTAG_TMS, &MUX_ARM_JTAG_TRSTN);
+PINCTRL_DEVICE(DWI2S, 4, &MUX_DW_I2S_CLK, &MUX_DW_I2S_DI, &MUX_DW_I2S_DO,
+ &MUX_DW_I2S_WS);
+PINCTRL_DEVICE(I2C0, 2, &MUX_I2C0_SCL, &MUX_I2C0_SDA);
+PINCTRL_DEVICE(I2C1, 2, &MUX_I2C1_SCL, &MUX_I2C1_SDA);
+PINCTRL_DEVICE(MIPI, 1, &MUX_CIS_CLK);
+PINCTRL_DEVICE(PAEJTAG, 5, &MUX_PAE_JTAG_TCK, &MUX_PAE_JTAG_TDI,
+ &MUX_PAE_JTAG_TDO, &MUX_PAE_JTAG_TMS, &MUX_PAE_JTAG_TRSTN);
+PINCTRL_DEVICE(PWM0, 1, &MUX_PWM0);
+PINCTRL_DEVICE(PWM1, 1, &MUX_PWM1);
+PINCTRL_DEVICE(PWM2, 1, &MUX_PWM2);
+PINCTRL_DEVICE(PWM3, 1, &MUX_PWM3);
+PINCTRL_DEVICE(PWM4, 1, &MUX_PWM4);
+PINCTRL_DEVICE(PWM5, 1, &MUX_PWM5);
+PINCTRL_DEVICE(PWM6, 1, &MUX_PWM6);
+PINCTRL_DEVICE(PWM7, 1, &MUX_PWM7);
+PINCTRL_DEVICE(RMII, 10, &MUX_MAC_MDC, &MUX_MAC_MDIO, &MUX_MAC_REF_CLK,
+ &MUX_MAC_RMII_CLK, &MUX_MAC_RXDV, &MUX_MAC_RXD_0, &MUX_MAC_RXD_1,
+ &MUX_MAC_TXD_0, &MUX_MAC_TXD_1, &MUX_MAC_TXEN);
+PINCTRL_DEVICE(SD0, 8, &MUX_SD0_CD, &MUX_SD0_CLK, &MUX_SD0_CMD_RSP,
+ &MUX_SD0_DATA_0, &MUX_SD0_DATA_1, &MUX_SD0_DATA_2, &MUX_SD0_DATA_3,
+ &MUX_SD0_WP);
+PINCTRL_DEVICE(SD0_1BIT_NO_WP, 4, &MUX_SD0_CD, &MUX_SD0_CLK, &MUX_SD0_CMD_RSP,
+ &MUX_SD0_DATA_0);
+PINCTRL_DEVICE(SD0_NO_WP, 7, &MUX_SD0_CD, &MUX_SD0_CLK, &MUX_SD0_CMD_RSP,
+ &MUX_SD0_DATA_0, &MUX_SD0_DATA_1, &MUX_SD0_DATA_2, &MUX_SD0_DATA_3);
+PINCTRL_DEVICE(SD1, 8, &MUX_SD1_CD, &MUX_SD1_CLK, &MUX_SD1_CMD_RSP,
+ &MUX_SD1_DATA_0, &MUX_SD1_DATA_1, &MUX_SD1_DATA_2, &MUX_SD1_DATA_3,
+ &MUX_SD1_WP);
+PINCTRL_DEVICE(SD1_NO_WP, 7, &MUX_SD1_CD, &MUX_SD1_CLK, &MUX_SD1_CMD_RSP,
+ &MUX_SD1_DATA_0, &MUX_SD1_DATA_1, &MUX_SD1_DATA_2, &MUX_SD1_DATA_3);
+PINCTRL_DEVICE(SSI0, 4, &MUX_GPIO54, &MUX_SSI0_CLK, &MUX_SSI0_RXD,
+ &MUX_SSI0_TXD);
+PINCTRL_DEVICE(SSI0_4BIT, 6, &MUX_GPIO54, &MUX_SSI0_CLK, &MUX_SSI0_D2,
+ &MUX_SSI0_D3, &MUX_SSI0_RXD, &MUX_SSI0_TXD);
+PINCTRL_DEVICE(SSI1, 4, &MUX_SSI1_CLK, &MUX_SSI1_CSN_0, &MUX_SSI1_RXD,
+ &MUX_SSI1_TXD);
+PINCTRL_DEVICE(SSI2, 4, &MUX_SSI2_CLK, &MUX_SSI2_CSN, &MUX_SSI2_RXD,
+ &MUX_SSI2_TXD);
+PINCTRL_DEVICE(UART0, 2, &MUX_UART0_RX, &MUX_UART0_TX);
+PINCTRL_DEVICE(UART1, 2, &MUX_UART1_RX, &MUX_UART1_TX);
+PINCTRL_DEVICE(USB, 1, &MUX_USB_PWREN);
+PINCTRL_DEVICE(GPIO0, 1, &MUX_GPIO0);
+PINCTRL_DEVICE(GPIO1, 1, &MUX_GPIO1);
+PINCTRL_DEVICE(GPIO2, 1, &MUX_GPIO2);
+PINCTRL_DEVICE(GPIO3, 1, &MUX_GPIO3);
+PINCTRL_DEVICE(GPIO4, 1, &MUX_GPIO4);
+PINCTRL_DEVICE(GPIO5, 1, &MUX_GPIO5);
+PINCTRL_DEVICE(GPIO6, 1, &MUX_GPIO6);
+PINCTRL_DEVICE(GPIO7, 1, &MUX_GPIO7);
+PINCTRL_DEVICE(GPIO8, 1, &MUX_GPIO8);
+PINCTRL_DEVICE(GPIO9, 1, &MUX_GPIO9);
+PINCTRL_DEVICE(GPIO10, 1, &MUX_GPIO10);
+PINCTRL_DEVICE(GPIO11, 1, &MUX_GPIO11);
+PINCTRL_DEVICE(GPIO12, 1, &MUX_GPIO12);
+PINCTRL_DEVICE(GPIO13, 1, &MUX_GPIO13);
+PINCTRL_DEVICE(GPIO14, 1, &MUX_GPIO14);
+PINCTRL_DEVICE(GPIO15, 1, &MUX_GPIO15);
+PINCTRL_DEVICE(GPIO16, 1, &MUX_GPIO16);
+PINCTRL_DEVICE(GPIO17, 1, &MUX_GPIO17);
+PINCTRL_DEVICE(GPIO18, 1, &MUX_GPIO18);
+PINCTRL_DEVICE(GPIO19, 1, &MUX_GPIO19);
+PINCTRL_DEVICE(GPIO20, 1, &MUX_GPIO20);
+PINCTRL_DEVICE(GPIO21, 1, &MUX_GPIO21);
+PINCTRL_DEVICE(GPIO22, 1, &MUX_GPIO22);
+PINCTRL_DEVICE(GPIO23, 1, &MUX_GPIO23);
+PINCTRL_DEVICE(GPIO24, 1, &MUX_GPIO24);
+PINCTRL_DEVICE(GPIO25, 1, &MUX_GPIO25);
+PINCTRL_DEVICE(GPIO26, 1, &MUX_GPIO26);
+PINCTRL_DEVICE(GPIO27, 1, &MUX_GPIO27);
+PINCTRL_DEVICE(GPIO28, 1, &MUX_GPIO28);
+PINCTRL_DEVICE(GPIO29, 1, &MUX_GPIO29);
+PINCTRL_DEVICE(GPIO34, 1, &MUX_GPIO34);
+PINCTRL_DEVICE(GPIO38, 1, &MUX_GPIO38);
+PINCTRL_DEVICE(GPIO41, 1, &MUX_GPIO41);
+PINCTRL_DEVICE(GPIO42, 1, &MUX_GPIO42);
+PINCTRL_DEVICE(GPIO43, 1, &MUX_GPIO43);
+PINCTRL_DEVICE(GPIO46, 1, &MUX_GPIO46);
+PINCTRL_DEVICE(GPIO48, 1, &MUX_GPIO48);
+PINCTRL_DEVICE(GPIO49, 1, &MUX_GPIO49);
+PINCTRL_DEVICE(GPIO50, 1, &MUX_GPIO50);
+PINCTRL_DEVICE(GPIO51, 1, &MUX_GPIO51);
+PINCTRL_DEVICE(GPIO52, 1, &MUX_GPIO52);
+PINCTRL_DEVICE(GPIO53, 1, &MUX_GPIO53);
+PINCTRL_DEVICE(GPIO54, 1, &MUX_GPIO54);
+PINCTRL_DEVICE(GPIO55, 1, &MUX_GPIO55);
+PINCTRL_DEVICE(GPIO56, 1, &MUX_GPIO56);
+PINCTRL_DEVICE(GPIO57, 1, &MUX_GPIO57);
+PINCTRL_DEVICE(GPIO60, 1, &MUX_GPIO60);
+PINCTRL_DEVICE(GPIO61, 1, &MUX_GPIO61);
+PINCTRL_DEVICE(GPIO62, 1, &MUX_GPIO62);
+PINCTRL_DEVICE(GPIO63, 1, &MUX_GPIO63);
+
+PINCTRL_DEVICE(SD1_WIFI_RMII, 7, &MUX_SD1_CLK_RMII, &MUX_SD1_CD_RMII,
+ &MUX_SD1_CMD_RSP_RMII, &MUX_SD1_DATA_0_RMII, &MUX_SD1_DATA_1_RMII,
+ &MUX_SD1_DATA_2_RMII, &MUX_SD1_DATA_3_RMII);
+
+void fh_pinctrl_init_devicelist(OS_LIST *list)
+{
+ OS_LIST_EMPTY(list);
+
+ /*PINCTRL_ADD_DEVICE*/
+ PINCTRL_ADD_DEVICE(ACI2S);
+ PINCTRL_ADD_DEVICE(ACIP);
+ PINCTRL_ADD_DEVICE(ARCJTAG);
+ PINCTRL_ADD_DEVICE(ARMJTAG);
+ PINCTRL_ADD_DEVICE(DWI2S);
+ PINCTRL_ADD_DEVICE(I2C0);
+ PINCTRL_ADD_DEVICE(I2C1);
+ PINCTRL_ADD_DEVICE(MIPI);
+ PINCTRL_ADD_DEVICE(PAEJTAG);
+ PINCTRL_ADD_DEVICE(PWM0);
+ PINCTRL_ADD_DEVICE(PWM1);
+ PINCTRL_ADD_DEVICE(PWM2);
+ PINCTRL_ADD_DEVICE(PWM3);
+ PINCTRL_ADD_DEVICE(PWM4);
+ PINCTRL_ADD_DEVICE(PWM5);
+ PINCTRL_ADD_DEVICE(PWM6);
+ PINCTRL_ADD_DEVICE(PWM7);
+ PINCTRL_ADD_DEVICE(RMII);
+ PINCTRL_ADD_DEVICE(SD0);
+ PINCTRL_ADD_DEVICE(SD0_1BIT_NO_WP);
+ PINCTRL_ADD_DEVICE(SD0_NO_WP);
+ PINCTRL_ADD_DEVICE(SD1);
+ PINCTRL_ADD_DEVICE(SD1_NO_WP);
+ PINCTRL_ADD_DEVICE(SSI0);
+ PINCTRL_ADD_DEVICE(SSI0_4BIT);
+ PINCTRL_ADD_DEVICE(SSI1);
+ PINCTRL_ADD_DEVICE(SSI2);
+ PINCTRL_ADD_DEVICE(UART0);
+ PINCTRL_ADD_DEVICE(UART1);
+ PINCTRL_ADD_DEVICE(USB);
+ PINCTRL_ADD_DEVICE(GPIO0);
+ PINCTRL_ADD_DEVICE(GPIO1);
+ PINCTRL_ADD_DEVICE(GPIO2);
+ PINCTRL_ADD_DEVICE(GPIO3);
+ PINCTRL_ADD_DEVICE(GPIO4);
+ PINCTRL_ADD_DEVICE(GPIO5);
+ PINCTRL_ADD_DEVICE(GPIO6);
+ PINCTRL_ADD_DEVICE(GPIO7);
+ PINCTRL_ADD_DEVICE(GPIO8);
+ PINCTRL_ADD_DEVICE(GPIO9);
+ PINCTRL_ADD_DEVICE(GPIO10);
+ PINCTRL_ADD_DEVICE(GPIO11);
+ PINCTRL_ADD_DEVICE(GPIO12);
+ PINCTRL_ADD_DEVICE(GPIO13);
+ PINCTRL_ADD_DEVICE(GPIO14);
+ PINCTRL_ADD_DEVICE(GPIO15);
+ PINCTRL_ADD_DEVICE(GPIO16);
+ PINCTRL_ADD_DEVICE(GPIO17);
+ PINCTRL_ADD_DEVICE(GPIO18);
+ PINCTRL_ADD_DEVICE(GPIO19);
+ PINCTRL_ADD_DEVICE(GPIO20);
+ PINCTRL_ADD_DEVICE(GPIO21);
+ PINCTRL_ADD_DEVICE(GPIO22);
+ PINCTRL_ADD_DEVICE(GPIO23);
+ PINCTRL_ADD_DEVICE(GPIO24);
+ PINCTRL_ADD_DEVICE(GPIO25);
+ PINCTRL_ADD_DEVICE(GPIO26);
+ PINCTRL_ADD_DEVICE(GPIO27);
+ PINCTRL_ADD_DEVICE(GPIO28);
+ PINCTRL_ADD_DEVICE(GPIO29);
+ PINCTRL_ADD_DEVICE(GPIO34);
+ PINCTRL_ADD_DEVICE(GPIO38);
+ PINCTRL_ADD_DEVICE(GPIO41);
+ PINCTRL_ADD_DEVICE(GPIO42);
+ PINCTRL_ADD_DEVICE(GPIO43);
+ PINCTRL_ADD_DEVICE(GPIO46);
+ PINCTRL_ADD_DEVICE(GPIO48);
+ PINCTRL_ADD_DEVICE(GPIO49);
+ PINCTRL_ADD_DEVICE(GPIO50);
+ PINCTRL_ADD_DEVICE(GPIO51);
+ PINCTRL_ADD_DEVICE(GPIO52);
+ PINCTRL_ADD_DEVICE(GPIO53);
+ PINCTRL_ADD_DEVICE(GPIO54);
+ PINCTRL_ADD_DEVICE(GPIO55);
+ PINCTRL_ADD_DEVICE(GPIO56);
+ PINCTRL_ADD_DEVICE(GPIO57);
+ PINCTRL_ADD_DEVICE(GPIO60);
+ PINCTRL_ADD_DEVICE(GPIO61);
+ PINCTRL_ADD_DEVICE(GPIO62);
+ PINCTRL_ADD_DEVICE(GPIO63);
+
+ PINCTRL_ADD_DEVICE(SD1_WIFI_RMII);
+}
+
+char* fh_pinctrl_selected_devices[] =
+{
+ CONFIG_PINCTRL_SELECT
+};
diff --git a/arch/arm/mach-fh/include/mach/fh8856.h b/arch/arm/mach-fh/include/mach/fh8856.h
new file mode 100644
index 00000000..27ddf27f
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh8856.h
@@ -0,0 +1,331 @@
+/*
+ *
+ * Copyright (C) 2015 Fullhan.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_FH8856_H
+#define __ASM_ARCH_FH8856_H
+
+#include <linux/init.h>
+
+#define SRAM_GRANULARITY 32
+#define SRAM_SIZE SZ_32K
+
+#define SIMPLE_TIMER_BASE 2
+
+#define RAM_BASE (0x10000000)
+#define DDR_BASE (0xA0000000)
+
+#define PMU_REG_BASE (0xF0000000)
+#define TIMER_REG_BASE (0xF0C00000)
+#define GPIO0_REG_BASE (0xF0300000)
+#define GPIO1_REG_BASE (0xF4000000)
+#define UART0_REG_BASE (0xF0700000)
+#define UART1_REG_BASE (0xF0800000)
+#define SPI0_REG_BASE (0xF0500000)
+#define SPI1_REG_BASE (0xF0600000)
+#define SPI2_REG_BASE (0xF0640000)
+#define INTC_REG_BASE (0xE0200000)
+#define GMAC_REG_BASE (0xE0600000)
+#define USBC_REG_BASE (0xE0700000)
+#define DMAC_REG_BASE (0xE0300000)
+#define I2C1_REG_BASE (0xF0B00000)
+#define I2C0_REG_BASE (0xF0200000)
+#define SDC0_REG_BASE (0xE2000000)
+#define SDC1_REG_BASE (0xE2200000)
+#define WDT_REG_BASE (0xF0D00000)
+#define PWM_REG_BASE (0xF0400000)
+#define I2S_REG_BASE (0xF0900000)
+#define ACW_REG_BASE (0xF0A00000)
+#define SADC_REG_BASE (0xF1200000)
+#define EFUSE_REG_BASE (0xF1600000)
+#define AES_REG_BASE (0xE8200000)
+#define RTC_REG_BASE (0xF1500000)
+#define DDRC_REG_BASE (0xED000000)
+#define CONSOLE_REG_BASE UART0_REG_BASE
+#define FH_UART_NUMBER 2
+
+#define PMU_REG_SIZE 0x2018
+#define PMU_DEBUG
+
+#define REG_PMU_CHIP_ID (0x0000)
+#define REG_PMU_IP_VER (0x0004)
+#define REG_PMU_FW_VER (0x0008)
+#define REG_PMU_SYS_CTRL (0x000c)
+#define REG_PMU_PLL0 (0x0010)
+#define REG_PMU_PLL1 (0x0014)
+#define REG_PMU_PLL_CTRL (0x0018)
+#define REG_PMU_CLK_GATE (0x001c)
+#define REG_PMU_CLK_SEL (0x0020)
+#define REG_PMU_CLK_DIV0 (0x0024)
+#define REG_PMU_CLK_DIV1 (0x0028)
+#define REG_PMU_CLK_DIV2 (0x002c)
+#define REG_PMU_CLK_DIV3 (0x0030)
+#define REG_PMU_CLK_DIV4 (0x0034)
+#define REG_PMU_CLK_DIV5 (0x0038)
+#define REG_PMU_CLK_DIV6 (0x003c)
+#define REG_PMU_SWRST_MAIN_CTRL (0x0040)
+#define REG_PMU_SWRST_AXI_CTRL (0x0044)
+#define REG_PMU_SWRST_AHB_CTRL (0x0048)
+#define REG_PMU_SWRST_APB_CTRL (0x004c)
+#define REG_PMU_SPC_IO_STATUS (0x0054)
+#define REG_PMU_SPC_FUN (0x0058)
+#define REG_PMU_DBG_SPOT0 (0x005c)
+#define REG_PMU_DBG_SPOT1 (0x0060)
+#define REG_PMU_DBG_SPOT2 (0x0064)
+#define REG_PMU_DBG_SPOT3 (0x0068)
+#define REG_PMU_CLK_DIV7 (0x006c)
+#define REG_PMU_CLK_DIV8 (0x0070)
+#define REG_PAD_PWR_SEL (0x0074)
+#define REG_PMU_PLL2 (0x0078)
+#define REG_PMU_PLL2_CTRL (0x007c)
+
+#define REG_PMU_PAD_CIS_HSYNC_CFG (0x0080)
+#define REG_PMU_PAD_CIS_VSYNC_CFG (0x0084)
+#define REG_PMU_PAD_CIS_PCLK_CFG (0x0088)
+#define REG_PMU_PAD_CIS_D_0_CFG (0x008c)
+#define REG_PMU_PAD_CIS_D_1_CFG (0x0090)
+#define REG_PMU_PAD_CIS_D_2_CFG (0x0094)
+#define REG_PMU_PAD_CIS_D_3_CFG (0x0098)
+#define REG_PMU_PAD_CIS_D_4_CFG (0x009c)
+#define REG_PMU_PAD_CIS_D_5_CFG (0x00a0)
+#define REG_PMU_PAD_CIS_D_6_CFG (0x00a4)
+#define REG_PMU_PAD_CIS_D_7_CFG (0x00a8)
+#define REG_PMU_PAD_CIS_D_8_CFG (0x00ac)
+#define REG_PMU_PAD_CIS_D_9_CFG (0x00b0)
+#define REG_PMU_PAD_CIS_D_10_CFG (0x00b4)
+#define REG_PMU_PAD_CIS_D_11_CFG (0x00b8)
+#define REG_PMU_PAD_MAC_RMII_CLK_CFG (0x00bc)
+#define REG_PMU_PAD_MAC_REF_CLK_CFG (0x00c0)
+#define REG_PMU_PAD_MAC_MDC_CFG (0x00c4)
+#define REG_PMU_PAD_MAC_MDIO_CFG (0x00c8)
+#define REG_PMU_PAD_MAC_COL_MII_CFG (0x00cc)
+#define REG_PMU_PAD_MAC_CRS_MII_CFG (0x00d0)
+#define REG_PMU_PAD_MAC_RXCK_CFG (0x00d4)
+#define REG_PMU_PAD_MAC_RXD0_CFG (0x00d8)
+#define REG_PMU_PAD_MAC_RXD1_CFG (0x00dc)
+#define REG_PMU_PAD_MAC_RXD2_MII_CFG (0x00e0)
+#define REG_PMU_PAD_MAC_RXD3_MII_CFG (0x00e4)
+#define REG_PMU_PAD_MAC_RXDV_CFG (0x00e8)
+#define REG_PMU_PAD_MAC_TXCK_CFG (0x00ec)
+#define REG_PMU_PAD_MAC_TXD0_CFG (0x00f0)
+#define REG_PMU_PAD_MAC_TXD1_CFG (0x00f4)
+#define REG_PMU_PAD_MAC_TXD2_MII_CFG (0x00f8)
+#define REG_PMU_PAD_MAC_TXD3_MII_CFG (0x00fc)
+#define REG_PMU_PAD_MAC_TXEN_CFG (0x0100)
+#define REG_PMU_PAD_MAC_RXER_MII_CFG (0x0104)
+#define REG_PMU_PAD_MAC_TXER_MII_CFG (0x0108)
+#define REG_PMU_PAD_GPIO_0_CFG (0x010c)
+#define REG_PMU_PAD_GPIO_1_CFG (0x0110)
+#define REG_PMU_PAD_GPIO_2_CFG (0x0114)
+#define REG_PMU_PAD_GPIO_3_CFG (0x0118)
+#define REG_PMU_PAD_GPIO_4_CFG (0x011c)
+#define REG_PMU_PAD_GPIO_5_CFG (0x0120)
+#define REG_PMU_PAD_GPIO_6_CFG (0x0124)
+#define REG_PMU_PAD_GPIO_7_CFG (0x0128)
+#define REG_PMU_PAD_GPIO_8_CFG (0x012c)
+#define REG_PMU_PAD_GPIO_9_CFG (0x0130)
+#define REG_PMU_PAD_GPIO_10_CFG (0x0134)
+#define REG_PMU_PAD_GPIO_11_CFG (0x0138)
+#define REG_PMU_PAD_GPIO_12_CFG (0x013c)
+#define REG_PMU_PAD_GPIO_13_CFG (0x0140)
+#define REG_PMU_PAD_GPIO_14_CFG (0x0144)
+#define REG_PMU_PAD_UART_RX_CFG (0x0148)
+#define REG_PMU_PAD_UART_TX_CFG (0x014c)
+#define REG_PMU_PAD_CIS_SCL_CFG (0x0150)
+#define REG_PMU_PAD_CIS_SDA_CFG (0x0154)
+#define REG_PMU_PAD_I2C_SCL_CFG (0x0158)
+#define REG_PMU_PAD_I2C_SDA_CFG (0x015c)
+#define REG_PMU_PAD_SSI0_CLK_CFG (0x0160)
+#define REG_PMU_PAD_SSI0_TXD_CFG (0x0164)
+#define REG_PMU_PAD_SSI0_CSN_0_CFG (0x0168)
+#define REG_PMU_PAD_SSI0_CSN_1_CFG (0x016c)
+#define REG_PMU_PAD_SSI0_RXD_CFG (0x0170)
+#define REG_PMU_PAD_SD0_CD_CFG (0x0174)
+#define REG_PMU_PAD_SD0_WP_CFG (0x0178)
+#define REG_PMU_PAD_SD0_CLK_CFG (0x017c)
+#define REG_PMU_PAD_SD0_CMD_RSP_CFG (0x0180)
+#define REG_PMU_PAD_SD0_DATA_0_CFG (0x0184)
+#define REG_PMU_PAD_SD0_DATA_1_CFG (0x0188)
+#define REG_PMU_PAD_SD0_DATA_2_CFG (0x018c)
+#define REG_PMU_PAD_SD0_DATA_3_CFG (0x0190)
+#define REG_PMU_PAD_SD1_CD_CFG (0x0194)
+#define REG_PMU_PAD_SD1_WP_CFG (0x0198)
+#define REG_PMU_PAD_SD1_CLK_CFG (0x019c)
+#define REG_PMU_PAD_SD1_CMD_RSP_CFG (0x01a0)
+#define REG_PMU_PAD_SD1_DATA_0_CFG (0x01a4)
+#define REG_PMU_PAD_SD1_DATA_1_CFG (0x01a8)
+#define REG_PMU_PAD_SD1_DATA_2_CFG (0x01ac)
+#define REG_PMU_PAD_SD1_DATA_3_CFG (0x01b0)
+#define REG_PMU_AXI0_PRIO_CFG0 (0x01b4)
+#define REG_PMU_AXI0_PRIO_CFG1 (0x01b8)
+#define REG_PMU_AXI1_PRIO_CFG0 (0x01bc)
+#define REG_PMU_AXI1_PRIO_CFG1 (0x01c0)
+#define REG_PMU_SWRSTN_NSR (0x01c4)
+#define REG_PMU_ARM_INT_0 (0x01e0)
+#define REG_PMU_ARM_INT_1 (0x01e4)
+#define REG_PMU_ARM_INT_2 (0x01e8)
+#define REG_PMU_A625_INT_0 (0x01ec)
+#define REG_PMU_A625_INT_1 (0x01f0)
+#define REG_PMU_A625_INT_2 (0x01f4)
+#define REG_PMU_DMA (0x01f8)
+#define REG_PMU_WDT_CTRL (0x01fc)
+#define REG_PMU_DBG_STAT0 (0x0200)
+#define REG_PMU_DBG_STAT1 (0x0204)
+#define REG_PMU_DBG_STAT2 (0x0208)
+#define REG_PMU_DBG_STAT3 (0x020c)
+#define REG_PMU_USB_SYS (0x0210)
+#define REG_PMU_USB_CFG (0x0214)
+#define REG_PMU_USB_TUNE (0x0218)
+#define REG_PMU_PAD_CIS_CLK_CFG (0x021c)
+#define REG_PMU_USB_SYS1 (0x0228)
+#define REG_PMU_PTSLO (0x022c)
+#define REG_PMU_PTSHI (0x0230)
+#define REG_PMU_USER0 (0x0234)
+#define REG_PMU_PRDCID_CTRL0 (0x500)
+#define REG_PMU_PAEARCBOOT0 (0x1000)
+#define REG_PMU_PAEARCBOOT1 (0x1004)
+#define REG_PMU_PAEARCBOOT2 (0x1008)
+#define REG_PMU_PAEARCBOOT3 (0x100c)
+#define REG_PMU_PAE_ARC_START_CTRL (0x1010)
+#define REG_PMU_A625BOOT0 (0x2000)
+#define REG_PMU_A625BOOT1 (0x2004)
+#define REG_PMU_A625BOOT2 (0x2008)
+#define REG_PMU_A625BOOT3 (0x200c)
+#define REG_PMU_A625_START_CTRL (0x2010)
+#define REG_PMU_ARC_INTC_MASK (0x2014)
+#define REG_PMU_PAE_ARC_INTC_MASK (0x2018)
+
+/*ATTENTION: written by ARC */
+#define PMU_ARM_INT_MASK (0x01ec)
+#define PMU_ARM_INT_RAWSTAT (0x01f0)
+#define PMU_ARM_INT_STAT (0x01f4)
+
+#define PMU_A625_INT_MASK (0x01e0)
+#define PMU_A625_INT_RAWSTAT (0x01e4)
+#define PMU_A625_INT_STAT (0x01e8)
+
+#define PMU_IRQ 0
+#define DDRC_IRQ 1
+#define WDT_IRQ 2
+#define TMR0_IRQ 3
+#define PAE_ARC_IRQ0 4
+#define PAE_ARC_IRQ1 5
+#define PAE_ARC_IRQ2 6
+#define VPU_IRQ 7
+#define PAE_IRQ 8
+#define ISPP_IRQ 9
+#define ISPF_IRQ 10
+#define I2C0_IRQ 11
+#define I2C1_IRQ 12
+#define JPEG_IRQ 13
+#define BGM_IRQ 14
+#define GMAC_IRQ 15
+#define AES_IRQ 16
+#define SDC0_IRQ 17
+#define SDC1_IRQ 18
+#define ACW_IRQ 19
+#define SADC_IRQ 20
+#define SPI1_IRQ 21
+#define SPI2_IRQ 22
+#define DMAC0_IRQ 23
+#define DMAC1_IRQ 24
+#define I2S0_IRQ 25
+#define GPIO0_IRQ 26
+#define USBC_IRQ 27
+#define SPI0_IRQ 28
+#define ARC_SW_IRQ 29
+#define UART0_IRQ 30
+#define UART1_IRQ 31
+#define ARM_SW_IRQ 32
+#define RTC_IRQ 33
+#define AHBC0_IRQ 34
+#define AHBC1_IRQ 35
+#define PWM_IRQ 36
+#define MIPIC_IRQ 37
+#define MIPI_WRAP_IRQ 38
+#define WAVE420_IRQ 39
+#define GPIO1_IRQ 40
+#define USBC_IDHV_IRQ 41
+#define USBC_OTG_IRQ 42
+#define USBC_DP_IRQ 43
+#define USBC_DM_IRQ 44
+
+#define UTMI_RST_BIT (1<<1)
+
+#define NR_INTERNAL_IRQS (64)
+#define NR_EXTERNAL_IRQS (64)
+#define NR_IRQS (NR_INTERNAL_IRQS + NR_EXTERNAL_IRQS)
+
+/* SWRST_MAIN_CTRL */
+#define CPU_RSTN_BIT (0)
+#define UTMI_RSTN_BIT (1)
+#define DDRPHY_RSTN_BIT (2)
+#define DDRC_RSTN_BIT (3)
+#define PIXEL_RSTN_BIT (6)
+#define PWM_RSTN_BIT (7)
+#define SPI0_RSTN_BIT (8)
+#define SPI1_RSTN_BIT (9)
+#define I2C0_RSTN_BIT (10)
+#define I2C1_RSTN_BIT (11)
+#define ACODEC_RSTN_BIT (12)
+#define I2S_RSTN_BIT (13)
+#define UART0_RSTN_BIT (14)
+#define UART1_RSTN_BIT (15)
+#define SADC_RSTN_BIT (16)
+#define PTS_RSTN_BIT (17)
+#define TMR_RSTN_BIT (18)
+#define SPI2_RSTN_BIT (20)
+#define PAE_ARC_RSTN_BIT (21)
+#define ARC_RSTN_BIT (22)
+#define EFUSE_RSTN_BIT (23)
+#define WAVE420_ARSTN_BIT (24)
+#define WAVE420_BRSTN_BIT (25)
+#define WAVE420_CRSTN_BIT (26)
+#define SYS_RSTN_BIT (31)
+
+/* SWRST_AHB_CTRL */
+#define EMC_HRSTN_BIT (0)
+#define SDC1_HRSTN_BIT (1)
+#define SDC0_HRSTN_BIT (2)
+#define AES_HRSTN_BIT (3)
+#define DMAC0_HRSTN_BIT (4)
+#define INTC_HRSTN_BIT (5)
+#define JPEG_HRSTN_BIT (8)
+#define VCU_HRSTN_BIT (9)
+#define VPU_HRSTN_BIT (10)
+#define ISP_HRSTN_BIT (11)
+#define USB_HRSTN_BIT (12)
+#define HRST1N_BIT (13)
+#define HRST0N_BIT (14)
+#define ARC_HRSTN_BIT (15)
+#define PAE_ARC_HRSTN_BIT (16)
+#define EMAC_HRSTN_BIT (17)
+#define DDRC_P3_UHRSTN_BIT (19)
+#define DMAC1_HRSTN_BIT (20)
+#define H2P_HRSTN_BIT (21)
+#define BGM_HRSTN_BIT (22)
+#define HRST2N_BIT (23)
+#define HRST3N_BIT (24)
+
+/* FH Serial HardWare HandShake */
+#define UART1_TX_HW_HANDSHAKE (9)
+#define UART1_RX_HW_HANDSHAKE (8)
+#define UART1_DMA_TX_CHAN (2)
+#define UART1_DMA_RX_CHAN (3)
+
+
+/* timer clk */
+#define TIMER_CLK (50000000)
+
+
+#endif /* __ASM_ARCH_FH8856_H */
diff --git a/arch/arm/mach-fh/include/mach/fh8856_iopad.h b/arch/arm/mach-fh/include/mach/fh8856_iopad.h
new file mode 100644
index 00000000..becb1a97
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh8856_iopad.h
@@ -0,0 +1,577 @@
+#include "pinctrl.h"
+#include "pinctrl_osdep.h"
+#include "board_config.h"
+
+/* PINCTRL_FUNC */
+PINCTRL_FUNC(GPIO19, 0, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_TX, 0, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO20, 1, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_RX, 1, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO21, 2, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(I2C1_SDA, 2, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO25, 3, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_TXD, 3, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_TXD, 3, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO26, 4, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_RXD, 4, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_RXD, 4, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO27, 5, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_CLK, 5, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_CLK, 5, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO28, 6, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 6, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_CSN, 6, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(USB_DBG_CLK, 6, FUNC3, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI0_D2, 7, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO50, 7, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C1_SDA, 7, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_TX, 7, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI0_D3, 8, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO51, 8, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C1_SCL, 8, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_RX, 8, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO22, 9, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(I2C1_SCL, 9, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO23, 10, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(AC_MCLK, 10, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO24, 11, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(USB_PWREN, 11, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(MAC_RMII_CLK, 15, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO15, 15, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CLK, 15, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM0, 15, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CLK, 15, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_CLK, 15, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_REF_CLK, 16, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(MAC_MDC, 17, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO34, 17, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_WP, 17, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(RTC_CLK, 17, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(AC_MCLK, 17, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(USB_PWREN, 17, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_MDIO, 18, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO17, 18, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(ARM_JTAG_TDO, 18, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM7, 18, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_RXD_0, 22, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO16, 22, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_0, 22, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM1, 22, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_RXD, 22, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_RXD, 22, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_RXD_1, 23, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO38, 23, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_1, 23, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM2, 23, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 23, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_CSN, 23, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_RXDV, 26, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO41, 26, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CD, 26, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM3, 26, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_TXD_0, 28, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO42, 28, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_2, 28, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM4, 28, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_TXD_1, 29, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO43, 29, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_3, 29, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM5, 29, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 29, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_CSN, 29, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(MAC_TXEN, 32, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO46, 32, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CMD_RSP, 32, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM6, 32, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_TXD, 32, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_TXD, 32, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(ARM_JTAG_TRSTN, 35, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO0, 35, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_TXD, 35, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_TXD, 35, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(AC_I2S_DO, 35, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(DW_I2S_DO, 35, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(ACIP_ADDAT, 35, FUNC6, PUPD_UP, 3);
+PINCTRL_FUNC(ARM_JTAG_TMS, 36, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO1, 36, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_RXD, 36, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_RXD, 36, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(AC_I2S_DI, 36, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(DW_I2S_DI, 36, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(ARM_JTAG_TCK, 37, FUNC0, PUPD_DOWN, 3);
+PINCTRL_FUNC(GPIO2, 37, FUNC1, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI1_CLK, 37, FUNC2, PUPD_DOWN, 3);
+PINCTRL_FUNC(SSI2_CLK, 37, FUNC3, PUPD_DOWN, 3);
+PINCTRL_FUNC(AC_I2S_CLK, 37, FUNC4, PUPD_DOWN, 3);
+PINCTRL_FUNC(DW_I2S_CLK, 37, FUNC5, PUPD_DOWN, 3);
+PINCTRL_FUNC(ACIP_BCLK, 37, FUNC6, PUPD_DOWN, 3);
+PINCTRL_FUNC(ARM_JTAG_TDI, 38, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO3, 38, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 38, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(SSI2_CSN, 38, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(AC_I2S_WS, 38, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(DW_I2S_WS, 38, FUNC5, PUPD_UP, 3);
+PINCTRL_FUNC(ACIP_ADLRC, 38, FUNC6, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO4, 39, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CLK, 39, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM0, 39, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO5, 40, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_0, 40, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM1, 40, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO6, 41, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_1, 41, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM2, 41, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO7, 42, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CD, 42, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM3, 42, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO8, 43, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_2, 43, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM4, 43, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO9, 44, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_DATA_3, 44, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM5, 44, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO10, 45, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_CMD_RSP, 45, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM6, 45, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO11, 46, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD1_WP, 46, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM7, 46, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM0, 47, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO12, 47, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(CIS_CLK, 47, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PWM1, 48, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO13, 48, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(PWM2, 49, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO14, 49, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(UART0_RX, 50, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO48, 50, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(UART0_TX, 51, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO49, 51, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C0_SCL, 52, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO57, 52, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(I2C0_SDA, 53, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO56, 53, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(SSI0_CLK, 56, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(SSI0_TXD, 57, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SSI0_CSN_0, 58, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO54, 58, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI0_CSN_1, 59, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO55, 59, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SSI0_RXD, 60, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_CD, 61, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO52, 61, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(ARC_JTAG_TRSTN, 61, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PAE_JTAG_TRSTN, 61, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_WP, 62, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO53, 62, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_CLK, 63, FUNC0, PUPD_NONE, 3);
+PINCTRL_FUNC(GPIO63, 63, FUNC1, PUPD_NONE, 3);
+PINCTRL_FUNC(ARC_JTAG_TMS, 63, FUNC2, PUPD_NONE, 3);
+PINCTRL_FUNC(PAE_JTAG_TMS, 63, FUNC3, PUPD_NONE, 3);
+PINCTRL_FUNC(SSI1_CLK, 63, FUNC4, PUPD_NONE, 3);
+PINCTRL_FUNC(SD0_CMD_RSP, 64, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO29, 64, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(ARC_JTAG_TCK, 64, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PAE_JTAG_TCK, 64, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_TXD, 64, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_DATA_0, 65, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO62, 65, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(ARC_JTAG_TDI, 65, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(PAE_JTAG_TDI, 65, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_RXD, 65, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_DATA_1, 66, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO61, 66, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(USB_PWREN, 66, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(AC_MCLK, 66, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 66, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_DATA_2, 67, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO60, 67, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C1_SDA, 67, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_TX, 67, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(ARC_JTAG_TDO, 67, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SD0_DATA_3, 68, FUNC0, PUPD_UP, 3);
+PINCTRL_FUNC(GPIO18, 68, FUNC1, PUPD_UP, 3);
+PINCTRL_FUNC(I2C1_SCL, 68, FUNC2, PUPD_UP, 3);
+PINCTRL_FUNC(UART1_RX, 68, FUNC3, PUPD_UP, 3);
+PINCTRL_FUNC(PAE_JTAG_TDO, 68, FUNC4, PUPD_UP, 3);
+PINCTRL_FUNC(SSI1_CSN_0, 68, FUNC5, PUPD_UP, 3);
+
+
+/* PINCTRL_MUX */
+
+PINCTRL_MUX(ACIP_ADDAT, 0, &PAD35_ACIP_ADDAT);
+PINCTRL_MUX(ACIP_ADLRC, 0, &PAD38_ACIP_ADLRC);
+PINCTRL_MUX(ACIP_BCLK, 0, &PAD37_ACIP_BCLK);
+
+PINCTRL_MUX(AC_I2S_CLK, 0, &PAD37_AC_I2S_CLK);
+PINCTRL_MUX(AC_I2S_DI, 0, &PAD36_AC_I2S_DI);
+PINCTRL_MUX(AC_I2S_DO, 0, &PAD35_AC_I2S_DO);
+PINCTRL_MUX(AC_I2S_WS, 0, &PAD38_AC_I2S_WS);
+PINCTRL_MUX(AC_MCLK, 0, &PAD10_AC_MCLK, &PAD17_AC_MCLK, &PAD66_AC_MCLK);
+
+PINCTRL_MUX(ARC_JTAG_TCK, 0, &PAD64_ARC_JTAG_TCK);
+PINCTRL_MUX(ARC_JTAG_TDI, 0, &PAD65_ARC_JTAG_TDI);
+PINCTRL_MUX(ARC_JTAG_TDO, 0, &PAD67_ARC_JTAG_TDO);
+PINCTRL_MUX(ARC_JTAG_TMS, 0, &PAD63_ARC_JTAG_TMS);
+PINCTRL_MUX(ARC_JTAG_TRSTN, 0, &PAD61_ARC_JTAG_TRSTN);
+
+PINCTRL_MUX(ARM_JTAG_TCK, 0, &PAD37_ARM_JTAG_TCK);
+PINCTRL_MUX(ARM_JTAG_TDI, 0, &PAD38_ARM_JTAG_TDI);
+PINCTRL_MUX(ARM_JTAG_TDO, 0, &PAD18_ARM_JTAG_TDO);
+PINCTRL_MUX(ARM_JTAG_TMS, 0, &PAD36_ARM_JTAG_TMS);
+PINCTRL_MUX(ARM_JTAG_TRSTN, 0, &PAD35_ARM_JTAG_TRSTN);
+
+PINCTRL_MUX(CIS_CLK, 0, &PAD47_CIS_CLK);
+
+PINCTRL_MUX(DW_I2S_CLK, 0, &PAD37_DW_I2S_CLK);
+PINCTRL_MUX(DW_I2S_DI, 0, &PAD36_DW_I2S_DI);
+PINCTRL_MUX(DW_I2S_DO, 0, &PAD35_DW_I2S_DO);
+PINCTRL_MUX(DW_I2S_WS, 0, &PAD38_DW_I2S_WS);
+
+PINCTRL_MUX(I2C0_SCL, 0, &PAD52_I2C0_SCL);
+PINCTRL_MUX(I2C0_SDA, 0, &PAD53_I2C0_SDA);
+
+PINCTRL_MUX(I2C1_SCL, 1, &PAD8_I2C1_SCL, &PAD9_I2C1_SCL, &PAD68_I2C1_SCL);
+PINCTRL_MUX(I2C1_SDA, 0, &PAD2_I2C1_SDA, &PAD7_I2C1_SDA, &PAD67_I2C1_SDA);
+
+PINCTRL_MUX(MAC_MDC, 0, &PAD17_MAC_MDC);
+PINCTRL_MUX(MAC_MDIO, 0, &PAD18_MAC_MDIO);
+PINCTRL_MUX(MAC_REF_CLK, 0, &PAD16_MAC_REF_CLK);
+PINCTRL_MUX(MAC_RMII_CLK, 0, &PAD15_MAC_RMII_CLK);
+PINCTRL_MUX(MAC_RXDV, 0, &PAD26_MAC_RXDV);
+PINCTRL_MUX(MAC_RXD_0, 0, &PAD22_MAC_RXD_0);
+PINCTRL_MUX(MAC_RXD_1, 0, &PAD23_MAC_RXD_1);
+PINCTRL_MUX(MAC_TXD_0, 0, &PAD28_MAC_TXD_0);
+PINCTRL_MUX(MAC_TXD_1, 0, &PAD29_MAC_TXD_1);
+PINCTRL_MUX(MAC_TXEN, 0, &PAD32_MAC_TXEN);
+
+PINCTRL_MUX(PAE_JTAG_TCK, 0, &PAD64_PAE_JTAG_TCK);
+PINCTRL_MUX(PAE_JTAG_TDI, 0, &PAD65_PAE_JTAG_TDI);
+PINCTRL_MUX(PAE_JTAG_TDO, 0, &PAD68_PAE_JTAG_TDO);
+PINCTRL_MUX(PAE_JTAG_TMS, 0, &PAD63_PAE_JTAG_TMS);
+PINCTRL_MUX(PAE_JTAG_TRSTN, 0, &PAD61_PAE_JTAG_TRSTN);
+
+PINCTRL_MUX(PWM0, 2, &PAD15_PWM0, &PAD39_PWM0, &PAD47_PWM0);
+PINCTRL_MUX(PWM1, 2, &PAD22_PWM1, &PAD40_PWM1, &PAD48_PWM1);
+PINCTRL_MUX(PWM2, 2, &PAD23_PWM2, &PAD41_PWM2, &PAD49_PWM2);
+PINCTRL_MUX(PWM3, 1, &PAD26_PWM3, &PAD42_PWM3);
+PINCTRL_MUX(PWM4, 1, &PAD28_PWM4, &PAD43_PWM4);
+PINCTRL_MUX(PWM5, 1, &PAD29_PWM5, &PAD44_PWM5);
+PINCTRL_MUX(PWM6, 1, &PAD32_PWM6, &PAD45_PWM6);
+PINCTRL_MUX(PWM7, 1, &PAD18_PWM7, &PAD46_PWM7);
+
+PINCTRL_MUX(RTC_CLK, 0, &PAD17_RTC_CLK);
+
+PINCTRL_MUX(SD0_CD, 0, &PAD61_SD0_CD);
+PINCTRL_MUX(SD0_CLK, 0, &PAD63_SD0_CLK);
+PINCTRL_MUX(SD0_CMD_RSP, 0, &PAD64_SD0_CMD_RSP);
+PINCTRL_MUX(SD0_DATA_0, 0, &PAD65_SD0_DATA_0);
+PINCTRL_MUX(SD0_DATA_1, 0, &PAD66_SD0_DATA_1);
+PINCTRL_MUX(SD0_DATA_2, 0, &PAD67_SD0_DATA_2);
+PINCTRL_MUX(SD0_DATA_3, 0, &PAD68_SD0_DATA_3);
+PINCTRL_MUX(SD0_WP, 0, &PAD62_SD0_WP);
+
+PINCTRL_MUX(SD1_CD, 1, &PAD26_SD1_CD, &PAD42_SD1_CD);
+PINCTRL_MUX(SD1_CLK, 1, &PAD15_SD1_CLK, &PAD39_SD1_CLK);
+PINCTRL_MUX(SD1_CMD_RSP, 1, &PAD32_SD1_CMD_RSP, &PAD45_SD1_CMD_RSP);
+PINCTRL_MUX(SD1_DATA_0, 1, &PAD22_SD1_DATA_0, &PAD40_SD1_DATA_0);
+PINCTRL_MUX(SD1_DATA_1, 1, &PAD23_SD1_DATA_1, &PAD41_SD1_DATA_1);
+PINCTRL_MUX(SD1_DATA_2, 1, &PAD28_SD1_DATA_2, &PAD43_SD1_DATA_2);
+PINCTRL_MUX(SD1_DATA_3, 1, &PAD29_SD1_DATA_3, &PAD44_SD1_DATA_3);
+PINCTRL_MUX(SD1_WP, 1, &PAD17_SD1_WP, &PAD46_SD1_WP);
+
+PINCTRL_MUX(SSI0_CLK, 0, &PAD56_SSI0_CLK);
+PINCTRL_MUX(SSI0_CSN_0, 0, &PAD58_SSI0_CSN_0);
+PINCTRL_MUX(SSI0_CSN_1, 0, &PAD59_SSI0_CSN_1);
+PINCTRL_MUX(SSI0_D2, 0, &PAD7_SSI0_D2);
+PINCTRL_MUX(SSI0_D3, 0, &PAD8_SSI0_D3);
+PINCTRL_MUX(SSI0_RXD, 0, &PAD60_SSI0_RXD);
+PINCTRL_MUX(SSI0_TXD, 0, &PAD57_SSI0_TXD);
+
+PINCTRL_MUX(SSI1_CLK, 2, &PAD5_SSI1_CLK, &PAD15_SSI1_CLK, &PAD37_SSI1_CLK,
+ &PAD63_SSI1_CLK);
+PINCTRL_MUX(SSI1_CSN_0, 3, &PAD6_SSI1_CSN_0, &PAD23_SSI1_CSN_0,
+ &PAD29_SSI1_CSN_0, &PAD38_SSI1_CSN_0, &PAD66_SSI1_CSN_0,
+ &PAD68_SSI1_CSN_0);
+PINCTRL_MUX(SSI1_RXD, 2, &PAD4_SSI1_RXD, &PAD22_SSI1_RXD, &PAD36_SSI1_RXD,
+ &PAD65_SSI1_RXD);
+PINCTRL_MUX(SSI1_TXD, 2, &PAD3_SSI1_TXD, &PAD32_SSI1_TXD, &PAD35_SSI1_TXD,
+ &PAD64_SSI1_TXD);
+
+PINCTRL_MUX(SSI2_CLK, 0, &PAD5_SSI2_CLK, &PAD15_SSI2_CLK, &PAD37_SSI2_CLK);
+PINCTRL_MUX(SSI2_CSN, 0, &PAD6_SSI2_CSN, &PAD23_SSI2_CSN, &PAD29_SSI2_CSN,
+ &PAD38_SSI2_CSN);
+PINCTRL_MUX(SSI2_RXD, 0, &PAD4_SSI2_RXD, &PAD22_SSI2_RXD, &PAD36_SSI2_RXD);
+PINCTRL_MUX(SSI2_TXD, 0, &PAD3_SSI2_TXD, &PAD32_SSI2_TXD, &PAD35_SSI2_TXD);
+
+PINCTRL_MUX(UART0_RX, 0, &PAD50_UART0_RX);
+PINCTRL_MUX(UART0_TX, 0, &PAD51_UART0_TX);
+
+PINCTRL_MUX(UART1_RX, 0, &PAD1_UART1_RX, &PAD8_UART1_RX, &PAD68_UART1_RX);
+PINCTRL_MUX(UART1_TX, 0, &PAD0_UART1_TX, &PAD7_UART1_TX, &PAD67_UART1_TX);
+
+PINCTRL_MUX(USB_DBG_CLK, 0, &PAD6_USB_DBG_CLK);
+PINCTRL_MUX(USB_PWREN, 0, &PAD11_USB_PWREN, &PAD17_USB_PWREN, &PAD66_USB_PWREN);
+
+PINCTRL_MUX(GPIO0, 0, &PAD35_GPIO0);
+PINCTRL_MUX(GPIO1, 0, &PAD36_GPIO1);
+PINCTRL_MUX(GPIO2, 0, &PAD37_GPIO2);
+PINCTRL_MUX(GPIO3, 0, &PAD38_GPIO3);
+PINCTRL_MUX(GPIO4, 0, &PAD39_GPIO4);
+PINCTRL_MUX(GPIO5, 0, &PAD40_GPIO5);
+PINCTRL_MUX(GPIO6, 0, &PAD41_GPIO6);
+PINCTRL_MUX(GPIO7, 0, &PAD42_GPIO7);
+PINCTRL_MUX(GPIO8, 0, &PAD43_GPIO8);
+PINCTRL_MUX(GPIO9, 0, &PAD44_GPIO9);
+PINCTRL_MUX(GPIO10, 0, &PAD45_GPIO10);
+PINCTRL_MUX(GPIO11, 0, &PAD46_GPIO11);
+PINCTRL_MUX(GPIO12, 0, &PAD47_GPIO12);
+PINCTRL_MUX(GPIO13, 0, &PAD48_GPIO13);
+PINCTRL_MUX(GPIO14, 0, &PAD49_GPIO14);
+PINCTRL_MUX(GPIO15, 0, &PAD15_GPIO15);
+PINCTRL_MUX(GPIO16, 0, &PAD22_GPIO16);
+PINCTRL_MUX(GPIO17, 0, &PAD18_GPIO17);
+PINCTRL_MUX(GPIO18, 0, &PAD68_GPIO18);
+PINCTRL_MUX(GPIO19, 0, &PAD0_GPIO19);
+PINCTRL_MUX(GPIO20, 0, &PAD1_GPIO20);
+PINCTRL_MUX(GPIO21, 0, &PAD2_GPIO21);
+PINCTRL_MUX(GPIO22, 0, &PAD9_GPIO22);
+PINCTRL_MUX(GPIO23, 0, &PAD10_GPIO23);
+PINCTRL_MUX(GPIO24, 0, &PAD11_GPIO24);
+PINCTRL_MUX(GPIO25, 0, &PAD3_GPIO25);
+PINCTRL_MUX(GPIO26, 0, &PAD4_GPIO26);
+PINCTRL_MUX(GPIO27, 0, &PAD5_GPIO27);
+PINCTRL_MUX(GPIO28, 0, &PAD6_GPIO28);
+PINCTRL_MUX(GPIO29, 0, &PAD64_GPIO29);
+PINCTRL_MUX(GPIO34, 0, &PAD17_GPIO34);
+PINCTRL_MUX(GPIO38, 0, &PAD23_GPIO38);
+PINCTRL_MUX(GPIO41, 0, &PAD26_GPIO41);
+PINCTRL_MUX(GPIO42, 0, &PAD28_GPIO42);
+PINCTRL_MUX(GPIO43, 0, &PAD29_GPIO43);
+PINCTRL_MUX(GPIO46, 0, &PAD32_GPIO46);
+PINCTRL_MUX(GPIO48, 0, &PAD50_GPIO48);
+PINCTRL_MUX(GPIO49, 0, &PAD51_GPIO49);
+PINCTRL_MUX(GPIO50, 0, &PAD7_GPIO50);
+PINCTRL_MUX(GPIO51, 0, &PAD8_GPIO51);
+PINCTRL_MUX(GPIO52, 0, &PAD61_GPIO52);
+PINCTRL_MUX(GPIO53, 0, &PAD62_GPIO53);
+PINCTRL_MUX(GPIO54, 0, &PAD58_GPIO54);
+PINCTRL_MUX(GPIO55, 0, &PAD59_GPIO55);
+PINCTRL_MUX(GPIO56, 0, &PAD53_GPIO56);
+PINCTRL_MUX(GPIO57, 0, &PAD52_GPIO57);
+PINCTRL_MUX(GPIO60, 0, &PAD67_GPIO60);
+PINCTRL_MUX(GPIO61, 0, &PAD66_GPIO61);
+PINCTRL_MUX(GPIO62, 0, &PAD65_GPIO62);
+PINCTRL_MUX(GPIO63, 0, &PAD63_GPIO63);
+
+PINCTRL_MUX(SD1_CLK_RMII, 0, &PAD15_SD1_CLK, &PAD39_SD1_CLK);
+PINCTRL_MUX(SD1_CD_RMII, 0, &PAD26_SD1_CD, &PAD42_SD1_CD);
+PINCTRL_MUX(SD1_CMD_RSP_RMII, 0, &PAD32_SD1_CMD_RSP, &PAD45_SD1_CMD_RSP);
+PINCTRL_MUX(SD1_WP_RMII, 0, &PAD17_SD1_WP, &PAD46_SD1_WP);
+PINCTRL_MUX(SD1_DATA_0_RMII, 0, &PAD22_SD1_DATA_0, &PAD40_SD1_DATA_0);
+PINCTRL_MUX(SD1_DATA_1_RMII, 0, &PAD23_SD1_DATA_1, &PAD41_SD1_DATA_1);
+PINCTRL_MUX(SD1_DATA_2_RMII, 0, &PAD28_SD1_DATA_2, &PAD43_SD1_DATA_2);
+PINCTRL_MUX(SD1_DATA_3_RMII, 0, &PAD29_SD1_DATA_3, &PAD44_SD1_DATA_3);
+
+/* PINCTRL_DEVICE */
+PINCTRL_DEVICE(ACI2S, 5, &MUX_AC_I2S_CLK, &MUX_AC_I2S_DI, &MUX_AC_I2S_DO,
+ &MUX_AC_I2S_WS, &MUX_AC_MCLK);
+PINCTRL_DEVICE(ACIP, 3, &MUX_ACIP_ADDAT, &MUX_ACIP_ADLRC, &MUX_ACIP_BCLK);
+PINCTRL_DEVICE(ARCJTAG, 5, &MUX_ARC_JTAG_TCK, &MUX_ARC_JTAG_TDI,
+ &MUX_ARC_JTAG_TDO, &MUX_ARC_JTAG_TMS, &MUX_ARC_JTAG_TRSTN);
+PINCTRL_DEVICE(ARMJTAG, 5, &MUX_ARM_JTAG_TCK, &MUX_ARM_JTAG_TDI,
+ &MUX_ARM_JTAG_TDO, &MUX_ARM_JTAG_TMS, &MUX_ARM_JTAG_TRSTN);
+PINCTRL_DEVICE(DWI2S, 4, &MUX_DW_I2S_CLK, &MUX_DW_I2S_DI, &MUX_DW_I2S_DO,
+ &MUX_DW_I2S_WS);
+PINCTRL_DEVICE(I2C0, 2, &MUX_I2C0_SCL, &MUX_I2C0_SDA);
+PINCTRL_DEVICE(I2C1, 2, &MUX_I2C1_SCL, &MUX_I2C1_SDA);
+PINCTRL_DEVICE(MIPI, 1, &MUX_CIS_CLK);
+PINCTRL_DEVICE(PAEJTAG, 5, &MUX_PAE_JTAG_TCK, &MUX_PAE_JTAG_TDI,
+ &MUX_PAE_JTAG_TDO, &MUX_PAE_JTAG_TMS, &MUX_PAE_JTAG_TRSTN);
+PINCTRL_DEVICE(PWM0, 1, &MUX_PWM0);
+PINCTRL_DEVICE(PWM1, 1, &MUX_PWM1);
+PINCTRL_DEVICE(PWM2, 1, &MUX_PWM2);
+PINCTRL_DEVICE(PWM3, 1, &MUX_PWM3);
+PINCTRL_DEVICE(PWM4, 1, &MUX_PWM4);
+PINCTRL_DEVICE(PWM5, 1, &MUX_PWM5);
+PINCTRL_DEVICE(PWM6, 1, &MUX_PWM6);
+PINCTRL_DEVICE(PWM7, 1, &MUX_PWM7);
+PINCTRL_DEVICE(RMII, 10, &MUX_MAC_MDC, &MUX_MAC_MDIO, &MUX_MAC_REF_CLK,
+ &MUX_MAC_RMII_CLK, &MUX_MAC_RXDV, &MUX_MAC_RXD_0, &MUX_MAC_RXD_1,
+ &MUX_MAC_TXD_0, &MUX_MAC_TXD_1, &MUX_MAC_TXEN);
+PINCTRL_DEVICE(RTC, 1, &MUX_RTC_CLK);
+PINCTRL_DEVICE(SD0, 8, &MUX_SD0_CD, &MUX_SD0_CLK, &MUX_SD0_CMD_RSP,
+ &MUX_SD0_DATA_0, &MUX_SD0_DATA_1, &MUX_SD0_DATA_2, &MUX_SD0_DATA_3,
+ &MUX_SD0_WP);
+PINCTRL_DEVICE(SD0_1BIT_NO_WP, 4, &MUX_SD0_CD, &MUX_SD0_CLK, &MUX_SD0_CMD_RSP,
+ &MUX_SD0_DATA_0);
+PINCTRL_DEVICE(SD0_NO_WP, 7, &MUX_SD0_CD, &MUX_SD0_CLK, &MUX_SD0_CMD_RSP,
+ &MUX_SD0_DATA_0, &MUX_SD0_DATA_1, &MUX_SD0_DATA_2, &MUX_SD0_DATA_3);
+PINCTRL_DEVICE(SD1, 8, &MUX_SD1_CD, &MUX_SD1_CLK, &MUX_SD1_CMD_RSP,
+ &MUX_SD1_DATA_0, &MUX_SD1_DATA_1, &MUX_SD1_DATA_2, &MUX_SD1_DATA_3,
+ &MUX_SD1_WP);
+PINCTRL_DEVICE(SD1_1BIT_NO_WP, 4, &MUX_SD1_CD, &MUX_SD1_CLK, &MUX_SD1_CMD_RSP,
+ &MUX_SD1_DATA_0);
+PINCTRL_DEVICE(SD1_NO_WP, 7, &MUX_SD1_CD, &MUX_SD1_CLK, &MUX_SD1_CMD_RSP,
+ &MUX_SD1_DATA_0, &MUX_SD1_DATA_1, &MUX_SD1_DATA_2, &MUX_SD1_DATA_3);
+PINCTRL_DEVICE(SSI0, 4, &MUX_GPIO54, &MUX_SSI0_CLK, &MUX_SSI0_RXD,
+ &MUX_SSI0_TXD);
+PINCTRL_DEVICE(SSI0_4BIT, 6, &MUX_GPIO54, &MUX_SSI0_CLK, &MUX_SSI0_D2,
+ &MUX_SSI0_D3, &MUX_SSI0_RXD, &MUX_SSI0_TXD);
+PINCTRL_DEVICE(SSI1, 4, &MUX_SSI1_CLK, &MUX_SSI1_CSN_0, &MUX_SSI1_RXD,
+ &MUX_SSI1_TXD);
+PINCTRL_DEVICE(SSI2, 4, &MUX_SSI2_CLK, &MUX_SSI2_CSN, &MUX_SSI2_RXD,
+ &MUX_SSI2_TXD);
+PINCTRL_DEVICE(UART0, 2, &MUX_UART0_RX, &MUX_UART0_TX);
+PINCTRL_DEVICE(UART1, 2, &MUX_UART1_RX, &MUX_UART1_TX);
+PINCTRL_DEVICE(USB, 1, &MUX_USB_PWREN);
+PINCTRL_DEVICE(GPIO0, 1, &MUX_GPIO0);
+PINCTRL_DEVICE(GPIO1, 1, &MUX_GPIO1);
+PINCTRL_DEVICE(GPIO2, 1, &MUX_GPIO2);
+PINCTRL_DEVICE(GPIO3, 1, &MUX_GPIO3);
+PINCTRL_DEVICE(GPIO4, 1, &MUX_GPIO4);
+PINCTRL_DEVICE(GPIO5, 1, &MUX_GPIO5);
+PINCTRL_DEVICE(GPIO6, 1, &MUX_GPIO6);
+PINCTRL_DEVICE(GPIO7, 1, &MUX_GPIO7);
+PINCTRL_DEVICE(GPIO8, 1, &MUX_GPIO8);
+PINCTRL_DEVICE(GPIO9, 1, &MUX_GPIO9);
+PINCTRL_DEVICE(GPIO10, 1, &MUX_GPIO10);
+PINCTRL_DEVICE(GPIO11, 1, &MUX_GPIO11);
+PINCTRL_DEVICE(GPIO12, 1, &MUX_GPIO12);
+PINCTRL_DEVICE(GPIO13, 1, &MUX_GPIO13);
+PINCTRL_DEVICE(GPIO14, 1, &MUX_GPIO14);
+PINCTRL_DEVICE(GPIO15, 1, &MUX_GPIO15);
+PINCTRL_DEVICE(GPIO16, 1, &MUX_GPIO16);
+PINCTRL_DEVICE(GPIO17, 1, &MUX_GPIO17);
+PINCTRL_DEVICE(GPIO18, 1, &MUX_GPIO18);
+PINCTRL_DEVICE(GPIO19, 1, &MUX_GPIO19);
+PINCTRL_DEVICE(GPIO20, 1, &MUX_GPIO20);
+PINCTRL_DEVICE(GPIO21, 1, &MUX_GPIO21);
+PINCTRL_DEVICE(GPIO22, 1, &MUX_GPIO22);
+PINCTRL_DEVICE(GPIO23, 1, &MUX_GPIO23);
+PINCTRL_DEVICE(GPIO24, 1, &MUX_GPIO24);
+PINCTRL_DEVICE(GPIO25, 1, &MUX_GPIO25);
+PINCTRL_DEVICE(GPIO26, 1, &MUX_GPIO26);
+PINCTRL_DEVICE(GPIO27, 1, &MUX_GPIO27);
+PINCTRL_DEVICE(GPIO28, 1, &MUX_GPIO28);
+PINCTRL_DEVICE(GPIO29, 1, &MUX_GPIO29);
+PINCTRL_DEVICE(GPIO34, 1, &MUX_GPIO34);
+PINCTRL_DEVICE(GPIO38, 1, &MUX_GPIO38);
+PINCTRL_DEVICE(GPIO41, 1, &MUX_GPIO41);
+PINCTRL_DEVICE(GPIO42, 1, &MUX_GPIO42);
+PINCTRL_DEVICE(GPIO43, 1, &MUX_GPIO43);
+PINCTRL_DEVICE(GPIO46, 1, &MUX_GPIO46);
+PINCTRL_DEVICE(GPIO48, 1, &MUX_GPIO48);
+PINCTRL_DEVICE(GPIO49, 1, &MUX_GPIO49);
+PINCTRL_DEVICE(GPIO50, 1, &MUX_GPIO50);
+PINCTRL_DEVICE(GPIO51, 1, &MUX_GPIO51);
+PINCTRL_DEVICE(GPIO52, 1, &MUX_GPIO52);
+PINCTRL_DEVICE(GPIO53, 1, &MUX_GPIO53);
+PINCTRL_DEVICE(GPIO54, 1, &MUX_GPIO54);
+PINCTRL_DEVICE(GPIO55, 1, &MUX_GPIO55);
+PINCTRL_DEVICE(GPIO56, 1, &MUX_GPIO56);
+PINCTRL_DEVICE(GPIO57, 1, &MUX_GPIO57);
+PINCTRL_DEVICE(GPIO60, 1, &MUX_GPIO60);
+PINCTRL_DEVICE(GPIO61, 1, &MUX_GPIO61);
+PINCTRL_DEVICE(GPIO62, 1, &MUX_GPIO62);
+PINCTRL_DEVICE(GPIO63, 1, &MUX_GPIO63);
+
+PINCTRL_DEVICE(SD1_WIFI_RMII, 7, &MUX_SD1_CLK_RMII, &MUX_SD1_CD_RMII,
+ &MUX_SD1_CMD_RSP_RMII, &MUX_SD1_DATA_0_RMII, &MUX_SD1_DATA_1_RMII,
+ &MUX_SD1_DATA_2_RMII, &MUX_SD1_DATA_3_RMII);
+
+void fh_pinctrl_init_devicelist(OS_LIST *list)
+{
+ OS_LIST_EMPTY(list);
+
+ /*PINCTRL_ADD_DEVICE*/
+ PINCTRL_ADD_DEVICE(ACI2S);
+ PINCTRL_ADD_DEVICE(ACIP);
+ PINCTRL_ADD_DEVICE(ARCJTAG);
+ PINCTRL_ADD_DEVICE(ARMJTAG);
+ PINCTRL_ADD_DEVICE(DWI2S);
+ PINCTRL_ADD_DEVICE(I2C0);
+ PINCTRL_ADD_DEVICE(I2C1);
+ PINCTRL_ADD_DEVICE(MIPI);
+ PINCTRL_ADD_DEVICE(PAEJTAG);
+ PINCTRL_ADD_DEVICE(PWM0);
+ PINCTRL_ADD_DEVICE(PWM1);
+ PINCTRL_ADD_DEVICE(PWM2);
+ PINCTRL_ADD_DEVICE(PWM3);
+ PINCTRL_ADD_DEVICE(PWM4);
+ PINCTRL_ADD_DEVICE(PWM5);
+ PINCTRL_ADD_DEVICE(PWM6);
+ PINCTRL_ADD_DEVICE(PWM7);
+ PINCTRL_ADD_DEVICE(RMII);
+ PINCTRL_ADD_DEVICE(RTC);
+ PINCTRL_ADD_DEVICE(SD0);
+ PINCTRL_ADD_DEVICE(SD0_1BIT_NO_WP);
+ PINCTRL_ADD_DEVICE(SD0_NO_WP);
+ PINCTRL_ADD_DEVICE(SD1);
+ PINCTRL_ADD_DEVICE(SD1_1BIT_NO_WP);
+ PINCTRL_ADD_DEVICE(SD1_NO_WP);
+ PINCTRL_ADD_DEVICE(SSI0);
+ PINCTRL_ADD_DEVICE(SSI0_4BIT);
+ PINCTRL_ADD_DEVICE(SSI1);
+ PINCTRL_ADD_DEVICE(SSI2);
+ PINCTRL_ADD_DEVICE(UART0);
+ PINCTRL_ADD_DEVICE(UART1);
+ PINCTRL_ADD_DEVICE(USB);
+ PINCTRL_ADD_DEVICE(GPIO0);
+ PINCTRL_ADD_DEVICE(GPIO1);
+ PINCTRL_ADD_DEVICE(GPIO2);
+ PINCTRL_ADD_DEVICE(GPIO3);
+ PINCTRL_ADD_DEVICE(GPIO4);
+ PINCTRL_ADD_DEVICE(GPIO5);
+ PINCTRL_ADD_DEVICE(GPIO6);
+ PINCTRL_ADD_DEVICE(GPIO7);
+ PINCTRL_ADD_DEVICE(GPIO8);
+ PINCTRL_ADD_DEVICE(GPIO9);
+ PINCTRL_ADD_DEVICE(GPIO10);
+ PINCTRL_ADD_DEVICE(GPIO11);
+ PINCTRL_ADD_DEVICE(GPIO12);
+ PINCTRL_ADD_DEVICE(GPIO13);
+ PINCTRL_ADD_DEVICE(GPIO14);
+ PINCTRL_ADD_DEVICE(GPIO15);
+ PINCTRL_ADD_DEVICE(GPIO16);
+ PINCTRL_ADD_DEVICE(GPIO17);
+ PINCTRL_ADD_DEVICE(GPIO18);
+ PINCTRL_ADD_DEVICE(GPIO19);
+ PINCTRL_ADD_DEVICE(GPIO20);
+ PINCTRL_ADD_DEVICE(GPIO21);
+ PINCTRL_ADD_DEVICE(GPIO22);
+ PINCTRL_ADD_DEVICE(GPIO23);
+ PINCTRL_ADD_DEVICE(GPIO24);
+ PINCTRL_ADD_DEVICE(GPIO25);
+ PINCTRL_ADD_DEVICE(GPIO26);
+ PINCTRL_ADD_DEVICE(GPIO27);
+ PINCTRL_ADD_DEVICE(GPIO28);
+ PINCTRL_ADD_DEVICE(GPIO29);
+ PINCTRL_ADD_DEVICE(GPIO34);
+ PINCTRL_ADD_DEVICE(GPIO38);
+ PINCTRL_ADD_DEVICE(GPIO41);
+ PINCTRL_ADD_DEVICE(GPIO42);
+ PINCTRL_ADD_DEVICE(GPIO43);
+ PINCTRL_ADD_DEVICE(GPIO46);
+ PINCTRL_ADD_DEVICE(GPIO48);
+ PINCTRL_ADD_DEVICE(GPIO49);
+ PINCTRL_ADD_DEVICE(GPIO50);
+ PINCTRL_ADD_DEVICE(GPIO51);
+ PINCTRL_ADD_DEVICE(GPIO52);
+ PINCTRL_ADD_DEVICE(GPIO53);
+ PINCTRL_ADD_DEVICE(GPIO54);
+ PINCTRL_ADD_DEVICE(GPIO55);
+ PINCTRL_ADD_DEVICE(GPIO56);
+ PINCTRL_ADD_DEVICE(GPIO57);
+ PINCTRL_ADD_DEVICE(GPIO60);
+ PINCTRL_ADD_DEVICE(GPIO61);
+ PINCTRL_ADD_DEVICE(GPIO62);
+ PINCTRL_ADD_DEVICE(GPIO63);
+
+ PINCTRL_ADD_DEVICE(SD1_WIFI_RMII);
+}
+
+char* fh_pinctrl_selected_devices[] =
+{
+ CONFIG_PINCTRL_SELECT
+};
diff --git a/arch/arm/mach-fh/include/mach/fh_chipid.h b/arch/arm/mach-fh/include/mach/fh_chipid.h
new file mode 100644
index 00000000..c2f9d1b9
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_chipid.h
@@ -0,0 +1,46 @@
+/**
+ * Copyright (c) 2015-2019 Shanghai Fullhan Microelectronics Co., Ltd.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ *
+ * Change Logs:
+ * Date Author Notes
+ * 2019-08-20 wangyl add license Apache-2.0
+ */
+
+#ifndef __FH_CHIPID_H__
+#define __FH_CHIPID_H__
+
+#define FH_CHIP_FH8830 0x883000A1
+#define FH_CHIP_FH8630M 0x883000B1
+#define FH_CHIP_FH8632 0x863200A1
+#define FH_CHIP_FH8632v2 0x863200A2
+#define FH_CHIP_FH8856 0x885600A1
+#define FH_CHIP_FH8852 0x885600B1
+#define FH_CHIP_FH8626V100 0x8626A100
+
+struct fh_chip_info
+{
+ int _plat_id; /* raw plat_id value read from the chip register */
+ int _chip_id; /* raw chip_id value read from the chip register */
+ int _chip_mask; /* mask applied to the raw chip_id register value */
+ int chip_id; /* resolved chip id, see the FH_CHIP_* definitions above */
+ int ddr_size; /* on-chip DDR size, in Mbit */
+ char chip_name[32]; /* human-readable chip name */
+};
+
+void fh_get_chipid(unsigned int *plat_id, unsigned int *chip_id);
+unsigned int fh_get_ddrsize_mbit(void);
+char *fh_get_chipname(void);
+struct fh_chip_info *fh_get_chip_info(void);
+
+unsigned int fh_is_8830(void);
+unsigned int fh_is_8632(void);
+unsigned int fh_is_8852(void);
+unsigned int fh_is_8856(void);
+unsigned int fh_is_8626v100(void);
+
+int fh_chipid_init(void);
+
+#endif /* __FH_CHIPID_H__ */
diff --git a/arch/arm/mach-fh/include/mach/fh_dmac.h b/arch/arm/mach-fh/include/mach/fh_dmac.h
new file mode 100644
index 00000000..c6d100b6
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_dmac.h
@@ -0,0 +1,151 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
+ * AVR32 systems.)
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef FH_DMAC_H
+#define FH_DMAC_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * enum fh_dma_slave_width - DMA slave register access width.
+ * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
+ */
+enum fh_dma_slave_width {
+ FH_DMA_SLAVE_WIDTH_8BIT,
+ FH_DMA_SLAVE_WIDTH_16BIT,
+ FH_DMA_SLAVE_WIDTH_32BIT,
+};
+
+/* bursts size */
+enum fh_dma_msize {
+ FH_DMA_MSIZE_1,
+ FH_DMA_MSIZE_4,
+ FH_DMA_MSIZE_8,
+ FH_DMA_MSIZE_16,
+ FH_DMA_MSIZE_32,
+ FH_DMA_MSIZE_64,
+ FH_DMA_MSIZE_128,
+ FH_DMA_MSIZE_256,
+};
+
+/* flow controller */
+enum fh_dma_fc {
+ FH_DMA_FC_D_M2M,
+ FH_DMA_FC_D_M2P,
+ FH_DMA_FC_D_P2M,
+ FH_DMA_FC_D_P2P,
+ FH_DMA_FC_P_P2M,
+ FH_DMA_FC_SP_P2P,
+ FH_DMA_FC_P_M2P,
+ FH_DMA_FC_DP_P2P,
+};
+
+/**
+ * struct fh_dma_slave - Controller-specific information about a slave
+ *
+ * @dma_dev: required DMA master device
+ * @tx_reg: physical address of data register used for
+ * memory-to-peripheral transfers
+ * @rx_reg: physical address of data register used for
+ * peripheral-to-memory transfers
+ * @reg_width: peripheral register width
+ * @cfg_hi: Platform-specific initializer for the CFG_HI register
+ * @cfg_lo: Platform-specific initializer for the CFG_LO register
+ * @src_master: src master for transfers on allocated channel.
+ * @dst_master: dest master for transfers on allocated channel.
+ * @src_msize: src burst size.
+ * @dst_msize: dest burst size.
+ * @fc: flow controller for DMA transfer
+ */
+struct fh_dma_slave {
+ struct device *dma_dev;
+ dma_addr_t tx_reg;
+ dma_addr_t rx_reg;
+ enum fh_dma_slave_width reg_width;
+ u32 cfg_hi;
+ u32 cfg_lo;
+ u8 src_master;
+ u8 dst_master;
+ u8 src_msize;
+ u8 dst_msize;
+ u8 fc;
+};
+
+
+/**
+ * struct fh_dma_platform_data - Controller configuration parameters
+ * @nr_channels: Number of channels supported by hardware (max 8)
+ * @is_private: The device channels should be marked as private and not
+ * for use by the general purpose DMA channel allocator.
+ * @chan_allocation_order: Allocate channels starting from 0 or 7
+ * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
+ * @block_size: Maximum block size supported by the controller
+ * @nr_masters: Number of AHB masters supported by the controller
+ * @data_width: Maximum data width supported by hardware per AHB master
+ * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+ * NOTE(review): @sd/@sd_count were documented here but no such members
+ * exist below; slave configuration is passed via struct fh_dma_slave.
+ */
+struct fh_dma_platform_data {
+ unsigned int nr_channels;
+ bool is_private;
+#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
+#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
+ unsigned char chan_allocation_order;
+#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
+#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
+ unsigned char chan_priority;
+ unsigned short block_size;
+ unsigned char nr_masters;
+ unsigned char data_width[4];
+};
+
+/* Platform-configurable bits in CFG_HI */
+#define FHC_CFGH_FCMODE (1 << 0)
+#define FHC_CFGH_FIFO_MODE (1 << 1)
+#define FHC_CFGH_PROTCTL(x) ((x) << 2)
+#define FHC_CFGH_SRC_PER(x) ((x) << 7)
+#define FHC_CFGH_DST_PER(x) ((x) << 11)
+
+/* Platform-configurable bits in CFG_LO */
+#define FHC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
+#define FHC_CFGL_LOCK_CH_BLOCK (1 << 12)
+#define FHC_CFGL_LOCK_CH_XACT (2 << 12)
+#define FHC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */
+#define FHC_CFGL_LOCK_BUS_BLOCK (1 << 14)
+#define FHC_CFGL_LOCK_BUS_XACT (2 << 14)
+#define FHC_CFGL_LOCK_CH (1 << 15) /* channel lockout */
+#define FHC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */
+#define FHC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
+#define FHC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
+
+/* DMA API extensions */
+struct fh_cyclic_desc {
+ struct fh_desc **desc;
+ unsigned long periods;
+ void (*period_callback)(void *param);
+ void *period_callback_param;
+};
+
+struct fh_cyclic_desc *fh_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction);
+void fh_dma_cyclic_free(struct dma_chan *chan);
+int fh_dma_cyclic_start(struct dma_chan *chan);
+void fh_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t fh_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t fh_dma_get_dst_addr(struct dma_chan *chan);
+
+#endif /* FH_DMAC_H */
diff --git a/arch/arm/mach-fh/include/mach/fh_dmac_regs.h b/arch/arm/mach-fh/include/mach/fh_dmac_regs.h
new file mode 100644
index 00000000..208d686c
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_dmac_regs.h
@@ -0,0 +1,321 @@
+/*
+ * Driver for the Synopsys DesignWare AHB DMA Controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dmaengine.h>
+#include <mach/fh_dmac.h>
+
+#define FH_DMA_MAX_NR_CHANNELS 8
+#define FH_DMA_MAX_NR_REQUESTS 16
+
+/*
+ * Redefine this macro to handle differences between 32- and 64-bit
+ * addressing, big vs. little endian, etc.
+ */
+#define FH_REG(name) u32 name; u32 __pad_##name
+
+/* Hardware register definitions. */
+struct fh_dma_chan_regs {
+ FH_REG(SAR); /* Source Address Register */
+ FH_REG(DAR); /* Destination Address Register */
+ FH_REG(LLP); /* Linked List Pointer */
+ u32 CTL_LO; /* Control Register Low */
+ u32 CTL_HI; /* Control Register High */
+ FH_REG(SSTAT);
+ FH_REG(DSTAT);
+ FH_REG(SSTATAR);
+ FH_REG(DSTATAR);
+ u32 CFG_LO; /* Configuration Register Low */
+ u32 CFG_HI; /* Configuration Register High */
+ FH_REG(SGR);
+ FH_REG(DSR);
+};
+
+struct fh_dma_irq_regs {
+ FH_REG(XFER);
+ FH_REG(BLOCK);
+ FH_REG(SRC_TRAN);
+ FH_REG(DST_TRAN);
+ FH_REG(ERROR);
+};
+
+struct fh_dma_regs {
+ /* per-channel registers */
+ struct fh_dma_chan_regs CHAN[FH_DMA_MAX_NR_CHANNELS];
+
+ /* irq handling */
+ struct fh_dma_irq_regs RAW; /* r */
+ struct fh_dma_irq_regs STATUS; /* r (raw & mask) */
+ struct fh_dma_irq_regs MASK; /* rw (set = irq enabled) */
+ struct fh_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
+
+ FH_REG(STATUS_INT); /* r */
+
+ /* software handshaking */
+ FH_REG(REQ_SRC);
+ FH_REG(REQ_DST);
+ FH_REG(SGL_REQ_SRC);
+ FH_REG(SGL_REQ_DST);
+ FH_REG(LAST_SRC);
+ FH_REG(LAST_DST);
+
+ /* miscellaneous */
+ FH_REG(CFG);
+ FH_REG(CH_EN);
+ FH_REG(ID);
+ FH_REG(TEST);
+
+ /* reserved */
+ FH_REG(__reserved0);
+ FH_REG(__reserved1);
+
+ /* optional encoded params, 0x3c8..0x3f7 */
+ u32 __reserved;
+
+ /* per-channel configuration registers */
+ u32 FHC_PARAMS[FH_DMA_MAX_NR_CHANNELS];
+ u32 MULTI_BLK_TYPE;
+ u32 MAX_BLK_SIZE;
+
+ /* top-level parameters */
+ u32 FH_PARAMS;
+};
+
+#ifdef CONFIG_FH_DMAC_BIG_ENDIAN_IO
+#define dma_readl_native ioread32be
+#define dma_writel_native iowrite32be
+#else
+#define dma_readl_native readl
+#define dma_writel_native writel
+#endif
+
+/* To access the registers in early stage of probe */
+#define dma_read_byaddr(addr, name) \
+ dma_readl_native((addr) + offsetof(struct fh_dma_regs, name))
+
+/* Bitfields in FH_PARAMS */
+#define FH_PARAMS_NR_CHAN 8 /* number of channels */
+#define FH_PARAMS_NR_MASTER 11 /* number of AHB masters */
+#define FH_PARAMS_DATA_WIDTH(n) (15 + 2 * (n))
+#define FH_PARAMS_DATA_WIDTH1 15 /* master 1 data width */
+#define FH_PARAMS_DATA_WIDTH2 17 /* master 2 data width */
+#define FH_PARAMS_DATA_WIDTH3 19 /* master 3 data width */
+#define FH_PARAMS_DATA_WIDTH4 21 /* master 4 data width */
+#define FH_PARAMS_EN 28 /* encoded parameters */
+
+/* Bitfields in FHC_PARAMS */
+#define FHC_PARAMS_MBLK_EN 11 /* multi block transfer */
+
+/* Bitfields in CTL_LO */
+#define FHC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
+#define FHC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
+#define FHC_CTLL_SRC_WIDTH(n) ((n)<<4)
+#define FHC_CTLL_DST_INC (0<<7) /* DAR update/not */
+#define FHC_CTLL_DST_DEC (1<<7)
+#define FHC_CTLL_DST_FIX (2<<7)
+#define FHC_CTLL_SRC_INC (0<<9) /* SAR update/not */
+#define FHC_CTLL_SRC_DEC (1<<9)
+#define FHC_CTLL_SRC_FIX (2<<9)
+#define FHC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
+#define FHC_CTLL_SRC_MSIZE(n) ((n)<<14)
+#define FHC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
+#define FHC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
+#define FHC_CTLL_FC(n) ((n) << 20)
+#define FHC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
+#define FHC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
+#define FHC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
+#define FHC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
+/* plus 4 transfer types for peripheral-as-flow-controller */
+#define FHC_CTLL_DMS(n) ((n)<<23) /* dst master select */
+#define FHC_CTLL_SMS(n) ((n)<<25) /* src master select */
+#define FHC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
+#define FHC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
+
+/* Bitfields in CTL_HI */
+#define FHC_CTLH_DONE 0x00001000
+#define FHC_CTLH_BLOCK_TS_MASK 0x00000fff
+#define FHC_PROTCTL_MASK (7 << 2)
+#define FHC_PROTCTL(n) ((n) << 2)
+/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/fh_dmac.h> */
+#define FHC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
+#define FHC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
+#define FHC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
+#define FHC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
+#define FHC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
+#define FHC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
+#define FHC_CFGL_MAX_BURST(x) ((x) << 20)
+#define FHC_CFGL_RELOAD_SAR (1 << 30)
+#define FHC_CFGL_RELOAD_DAR (1 << 31)
+
+/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/fh_dmac.h> */
+#define FHC_CFGH_DS_UPD_EN (1 << 5)
+#define FHC_CFGH_SS_UPD_EN (1 << 6)
+
+/* Bitfields in SGR */
+#define FHC_SGR_SGI(x) ((x) << 0)
+#define FHC_SGR_SGC(x) ((x) << 20)
+
+/* Bitfields in DSR */
+#define FHC_DSR_DSI(x) ((x) << 0)
+#define FHC_DSR_DSC(x) ((x) << 20)
+
+/* Bitfields in CFG */
+#define FH_CFG_DMA_EN (1 << 0)
+
+#define FH_REGLEN 0x400
+#define PROTCTL_ENABLE 0x55
+#define MASTER_SEL_ENABLE 0x55
+
+enum fh_dmac_flags {
+ FH_DMA_IS_CYCLIC = 0,
+ FH_DMA_IS_SOFT_LLP = 1,
+};
+
+struct fh_dma_extra {
+ u32 sinc;
+ u32 dinc;
+ u32 protctl_flag;
+ u32 protctl_data;
+ u32 master_flag;
+ u32 src_master;
+ u32 dst_master;
+};
+
+struct fh_dma_chan {
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ u8 mask;
+ u8 priority;
+ enum dma_transfer_direction direction;
+ bool paused;
+ bool initialized;
+
+ /* software emulation of the LLP transfers */
+ struct list_head *tx_node_active;
+
+ spinlock_t lock;
+
+ /* these other elements are all protected by lock */
+ unsigned long flags;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ u32 residue;
+ struct fh_cyclic_desc *cdesc;
+
+ unsigned int descs_allocated;
+
+ /* hardware configuration */
+ unsigned int block_size;
+ bool nollp;
+
+ /* custom slave configuration */
+ unsigned int request_line;
+ unsigned char src_master;
+ unsigned char dst_master;
+ struct fh_dma_extra ext_para;
+ /* configuration passed via DMA_SLAVE_CONFIG */
+ struct dma_slave_config dma_sconfig;
+};
+
+enum fh_dma_slave_increment {
+ FH_DMA_SLAVE_INC,
+ FH_DMA_SLAVE_DEC,
+ FH_DMA_SLAVE_FIX,
+};
+
+
+static inline struct fh_dma_chan_regs __iomem *
+__fhc_regs(struct fh_dma_chan *fhc)
+{
+ return fhc->ch_regs;
+}
+
+#define channel_readl(fhc, name) \
+ dma_readl_native(&(__fhc_regs(fhc)->name))
+#define channel_writel(fhc, name, val) \
+ dma_writel_native((val), &(__fhc_regs(fhc)->name))
+
+static inline struct fh_dma_chan *to_fh_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fh_dma_chan, chan);
+}
+
+struct fh_dma {
+ struct dma_device dma;
+ void __iomem *regs;
+ struct dma_pool *desc_pool;
+ struct tasklet_struct tasklet;
+ struct clk *clk;
+
+ u8 all_chan_mask;
+
+ /* hardware configuration */
+ unsigned char nr_masters;
+ unsigned char data_width[4];
+
+ struct fh_dma_chan chan[0];
+};
+
+static inline struct fh_dma_regs __iomem *__fh_regs(struct fh_dma *dw)
+{
+ return dw->regs;
+}
+
+#define dma_readl(dw, name) \
+ dma_readl_native(&(__fh_regs(dw)->name))
+#define dma_writel(dw, name, val) \
+ dma_writel_native((val), &(__fh_regs(dw)->name))
+
+#define channel_set_bit(dw, reg, mask) \
+ dma_writel(dw, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(dw, reg, mask) \
+ dma_writel(dw, reg, ((mask) << 8) | 0)
+
+static inline struct fh_dma *to_fh_dma(struct dma_device *ddev)
+{
+ return container_of(ddev, struct fh_dma, dma);
+}
+
+/* LLI == Linked List Item; a.k.a. DMA block descriptor */
+struct fh_lli {
+ /* values that are not changed by hardware */
+ u32 sar;
+ u32 dar;
+ u32 llp; /* chain to next lli */
+ u32 ctllo;
+ /* values that may get written back: */
+ u32 ctlhi;
+ /* sstat and dstat can snapshot peripheral register state.
+ * silicon config may discard either or both...
+ */
+ u32 sstat;
+ u32 dstat;
+};
+
+struct fh_desc {
+ /* FIRST values the hardware uses */
+ struct fh_lli lli;
+
+ /* THEN values for driver housekeeping */
+ struct list_head desc_node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor txd;
+ size_t len;
+ size_t total_len;
+};
+
+#define to_fh_desc(h) list_entry(h, struct fh_desc, desc_node)
+
+static inline struct fh_desc *
+txd_to_fh_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct fh_desc, txd);
+}
diff --git a/arch/arm/mach-fh/include/mach/fh_efuse_plat.h b/arch/arm/mach-fh/include/mach/fh_efuse_plat.h
new file mode 100755
index 00000000..df742bfa
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_efuse_plat.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2009 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ARCH_ARM_FH_EFUSE_PLAT_H
+#define __ARCH_ARM_FH_EFUSE_PLAT_H
+
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+struct fh_efuse_platform_data {
+ u32 efuse_support_flag;
+};
+#endif /* __ARCH_ARM_FH_EFUSE_PLAT_H */
diff --git a/arch/arm/mach-fh/include/mach/fh_gmac.h b/arch/arm/mach-fh/include/mach/fh_gmac.h
new file mode 100644
index 00000000..1ab649d0
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_gmac.h
@@ -0,0 +1,33 @@
+#ifndef __FH_GMAC_PLATFORM_DATA
+#define __FH_GMAC_PLATFORM_DATA
+
+#include <linux/platform_device.h>
+
+enum {
+ gmac_phyt_reg_basic_ctrl = 0,
+ gmac_phyt_reg_basic_status = 1,
+ gmac_phyt_reg_phy_id1 = 2,
+ gmac_phyt_reg_phy_id2 = 3,
+ gmac_phyt_rtl8201_rmii_mode = 16,
+ gmac_phyt_ti83848_rmii_mode = 17,
+ gmac_phyt_rtl8201_power_saving = 24,
+ gmac_phyt_rtl8201_page_select = 31,
+ gmac_phyt_ip101g_page_select = 20
+};
+
+enum {
+ gmac_speed_10m,
+ gmac_speed_100m
+};
+
+struct fh_gmac_platform_data {
+ int interface;
+ int phyid;
+ void (*early_init)(struct fh_gmac_platform_data *plat_data);
+ void (*plat_init)(struct fh_gmac_platform_data *plat_data);
+ void (*set_rmii_speed)(int speed);
+ void (*phy_reset)(void);
+};
+
+#endif
+
diff --git a/arch/arm/mach-fh/include/mach/fh_i2s.h b/arch/arm/mach-fh/include/mach/fh_i2s.h
new file mode 100644
index 00000000..e320c29f
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_i2s.h
@@ -0,0 +1,13 @@
+#ifndef __FH_I2S_PLATFORM_DATA
+#define __FH_I2S_PLATFORM_DATA
+
+struct fh_i2s_platform_data {
+ int dma_capture_channel;
+ int dma_playback_channel;
+ int dma_master;
+ char *acodec_clk_name;
+ int (*clk_config)(int div_val);
+};
+
+#endif
+
diff --git a/arch/arm/mach-fh/include/mach/fh_predefined.h b/arch/arm/mach-fh/include/mach/fh_predefined.h
new file mode 100644
index 00000000..a5572f2f
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_predefined.h
@@ -0,0 +1,40 @@
+/*
+ * fh_predefined.h
+ *
+ * Created on: May 22, 2014
+ * Author: duobao
+ */
+
+#ifndef FH_PREDEFINED_H_
+#define FH_PREDEFINED_H_
+
+typedef unsigned char UINT8;
+typedef unsigned short UINT16;
+typedef unsigned int UINT32;
+typedef unsigned long long UINT64;
+
+typedef char SINT8;
+typedef short SINT16;
+typedef int SINT32;
+typedef long long SINT64;
+#define FALSE (0)
+#define TRUE (!FALSE)
+#define reg_read(addr) (*((volatile UINT32 *)(addr)))
+#define reg_write(addr,value) (*(volatile UINT32 *)(addr)=(value))
+
+#define GET_REG(addr) reg_read(addr)
+#define SET_REG(addr,value) reg_write(addr,value)
+#define SET_REG_M(addr,value,mask) reg_write(addr,(reg_read(addr)&(~(mask)))|((value)&(mask)))
+#define SET_REG_B(addr,element,highbit,lowbit) SET_REG_M((addr),((element)<<(lowbit)),(((1<<((highbit)-(lowbit)+1))-1)<<(lowbit)))
+
+#define GET_REG8(addr) (*((volatile UINT8 *)(addr)))
+#define SET_REG8(addr,value) (*(volatile UINT8 *)(addr)=(value))
+
+#define LD8(addr) (*((volatile u8 *)(addr)))
+#define ST8(addr,value) (*(volatile u8 *)(addr)=(value))
+#define LD16(addr) (*((volatile u16 *)(addr)))
+#define ST16(addr,value) (*(volatile u16 *)(addr)=(value))
+#define LD32(addr) (*((volatile u32 *)(addr)))
+#define ST32(addr,value) (*(volatile u32 *)(addr)=(value))
+
+#endif /* FH_PREDEFINED_H_ */
diff --git a/arch/arm/mach-fh/include/mach/fh_rtc_v1.h b/arch/arm/mach-fh/include/mach/fh_rtc_v1.h
new file mode 100644
index 00000000..594259eb
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_rtc_v1.h
@@ -0,0 +1,238 @@
+/*
+ * rtc.h
+ *
+ * Created on: Aug 18, 2016
+ * Author: fullhan
+ */
+
+/* Guard renamed: fh_rtc_v1.h and fh_rtc_v2.h previously shared the same
+ * guard macro, so including both silently dropped one header. */
+#ifndef ARCH_ARM_MACH_FH_INCLUDE_MACH_RTC_V1_H_
+#define ARCH_ARM_MACH_FH_INCLUDE_MACH_RTC_V1_H_
+#include "fh_predefined.h"
+
+/*
+ * Registers offset
+ */
+#define FH_RTC_COUNTER 0x0
+#define FH_RTC_OFFSET 0x4
+#define FH_RTC_POWER_FAIL 0x8
+#define FH_RTC_ALARM_COUNTER 0xC
+#define FH_RTC_INT_STAT 0x10
+#define FH_RTC_INT_EN 0x14
+#define FH_RTC_SYNC 0x18
+#define FH_RTC_DEBUG 0x1C
+#define FH_RTC_USER_REG 0x20
+
+#define SEC_BIT_START 0
+#define SEC_VAL_MASK 0x3f
+
+#define MIN_BIT_START 6
+#define MIN_VAL_MASK 0xfc0
+
+#define HOUR_BIT_START 12
+#define HOUR_VAL_MASK 0x1f000
+
+#define DAY_BIT_START 17
+#define DAY_VAL_MASK 0xfffe0000
+
+/* Interrupt-status bit positions/masks; expansions parenthesized so the
+ * macros compose safely in any expression context. */
+#define FH_RTC_ISR_SEC_POS (1<<0)
+#define FH_RTC_ISR_MIN_POS (1<<1)
+#define FH_RTC_ISR_HOUR_POS (1<<2)
+#define FH_RTC_ISR_DAY_POS (1<<3)
+#define FH_RTC_ISR_ALARM_POS (1<<4)
+#define FH_RTC_ISR_SEC_MASK (1<<27)
+#define FH_RTC_ISR_MIN_MASK (1<<28)
+#define FH_RTC_ISR_HOUR_MASK (1<<29)
+#define FH_RTC_ISR_DAY_MASK (1<<30)
+#define FH_RTC_ISR_ALARM_MASK (1<<31) /* NOTE(review): 1<<31 overflows signed int; consider 1u<<31 */
+
+// input: val=fh_rtc_get_time(base_addr)
+#define FH_GET_RTC_SEC(val) ((val & SEC_VAL_MASK) >> SEC_BIT_START)
+#define FH_GET_RTC_MIN(val) ((val & MIN_VAL_MASK) >> MIN_BIT_START)
+#define FH_GET_RTC_HOUR(val) ((val & HOUR_VAL_MASK) >> HOUR_BIT_START)
+#define FH_GET_RTC_DAY(val) ((val & DAY_VAL_MASK) >> DAY_BIT_START)
+
+#define ELAPSED_LEAP_YEARS(y) ((((y) - 1)/4) - (((y) - 1)/100) + (((y) + 299)/400) - 17) /* argument parenthesized: an expression arg (e.g. a+b) mis-expanded before */
+
+#define FH_RTC_PROC_FILE "driver/fh_rtc"
+
+struct fh_rtc_platform_data
+{
+ u32 clock_in;
+ char *clk_name;
+ char *dev_name;
+ u32 base_year;
+ u32 base_month;
+ u32 base_day;
+ int sadc_channel;
+};
+enum
+{
+ init_done=1,
+ initing=0
+
+};
+
+/*******************************************************************************
+* Function Name : fh_rtc_interrupt_disabel
+* Description : disable the rtc interrupt (macro name typo kept for compat)
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_interrupt_disabel(base_addr) SET_REG(base_addr+REG_RTC_INT_EN,DISABLE) /* NOTE(review): REG_RTC_INT_EN and DISABLE are not defined in this header (cf. FH_RTC_INT_EN above) -- verify */
+
+/*******************************************************************************
+* Function Name : fh_rtc_get_time
+* Description : get rtc current time
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_get_time(base_addr) GET_REG(base_addr+FH_RTC_COUNTER)
+
+/*******************************************************************************
+* Function Name : fh_rtc_set_time
+* Description : set rtc current time
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_set_time(base_addr,value) SET_REG(base_addr+FH_RTC_COUNTER,value)
+
+/*******************************************************************************
+* Function Name : fh_rtc_set_alarm_time
+* Description : set rtc alarm
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_set_alarm_time(base_addr,value) SET_REG(base_addr+FH_RTC_ALARM_COUNTER,value)
+
+/*******************************************************************************
+* Function Name : fh_rtc_get_alarm_time
+* Description : get alarm register
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_get_alarm_time(base_addr) GET_REG(base_addr+FH_RTC_ALARM_COUNTER)
+
+/*******************************************************************************
+* Function Name : fh_rtc_get_int_status
+* Description : get rtc current interrupt status
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_get_int_status(base_addr) GET_REG(base_addr+FH_RTC_INT_STAT)
+/*******************************************************************************
+* Function Name : fh_rtc_enable_interrupt
+* Description : enable rtc interrupt
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_enable_interrupt(base_addr,value) SET_REG(base_addr+FH_RTC_INT_EN,value|GET_REG(base_addr+FH_RTC_INT_EN))
+/*******************************************************************************
+* Function Name : fh_rtc_disenable_interrupt
+* Description : disable interrupt
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_disenable_interrupt(base_addr,value) SET_REG(base_addr+FH_RTC_INT_EN,(~value)&GET_REG(base_addr+FH_RTC_INT_EN))
+
+/*******************************************************************************
+* Function Name : fh_rtc_get_enabled_interrupt
+* Description : get rtc current interrupt enabled
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_get_enabled_interrupt(base_addr) GET_REG(base_addr+FH_RTC_INT_EN)
+/*******************************************************************************
+* Function Name : fh_rtc_set_mask_interrupt
+* Description : set rtc interrupt mask
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_set_mask_interrupt(base_addr,value) SET_REG(base_addr+FH_RTC_INT_EN,value|GET_REG(base_addr+FH_RTC_INT_EN))
+/*******************************************************************************
+* Function Name : fh_rtc_clear_interrupt_status
+* Description : clear rtc interrupt status
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_clear_interrupt_status(base_addr,value) SET_REG(base_addr+FH_RTC_INT_STAT,(~value)&GET_REG(base_addr+FH_RTC_INT_STAT))
+/*******************************************************************************
+* Function Name : fh_rtc_get_offset
+* Description : get rtc offset
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_get_offset(base_addr) GET_REG(base_addr+FH_RTC_OFFSET)
+/*******************************************************************************
+* Function Name : fh_rtc_get_power_fail
+* Description : get rtc power fail register
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_get_power_fail(base_addr) GET_REG(base_addr+FH_RTC_POWER_FAIL)
+
+/*******************************************************************************
+* Function Name : fh_rtc_get_sync
+* Description : get rtc sync register value
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_get_sync(base_addr) GET_REG(base_addr+FH_RTC_SYNC)
+
+/*******************************************************************************
+* Function Name : fh_rtc_set_sync
+* Description : set rtc sync register value
+* Input : rtc base addr,init_done/initing
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_set_sync(base_addr,value) SET_REG(base_addr+FH_RTC_SYNC,value)
+
+/*******************************************************************************
+* Function Name : fh_rtc_get_debug
+* Description : get rtc debug register value
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_get_debug(base_addr) GET_REG(base_addr+FH_RTC_DEBUG)
+
+/*******************************************************************************
+* Function Name : fh_rtc_set_debug
+* Description : set rtc debug register value
+* Input : rtc base addr,x pclk
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define fh_rtc_set_debug(base_addr,value) SET_REG(base_addr+FH_RTC_DEBUG,value)
+#endif /* ARCH_ARM_MACH_FH_INCLUDE_MACH_RTC_H_ */
diff --git a/arch/arm/mach-fh/include/mach/fh_rtc_v2.h b/arch/arm/mach-fh/include/mach/fh_rtc_v2.h
new file mode 100644
index 00000000..5c4e805c
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_rtc_v2.h
@@ -0,0 +1,268 @@
+/*
+ * rtc.h (v2)
+ *
+ * Created on: Aug 18, 2016
+ * Author: fullhan
+ */
+
+#ifndef ARCH_ARM_MACH_FH_INCLUDE_MACH_RTC_V2_H_
+#define ARCH_ARM_MACH_FH_INCLUDE_MACH_RTC_V2_H_
+#include "fh_predefined.h"
+
+/*
+ * Registers offset
+ */
+#define FH_RTC_INT_STATUS 0x0
+#define FH_RTC_INT_EN 0x4
+#define FH_RTC_DEBUG0 0x8
+#define FH_RTC_DEBUG1 0xC
+#define FH_RTC_DEBUG2 0x10
+#define FH_RTC_CMD 0x14
+#define FH_RTC_RD_DATA 0x18
+#define FH_RTC_WR_DATA 0x1C
+
+#define FH_RTC_CMD_COUNTER (0<<4)
+#define FH_RTC_CMD_OFFSET (1<<4)
+#define FH_RTC_CMD_ALARM_CFG (2<<4)
+#define FH_RTC_CMD_TEMP_INFO (0x3<<4)
+#define FH_RTC_CMD_TEMP_CFG (0x4<<4)
+#define FH_RTC_CMD_ANA_CFG (0x5<<4)
+#define FH_RTC_CMD_INT_STATUS (0x6<<4)
+#define FH_RTC_CMD_INT_EN (0x7<<4)
+#define FH_RTC_CMD_DEBUG (0x8<<4)
+#define FH_RTC_CMD_OFFSET_LUT (0x9<<4)
+
+#define OFFSET_EN (1<<0)
+#define OFFSET_ATUTO (1<<1)
+#define OFFSET_IDX (1<<2)
+#define OFFSET_BK_EN (1<<8)
+#define OFFSET_BK_AUTO (1<<9)
+#define OFFSET_BK_IDX (1<<10)
+#define OFFSET_CURRENT (1<<16)
+#define LP_MODE (1<<31)
+
+#define RTC_READ 1
+#define RTC_WRITE 2
+#define RTC_TEMP 3
+#define FH_RTC_INT_STATUS_RX_CRC_ERR (1<<0)
+#define FH_RTC_INT_STATUS_RX_COM_ERR (1<<1)
+#define FH_RTC_INT_STATUS_RX_LEN_ERR (1<<2)
+#define FH_RTC_INT_STATUS_CNT_THL (1<<3)
+#define FH_RTC_INT_STATUS_CNT_THH (1<<4)
+#define FH_RTC_INT_STATUS_CORE_IDLE (1<<5)
+#define FH_RTC_INT_STATUS_CORE (1<<6)
+#define FH_RTC_INT_STATUS_WRAPPER_BUSY (1<<8)
+#define FH_RTC_INT_STATUS_CORE_BUSY (1<<16)
+
+#define FH_RTC_INT_RX_CRC_ERR_EN (1<<0)
+#define FH_RTC_INT_RX_COM_ERR_EN (1<<1)
+#define FH_RTC_INT_RX_LEN_ERR_EN (1<<2)
+#define FH_RTC_INT_CNT_THL_ERR_EN (1<<3)
+#define FH_RTC_INT_CNT_THH_ERR_EN (1<<4)
+#define FH_RTC_INT_CORE_IDLE_ERR_EN (1<<5)
+#define FH_RTC_INT_CORE_INT_ERR_EN (1<<6)
+#define FH_RTC_INT_RX_CRC_ERR_MASK (1<<16)
+#define FH_RTC_INT_RX_COM_ERR_MASK (1<<17)
+#define FH_RTC_INT_RX_LEN_ERR_MASK (1<<18)
+#define FH_RTC_INT_CNT_THL_ERR_MASK (1<<19)
+#define FH_RTC_INT_CNT_THH_ERR_MASK (1<<20)
+#define FH_RTC_INT_CORE_IDLE_ERR_MASK (1<<21)
+#define FH_RTC_INT_CORE_INT_ERR_MASK (1<<22)
+#define FH_RTC_INT_CORE_INT_ERR_MASK_COV 0xffbfffff
+#define FH_RTC_INT_CORE_INT_STATUS_COV 0xffffff3f
+#define FH_RTC_INT_CORE_INT_ALL_COV 0xffffffff
+
+#define FH_RTC_CORE_INT_EN_SEC_INT (0x1<<0)
+#define FH_RTC_CORE_INT_EN_MIN_INT (0x1<<1)
+#define FH_RTC_CORE_INT_EN_HOU_INT (0x1<<2)
+#define FH_RTC_CORE_INT_EN_DAY_INT (0x1<<3)
+#define FH_RTC_CORE_INT_EN_ALM_INT (0x1<<4)
+#define FH_RTC_CORE_INT_EN_POW_INT (0x1<<5)
+
+
+#define FH_RTC_CORE_INT_EN_SEC_MAS (0x1<<16)
+#define FH_RTC_CORE_INT_EN_MIN_MAS (0x1<<17)
+#define FH_RTC_CORE_INT_EN_HOU_MAS (0x1<<18)
+#define FH_RTC_CORE_INT_EN_DAY_MAS (0x1<<19)
+#define FH_RTC_CORE_INT_EN_ALM_MAS (0x1<<20)
+#define FH_RTC_CORE_INT_EN_POE_MAS (0x1<<21)
+
+#define FH_RTC_IOCTL_MEM_BASE 'A'
+#define GET_TSENSOR_DATA _IOWR(FH_RTC_IOCTL_MEM_BASE, 0, int)
+#define GET_CURRENT_OFFSET_DATA _IOWR(FH_RTC_IOCTL_MEM_BASE, 1, int)
+#define GET_CURRENT_OFFSET_IDX _IOWR(FH_RTC_IOCTL_MEM_BASE, 2, int)
+#define RTC_GET_LUT _IOWR(FH_RTC_IOCTL_MEM_BASE, 3, int)
+#define RTC_SET_LUT _IOWR(FH_RTC_IOCTL_MEM_BASE, 4, int)
+#define GET_REG_VALUE _IOWR(FH_RTC_IOCTL_MEM_BASE, 5, int)
+#define SET_REG_VALUE _IOWR(FH_RTC_IOCTL_MEM_BASE, 6, int)
+#define GET_TEMP_VALUE _IOWR(FH_RTC_IOCTL_MEM_BASE, 7, int)
+#define GET_WRAPPER_REG _IOWR(FH_RTC_IOCTL_MEM_BASE, 8, int)
+#define SET_WRAPPER_REG _IOWR(FH_RTC_IOCTL_MEM_BASE, 9, int)
+#define SET_REG_VALUE_SINGLE _IOWR(FH_RTC_IOCTL_MEM_BASE, 10, int)
+
+#define SEC_BIT_START 0
+#define SEC_VAL_MASK 0x3f
+
+#define MIN_BIT_START 6
+#define MIN_VAL_MASK 0xfc0
+
+#define HOUR_BIT_START 12
+#define HOUR_VAL_MASK 0x1f000
+
+#define DAY_BIT_START 17
+#define DAY_VAL_MASK 0xfffe0000
+
+#define FH_RTC_ISR_SEC_POS (1<<0)
+#define FH_RTC_ISR_MIN_POS (1<<1)
+#define FH_RTC_ISR_HOUR_POS (1<<2)
+#define FH_RTC_ISR_DAY_POS (1<<3)
+#define FH_RTC_ISR_ALARM_POS (1<<4)
+#define FH_RTC_ISR_POWERFAIL_POS (1<<5)
+#define FH_RTC_ISR_RX_CRC_ERR_INT (1<<6)
+#define FH_RTC_ISR_RX_COM_ERR_INT (1<<7)
+#define FH_RTC_LEN_ERR_INT (1<<8)
+
+/* input: val=fh_rtc_get_time(base_addr)*/
+#define FH_GET_RTC_SEC(val) ((val & SEC_VAL_MASK) >> SEC_BIT_START)
+#define FH_GET_RTC_MIN(val) ((val & MIN_VAL_MASK) >> MIN_BIT_START)
+#define FH_GET_RTC_HOUR(val) ((val & HOUR_VAL_MASK) >> HOUR_BIT_START)
+#define FH_GET_RTC_DAY(val) ((val & DAY_VAL_MASK) >> DAY_BIT_START)
+
+/* Parenthesize the macro argument: without this, OUT_TEMP(a+b) would
+ * expand to a+b/4096*235-73 and silently compute the wrong value. */
+#define ELAPSED_LEAP_YEARS(y) ((((y)-1)/4)-(((y)-1)/100)+(((y)+299)/400)-17)
+#define OUT_TEMP(y) ((y)/4096*235-73)
+#define FH_RTC_PROC_FILE "fh_zy_rtc"
+
+struct fh_rtc_platform_data {
+ u32 clock_in;
+ char *clk_name;
+ char *dev_name;
+ u32 base_year;
+ u32 base_month;
+ u32 base_day;
+ int sadc_channel;
+};
+enum {
+ init_done = 1,
+ initing = 0
+
+};
+
+/****************************************************
+* Function Name : fh_rtc_interrupt_disabel
+* Description : disable rtc interrupt
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ ****************************************************/
+#define fh_rtc_interrupt_disabel(base_addr) \
+SET_REG(base_addr+FH_RTC_INT_EN, DISABLE)
+
+/******************************************************
+* Function Name : fh_rtc_get_int_status
+* Description : get rtc current interrupt status
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ ******************************************************/
+#define fh_rtc_get_int_status(base_addr) \
+GET_REG(base_addr+FH_RTC_INT_STATUS)
+/******************************************************
+* Function Name : fh_rtc_enable_interrupt
+* Description : enable rtc interrupt
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ ******************************************************/
+#define fh_rtc_enable_interrupt(base_addr, value) \
+SET_REG(base_addr+FH_RTC_INT_EN, \
+value|GET_REG(base_addr+FH_RTC_INT_EN))
+
+
+/*******************************************************
+* Function Name : fh_rtc_get_enabled_interrupt
+* Description : get rtc current interrupt enabled
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ ******************************************************/
+#define fh_rtc_get_enabled_interrupt(base_addr) \
+GET_REG(base_addr+FH_RTC_INT_EN)
+/********************************************************
+* Function Name : fh_rtc_set_mask_interrupt
+* Description : set rtc interrupt mask
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************/
+#define fh_rtc_set_mask_interrupt(base_addr, value) \
+SET_REG(base_addr+FH_RTC_INT_EN, value|GET_REG(base_addr+FH_RTC_INT_EN))
+/********************************************************
+* Function Name : fh_rtc_get_offset
+* Description : get rtc offset
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *******************************************************/
+#define fh_rtc_get_offset(base_addr) \
+GET_REG(base_addr+FH_RTC_OFFSET)
+/********************************************************
+* Function Name : fh_rtc_get_power_fail
+* Description : get rtc power fail register
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ ******************************************************/
+#define fh_rtc_get_power_fail(base_addr) \
+GET_REG(base_addr+FH_RTC_POWER_FAIL)
+
+/********************************************************
+* Function Name : fh_rtc_get_sync
+* Description : get rtc sync register value
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ *********************************************************/
+#define fh_rtc_get_sync(base_addr) \
+GET_REG(base_addr+FH_RTC_SYNC)
+
+/**********************************************************
+* Function Name : fh_rtc_set_sync
+* Description : set rtc sync register value
+* Input : rtc base addr,init_done/initing
+* Output : None
+* Return : None
+*
+************************************************************/
+#define fh_rtc_set_sync(base_addr, value) \
+SET_REG(base_addr+FH_RTC_SYNC, value)
+
+/***********************************************************
+* Function Name : fh_rtc_get_debug
+* Description : get rtc debug register value
+* Input : rtc base addr
+* Output : None
+* Return : None
+*
+ **********************************************************/
+#define fh_rtc_get_debug(base_addr) \
+GET_REG(base_addr+FH_RTC_DEBUG)
+
+/************************************************************
+* Function Name : fh_rtc_set_debug
+* Description : set rtc debug register value
+* Input : rtc base addr,x pclk
+* Output : None
+* Return : None
+*
+ ***********************************************************/
+#define fh_rtc_set_debug(base_addr, value) \
+SET_REG(base_addr+FH_RTC_DEBUG, value)
+#endif /* ARCH_ARM_MACH_FH_INCLUDE_MACH_RTC_H_ */
diff --git a/arch/arm/mach-fh/include/mach/fh_sadc.h b/arch/arm/mach-fh/include/mach/fh_sadc.h
new file mode 100644
index 00000000..db6d11ce
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_sadc.h
@@ -0,0 +1,108 @@
+/*
+ * fh_sadc.h
+ *
+ * Created on: Mar 13, 2015
+ * Author: duobao
+ */
+
+#ifndef FH_SADC_H_
+#define FH_SADC_H_
+
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+
+/****************************************************************************
+ * #define section
+ * add constant #define here if any
+ ***************************************************************************/
+#define FH_SADC_PROC_FILE "driver/sadc"
+#define MAX_CHANNEL_NO (8)
+#define LOOP_MODE (0x55)
+#define ISR_MODE (0xAA)
+
+
+#define SADC_TIMEOUT 0x55
+/****************************************************************************
+ * ADT section
+ * add Abstract Data Type definition here
+ ***************************************************************************/
+
+struct wrap_sadc_reg {
+#ifdef CONFIG_FH_SADC_V21
+ u32 sadc_model;
+#endif
+ u32 sadc_cmd;
+ u32 sadc_control;
+#ifdef CONFIG_FH_SADC_V21
+ u32 sadc_scan_en;
+ u32 sadc_chn_cfg;
+ u32 sadc_chn_cfg1;
+ u32 sadc_glitch_cfg;
+ u32 sadc_continuous_time;
+ u32 sadc_glitch_time;
+#endif
+ u32 sadc_ier;
+ u32 sadc_int_status;
+ u32 sadc_dout0;
+ u32 sadc_dout1;
+ u32 sadc_dout2;
+ u32 sadc_dout3;
+#ifdef CONFIG_FH_SADC_V21
+ u32 sadc_dout0_all;
+ u32 sadc_dout1_all;
+ u32 sadc_dout2_all;
+ u32 sadc_dout3_all;
+#endif
+ u32 sadc_debuge0;
+ u32 sadc_status;
+ u32 sadc_cnt;
+ u32 sadc_timeout;
+ u32 sadc_status2;
+#ifdef CONFIG_FH_SADC_V22
+ u32 sadc_hit_value0;
+ u32 sadc_hit_value1;
+ u32 sadc_hit_value2;
+ u32 sadc_hit_value3;
+ u32 sadc_hit_cfg;
+ u32 sadc_hit_data0;
+ u32 sadc_hit_data1;
+ u32 sadc_hit_data2;
+ u32 sadc_hit_data3;
+#endif
+};
+
+struct wrap_sadc_obj {
+ void *regs;
+ u32 irq_no;
+ u32 active_channel_no;
+ u32 active_channel_status;
+ uint16_t channel_data[MAX_CHANNEL_NO];
+ u32 error_rec;
+ u32 en_isr;
+ u32 sample_mode;
+ spinlock_t lock;
+ struct mutex sadc_lock;
+ struct completion done;
+ struct proc_dir_entry *proc_file;
+#ifdef CONFIG_FH_SADC_V21
+ wait_queue_head_t readqueue;
+#endif
+};
+
+long fh_sadc_get_value(int channel);
+
+
+#endif /* fh_SADC_H_ */
diff --git a/arch/arm/mach-fh/include/mach/fh_simple_timer.h b/arch/arm/mach-fh/include/mach/fh_simple_timer.h
new file mode 100644
index 00000000..b6b3729e
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_simple_timer.h
@@ -0,0 +1,80 @@
+/*
+ * fh_simple_timer.h
+ *
+ * Created on: Jan 22, 2017
+ * Author: duobao
+ */
+
+#ifndef FH_SIMPLE_TIMER_H_
+#define FH_SIMPLE_TIMER_H_
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/timerqueue.h>
+#include <mach/pmu.h>
+#include <mach/timex.h>
+#include <mach/io.h>
+#include <mach/fh_predefined.h>
+
+
+static inline unsigned int timern_base(int n)
+{
+ unsigned int base = 0;
+ switch (n) {
+ case 0:
+ default:
+ base = TIMER_REG_BASE;
+ break;
+ case 1:
+ base = TIMER_REG_BASE + 0x14;
+ break;
+ case 2:
+ base = TIMER_REG_BASE + 0x28;
+ break;
+ case 3:
+ base = TIMER_REG_BASE + 0x3c;
+ break;
+ }
+ return base;
+}
+
+#define TIMERN_REG_BASE(n) (timern_base(n))
+
+#define REG_TIMER_LOADCNT(n) (timern_base(n) + 0x00)
+#define REG_TIMER_CUR_VAL(n) (timern_base(n) + 0x04)
+#define REG_TIMER_CTRL_REG(n) (timern_base(n) + 0x08)
+#define REG_TIMER_EOI_REG(n) (timern_base(n) + 0x0C)
+#define REG_TIMER_INTSTATUS(n) (timern_base(n) + 0x10)
+
+#define REG_TIMERS_INTSTATUS (TIMER_REG_BASE + 0xa0)
+
+
+enum simple_timer_state {
+ SIMPLE_TIMER_STOP,
+ SIMPLE_TIMER_START,
+ SIMPLE_TIMER_ERROR,
+};
+
+struct fh_simple_timer
+{
+ struct timerqueue_node node;
+ ktime_t it_interval; /* timer period */
+ ktime_t it_value; /* timer expiration */
+ ktime_t it_delay;
+ void (*function) (void *);
+ void *param;
+};
+
+
+int fh_simple_timer_interrupt(void);
+int fh_simple_timer_create(struct fh_simple_timer *tim);
+int fh_timer_start(void);
+int fh_simple_timer_init(void);
+int fh_simple_timer_periodic_start(struct fh_simple_timer *tim);
+int fh_simple_timer_periodic_stop(void);
+
+#endif /* FH_SIMPLE_TIMER_H_ */
diff --git a/arch/arm/mach-fh/include/mach/fh_usb.h b/arch/arm/mach-fh/include/mach/fh_usb.h
new file mode 100644
index 00000000..09356af4
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_usb.h
@@ -0,0 +1,18 @@
+#ifndef __FH_USB
+#define __FH_USB
+
+#include <linux/platform_device.h>
+
+
+struct fh_usb_platform_data {
+
+ void (*utmi_rst)(void);
+ void (*phy_rst)(void);
+ void (*hcd_resume)(void);
+ void (*power_on)(void);
+ unsigned int grxfsiz_pwron_val;
+ unsigned int gnptxfsiz_pwron_val;
+};
+
+#endif
+
diff --git a/arch/arm/mach-fh/include/mach/fh_wdt.h b/arch/arm/mach-fh/include/mach/fh_wdt.h
new file mode 100644
index 00000000..457180f9
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fh_wdt.h
@@ -0,0 +1,32 @@
+#ifndef __FH_WDT_PLATFORM_DATA
+#define __FH_WDT_PLATFORM_DATA
+
+#include <linux/platform_device.h>
+
+struct fh_wdt_platform_reset {
+ unsigned int spi0_cs_pin;
+ unsigned int spi0_rst_bit;
+ unsigned int sd0_rst_bit;
+ unsigned int uart0_rst_bit;
+};
+
+struct fh_wdt_platform_data {
+ void (*resume)(void);
+ void (*pause)(void);
+ irqreturn_t (*intr)(void *pri);
+ struct fh_wdt_platform_reset *plat_info;
+};
+
+struct fh_wdt_t {
+ spinlock_t lock;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long in_use;
+ unsigned long next_heartbeat;
+ struct timer_list timer;
+ int expect_close;
+ struct fh_wdt_platform_data *plat_data;
+};
+
+#endif
+
diff --git a/arch/arm/mach-fh/include/mach/fhmci.h b/arch/arm/mach-fh/include/mach/fhmci.h
new file mode 100644
index 00000000..b60ad26e
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/fhmci.h
@@ -0,0 +1,178 @@
+#ifndef _FH_MCI_H_
+#define _FH_MCI_H_
+
+extern int trace_level;
+#define FHMCI_TRACE_LEVEL 5
+/*
+ 0 - all message
+ 1 - dump all register read/write
+ 2 - flow trace
+ 3 - timeout err and protocol err
+ */
+
+#define FHMCI_TRACE_FMT KERN_INFO
+#define ID_SD0 0
+#define ID_SD1 1
+
+#define POWER_ON 1
+#define POWER_OFF 0
+
+#define CARD_UNPLUGED 1
+#define CARD_PLUGED 0
+
+#define ENABLE 1
+#define DISABLE 0
+
+#define FH_MCI_DETECT_TIMEOUT (HZ/4)
+
+#define FH_MCI_REQUEST_TIMEOUT (5 * HZ)
+
+#define MAX_RETRY_COUNT 100
+#define MAX_MCI_HOST (2) /* max num of host on soc */
+
+#define fhmci_trace(level, msg...) do { \
+ if ((level) >= trace_level) { \
+ printk(FHMCI_TRACE_FMT "%s:%d: ", __func__, __LINE__); \
+ printk(msg); \
+ printk("\n"); \
+ } \
+} while (0)
+
+#define fhmci_assert(cond) do { \
+ if (!(cond)) {\
+ printk(KERN_ERR "Assert:fhmci:%s:%d\n", \
+ __func__, \
+ __LINE__); \
+ BUG(); \
+ } \
+} while (0)
+
+#define fhmci_error(s...) do { \
+ printk(KERN_ERR "fhmci:%s:%d: ", __func__, __LINE__); \
+ printk(s); \
+ printk("\n"); \
+} while (0)
+
+#define fhmci_readl(addr) ({unsigned int reg = readl((unsigned int)addr); \
+ fhmci_trace(1, "readl(0x%04X) = 0x%08X", (unsigned int)addr, reg); \
+ reg; })
+
+#define fhmci_writel(v, addr) do { \
+ writel(v, (unsigned int)addr); \
+ fhmci_trace(1, "writel(0x%04X) = 0x%08X",\
+ (unsigned int)addr, (unsigned int)(v)); \
+} while (0)
+
+
+struct fhmci_des {
+ unsigned long idmac_des_ctrl;
+ unsigned long idmac_des_buf_size;
+ unsigned long idmac_des_buf_addr;
+ unsigned long idmac_des_next_addr;
+};
+
+struct fhmci_host {
+ struct mmc_host *mmc;
+ spinlock_t lock;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ void __iomem *base;
+ unsigned int card_status;
+ struct scatterlist *dma_sg;
+ unsigned int dma_sg_num;
+ unsigned int dma_alloc_size;
+ unsigned int dma_dir;
+ dma_addr_t dma_paddr;
+ unsigned int *dma_vaddr;
+ struct timer_list timer;
+ unsigned int irq;
+ unsigned int irq_status;
+ unsigned int is_tuning;
+ wait_queue_head_t intr_wait;
+ unsigned long pending_events;
+ unsigned int id;
+ struct fh_mci_board *pdata;
+ unsigned int (*get_cd)(struct fhmci_host *host);
+ unsigned int (*get_ro)(struct fhmci_host *host);
+#define FHMCI_PEND_DTO_b (0)
+#define FHMCI_PEND_DTO_m (1 << FHMCI_PEND_DTO_b)
+};
+
+/* Board platform data */
+struct fh_mci_board {
+ unsigned int num_slots;
+
+ unsigned int quirks; /* Workaround / Quirk flags */
+ unsigned int bus_hz; /* Bus speed */
+
+ unsigned int caps; /* Capabilities */
+
+ /* delay in mS before detecting cards after interrupt */
+ unsigned int detect_delay_ms;
+
+ int (*init)(unsigned int slot_id,void* irq_handler_t , void *);
+ unsigned int (*get_ro)(struct fhmci_host *host);
+ unsigned int (*get_cd)(struct fhmci_host *host);
+ int (*get_ocr)(unsigned int slot_id);
+ int (*get_bus_wd)(unsigned int slot_id);
+ /*
+ * Enable power to selected slot and set voltage to desired level.
+ * Voltage levels are specified using MMC_VDD_xxx defines defined
+ * in linux/mmc/host.h file.
+ */
+ void (*setpower)(unsigned int slot_id, unsigned int volt);
+ void (*exit)(unsigned int slot_id);
+ void (*select_slot)(unsigned int slot_id);
+
+ struct dw_mci_dma_ops *dma_ops;
+ struct dma_pdata *data;
+ struct block_settings *blk_settings;
+ int fifo_depth;
+};
+
+union cmd_arg_s {
+ unsigned int cmd_arg;
+ struct cmd_bits_arg {
+ unsigned int cmd_index:6;
+ unsigned int response_expect:1;
+ unsigned int response_length:1;
+ unsigned int check_response_crc:1;
+ unsigned int data_transfer_expected:1;
+ unsigned int read_write:1;
+ unsigned int transfer_mode:1;
+ unsigned int send_auto_stop:1;
+ unsigned int wait_prvdata_complete:1;
+ unsigned int stop_abort_cmd:1;
+ unsigned int send_initialization:1;
+ unsigned int card_number:5;
+ unsigned int update_clk_reg_only:1; /* bit 21 */
+ unsigned int read_ceata_device:1;
+ unsigned int ccs_expected:1;
+ unsigned int enable_boot:1;
+ unsigned int expect_boot_ack:1;
+ unsigned int disable_boot:1;
+ unsigned int boot_mode:1;
+ unsigned int volt_switch:1;
+ unsigned int use_hold_reg:1;
+ unsigned int reserved:1;
+ unsigned int start_cmd:1; /* HSB */
+ } bits;
+};
+
+struct mmc_ctrl {
+ unsigned int slot_idx; /*0: mmc0; 1: mmc1*/
+ unsigned int mmc_ctrl_state; /*0: enable mmc_rescan; 1: disable mmc_rescan*/
+};
+
+enum mmc_ctrl_state {
+ RESCAN_ENABLE = 0,
+ RESCAN_DISABLE
+};
+
+struct platform_device *get_mci_device(unsigned int index);
+int storage_dev_set_mmc_rescan(struct mmc_ctrl *m_ctrl);
+int read_mci_ctrl_states(int id_mmc_sd);
+
+#endif
+
diff --git a/arch/arm/mach-fh/include/mach/gpio.h b/arch/arm/mach-fh/include/mach/gpio.h
new file mode 100644
index 00000000..781093dd
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/gpio.h
@@ -0,0 +1,259 @@
+/*
+ * TI DaVinci GPIO Support
+ *
+ * Copyright (c) 2006 David Brownell
+ * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FH_GPIO_H
+#define __FH_GPIO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include <asm-generic/gpio.h>
+
+#include <mach/irqs.h>
+#include <mach/fh_predefined.h>
+
+/*
+ * GPIO Direction
+ */
+#define GPIO_DIR_INPUT 0
+#define GPIO_DIR_OUTPUT 1
+
+/*
+ * GPIO interrupt type
+ */
+#define GPIO_INT_TYPE_LEVEL 0
+#define GPIO_INT_TYPE_EDGE 1
+
+/*
+ * GPIO interrupt polarity
+ */
+#define GPIO_INT_POL_LOW 0
+#define GPIO_INT_POL_HIGH 1
+
+#define OFFSET_GPIO_SWPORTA_DR (0x0000)
+#define OFFSET_GPIO_SWPORTA_DDR (0x0004)
+#define OFFSET_GPIO_PORTA_CTL (0x0008)
+#define OFFSET_GPIO_SWPORTB_DR (0x000C)
+#define OFFSET_GPIO_SWPORTB_DDR (0x0010)
+#define OFFSET_GPIO_PORTB_CTL (0x0014)
+#define OFFSET_GPIO_INTEN (0x0030)
+#define OFFSET_GPIO_INTMASK (0x0034)
+#define OFFSET_GPIO_INTTYPE_LEVEL (0x0038)
+#define OFFSET_GPIO_INT_POLARITY (0x003C)
+#define OFFSET_GPIO_INTSTATUS (0x0040)
+#define OFFSET_GPIO_RAWINTSTATUS (0x0044)
+#define OFFSET_GPIO_DEBOUNCE (0x0048)
+#define OFFSET_GPIO_PORTA_EOI (0x004C)
+#define OFFSET_GPIO_EXT_PORTA (0x0050)
+#define OFFSET_GPIO_EXT_PORTB (0x0054)
+
+static inline void FH_GPIO_SetValue(unsigned int base, int bit, int val)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_SWPORTA_DR);
+ reg = val ? (reg | (1 << bit)) : (reg & ~(1 << bit));
+ SET_REG(base + OFFSET_GPIO_SWPORTA_DR, reg);
+}
+
+static inline int FH_GPIO_GetValue(unsigned int base, int bit)
+{
+ return (GET_REG(base + OFFSET_GPIO_EXT_PORTA) >> bit) & 0x1;
+}
+
+static inline void FH_GPIO_SetDirection(unsigned int base, int bit, int dir)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_SWPORTA_DDR);
+ reg = dir ? reg | (1 << bit) : reg & ~(1 << bit);
+ SET_REG(base + OFFSET_GPIO_SWPORTA_DDR, reg);
+}
+
+static inline int FH_GPIO_GetDirection(unsigned int base, int bit)
+{
+ return (GET_REG(base + OFFSET_GPIO_SWPORTA_DDR) >> bit) & 0x1;
+}
+
+static inline void FH_GPIOB_SetValue(unsigned int base, int bit, int val)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_SWPORTB_DR);
+ reg = val ? (reg | (1 << bit)) : (reg & ~(1 << bit));
+ SET_REG(base + OFFSET_GPIO_SWPORTB_DR, reg);
+}
+
+static inline int FH_GPIOB_GetValue(unsigned int base, int bit)
+{
+ return (GET_REG(base + OFFSET_GPIO_EXT_PORTB) >> bit) & 0x1;
+}
+
+static inline void FH_GPIOB_SetDirection(unsigned int base, int bit, int dir)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_SWPORTB_DDR);
+ reg = dir ? reg | (1 << bit) : reg & ~(1 << bit);
+ SET_REG(base + OFFSET_GPIO_SWPORTB_DDR, reg);
+}
+
+static inline int FH_GPIOB_GetDirection(unsigned int base, int bit)
+{
+ return (GET_REG(base + OFFSET_GPIO_SWPORTB_DDR) >> bit) & 0x1;
+}
+
+static inline void FH_GPIO_EnableDebounce(unsigned int base, int bit, int bool)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_DEBOUNCE);
+ reg = bool ? reg | (1 << bit) : reg & ~(1 << bit);
+ SET_REG(base + OFFSET_GPIO_DEBOUNCE, reg);
+}
+
+static inline void FH_GPIO_SetInterruptType(unsigned int base, int bit,
+ int type)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_INTTYPE_LEVEL);
+ reg = type ? reg | (1 << bit) : reg & ~(1 << bit);
+ SET_REG(base + OFFSET_GPIO_INTTYPE_LEVEL, reg);
+}
+
+static inline void FH_GPIO_SetInterruptPolarity(unsigned int base, int bit,
+ int pol)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_INT_POLARITY);
+ reg = pol ? reg | (1 << bit) : reg & ~(1 << bit);
+ SET_REG(base + OFFSET_GPIO_INT_POLARITY, reg);
+}
+
+static inline void FH_GPIO_EnableInterruptMask(unsigned int base, int bit,
+ int bool)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_INTMASK);
+ reg = bool ? reg | (1 << bit) : reg & ~(1 << bit);
+ SET_REG(base + OFFSET_GPIO_INTMASK, reg);
+}
+
+static inline void FH_GPIO_EnableInterrupt(unsigned int base, int bit, int bool)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_INTEN);
+ reg = bool ? reg | (1 << bit) : reg & ~(1 << bit);
+ SET_REG(base + OFFSET_GPIO_INTEN, reg);
+}
+
+static inline void FH_GPIO_SetEnableInterrupts(unsigned int base,
+ unsigned int val)
+{
+ SET_REG(base + OFFSET_GPIO_INTEN, val);
+}
+
+static inline unsigned int FH_GPIO_GetEnableInterrupts(unsigned int base)
+{
+ return GET_REG(base + OFFSET_GPIO_INTEN);
+}
+
+static inline unsigned int FH_GPIO_GetInterruptStatus(unsigned int base)
+{
+ return GET_REG(base + OFFSET_GPIO_INTSTATUS);
+}
+
+static inline void FH_GPIO_ClearInterrupt(unsigned int base, int bit)
+{
+ unsigned int reg;
+
+ reg = GET_REG(base + OFFSET_GPIO_PORTA_EOI);
+ reg |= (1 << bit);
+ SET_REG(base + OFFSET_GPIO_PORTA_EOI, reg);
+}
+
+#define GPIO_NAME "FH_GPIO"
+
+
+struct gpio_irq_info {
+ int irq_gpio;
+ int irq_line;
+ int irq_type;
+ int irq_gpio_val;
+ int irq_gpio_mode;
+};
+
+struct fh_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *base;
+
+ struct platform_device *pdev;
+ int irq;
+ spinlock_t lock;
+
+ u32 gpio_wakeups;
+ u32 gpio_backups;
+};
+
+/*
+ * The get/set/clear functions will inline when called with constant
+ * parameters referencing built-in GPIOs, for low-overhead bitbanging.
+ *
+ * gpio_set_value() will inline only on traditional Davinci style controllers
+ * with distinct set/clear registers.
+ *
+ * Otherwise, calls with variable parameters or referencing external
+ * GPIOs (e.g. on GPIO expander chips) use outlined functions.
+ */
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ __gpio_set_value(gpio, value);
+}
+
+/* Returns zero or nonzero; works for gpios configured as inputs OR
+ * as outputs, at least for built-in GPIOs.
+ *
+ * NOTE: for built-in GPIOs, changes in reported values are synchronized
+ * to the GPIO clock. This is easily seen after calling gpio_set_value()
+ * and then immediately gpio_get_value(), where the gpio_get_value() will
+ * return the old value until the GPIO clock ticks and the new value gets
+ * latched.
+ */
+static inline int gpio_get_value(unsigned gpio)
+{
+ return __gpio_get_value(gpio);
+}
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+ return 0;
+}
+
+static inline int gpio_to_irq(unsigned gpio)
+{
+ return __gpio_to_irq(gpio);
+}
+
+static inline int irq_to_gpio(unsigned irq)
+{
+ return 0;
+}
+
+void fh_gpio_irq_suspend(void);
+void fh_gpio_irq_resume(void);
+
+#endif
+
diff --git a/arch/arm/mach-fh/include/mach/hardware.h b/arch/arm/mach-fh/include/mach/hardware.h
new file mode 100644
index 00000000..af93e037
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/hardware.h
@@ -0,0 +1,18 @@
+/*
+* Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
+* All Rights Reserved. Confidential.
+*
+*This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation version 2.
+*
+* This program is distributed "as is" WITHOUT ANY WARRANTY of any
+* kind, whether express or implied; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef __ASM_ARCH_HARDWARE_H
+#define __ASM_ARCH_HARDWARE_H
+
+#endif /* __ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-fh/include/mach/i2c.h b/arch/arm/mach-fh/include/mach/i2c.h
new file mode 100644
index 00000000..8bf70b6e
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/i2c.h
@@ -0,0 +1,339 @@
+/*
+* Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
+* All Rights Reserved. Confidential.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation version 2.
+*
+* This program is distributed "as is" WITHOUT ANY WARRANTY of any
+* kind, whether express or implied; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef __ASM_ARCH_I2C_H
+#define __ASM_ARCH_I2C_H
+#include "fh_predefined.h"
+
+//I2C
+#define REG_I2C_CON (0x0000)
+#define REG_I2C_TAR (0x0004)
+#define REG_I2C_SAR (0x0008)
+#define REG_I2C_HS_MADDR (0x000C)
+#define REG_I2C_DATA_CMD (0x0010)
+#define REG_I2C_SS_SCL_HCNT (0x0014)
+#define REG_I2C_SS_SCL_LCNT (0x0018)
+#define REG_I2C_FS_SCL_HCNT (0x001C)
+#define REG_I2C_FS_SCL_LCNT (0x0020)
+#define REG_I2C_HS_SCL_HCNT (0x0024)
+#define REG_I2C_HS_SCL_LCNT (0x0028)
+#define REG_I2C_INTR_STAT (0x002c)
+#define REG_I2C_INTR_MASK (0x0030)
+#define REG_I2C_RAW_INTR_STAT (0x0034)
+#define REG_I2C_RX_TL (0x0038)
+#define REG_I2C_TX_TL (0x003c)
+#define REG_I2C_CLR_INTR (0x0040)
+#define REG_I2C_ENABLE (0x006c)
+#define REG_I2C_STATUS (0x0070)
+#define REG_I2C_TXFLR (0x0074)
+#define REG_I2C_RXFLR (0x0078)
+#define REG_I2C_DMA_CR (0x0088)
+#define REG_I2C_DMA_TDLR (0x008c)
+#define REG_I2C_DMA_RDLR (0x0090)
+
+#define DW_IC_INTR_NONE 0x0
+
+
+enum BUS_STATUS {
+ I2C_BUSY,
+ I2C_IDLE
+};
+enum RESULT {
+ SUCCESS,
+ FAILURE
+};
+enum ENABLE_SET {
+ DISABLE,
+ ENABLE
+};
+enum SPEED_MODE {
+ SSPEED = 1,
+ FSPEED = 2,
+ HSPEED = 3,
+};
+
+UINT32 I2c_Disable(UINT32 base_addr);
+
+void I2c_SetSpeed(UINT32 base_addr, UINT8 model);
+void I2c_SetDeviceId(UINT32 base_addr, UINT32 deviceID);
+void I2c_Enable(UINT32 enable);
+UINT32 I2c_GetStatus(UINT32 base_addr);
+void I2c_SetIr(UINT32 base_addr, UINT16 mask);
+UINT32 I2c_Disable(UINT32 base_addr);
+
+void I2c_Init(UINT32 base_addr, UINT16 slave_addr, enum SPEED_MODE speed,
+ int txtl, int rxtl);
+
+/* function Macro */
+
+/*************************************************************************
+* Function Name : I2C_GetTransmitFifoLevel
+* Description : get tx fifo level
+* Input : base addr
+* Output : None
+* Return : current TX FIFO level
+*
+ ***********************************************************************/
+#define I2C_GetTransmitFifoLevel(base_addr) (GET_REG(base_addr + \
+ REG_I2C_TXFLR))
+
+/*******************************************************************************
+* Function Name : I2c_GetTxFifoDepth
+* Description : get tx fifo depth
+* Input : base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define I2c_GetTxFifoDepth( base_addr) (((GET_REG(base_addr + \
+ DW_IC_COMP_PARAM_1)>> 16) & 0xff) + 1)
+
+/*******************************************************************************
+* Function Name : I2c_GetRxFifoDepth
+* Description : get rx fifo depth
+* Input : base addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define I2c_GetRxFifoDepth( base_addr) (((GET_REG(base_addr + \
+ DW_IC_COMP_PARAM_1)>> 8) & 0xff) + 1)
+/*******************************************************************************
+* Function Name : I2c_SetDeviceId
+* Description : set the slave addr
+* Input : deviceID:slave addr
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define I2c_SetDeviceId( base_addr,deviceID) SET_REG(base_addr + REG_I2C_TAR,deviceID) //set IIC slave address
+
+/*******************************************************************************
+* Function Name : I2c_Read
+* Description : read data from I2C bus
+* Input : None
+* Output : None
+* Return : data:I2C data
+*
+ *******************************************************************************/
+
+#define I2c_Read(base_addr ) (GET_REG(base_addr + REG_I2C_DATA_CMD)&0xff) //DW_I2C_DATA_CMD
+/*******************************************************************************
+* Function Name : I2c_SetSsSclHcnt
+* Description : set i2c ss scl hcnt
+* Input : hcnt
+* Output : None
+* Return : data:I2C data
+*
+ *******************************************************************************/
+
+#define I2c_SetSsHcnt(base_addr, hcnt) SET_REG(base_addr + DW_IC_SS_SCL_HCNT,hcnt)
+
+/*******************************************************************************
+* Function Name : I2c_SetSsSclLcnt
+* Description : set i2c ss scl lcnt
+* Input : lcnt
+* Output : None
+* Return : data:I2C data
+*
+ *******************************************************************************/
+
+#define I2c_SetSsLcnt(base_addr, lcnt) SET_REG(base_addr + DW_IC_SS_SCL_LCNT,lcnt)
+/*******************************************************************************
+* Function Name : I2c_SetFsSclHcnt
+* Description : set i2c fs scl hcnt
+* Input : hcnt
+* Output : None
+* Return : data:I2C data
+*
+ *******************************************************************************/
+
+#define I2c_SetFsHcnt(base_addr, hcnt) SET_REG(base_addr + DW_IC_FS_SCL_HCNT,hcnt)
+
+/*******************************************************************************
+* Function Name : I2c_SetFsSclLcnt
+* Description : set i2c fs scl lcnt
+* Input : lcnt
+* Output : None
+* Return : data:I2C data
+*
+ *******************************************************************************/
+
+#define I2c_SetFsLcnt(base_addr, lcnt) SET_REG(base_addr + DW_IC_FS_SCL_LCNT,lcnt)
+/*******************************************************************************
+* Function Name : I2c_Disable
+* Description : disable I2C bus
+* Input : None
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define I2c_DisEnable(base_addr) SET_REG(base_addr + REG_I2C_ENABLE,DISABLE);
+/*******************************************************************************
+* Function Name : I2c_Enable
+* Description : set the I2C bus enable
+* Input : enable
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define I2c_Enable(base_addr) SET_REG(base_addr + REG_I2C_ENABLE,ENABLE);
+/*******************************************************************************
+* Function Name : I2c_Write
+* Description : Write data to I2C bus
+* Input : data: write out data
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+#define I2c_Write(base_addr, data) SET_REG(base_addr + REG_I2C_DATA_CMD,data)
+
+/*******************************************************************************
+* Function Name : I2c_GetTxTl
+* Description : Get TX_TL
+* Input : TX_TL
+* Return : None
+ *******************************************************************************/
+#define I2c_GetTxTl(base_addr ) (GET_REG(base_addr + REG_I2C_TX_TL)&0xff);
+/*******************************************************************************
+* Function Name : I2c_GetRxTl
+* Description : Get RX_TL
+* Input : RX_TL
+* Return : None
+ *******************************************************************************/
+#define I2c_GetRxTl(base_addr ) (GET_REG(base_addr + REG_I2C_RX_TL)&0xff);
+/*******************************************************************************
+* Function Name : I2c_GetRxFLR
+* Description : Get RX_FLR
+* Input : base_addr
+* Return : None
+ *******************************************************************************/
+#define I2c_GetRxFLR(base_addr) (GET_REG(base_addr + DW_IC_RXFLR)&0xff);
+/*******************************************************************************
+* Function Name : I2c_GetTxFLR
+* Description : Get TX_FLR
+* Input : base_addr
+* Return : None
+ *******************************************************************************/
+#define I2c_GetTxFLR(base_addr) (GET_REG(base_addr + DW_IC_TXFLR)&0xff);
+/*******************************************************************************
+* Function Name : I2c_SetTxRxTl
+* Description : set TX_TL RX_TL
+* Input : TX_TL, RX_TL
+* Return : None
+ *******************************************************************************/
+#define I2c_SetTxRxTl(base_addr ,txtl,rxtl) \
+ SET_REG(base_addr + REG_I2C_TX_TL, txtl); \
+ SET_REG(base_addr + REG_I2C_RX_TL, rxtl)
+
+/*******************************************************************************
+* Function Name : I2c_IsActiveMst
+* Description : if master mode is active, return 1
+* Input : none
+* Return : MST_ACTIVITY (IC_STATUS[5])
+ *******************************************************************************/
+#define I2c_IsActiveMst(base_addr) (GET_REG(base_addr + REG_I2C_STATUS)>>5 & 1)
+
+/*******************************************************************************
+* Function Name : I2c_SetCon
+* Description : set config
+* Input : config
+* Return : None
+ *******************************************************************************/
+#define I2c_SetCon(base_addr,config) SET_REG(base_addr + REG_I2C_CON,config)
+/*******************************************************************************
+* Function Name : I2c_GetCon
+* Description : get config
+* Input : config
+* Return : None
+ *******************************************************************************/
+#define I2c_GetCon(base_addr) GET_REG(base_addr + REG_I2C_CON)
+
+/*******************************************************************************
+* Function Name : I2c_Status
+* Description : get i2c status
+* Input : None
+* Return : None
+ *******************************************************************************/
+#define I2c_Status(base_addr) GET_REG(base_addr + REG_I2C_STATUS)
+
+/*******************************************************************************
+* Function Name : I2c_SetTar
+* Description : set target address
+* Input : id
+* Return : None
+ *******************************************************************************/
+#define I2c_SetTar(base_addr, id) SET_REG(base_addr + REG_I2C_TAR,id)
+
+/*******************************************************************************
+* Function Name : I2c_SetIntrMask
+* Description : set interrupt mask
+* Input : mask
+* Return : None
+ *******************************************************************************/
+#define I2c_SetIntrMask(base_addr,mask) SET_REG(base_addr + REG_I2C_INTR_MASK,mask)
+
+/*******************************************************************************
+* Function Name : I2c_ClrIntr
+* Description : clear interrupt
+* Input : mask
+* Return : None
+ *******************************************************************************/
+#define I2c_ClrIntr(base_addr,mask) GET_REG(base_addr + mask)
+/*******************************************************************************
+* Function Name : I2c_GetTxAbrtSource
+* Description : get TX abort source
+* Input : mask
+* Return : None
+ *******************************************************************************/
+#define I2c_GetTxAbrtSource(base_addr) GET_REG(base_addr + DW_IC_TX_ABRT_SOURCE)
+
+/*******************************************************************************
+* Function Name : I2c_TxEmpty
+* Description : TX_EMPTY interrupt assert
+* Input : none
+* Return : TX_EMPTY
+ *******************************************************************************/
+#define I2c_TxEmpty(base_addr) (GET_REG(base_addr + REG_I2C_RAW_INTR_STAT) & M_TX_EMPTY)
+
+/*******************************************************************************
+* Function Name : I2c_RxFull
+* Description : RX_FULL interrupt assert
+* Input : none
+* Return : RX_FULL
+ *******************************************************************************/
+#define I2c_RxFull(base_addr) (GET_REG(base_addr + REG_I2C_RAW_INTR_STAT) & M_RX_FULL)
+/*******************************************************************************
+* Function Name : I2c_RxEmpty
+* Description : RX_OVER raw interrupt assert (NOTE(review): name says RxEmpty but macro checks M_RX_OVER -- verify intent)
+* Input : none
+* Return : RX_EMPTY
+ *******************************************************************************/
+#define I2c_RxEmpty(base_addr) (GET_REG(base_addr + REG_I2C_RAW_INTR_STAT) & M_RX_OVER)
+
+/* register define */
+typedef union {
+ struct {
+ UINT32 MASTER_MODE : 1;
+ UINT32 SPEED : 2;
+ UINT32 IC_10BITADDR_SLAVE : 1;
+ UINT32 IC_10BITADDR_MASTER : 1;
+ UINT32 IC_RESTART_EN : 1;
+ UINT32 IC_SLAVE_DISABLE : 1;
+ UINT32 reserved_31_7 : 25;
+ } x;
+ UINT32 dw;
+} Reg_I2c_Con;
+
+#endif /* __ASM_ARCH_I2C_H */
diff --git a/arch/arm/mach-fh/include/mach/io.h b/arch/arm/mach-fh/include/mach/io.h
new file mode 100644
index 00000000..aa4883a3
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/io.h
@@ -0,0 +1,132 @@
+/*
+ * fh io definitions
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __ASM_ARCH_IO_H
+#define __ASM_ARCH_IO_H
+
+#include <mach/chip.h>
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+/*
+ * We don't actually have real ISA nor PCI buses, but there are so many
+ * drivers out there that might just work if we fake them...
+ */
+#define __io(a) __typesafe_io(a)
+#define __mem_pci(a) (a)
+#define __mem_isa(a) (a)
+
+#ifndef CONFIG_JLINK_DEBUG
+#define FH_VIRT 0xFE000000
+
+#define VA_INTC_REG_BASE (FH_VIRT + 0x00000)
+#define VA_TIMER_REG_BASE (FH_VIRT + 0x10000)
+#define VA_UART0_REG_BASE (FH_VIRT + 0x20000)
+/* #define VA_GMAC_REG_BASE (FH_VIRT + 0x30000) */
+/* #define VA_SPI0_REG_BASE (FH_VIRT + 0x40000) */
+/* #define VA_GPIO_REG_BASE (FH_VIRT + 0x50000) */
+/* #define VA_DMAC_REG_BASE (FH_VIRT + 0x60000) */
+/* #define VA_SDC0_REG_BASE (FH_VIRT + 0x70000) */
+/* #define VA_I2C_REG_BASE (FH_VIRT + 0x80000) */
+#define VA_PMU_REG_BASE (FH_VIRT + 0x90000)
+/* #define VA_SDC1_REG_BASE (FH_VIRT + 0xa0000) */
+#define VA_UART1_REG_BASE (FH_VIRT + 0xa0000)
+#define VA_UART2_REG_BASE (FH_VIRT + 0xb0000)
+#define VA_UART3_REG_BASE (FH_VIRT + 0xc0000)
+
+#define VA_RAM_REG_BASE (FH_VIRT + 0xd0000)
+#define VA_DDRC_REG_BASE (FH_VIRT + 0xe0000)
+
+#ifdef CONFIG_ARCH_FH
+#define VA_CONSOLE_REG_BASE VA_UART1_REG_BASE
+#else
+#define VA_CONSOLE_REG_BASE VA_UART0_REG_BASE
+#endif
+
+#define I2C_OFFSET (VA_I2C_REG_BASE - I2C_REG_BASE)
+#define VI2C(x) (x + I2C_OFFSET)
+
+#define INTC_OFFSET (VA_INTC_REG_BASE - INTC_REG_BASE)
+#define VINTC(x) (x + INTC_OFFSET)
+
+#define TIME_OFFSET (VA_TIMER_REG_BASE - TIMER_REG_BASE)
+#define VTIMER(x) (x + TIME_OFFSET)
+
+#define UART0_OFFSET (VA_UART0_REG_BASE - UART0_REG_BASE)
+#define VUART0(x) (x + UART0_OFFSET)
+
+#define UART1_OFFSET (VA_UART1_REG_BASE - UART1_REG_BASE)
+#define VUART1(x) (x + UART1_OFFSET)
+
+#define UART2_OFFSET (VA_UART2_REG_BASE - UART2_REG_BASE)
+#define VUART2(x) (x + UART2_OFFSET)
+
+#define UART3_OFFSET (VA_UART3_REG_BASE - UART3_REG_BASE)
+#define VUART3(x) (x + UART3_OFFSET)
+
+
+#define SPI0_OFFSET (VA_SPI0_REG_BASE - SPI0_REG_BASE)
+#define VSPI0(x) (x + SPI0_OFFSET)
+
+#define GMAC_OFFSET (VA_GMAC_REG_BASE - GMAC_REG_BASE)
+#define VGMAC(x) (x + GMAC_OFFSET)
+
+#define DMAC_OFFSET (VA_DMAC_REG_BASE - DMAC_REG_BASE)
+#define VDMAC(x) (x + DMAC_OFFSET)
+
+#define SDC0_OFFSET (VA_SDC0_REG_BASE - SDC0_REG_BASE)
+#define VSDC0(x) (x + SDC0_OFFSET)
+
+#define SDC1_OFFSET (VA_SDC1_REG_BASE - SDC1_REG_BASE)
+#define VSDC1(x) (x + SDC1_OFFSET)
+
+#define GPIO_OFFSET (VA_GPIO_REG_BASE - GPIO_REG_BASE)
+#define VGPIO(x) (x + GPIO_OFFSET)
+
+#define PMU_OFFSET (VA_PMU_REG_BASE - PMU_REG_BASE)
+#define VPMU(x) (x + PMU_OFFSET)
+
+#define PAE_OFFSET (VA_PAE_REG_BASE - PAE_REG_BASE)
+#define VPAE(x) (x + PAE_OFFSET)
+
+#else
+#define VA_INTC_REG_BASE INTC_REG_BASE
+#define VA_TIMER_REG_BASE TIMER_REG_BASE
+#define VA_UART0_REG_BASE UART0_REG_BASE
+#define VA_UART1_REG_BASE UART1_REG_BASE
+#define VA_GMAC_REG_BASE GMAC_REG_BASE
+#define VA_DMAC_REG_BASE DMAC_REG_BASE
+#define VA_I2C_REG_BASE I2C_REG_BASE
+#define VA_SDC0_REG_BASE SDC0_REG_BASE
+
+#define VA_SPI0_REG_BASE SPI0_REG_BASE
+
+#define VA_GPIO_REG_BASE GPIO0_REG_BASE
+#define VA_PMU_REG_BASE PMU_REG_BASE
+
+//#define VA_GPIO_REG_BASE (FH_VIRT + 0x500000)
+
+#define VINTC(x) x
+#define VTIMER(x) x
+#define VUART0(x) x
+#define VUART1(x) x
+#define VUART2(x) x
+#define VUART3(x) x
+#define VGMAC(x) x
+
+#define VDMAC(x) x
+#define VI2C(x) x
+#define VSDC0(x) x
+
+#define VSPI0(x) x
+#define VPMU(x) x
+
+#endif
+#endif /* __ASM_ARCH_IO_H */
diff --git a/arch/arm/mach-fh/include/mach/iomux.h b/arch/arm/mach-fh/include/mach/iomux.h
new file mode 100644
index 00000000..398b0a6e
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/iomux.h
@@ -0,0 +1,165 @@
+#ifndef IOMUX_H_
+#define IOMUX_H_
+#include <linux/types.h>
+#include <linux/io.h>
+#include <mach/io.h>
+
+#define IOMUX_PADTYPE(n) (Iomux_PadType##n *)
+#define IOMUX_PUPD_NONE 0
+#define IOMUX_PUPD_DOWN 1
+#define IOMUX_PUPD_UP 2
+#define IOMUX_PUPD_KEEPER 3
+//#define IOMUX_DEBUG
+
+
+typedef union {
+ struct {
+ __u32 sr : 1;
+ __u32 reserved_3_1 : 3;
+
+ __u32 e8_e4 : 2;
+ __u32 reserved_31_6 : 24;
+
+ } bit;
+ __u32 dw;
+} Iomux_PadType5;
+
+typedef union {
+ struct {
+ __u32 sr : 1;
+ __u32 reserved_3_1 : 3;
+
+ __u32 e8_e4 : 2;
+ __u32 reserved_7_6 : 2;
+
+ __u32 mfs : 1;
+ __u32 reserved_31_9 : 23;
+
+ } bit;
+ __u32 dw;
+} Iomux_PadType8;
+
+
+typedef union {
+ struct {
+ __u32 smt : 1;
+ __u32 reserved_3_1 : 3;
+
+ __u32 ie : 1;
+ __u32 reserved_7_5 : 3;
+
+ __u32 pu_pd : 2;
+ __u32 reserved_31_10 : 22;
+
+ } bit;
+ __u32 dw;
+} Iomux_PadType9;
+
+
+typedef union {
+ struct {
+ __u32 e4_e2 : 2;
+ __u32 reserved_3_2 : 2;
+
+ __u32 smt : 1;
+ __u32 reserved_7_5 : 3;
+
+ __u32 ie : 1;
+ __u32 reserved_11_9 : 3;
+
+ __u32 mfs : 2;
+ __u32 reserved_31_14 : 18;
+
+ } bit;
+ __u32 dw;
+} Iomux_PadType13;
+
+typedef union {
+ struct {
+ __u32 sr : 1;
+ __u32 reserved_3_1 : 3;
+
+ __u32 e8_e4 : 2;
+ __u32 reserved_7_6 : 2;
+
+ __u32 smt : 1;
+ __u32 reserved_11_9 : 3;
+
+ __u32 ie : 1;
+ __u32 e : 1; //only for PAD_MAC_REF_CLK_CFG (0x00a4)
+ __u32 reserved_15_12 : 2;
+
+ __u32 pu_pd : 2;
+ __u32 reserved_31_18 : 14;
+
+ } bit;
+ __u32 dw;
+} Iomux_PadType17;
+
+typedef union {
+ struct {
+ __u32 sr : 1;
+ __u32 reserved_3_1 : 3;
+
+ __u32 e4_e2 : 2;
+ __u32 reserved_7_6 : 2;
+
+ __u32 smt : 1;
+ __u32 reserved_11_9 : 3;
+
+ __u32 ie : 1;
+ __u32 reserved_15_13 : 3;
+
+ __u32 pu_pd : 2;
+ __u32 reserved_19_18 : 2;
+
+ __u32 mfs : 1;
+ __u32 reserved_31_21 : 11;
+
+ } bit;
+ __u32 dw;
+} Iomux_PadType20;
+
+
+typedef union {
+ struct {
+ __u32 sr : 1;
+ __u32 reserved_3_1 : 3;
+
+ __u32 e4_e2 : 2;
+ __u32 reserved_7_6 : 2;
+
+ __u32 smt : 1;
+ __u32 reserved_11_9 : 3;
+
+ __u32 ie : 1;
+ __u32 reserved_15_13 : 3;
+
+ __u32 pu_pd : 2;
+ __u32 reserved_19_18 : 2;
+
+ __u32 mfs : 2;
+ __u32 reserved_31_21 : 10;
+
+ } bit;
+ __u32 dw;
+} Iomux_PadType21;
+
+typedef struct {
+ u32 *reg;
+ u32 reg_offset;
+ char *func_name[4];
+ int reg_type;
+ int func_sel;
+ int drv_cur;
+ int pupd;
+} Iomux_Pad;
+
+typedef struct {
+ void __iomem *base;
+ Iomux_Pad *pads;
+} Iomux_Object;
+
+void fh_iomux_init(Iomux_Object *iomux_obj);
+
+#endif /* IOMUX_H_ */
diff --git a/arch/arm/mach-fh/include/mach/irqs.h b/arch/arm/mach-fh/include/mach/irqs.h
new file mode 100644
index 00000000..0138006f
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/irqs.h
@@ -0,0 +1,43 @@
+/*
+ * fh interrupt controller definitions
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __ASM_ARCH_IRQS_H
+#define __ASM_ARCH_IRQS_H
+
+#include <mach/chip.h>
+
+#define REG_IRQ_EN_LOW (INTC_REG_BASE + 0x0000)
+#define REG_IRQ_EN_HIGH (INTC_REG_BASE + 0x0004)
+#define REG_IRQ_IRQMASK_LOW (INTC_REG_BASE + 0x0008)
+#define REG_IRQ_IRQMASK_HIGH (INTC_REG_BASE + 0x000C)
+#define REG_IRQ_IRQFORCE_LOW (INTC_REG_BASE + 0x0010)
+#define REG_IRQ_IRQFORCE_HIGH (INTC_REG_BASE + 0x0014)
+#define REG_IRQ_RAWSTATUS_LOW (INTC_REG_BASE + 0x0018)
+#define REG_IRQ_RAWSTATUS_HIGH (INTC_REG_BASE + 0x001C)
+#define REG_IRQ_STATUS_LOW (INTC_REG_BASE + 0x0020)
+#define REG_IRQ_STATUS_HIGH (INTC_REG_BASE + 0x0024)
+#define REG_IRQ_MASKSTATUS_LOW (INTC_REG_BASE + 0x0028)
+#define REG_IRQ_MASKSTATUS_HIGH (INTC_REG_BASE + 0x002C)
+#define REG_IRQ_FINSTATUS_LOW (INTC_REG_BASE + 0x0030)
+#define REG_IRQ_FINSTATUS_HIGH (INTC_REG_BASE + 0x0034)
+#define REG_FIQ_EN_LOW (INTC_REG_BASE + 0x02C0)
+#define REG_FIQ_EN_HIGH (INTC_REG_BASE + 0x02C4)
+#define REG_FIQ_FIQMASK_LOW (INTC_REG_BASE + 0x02C8)
+#define REG_FIQ_FIQMASK_HIGH (INTC_REG_BASE + 0x02CC)
+#define REG_FIQ_FIQFORCE_LOW (INTC_REG_BASE + 0x02D0)
+#define REG_FIQ_FIQFORCE_HIGH (INTC_REG_BASE + 0x02D4)
+#define REG_FIQ_RAWSTATUS_LOW (INTC_REG_BASE + 0x02D8)
+#define REG_FIQ_RAWSTATUS_HIGH (INTC_REG_BASE + 0x02DC)
+#define REG_FIQ_STATUS_LOW (INTC_REG_BASE + 0x02E0)
+#define REG_FIQ_STATUS_HIGH (INTC_REG_BASE + 0x02E4)
+#define REG_FIQ_FINSTATUS_LOW (INTC_REG_BASE + 0x02E8)
+#define REG_FIQ_FINSTATUS_HIGH (INTC_REG_BASE + 0x02EC)
+
+#endif /* __ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-fh/include/mach/memory.h b/arch/arm/mach-fh/include/mach/memory.h
new file mode 100644
index 00000000..c8c984a9
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/memory.h
@@ -0,0 +1,27 @@
+/*
+ * fh memory space definitions
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __ASM_ARCH_MEMORY_H
+#define __ASM_ARCH_MEMORY_H
+
+/**************************************************************************
+ * Included Files
+ **************************************************************************/
+#include <asm/page.h>
+#include <asm/sizes.h>
+
+/**************************************************************************
+ * Definitions
+ **************************************************************************/
+#define FH_DDR_BASE 0xA0000000
+
+#define PLAT_PHYS_OFFSET FH_DDR_BASE
+
+#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-fh/include/mach/pinctrl.h b/arch/arm/mach-fh/include/mach/pinctrl.h
new file mode 100644
index 00000000..2d1355ff
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/pinctrl.h
@@ -0,0 +1,137 @@
+#ifndef PINCTRL_H_
+#define PINCTRL_H_
+#include "pinctrl_osdep.h"
+#include <linux/seq_file.h>
+
+#define MSC_3_3V (0)
+#define MSC_1_8V (1)
+
+#define PINCTRL_UNUSED (-1)
+
+#define PUPD_NONE (0)
+#define PUPD_UP (1)
+#define PUPD_DOWN (2)
+
+#define INPUT_DISABLE (0)
+#define INPUT_ENABLE (1)
+#define OUTPUT_DISABLE (0)
+#define OUTPUT_ENABLE (1)
+
+#if defined(CONFIG_ARCH_FH8626V100)
+#define PUPD_DISABLE (1)
+#define PUPD_ENABLE (0)
+#define PUPD_ZERO (0)
+#else
+#define PUPD_DISABLE (0)
+#define PUPD_ENABLE (1)
+#define PUPD_ZERO (0)
+#endif
+
+#define FUNC0 (0)
+#define FUNC1 (1)
+#define FUNC2 (2)
+#define FUNC3 (3)
+#define FUNC4 (4)
+#define FUNC5 (5)
+#define FUNC6 (6)
+#define FUNC7 (7)
+
+#define NEED_CHECK_PINLIST (1)
+
+#define MAX_FUNC_NUM 6
+
+#define PINCTRL_FUNC(name, id, sel, pupd, ds) \
+PinCtrl_Pin PAD##id##_##name = \
+{ \
+ .pad_id = id, \
+ .func_name = #name, \
+ .reg_offset = (id * 4), \
+ .func_sel = sel, \
+ .pullup_pulldown = pupd, \
+ .driving_curr = ds, \
+}
+
+#define PINCTRL_MUX(pname, sel, ...) \
+PinCtrl_Mux MUX_##pname = \
+{ \
+ .mux_pin = { __VA_ARGS__ }, \
+ .cur_pin = sel, \
+}
+
+#define PINCTRL_DEVICE(name, count, ...) \
+typedef struct \
+{ \
+ char *dev_name; \
+ int mux_count; \
+ OS_LIST list; \
+ PinCtrl_Mux *mux[count]; \
+} PinCtrl_Device_##name; \
+PinCtrl_Device_##name pinctrl_dev_##name = \
+{ \
+ .dev_name = #name, \
+ .mux_count = count, \
+ .mux = { __VA_ARGS__ }, \
+}
+
+typedef union {
+ struct {
+ unsigned int sl : 1; /*0*/
+ unsigned int : 3; /*1~3*/
+ unsigned int ds : 3; /*4~6*/
+ unsigned int msc : 1; /*7*/
+ unsigned int st : 1; /*8*/
+ unsigned int : 3; /*9~11*/
+ unsigned int ie : 1; /*12*/
+ unsigned int : 3; /*13~15*/
+ unsigned int pdn : 1; /*16*/
+ unsigned int : 3; /*17~19*/
+ unsigned int pun : 1; /*20*/
+ unsigned int : 3; /*21~23*/
+ unsigned int mfs : 4; /*24~27*/
+ unsigned int oe : 1; /*28*/
+ unsigned int : 3; /*29~31*/
+ } bit;
+ unsigned int dw;
+} PinCtrl_Register;
+
+typedef struct
+{
+ char *func_name;
+ PinCtrl_Register *reg;
+ unsigned int pad_id : 8;
+ unsigned int reg_offset : 12;
+ unsigned int func_sel : 4;
+ unsigned int input_enable : 1;
+ unsigned int output_enable : 1;
+ unsigned int pullup_pulldown : 2;
+ unsigned int volt_mode : 1;
+ unsigned int driving_curr : 3;
+}PinCtrl_Pin;
+
+typedef struct
+{
+ int cur_pin;
+ PinCtrl_Pin *mux_pin[MUX_NUM];
+} PinCtrl_Mux;
+
+typedef struct
+{
+ void *vbase;
+ void *pbase;
+ PinCtrl_Pin *pinlist[PAD_NUM];
+} PinCtrl_Object;
+
+typedef struct
+{
+ char *dev_name;
+ int mux_count;
+ OS_LIST list;
+ void *mux;
+}PinCtrl_Device;
+
+void fh_pinctrl_init(unsigned int base);
+void fh_pinctrl_prt(struct seq_file *sfile);
+int fh_pinctrl_smux(char *devname, char* muxname, int muxsel, unsigned int flag);
+int fh_pinctrl_sdev(char *devname, unsigned int flag);
+void fh_pinctrl_init_devicelist(OS_LIST *list);
+#endif /* PINCTRL_H_ */
diff --git a/arch/arm/mach-fh/include/mach/pinctrl_osdep.h b/arch/arm/mach-fh/include/mach/pinctrl_osdep.h
new file mode 100644
index 00000000..8e3d24de
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/pinctrl_osdep.h
@@ -0,0 +1,23 @@
+#ifndef PINCTRL_OSDEP_H_
+#define PINCTRL_OSDEP_H_
+
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <mach/fh_predefined.h>
+#include <linux/string.h>
+
+#define OS_LIST_INIT LIST_HEAD_INIT
+#define OS_LIST struct list_head
+#define OS_PRINT printk
+#define OS_LIST_EMPTY INIT_LIST_HEAD
+#define OS_NULL NULL
+
+#define PINCTRL_ADD_DEVICE(name) \
+ list_add(&pinctrl_dev_##name.list, \
+ list)
+
+#define PAD_NUM (77)
+
+#define MUX_NUM (6)
+
+#endif /* PINCTRL_OSDEP_H_ */
diff --git a/arch/arm/mach-fh/include/mach/pmu.h b/arch/arm/mach-fh/include/mach/pmu.h
new file mode 100644
index 00000000..33c75f53
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/pmu.h
@@ -0,0 +1,16 @@
+
+#ifndef _FH_PMU_H_
+#define _FH_PMU_H_
+
+#include <linux/types.h>
+#include "fh_chipid.h"
+
+void fh_pmu_set_reg(u32 offset, u32 data);
+u32 fh_pmu_get_reg(u32 offset);
+int fh_pmu_init(void);
+
+void fh_pmu_stop(void);
+
+void fh_pae_set_reg(u32 offset, u32 data);
+
+#endif /* _FH_PMU_H_ */
diff --git a/arch/arm/mach-fh/include/mach/spi.h b/arch/arm/mach-fh/include/mach/spi.h
new file mode 100644
index 00000000..7123ce94
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/spi.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2009 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ARCH_ARM_FH_SPI_H
+#define __ARCH_ARM_FH_SPI_H
+
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+#define SPI_MASTER_CONTROLLER_MAX_SLAVE (2)
+#define SPI_TRANSFER_USE_DMA (0x77888877)
+
+struct fh_spi_cs {
+ u32 GPIO_Pin;
+ char *name;
+};
+
+struct fh_spi_chip {
+ u8 poll_mode; /* 0 for contoller polling mode */
+ u8 type; /* SPI/SSP/Micrwire */
+ u8 enable_dma;
+ void *cs_control;
+ /* void (*cs_control)(u32 command);*/
+};
+
+struct fh_spi_platform_data {
+ u32 apb_clock_in;
+ u32 fifo_len;
+ u32 slave_max_num;
+ struct fh_spi_cs cs_data[SPI_MASTER_CONTROLLER_MAX_SLAVE];
+ /*below is dma transfer needed*/
+ u32 dma_transfer_enable;
+ u32 rx_handshake_num;
+ u32 tx_handshake_num;
+ u32 bus_no;
+ char *clk_name;
+ u32 rx_dma_channel;
+ u32 tx_dma_channel;
+#define RX_ONLY_MODE 0x30
+#define TX_RX_MODE 0x31
+ u32 spidma_xfer_mode;
+ /* add support wire width*/
+ u32 ctl_wire_support;
+ u32 max_speed_support;
+ u32 data_reg_offset;
+#define INC_SUPPORT 0x55
+ u32 data_increase_support;
+ u32 data_field_size;
+#define SWAP_SUPPORT 0x55
+ u32 swap_support;
+ void (*plat_init)(struct fh_spi_platform_data *plat_data);
+#define SPI_DMA_PROTCTL_ENABLE 0x55
+ u32 dma_protctl_enable;
+ u32 dma_protctl_data;
+
+#define SPI_DMA_MASTER_SEL_ENABLE 0x55
+ u32 dma_master_sel_enable;
+ u32 dma_master_ctl_sel;
+ u32 dma_master_mem_sel;
+};
+
+#endif /* __ARCH_ARM_FH_SPI_H */
diff --git a/arch/arm/mach-fh/include/mach/sram.h b/arch/arm/mach-fh/include/mach/sram.h
new file mode 100644
index 00000000..46d2dcd1
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/sram.h
@@ -0,0 +1,25 @@
+/*
+ * mach/sram.h - FH simple SRAM allocator
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __MACH_SRAM_H
+#define __MACH_SRAM_H
+
+/*
+ * SRAM allocations return a CPU virtual address, or NULL on error.
+ * If a DMA address is requested and the SRAM supports DMA, its
+ * mapped address is also returned.
+ *
+ * Errors include SRAM memory not being available, and requesting
+ * DMA mapped SRAM on systems which don't allow that.
+ */
+extern void *sram_alloc(size_t len, dma_addr_t *dma);
+extern void sram_free(void *addr, size_t len);
+
+#endif /* __MACH_SRAM_H */
diff --git a/arch/arm/mach-fh/include/mach/system.h b/arch/arm/mach-fh/include/mach/system.h
new file mode 100644
index 00000000..6ff6766f
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/system.h
@@ -0,0 +1,43 @@
+/*
+ * mach/system.h
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __ASM_ARCH_SYSTEM_H
+#define __ASM_ARCH_SYSTEM_H
+
+#include <mach/io.h>
+#include <mach/fh_predefined.h>
+#include <mach/pmu.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/mach/time.h>
+#include <asm/proc-fns.h>
+
+extern void fh_intc_init(void);
+
+void fh_irq_suspend(void);
+void fh_irq_resume(void);
+
+extern unsigned int fh_cpu_suspend_sz;
+extern void fh_cpu_suspend(void);
+
+static inline void arch_idle(void)
+{
+
+}
+
+static inline void arch_reset(char mode, const char *cmd)
+{
+ fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0x7fffffff);
+}
+
+/*change the scheduling policy and/or RT priority of `current` thread. */
+extern void fh_setscheduler(int policy, int priority);
+
+#endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-fh/include/mach/timex.h b/arch/arm/mach-fh/include/mach/timex.h
new file mode 100644
index 00000000..7c6fafc9
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/timex.h
@@ -0,0 +1,21 @@
+/*
+ * FH timer subsystem
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __ASM_ARCH_TIMEX_H
+#define __ASM_ARCH_TIMEX_H
+#include "mach/chip.h"
+
+#define PAE_PTS_CLK (1000000)
+
+#define CLOCK_TICK_RATE TIMER_CLK
+
+extern struct sys_timer fh_timer;
+
+#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-fh/include/mach/uncompress.h b/arch/arm/mach-fh/include/mach/uncompress.h
new file mode 100644
index 00000000..819c6257
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/uncompress.h
@@ -0,0 +1,57 @@
+/*
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Initially based on:
+ * arch/arm/plat-omap/include/mach/uncompress.h
+ *
+ * Original copyrights follow.
+ *
+ * Copyright (C) 2000 RidgeRun, Inc.
+ * Author: Greg Lonnon <glonnon@ridgerun.com>
+ *
+ * Rewritten by:
+ * Author: <source@mvista.com>
+ * 2004 (c) MontaVista Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/types.h>
+
+#include <asm/mach-types.h>
+#include <mach/chip.h>
+
+#define REG_UART_THR (0x0000)
+#define REG_UART_USR (0x007c)
+
+#define REG_UART0_THR (*(unsigned char *)(CONSOLE_REG_BASE + REG_UART_THR))
+#define REG_UART0_USR (*(unsigned char *)(CONSOLE_REG_BASE + REG_UART_USR))
+
+/* Emit one character on the console UART for decompressor progress output. */
+static void putc(char c)
+{
+	/* Spin on USR bit 1 -- presumably "TX FIFO not full"; TODO confirm
+	 * against the UART (DW_apb_uart-style) datasheet. */
+	while (!(REG_UART0_USR & (1 << 1)))
+		barrier();
+
+	REG_UART0_THR = c;
+}
+
+/* Wait for the UART transmitter to drain.  USR bit 2 is presumably
+ * "TX FIFO empty" -- TODO confirm against the UART datasheet. */
+static inline void flush(void)
+{
+	while (!(REG_UART0_USR & (1 << 2)))
+		barrier();
+}
+
+/*
+ * Stub: the decompressor UART base is hard-wired to CONSOLE_REG_BASE,
+ * so no runtime address configuration is needed.
+ *
+ * Fix: the parameter was declared `void *__iomem virt`, which attaches
+ * the sparse __iomem address-space annotation to the pointer variable
+ * rather than to the pointed-to I/O memory; the conventional and
+ * correct form is `void __iomem *virt`.
+ */
+static inline void set_uart_info(u32 phys, void __iomem *virt)
+{
+
+}
+
+/* No machine-specific decompressor setup: the UART is assumed to be
+ * initialized by the boot loader before the kernel decompresses. */
+static inline void __arch_decomp_setup(unsigned long arch_id)
+{
+
+}
+
+#define arch_decomp_setup() __arch_decomp_setup(arch_id)
+#define arch_decomp_wdog()
diff --git a/arch/arm/mach-fh/include/mach/vmalloc.h b/arch/arm/mach-fh/include/mach/vmalloc.h
new file mode 100644
index 00000000..7796486e
--- /dev/null
+++ b/arch/arm/mach-fh/include/mach/vmalloc.h
@@ -0,0 +1,16 @@
+/*
+ * FH vmalloc definitions (derived from the DaVinci implementation)
+ *
+ * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <mach/hardware.h>
+
+/* Allow vmalloc range until the IO virtual range minus a 2M "hole" */
+///#define VMALLOC_END (IO_VIRT - (2<<20))
+
+#define VMALLOC_END (PAGE_OFFSET + 0x3e000000)
diff --git a/arch/arm/mach-fh/iomux.c b/arch/arm/mach-fh/iomux.c
new file mode 100644
index 00000000..f042a4bd
--- /dev/null
+++ b/arch/arm/mach-fh/iomux.c
@@ -0,0 +1,854 @@
+#include <mach/iomux.h>
+#include <mach/pmu.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+
+Iomux_Pad fh_iomux_cfg[] = {
+ {
+ .func_name = { "RESETN", "", "", "", },
+ .reg_type = 9,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = -1,
+ },
+ {
+ .func_name = { "TEST", "", "", "", },
+ .reg_type = 9,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = -1,
+ },
+ {
+ .func_name = { "CIS_CLK", "", "", "", },
+ .reg_type = 5,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_HSYNC", "GPIO20", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_VSYNC", "GPIO21", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_PCLK", "", "", "", },
+ .reg_type = 9,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 0,
+ },
+ {
+ .func_name = { "CIS_D0", "GPIO22", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D1", "GPIO23", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D2", "GPIO24", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D3", "GPIO25", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D4", "GPIO26", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D5", "GPIO27", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D6", "GPIO28", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D7", "GPIO29", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D8", "GPIO30", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D9", "GPIO31", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D10", "GPIO32", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_D11", "GPIO33", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_REF_CLK", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "MAC_MDC", "GPIO34", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 0,
+ },
+ {
+ .func_name = { "MAC_MDIO", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_COL", "GPIO35", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_CRS", "GPIO36", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_RXCK", "", "", "", },
+ .reg_type = 9,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = -1,
+ },
+ {
+ .func_name = { "MAC_RXD0", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = -1,
+ },
+
+ {
+ .func_name = { "MAC_RXD1", "GPIO38", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_RXD2", "GPIO39", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_RXD3", "GPIO40", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_RXDV", "GPIO41", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_TXCK", "", "", "", },
+ .reg_type = 9,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = -1,
+ },
+ {
+ .func_name = { "MAC_TXD0", "GPIO42", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_TXD1", "GPIO43", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_TXD2", "GPIO44", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_TXD3", "GPIO45", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_TXEN", "GPIO46", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "MAC_RXER", "GPIO47", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO0", "ARC_JTAG_TCK", "GPIO0", "CIS_SSI0_CSN1", },
+ .reg_type = 21,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO1", "ARC_JTAG_TRSTN", "GPIO1", "CIS_SSI0_RXD", },
+ .reg_type = 21,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO2", "ARC_JTAG_TMS", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO3", "ARC_JTAG_TDI", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO4", "ARC_JTAG_TDO", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "JTAG_TCK", "GPIO5", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "JTAG_TRSTN", "GPIO6", "PWM_OUT3", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "JTAG_TMS", "GPIO7", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "JTAG_TDI", "GPIO8", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "JTAG_TDO", "GPIO9", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO10", "UART1_OUT", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO11", "UART1_IN", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO12", "PWM_OUT0", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO13", "PWM_OUT1", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "GPIO14", "PWM_OUT2", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "UART0_IN", "GPIO48", "UART0_IN", " I2S_WS", },
+ .reg_type = 21,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "UART0_OUT", "GPIO49", "UART0_OUT", "I2S_CLK", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_SCL", "GPIO56", "CIS_SCL", "CIS_SSI0_CLK", },
+ .reg_type = 13,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "CIS_SDA", "GPIO57", "CIS_SDA", "CIS_SSI0_TXD", },
+ .reg_type = 13,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SCL1", "GPIO50", "SCL1", "I2S_DI", },
+ .reg_type = 21,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SDA1", "GPIO51", "I2S_DO", "", },
+ .reg_type = 21,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SSI0_CLK", "", "", "", },
+ .reg_type = 5,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SSI0_TXD", "", "", "", },
+ .reg_type = 5,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SSI0_CSN0", "GPIO54", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SSI0_CSN1", "GPIO55", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SSI0_RXD", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = -1,
+ },
+ {
+ .func_name = { "SD0_CD", "GPIO52", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SD0_WP", "GPIO53", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SD0_CLK", "", "", "", },
+ .reg_type = 5,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "SD0_CMD_RSP", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "SD0_DATA0", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "SD0_DATA1", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 2,
+ },
+ {
+ .func_name = { "SD0_DATA2", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "SD0_DATA3", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "SD1_CLK", "SSI1_CLK", "", "", },
+ .reg_type = 8,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_NONE,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SD1_CD", "GPIO_58", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SD1_WP", "GPIO_59", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+ {
+ .func_name = { "SD1_DATA0", "SSI1_TXD", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "SD1_DATA1", "SSI1_CSN0", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "SD1_DATA2", "SSI1_CSN1", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "SD1_DATA3", "", "", "", },
+ .reg_type = 17,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "SD1_CMD_RSP", "SSI1_RXD", "", "", },
+ .reg_type = 20,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = 3,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "CLK_SW0", "", "", "", },
+ .reg_type = 9,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = -1,
+ },
+ {
+ .func_name = { "CLK_SW1", "", "", "", },
+ .reg_type = 9,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = -1,
+ },
+ {
+ .func_name = { "CLK_SW2", "", "", "", },
+ .reg_type = 9,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = -1,
+ },
+ {
+ .func_name = { "CLK_SW3", "", "", "", },
+ .reg_type = 9,
+ .func_sel = 0,
+ .pupd = IOMUX_PUPD_UP,
+ .drv_cur = -1,
+ },
+ {
+ .func_name = { "RESERVED", "", "", "", },
+ .reg_type = 20,
+ .func_sel = -1,
+ },
+ {
+ .func_name = { "MAC_TXER", "GPIO37", "", "", },
+ .reg_type = 20,
+ .func_sel = 1,
+ .pupd = IOMUX_PUPD_DOWN,
+ .drv_cur = 1,
+ },
+};
+
+
+/*
+ * Program the function-select (mfs) field of one pad's shadow register.
+ * Only register layout types 8, 13, 20 and 21 carry an mfs field;
+ * pads of any other type are fixed-function and are left untouched.
+ * Note: pad->reg points at a caller-held shadow word, not the hardware
+ * register itself (see fh_iomux_init()).
+ */
+static void fh_iomux_setmfs(Iomux_Object *iomux_obj, Iomux_Pad *pad)
+{
+	switch (pad->reg_type) {
+	case 8:
+		(IOMUX_PADTYPE(8)pad->reg)->bit.mfs = pad->func_sel;
+		break;
+	case 13:
+		(IOMUX_PADTYPE(13)pad->reg)->bit.mfs = pad->func_sel;
+		break;
+	case 20:
+		(IOMUX_PADTYPE(20)pad->reg)->bit.mfs = pad->func_sel;
+		break;
+	case 21:
+		(IOMUX_PADTYPE(21)pad->reg)->bit.mfs = pad->func_sel;
+		break;
+	default:
+		break;
+	}
+
+}
+
+#ifdef IOMUX_DEBUG
+
+/*
+ * Read back the function-select field of one pad.
+ * Returns the mfs value, or -1 for pad layout types that have no mfs
+ * field (fixed-function pads).
+ */
+static int fh_iomux_getmfs(Iomux_Object *iomux_obj, Iomux_Pad *pad)
+{
+	int mfs;
+
+	switch (pad->reg_type) {
+	case 8:
+		mfs = (IOMUX_PADTYPE(8)pad->reg)->bit.mfs;
+		break;
+	case 13:
+		mfs = (IOMUX_PADTYPE(13)pad->reg)->bit.mfs;
+		break;
+	case 20:
+		mfs = (IOMUX_PADTYPE(20)pad->reg)->bit.mfs;
+		break;
+	case 21:
+		mfs = (IOMUX_PADTYPE(21)pad->reg)->bit.mfs;
+		break;
+	default:
+		mfs = -1;
+		break;
+
+	}
+	return mfs;
+}
+
+
+/*
+ * Debug dump (IOMUX_DEBUG builds only): print each pad's index, the
+ * name of its currently selected function and the raw register value.
+ */
+static void fh_iomux_print(Iomux_Object *iomux_obj)
+{
+	int i;
+	u32 reg;
+
+	printk("\tPad No.\t\tFunction Select\t\tRegister\n");
+
+	for (i = 0; i < ARRAY_SIZE(fh_iomux_cfg); i++) {
+		int curr_func;
+
+		curr_func = fh_iomux_getmfs(iomux_obj, &iomux_obj->pads[i]);
+		reg = readl((u32)iomux_obj->pads[i].reg);
+
+		/* pads without an mfs field report their fixed function name */
+		if (curr_func < 0)
+			printk("\t%d\t\t%-8s(no mfs)\t0x%08x\n", i, iomux_obj->pads[i].func_name[0],
+				reg);
+		else
+			printk("\t%d\t\t%-16s\t0x%08x\n", i, iomux_obj->pads[i].func_name[curr_func],
+				reg);
+
+	}
+
+}
+
+#endif
+
+/*
+ * Program the drive-strength field of one pad.  The field name depends
+ * on the register layout: e8_e4 for types 5/8/17, e4_e2 for 13/20/21;
+ * other layout types have no drive-strength control.
+ */
+static void fh_iomux_setcur(Iomux_Object *iomux_obj, Iomux_Pad *pad)
+{
+	switch (pad->reg_type) {
+	case 5:
+		(IOMUX_PADTYPE(5)pad->reg)->bit.e8_e4 = pad->drv_cur;
+		break;
+	case 8:
+		(IOMUX_PADTYPE(8)pad->reg)->bit.e8_e4 = pad->drv_cur;
+		break;
+	case 13:
+		(IOMUX_PADTYPE(13)pad->reg)->bit.e4_e2 = pad->drv_cur;
+		break;
+	case 17:
+		(IOMUX_PADTYPE(17)pad->reg)->bit.e8_e4 = pad->drv_cur;
+		break;
+	case 20:
+		(IOMUX_PADTYPE(20)pad->reg)->bit.e4_e2 = pad->drv_cur;
+		break;
+	case 21:
+		(IOMUX_PADTYPE(21)pad->reg)->bit.e4_e2 = pad->drv_cur;
+		break;
+	default:
+		break;
+	}
+
+}
+
+/*
+ * Program the pull-up/pull-down field of one pad (layout types 9, 17,
+ * 20 and 21 only; other types have no pull control).
+ */
+static void fh_iomux_setpupd(Iomux_Object *iomux_obj, Iomux_Pad *pad)
+{
+	switch (pad->reg_type) {
+	case 9:
+		(IOMUX_PADTYPE(9)pad->reg)->bit.pu_pd = pad->pupd;
+		break;
+	case 17:
+		(IOMUX_PADTYPE(17)pad->reg)->bit.pu_pd = pad->pupd;
+		break;
+	case 20:
+		(IOMUX_PADTYPE(20)pad->reg)->bit.pu_pd = pad->pupd;
+		break;
+	case 21:
+		(IOMUX_PADTYPE(21)pad->reg)->bit.pu_pd = pad->pupd;
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Program the remaining static fields of one pad according to its
+ * register layout: slew rate (sr=0), input enable (ie=1) and schmitt
+ * trigger (smt=1) where present.  Type 17 additionally sets its `e`
+ * bit -- presumably an output/driver enable, TODO confirm.
+ */
+static void fh_iomux_setrest(Iomux_Object *iomux_obj, Iomux_Pad *pad)
+{
+	switch (pad->reg_type) {
+	case 5:
+		(IOMUX_PADTYPE(5)pad->reg)->bit.sr = 0;
+		break;
+	case 8:
+		(IOMUX_PADTYPE(8)pad->reg)->bit.sr = 0;
+		break;
+	case 9:
+		(IOMUX_PADTYPE(9)pad->reg)->bit.ie = 1;
+		(IOMUX_PADTYPE(9)pad->reg)->bit.smt = 1;
+		break;
+	case 13:
+		(IOMUX_PADTYPE(13)pad->reg)->bit.ie = 1;
+		(IOMUX_PADTYPE(13)pad->reg)->bit.smt = 1;
+		break;
+	case 17:
+		(IOMUX_PADTYPE(17)pad->reg)->bit.sr = 0;
+		(IOMUX_PADTYPE(17)pad->reg)->bit.ie = 1;
+		(IOMUX_PADTYPE(17)pad->reg)->bit.e = 1;
+		(IOMUX_PADTYPE(17)pad->reg)->bit.smt = 1;
+		break;
+	case 20:
+		(IOMUX_PADTYPE(20)pad->reg)->bit.sr = 0;
+		(IOMUX_PADTYPE(20)pad->reg)->bit.ie = 1;
+		(IOMUX_PADTYPE(20)pad->reg)->bit.smt = 1;
+		break;
+	case 21:
+		(IOMUX_PADTYPE(21)pad->reg)->bit.sr = 0;
+		(IOMUX_PADTYPE(21)pad->reg)->bit.ie = 1;
+		(IOMUX_PADTYPE(21)pad->reg)->bit.smt = 1;
+		break;
+	default:
+		break;
+	}
+
+}
+
+/*
+ * Configure every pad in fh_iomux_cfg and write each result into the
+ * PMU iomux register bank (base offset 0x5c, one 32-bit word per pad).
+ * Pads with func_sel < 0 (RESERVED entries) are skipped.  Finally, bit
+ * 13 of the MAC_REF_CLK pad is set/cleared for RMII vs MII mode.
+ *
+ * NOTE(review): `reg` is a single stack variable shared by all pads and
+ * it is never cleared between loop iterations, so each pad's register
+ * value is built on top of the previous pad's bits -- confirm this is
+ * intended and not a latent configuration leak.
+ * NOTE(review): pads[i].reg is left pointing at this stack variable and
+ * dangles once this function returns; it must never be dereferenced by
+ * later code (fh_iomux_print() reads it while still in this frame).
+ */
+void fh_iomux_init(Iomux_Object *iomux_obj)
+{
+	int i;
+	u32 reg = 0;
+
+	iomux_obj->pads = fh_iomux_cfg;
+
+	for (i = 0; i < ARRAY_SIZE(fh_iomux_cfg); i++) {
+		iomux_obj->pads[i].reg_offset = i * 4;
+		iomux_obj->pads[i].reg = &reg;
+
+#if defined(CONFIG_FH_PWM_NUM) && CONFIG_FH_PWM_NUM == 4
+		/* for pwm3 only: reroute pad 0xa8 and tweak PMU reg 0x128 */
+		if(fh_iomux_cfg[i].func_sel == 2
+			&& iomux_obj->pads[i].reg_offset == 0xa8)
+		{
+			fh_pmu_set_reg(0x128, 0x00101110);
+			fh_iomux_cfg[i].func_sel = 1;
+		}
+#endif
+
+		if(iomux_obj->pads[i].func_sel < 0)
+			continue;
+
+		fh_iomux_setmfs(iomux_obj, &fh_iomux_cfg[i]);
+		fh_iomux_setcur(iomux_obj, &fh_iomux_cfg[i]);
+		fh_iomux_setpupd(iomux_obj, &fh_iomux_cfg[i]);
+		fh_iomux_setrest(iomux_obj, &fh_iomux_cfg[i]);
+		fh_pmu_set_reg(0x5c + iomux_obj->pads[i].reg_offset, reg);
+	}
+#ifdef CONFIG_FH_GMAC_RMII
+	//(IOMUX_PADTYPE(17)(iomux_obj->pads[18]).reg)->bit.e = 1;
+	reg = fh_pmu_get_reg(REG_PMU_PAD_MAC_REF_CLK_CFG);
+	reg |= (1 << 13);
+	fh_pmu_set_reg(REG_PMU_PAD_MAC_REF_CLK_CFG, reg);
+#else
+	//(IOMUX_PADTYPE(17)(iomux_obj->pads[18]).reg)->bit.e = 0;
+	reg = fh_pmu_get_reg(REG_PMU_PAD_MAC_REF_CLK_CFG);
+	reg &= ~(1 << 13);
+	fh_pmu_set_reg(REG_PMU_PAD_MAC_REF_CLK_CFG, reg);
+#endif
+#ifdef IOMUX_DEBUG
+	fh_iomux_print(iomux_obj);
+#endif
+}
diff --git a/arch/arm/mach-fh/irq.c b/arch/arm/mach-fh/irq.c
new file mode 100644
index 00000000..a17666fa
--- /dev/null
+++ b/arch/arm/mach-fh/irq.c
@@ -0,0 +1,151 @@
+/*
+ * Fullhan FH board support
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/system.h>
+#include <asm/mach/irq.h>
+
+#include <mach/chip.h>
+#include <mach/fh_predefined.h>
+#include <mach/irqs.h>
+
+/* Level-triggered INTC sources need no explicit ack; nothing to do. */
+static void fh_intc_ack(struct irq_data *d)
+{
+
+}
+/*
+ * Enable an interrupt source in the INTC.  IRQ numbers 0..31 live in
+ * the LOW enable register, 32..63 in the HIGH one; anything at or
+ * above NR_INTERNAL_IRQS is not handled by this controller.
+ */
+static void fh_intc_enable(struct irq_data *d)
+{
+	unsigned int bit = 1 << (d->irq & 31);
+
+	if (d->irq >= NR_INTERNAL_IRQS)
+		return;
+
+	if (d->irq > 31)
+		SET_REG_M(VINTC(REG_IRQ_EN_HIGH), bit, bit);
+	else
+		SET_REG_M(VINTC(REG_IRQ_EN_LOW), bit, bit);
+}
+/* Disable an interrupt source by clearing its enable-register bit. */
+static void fh_intc_disable(struct irq_data *d)
+{
+	unsigned int bit = 1 << (d->irq & 31);
+
+	if (d->irq >= NR_INTERNAL_IRQS)
+		return;
+
+	if (d->irq > 31)
+		SET_REG_M(VINTC(REG_IRQ_EN_HIGH), 0, bit);
+	else
+		SET_REG_M(VINTC(REG_IRQ_EN_LOW), 0, bit);
+}
+
+/* Set the IRQMASK bit to temporarily block delivery of an interrupt. */
+static void fh_intc_mask(struct irq_data *d)
+{
+	if (d->irq >= NR_INTERNAL_IRQS)
+		return;
+	if (d->irq > 31) {
+		SET_REG_M(VINTC(REG_IRQ_IRQMASK_HIGH), 1 << (d->irq - 32),
+			  1 << (d->irq - 32));
+	} else
+		SET_REG_M(VINTC(REG_IRQ_IRQMASK_LOW), 1 << d->irq, 1 << d->irq);
+}
+
+/* Clear the IRQMASK bit so a masked interrupt can be delivered again. */
+static void fh_intc_unmask(struct irq_data *d)
+{
+	if (d->irq >= NR_INTERNAL_IRQS)
+		return;
+	if (d->irq > 31)
+		SET_REG_M(VINTC(REG_IRQ_IRQMASK_HIGH), 0, 1 << (d->irq - 32));
+	else
+		SET_REG_M(VINTC(REG_IRQ_IRQMASK_LOW), 0, 1 << d->irq);
+}
+
+#ifdef CONFIG_PM
+
+static u32 wakeups_high;
+static u32 wakeups_low;
+static u32 backups_high;
+static u32 backups_low;
+
+/*
+ * Record which interrupts may wake the system; the accumulated masks
+ * are written to the enable registers in fh_irq_suspend().
+ *
+ * Fix: the bound check previously used NR_IRQS, but the 64-bit
+ * wakeups_high/low bitmap only covers the internal INTC sources, and
+ * for irq >= 64 the `1 << (irq - 32)` shift is undefined behavior.
+ * Reject anything at or above NR_INTERNAL_IRQS, consistent with the
+ * other fh_intc_* callbacks.
+ */
+static int fh_intc_set_wake(struct irq_data *d, unsigned value)
+{
+	if (unlikely(d->irq >= NR_INTERNAL_IRQS))
+		return -EINVAL;
+
+	if (value) {
+		if (d->irq > 31)
+			wakeups_high |= (1 << (d->irq - 32));
+		else
+			wakeups_low |= (1 << d->irq);
+	} else {
+		if (d->irq > 31)
+			wakeups_high &= ~(1 << (d->irq - 32));
+		else
+			wakeups_low &= ~(1 << d->irq);
+	}
+	return 0;
+}
+
+/*
+ * Entering suspend: save the current interrupt enable masks, then leave
+ * only the registered wake sources enabled.
+ */
+void fh_irq_suspend(void)
+{
+	backups_high = GET_REG(VINTC(REG_IRQ_EN_HIGH));
+	backups_low = GET_REG(VINTC(REG_IRQ_EN_LOW));
+
+	SET_REG(VINTC(REG_IRQ_EN_HIGH), wakeups_high);
+	SET_REG(VINTC(REG_IRQ_EN_LOW), wakeups_low);
+}
+
+/* Leaving suspend: restore the enable masks saved by fh_irq_suspend(). */
+void fh_irq_resume(void)
+{
+	SET_REG(VINTC(REG_IRQ_EN_HIGH), backups_high);
+	SET_REG(VINTC(REG_IRQ_EN_LOW), backups_low);
+}
+
+#else
+#define fh_intc_set_wake NULL
+#endif
+
+/* irq_chip callbacks for the FH on-chip interrupt controller. */
+static struct irq_chip fh_irq_chip = {
+	.name		= "FH_INTC",
+	.irq_ack	= fh_intc_ack,
+	.irq_mask	= fh_intc_mask,
+	.irq_unmask	= fh_intc_unmask,
+
+	.irq_enable	= fh_intc_enable,
+	.irq_disable	= fh_intc_disable,
+	.irq_set_wake	= fh_intc_set_wake,	/* NULL when !CONFIG_PM */
+};
+
+/*
+ * Early interrupt-controller init: disable every source, then register
+ * fh_irq_chip with a level-triggered flow handler for all NR_IRQS
+ * descriptors (including the range beyond the internal INTC sources).
+ */
+void __init fh_intc_init(void)
+{
+	int i;
+
+	/* disable all interrupts */
+	SET_REG(VINTC(REG_IRQ_EN_LOW), 0x0);
+	SET_REG(VINTC(REG_IRQ_EN_HIGH), 0x0);
+
+	for (i = 0; i < NR_IRQS; i++) {
+		irq_set_chip_and_handler(i, &fh_irq_chip, handle_level_irq);
+		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+	}
+
+}
diff --git a/arch/arm/mach-fh/pinctrl.c b/arch/arm/mach-fh/pinctrl.c
new file mode 100644
index 00000000..2b8f6ef1
--- /dev/null
+++ b/arch/arm/mach-fh/pinctrl.c
@@ -0,0 +1,345 @@
+#ifdef CONFIG_MACH_FH8830
+#ifdef CONFIG_MACH_FH8830_QFN
+#include <mach/fh8830_iopad_qfn.h>
+#else
+#include <mach/fh8830_iopad_bga.h>
+#endif
+#endif
+
+#ifdef CONFIG_MACH_FH8833
+#ifdef CONFIG_MACH_FH8833_QFN56
+#include <mach/fh8633_iopad_dvp.h>
+#else
+#include <mach/fh8833_iopad_mipi.h>
+#endif
+#endif
+
+#if defined(CONFIG_MACH_FH8856) || defined(CONFIG_MACH_FH8852)
+#include <mach/board_config.h>
+#ifdef FH_BOARD_8856
+#include <mach/fh8856_iopad.h>
+#endif
+#ifdef FH_BOARD_8852
+#include <mach/fh8852_iopad.h>
+#endif
+#ifdef FH_BOARD_8858
+#include <mach/fh8858_iopad.h>
+#endif
+#endif
+
+#ifdef CONFIG_MACH_FH8626V100
+#include <mach/board_config.h>
+#include <mach/fh8626v100_iopad.h>
+#endif
+
+#ifdef CONFIG_MACH_ZY2
+#include <mach/board_config.h>
+#include <mach/zy2_iopad.h>
+#endif
+
+#include <mach/pinctrl.h>
+#include <linux/module.h>
+
+/* #define FH_PINCTRL_DEBUG */
+#ifdef FH_PINCTRL_DEBUG
+#define PRINT_DBG(fmt,args...) OS_PRINT(fmt,##args)
+#else
+#define PRINT_DBG(fmt,args...) do{} while(0)
+#endif
+
+static PinCtrl_Object pinctrl_obj;
+OS_LIST fh_pinctrl_devices = OS_LIST_INIT(fh_pinctrl_devices);
+
+/*
+ * Warn when another configured pad (scanning from start_pad onward)
+ * exposes the same function name as @pin.  Diagnostic only; the pin
+ * list is not modified.
+ */
+static void fh_pinctrl_check_duplicate_pin(PinCtrl_Pin *pin, int start_pad)
+{
+	int pad;
+
+	if (pin == OS_NULL)
+		return;
+
+	for (pad = start_pad; pad < PAD_NUM; pad++) {
+		PinCtrl_Pin *other = pinctrl_obj.pinlist[pad];
+
+		if (!other || other == pin)
+			continue;
+		if (strcmp(pin->func_name, other->func_name) == 0)
+			OS_PRINT("WARNING: %s already exists in pad %d\n",
+				 other->func_name, other->pad_id);
+	}
+}
+
+/*
+ * Apply one pin's configuration (function select, pull-up/down, drive
+ * strength, schmitt trigger, input enable) to its pad register and
+ * record the pin in pinctrl_obj.pinlist.  Returns 0 on success,
+ * negative on error.
+ *
+ * NOTE(review): pin->reg is pointed at the local variable `reg`, so it
+ * dangles as soon as this function returns; fh_pinctrl_init_pin() later
+ * dereferences it.  The register shadow should probably live in the pin
+ * structure itself -- confirm and rework.
+ */
+static int fh_pinctrl_func_select(PinCtrl_Pin *pin, unsigned int flag)
+{
+	unsigned int reg;
+
+	if(!pin)
+	{
+		OS_PRINT("ERROR: pin is null\n\n");
+		return -1;
+	}
+
+	if(flag & NEED_CHECK_PINLIST)
+	{
+		/* refuse to program the same pad twice during full init */
+		if(pinctrl_obj.pinlist[pin->pad_id])
+		{
+			OS_PRINT("ERROR: pad %d has already been set\n\n", pin->pad_id);
+			return -2;
+		}
+	}
+	fh_pinctrl_check_duplicate_pin(pin, 0);
+
+	/* read-modify-write via a local shadow of the pad register */
+	reg = GET_REG(pinctrl_obj.vbase + pin->reg_offset);
+
+	pin->reg = (PinCtrl_Register *)&reg;
+
+	pin->reg->bit.mfs = pin->func_sel;
+
+	if (pin->pullup_pulldown == PUPD_DOWN) {
+		pin->reg->bit.pun = PUPD_DISABLE;
+		pin->reg->bit.pdn = PUPD_ENABLE;
+	}
+	else if(pin->pullup_pulldown == PUPD_UP)
+	{
+		pin->reg->bit.pun = PUPD_ENABLE;
+		pin->reg->bit.pdn = PUPD_DISABLE;
+	}
+	else
+	{
+		pin->reg->bit.pdn = PUPD_ZERO;
+		pin->reg->bit.pun = PUPD_ZERO;
+	}
+
+	pin->reg->bit.ds = pin->driving_curr;
+	pin->reg->bit.st = 1;	/* schmitt trigger on */
+
+	pin->reg->bit.ie = 1;	/* input enable on */
+
+	SET_REG(pinctrl_obj.vbase + pin->reg_offset, pin->reg->dw);
+
+	pinctrl_obj.pinlist[pin->pad_id] = pin;
+
+	return 0;
+}
+
+/*
+ * Program the pin currently selected (cur_pin) on one mux.
+ * Returns 0 on success or a negative error code when the selection is
+ * out of range or the slot is empty.
+ */
+static int fh_pinctrl_mux_switch(PinCtrl_Mux *mux, unsigned int flag)
+{
+	if(mux->cur_pin >= MUX_NUM)
+	{
+		OS_PRINT("ERROR: selected function is not exist, sel_func=%d\n\n", mux->cur_pin);
+		return -3;
+	}
+
+	if(!mux->mux_pin[mux->cur_pin])
+	{
+		OS_PRINT("ERROR: mux->mux_pin[%d] has no pin\n\n", mux->cur_pin);
+		return -4;
+	}
+
+	PRINT_DBG("\t%s[%d]\n", mux->mux_pin[mux->cur_pin]->func_name, mux->cur_pin);
+	return fh_pinctrl_func_select(mux->mux_pin[mux->cur_pin], flag);
+}
+
+
+/*
+ * Program every mux belonging to @dev; stops at and returns the first
+ * error from fh_pinctrl_mux_switch(), otherwise 0.
+ *
+ * The PinCtrl_Mux pointers are stored as a trailing array of 32-bit
+ * words right after the fixed part of PinCtrl_Device, so
+ * `sizeof(*dev) - 4 + i*4` walks that array.  NOTE(review): this
+ * assumes 32-bit pointers and that the struct ends in one pointer-sized
+ * slot -- confirm against the PinCtrl_Device definition.
+ */
+static int fh_pinctrl_device_switch(PinCtrl_Device *dev, unsigned int flag)
+{
+	int i, ret;
+	for(i=0; i<dev->mux_count; i++)
+	{
+		unsigned int *mux_addr = (unsigned int *)((unsigned int)dev
+				+ sizeof(*dev) - 4 + i*4);
+		PinCtrl_Mux *mux = (PinCtrl_Mux *)(*mux_addr);
+
+		ret = fh_pinctrl_mux_switch(mux, flag);
+		if(ret)
+		{
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Look @name up in the registered pinctrl device list.
+ * Returns the matching device, or NULL when no device has that name.
+ */
+static PinCtrl_Device * fh_pinctrl_get_device_by_name(char *name)
+{
+	PinCtrl_Device *dev;
+
+	list_for_each_entry(dev, &fh_pinctrl_devices, list) {
+		if (strcmp(name, dev->dev_name) == 0)
+			return dev;
+	}
+
+	return OS_NULL;
+}
+
+/*
+ * Report (debug builds only) every pad that is still unconfigured.
+ * Always returns 0; purely diagnostic.
+ */
+int fh_pinctrl_check_pinlist(void)
+{
+	int pad;
+
+	for (pad = 0; pad < PAD_NUM; pad++) {
+		if (pinctrl_obj.pinlist[pad] == OS_NULL)
+			PRINT_DBG("ERROR: pad %d is still empty\n", pad);
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the pad bookkeeping, then program each device named in
+ * @devlist.  Returns 0 on success or the first error encountered; on
+ * success any still-empty pads are reported (debug builds).
+ */
+static int fh_pinctrl_init_devices(char** devlist, int listsize, unsigned int flag)
+{
+	int i, ret;
+	PinCtrl_Device *dev;
+
+	memset(pinctrl_obj.pinlist, 0, sizeof(pinctrl_obj.pinlist));
+
+	for(i=0; i<listsize; i++)
+	{
+		dev = fh_pinctrl_get_device_by_name(devlist[i]);
+
+		if(!dev)
+		{
+			OS_PRINT("ERROR: cannot find device %s\n", devlist[i]);
+			return -5;
+		}
+
+		PRINT_DBG("%s:\n", dev->dev_name);
+		ret = fh_pinctrl_device_switch(dev, flag);
+		PRINT_DBG("\n");
+		if(ret)
+		{
+			return ret;
+		}
+
+	}
+
+	fh_pinctrl_check_pinlist();
+
+	return 0;
+
+}
+
+/*
+ * Post-init pass: clear bit 12 of every unconfigured pad's register,
+ * and cache the ie/oe flags of each configured pad.
+ *
+ * NOTE(review): pin->reg was set by fh_pinctrl_func_select() to the
+ * address of a local variable in a stack frame that no longer exists,
+ * so the `pin->reg->dw = ...` store below writes through a dangling
+ * pointer -- undefined behavior that can corrupt the current stack.
+ * This needs to be reworked to keep the shadow word inside the pin.
+ */
+static void fh_pinctrl_init_pin(void)
+{
+	int i;
+
+	for(i=0; i<PAD_NUM; i++)
+	{
+		PinCtrl_Pin *pin = pinctrl_obj.pinlist[i];
+		if(!pin)
+		{
+			unsigned int reg;
+			PRINT_DBG("ERROR: pad %d is empty\n", i);
+			reg = GET_REG(pinctrl_obj.vbase + i * 4);
+			reg &= ~(0x1000);	/* clear bit 12 on unused pads */
+			SET_REG(pinctrl_obj.vbase + i * 4, reg);
+			continue;
+		}
+		pin->reg->dw = GET_REG(pinctrl_obj.vbase +
+			pin->reg_offset);
+
+		pin->input_enable = pin->reg->bit.ie;
+		pin->output_enable = pin->reg->bit.oe;
+	}
+}
+
+
+/*
+ * Pinctrl entry point.  @base is the (virtual) address of the pad
+ * register block; it is stored as both vbase and pbase.  Builds the
+ * device list, applies the board's selected devices with duplicate-pad
+ * checking, then snapshots per-pin state.
+ */
+void fh_pinctrl_init(unsigned int base)
+{
+	pinctrl_obj.vbase = pinctrl_obj.pbase = (void *)base;
+
+	fh_pinctrl_init_devicelist(&fh_pinctrl_devices);
+	fh_pinctrl_init_devices(fh_pinctrl_selected_devices,
+			ARRAY_SIZE(fh_pinctrl_selected_devices),
+			NEED_CHECK_PINLIST);
+	fh_pinctrl_init_pin();
+}
+
+/*
+ * Dump every configured pad (id, function, register offset/value,
+ * selected function, ie/oe, pull state) into a seq_file, typically for
+ * a procfs/debugfs show callback.  Also re-runs the duplicate-name
+ * check for each pad.
+ */
+void fh_pinctrl_prt(struct seq_file *sfile)
+{
+	int i;
+	seq_printf(sfile, "%2s\t%8s\t%4s\t%8s\t%4s\t%4s\t%4s\t%4s\n",
+			"id", "name", "addr", "reg", "sel", "ie", "oe", "pupd");
+	for(i=0; i<PAD_NUM; i++)
+	{
+		if(!pinctrl_obj.pinlist[i])
+		{
+			OS_PRINT("ERROR: pad %d is empty\n", i);
+			continue;
+		}
+		fh_pinctrl_check_duplicate_pin(pinctrl_obj.pinlist[i], i+1);
+		seq_printf(sfile, "%02d\t%8s\t0x%04x\t0x%08x\t%04d\t%04d\t%04d\t%04d\n",
+				pinctrl_obj.pinlist[i]->pad_id,
+				pinctrl_obj.pinlist[i]->func_name,
+				pinctrl_obj.pinlist[i]->reg_offset + 0x80,
+				GET_REG(pinctrl_obj.vbase + pinctrl_obj.pinlist[i]->reg_offset),
+				pinctrl_obj.pinlist[i]->func_sel,
+				pinctrl_obj.pinlist[i]->input_enable,
+				pinctrl_obj.pinlist[i]->output_enable,
+				pinctrl_obj.pinlist[i]->pullup_pulldown);
+	}
+
+}
+
+
+/*
+ * Select pin @muxsel on mux @muxname of device @devname and program it.
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * Fix: the original ended with `if (i == dev->mux_count) { ... return -6; }`
+ * followed by fh_pinctrl_check_pinlist()/return 0 -- the condition was
+ * always true once the loop fell through, so the trailing statements
+ * were unreachable dead code.  The loop now returns directly on a
+ * match and the fall-through path reports the missing mux.
+ */
+int fh_pinctrl_smux(char *devname, char* muxname, int muxsel, unsigned int flag)
+{
+	PinCtrl_Device *dev;
+	int i;
+
+	dev = fh_pinctrl_get_device_by_name(devname);
+	if (!dev) {
+		OS_PRINT("ERROR: cannot find device %s\n", devname);
+		return -4;
+	}
+
+	for (i = 0; i < dev->mux_count; i++) {
+		/* mux pointers live in a 32-bit word array trailing the struct */
+		unsigned int *mux_addr = (unsigned int *)((unsigned int)dev
+				+ sizeof(*dev) - 4 + i*4);
+		PinCtrl_Mux *mux = (PinCtrl_Mux *)(*mux_addr);
+
+		if (!strcmp(muxname, mux->mux_pin[0]->func_name)) {
+			mux->cur_pin = muxsel;
+			return fh_pinctrl_mux_switch(mux, flag);
+		}
+	}
+
+	OS_PRINT("ERROR: cannot find mux %s of device %s\n", muxname, devname);
+	return -6;
+}
+EXPORT_SYMBOL(fh_pinctrl_smux);
+
+/*
+ * Program all muxes of the named device (runtime reconfiguration).
+ * Returns 0 on success, negative error otherwise; on success the pad
+ * list is re-checked for gaps (diagnostic only).
+ */
+int fh_pinctrl_sdev(char *devname, unsigned int flag)
+{
+	PinCtrl_Device *dev;
+	int ret;
+
+	dev = fh_pinctrl_get_device_by_name(devname);
+	if(!dev)
+	{
+		OS_PRINT("ERROR: cannot find device %s\n", devname);
+		return -7;
+	}
+
+	OS_PRINT("%s:\n", dev->dev_name);
+	ret = fh_pinctrl_device_switch(dev, flag);
+	OS_PRINT("\n");
+	if(ret)
+	{
+		return ret;
+	}
+
+	fh_pinctrl_check_pinlist();
+
+	return 0;
+}
+EXPORT_SYMBOL(fh_pinctrl_sdev);
diff --git a/arch/arm/mach-fh/pm.c b/arch/arm/mach-fh/pm.c
new file mode 100644
index 00000000..a7ec90b5
--- /dev/null
+++ b/arch/arm/mach-fh/pm.c
@@ -0,0 +1,223 @@
+/*
+ * FH Power Management Routines
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+#include <asm/delay.h>
+
+#include <mach/sram.h>
+#include <mach/system.h>
+#include <mach/io.h>
+#include <mach/gpio.h>
+#include <mach/ddrc.h>
+#include <mach/pmu.h>
+
+#ifdef CONFIG_PM
+static u32 old_clk_gate = 0;
+
+static void (*fh_sram_suspend)(void);
+
+static inline void fh_pm_pll0_to_xtal(void)
+{
+ u32 reg;
+
+ reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
+ reg &= ~(0x1);
+ fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
+}
+
+static inline void fh_pm_xtal_to_pll0(void)
+{
+ u32 reg;
+
+ reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
+ reg |= 0x1;
+ fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
+}
+
+static inline void fh_pm_gate_clocks(void)
+{
+ u32 reg;
+ old_clk_gate = fh_pmu_get_reg(REG_PMU_CLK_GATE);
+ reg = fh_pmu_get_reg(REG_PMU_CLK_GATE);
+ reg |= 0x7fff3fb1;
+ fh_pmu_set_reg(REG_PMU_CLK_GATE, reg);
+}
+
+static inline void fh_pm_ungate_clocks(void)
+{
+ u32 reg;
+
+ reg = old_clk_gate;
+ fh_pmu_set_reg(REG_PMU_CLK_GATE, reg);
+}
+
+
+static void fh_sram_push(void *dest, void *src, unsigned int size)
+{
+ memcpy(dest, src, size);
+ flush_icache_range((unsigned long)dest, (unsigned long)(dest + size));
+}
+
+static int fh_pm_valid_state(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_ON:
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static void fh_pm_suspend_to_ram(void)
+{
+ fh_pm_pll0_to_xtal();
+ fh_pm_gate_clocks();
+
+ fh_sram_suspend();
+
+ fh_pm_ungate_clocks();
+ fh_pm_xtal_to_pll0();
+}
+
+static inline void fh_ddrc_selfrefresh_enable(void)
+{
+ u32 reg;
+
+ /*
+ * Ensure that the Cadence DDR Controller is idle,
+ * that is when the controller_busy signal is low.
+ */
+ do {
+ reg = readl(VA_DDRC_REG_BASE + OFFSET_DENAL_CTL_57);
+ } while (reg & DDRC_CONTROLLER_BUSY);
+
+ /*
+ * Put the memories into self-refresh mode
+ * by issuing one of the self-refresh entry commands
+ * through the Low Power Control Module
+ */
+ writel(DDRC_LPI_SR_WAKEUP_TIME | DDRC_LP_CMD_SELFREFRESH | DDRC_CKSRX_DELAY,
+ VA_DDRC_REG_BASE + OFFSET_DENAL_CTL_31);
+
+	/* intentionally do not wait for CKE status; polling below is disabled */
+ /*
+ do
+ {
+ reg = readl(VA_DDRC_REG_BASE + OFFSET_DENAL_CTL_97);
+ }
+ while(reg & DDRC_CKE_STATUS);
+ */
+}
+
+static inline void fh_ddrc_selfrefresh_disable(void)
+{
+	/* Exit any low-power state */
+ writel(DDRC_LPI_SR_WAKEUP_TIME | DDRC_LP_CMD_EXITLOWPOWER | DDRC_CKSRX_DELAY,
+ VA_DDRC_REG_BASE + OFFSET_DENAL_CTL_31);
+}
+
+static void fh_pm_suspend_to_cache(void)
+{
+ asm volatile("mov r1, #0\n\t"
+ "mcr p15, 0, r1, c7, c10, 4\n\t"
+ "mcr p15, 0, r1, c7, c0, 4\n\t"
+ : /* no output */
+ : /* no input */
+ : "r1");
+
+ fh_ddrc_selfrefresh_enable();
+
+ asm volatile("mov r1, #0\n\t"
+ "mcr p15, 0, r1, c7, c10, 4\n\t"
+ "mcr p15, 0, r1, c7, c0, 4\n\t"
+ : /* no output */
+ : /* no input */
+ : "r1");
+ fh_ddrc_selfrefresh_disable();
+}
+
+static int fh_pm_enter(suspend_state_t state)
+{
+ int ret = 0;
+
+ fh_irq_suspend();
+ fh_gpio_irq_suspend();
+
+ switch (state) {
+ case PM_SUSPEND_ON:
+ cpu_do_idle();
+ break;
+ case PM_SUSPEND_STANDBY:
+ fh_pm_suspend_to_cache();
+ break;
+ case PM_SUSPEND_MEM:
+ fh_pm_suspend_to_ram();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ fh_gpio_irq_resume();
+ fh_irq_resume();
+
+ return ret;
+}
+
+static const struct platform_suspend_ops fh_pm_ops = {
+ .enter = fh_pm_enter,
+ .valid = fh_pm_valid_state,
+};
+
+static int __init fh_pm_probe(struct platform_device *pdev)
+{
+ fh_sram_suspend = sram_alloc(fh_cpu_suspend_sz, NULL);
+ if (!fh_sram_suspend) {
+ dev_err(&pdev->dev, "cannot allocate SRAM memory\n");
+ return -ENOMEM;
+ }
+ fh_sram_push(fh_sram_suspend, fh_cpu_suspend,
+ fh_cpu_suspend_sz);
+ suspend_set_ops(&fh_pm_ops);
+
+ return 0;
+}
+
+static int __exit fh_pm_remove(struct platform_device *pdev)
+{
+ sram_free(fh_sram_suspend, fh_cpu_suspend_sz);
+ return 0;
+}
+
+static struct platform_driver fh_pm_driver = {
+ .driver =
+ {
+ .name = "pm-fh",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(fh_pm_remove),
+};
+
+static int __init fh_pm_init(void)
+{
+ return platform_driver_probe(&fh_pm_driver, fh_pm_probe);
+}
+late_initcall(fh_pm_init);
+#endif
diff --git a/arch/arm/mach-fh/pmu.c b/arch/arm/mach-fh/pmu.c
new file mode 100644
index 00000000..ed97a53f
--- /dev/null
+++ b/arch/arm/mach-fh/pmu.c
@@ -0,0 +1,48 @@
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#include <mach/io.h>
+#include <mach/chip.h>
+#include <mach/pmu.h>
+
+static int fh_pmu_flag_stop = 0;
+
+void fh_pmu_stop(void)
+{
+ fh_pmu_flag_stop = 1;
+}
+EXPORT_SYMBOL(fh_pmu_stop);
+
+void fh_pmu_set_reg(u32 offset, u32 data)
+{
+ if (fh_pmu_flag_stop)
+ return;
+
+ if (offset > PMU_REG_SIZE) {
+ pr_err("fh_pmu_set_reg: offset is out of range");
+ return;
+ }
+ writel(data, VPMU(PMU_REG_BASE + offset));
+}
+EXPORT_SYMBOL(fh_pmu_set_reg);
+
+u32 fh_pmu_get_reg(u32 offset)
+{
+ if (fh_pmu_flag_stop)
+ return 0;
+
+ if (offset > PMU_REG_SIZE) {
+ pr_err("fh_pmu_get_reg: offset is out of range");
+ return 0;
+ }
+ return readl(VPMU(PMU_REG_BASE + offset));
+}
+EXPORT_SYMBOL(fh_pmu_get_reg);
+
+int fh_pmu_init(void)
+{
+ return fh_chipid_init();
+}
diff --git a/arch/arm/mach-fh/sleep.S b/arch/arm/mach-fh/sleep.S
new file mode 100644
index 00000000..5eb4ac23
--- /dev/null
+++ b/arch/arm/mach-fh/sleep.S
@@ -0,0 +1,144 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <mach/chip.h>
+#include <mach/io.h>
+#include <mach/ddrc.h>
+
+#ifdef CONFIG_PM
+#define PMU_MASK_SWITCH_PLL0 0x1
+#define PMU_MASK_DDR_SEL 0x1000000
+#define PMU_MASK_DDR_DIV 0xff
+#define PMU_MASK_PLL1_PDN 0x80000000
+
+
+ .macro wait_ddrc_idle
+1: ldr r3, [r1, #OFFSET_DENAL_CTL_57]
+ tst r3, #DDRC_CONTROLLER_BUSY
+ bne 1b
+ .endm
+
+
+ .macro enable_ddrc_selfrefresh
+ ldr r3, .fh_ddrc_cmd_en_self_refresh
+ str r3, [r1, #OFFSET_DENAL_CTL_31]
+ .endm
+
+
+ .macro wait_ddrc_cke
+1: ldr r3, [r1, #OFFSET_DENAL_CTL_97]
+ tst r3, #DDRC_CKE_STATUS
+ bne 1b
+ .endm
+
+
+ .macro disable_ddrc_selfrefresh
+ ldr r3, .fh_ddrc_cmd_dis_self_refresh
+ str r3, [r1, #OFFSET_DENAL_CTL_31]
+ .endm
+
+
+ .macro ddr_to_pll0
+ ldr r3, [r2, #REG_PMU_CLK_SEL]
+ bic r3, r3, #PMU_MASK_DDR_SEL
+ str r3, [r2, #REG_PMU_CLK_SEL]
+ .endm
+
+
+ .macro ddr_to_pll1
+ ldr r3, [r2, #REG_PMU_CLK_SEL]
+ orr r3, r3, #PMU_MASK_DDR_SEL
+ str r3, [r2, #REG_PMU_CLK_SEL]
+ .endm
+
+#if 1
+ .macro ddr_dec_feq
+ ldr r3, [r2, #REG_PMU_CLK_DIV1]
+ orr r3, r3, #PMU_MASK_DDR_DIV
+ str r3, [r2, #REG_PMU_CLK_SEL]
+ .endm
+
+
+ .macro ddr_inc_feq
+ ldr r3, [r2, #REG_PMU_CLK_DIV1]
+ bic r3, r3, #PMU_MASK_DDR_DIV
+ orr r3, r3, #0x1
+ str r3, [r2, #REG_PMU_CLK_SEL]
+ .endm
+
+
+ .macro pll1_power_down
+ ldr r3, [r2, #REG_PMU_PLL1_CTRL]
+ bic r3, r3, #PMU_MASK_PLL1_PDN
+ str r3, [r2, #REG_PMU_PLL1_CTRL]
+ .endm
+
+
+ .macro pll1_power_on
+ ldr r3, [r2, #REG_PMU_PLL1_CTRL]
+ orr r3, r3, #PMU_MASK_PLL1_PDN
+ str r3, [r2, #REG_PMU_PLL1_CTRL]
+ .endm
+#endif
+
+ .text
+ENTRY(fh_cpu_suspend)
+
+ stmfd sp!, {r0-r12, lr} @ save registers on stack
+
+ /*
+	 *
+ * Register usage:
+ * R1 = Base address of DDRC
+ * R2 = Base register for PMU
+ * R3 = temporary register
+ * R4 = temporary register
+ *
+ * R9 = Test address
+ */
+
+ ldr r1, .fh_va_base_ddrc
+ ldr r2, .fh_va_base_pmu
+ ldr r9, .fh_va_test_addr
+
+ wait_ddrc_idle
+ enable_ddrc_selfrefresh
+ wait_ddrc_cke
+
+ @ddr_dec_feq
+ ddr_to_pll0
+
+ @pll1_power_down
+
+ mcr p15, 0, r0, c7, c10, 4 @ Data Synchronization Barrier operation
+ mcr p15, 0, r0, c7, c0, 4 @ Wait-for-Interrupt
+
+ @pll1_power_on
+
+ @ddr_inc_feq
+ ddr_to_pll1
+
+ disable_ddrc_selfrefresh
+
+ ldmfd sp!, {r0-r12, pc}
+
+ENDPROC(fh_cpu_suspend)
+
+.fh_va_base_ddrc:
+ .word VA_DDRC_REG_BASE
+
+.fh_va_base_pmu:
+ .word VA_PMU_REG_BASE
+
+.fh_va_test_addr:
+ .word 0xc03efef0
+
+.fh_ddrc_cmd_en_self_refresh:
+ .word 0x3000a01
+
+.fh_ddrc_cmd_dis_self_refresh:
+ .word 0x3000101
+
+ENTRY(fh_cpu_suspend_sz)
+ .word . - fh_cpu_suspend
+ENDPROC(fh_cpu_suspend_sz)
+#endif
diff --git a/arch/arm/mach-fh/sram.c b/arch/arm/mach-fh/sram.c
new file mode 100644
index 00000000..4df4838b
--- /dev/null
+++ b/arch/arm/mach-fh/sram.c
@@ -0,0 +1,53 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/genalloc.h>
+#include <asm-generic/sizes.h>
+#include <mach/sram.h>
+#include <mach/chip.h>
+#include <mach/io.h>
+
+static struct gen_pool *sram_pool;
+
+void *sram_alloc(size_t len, dma_addr_t *dma)
+{
+ unsigned long vaddr;
+
+ if (!sram_pool)
+ return NULL;
+
+ vaddr = gen_pool_alloc(sram_pool, len);
+ if (!vaddr)
+ return NULL;
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(sram_alloc);
+
+void sram_free(void *addr, size_t len)
+{
+ gen_pool_free(sram_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(sram_free);
+
+
+/*
+ * REVISIT This supports CPU and DMA access to/from SRAM, but it
+ * doesn't (yet?) support some other notable uses of SRAM: as TCM
+ * for data and/or instructions; and holding code needed to enter
+ * and exit suspend states (while DRAM can't be used).
+ */
+static int __init sram_init(void)
+{
+ int status = 0;
+
+ sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1);
+ if (!sram_pool)
+ status = -ENOMEM;
+
+ if (sram_pool)
+ status = gen_pool_add(sram_pool, VA_RAM_REG_BASE, SRAM_SIZE, -1);
+ WARN_ON(status < 0);
+ return status;
+}
+core_initcall(sram_init);
+
diff --git a/arch/arm/mach-fh/time.c b/arch/arm/mach-fh/time.c
new file mode 100644
index 00000000..bde1f79e
--- /dev/null
+++ b/arch/arm/mach-fh/time.c
@@ -0,0 +1,281 @@
+/*
+ * FH timer subsystem
+ *
+ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/irqreturn.h>
+#include <linux/math64.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <asm/sched_clock.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/time.h>
+
+#include <mach/hardware.h>
+#include <mach/timex.h>
+#include <mach/chip.h>
+#include <mach/fh_predefined.h>
+#include <mach/irqs.h>
+#include <mach/pmu.h>
+#include <mach/clock.h>
+#include <mach/fh_simple_timer.h>
+
+
+static struct clock_event_device clockevent_fh;
+static struct clocksource clocksource_fh;
+#ifndef CONFIG_USE_PTS_AS_CLOCKSOURCE
+static unsigned int prev_cycle;
+#endif
+
+struct clk *timer_clk, *pts_clk;
+
+/*
+ * clockevent
+ */
+static int fh_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ int sync_cnt = 0;
+
+ SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(1)), 0x00, 0x1);
+ /* zy/ticket/100 : update apb Timer LOADCNT */
+	/* CURRENTVALUE couldn't start from the new LOADCOUNT */
+	/* because the timer clk is 1 MHz while the APB clk is 150 MHz */
+	/* poll the current count to confirm the timer is disabled */
+ while (GET_REG(VTIMER(REG_TIMER_CUR_VAL(1))) != 0) {
+ sync_cnt++;
+ if (sync_cnt >= 50) {
+			/* the typical count is 5 with a 1 MHz timer clk, */
+			/* so 50 iterations safely indicates an error */
+ pr_err("timer problem,can't disable");
+ }
+ }
+
+ SET_REG(VTIMER(REG_TIMER_LOADCNT(1)), cycles);
+ SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(1)), 0x01, 0x1);
+#ifdef CONFIG_ARCH_FH8810
+ unsigned int curr_val;
+
+ curr_val = GET_REG(VTIMER(REG_TIMER_CUR_VAL(1))) ;
+ if (curr_val > 0x80000000) {
+ panic("timer curr %u, want cycles %lu\n", curr_val, cycles);
+
+ SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(1)), 0x01, 0x1);
+ SET_REG(VTIMER(REG_TIMER_LOADCNT(1)), cycles);
+
+ fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfff7ffff);
+ while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff)
+ ;
+ }
+#endif
+ return 0;
+}
+
+static void fh_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ SET_REG(VTIMER(REG_TIMER_CTRL_REG(1)), 0x3);
+ SET_REG(VTIMER(REG_TIMER_LOADCNT(1)), TIMER_CLK / HZ);
+#ifdef CONFIG_ARCH_FH8810
+ fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfff7ffff);
+ while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff)
+ ;
+#endif
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ SET_REG(VTIMER(REG_TIMER_CTRL_REG(1)), 0x0);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ SET_REG(VTIMER(REG_TIMER_CTRL_REG(1)), 0x3);
+ break;
+ }
+}
+
+
+static irqreturn_t fh_clock_timer_interrupt_handle(int irq, void *dev_id)
+{
+ unsigned int status;
+ status = GET_REG(VTIMER(REG_TIMERS_INTSTATUS));
+
+
+#ifdef CONFIG_FH_SIMPLE_TIMER
+ if (status & (1 << SIMPLE_TIMER_BASE))
+ fh_simple_timer_interrupt();
+#endif
+ if (status & 0x2) {
+ GET_REG(VTIMER(REG_TIMER_EOI_REG(1)));
+ clockevent_fh.event_handler(&clockevent_fh);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction fh_eventtimer_irq = {
+ .name = "System Timer Tick",
+ .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER,
+ .handler = fh_clock_timer_interrupt_handle,
+ .dev_id = &clockevent_fh,
+};
+
+
+static void fh_timer_resources(void)
+{
+
+}
+static DEFINE_CLOCK_DATA(cd);
+
+static void notrace fh_update_sched_clock(void)
+{
+ const cycle_t cyc = clocksource_fh.read(&clocksource_fh);
+ update_sched_clock(&cd, cyc, (u32)~0);
+}
+
+unsigned long long notrace sched_clock(void)
+{
+ const cycle_t cyc = clocksource_fh.read(&clocksource_fh);
+
+ return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+static void fh_clocksource_init(void)
+{
+#ifdef CONFIG_USE_PTS_AS_CLOCKSOURCE
+ unsigned long clock_tick_rate = pts_clk->frequency;
+#else
+ unsigned long clock_tick_rate = timer_clk->frequency;
+ prev_cycle = 0;
+#endif
+
+ if (clocksource_register_hz(&clocksource_fh, clock_tick_rate))
+ panic("register clocksouce :%s error\n", clocksource_fh.name);
+
+ printk(KERN_INFO "timer mult: 0x%x, timer shift: 0x%x\n",
+ clocksource_fh.mult, clocksource_fh.shift);
+
+ /* force check the mult/shift of clocksource */
+ init_fixed_sched_clock(&cd, fh_update_sched_clock, 32, clock_tick_rate,
+ clocksource_fh.mult, clocksource_fh.shift);
+
+ SET_REG(VTIMER(REG_TIMER_CTRL_REG(0)), 0x5);
+}
+
+static cycle_t fh_clocksource_read(struct clocksource *cs)
+{
+#ifdef CONFIG_USE_PTS_AS_CLOCKSOURCE
+#if (defined(CONFIG_ARCH_FH8856) || defined(CONFIG_ARCH_ZY2) \
+ || defined(CONFIG_ARCH_FH8626V100))
+ fh_pmu_set_reg(REG_PMU_PTSLO, 0x01);
+ u64 high = fh_pmu_get_reg(REG_PMU_PTSHI);
+ u64 low = fh_pmu_get_reg(REG_PMU_PTSLO);
+ u64 pts = (((unsigned long long)high)<<32)|((unsigned long long)low);
+ return pts;
+#else
+ return GET_REG(VPAE(REG_PAE_PTS_REG));
+#endif
+#else
+ unsigned int cycle;
+ cycle = ~GET_REG(VTIMER(REG_TIMER_CUR_VAL(0)));
+#ifdef CONFIG_ARCH_FH8810
+ if (unlikely(prev_cycle > cycle))
+ cycle = ~GET_REG(VTIMER(REG_TIMER_CUR_VAL(0)));
+ prev_cycle = cycle;
+#endif
+ return cycle;
+#endif
+}
+
+static void fh_clockevent_init(void)
+{
+ setup_irq(TMR0_IRQ, &fh_eventtimer_irq);
+ clockevent_fh.mult = div_sc(timer_clk->frequency,
+ NSEC_PER_SEC, clockevent_fh.shift);
+ clockevent_fh.max_delta_ns = clockevent_delta2ns(0xffffffff,
+ &clockevent_fh);
+
+ clockevent_fh.min_delta_ns = clockevent_delta2ns(0xf, &clockevent_fh);
+
+ clockevent_fh.cpumask = cpumask_of(0);
+ clockevents_register_device(&clockevent_fh);
+}
+
+
+static void __init fh_timer_init(void)
+{
+
+ timer_clk = clk_get(NULL, "tmr0_clk");
+ pts_clk = clk_get(NULL, "pts_clk");
+
+#ifdef CONFIG_USE_PTS_AS_CLOCKSOURCE
+ clk_set_rate(pts_clk, PAE_PTS_CLK);
+ clk_enable(pts_clk);
+ pts_clk->frequency = PAE_PTS_CLK;
+#if (defined(CONFIG_ARCH_FH8856) || defined(CONFIG_ARCH_ZY2) \
+ || defined(CONFIG_ARCH_FH8626V100))
+ u32 pmuuser0 = fh_pmu_get_reg(REG_PMU_USER0);
+ pmuuser0 &= ~(0x1); /*pts hw refresh mode */
+ fh_pmu_set_reg(REG_PMU_USER0, pmuuser0);
+#endif
+#endif
+
+ clk_set_rate(timer_clk, TIMER_CLK);
+ clk_enable(timer_clk);
+
+ timer_clk->frequency = TIMER_CLK;
+
+ if (IS_ERR(timer_clk) || IS_ERR(pts_clk))
+ pr_err("fh_timer: clock is not defined\n");
+
+
+ fh_timer_resources();
+ fh_clocksource_init();
+ fh_clockevent_init();
+#ifdef CONFIG_FH_SIMPLE_TIMER
+ fh_simple_timer_init();
+#endif
+}
+
+
+
+static struct clocksource clocksource_fh = {
+ .name = "fh_clocksource",
+ .rating = 300,
+ .read = fh_clocksource_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+
+static struct clock_event_device clockevent_fh = {
+ .name = "fh_clockevent",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
+ .set_next_event = fh_set_next_event,
+ .set_mode = fh_set_mode,
+};
+
+struct sys_timer fh_timer = {
+ .init = fh_timer_init,
+};
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 3b3776d0..4e198455 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1113,3 +1113,10 @@ blissc MACH_BLISSC BLISSC 3491
thales_adc MACH_THALES_ADC THALES_ADC 3492
ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
atdgp318 MACH_ATDGP318 ATDGP318 3494
+fh8810 MACH_FH8810 FH8810 9999
+fh8830 MACH_FH8830 FH8830 9999
+fh8833 MACH_FH8833 FH8833 9999
+fh8856 MACH_FH8856 FH8856 9999
+fh8852 MACH_FH8852 FH8852 9999
+fh8626v100 MACH_FH8626V100 FH8626V100 9999
+zy2 MACH_ZY2 ZY2 9999
diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
deleted file mode 120000
index 7cb65789..00000000
--- a/arch/microblaze/boot/dts/system.dts
+++ /dev/null
@@ -1 +0,0 @@
-../../platform/generic/system.dts
\ No newline at end of file
diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
new file mode 100644
index 00000000..3f85df2b
--- /dev/null
+++ b/arch/microblaze/boot/dts/system.dts
@@ -0,0 +1,367 @@
+/*
+ * Device Tree Generator version: 1.1
+ *
+ * (C) Copyright 2007-2008 Xilinx, Inc.
+ * (C) Copyright 2007-2009 Michal Simek
+ *
+ * Michal SIMEK <monstr@monstr.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * CAUTION: This file is automatically generated by libgen.
+ * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6
+ *
+ * XPS project directory: Xilinx-ML505-ll_temac-sgdma-MMU-FDT-edk101
+ */
+
+/dts-v1/;
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,microblaze";
+ hard-reset-gpios = <&LEDs_8Bit 2 1>;
+ model = "testing";
+ DDR2_SDRAM: memory@90000000 {
+ device_type = "memory";
+ reg = < 0x90000000 0x10000000 >;
+ } ;
+ aliases {
+ ethernet0 = &Hard_Ethernet_MAC;
+ serial0 = &RS232_Uart_1;
+ } ;
+ chosen {
+ bootargs = "console=ttyUL0,115200 highres=on";
+ linux,stdout-path = "/plb@0/serial@84000000";
+ } ;
+ cpus {
+ #address-cells = <1>;
+ #cpus = <0x1>;
+ #size-cells = <0>;
+ microblaze_0: cpu@0 {
+ clock-frequency = <125000000>;
+ compatible = "xlnx,microblaze-7.10.d";
+ d-cache-baseaddr = <0x90000000>;
+ d-cache-highaddr = <0x9fffffff>;
+ d-cache-line-size = <0x10>;
+ d-cache-size = <0x2000>;
+ device_type = "cpu";
+ i-cache-baseaddr = <0x90000000>;
+ i-cache-highaddr = <0x9fffffff>;
+ i-cache-line-size = <0x10>;
+ i-cache-size = <0x2000>;
+ model = "microblaze,7.10.d";
+ reg = <0>;
+ timebase-frequency = <125000000>;
+ xlnx,addr-tag-bits = <0xf>;
+ xlnx,allow-dcache-wr = <0x1>;
+ xlnx,allow-icache-wr = <0x1>;
+ xlnx,area-optimized = <0x0>;
+ xlnx,cache-byte-size = <0x2000>;
+ xlnx,d-lmb = <0x1>;
+ xlnx,d-opb = <0x0>;
+ xlnx,d-plb = <0x1>;
+ xlnx,data-size = <0x20>;
+ xlnx,dcache-addr-tag = <0xf>;
+ xlnx,dcache-always-used = <0x1>;
+ xlnx,dcache-byte-size = <0x2000>;
+ xlnx,dcache-line-len = <0x4>;
+ xlnx,dcache-use-fsl = <0x1>;
+ xlnx,debug-enabled = <0x1>;
+ xlnx,div-zero-exception = <0x1>;
+ xlnx,dopb-bus-exception = <0x0>;
+ xlnx,dynamic-bus-sizing = <0x1>;
+ xlnx,edge-is-positive = <0x1>;
+ xlnx,family = "virtex5";
+ xlnx,endianness = <0x1>;
+ xlnx,fpu-exception = <0x1>;
+ xlnx,fsl-data-size = <0x20>;
+ xlnx,fsl-exception = <0x0>;
+ xlnx,fsl-links = <0x0>;
+ xlnx,i-lmb = <0x1>;
+ xlnx,i-opb = <0x0>;
+ xlnx,i-plb = <0x1>;
+ xlnx,icache-always-used = <0x1>;
+ xlnx,icache-line-len = <0x4>;
+ xlnx,icache-use-fsl = <0x1>;
+ xlnx,ill-opcode-exception = <0x1>;
+ xlnx,instance = "microblaze_0";
+ xlnx,interconnect = <0x1>;
+ xlnx,interrupt-is-edge = <0x0>;
+ xlnx,iopb-bus-exception = <0x0>;
+ xlnx,mmu-dtlb-size = <0x4>;
+ xlnx,mmu-itlb-size = <0x2>;
+ xlnx,mmu-tlb-access = <0x3>;
+ xlnx,mmu-zones = <0x10>;
+ xlnx,number-of-pc-brk = <0x1>;
+ xlnx,number-of-rd-addr-brk = <0x0>;
+ xlnx,number-of-wr-addr-brk = <0x0>;
+ xlnx,opcode-0x0-illegal = <0x1>;
+ xlnx,pvr = <0x2>;
+ xlnx,pvr-user1 = <0x0>;
+ xlnx,pvr-user2 = <0x0>;
+ xlnx,reset-msr = <0x0>;
+ xlnx,sco = <0x0>;
+ xlnx,unaligned-exceptions = <0x1>;
+ xlnx,use-barrel = <0x1>;
+ xlnx,use-dcache = <0x1>;
+ xlnx,use-div = <0x1>;
+ xlnx,use-ext-brk = <0x1>;
+ xlnx,use-ext-nm-brk = <0x1>;
+ xlnx,use-extended-fsl-instr = <0x0>;
+ xlnx,use-fpu = <0x2>;
+ xlnx,use-hw-mul = <0x2>;
+ xlnx,use-icache = <0x1>;
+ xlnx,use-interrupt = <0x1>;
+ xlnx,use-mmu = <0x3>;
+ xlnx,use-msr-instr = <0x1>;
+ xlnx,use-pcmp-instr = <0x1>;
+ } ;
+ } ;
+ mb_plb: plb@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus";
+ ranges ;
+ FLASH: flash@a0000000 {
+ bank-width = <2>;
+ compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
+ reg = < 0xa0000000 0x2000000 >;
+ xlnx,family = "virtex5";
+ xlnx,include-datawidth-matching-0 = <0x1>;
+ xlnx,include-datawidth-matching-1 = <0x0>;
+ xlnx,include-datawidth-matching-2 = <0x0>;
+ xlnx,include-datawidth-matching-3 = <0x0>;
+ xlnx,include-negedge-ioregs = <0x0>;
+ xlnx,include-plb-ipif = <0x1>;
+ xlnx,include-wrbuf = <0x1>;
+ xlnx,max-mem-width = <0x10>;
+ xlnx,mch-native-dwidth = <0x20>;
+ xlnx,mch-plb-clk-period-ps = <0x1f40>;
+ xlnx,mch-splb-awidth = <0x20>;
+ xlnx,mch0-accessbuf-depth = <0x10>;
+ xlnx,mch0-protocol = <0x0>;
+ xlnx,mch0-rddatabuf-depth = <0x10>;
+ xlnx,mch1-accessbuf-depth = <0x10>;
+ xlnx,mch1-protocol = <0x0>;
+ xlnx,mch1-rddatabuf-depth = <0x10>;
+ xlnx,mch2-accessbuf-depth = <0x10>;
+ xlnx,mch2-protocol = <0x0>;
+ xlnx,mch2-rddatabuf-depth = <0x10>;
+ xlnx,mch3-accessbuf-depth = <0x10>;
+ xlnx,mch3-protocol = <0x0>;
+ xlnx,mch3-rddatabuf-depth = <0x10>;
+ xlnx,mem0-width = <0x10>;
+ xlnx,mem1-width = <0x20>;
+ xlnx,mem2-width = <0x20>;
+ xlnx,mem3-width = <0x20>;
+ xlnx,num-banks-mem = <0x1>;
+ xlnx,num-channels = <0x0>;
+ xlnx,priority-mode = <0x0>;
+ xlnx,synch-mem-0 = <0x0>;
+ xlnx,synch-mem-1 = <0x0>;
+ xlnx,synch-mem-2 = <0x0>;
+ xlnx,synch-mem-3 = <0x0>;
+ xlnx,synch-pipedelay-0 = <0x2>;
+ xlnx,synch-pipedelay-1 = <0x2>;
+ xlnx,synch-pipedelay-2 = <0x2>;
+ xlnx,synch-pipedelay-3 = <0x2>;
+ xlnx,tavdv-ps-mem-0 = <0x1adb0>;
+ xlnx,tavdv-ps-mem-1 = <0x3a98>;
+ xlnx,tavdv-ps-mem-2 = <0x3a98>;
+ xlnx,tavdv-ps-mem-3 = <0x3a98>;
+ xlnx,tcedv-ps-mem-0 = <0x1adb0>;
+ xlnx,tcedv-ps-mem-1 = <0x3a98>;
+ xlnx,tcedv-ps-mem-2 = <0x3a98>;
+ xlnx,tcedv-ps-mem-3 = <0x3a98>;
+ xlnx,thzce-ps-mem-0 = <0x88b8>;
+ xlnx,thzce-ps-mem-1 = <0x1b58>;
+ xlnx,thzce-ps-mem-2 = <0x1b58>;
+ xlnx,thzce-ps-mem-3 = <0x1b58>;
+ xlnx,thzoe-ps-mem-0 = <0x1b58>;
+ xlnx,thzoe-ps-mem-1 = <0x1b58>;
+ xlnx,thzoe-ps-mem-2 = <0x1b58>;
+ xlnx,thzoe-ps-mem-3 = <0x1b58>;
+ xlnx,tlzwe-ps-mem-0 = <0x88b8>;
+ xlnx,tlzwe-ps-mem-1 = <0x0>;
+ xlnx,tlzwe-ps-mem-2 = <0x0>;
+ xlnx,tlzwe-ps-mem-3 = <0x0>;
+ xlnx,twc-ps-mem-0 = <0x2af8>;
+ xlnx,twc-ps-mem-1 = <0x3a98>;
+ xlnx,twc-ps-mem-2 = <0x3a98>;
+ xlnx,twc-ps-mem-3 = <0x3a98>;
+ xlnx,twp-ps-mem-0 = <0x11170>;
+ xlnx,twp-ps-mem-1 = <0x2ee0>;
+ xlnx,twp-ps-mem-2 = <0x2ee0>;
+ xlnx,twp-ps-mem-3 = <0x2ee0>;
+ xlnx,xcl0-linesize = <0x4>;
+ xlnx,xcl0-writexfer = <0x1>;
+ xlnx,xcl1-linesize = <0x4>;
+ xlnx,xcl1-writexfer = <0x1>;
+ xlnx,xcl2-linesize = <0x4>;
+ xlnx,xcl2-writexfer = <0x1>;
+ xlnx,xcl3-linesize = <0x4>;
+ xlnx,xcl3-writexfer = <0x1>;
+ } ;
+ Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,compound";
+ ranges ;
+ ethernet@81c00000 {
+ compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
+ device_type = "network";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 5 2 >;
+ llink-connected = <&PIM3>;
+ local-mac-address = [ 00 0a 35 00 00 00 ];
+ reg = < 0x81c00000 0x40 >;
+ xlnx,bus2core-clk-ratio = <0x1>;
+ xlnx,phy-type = <0x1>;
+ xlnx,phyaddr = <0x1>;
+ xlnx,rxcsum = <0x0>;
+ xlnx,rxfifo = <0x1000>;
+ xlnx,temac-type = <0x0>;
+ xlnx,txcsum = <0x0>;
+ xlnx,txfifo = <0x1000>;
+ } ;
+ } ;
+ IIC_EEPROM: i2c@81600000 {
+ compatible = "xlnx,xps-iic-2.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 6 2 >;
+ reg = < 0x81600000 0x10000 >;
+ xlnx,clk-freq = <0x7735940>;
+ xlnx,family = "virtex5";
+ xlnx,gpo-width = <0x1>;
+ xlnx,iic-freq = <0x186a0>;
+ xlnx,scl-inertial-delay = <0x0>;
+ xlnx,sda-inertial-delay = <0x0>;
+ xlnx,ten-bit-adr = <0x0>;
+ } ;
+ LEDs_8Bit: gpio@81400000 {
+ compatible = "xlnx,xps-gpio-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 7 2 >;
+ reg = < 0x81400000 0x10000 >;
+ xlnx,all-inputs = <0x0>;
+ xlnx,all-inputs-2 = <0x0>;
+ xlnx,dout-default = <0x0>;
+ xlnx,dout-default-2 = <0x0>;
+ xlnx,family = "virtex5";
+ xlnx,gpio-width = <0x8>;
+ xlnx,interrupt-present = <0x1>;
+ xlnx,is-bidir = <0x1>;
+ xlnx,is-bidir-2 = <0x1>;
+ xlnx,is-dual = <0x0>;
+ xlnx,tri-default = <0xffffffff>;
+ xlnx,tri-default-2 = <0xffffffff>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ } ;
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ heartbeat {
+ label = "Heartbeat";
+ gpios = <&LEDs_8Bit 4 1>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ yellow {
+ label = "Yellow";
+ gpios = <&LEDs_8Bit 5 1>;
+ };
+
+ red {
+ label = "Red";
+ gpios = <&LEDs_8Bit 6 1>;
+ };
+
+ green {
+ label = "Green";
+ gpios = <&LEDs_8Bit 7 1>;
+ };
+ } ;
+ RS232_Uart_1: serial@84000000 {
+ clock-frequency = <125000000>;
+ compatible = "xlnx,xps-uartlite-1.00.a";
+ current-speed = <115200>;
+ device_type = "serial";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 8 0 >;
+ port-number = <0>;
+ reg = < 0x84000000 0x10000 >;
+ xlnx,baudrate = <0x1c200>;
+ xlnx,data-bits = <0x8>;
+ xlnx,family = "virtex5";
+ xlnx,odd-parity = <0x0>;
+ xlnx,use-parity = <0x0>;
+ } ;
+ SysACE_CompactFlash: sysace@83600000 {
+ compatible = "xlnx,xps-sysace-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 4 2 >;
+ reg = < 0x83600000 0x10000 >;
+ xlnx,family = "virtex5";
+ xlnx,mem-width = <0x10>;
+ } ;
+ debug_module: debug@84400000 {
+ compatible = "xlnx,mdm-1.00.d";
+ reg = < 0x84400000 0x10000 >;
+ xlnx,family = "virtex5";
+ xlnx,interconnect = <0x1>;
+ xlnx,jtag-chain = <0x2>;
+ xlnx,mb-dbg-ports = <0x1>;
+ xlnx,uart-width = <0x8>;
+ xlnx,use-uart = <0x1>;
+ xlnx,write-fsl-ports = <0x0>;
+ } ;
+ mpmc@90000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,mpmc-4.02.a";
+ ranges ;
+ PIM3: sdma@84600180 {
+ compatible = "xlnx,ll-dma-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 2 2 1 2 >;
+ reg = < 0x84600180 0x80 >;
+ } ;
+ } ;
+ xps_intc_0: interrupt-controller@81800000 {
+ #interrupt-cells = <0x2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller ;
+ reg = < 0x81800000 0x10000 >;
+ xlnx,kind-of-intr = <0x100>;
+ xlnx,num-intr-inputs = <0x9>;
+ } ;
+ xps_timer_1: timer@83c00000 {
+ compatible = "xlnx,xps-timer-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 3 2 >;
+ reg = < 0x83c00000 0x10000 >;
+ xlnx,count-width = <0x20>;
+ xlnx,family = "virtex5";
+ xlnx,gen0-assert = <0x1>;
+ xlnx,gen1-assert = <0x1>;
+ xlnx,one-timer-only = <0x0>;
+ xlnx,trig0-assert = <0x1>;
+ xlnx,trig1-assert = <0x1>;
+ } ;
+ } ;
+} ;
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
old mode 100644
new mode 100755
index 940d70cb..5ae0fdea
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -396,7 +396,7 @@ EXPORT_SYMBOL_GPL(af_alg_free_sg);
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
{
struct cmsghdr *cmsg;
-
+ struct af_alg_usr_def *p_usr_def;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
@@ -418,7 +418,11 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
return -EINVAL;
con->op = *(u32 *)CMSG_DATA(cmsg);
break;
-
+ case ALG_USR_DEF:
+ p_usr_def = (struct af_alg_usr_def *)CMSG_DATA(cmsg);
+ memcpy(&con->usr_def, p_usr_def,
+ sizeof(struct af_alg_usr_def));
+ break;
default:
return -EINVAL;
}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
old mode 100644
new mode 100755
index 6a6dfc06..d89ddbc7
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -247,6 +247,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
struct skcipher_ctx *ctx = ask->private;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct af_alg_usr_def *p_usr_def = crypto_ablkcipher_usr_def(tfm);
struct skcipher_sg_list *sgl;
struct af_alg_control con = {};
long copied = 0;
@@ -270,6 +271,8 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
return -EINVAL;
}
+ memcpy(p_usr_def, &con.usr_def, sizeof(struct af_alg_usr_def));
+
if (con.iv && con.iv->ivlen != ivsize)
return -EINVAL;
}
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3bb154d8..67f5c27f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -126,4 +126,6 @@ source "drivers/hwspinlock/Kconfig"
source "drivers/clocksource/Kconfig"
+source "drivers/pwm/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 09f3232b..c3217634 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -6,6 +6,7 @@
#
obj-y += gpio/
+obj-y += pwm/
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e0b25de1..8624d851 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -292,4 +292,24 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110 from AES
algorithms execution.
+config FH_AES
+ tristate "FH AES support"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_DES
+# select CRYPTO_AUTHENC
+# select CRYPTO_ALGAPI
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_SEQIV
+
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called fh_aes.
+
+
+config FH_AES_SELF_TEST
+ bool "fh aes self test"
+ depends on FH_AES
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 53ea5015..46f30b50 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,3 +13,6 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+
+obj-$(CONFIG_FH_AES) += fh_aes.o
+obj-$(CONFIG_FH_AES_SELF_TEST) += fh_aes_test.o
diff --git a/drivers/crypto/fh_aes.c b/drivers/crypto/fh_aes.c
new file mode 100755
index 00000000..d1bcf899
--- /dev/null
+++ b/drivers/crypto/fh_aes.c
@@ -0,0 +1,1548 @@
+/*****************************************************************************
+ * Include Section
+ * add all #include here
+ *****************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/highmem.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <crypto/hash.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <crypto/rng.h>
+#include "fh_aes.h"
+
+/*****************************************************************************
+ * Define section
+ * add all #define here
+ *****************************************************************************/
+
+#define CRYPTO_QUEUE_LEN (1000)
+#define CRYPTION_POS (0)
+#define METHOD_POS (1)
+#define EMODE_POS (4)
+
+#define aes_readl(aes, name) \
+ __raw_readl(&(((struct fh_aes_reg *)aes->regs)->name))
+
+#define aes_writel(aes, name, val) \
+ __raw_writel((val), &(((struct fh_aes_reg *)aes->regs)->name))
+
+#define aes_readw(aes, name) \
+ __raw_readw(&(((struct fh_aes_reg *)aes->regs)->name))
+
+#define aes_writew(aes, name, val) \
+ __raw_writew((val), &(((struct fh_aes_reg *)aes->regs)->name))
+
+#define aes_readb(aes, name) \
+ __raw_readb(&(((struct fh_aes_reg *)aes->regs)->name))
+
+#define aes_writeb(aes, name, val) \
+ __raw_writeb((val), &(((struct fh_aes_reg *)aes->regs)->name))
+
+
+#ifdef CONFIG_FH_EFUSE
+#define FH_AESV2
+#else
+#undef FH_AESV2
+#endif
+
+#define FH_AES_ALLIGN_SIZE 64
+#define FH_AES_MALLOC_SIZE 4096
+#define FH_AES_CTL_MAX_PROCESS_SIZE (FH_AES_MALLOC_SIZE - 1)
+
+#ifdef FH_AESV2
+#include <../drivers/misc/fh_efuse.h>
+extern struct wrap_efuse_obj s_efuse_obj;
+#endif
+/****************************************************************************
+ * ADT section
+ * add definition of user defined Data Type that only be used in this file here
+ ***************************************************************************/
+enum {
+ ENCRYPT = 0 << CRYPTION_POS,
+ DECRYPT = 1 << CRYPTION_POS,
+};
+
+enum {
+ ECB_MODE = 0 << EMODE_POS,
+ CBC_MODE = 1 << EMODE_POS,
+ CTR_MODE = 2 << EMODE_POS,
+ CFB_MODE = 4 << EMODE_POS,
+ OFB_MODE = 5 << EMODE_POS,
+};
+
+enum {
+ DES_METHOD = 0 << METHOD_POS,
+ TRIPLE_DES_METHOD = 1 << METHOD_POS,
+ AES_128_METHOD = 4 << METHOD_POS,
+ AES_192_METHOD = 5 << METHOD_POS,
+ AES_256_METHOD = 6 << METHOD_POS,
+};
+
+/*****************************************************************************
+
+ * static fun;
+ *****************************************************************************/
+
+static int fh_aes_handle_req(struct fh_aes_dev *dev,
+ struct ablkcipher_request *req);
+/*aes*/
+static int fh_aes_crypt(struct ablkcipher_request *req, unsigned long mode);
+static int fh_aes_ecb_encrypt(struct ablkcipher_request *req);
+static int fh_aes_ecb_decrypt(struct ablkcipher_request *req);
+static int fh_aes_cbc_encrypt(struct ablkcipher_request *req);
+static int fh_aes_cbc_decrypt(struct ablkcipher_request *req);
+static int fh_aes_ctr_encrypt(struct ablkcipher_request *req);
+static int fh_aes_ctr_decrypt(struct ablkcipher_request *req);
+static int fh_aes_ofb_encrypt(struct ablkcipher_request *req);
+static int fh_aes_ofb_decrypt(struct ablkcipher_request *req);
+static int fh_aes_cfb_encrypt(struct ablkcipher_request *req);
+static int fh_aes_cfb_decrypt(struct ablkcipher_request *req);
+
+/*des*/
+static int fh_des_ecb_encrypt(struct ablkcipher_request *req);
+static int fh_des_ecb_decrypt(struct ablkcipher_request *req);
+static int fh_des_cbc_encrypt(struct ablkcipher_request *req);
+static int fh_des_cbc_decrypt(struct ablkcipher_request *req);
+static int fh_des_ofb_encrypt(struct ablkcipher_request *req);
+static int fh_des_ofb_decrypt(struct ablkcipher_request *req);
+static int fh_des_cfb_encrypt(struct ablkcipher_request *req);
+static int fh_des_cfb_decrypt(struct ablkcipher_request *req);
+
+/*tri des*/
+static int fh_des_tri_ecb_encrypt(struct ablkcipher_request *req);
+static int fh_des_tri_ecb_decrypt(struct ablkcipher_request *req);
+static int fh_des_tri_cbc_encrypt(struct ablkcipher_request *req);
+static int fh_des_tri_cbc_decrypt(struct ablkcipher_request *req);
+static int fh_des_tri_ofb_encrypt(struct ablkcipher_request *req);
+static int fh_des_tri_ofb_decrypt(struct ablkcipher_request *req);
+static int fh_des_tri_cfb_encrypt(struct ablkcipher_request *req);
+static int fh_des_tri_cfb_decrypt(struct ablkcipher_request *req);
+static int fh_aes_setkey(struct crypto_ablkcipher *cipher, const uint8_t *key,
+ unsigned int keylen);
+static int fh_aes_cra_init(struct crypto_tfm *tfm);
+static void fh_aes_tx(struct fh_aes_dev *dev);
+static void fh_aes_rx(struct fh_aes_dev *dev);
+static irqreturn_t fh_aes_interrupt(int irq, void *dev_id);
+static void aes_biglittle_swap(u8 *buf);
+static int fh_set_indata(struct fh_aes_dev *dev, struct scatterlist *sg);
+static int fh_set_outdata(struct fh_aes_dev *dev, struct scatterlist *sg);
+static void fh_set_aes_key_reg(struct fh_aes_dev *dev, uint8_t *key,
+ uint8_t *iv, unsigned int keylen);
+static void fh_set_dma_indata(struct fh_aes_dev *dev,
+ struct scatterlist *sg);
+static void fh_set_dma_outdata(struct fh_aes_dev *dev,
+ struct scatterlist *sg);
+static void fh_unset_indata(struct fh_aes_dev *dev);
+static void fh_unset_outdata(struct fh_aes_dev *dev);
+static void fh_aes_complete(struct fh_aes_dev *dev, int err);
+static void fh_aes_crypt_start(struct fh_aes_dev *dev, unsigned long mode);
+static void fh_aes_work_cb(struct work_struct *w);
+
+#define fh_des_setkey fh_aes_setkey
+/*****************************************************************************
+ * Global variables section - Local
+ * define global variables(will be refered only in this file) here,
+ * static keyword should be used to limit scope of local variable to this file
+ * e.g.
+ * static uint8_t ufoo;
+ *****************************************************************************/
+struct fh_aes_dev *pobj_aes_dev = NULL;
+static struct crypto_alg algs[] = {
+ {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = fh_aes_setkey,
+ .encrypt = fh_aes_ecb_encrypt,
+ .decrypt = fh_aes_ecb_decrypt,
+ }
+ },
+ {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = fh_aes_setkey,
+ .encrypt = fh_aes_cbc_encrypt,
+ .decrypt = fh_aes_cbc_decrypt,
+ }
+ },
+ {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = fh_aes_setkey,
+ .encrypt = fh_aes_ctr_encrypt,
+ .decrypt = fh_aes_ctr_decrypt,
+ }
+ },
+ {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = fh_aes_setkey,
+ .encrypt = fh_aes_ofb_encrypt,
+ .decrypt = fh_aes_ofb_decrypt,
+ }
+ },
+ {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "cfb-aes-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = fh_aes_setkey,
+ .encrypt = fh_aes_cfb_encrypt,
+ .decrypt = fh_aes_cfb_decrypt,
+ }
+ },
+ {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = fh_des_setkey,
+ .encrypt = fh_des_ecb_encrypt,
+ .decrypt = fh_des_ecb_decrypt,
+ }
+ },
+ {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = fh_des_setkey,
+ .encrypt = fh_des_cbc_encrypt,
+ .decrypt = fh_des_cbc_decrypt,
+ }
+ },
+ {
+ .cra_name = "ofb(des)",
+ .cra_driver_name = "ofb-des-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = fh_des_setkey,
+ .encrypt = fh_des_ofb_encrypt,
+ .decrypt = fh_des_ofb_decrypt,
+ }
+ },
+ {
+ .cra_name = "cfb(des)",
+ .cra_driver_name = "cfb-des-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = fh_des_setkey,
+ .encrypt = fh_des_cfb_encrypt,
+ .decrypt = fh_des_cfb_decrypt,
+ }
+ },
+ {
+ .cra_name = "ecb(des3)",
+ .cra_driver_name = "ecb-des3-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = fh_des_setkey,
+ .encrypt = fh_des_tri_ecb_encrypt,
+ .decrypt = fh_des_tri_ecb_decrypt,
+ }
+ },
+ {
+ .cra_name = "cbc(des3)",
+ .cra_driver_name = "cbc-des3-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = fh_des_setkey,
+ .encrypt = fh_des_tri_cbc_encrypt,
+ .decrypt = fh_des_tri_cbc_decrypt,
+ }
+ },
+ {
+ .cra_name = "ofb(des3)",
+ .cra_driver_name = "ofb-des3-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = fh_des_setkey,
+ .encrypt = fh_des_tri_ofb_encrypt,
+ .decrypt = fh_des_tri_ofb_decrypt,
+ }
+ },
+ {
+ .cra_name = "cfb(des3)",
+ .cra_driver_name = "cfb-des3-fh",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct fh_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = fh_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = fh_des_setkey,
+ .encrypt = fh_des_tri_cfb_encrypt,
+ .decrypt = fh_des_tri_cfb_decrypt,
+ }
+ },
+};
+
+#ifdef CONFIG_FH_AES_SELF_TEST
+extern void fh_aes_self_test_all(void);
+#endif
+
+/* function body */
+static int fh_aes_handle_req(struct fh_aes_dev *dev,
+ struct ablkcipher_request *req) /* Enqueue one request and kick the worker. */
+{
+ unsigned long flags;
+ int err;
+ spin_lock_irqsave(&dev->lock, flags); /* dev->queue is shared with the work handler */
+ err = ablkcipher_enqueue_request(&dev->queue, req);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ queue_work(dev->workqueue, &dev->work); /* worker drains the queue asynchronously */
+ return err; /* crypto-queue status (presumably -EINPROGRESS/-EBUSY — per crypto API) */
+}
+
+static int fh_aes_crypt(struct ablkcipher_request *req, unsigned long mode) /* Common entry: validate size, record mode, queue req. */
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct fh_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct fh_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
+ struct fh_aes_dev *dev = ctx->dev;
+ AES_DBG("%s\n", __func__);
+ dev->reqctx = reqctx;
+ /*if (!(mode & CFB_MODE)) {*/
+ if ((!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
+ && (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE))) { /* must be a multiple of the AES (16B) or DES (8B) block size */
+ pr_err("request size is not exact amount of AES blocks\n");
+ return -EINVAL;
+ }
+ /*}*/
+ AES_DBG("reqctx->mode value: %x\n", (unsigned int)mode);
+ reqctx->mode = mode; /* mode = cipher method | chaining mode | direction bits */
+ return fh_aes_handle_req(dev, req);
+}
+
+static int fh_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct fh_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ u32 method = 0;
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+ return fh_aes_crypt(req, method | ECB_MODE | ENCRYPT);
+}
+
+
+static int fh_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct fh_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ u32 method = 0;
+
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+ return fh_aes_crypt(req, method | ECB_MODE | DECRYPT);
+}
+
+static int fh_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct fh_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ u32 method = 0;
+
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+ return fh_aes_crypt(req, method | CBC_MODE | ENCRYPT);
+}
+
+static int fh_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm;
+ struct fh_aes_ctx *ctx;
+ u32 method;
+
+ tfm = crypto_ablkcipher_reqtfm(req);
+ ctx = crypto_ablkcipher_ctx(tfm);
+ method = 0;
+ AES_DBG("%s\n", __func__);
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+
+ return fh_aes_crypt(req, method | CBC_MODE | DECRYPT);
+}
+
+static int fh_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm;
+ struct fh_aes_ctx *ctx;
+ u32 method;
+ tfm = crypto_ablkcipher_reqtfm(req);
+ ctx = crypto_ablkcipher_ctx(tfm);
+ method = 0;
+
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+
+ return fh_aes_crypt(req, method | CTR_MODE | ENCRYPT);
+}
+
+static int fh_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm;
+ struct fh_aes_ctx *ctx;
+ u32 method;
+
+ tfm = crypto_ablkcipher_reqtfm(req);
+ ctx = crypto_ablkcipher_ctx(tfm);
+ method = 0;
+ AES_DBG("%s\n", __func__);
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+ return fh_aes_crypt(req, method | CTR_MODE | DECRYPT);
+}
+
+static int fh_aes_ofb_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm;
+ struct fh_aes_ctx *ctx;
+ u32 method;
+
+ tfm = crypto_ablkcipher_reqtfm(req);
+ ctx = crypto_ablkcipher_ctx(tfm);
+ method = 0;
+
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+ return fh_aes_crypt(req, method | OFB_MODE | ENCRYPT);
+}
+
+static int fh_aes_ofb_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm;
+ struct fh_aes_ctx *ctx;
+ u32 method;
+
+ tfm = crypto_ablkcipher_reqtfm(req);
+ ctx = crypto_ablkcipher_ctx(tfm);
+ method = 0;
+
+ AES_DBG("%s\n", __func__);
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+
+ return fh_aes_crypt(req, method | OFB_MODE | DECRYPT);
+}
+
+static int fh_aes_cfb_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm;
+ struct fh_aes_ctx *ctx;
+ u32 method;
+
+ tfm = crypto_ablkcipher_reqtfm(req);
+ ctx = crypto_ablkcipher_ctx(tfm);
+ method = 0;
+
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+ return fh_aes_crypt(req, method | CFB_MODE | ENCRYPT);
+}
+
+static int fh_aes_cfb_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm;
+ struct fh_aes_ctx *ctx;
+ u32 method;
+
+ tfm = crypto_ablkcipher_reqtfm(req);
+ ctx = crypto_ablkcipher_ctx(tfm);
+ method = 0;
+
+ AES_DBG("%s\n", __func__);
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ method = AES_128_METHOD;
+ break;
+ case AES_KEYSIZE_192:
+ method = AES_192_METHOD;
+ break;
+ case AES_KEYSIZE_256:
+ method = AES_256_METHOD;
+ break;
+ default:
+ break;
+ }
+
+ return fh_aes_crypt(req, method | CFB_MODE | DECRYPT);
+}
+static int fh_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = DES_METHOD;
+
+ return fh_aes_crypt(req, method | ECB_MODE | ENCRYPT);
+}
+
+static int fh_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = DES_METHOD;
+
+ return fh_aes_crypt(req, method | ECB_MODE | DECRYPT);
+}
+
+static int fh_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = DES_METHOD;
+
+ return fh_aes_crypt(req, method | CBC_MODE | ENCRYPT);
+}
+
+static int fh_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = DES_METHOD;
+
+ return fh_aes_crypt(req, method | CBC_MODE | DECRYPT);
+}
+
+static int fh_des_ofb_encrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = DES_METHOD;
+
+ return fh_aes_crypt(req, method | OFB_MODE | ENCRYPT);
+}
+
+static int fh_des_ofb_decrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = DES_METHOD;
+
+ return fh_aes_crypt(req, method | OFB_MODE | DECRYPT);
+}
+
+static int fh_des_cfb_encrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = DES_METHOD;
+
+ return fh_aes_crypt(req, method | CFB_MODE | ENCRYPT);
+}
+
+static int fh_des_cfb_decrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = DES_METHOD;
+
+ return fh_aes_crypt(req, method | CFB_MODE | DECRYPT);
+}
+static int fh_des_tri_ecb_encrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = TRIPLE_DES_METHOD;
+ return fh_aes_crypt(req, method | ECB_MODE | ENCRYPT);
+}
+
+static int fh_des_tri_ecb_decrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = TRIPLE_DES_METHOD;
+ return fh_aes_crypt(req, method | ECB_MODE | DECRYPT);
+}
+
+static int fh_des_tri_cbc_encrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = TRIPLE_DES_METHOD;
+ return fh_aes_crypt(req, method | CBC_MODE | ENCRYPT);
+}
+
+static int fh_des_tri_cbc_decrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = TRIPLE_DES_METHOD;
+ return fh_aes_crypt(req, method | CBC_MODE | DECRYPT);
+}
+
+static int fh_des_tri_ofb_encrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = TRIPLE_DES_METHOD;
+ return fh_aes_crypt(req, method | OFB_MODE | ENCRYPT);
+}
+
+static int fh_des_tri_ofb_decrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = TRIPLE_DES_METHOD;
+ return fh_aes_crypt(req, method | OFB_MODE | DECRYPT);
+}
+
+static int fh_des_tri_cfb_encrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = TRIPLE_DES_METHOD;
+ return fh_aes_crypt(req, method | CFB_MODE | ENCRYPT);
+}
+
+static int fh_des_tri_cfb_decrypt(struct ablkcipher_request *req)
+{
+ u32 method;
+ method = 0;
+ method = TRIPLE_DES_METHOD;
+ return fh_aes_crypt(req, method | CFB_MODE | DECRYPT);
+}
+static int fh_aes_setkey(struct crypto_ablkcipher *cipher, const uint8_t *key,
+ unsigned int keylen) /* Store the key in the tfm context; also used for DES via #define fh_des_setkey. */
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct fh_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ int i = 0;
+ AES_DBG("%s\n", __func__);
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192
+ && keylen != AES_KEYSIZE_256 && keylen != DES_KEY_SIZE
+ && keylen != DES3_EDE_KEY_SIZE) /* accept AES-128/192/256, DES and 3DES key lengths only */
+ return -EINVAL;
+
+ for (; i < keylen; i++)
+ AES_DBG("%x", key[i]); /* NOTE(review): debug builds print raw key material — consider removing */
+ AES_DBG("\n");
+
+ memcpy(ctx->aes_key, key, keylen); /* key is programmed into hardware later, at crypt start */
+ ctx->keylen = keylen;
+ return 0;
+}
+
+static int fh_aes_cra_init(struct crypto_tfm *tfm) /* Per-tfm init: bind to the (single) device instance. */
+{
+ struct fh_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ ctx->dev = pobj_aes_dev; /* global device pointer, presumably set in probe — outside this chunk */
+ tfm->crt_ablkcipher.reqsize = sizeof(struct fh_aes_reqctx); /* per-request private area size */
+ AES_DBG("%s\n", __func__);
+ return 0;
+}
+
+static void fh_aes_tx(struct fh_aes_dev *dev) /* Copy cipher output from the bounce buffer back into req->dst scatterlist. */
+{
+ /*int err = 0;*/
+ unsigned int i = 0;
+ struct ablkcipher_request *req = dev->req;
+ struct scatterlist *temp_sg = req->dst;
+ int len = 0;
+ fh_unset_outdata(dev); /* unmap DMA before the CPU touches ctl_dst_xbuf */
+ do {
+ sg_copy_from_buffer(temp_sg, 1, &dev->ctl_dst_xbuf[i],
+ sg_dma_len(temp_sg)); /* NOTE(review): no bound check against the xbuf size here; input side is checked in fh_set_indata */
+ len += sg_dma_len(temp_sg);
+ i += sg_dma_len(temp_sg);
+ temp_sg = sg_next(temp_sg);
+ } while (temp_sg != NULL);
+ /*fh_aes_complete(dev, err);*/
+}
+
+static void fh_aes_rx(struct fh_aes_dev *dev) /* Tear down the input DMA mapping after the transfer completes. */
+{
+ fh_unset_indata(dev);
+}
+
+static irqreturn_t fh_aes_interrupt(int irq, void *dev_id) /* IRQ handler: stop DMA, ack/disable interrupts, signal completion on "done". */
+{
+
+ u32 isr_status;
+ /*unsigned long flags;*/
+ struct platform_device *pdev = (struct platform_device *) dev_id;
+ struct fh_aes_dev *dev = platform_get_drvdata(pdev);
+ /*u32 isr = dev->en_isr;*/
+ AES_DBG("%s\n", __func__);
+ /*spin_lock_irqsave(&dev->lock, flags);*/
+ aes_writel(dev, dma_control, 0); /* halt DMA engine */
+ isr_status = aes_readl(dev, intr_src);
+ aes_writel(dev, intr_clear_status, 0x07); /* ack all three status bits */
+ aes_writel(dev, intr_enable, 0); /* mask until the next transfer re-enables */
+ if (isr_status & 0x02)
+ printk("dma rev hreap error...\n");
+ if (isr_status & 0x04)
+ printk("dma stop src ..\n");
+ if (isr_status & 0x01) { /* bit0 = transfer-done; wakes fh_aes_work_cb */
+ AES_DBG("dma done..\n");
+ complete(&(dev->done));
+ }
+ /*spin_unlock_irqrestore(&dev->lock, flags);*/
+ return IRQ_HANDLED;
+}
+
+static void aes_biglittle_swap(u8 *buf) /* Reverse the 4 bytes of a 32-bit word in place (endian swap for HW regs). */
+{
+ u8 tmp, tmp1;
+ tmp = buf[0];
+ tmp1 = buf[1];
+ buf[0] = buf[3];
+ buf[1] = buf[2];
+ buf[2] = tmp1;
+ buf[3] = tmp;
+}
+
+static int fh_set_indata(struct fh_aes_dev *dev, struct scatterlist *sg) /* Linearize src scatterlist into the bounce buffer and DMA-map it. */
+{
+ int err;
+ unsigned int i = 0;
+ unsigned int len = 0;
+ struct scatterlist *temp_sg = sg;
+ unsigned char *src_xbuf;
+ src_xbuf = &dev->ctl_src_xbuf[0];
+ do {
+ if (len + sg_dma_len(temp_sg) > FH_AES_CTL_MAX_PROCESS_SIZE) { /* reject requests larger than the bounce buffer */
+ printk("%s: total size > driver size 0x%x\n", __func__, FH_AES_CTL_MAX_PROCESS_SIZE);
+ err = -ENOMEM; /* NOTE(review): -EINVAL would describe an oversized request better */
+ goto exit;
+ }
+ sg_copy_to_buffer(temp_sg, 1, &src_xbuf[i], sg_dma_len(temp_sg));
+ len += sg_dma_len(temp_sg);
+ i += sg_dma_len(temp_sg);
+ temp_sg = sg_next(temp_sg);
+ } while (temp_sg != NULL);
+
+ sg_init_one(&dev->src_sg[0], &src_xbuf[0], len); /* single contiguous sg covering the copied data */
+ err = dma_map_sg(dev->dev, &dev->src_sg[0], 1, DMA_TO_DEVICE);
+ if (!err) { /* dma_map_sg returns 0 on mapping failure */
+ err = -ENOMEM;
+ goto exit;
+ }
+ dev->sg_src = &dev->src_sg[0];
+ err = 0;
+exit:
+ return err;
+}
+
+static int fh_set_outdata(struct fh_aes_dev *dev, struct scatterlist *sg) /* DMA-map the full-size dst bounce buffer; `sg` is currently unused. */
+{
+ int err;
+ sg_init_one(&dev->dst_sg[0],
+ &dev->ctl_dst_xbuf[0], FH_AES_CTL_MAX_PROCESS_SIZE); /* map the whole buffer; actual length is decided by the DMA size */
+ err = dma_map_sg(dev->dev, &dev->dst_sg[0], 1, DMA_FROM_DEVICE);
+ if (!err) { /* dma_map_sg returns 0 on mapping failure */
+ err = -ENOMEM;
+ goto exit;
+ }
+ dev->sg_dst = &dev->dst_sg[0];
+ err = 0;
+exit:
+ return err;
+}
+
+static void fh_set_aes_key_reg(struct fh_aes_dev *dev, uint8_t *key,
+ uint8_t *iv, unsigned int keylen) /* Program IV (when iv_flag set) and key into hardware registers, word-swapped. */
+{
+
+ int i;
+ u32 method;
+ u32 temp_key_buf[32];
+ u32 temp_iv_buf[32];
+ u32 *p_dst = NULL;
+ u32 key_size = 0;
+ if (dev->iv_flag == true) {
+ /* set the IV registers first */
+ /* AES modes load a 128-bit IV, DES/3DES modes a 64-bit IV */
+ AES_DBG("set iv reg\n");
+ if ((dev->control_reg & AES_128_METHOD)
+ || ((dev->control_reg & AES_192_METHOD))
+ || (dev->control_reg & AES_256_METHOD)) {
+ AES_DBG("aes iv mode...\n");
+
+ memcpy((u8 *)&temp_iv_buf[0], iv, 16)
;
+ p_dst = &temp_iv_buf[0];
+ for (i = 0; i < 16 / sizeof(u32); i++)
+ aes_biglittle_swap((u8 *)(p_dst + i)); /* hardware expects byte-swapped 32-bit words */
+ memcpy((u8 *)&((struct fh_aes_reg *) dev->regs)->initial_vector0,
+ temp_iv_buf, 16);
+ } else {
+ AES_DBG("des iv mode...\n");
+
+ memcpy((u8 *)&temp_iv_buf[0], iv, 8);
+ p_dst = &temp_iv_buf[0];
+ for (i = 0; i < 8 / sizeof(u32); i++)
+ aes_biglittle_swap((u8 *)(p_dst + i));
+
+ memcpy((u8 *)&((struct fh_aes_reg *) dev->regs)->initial_vector0,
+ temp_iv_buf, 8);
+
+ }
+ }
+ /* derive key length from the method bits of the control register */
+ method = dev->control_reg & 0x0e;
+ AES_DBG("set key reg\n");
+
+ switch (method) {
+ case AES_128_METHOD:
+ AES_DBG("set key aes 128 mode..\n");
+ key_size = 16;
+
+ break;
+ case AES_192_METHOD:
+ AES_DBG("set key aes 192 mode..\n");
+ key_size = 24;
+ break;
+
+ case AES_256_METHOD:
+ AES_DBG("set key aes 256 mode..\n");
+ key_size = 32;
+ break;
+
+ case DES_METHOD:
+ AES_DBG("set key des normal mode..\n");
+ key_size = 8;
+ break;
+
+ case TRIPLE_DES_METHOD:
+ AES_DBG("set key des triple mode..\n");
+ key_size = 24;
+ break;
+
+ default:
+ AES_DBG("error method!!\n"); /* key_size stays 0; nothing sensible will be written */
+ break;
+ }
+#ifdef FH_AESV2
+ if (dev->p_usr_def->mode & CRYPTO_EX_MEM_SET_KEY) { /* V2: key comes from efuse hardware, never touches the CPU */
+ s_efuse_obj.trans_key_start_no = 0;
+ s_efuse_obj.trans_key_size = key_size / 4;
+ efuse_trans_key(&s_efuse_obj,
+ s_efuse_obj.trans_key_start_no,
+ s_efuse_obj.trans_key_size, dev->p_usr_def);
+ } else {
+ s_efuse_obj.old_usr_def.mode &= ~CRYPTO_EX_MEM_SET_KEY;
+ s_efuse_obj.old_usr_def.mode |= CRYPTO_CPU_SET_KEY;
+ memcpy((u8 *)&temp_key_buf[0], key, key_size);
+ p_dst = &temp_key_buf[0];
+ for (i = 0; i < key_size / sizeof(u32); i++)
+ aes_biglittle_swap((u8 *)(p_dst + i));
+ memcpy((u8 *)&((struct fh_aes_reg *) dev->regs)->security_key0,
+ (u8 *)&temp_key_buf[0],
+ key_size);
+ }
+
+#else
+ memcpy((u8 *)&temp_key_buf[0], key, key_size); /* CPU writes the swapped key into the key registers */
+ p_dst = &temp_key_buf[0];
+ for (i = 0; i < key_size / sizeof(u32); i++)
+ aes_biglittle_swap((u8 *)(p_dst + i));
+
+ memcpy((u8 *)&((struct fh_aes_reg *) dev->regs)->security_key0,
+ (u8 *)&temp_key_buf[0],
+ key_size);
+
+#endif
+
+}
+
+static void fh_set_dma_indata(struct fh_aes_dev *dev,
+ struct scatterlist *sg) /* Program DMA source address and transfer size from the mapped sg. */
+{
+ aes_writel(dev, dma_src_add, sg_dma_address(sg));
+ AES_DBG("%s :dma trans size is :%x,add is:%x\n",
+ __func__, sg_dma_len(sg), sg_dma_address(sg));
+ aes_writel(dev, dma_trans_size, sg_dma_len(sg)); /* size is set only on the input side */
+}
+
+static void fh_set_dma_outdata(struct fh_aes_dev *dev,
+ struct scatterlist *sg) /* Program the DMA destination address from the mapped sg. */
+{
+ aes_writel(dev, dma_dst_add, sg_dma_address(sg));
+}
+
+static void fh_unset_indata(struct fh_aes_dev *dev) /* Unmap the single-entry source sg (DMA_TO_DEVICE). */
+{
+ dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
+}
+
+static void fh_unset_outdata(struct fh_aes_dev *dev) /* Unmap the single-entry destination sg (DMA_FROM_DEVICE). */
+{
+ dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
+}
+
+static void fh_aes_complete(struct fh_aes_dev *dev, int err) /* Invoke the caller's completion callback for the current request, if any. */
+{
+ if (dev->req->base.complete)
+ dev->req->base.complete(&dev->req->base, err);
+}
+
+static void fh_aes_crypt_start(struct fh_aes_dev *dev, unsigned long mode) /* Program control/key/IV/DMA registers and launch the transfer. */
+{
+
+ struct ablkcipher_request *req = dev->req;
+ u32 control_reg;
+ u32 outfifo_thold = 0;
+ u32 infifo_thold = 0;
+ u32 isr;
+ int err;
+ unsigned long flags;
+ control_reg = 0;
+ spin_lock_irqsave(&dev->lock, flags);
+ if ((mode & CBC_MODE) || (mode & CTR_MODE) || (mode & CFB_MODE)
+ || (mode & OFB_MODE)) { /* all chained modes need an IV; ECB does not */
+ control_reg |= 1 << 7; /* bit7 presumably enables IV use in hardware — from the mode test above */
+ dev->iv_flag = true;
+ } else
+ dev->iv_flag = false;
+
+ /* merge chaining mode, cipher method and direction into the control word */
+ control_reg |= (unsigned int) mode;
+ dev->control_reg = control_reg;
+ outfifo_thold = 0;
+ infifo_thold = 8;
+ isr = dev->en_isr;
+
+ AES_DBG("control_reg:0x%x\n", control_reg);
+ aes_writel(dev, encrypt_control, control_reg);
+ /* program key (and IV when iv_flag is set) into the hardware */
+ fh_set_aes_key_reg(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
+ err = fh_set_indata(dev, req->src);
+ if (err)
+ goto indata_error;
+
+ err = fh_set_outdata(dev, req->dst);
+ if (err)
+ goto outdata_error;
+
+ fh_set_dma_indata(dev, dev->sg_src);
+ fh_set_dma_outdata(dev, dev->sg_dst);
+
+ /* FIFO thresholds: out in bits [15:8], in in bits [7:0] */
+ AES_DBG("outfifo thold:%x\n", outfifo_thold);
+ AES_DBG("infifo thold:%x\n", infifo_thold);
+ aes_writel(dev, fifo_threshold, outfifo_thold << 8 | infifo_thold);
+ /* unmask the interrupts the ISR handler expects */
+ AES_DBG("intr enable:%x\n", isr);
+ aes_writel(dev, intr_enable, isr);
+ /* start the DMA engine; completion is signalled via fh_aes_interrupt */
+ aes_writel(dev, dma_control, 1);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return;
+
+outdata_error:
+ AES_DBG("outdata_error ..\n");
+ fh_unset_indata(dev);
+
+indata_error:
+ AES_DBG("indata_error ..\n");
+ fh_aes_complete(dev, err); /* NOTE(review): user callback runs with dev->lock held and irqs off; caller still blocks on dev->done — verify error path */
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+}
+
+/*
+ * fh_aes_work_cb - workqueue handler: dequeue one request, run it
+ * through the engine synchronously, complete it, and re-queue itself
+ * while more requests remain.
+ */
+static void fh_aes_work_cb(struct work_struct *w)
+{
+	struct fh_aes_dev *dev = container_of(w, struct fh_aes_dev, work);
+	struct crypto_async_request *async_req, *backlog;
+	struct fh_aes_reqctx *reqctx;
+	struct crypto_ablkcipher *p_tfm;
+	struct af_alg_usr_def *p_usr_def;
+	unsigned long flags;
+
+	AES_DBG("%s\n", __func__);
+	/*
+	 * Sample the backlog pointer BEFORE dequeuing:
+	 * crypto_dequeue_request() advances queue->backlog, so reading it
+	 * afterwards (as the old code did) can miss the backlogged request
+	 * that must be notified with -EINPROGRESS.
+	 */
+	spin_lock_irqsave(&dev->lock, flags);
+	backlog = crypto_get_backlog(&dev->queue);
+	async_req = crypto_dequeue_request(&dev->queue);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	if (!async_req)
+		return;
+
+	if (backlog && backlog->complete)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	init_completion(&dev->done);
+	dev->req = ablkcipher_request_cast(async_req);
+	p_tfm = crypto_ablkcipher_reqtfm(dev->req);
+	p_usr_def = crypto_ablkcipher_usr_def(p_tfm);
+	dev->p_usr_def = p_usr_def;
+	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
+	reqctx = ablkcipher_request_ctx(dev->req);
+	fh_aes_crypt_start(dev, reqctx->mode);
+	/* dev->done is presumably signalled from the DMA-done ISR - confirm */
+	wait_for_completion(&dev->done);
+	fh_aes_rx(dev);
+	fh_aes_tx(dev);
+	fh_aes_complete(dev, 0);
+	/*call the queue work until empty.*/
+	if (dev->queue.qlen != 0)
+		queue_work(dev->workqueue, &dev->work);
+}
+
+/* Types shared with the in-kernel DSP-facing control interface. */
+typedef struct {
+	unsigned int base;	/* presumably physical base address - confirm with caller */
+	void *vbase;		/* kernel virtual mapping of the region */
+	unsigned int size;	/* region size in bytes */
+} MEM_INFO;
+typedef struct {
+	MEM_INFO mem;
+	unsigned char *remap_base; /* data pointer actually used below (original note: "used size") */
+} RW_MEM_INFO;
+
+struct tcrypt_result {
+	struct completion completion;	/* signalled by the async callback */
+	int err;			/* last reported status */
+};
+
+int aes_128_ecb_encrypt(char *key_128, RW_MEM_INFO in,
+RW_MEM_INFO out, unsigned int data_len_align16);
+
+/*
+ * fh_aes_ctl_mem_init - allocate the two bounce buffers used by the
+ * control path and record both the raw pointers (for kfree) and their
+ * FH_AES_ALLIGN_SIZE-aligned aliases (for DMA use).
+ *
+ * Returns 0 on success or -ENOMEM on allocation failure.
+ */
+int fh_aes_ctl_mem_init(struct fh_aes_dev *pdata)
+{
+	/*
+	 * Keep the pointer arithmetic in unsigned long, not unsigned int:
+	 * pointers do not fit in 32 bits on 64-bit builds.
+	 */
+	unsigned long raw_src;
+	unsigned long raw_dst;
+
+	raw_src = (unsigned long)kmalloc(FH_AES_MALLOC_SIZE
+			+ FH_AES_ALLIGN_SIZE, GFP_KERNEL);
+	if (!raw_src)
+		return -ENOMEM;
+
+	raw_dst = (unsigned long)kmalloc(FH_AES_MALLOC_SIZE
+			+ FH_AES_ALLIGN_SIZE, GFP_KERNEL);
+	if (!raw_dst) {
+		kfree((void *)raw_src);
+		return -ENOMEM;
+	}
+
+	pdata->ctl_raw_src_xbuf = (unsigned char *)raw_src;
+	pdata->ctl_raw_dst_xbuf = (unsigned char *)raw_dst;
+	/* round up to the next FH_AES_ALLIGN_SIZE boundary */
+	pdata->ctl_src_xbuf = (unsigned char *)((raw_src
+			+ FH_AES_ALLIGN_SIZE - 1)
+			& ~((unsigned long)FH_AES_ALLIGN_SIZE - 1));
+	pdata->ctl_dst_xbuf = (unsigned char *)((raw_dst
+			+ FH_AES_ALLIGN_SIZE - 1)
+			& ~((unsigned long)FH_AES_ALLIGN_SIZE - 1));
+	return 0;
+}
+
+/*
+ * fh_aes_probe - map the controller, hook its IRQ, start the work
+ * queue and register the cipher algorithms.  Only a single controller
+ * instance is supported (tracked via pobj_aes_dev).
+ */
+static int __devinit fh_aes_probe(struct platform_device *pdev)
+{
+	int i, j, err = -ENODEV;
+	struct fh_aes_dev *pdata;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct resource *ioarea;
+
+	AES_DBG("aes probe get in..\n");
+	if (pobj_aes_dev) {
+		dev_err(&pdev->dev, "second crypto dev..\n");
+		return -EEXIST;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "get platform source error..\n");
+		return -ENODEV;
+	}
+
+	ioarea = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (!ioarea) {
+		dev_err(&pdev->dev, "aes region already claimed\n");
+		return -EBUSY;
+	}
+
+	pdata = kzalloc(sizeof(struct fh_aes_dev), GFP_KERNEL);
+	if (!pdata) {
+		err = -ENOMEM;
+		goto err_malloc;
+	}
+
+	spin_lock_init(&pdata->lock);
+	pdata->regs = ioremap(res->start, resource_size(res));
+	if (!pdata->regs) {
+		dev_err(&pdev->dev, "aes region already mapped\n");
+		err = -EINVAL;
+		goto err_iomap;
+	}
+
+	/*
+	 * platform_get_irq() returns a negative errno on failure; test it
+	 * in a signed variable (irq_no is u32, so "irq_no < 0" was never
+	 * true in the original code).
+	 */
+	err = platform_get_irq(pdev, 0);
+	if (err < 0) {
+		dev_warn(dev, "aes interrupt is not available.\n");
+		goto err_irq;
+	}
+	pdata->irq_no = err;
+	/*only enable dma done isr..*/
+	pdata->en_isr = 1 << 0;
+	err = request_irq(pdata->irq_no, fh_aes_interrupt, 0,
+			dev_name(&pdev->dev), pdev);
+	if (err) {
+		dev_dbg(&pdev->dev, "request_irq failed, %d\n", err);
+		goto err_irq;
+	}
+	/*bind to plat dev..*/
+	pdata->dev = dev;
+	/*bind to static para..only one aes controller in fh..*/
+	pobj_aes_dev = pdata;
+	platform_set_drvdata(pdev, pdata);
+
+	pdata->workqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
+	if (!pdata->workqueue) {
+		dev_err(&pdev->dev, "aes workqueue init error.\n");
+		err = -ENOMEM;	/* was: fell out with err == 0 and a leaked IRQ */
+		goto err_wq;
+	}
+	INIT_WORK(&pdata->work, fh_aes_work_cb);
+
+	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
+	for (i = 0; i < ARRAY_SIZE(algs); i++) {
+		INIT_LIST_HEAD(&algs[i].cra_list);
+		err = crypto_register_alg(&algs[i]);
+		if (err) {
+			dev_warn(dev, "register alg error...\n");
+			goto err_algs;
+		}
+	}
+
+	err = fh_aes_ctl_mem_init(pdata);
+	if (err) {
+		dev_err(&pdev->dev, "aes malloc mem error..\n");
+		goto err_algs;
+	}
+	pr_info("aes driver registered\n");
+
+#ifdef CONFIG_FH_AES_SELF_TEST
+	fh_aes_self_test_all();
+#endif
+
+	return 0;
+
+err_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&algs[j]);
+	destroy_workqueue(pdata->workqueue);
+err_wq:
+	platform_set_drvdata(pdev, NULL);
+	pobj_aes_dev = NULL;
+	/* dev_id must match the cookie passed to request_irq() (pdev) */
+	free_irq(pdata->irq_no, pdev);
+err_irq:
+	iounmap(pdata->regs);
+err_iomap:
+	kfree(pdata);
+err_malloc:
+	release_mem_region(res->start, resource_size(res));
+	return err;
+}
+
+/*
+ * fh_aes_remove - tear down everything probe set up, in reverse order.
+ */
+static int __devexit fh_aes_remove(struct platform_device *pdev)
+{
+	int i;
+	struct fh_aes_dev *pdata = platform_get_drvdata(pdev);
+	struct resource *res;
+
+	for (i = 0; i < ARRAY_SIZE(algs); i++)
+		crypto_unregister_alg(&algs[i]);
+
+	destroy_workqueue(pdata->workqueue);
+	platform_set_drvdata(pdev, NULL);
+	pobj_aes_dev = NULL;
+	/*
+	 * dev_id must match the cookie passed to request_irq() in probe
+	 * (pdev); passing pdata here meant the handler was never released.
+	 */
+	free_irq(pdata->irq_no, pdev);
+	iounmap(pdata->regs);
+	kfree(pdata->ctl_raw_src_xbuf);
+	kfree(pdata->ctl_raw_dst_xbuf);
+	pdata->ctl_raw_src_xbuf = NULL;
+	pdata->ctl_raw_dst_xbuf = NULL;
+	pdata->ctl_src_xbuf = NULL;
+	pdata->ctl_dst_xbuf = NULL;
+	kfree(pdata);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	return 0;
+}
+
+/* Platform glue: matched against the "fh_aes" platform device. */
+static struct platform_driver fh_aes_driver = {
+	.driver = {
+		.name = "fh_aes",
+		.owner = THIS_MODULE,
+	},
+	.probe = fh_aes_probe,
+	.remove = __devexit_p(fh_aes_remove),
+};
+
+/* Registered via late_initcall - presumably so prerequisite subsystems
+ * (crypto core, platform bus) are up first; confirm before changing. */
+static int __init fh_aes_init(void)
+{
+	return platform_driver_register(&fh_aes_driver);
+}
+late_initcall(fh_aes_init);
+
+/* Module unload: unregister the platform driver (triggers fh_aes_remove). */
+static void __exit fh_aes_exit(void)
+{
+	platform_driver_unregister(&fh_aes_driver);
+}
+module_exit(fh_aes_exit);
+
+/*
+ * Async completion callback for aes_128_ecb_encrypt(): -EINPROGRESS only
+ * means the request was queued, so wake the waiter on final status only.
+ */
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+	struct tcrypt_result *res = req->data;
+	if (err == -EINPROGRESS)
+		return;
+	complete(&res->completion);
+}
+
+/*
+ * aes_128_ecb_encrypt - one-shot AES-128-ECB encrypt helper exported for
+ * in-kernel users (DSP path).
+ *
+ * @key_128: 16-byte key.
+ * @in, @out: buffers; only the remap_base pointers are used here.
+ * @data_len_align16: payload length - must be a multiple of 16 and fit
+ * in one page (bounce buffers are single pages).
+ *
+ * The tfm, request and bounce pages are allocated on first use and
+ * cached in statics.  Returns 0 on success, -1 on setup failure.
+ * NOTE(review): not safe for concurrent callers - the static state and
+ * bounce pages are unprotected.
+ */
+int aes_128_ecb_encrypt(char *key_128, RW_MEM_INFO in,
+RW_MEM_INFO out, unsigned int data_len_align16)
+{
+	static char *xbuf;
+	static char *dst_xbuf;
+	static struct crypto_ablkcipher *tfm;
+	static struct ablkcipher_request *req;
+	static int malloc_flag;
+	struct scatterlist sg[8];
+	struct scatterlist dst_sg[8];
+	void *data;
+	void *dst_data;
+	struct tcrypt_result wait_result;
+
+	/*malloc buf...*/
+	if (malloc_flag != 0)
+		goto work_go;
+	xbuf = (void *)__get_free_page(GFP_KERNEL);
+	if (!xbuf) {
+		printk("no pages.\n");
+		return -1;
+	}
+
+	dst_xbuf = (void *)__get_free_page(GFP_KERNEL);
+	if (!dst_xbuf) {
+		free_page((unsigned long)xbuf);
+		printk("no pages.\n");
+		return -1;
+	}
+
+	tfm = crypto_alloc_ablkcipher("ecb-aes-fh",
+			CRYPTO_ALG_TYPE_ABLKCIPHER |
+			CRYPTO_ALG_ASYNC, 0);
+	if (IS_ERR(tfm)) {
+		printk("aes_test: failed to alloc cipher!\n");
+		free_page((unsigned long)xbuf);
+		free_page((unsigned long)dst_xbuf);
+		return -1;
+	}
+	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		printk(KERN_ERR "alg: skcipher: Failed to allocate request "
+		       "for\n");
+		/* previously leaked: release everything allocated above */
+		crypto_free_ablkcipher(tfm);
+		free_page((unsigned long)xbuf);
+		free_page((unsigned long)dst_xbuf);
+		return -1;
+	}
+	/*
+	 * Mark the cache valid only once every allocation has succeeded.
+	 * The old code set the flag up front, so a failed first call made
+	 * every later call jump to work_go with NULL statics.
+	 */
+	malloc_flag = 1;
+
+work_go:
+	init_completion(&wait_result.completion);
+	crypto_ablkcipher_setkey(tfm, (u8 *)key_128, 16);
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+			tcrypt_complete, &wait_result);
+	data = xbuf;
+	dst_data = dst_xbuf;
+	/*encrypt*/
+	memcpy(data, in.remap_base, data_len_align16);
+	sg_init_one(&sg[0], data, data_len_align16);
+	sg_init_one(&dst_sg[0], dst_data, data_len_align16);
+	ablkcipher_request_set_crypt(req, sg, dst_sg, data_len_align16, NULL);
+	crypto_ablkcipher_encrypt(req);
+	wait_for_completion(&wait_result.completion);
+	memcpy(out.remap_base, dst_data, data_len_align16);
+
+	return 0;
+}
+
+
+MODULE_AUTHOR("yu.zhang <zhangy@fullhan.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fullhan AES driver support");
diff --git a/drivers/crypto/fh_aes.h b/drivers/crypto/fh_aes.h
new file mode 100755
index 00000000..2bed7084
--- /dev/null
+++ b/drivers/crypto/fh_aes.h
@@ -0,0 +1,122 @@
+/*
+ * fh_aes.h
+ *
+ * Created on: 3.12.2015
+ * Author: duobao
+ */
+
+#ifndef FH_AES_H_
+#define FH_AES_H_
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/ctr.h>
+
+struct fh_aes_reg { /* memory-mapped register file; comments are byte offsets */
+	u32 encrypt_control; /*0*/
+	u32 reserved_4_8; /*4*/
+	u32 fifo_status; /*8*/
+	u32 parity_error; /*c*/
+	u32 security_key0; /*10*/
+	u32 security_key1; /*14*/
+	u32 security_key2; /*18*/
+	u32 security_key3; /*1c*/
+	u32 security_key4; /*20*/
+	u32 security_key5; /*24*/
+	u32 security_key6; /*28*/
+	u32 security_key7; /*2c*/
+	u32 initial_vector0; /*30*/
+	u32 initial_vector1; /*34*/
+	u32 initial_vector2; /*38*/
+	u32 initial_vector3; /*3c*/
+	u32 reserved_40_44; /*40*/
+	u32 reserved_44_48; /*44*/
+	u32 dma_src_add; /*48*/
+	u32 dma_dst_add; /*4c*/
+	u32 dma_trans_size; /*50*/
+	u32 dma_control; /*54*/
+	u32 fifo_threshold; /*58*/
+	u32 intr_enable; /*5c*/
+	u32 intr_src; /*60*/
+	u32 mask_intr_status; /*64*/
+	u32 intr_clear_status; /*68*/
+	u32 reserved_6c_70; /*6c*/
+	u32 revision; /*70*/
+	u32 feature; /*74*/
+	u32 reserved_78_7c; /*78*/
+	u32 reserved_7c_80; /*7c*/
+	u32 last_initial_vector0; /*80*/
+	u32 last_initial_vector1; /*84*/
+	u32 last_initial_vector2; /*88*/
+	u32 last_initial_vector3; /*8c*/
+};
+
+/* Per-request context: records the cipher/chaining mode bits. */
+struct fh_aes_reqctx {
+	unsigned long mode;
+};
+/* Per-tfm context: key material cached by setkey(). */
+struct fh_aes_ctx {
+	struct fh_aes_dev *dev; /*bind to aes dev..*/
+	uint8_t aes_key[AES_MAX_KEY_SIZE]; /*key bytes recorded at setkey time*/
+	int keylen; /*key length in bytes*/
+};
+
+struct fh_aes_dev { /* one instance per controller (single instance in fh) */
+	/*common driver paras..*/
+	void *regs; /*ioremapped register base (struct fh_aes_reg layout)*/
+	struct device *dev; /*bind to the platform dev...*/
+	struct clk *clk;
+	spinlock_t lock; /*protects queue and register programming*/
+	u32 irq_no; /*board info...*/
+	u32 en_isr; /*software rec the isr src*/
+	bool iv_flag; /*true for modes that consume an IV*/
+	u32 control_reg; /*last value written to encrypt_control*/
+	/*crypto need below...*/
+	struct fh_aes_ctx *ctx; /*bind to the aes ctx...*/
+	struct fh_aes_reqctx *reqctx; /*bind to the req ctx..*/
+	struct scatterlist *sg_src; /*rec the src data need to be handled*/
+	struct scatterlist *sg_dst; /*rec the dst data need to be handled*/
+	struct tasklet_struct tasklet; /*async process the crypto*/
+	struct ablkcipher_request *req; /*active req...*/
+	struct crypto_queue queue; /*pending requests, drained by work*/
+	unsigned char *ctl_src_xbuf; /*aligned alias of ctl_raw_src_xbuf*/
+	unsigned char *ctl_dst_xbuf; /*aligned alias of ctl_raw_dst_xbuf*/
+	unsigned char *ctl_raw_src_xbuf; /*raw kmalloc pointer (for kfree)*/
+	unsigned char *ctl_raw_dst_xbuf; /*raw kmalloc pointer (for kfree)*/
+	struct scatterlist src_sg[1];
+	struct scatterlist dst_sg[1];
+	struct completion done; /*signalled when the active request finishes*/
+	struct workqueue_struct *workqueue;
+	struct work_struct work; /*runs fh_aes_work_cb*/
+	struct af_alg_usr_def *p_usr_def;
+};
+
+
+/*#define FH_AES_SELF_TEST*/
+/*#define FH_AES_DEBUG*/
+#ifdef FH_AES_DEBUG /* AES_DBG compiles away entirely unless this is defined */
+#define AES_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define AES_DBG(fmt, args...) do { } while (0)
+#endif
+
+#define AES_PRINT_RESULT(fmt, args...) printk(fmt, ## args) /* always on */
+
+#endif /* fh_AES_H_ */
+
+
diff --git a/drivers/crypto/fh_aes_test.c b/drivers/crypto/fh_aes_test.c
new file mode 100644
index 00000000..a31a73bd
--- /dev/null
+++ b/drivers/crypto/fh_aes_test.c
@@ -0,0 +1,937 @@
+/*
+ * fh_aes_test.c
+ *
+ * Created on: May 7, 2015
+ * Author: yu.zhang
+ */
+#ifdef CONFIG_FH_AES_SELF_TEST
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/highmem.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <crypto/hash.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <crypto/rng.h>
+#include "fh_aes.h"
+
+
+
+#define XBUFSIZE 128
+
+struct tcrypt_result {
+ struct completion completion;
+ int err;
+};
+
+/*
+ * Self-test completion callback: -EINPROGRESS only signals enqueue, so
+ * wake the waiting test thread on final status only.
+ */
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+	struct tcrypt_result *res = req->data;
+	if (err == -EINPROGRESS)
+		return;
+// res->err = err;
+	AES_DBG("crypt all over....\n");
+	complete(&res->completion);
+
+}
+
+/*
+ * Allocate one page per slot of @buf; on any failure release every page
+ * obtained so far and report -ENOMEM.
+ */
+static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+{
+	int idx = 0;
+
+	while (idx < XBUFSIZE) {
+		buf[idx] = (void *)__get_free_page(GFP_KERNEL);
+		if (!buf[idx]) {
+			while (idx-- > 0)
+				free_page((unsigned long)buf[idx]);
+			return -ENOMEM;
+		}
+		idx++;
+	}
+
+	return 0;
+}
+
+/* Release every page previously handed out by testmgr_alloc_buf(). */
+static void testmgr_free_buf(char *buf[XBUFSIZE])
+{
+	int idx = XBUFSIZE;
+
+	while (idx-- > 0)
+		free_page((unsigned long)buf[idx]);
+}
+
+/*
+ * cesa_data_print - hex-dump @length bytes of @data, @align bytes per
+ * output line, preceded by an optional @string title.
+ */
+void cesa_data_print(char *string, u8 *data, u32 length, u8 align)
+{
+	u32 i;
+
+	if (string != NULL)
+		printk("%s \n", string);
+
+	if (data == NULL)
+		return;
+
+	if (align == 0)		/* was a modulo-by-zero below */
+		align = 16;
+
+	for (i = 0; i < length; i++) {
+		printk("0x%02x,", data[i]);
+		if (((i + 1) % align) == 0)
+			printk("\n");
+	}
+	printk("\n");
+}
+
+static struct tcrypt_result result;
+static const unsigned char aes_plain_text[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2,
+ 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11,
+ 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57,
+ 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac,
+ 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46,
+ 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19,
+ 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45,
+ 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b,
+ 0xe6, 0x6c, 0x37, 0x10,
+};
+
+static const unsigned char aes_ecb_128_encrypt_text[] =
+{
+ 0x3a, 0xd7, 0x7b, 0xb4,
+ 0x0d, 0x7a, 0x36, 0x60,
+ 0xa8, 0x9e, 0xca, 0xf3,
+ 0x24, 0x66, 0xef, 0x97,
+ 0xf5, 0xd3, 0xd5, 0x85,
+ 0x03, 0xb9, 0x69, 0x9d,
+ 0xe7, 0x85, 0x89, 0x5a,
+ 0x96, 0xfd, 0xba, 0xaf,
+ 0x43, 0xb1, 0xcd, 0x7f,
+ 0x59, 0x8e, 0xce, 0x23,
+ 0x88, 0x1b, 0x00, 0xe3,
+ 0xed, 0x03, 0x06, 0x88,
+ 0x7b, 0x0c, 0x78, 0x5e,
+ 0x27, 0xe8, 0xad, 0x3f,
+ 0x82, 0x23, 0x20, 0x71,
+ 0x04, 0x72, 0x5d, 0xd4,
+};
+
+static const unsigned char aes_cbc_128_encrypt_text[] = {
+
+ 0x76, 0x49, 0xab, 0xac,
+ 0x81, 0x19, 0xb2, 0x46,
+ 0xce, 0xe9, 0x8e, 0x9b,
+ 0x12, 0xe9, 0x19, 0x7d,
+ 0x50, 0x86, 0xcb, 0x9b,
+ 0x50, 0x72, 0x19, 0xee,
+ 0x95, 0xdb, 0x11, 0x3a,
+ 0x91, 0x76, 0x78, 0xb2,
+ 0x73, 0xbe, 0xd6, 0xb8,
+ 0xe3, 0xc1, 0x74, 0x3b,
+ 0x71, 0x16, 0xe6, 0x9e,
+ 0x22, 0x22, 0x95, 0x16,
+ 0x3f, 0xf1, 0xca, 0xa1,
+ 0x68, 0x1f, 0xac, 0x09,
+ 0x12, 0x0e, 0xca, 0x30,
+ 0x75, 0x86, 0xe1, 0xa7,
+
+};
+
+static const unsigned char aes_ctr_128_encrypt_text[] =
+{
+ 0x3b, 0x3f, 0xd9, 0x2e,
+ 0xb7, 0x2d, 0xad, 0x20,
+ 0x33, 0x34, 0x49, 0xf8,
+ 0xe8, 0x3c, 0xfb, 0x4a,
+ 0x01, 0x0c, 0x04, 0x19,
+ 0x99, 0xe0, 0x3f, 0x36,
+ 0x44, 0x86, 0x24, 0x48,
+ 0x3e, 0x58, 0x2d, 0x0e,
+ 0xa6, 0x22, 0x93, 0xcf,
+ 0xa6, 0xdf, 0x74, 0x53,
+ 0x5c, 0x35, 0x41, 0x81,
+ 0x16, 0x87, 0x74, 0xdf,
+ 0x2d, 0x55, 0xa5, 0x47,
+ 0x06, 0x27, 0x3c, 0x50,
+ 0xd7, 0xb4, 0xf8, 0xa8,
+ 0xcd, 0xdc, 0x6e, 0xd7,
+};
+
+static const unsigned char aes_ofb_128_encrypt_text[] =
+{
+ 0x3b, 0x3f, 0xd9, 0x2e,
+ 0xb7, 0x2d, 0xad, 0x20,
+ 0x33, 0x34, 0x49, 0xf8,
+ 0xe8, 0x3c, 0xfb, 0x4a,
+ 0x77, 0x89, 0x50, 0x8d,
+ 0x16, 0x91, 0x8f, 0x03,
+ 0xf5, 0x3c, 0x52, 0xda,
+ 0xc5, 0x4e, 0xd8, 0x25,
+ 0x97, 0x40, 0x05, 0x1e,
+ 0x9c, 0x5f, 0xec, 0xf6,
+ 0x43, 0x44, 0xf7, 0xa8,
+ 0x22, 0x60, 0xed, 0xcc,
+ 0x30, 0x4c, 0x65, 0x28,
+ 0xf6, 0x59, 0xc7, 0x78,
+ 0x66, 0xa5, 0x10, 0xd9,
+ 0xc1, 0xd6, 0xae, 0x5e,
+};
+
+static const unsigned char aes_cfb_128_encrypt_text[] =
+{
+ 0x3b, 0x79, 0x42, 0x4c,
+ 0x9c, 0x0d, 0xd4, 0x36,
+ 0xba, 0xce, 0x9e, 0x0e,
+ 0xd4, 0x58, 0x6a, 0x4f,
+};
+
+static const unsigned char aes_ecb_192_encrypt_text[] =
+{
+ 0xbd, 0x33, 0x4f, 0x1d,
+ 0x6e, 0x45, 0xf2, 0x5f,
+ 0xf7, 0x12, 0xa2, 0x14,
+ 0x57, 0x1f, 0xa5, 0xcc,
+ 0x97, 0x41, 0x04, 0x84,
+ 0x6d, 0x0a, 0xd3, 0xad,
+ 0x77, 0x34, 0xec, 0xb3,
+ 0xec, 0xee, 0x4e, 0xef,
+ 0xef, 0x7a, 0xfd, 0x22,
+ 0x70, 0xe2, 0xe6, 0x0a,
+ 0xdc, 0xe0, 0xba, 0x2f,
+ 0xac, 0xe6, 0x44, 0x4e,
+ 0x9a, 0x4b, 0x41, 0xba,
+ 0x73, 0x8d, 0x6c, 0x72,
+ 0xfb, 0x16, 0x69, 0x16,
+ 0x03, 0xc1, 0x8e, 0x0e,
+};
+
+static const unsigned char aes_cbc_192_encrypt_text[] = {
+ 0x4f, 0x02, 0x1d, 0xb2,
+ 0x43, 0xbc, 0x63, 0x3d,
+ 0x71, 0x78, 0x18, 0x3a,
+ 0x9f, 0xa0, 0x71, 0xe8,
+ 0xb4, 0xd9, 0xad, 0xa9,
+ 0xad, 0x7d, 0xed, 0xf4,
+ 0xe5, 0xe7, 0x38, 0x76,
+ 0x3f, 0x69, 0x14, 0x5a,
+ 0x57, 0x1b, 0x24, 0x20,
+ 0x12, 0xfb, 0x7a, 0xe0,
+ 0x7f, 0xa9, 0xba, 0xac,
+ 0x3d, 0xf1, 0x02, 0xe0,
+ 0x08, 0xb0, 0xe2, 0x79,
+ 0x88, 0x59, 0x88, 0x81,
+ 0xd9, 0x20, 0xa9, 0xe6,
+ 0x4f, 0x56, 0x15, 0xcd,
+};
+
+static const unsigned char aes_ctr_192_encrypt_text[] =
+{
+ 0xcd, 0xc8, 0x0d, 0x6f,
+ 0xdd, 0xf1, 0x8c, 0xab,
+ 0x34, 0xc2, 0x59, 0x09,
+ 0xc9, 0x9a, 0x41, 0x74,
+ 0x37, 0xd8, 0xa6, 0x39,
+ 0x17, 0x1f, 0xdc, 0xca,
+ 0x63, 0xeb, 0xd1, 0x7c,
+ 0xe2, 0xd7, 0x32, 0x1a,
+ 0x79, 0xa0, 0xc9, 0x6b,
+ 0x53, 0xc7, 0xee, 0xec,
+ 0xd9, 0xed, 0x71, 0x57,
+ 0xc4, 0x44, 0xfc, 0x7a,
+ 0x84, 0x5c, 0x37, 0xb2,
+ 0xf5, 0x11, 0x69, 0x7b,
+ 0x0e, 0x89, 0xd5, 0xed,
+ 0x60, 0xc4, 0xd4, 0x9e,
+};
+
+static const unsigned char aes_ofb_192_encrypt_text[] =
+{
+ 0xcd, 0xc8, 0x0d, 0x6f,
+ 0xdd, 0xf1, 0x8c, 0xab,
+ 0x34, 0xc2, 0x59, 0x09,
+ 0xc9, 0x9a, 0x41, 0x74,
+ 0xfc, 0xc2, 0x8b, 0x8d,
+ 0x4c, 0x63, 0x83, 0x7c,
+ 0x09, 0xe8, 0x17, 0x00,
+ 0xc1, 0x10, 0x04, 0x01,
+ 0x8d, 0x9a, 0x9a, 0xea,
+ 0xc0, 0xf6, 0x59, 0x6f,
+ 0x55, 0x9c, 0x6d, 0x4d,
+ 0xaf, 0x59, 0xa5, 0xf2,
+ 0x6d, 0x9f, 0x20, 0x08,
+ 0x57, 0xca, 0x6c, 0x3e,
+ 0x9c, 0xac, 0x52, 0x4b,
+ 0xd9, 0xac, 0xc9, 0x2a,
+};
+
+static const unsigned char aes_cfb_192_encrypt_text[] =
+{
+ 0xcd, 0xa2, 0x52, 0x1e,
+ 0xf0, 0xa9, 0x05, 0xca,
+ 0x44, 0xcd, 0x05, 0x7c,
+ 0xbf, 0x0d, 0x47, 0xa0,
+};
+
+static const unsigned char aes_ecb_256_encrypt_text[] =
+{
+ 0xf3, 0xee, 0xd1, 0xbd,
+ 0xb5, 0xd2, 0xa0, 0x3c,
+ 0x06, 0x4b, 0x5a, 0x7e,
+ 0x3d, 0xb1, 0x81, 0xf8,
+ 0x59, 0x1c, 0xcb, 0x10,
+ 0xd4, 0x10, 0xed, 0x26,
+ 0xdc, 0x5b, 0xa7, 0x4a,
+ 0x31, 0x36, 0x28, 0x70,
+ 0xb6, 0xed, 0x21, 0xb9,
+ 0x9c, 0xa6, 0xf4, 0xf9,
+ 0xf1, 0x53, 0xe7, 0xb1,
+ 0xbe, 0xaf, 0xed, 0x1d,
+ 0x23, 0x30, 0x4b, 0x7a,
+ 0x39, 0xf9, 0xf3, 0xff,
+ 0x06, 0x7d, 0x8d, 0x8f,
+ 0x9e, 0x24, 0xec, 0xc7,
+};
+
+static const unsigned char aes_cbc_256_encrypt_text[] = {
+ 0xf5, 0x8c, 0x4c, 0x04,
+ 0xd6, 0xe5, 0xf1, 0xba,
+ 0x77, 0x9e, 0xab, 0xfb,
+ 0x5f, 0x7b, 0xfb, 0xd6,
+ 0x9c, 0xfc, 0x4e, 0x96,
+ 0x7e, 0xdb, 0x80, 0x8d,
+ 0x67, 0x9f, 0x77, 0x7b,
+ 0xc6, 0x70, 0x2c, 0x7d,
+ 0x39, 0xf2, 0x33, 0x69,
+ 0xa9, 0xd9, 0xba, 0xcf,
+ 0xa5, 0x30, 0xe2, 0x63,
+ 0x04, 0x23, 0x14, 0x61,
+ 0xb2, 0xeb, 0x05, 0xe2,
+ 0xc3, 0x9b, 0xe9, 0xfc,
+ 0xda, 0x6c, 0x19, 0x07,
+ 0x8c, 0x6a, 0x9d, 0x1b,
+};
+
+static const unsigned char aes_ctr_256_encrypt_text[] =
+{
+ 0xdc, 0x7e, 0x84, 0xbf,
+ 0xda, 0x79, 0x16, 0x4b,
+ 0x7e, 0xcd, 0x84, 0x86,
+ 0x98, 0x5d, 0x38, 0x60,
+ 0xd5, 0x77, 0x78, 0x8b,
+ 0x8d, 0x8a, 0x85, 0x74,
+ 0x55, 0x13, 0xa5, 0xd5,
+ 0x0f, 0x82, 0x1f, 0x30,
+ 0xff, 0xe9, 0x6d, 0x5c,
+ 0xf5, 0x4b, 0x23, 0x8d,
+ 0xcc, 0x8d, 0x67, 0x83,
+ 0xa8, 0x7f, 0x3b, 0xea,
+ 0xe9, 0xaf, 0x54, 0x63,
+ 0x44, 0xcb, 0x9c, 0xa4,
+ 0xd1, 0xe5, 0x53, 0xff,
+ 0xc0, 0x6b, 0xc7, 0x3e,
+};
+
+static const unsigned char aes_ofb_256_encrypt_text[] =
+{
+ 0xdc, 0x7e, 0x84, 0xbf,
+ 0xda, 0x79, 0x16, 0x4b,
+ 0x7e, 0xcd, 0x84, 0x86,
+ 0x98, 0x5d, 0x38, 0x60,
+ 0x4f, 0xeb, 0xdc, 0x67,
+ 0x40, 0xd2, 0x0b, 0x3a,
+ 0xc8, 0x8f, 0x6a, 0xd8,
+ 0x2a, 0x4f, 0xb0, 0x8d,
+ 0x71, 0xab, 0x47, 0xa0,
+ 0x86, 0xe8, 0x6e, 0xed,
+ 0xf3, 0x9d, 0x1c, 0x5b,
+ 0xba, 0x97, 0xc4, 0x08,
+ 0x01, 0x26, 0x14, 0x1d,
+ 0x67, 0xf3, 0x7b, 0xe8,
+ 0x53, 0x8f, 0x5a, 0x8b,
+ 0xe7, 0x40, 0xe4, 0x84,
+};
+
+static const unsigned char aes_cfb_256_encrypt_text[] =
+{
+ 0xdc, 0x1f, 0x1a, 0x85,
+ 0x20, 0xa6, 0x4d, 0xb5,
+ 0x5f, 0xcc, 0x8a, 0xc5,
+ 0x54, 0x84, 0x4e, 0x88,
+};
+
+static const unsigned char des_ecb_plain_text[] =
+{
+ 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const unsigned char des_ecb_encrypt_text[] =
+{
+ 0xca, 0xee, 0x53, 0x4c,
+ 0x52, 0x3e, 0x1e, 0x79,
+ 0xdc, 0x6c, 0x45, 0x73,
+ 0x61, 0xc7, 0xb5, 0xd9,
+ 0x94, 0xc0, 0x83, 0xb0,
+ 0x53, 0xf8, 0xba, 0x52,
+ 0x10, 0x68, 0xe3, 0xa8,
+ 0x88, 0xd2, 0x00, 0x1c,
+ 0x9b, 0x5a, 0xe1, 0x79,
+ 0x4d, 0xfa, 0x4e, 0x12,
+ 0x17, 0x92, 0xe3, 0xf0,
+ 0x79, 0x62, 0x4d, 0xcb,
+ 0x60, 0x36, 0x31, 0xf0,
+ 0xa2, 0xc5, 0x87, 0x4f,
+ 0x24, 0xc7, 0x4a, 0x9a,
+ 0x79, 0x75, 0x4b, 0xc7,
+};
+
+static const unsigned char des_cbc_plain_text[] =
+{
+ 0x4e, 0x6f, 0x77, 0x20,
+ 0x69, 0x73, 0x20, 0x74,
+ 0x68, 0x65, 0x20, 0x74,
+ 0x69, 0x6d, 0x65, 0x20,
+ 0x66, 0x6f, 0x72, 0x20,
+ 0x61, 0x6c, 0x6c, 0x20,
+
+};
+
+static const unsigned char des_cbc_encrypt_text[] =
+{
+ 0xe5, 0xc7, 0xcd, 0xde,
+ 0x87, 0x2b, 0xf2, 0x7c,
+ 0x43, 0xe9, 0x34, 0x00,
+ 0x8c, 0x38, 0x9c, 0x0f,
+ 0x68, 0x37, 0x88, 0x49,
+ 0x9a, 0x7c, 0x05, 0xf6,
+};
+
+static const unsigned char des_ofb_plain_text[] =
+{
+ 0x4e, 0x6f, 0x77, 0x20,
+ 0x69, 0x73, 0x20, 0x74,
+ 0x43, 0xe9, 0x34, 0x00,
+ 0x8c, 0x38, 0x9c, 0x0f,
+ 0x68, 0x37, 0x88, 0x49,
+ 0x9a, 0x7c, 0x05, 0xf6,
+};
+
+static const unsigned char des_ofb_encrypt_text[] =
+{
+ 0xf3, 0x09, 0x62, 0x49,
+ 0xc7, 0xf4, 0x6e, 0x51,
+ 0x1e, 0x7e, 0x5e, 0x50,
+ 0xcb, 0xbe, 0xc4, 0x10,
+ 0x33, 0x35, 0xa1, 0x8a,
+ 0xde, 0x4a, 0x91, 0x15,
+};
+
+static const unsigned char des_cfb_plain_text[] =
+{
+ 0x4e, 0x6f, 0x77, 0x20,
+ 0x69, 0x73, 0x20, 0x74,
+ 0x43, 0xe9, 0x34, 0x00,
+ 0x8c, 0x38, 0x9c, 0x0f,
+ 0x68, 0x37, 0x88, 0x49,
+ 0x9a, 0x7c, 0x05, 0xf6,
+};
+
+static const unsigned char des_cfb_encrypt_text[] =
+{
+ 0xf3, 0x1f, 0xda, 0x07,
+ 0x01, 0x14, 0x62, 0xee,
+ 0x33, 0x79, 0xda, 0x67,
+ 0xd6, 0xd8, 0x94, 0x3b,
+ 0x71, 0xde, 0xe2, 0xf3,
+ 0x50, 0x80, 0xd6, 0x2b,
+};
+
+static const unsigned char des3_ecb_plain_text[] =
+{
+ 0x4e, 0x6f, 0x77, 0x20,
+ 0x69, 0x73, 0x20, 0x74,
+ 0x43, 0xe9, 0x34, 0x00,
+ 0x8c, 0x38, 0x9c, 0x0f,
+ 0x68, 0x37, 0x88, 0x49,
+ 0x9a, 0x7c, 0x05, 0xf6,
+};
+
+static const unsigned char des3_ecb_encrypt_text[] =
+{
+ 0x31, 0x4f, 0x83, 0x27,
+ 0xfa, 0x7a, 0x09, 0xa8,
+ 0xd5, 0x89, 0x5f, 0xad,
+ 0xe9, 0x8f, 0xae, 0xdf,
+ 0x98, 0xf4, 0x70, 0xeb,
+ 0x35, 0x53, 0xa5, 0xda,
+};
+
+static const unsigned char des3_cbc_plain_text[] =
+{
+ 0x4e, 0x6f, 0x77, 0x20,
+ 0x69, 0x73, 0x20, 0x74,
+ 0x43, 0xe9, 0x34, 0x00,
+ 0x8c, 0x38, 0x9c, 0x0f,
+ 0x68, 0x37, 0x88, 0x49,
+ 0x9a, 0x7c, 0x05, 0xf6,
+};
+
+static const unsigned char des3_cbc_encrypt_text[] =
+{
+ 0xf3, 0xc0, 0xff, 0x02,
+ 0x6c, 0x02, 0x30, 0x89,
+ 0xc4, 0x3a, 0xdd, 0x8f,
+ 0xd8, 0xcd, 0x5e, 0x43,
+ 0x2b, 0xfd, 0x41, 0xd3,
+ 0x13, 0x0b, 0xcf, 0x40,
+};
+
+static const unsigned char des3_ofb_plain_text[] =
+{
+ 0xe6, 0xf7, 0x72, 0x06,
+ 0x97, 0x32, 0x07, 0x04,
+ 0x3e, 0x93, 0x40, 0x08,
+ 0xc3, 0x89, 0xc0, 0x0f,
+ 0x83, 0x78, 0x84, 0x99,
+ 0xa7, 0xc0, 0x5f, 0x06,
+};
+
+static const unsigned char des3_ofb_encrypt_text[] =
+{
+ 0x46, 0xe6, 0xc2, 0x7a,
+ 0xe4, 0x51, 0x34, 0x71,
+ 0xcc, 0x7c, 0x01, 0x7c,
+ 0xa8, 0x82, 0x2b, 0x28,
+ 0x62, 0xf5, 0x7c, 0x40,
+ 0x2a, 0x8a, 0x8b, 0xaf,
+};
+
+static const unsigned char des3_cfb_plain_text[] =
+{
+ 0x4e, 0x6f, 0x77, 0x20,
+ 0x69, 0x73, 0x20, 0x74,
+ 0x43, 0xe9, 0x34, 0x00,
+ 0x8c, 0x38, 0x9c, 0x0f,
+ 0x68, 0x37, 0x88, 0x49,
+ 0x9a, 0x7c, 0x05, 0xf6,
+};
+
+static const unsigned char des3_cfb_encrypt_text[] =
+{
+ 0xee, 0x9b, 0x04, 0xff,
+ 0xca, 0xce, 0xc8, 0x06,
+ 0x5b, 0xcc, 0x4f, 0x67,
+ 0x8f, 0x00, 0xd7, 0x14,
+ 0x4d, 0xf6, 0x74, 0xfc,
+ 0xd6, 0x98, 0x9f, 0x8f,
+};
+
+/*
+ * fh_ablkcipher_self_test - one encrypt/decrypt known-answer test.
+ *
+ * Encrypts @src_data and compares against @res_data, then decrypts
+ * @res_data and compares against @src_data, printing pass/fail for each
+ * direction.  Returns 0 once the test ran, -1 on setup failure.  All
+ * resources are released on every exit path (the original leaked the
+ * page buffers and the tfm on early returns).
+ */
+static int fh_ablkcipher_self_test(const char *alg_name,
+		u8 *src_data,
+		u8 *res_data,
+		u32 len,
+		u8 *key, u32 keylen,
+		u8 *iv
+		)
+{
+	struct crypto_ablkcipher *tfm;
+	struct ablkcipher_request *req;
+	const char *algo;
+	struct scatterlist sg[8];
+	struct scatterlist dst_sg[8];
+	char *xbuf[XBUFSIZE];
+	char *dst_xbuf[XBUFSIZE];
+	void *data;
+	void *dst_data;
+	int ret = -1;
+
+	if (testmgr_alloc_buf(xbuf)) {
+		AES_DBG("no pages.\n");
+		return -1;
+	}
+
+	if (testmgr_alloc_buf(dst_xbuf)) {
+		AES_DBG("no pages.\n");
+		goto out_src_buf;
+	}
+
+	AES_DBG("aes self test get in...\n");
+	AES_DBG(" *_* step 1\n");
+	tfm = crypto_alloc_ablkcipher(alg_name,
+			CRYPTO_ALG_TYPE_ABLKCIPHER |
+			CRYPTO_ALG_ASYNC, 0);
+	if (IS_ERR(tfm)) {
+		AES_DBG("aes_test: failed to alloc cipher!\n");
+		goto out_dst_buf;
+	}
+
+	AES_DBG(" *_* step 2\n");
+	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
+	init_completion(&result.completion);
+
+	AES_DBG(" *_* step 3\n");
+	crypto_ablkcipher_setkey(tfm, (u8 *) key, keylen);
+
+	AES_DBG(" *_* step 4\n");
+	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		AES_DBG(KERN_ERR "alg: skcipher: Failed to allocate request "
+			"for %s\n", algo);
+		goto out_tfm;
+	}
+
+	AES_DBG(" *_* step 5\n");
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+			tcrypt_complete, &result);
+
+	AES_DBG(" *_* step 6\n");
+	data = xbuf[0];
+	dst_data = dst_xbuf[0];
+
+	/* encrypt direction: plain in, expected cipher out */
+	memcpy(data, src_data, len);
+	memset(dst_data, 0, len);
+	sg_init_one(&sg[0], data, len);
+	sg_init_one(&dst_sg[0], dst_data, len);
+
+	AES_DBG(" *_* step 7\n");
+	ablkcipher_request_set_crypt(req, sg, dst_sg, len, (void *)iv);
+
+	AES_DBG(" *_* step 8\n");
+	crypto_ablkcipher_encrypt(req);
+	wait_for_completion(&result.completion);
+
+	if (memcmp(dst_data, res_data, len))
+		AES_PRINT_RESULT(" encrypt error....\n");
+	else
+		AES_PRINT_RESULT(" encrypt ok....\n");
+
+	/* decrypt direction: cipher in, expected plain out */
+	memcpy(data, res_data, len);
+	memset(dst_data, 0, len);
+	sg_init_one(&sg[0], data, len);
+	sg_init_one(&dst_sg[0], dst_data, len);
+	AES_DBG(" *_* step 8\n");
+	ablkcipher_request_set_crypt(req, sg, dst_sg, len, (void *)iv);
+	AES_DBG(" *_* step 9\n");
+	crypto_ablkcipher_decrypt(req);
+	wait_for_completion(&result.completion);
+
+	if (memcmp(dst_data, src_data, len))
+		AES_PRINT_RESULT(" decrypt error....\n");
+	else
+		AES_PRINT_RESULT(" decrypt ok....\n");
+
+	ret = 0;
+	ablkcipher_request_free(req);
+out_tfm:
+	crypto_free_ablkcipher(tfm);
+out_dst_buf:
+	testmgr_free_buf(dst_xbuf);
+out_src_buf:
+	testmgr_free_buf(xbuf);
+	return ret;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * fh_aes_self_test_all - run every known-answer vector through
+ * fh_ablkcipher_self_test(), one banner line per vector.  Table-driven
+ * version of the original call sequence; order, arguments and console
+ * output are unchanged.
+ */
+void fh_aes_self_test_all(void)
+{
+	u32 aes128_key[4] = {
+		0x16157e2b, 0xa6d2ae28, 0x8815f7ab, 0x3c4fcf09
+	};
+	u32 aes_iv[4] = {
+		0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c
+	};
+	u32 aes192_key[] = {
+		0xf7b0738e, 0x52640eda, 0x2bf310c8,
+		0xe5799080, 0xd2eaf862, 0x7b6b2c52,
+	};
+	u32 aes256_key[] = {
+		0x10eb3d60, 0xbe71ca15, 0xf0ae732b, 0x81777d85,
+		0x072c351f, 0xd708613b, 0xa310982d, 0xf4df1409,
+	};
+	u32 des_iv[] = {
+		0x78563412, 0xefcdab90,
+	};
+	u32 des_key[] = {
+		0x67452301, 0xefcdab89,
+	};
+	u32 des3_key[] = {
+		0x67452301, 0xefcdab89, 0x89674523,
+		0x01efcdab, 0xab896745, 0x2301efcd,
+	};
+	/* one row per vector: banner, driver name, plain, cipher, len, key, keylen, iv */
+	const struct {
+		const char *banner;
+		const char *alg;
+		u8 *plain;
+		u8 *cipher;
+		u32 len;
+		u8 *key;
+		u32 keylen;
+		u8 *iv;
+	} vec[] = {
+		{ "aes ecb128 self test go...\n", "ecb-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_ecb_128_encrypt_text,
+		  64, (u8 *)aes128_key, 16, NULL },
+		{ "aes cbc128 self test go...\n", "cbc-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_cbc_128_encrypt_text,
+		  64, (u8 *)aes128_key, 16, (u8 *)aes_iv },
+		{ "aes ctr128 self test go...\n", "ctr-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_ctr_128_encrypt_text,
+		  64, (u8 *)aes128_key, 16, (u8 *)aes_iv },
+		{ "aes ofb128 self test go...\n", "ofb-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_ofb_128_encrypt_text,
+		  64, (u8 *)aes128_key, 16, (u8 *)aes_iv },
+		{ "aes cfb128 self test go...\n", "cfb-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_cfb_128_encrypt_text,
+		  16, (u8 *)aes128_key, 16, (u8 *)aes_iv },
+		{ "aes ecb192 self test go...\n", "ecb-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_ecb_192_encrypt_text,
+		  64, (u8 *)aes192_key, 24, NULL },
+		{ "aes cbc192 self test go...\n", "cbc-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_cbc_192_encrypt_text,
+		  64, (u8 *)aes192_key, 24, (u8 *)aes_iv },
+		{ "aes ctr192 self test go...\n", "ctr-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_ctr_192_encrypt_text,
+		  64, (u8 *)aes192_key, 24, (u8 *)aes_iv },
+		{ "aes ofb192 self test go...\n", "ofb-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_ofb_192_encrypt_text,
+		  64, (u8 *)aes192_key, 24, (u8 *)aes_iv },
+		{ "aes cfb192 self test go...\n", "cfb-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_cfb_192_encrypt_text,
+		  16, (u8 *)aes192_key, 24, (u8 *)aes_iv },
+		{ "aes ecb256 self test go...\n", "ecb-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_ecb_256_encrypt_text,
+		  64, (u8 *)aes256_key, 32, NULL },
+		{ "aes cbc256 self test go...\n", "cbc-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_cbc_256_encrypt_text,
+		  64, (u8 *)aes256_key, 32, (u8 *)aes_iv },
+		{ "aes ctr256 self test go...\n", "ctr-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_ctr_256_encrypt_text,
+		  64, (u8 *)aes256_key, 32, (u8 *)aes_iv },
+		{ "aes ofb256 self test go...\n", "ofb-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_ofb_256_encrypt_text,
+		  64, (u8 *)aes256_key, 32, (u8 *)aes_iv },
+		{ "aes cfb256 self test go...\n", "cfb-aes-fh",
+		  (u8 *)aes_plain_text, (u8 *)aes_cfb_256_encrypt_text,
+		  16, (u8 *)aes256_key, 32, (u8 *)aes_iv },
+		{ "des ecb self test go...\n", "ecb-des-fh",
+		  (u8 *)des_ecb_plain_text, (u8 *)des_ecb_encrypt_text,
+		  64, (u8 *)des_key, 8, NULL },
+		{ "des cbc self test go...\n", "cbc-des-fh",
+		  (u8 *)des_cbc_plain_text, (u8 *)des_cbc_encrypt_text,
+		  24, (u8 *)des_key, 8, (u8 *)des_iv },
+		{ "des ofb self test go...\n", "ofb-des-fh",
+		  (u8 *)des_ofb_plain_text, (u8 *)des_ofb_encrypt_text,
+		  24, (u8 *)des_key, 8, (u8 *)des_iv },
+		{ "des cfb self test go...\n", "cfb-des-fh",
+		  (u8 *)des_cfb_plain_text, (u8 *)des_cfb_encrypt_text,
+		  24, (u8 *)des_key, 8, (u8 *)des_iv },
+		{ "des3 ecb self test go...\n", "ecb-des3-fh",
+		  (u8 *)des3_ecb_plain_text, (u8 *)des3_ecb_encrypt_text,
+		  24, (u8 *)des3_key, 24, NULL },
+		{ "des3 cbc self test go...\n", "cbc-des3-fh",
+		  (u8 *)des3_cbc_plain_text, (u8 *)des3_cbc_encrypt_text,
+		  24, (u8 *)des3_key, 24, (u8 *)des_iv },
+		{ "des3 ofb self test go...\n", "ofb-des3-fh",
+		  (u8 *)des3_ofb_plain_text, (u8 *)des3_ofb_encrypt_text,
+		  24, (u8 *)des3_key, 24, (u8 *)des_iv },
+		{ "des3 cfb self test go...\n", "cfb-des3-fh",
+		  (u8 *)des3_cfb_plain_text, (u8 *)des3_cfb_encrypt_text,
+		  24, (u8 *)des3_key, 24, (u8 *)des_iv },
+	};
+	unsigned int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(vec); idx++) {
+		pr_info("%s", vec[idx].banner);
+		fh_ablkcipher_self_test(vec[idx].alg,
+					vec[idx].plain,
+					vec[idx].cipher,
+					vec[idx].len,
+					vec[idx].key, vec[idx].keylen,
+					vec[idx].iv);
+	}
+}
+
+#endif
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 25cf327c..76f36670 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,6 +89,25 @@ config DW_DMAC
Support the Synopsys DesignWare AHB DMA controller. This
can be integrated in chips such as the Atmel AT32ap7000.
+config FH_DMAC
+ tristate "FH DesignWare AHB DMA support"
+ depends on HAVE_CLK
+ select DMA_ENGINE
+
+ help
+ Support the Synopsys DesignWare AHB DMA controller. This
+ can be integrated in chips such as the FullHan.
+
+if FH_DMAC
+
+config FH_DMAC_MISC
+	bool "FH DMAC Misc Device Enable"
+	default y
+	help
+	  Expose the FH DMAC through a misc character device for userspace access.
+
+endif
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
@@ -271,5 +290,7 @@ config DMATEST
help
Simple DMA test client. Say N unless you're debugging a
DMA Device driver.
-
endif
+
+
+
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 836095ab..252d297a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_FH_DMAC) += fh_dmac.o
\ No newline at end of file
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8bcb15fb..ce1de9b4 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -45,6 +45,9 @@
* See Documentation/dmaengine.txt for more details
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -61,9 +64,9 @@
#include <linux/slab.h>
static DEFINE_MUTEX(dma_list_mutex);
+static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
-static struct idr dma_idr;
/* --- sysfs implementation --- */
@@ -170,7 +173,8 @@ static struct class dma_devclass = {
#define dma_device_satisfies_mask(device, mask) \
__dma_device_satisfies_mask((device), &(mask))
static int
-__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
+__dma_device_satisfies_mask(struct dma_device *device,
+ const dma_cap_mask_t *want)
{
dma_cap_mask_t has;
@@ -260,10 +264,13 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
do {
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
- printk(KERN_ERR "dma_sync_wait_timeout!\n");
+ pr_err("%s: timeout!\n", __func__);
return DMA_ERROR;
}
- } while (status == DMA_IN_PROGRESS);
+ if (status != DMA_IN_PROGRESS)
+ break;
+ cpu_relax();
+ } while (1);
return status;
}
@@ -311,7 +318,7 @@ static int __init dma_channel_table_init(void)
}
if (err) {
- pr_err("dmaengine: initialization failure\n");
+ pr_err("initialization failure\n");
for_each_dma_cap_mask(cap, dma_cap_mask_all)
if (channel_table[cap])
free_percpu(channel_table[cap]);
@@ -331,6 +338,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
}
EXPORT_SYMBOL(dma_find_channel);
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+ struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+ if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+ return NULL;
+
+ return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
/**
* dma_issue_pending_all - flush all pending operations across all channels
*/
@@ -442,7 +463,8 @@ static void dma_channel_rebalance(void)
}
}
-static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
+static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
+ struct dma_device *dev,
dma_filter_fn fn, void *fn_param)
{
struct dma_chan *chan;
@@ -484,7 +506,8 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
*/
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param)
{
struct dma_device *device, *_d;
struct dma_chan *chan = NULL;
@@ -505,12 +528,12 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
err = dma_chan_get(chan);
if (err == -ENODEV) {
- pr_debug("%s: %s module removed\n", __func__,
- dma_chan_name(chan));
+ pr_debug("%s: %s module removed\n",
+ __func__, dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else if (err)
- pr_err("dmaengine: failed to get %s: (%d)\n",
- dma_chan_name(chan), err);
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
else
break;
if (--device->privatecnt == 0)
@@ -520,13 +543,34 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
}
mutex_unlock(&dma_list_mutex);
- pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
+ pr_debug("%s: %s (%s)\n",
+ __func__,
+ chan ? "success" : "fail",
chan ? dma_chan_name(chan) : NULL);
return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
+#if 0
+/**
+ * dma_request_slave_channel - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ */
+struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
+{
+ /* If device-tree is present get slave info from here */
+ if (dev->of_node)
+ return of_dma_request_slave_channel(dev->of_node, name);
+
+ /* If device was enumerated by ACPI get slave info from here */
+ if (ACPI_HANDLE(dev))
+ return acpi_dma_request_slave_chan_by_name(dev, name);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+#endif
void dma_release_channel(struct dma_chan *chan)
{
mutex_lock(&dma_list_mutex);
@@ -563,8 +607,8 @@ void dmaengine_get(void)
list_del_rcu(&device->global_node);
break;
} else if (err)
- pr_err("dmaengine: failed to get %s: (%d)\n",
- dma_chan_name(chan), err);
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
}
}
@@ -647,19 +691,19 @@ static bool device_has_all_tx_types(struct dma_device *device)
static int get_dma_id(struct dma_device *device)
{
int rc;
+ int dma_id;
- idr_retry:
- if (!idr_pre_get(&dma_idr, GFP_KERNEL))
- return -ENOMEM;
mutex_lock(&dma_list_mutex);
- rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
- mutex_unlock(&dma_list_mutex);
- if (rc == -EAGAIN)
- goto idr_retry;
- else if (rc != 0)
- return rc;
- return 0;
+ if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ rc = idr_get_new(&dma_idr, NULL, &dma_id);
+ if (rc >= 0)
+ device->dev_id = dma_id;
+
+ mutex_unlock(&dma_list_mutex);
+ return rc < 0 ? rc : 0;
}
/**
@@ -692,12 +736,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
!device->device_prep_dma_sg);
- BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
!device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
+ BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+ !device->device_prep_interleaved_dma);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
@@ -1000,7 +1044,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
while (tx->cookie == -EBUSY) {
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
pr_err("%s timeout waiting for descriptor submission\n",
- __func__);
+ __func__);
return DMA_ERROR;
}
cpu_relax();
@@ -1049,8 +1093,6 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
- idr_init(&dma_idr);
- mutex_init(&dma_list_mutex);
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
new file mode 100644
index 00000000..17f983a4
--- /dev/null
+++ b/drivers/dma/dmaengine.h
@@ -0,0 +1,89 @@
+/*
+ * The contents of this file are private to DMA engine drivers, and is not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+ chan->cookie = DMA_MIN_COOKIE;
+ chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *chan = tx->chan;
+ dma_cookie_t cookie;
+
+ cookie = chan->cookie + 1;
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;
+ tx->cookie = chan->cookie = cookie;
+
+ return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channel's completed
+ * cookie marker. Zero the descriptor's cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+ BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+ tx->chan->completed_cookie = tx->cookie;
+ tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL. No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ dma_cookie_t used, complete;
+
+ used = chan->cookie;
+ complete = chan->completed_cookie;
+ barrier();
+ if (state) {
+ state->last = complete;
+ state->used = used;
+ state->residue = 0;
+ }
+ return dma_async_is_complete(cookie, complete, used);
+}
+
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+ if (state)
+ state->residue = residue;
+}
+
+#endif
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
deleted file mode 100644
index c3419518..00000000
--- a/drivers/dma/dw_dmac_regs.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Driver for the Synopsys DesignWare AHB DMA Controller
- *
- * Copyright (C) 2005-2007 Atmel Corporation
- * Copyright (C) 2010-2011 ST Microelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/dw_dmac.h>
-
-#define DW_DMA_MAX_NR_CHANNELS 8
-
-/*
- * Redefine this macro to handle differences between 32- and 64-bit
- * addressing, big vs. little endian, etc.
- */
-#define DW_REG(name) u32 name; u32 __pad_##name
-
-/* Hardware register definitions. */
-struct dw_dma_chan_regs {
- DW_REG(SAR); /* Source Address Register */
- DW_REG(DAR); /* Destination Address Register */
- DW_REG(LLP); /* Linked List Pointer */
- u32 CTL_LO; /* Control Register Low */
- u32 CTL_HI; /* Control Register High */
- DW_REG(SSTAT);
- DW_REG(DSTAT);
- DW_REG(SSTATAR);
- DW_REG(DSTATAR);
- u32 CFG_LO; /* Configuration Register Low */
- u32 CFG_HI; /* Configuration Register High */
- DW_REG(SGR);
- DW_REG(DSR);
-};
-
-struct dw_dma_irq_regs {
- DW_REG(XFER);
- DW_REG(BLOCK);
- DW_REG(SRC_TRAN);
- DW_REG(DST_TRAN);
- DW_REG(ERROR);
-};
-
-struct dw_dma_regs {
- /* per-channel registers */
- struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
-
- /* irq handling */
- struct dw_dma_irq_regs RAW; /* r */
- struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
- struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
- struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
-
- DW_REG(STATUS_INT); /* r */
-
- /* software handshaking */
- DW_REG(REQ_SRC);
- DW_REG(REQ_DST);
- DW_REG(SGL_REQ_SRC);
- DW_REG(SGL_REQ_DST);
- DW_REG(LAST_SRC);
- DW_REG(LAST_DST);
-
- /* miscellaneous */
- DW_REG(CFG);
- DW_REG(CH_EN);
- DW_REG(ID);
- DW_REG(TEST);
-
- /* optional encoded params, 0x3c8..0x3 */
-};
-
-/* Bitfields in CTL_LO */
-#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
-#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
-#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
-#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
-#define DWC_CTLL_DST_DEC (1<<7)
-#define DWC_CTLL_DST_FIX (2<<7)
-#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */
-#define DWC_CTLL_SRC_DEC (1<<9)
-#define DWC_CTLL_SRC_FIX (2<<9)
-#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
-#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
-#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
-#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
-#define DWC_CTLL_FC(n) ((n) << 20)
-#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
-#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
-#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
-#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
-/* plus 4 transfer types for peripheral-as-flow-controller */
-#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
-#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
-#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
-#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
-
-/* Bitfields in CTL_HI */
-#define DWC_CTLH_DONE 0x00001000
-#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
-
-/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
-#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
-#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
-#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
-#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
-#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
-#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
-#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
-#define DWC_CFGL_RELOAD_SAR (1 << 30)
-#define DWC_CFGL_RELOAD_DAR (1 << 31)
-
-/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
-#define DWC_CFGH_DS_UPD_EN (1 << 5)
-#define DWC_CFGH_SS_UPD_EN (1 << 6)
-
-/* Bitfields in SGR */
-#define DWC_SGR_SGI(x) ((x) << 0)
-#define DWC_SGR_SGC(x) ((x) << 20)
-
-/* Bitfields in DSR */
-#define DWC_DSR_DSI(x) ((x) << 0)
-#define DWC_DSR_DSC(x) ((x) << 20)
-
-/* Bitfields in CFG */
-#define DW_CFG_DMA_EN (1 << 0)
-
-#define DW_REGLEN 0x400
-
-enum dw_dmac_flags {
- DW_DMA_IS_CYCLIC = 0,
-};
-
-struct dw_dma_chan {
- struct dma_chan chan;
- void __iomem *ch_regs;
- u8 mask;
- u8 priority;
- bool paused;
-
- spinlock_t lock;
-
- /* these other elements are all protected by lock */
- unsigned long flags;
- dma_cookie_t completed;
- struct list_head active_list;
- struct list_head queue;
- struct list_head free_list;
- struct dw_cyclic_desc *cdesc;
-
- unsigned int descs_allocated;
-};
-
-static inline struct dw_dma_chan_regs __iomem *
-__dwc_regs(struct dw_dma_chan *dwc)
-{
- return dwc->ch_regs;
-}
-
-#define channel_readl(dwc, name) \
- readl(&(__dwc_regs(dwc)->name))
-#define channel_writel(dwc, name, val) \
- writel((val), &(__dwc_regs(dwc)->name))
-
-static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct dw_dma_chan, chan);
-}
-
-struct dw_dma {
- struct dma_device dma;
- void __iomem *regs;
- struct tasklet_struct tasklet;
- struct clk *clk;
-
- u8 all_chan_mask;
-
- struct dw_dma_chan chan[0];
-};
-
-static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
-{
- return dw->regs;
-}
-
-#define dma_readl(dw, name) \
- readl(&(__dw_regs(dw)->name))
-#define dma_writel(dw, name, val) \
- writel((val), &(__dw_regs(dw)->name))
-
-#define channel_set_bit(dw, reg, mask) \
- dma_writel(dw, reg, ((mask) << 8) | (mask))
-#define channel_clear_bit(dw, reg, mask) \
- dma_writel(dw, reg, ((mask) << 8) | 0)
-
-static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
-{
- return container_of(ddev, struct dw_dma, dma);
-}
-
-/* LLI == Linked List Item; a.k.a. DMA block descriptor */
-struct dw_lli {
- /* values that are not changed by hardware */
- dma_addr_t sar;
- dma_addr_t dar;
- dma_addr_t llp; /* chain to next lli */
- u32 ctllo;
- /* values that may get written back: */
- u32 ctlhi;
- /* sstat and dstat can snapshot peripheral register state.
- * silicon config may discard either or both...
- */
- u32 sstat;
- u32 dstat;
-};
-
-struct dw_desc {
- /* FIRST values the hardware uses */
- struct dw_lli lli;
-
- /* THEN values for driver housekeeping */
- struct list_head desc_node;
- struct list_head tx_list;
- struct dma_async_tx_descriptor txd;
- size_t len;
-};
-
-static inline struct dw_desc *
-txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
-{
- return container_of(txd, struct dw_desc, txd);
-}
diff --git a/drivers/dma/fh_dmac.c b/drivers/dma/fh_dmac.c
new file mode 100644
index 00000000..e3f1d5b0
--- /dev/null
+++ b/drivers/dma/fh_dmac.c
@@ -0,0 +1,1869 @@
+/*
+ * Core driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include "dmaengine.h"
+#include <mach/fh_dmac_regs.h>
+
+/*
+ * This supports the Synopsys "DesignWare AHB Central DMA Controller",
+ * (FH_ahb_dmac) which is used with various AMBA 2.0 systems (not all
+ * of which use ARM any more). See the "Databook" from Synopsys for
+ * information beyond what licensees probably provide.
+ *
+ * The driver has currently been tested only with the Atmel AT32AP7000,
+ * which does not support descriptor writeback.
+ */
+
+static inline unsigned int fhc_get_dms(struct fh_dma_slave *slave)
+{
+ return slave ? slave->dst_master : 0;
+}
+
+static inline unsigned int fhc_get_sms(struct fh_dma_slave *slave)
+{
+ return slave ? slave->src_master : 1;
+}
+
+static inline void fhc_set_masters(struct fh_dma_chan *fhc)
+{
+ struct fh_dma *fhd = to_fh_dma(fhc->chan.device);
+ struct fh_dma_slave *dms = fhc->chan.private;
+ unsigned char mmax = fhd->nr_masters - 1;
+
+ if (fhc->request_line == ~0) {
+ fhc->src_master = min_t(unsigned char, mmax, fhc_get_sms(dms));
+ fhc->dst_master = min_t(unsigned char, mmax, fhc_get_dms(dms));
+ }
+}
+
+#define FHC_DEFAULT_CTLLO(_chan) ({ \
+ struct fh_dma_chan *_fhc = to_fh_dma_chan(_chan); \
+ struct dma_slave_config *_sconfig = &_fhc->dma_sconfig; \
+ bool _is_slave = is_slave_direction(_fhc->direction); \
+ u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
+ FH_DMA_MSIZE_16; \
+ u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
+ FH_DMA_MSIZE_16; \
+ \
+ (FHC_CTLL_DST_MSIZE(_dmsize) \
+ | FHC_CTLL_SRC_MSIZE(_smsize) \
+ | FHC_CTLL_LLP_D_EN \
+ | FHC_CTLL_LLP_S_EN \
+ | FHC_CTLL_DMS(_fhc->dst_master) \
+ | FHC_CTLL_SMS(_fhc->src_master)); \
+ })
+
+#define FHC_DEFAULT_CTLLO_OLD(private) ({ \
+ struct fh_dma_slave *__slave = (private); \
+ int dms = __slave ? __slave->dst_master : 0; \
+ int sms = __slave ? __slave->src_master : 1; \
+ u8 smsize = __slave ? __slave->src_msize : FH_DMA_MSIZE_16; \
+ u8 dmsize = __slave ? __slave->dst_msize : FH_DMA_MSIZE_16; \
+ \
+ (FHC_CTLL_DST_MSIZE(dmsize) \
+ | FHC_CTLL_SRC_MSIZE(smsize) \
+ | FHC_CTLL_LLP_D_EN \
+ | FHC_CTLL_LLP_S_EN \
+ | FHC_CTLL_DMS(dms) \
+ | FHC_CTLL_SMS(sms)); \
+ })
+
+/*
+ * Number of descriptors to allocate for each channel. This should be
+ * made configurable somehow; preferably, the clients (at least the
+ * ones using slave transfers) should be able to give us a hint.
+ */
+#define NR_DESCS_PER_CHANNEL 4096
+
+/*----------------------------------------------------------------------*/
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+ return chan->dev->device.parent;
+}
+
+static struct fh_desc *fhc_first_active(struct fh_dma_chan *fhc)
+{
+ return to_fh_desc(fhc->active_list.next);
+}
+
+static struct fh_desc *fhc_desc_get(struct fh_dma_chan *fhc)
+{
+ struct fh_desc *desc, *_desc;
+ struct fh_desc *ret = NULL;
+ unsigned int i = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ list_for_each_entry_safe(desc, _desc, &fhc->free_list, desc_node) {
+ i++;
+ if (async_tx_test_ack(&desc->txd)) {
+ list_del(&desc->desc_node);
+ ret = desc;
+ break;
+ }
+ dev_dbg(chan2dev(&fhc->chan), "desc %p not ACKed\n", desc);
+ }
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ dev_vdbg(chan2dev(&fhc->chan), "scanned %u descriptors on freelist\n", i);
+
+ return ret;
+}
+
+/*
+ * Move a descriptor, including any children, to the free list.
+ * `desc' must not be on any lists.
+ */
+static void fhc_desc_put(struct fh_dma_chan *fhc, struct fh_desc *desc)
+{
+ unsigned long flags;
+
+ if (desc) {
+ struct fh_desc *child;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ list_for_each_entry(child, &desc->tx_list, desc_node)
+ dev_vdbg(chan2dev(&fhc->chan),
+ "moving child desc %p to freelist\n",
+ child);
+ list_splice_init(&desc->tx_list, &fhc->free_list);
+ dev_vdbg(chan2dev(&fhc->chan), "moving desc %p to freelist\n", desc);
+ list_add(&desc->desc_node, &fhc->free_list);
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ }
+}
+
+static void fhc_initialize(struct fh_dma_chan *fhc)
+{
+ struct fh_dma *fhd = to_fh_dma(fhc->chan.device);
+ struct fh_dma_slave *dms = fhc->chan.private;
+ u32 cfghi = FHC_CFGH_FIFO_MODE;
+ u32 cfglo = FHC_CFGL_CH_PRIOR(fhc->priority);
+ struct fh_dma_extra *ext_para = &fhc->ext_para;
+
+ if (fhc->initialized == true)
+ return;
+
+ if (dms) {
+ cfghi = dms->cfg_hi;
+ cfglo |= dms->cfg_lo & ~FHC_CFGL_CH_PRIOR_MASK;
+ } else {
+ if (fhc->direction == DMA_MEM_TO_DEV) {
+ cfghi = FHC_CFGH_DST_PER(fhc->request_line);
+ if (ext_para->protctl_flag == PROTCTL_ENABLE) {
+ cfghi &= ~FHC_PROTCTL_MASK;
+ cfghi |= FHC_PROTCTL(ext_para->protctl_data);
+ }
+ } else if (fhc->direction == DMA_DEV_TO_MEM) {
+ cfghi = FHC_CFGH_SRC_PER(fhc->request_line);
+ if (ext_para->protctl_flag == PROTCTL_ENABLE) {
+ cfghi &= ~FHC_PROTCTL_MASK;
+ cfghi |= FHC_PROTCTL(ext_para->protctl_data);
+ }
+ } else if (fhc->direction == DMA_MEM_TO_MEM) {
+ cfghi &= ~FHC_PROTCTL_MASK;
+ cfghi |= FHC_PROTCTL(2);
+ }
+ }
+ channel_writel(fhc, CFG_LO, cfglo);
+ channel_writel(fhc, CFG_HI, cfghi);
+
+ /* Enable interrupts */
+ channel_set_bit(fhd, MASK.XFER, fhc->mask);
+ channel_set_bit(fhd, MASK.BLOCK, fhc->mask);
+ channel_set_bit(fhd, MASK.ERROR, fhc->mask);
+
+ fhc->initialized = true;
+}
+
+/*----------------------------------------------------------------------*/
+
+static inline unsigned int fhc_fast_fls(unsigned long long v)
+{
+ /*
+ * We can be a lot more clever here, but this should take care
+ * of the most common optimization.
+ */
+ if (!(v & 7))
+ return 3;
+ else if (!(v & 3))
+ return 2;
+ else if (!(v & 1))
+ return 1;
+ return 0;
+}
+
+static inline void fhc_dump_chan_regs(struct fh_dma_chan *fhc)
+{
+ dev_err(chan2dev(&fhc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(fhc, SAR),
+ channel_readl(fhc, DAR),
+ channel_readl(fhc, LLP),
+ channel_readl(fhc, CTL_HI),
+ channel_readl(fhc, CTL_LO));
+}
+
+static inline void fhc_chan_disable(struct fh_dma *fhd, struct fh_dma_chan *fhc)
+{
+ channel_clear_bit(fhd, CH_EN, fhc->mask);
+ while (dma_readl(fhd, CH_EN) & fhc->mask)
+ cpu_relax();
+}
+
+/*----------------------------------------------------------------------*/
+
+/* Perform single block transfer */
+static inline void fhc_do_single_block(struct fh_dma_chan *fhc,
+ struct fh_desc *desc)
+{
+ struct fh_dma *fhd = to_fh_dma(fhc->chan.device);
+ u32 ctllo;
+
+	/* Software emulation of LLP mode relies on interrupts to continue
+	 * multi-block transfer. */
+ ctllo = desc->lli.ctllo | FHC_CTLL_INT_EN;
+
+ channel_writel(fhc, SAR, desc->lli.sar);
+ channel_writel(fhc, DAR, desc->lli.dar);
+ channel_writel(fhc, CTL_LO, ctllo);
+ channel_writel(fhc, CTL_HI, desc->lli.ctlhi);
+ channel_set_bit(fhd, CH_EN, fhc->mask);
+
+ /* Move pointer to next descriptor */
+ fhc->tx_node_active = fhc->tx_node_active->next;
+}
+
+/* Called with fhc->lock held and bh disabled */
+static void fhc_dostart(struct fh_dma_chan *fhc, struct fh_desc *first)
+{
+ struct fh_dma *fhd = to_fh_dma(fhc->chan.device);
+ unsigned long was_soft_llp;
+
+ /* ASSERT: channel is idle */
+ if (dma_readl(fhd, CH_EN) & fhc->mask) {
+ dev_err(chan2dev(&fhc->chan),
+ "BUG: Attempted to start non-idle channel\n");
+ fhc_dump_chan_regs(fhc);
+
+ /* The tasklet will hopefully advance the queue... */
+ return;
+ }
+
+ if (fhc->nollp) {
+ was_soft_llp = test_and_set_bit(FH_DMA_IS_SOFT_LLP,
+ &fhc->flags);
+ if (was_soft_llp) {
+ dev_err(chan2dev(&fhc->chan),
+ "BUG: Attempted to start new LLP transfer "
+ "inside ongoing one\n");
+ return;
+ }
+
+ fhc_initialize(fhc);
+
+ fhc->residue = first->total_len;
+ fhc->tx_node_active = &first->tx_list;
+
+ /* Submit first block */
+ fhc_do_single_block(fhc, first);
+
+ return;
+ }
+
+ fhc_initialize(fhc);
+
+ channel_writel(fhc, LLP, first->txd.phys);
+ channel_writel(fhc, CTL_LO,
+ FHC_CTLL_LLP_D_EN | FHC_CTLL_LLP_S_EN);
+ channel_writel(fhc, CTL_HI, 0);
+ channel_set_bit(fhd, CH_EN, fhc->mask);
+}
+
+/*----------------------------------------------------------------------*/
+
+static void
+fhc_descriptor_complete(struct fh_dma_chan *fhc, struct fh_desc *desc,
+ bool callback_required)
+{
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
+ struct dma_async_tx_descriptor *txd = &desc->txd;
+ struct fh_desc *child;
+ unsigned long flags;
+
+ dev_vdbg(chan2dev(&fhc->chan), "descriptor %u complete\n", txd->cookie);
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ dma_cookie_complete(txd);
+ if (callback_required) {
+ callback = txd->callback;
+ param = txd->callback_param;
+ }
+
+ /* async_tx_ack */
+ list_for_each_entry(child, &desc->tx_list, desc_node)
+ async_tx_ack(&child->txd);
+ async_tx_ack(&desc->txd);
+
+ list_splice_init(&desc->tx_list, &fhc->free_list);
+ list_move(&desc->desc_node, &fhc->free_list);
+
+ if (!is_slave_direction(fhc->direction)) {
+ struct device *parent = chan2parent(&fhc->chan);
+ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(parent, desc->lli.dar,
+ desc->total_len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(parent, desc->lli.dar,
+ desc->total_len, DMA_FROM_DEVICE);
+ }
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(parent, desc->lli.sar,
+ desc->total_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(parent, desc->lli.sar,
+ desc->total_len, DMA_TO_DEVICE);
+ }
+ }
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ if (callback)
+ callback(param);
+}
+
+static void fhc_complete_all(struct fh_dma *fhd, struct fh_dma_chan *fhc)
+{
+ struct fh_desc *desc, *_desc;
+ LIST_HEAD(list);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ if (dma_readl(fhd, CH_EN) & fhc->mask) {
+ dev_err(chan2dev(&fhc->chan),
+ "BUG: XFER bit set, but channel not idle!\n");
+
+ /* Try to continue after resetting the channel... */
+ fhc_chan_disable(fhd, fhc);
+ }
+
+ /*
+ * Submit queued descriptors ASAP, i.e. before we go through
+ * the completed ones.
+ */
+ list_splice_init(&fhc->active_list, &list);
+ if (!list_empty(&fhc->queue)) {
+ list_move(fhc->queue.next, &fhc->active_list);
+ fhc_dostart(fhc, fhc_first_active(fhc));
+ }
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ fhc_descriptor_complete(fhc, desc, true);
+}
+
+/* Returns how many bytes were already received from source */
+static inline u32 fhc_get_sent(struct fh_dma_chan *fhc)
+{
+ u32 ctlhi = channel_readl(fhc, CTL_HI);
+ u32 ctllo = channel_readl(fhc, CTL_LO);
+
+ return (ctlhi & FHC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+}
+
+static void fhc_scan_descriptors(struct fh_dma *fhd, struct fh_dma_chan *fhc)
+{
+ dma_addr_t llp;
+ struct fh_desc *desc, *_desc;
+ struct fh_desc *child;
+ u32 status_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ /*
+ * Clear block interrupt flag before scanning so that we don't
+ * miss any, and read LLP before RAW_XFER to ensure it is
+ * valid if we decide to scan the list.
+ */
+ dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
+ llp = channel_readl(fhc, LLP);
+ status_xfer = dma_readl(fhd, RAW.XFER);
+
+ if (status_xfer & fhc->mask) {
+ /* Everything we've submitted is done */
+ dma_writel(fhd, CLEAR.XFER, fhc->mask);
+ if (test_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags)) {
+ struct list_head *head, *active = fhc->tx_node_active;
+
+ /*
+			 * We are inside the first active descriptor.
+ * Otherwise something is really wrong.
+ */
+ desc = fhc_first_active(fhc);
+
+ head = &desc->tx_list;
+ if (active != head) {
+ /* Update desc to reflect last sent one */
+ if (active != head->next)
+ desc = to_fh_desc(active->prev);
+
+ fhc->residue -= desc->len;
+
+ child = to_fh_desc(active);
+
+ /* Submit next block */
+ fhc_do_single_block(fhc, child);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ return;
+ }
+
+ /* We are done here */
+ clear_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags);
+ }
+ fhc->residue = 0;
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ fhc_complete_all(fhd, fhc);
+ return;
+ }
+
+ if (list_empty(&fhc->active_list)) {
+ fhc->residue = 0;
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ return;
+ }
+
+ if (test_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags)) {
+ dev_vdbg(chan2dev(&fhc->chan), "%s: soft LLP mode\n", __func__);
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ return;
+ }
+
+ dev_vdbg(chan2dev(&fhc->chan), "%s: llp=0x%llx\n", __func__,
+ (unsigned long long)llp);
+
+ list_for_each_entry_safe(desc, _desc, &fhc->active_list, desc_node) {
+ /* Initial residue value */
+ fhc->residue = desc->total_len;
+
+		/* Check first descriptor's addr */
+ if (desc->txd.phys == llp) {
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ return;
+ }
+
+		/* Check first descriptor's llp */
+ if (desc->lli.llp == llp) {
+ /* This one is currently in progress */
+ fhc->residue -= fhc_get_sent(fhc);
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ return;
+ }
+
+ fhc->residue -= desc->len;
+ list_for_each_entry(child, &desc->tx_list, desc_node) {
+ if (child->lli.llp == llp) {
+ /* Currently in progress */
+ fhc->residue -= fhc_get_sent(fhc);
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ return;
+ }
+ fhc->residue -= child->len;
+ }
+
+ /*
+ * No descriptors so far seem to be in progress, i.e.
+ * this one must be done.
+ */
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ fhc_descriptor_complete(fhc, desc, true);
+ spin_lock_irqsave(&fhc->lock, flags);
+ }
+
+ dev_err(chan2dev(&fhc->chan),
+ "BUG: All descriptors done, but channel not idle!\n");
+
+ /* Try to continue after resetting the channel... */
+ fhc_chan_disable(fhd, fhc);
+
+ if (!list_empty(&fhc->queue)) {
+ list_move(fhc->queue.next, &fhc->active_list);
+ fhc_dostart(fhc, fhc_first_active(fhc));
+ }
+ spin_unlock_irqrestore(&fhc->lock, flags);
+}
+
+/* Dump one hardware linked-list item (LLI) to the log at crit level. */
+static inline void fhc_dump_lli(struct fh_dma_chan *fhc, struct fh_lli *lli)
+{
+ dev_crit(chan2dev(&fhc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+ lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+
+}
+
+/*
+ * Handle a channel error interrupt: drop the offending descriptor from
+ * the active list, clear the error flag, restart with the next transfer
+ * and complete the bad descriptor as if it had succeeded.
+ */
+static void fhc_handle_error(struct fh_dma *fhd, struct fh_dma_chan *fhc)
+{
+ struct fh_desc *bad_desc;
+ struct fh_desc *child;
+ unsigned long flags;
+
+ fhc_scan_descriptors(fhd, fhc);
+
+ spin_lock_irqsave(&fhc->lock, flags);
+
+ /*
+ * The descriptor currently at the head of the active list is
+ * borked. Since we don't have any way to report errors, we'll
+ * just have to scream loudly and try to carry on.
+ */
+ bad_desc = fhc_first_active(fhc);
+ list_del_init(&bad_desc->desc_node);
+ /* NOTE(review): if fhc->queue is empty, queue.next is the list head
+ * itself and this list_move would splice the head into active_list;
+ * confirm the queue is guaranteed non-empty on this path. */
+ list_move(fhc->queue.next, fhc->active_list.prev);
+
+ /* Clear the error flag and try to restart the controller */
+ dma_writel(fhd, CLEAR.ERROR, fhc->mask);
+ if (!list_empty(&fhc->active_list))
+ fhc_dostart(fhc, fhc_first_active(fhc));
+
+ /*
+ * WARN may seem harsh, but since this only happens
+ * when someone submits a bad physical address in a
+ * descriptor, we should consider ourselves lucky that the
+ * controller flagged an error instead of scribbling over
+ * random memory locations.
+ */
+ dev_WARN(chan2dev(&fhc->chan), "Bad descriptor submitted for DMA!\n"
+ " cookie: %d\n", bad_desc->txd.cookie);
+ fhc_dump_lli(fhc, &bad_desc->lli);
+ list_for_each_entry(child, &bad_desc->tx_list, desc_node)
+ fhc_dump_lli(fhc, &child->lli);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ /* Pretend the descriptor completed successfully */
+ fhc_descriptor_complete(fhc, bad_desc, true);
+}
+
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/* Return the channel's current hardware source address (SAR register). */
+inline dma_addr_t fh_dma_get_src_addr(struct dma_chan *chan)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ return channel_readl(fhc, SAR);
+}
+EXPORT_SYMBOL(fh_dma_get_src_addr);
+
+/* Return the channel's current hardware destination address (DAR register). */
+inline dma_addr_t fh_dma_get_dst_addr(struct dma_chan *chan)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ return channel_readl(fhc, DAR);
+}
+EXPORT_SYMBOL(fh_dma_get_dst_addr);
+
+/* Called with fhc->lock held and all DMAC interrupts disabled */
+/* NOTE(review): the header comment above looks stale -- this function
+ * acquires fhc->lock itself for the error path below; confirm callers
+ * do NOT hold the lock when invoking it (fh_dma_tasklet does not). */
+static void fhc_handle_cyclic(struct fh_dma *fhd, struct fh_dma_chan *fhc,
+ u32 status_err, u32 status_xfer, u32 status_block)
+{
+ unsigned long flags;
+
+ /* Normal case: a period (block) completed -- ack and notify the client */
+ if (status_block & fhc->mask) {
+ void (*callback) (void *param);
+ void *callback_param;
+
+ dev_vdbg(chan2dev(&fhc->chan), "new cyclic period llp 0x%08x\n",
+ channel_readl(fhc, LLP));
+ dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
+
+ callback = fhc->cdesc->period_callback;
+ callback_param = fhc->cdesc->period_callback_param;
+
+ if (callback)
+ callback(callback_param);
+ }
+
+ /*
+ * Error and transfer complete are highly unlikely, and will most
+ * likely be due to a configuration error by the user.
+ */
+ if (unlikely(status_err & fhc->mask) ||
+ unlikely(status_xfer & fhc->mask)) {
+ int i;
+
+ dev_err(chan2dev(&fhc->chan), "cyclic DMA unexpected %s "
+ "interrupt, stopping DMA transfer\n",
+ status_xfer ? "xfer" : "error");
+
+ spin_lock_irqsave(&fhc->lock, flags);
+
+ fhc_dump_chan_regs(fhc);
+
+ fhc_chan_disable(fhd, fhc);
+
+ /* Make sure DMA does not restart by loading a new list */
+ channel_writel(fhc, LLP, 0);
+ channel_writel(fhc, CTL_LO, 0);
+ channel_writel(fhc, CTL_HI, 0);
+
+ dma_writel(fhd, CLEAR.ERROR, fhc->mask);
+ dma_writel(fhd, CLEAR.XFER, fhc->mask);
+ dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
+
+ for (i = 0; i < fhc->cdesc->periods; i++)
+ fhc_dump_lli(fhc, &fhc->cdesc->desc[i]->lli);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ }
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Bottom half: read the raw interrupt status, dispatch each channel to
+ * cyclic/error/transfer handling, then re-enable the interrupts that
+ * fh_dma_interrupt() masked off.
+ */
+static void fh_dma_tasklet(unsigned long data)
+{
+ struct fh_dma *fhd = (struct fh_dma *)data;
+ struct fh_dma_chan *fhc;
+ u32 status_xfer;
+ u32 status_err;
+ u32 status_block;
+ int i;
+
+ status_xfer = dma_readl(fhd, RAW.XFER);
+ status_block = dma_readl(fhd, RAW.BLOCK);
+ status_err = dma_readl(fhd, RAW.ERROR);
+
+ dev_vdbg(fhd->dma.dev, "%s: status_err=%x\n", __func__, status_err);
+
+ for (i = 0; i < fhd->dma.chancnt; i++) {
+ fhc = &fhd->chan[i];
+ if (test_bit(FH_DMA_IS_CYCLIC, &fhc->flags))
+ fhc_handle_cyclic(fhd, fhc, status_err,
+ status_xfer, status_block);
+ else if (status_err & (1 << i))
+ fhc_handle_error(fhd, fhc);
+ else if (status_xfer & (1 << i))
+ fhc_scan_descriptors(fhd, fhc);
+ }
+
+ /*
+ * Re-enable interrupts.
+ */
+ channel_set_bit(fhd, MASK.XFER, fhd->all_chan_mask);
+ channel_set_bit(fhd, MASK.BLOCK, fhd->all_chan_mask);
+ channel_set_bit(fhd, MASK.ERROR, fhd->all_chan_mask);
+}
+
+/*
+ * Hard IRQ handler: mask all channel interrupts and defer the real work
+ * to the tasklet. If anything is still pending after masking, force all
+ * mask registers clear ((1 << 8) - 1 covers the max 8 channels).
+ */
+static irqreturn_t fh_dma_interrupt(int irq, void *dev_id)
+{
+ struct fh_dma *fhd = dev_id;
+ u32 status;
+
+ dev_vdbg(fhd->dma.dev, "%s: status=0x%x\n", __func__,
+ dma_readl(fhd, STATUS_INT));
+
+ /*
+ * Just disable the interrupts. We'll turn them back on in the
+ * softirq handler.
+ */
+ channel_clear_bit(fhd, MASK.XFER, fhd->all_chan_mask);
+ channel_clear_bit(fhd, MASK.BLOCK, fhd->all_chan_mask);
+ channel_clear_bit(fhd, MASK.ERROR, fhd->all_chan_mask);
+
+ status = dma_readl(fhd, STATUS_INT);
+ if (status) {
+ dev_err(fhd->dma.dev,
+ "BUG: Unexpected interrupts pending: 0x%x\n",
+ status);
+
+ /* Try to recover */
+ channel_clear_bit(fhd, MASK.XFER, (1 << 8) - 1);
+ channel_clear_bit(fhd, MASK.BLOCK, (1 << 8) - 1);
+ channel_clear_bit(fhd, MASK.SRC_TRAN, (1 << 8) - 1);
+ channel_clear_bit(fhd, MASK.DST_TRAN, (1 << 8) - 1);
+ channel_clear_bit(fhd, MASK.ERROR, (1 << 8) - 1);
+ }
+
+ tasklet_schedule(&fhd->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * dmaengine tx_submit hook: assign a cookie and either start the
+ * descriptor immediately (idle channel) or append it to the queue.
+ */
+static dma_cookie_t fhc_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct fh_desc *desc = txd_to_fh_desc(tx);
+ struct fh_dma_chan *fhc = to_fh_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ cookie = dma_cookie_assign(tx);
+
+ /*
+ * REVISIT: We should attempt to chain as many descriptors as
+ * possible, perhaps even appending to those already submitted
+ * for DMA. But this is hard to do in a race-free manner.
+ */
+ if (list_empty(&fhc->active_list)) {
+ dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
+ desc->txd.cookie);
+ list_add_tail(&desc->desc_node, &fhc->active_list);
+ fhc_dostart(fhc, fhc_first_active(fhc));
+ } else {
+ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
+ desc->txd.cookie);
+
+ list_add_tail(&desc->desc_node, &fhc->queue);
+ }
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ return cookie;
+}
+
+/*
+ * Prepare a memory-to-memory transfer: pick the widest transfer width
+ * allowed by the masters and the src/dst/len alignment, then split the
+ * copy into a linked list of blocks no larger than fhc->block_size.
+ * Returns NULL if len is zero or descriptor allocation fails.
+ */
+static struct dma_async_tx_descriptor *
+fhc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ struct fh_dma *fhd = to_fh_dma(chan->device);
+ struct fh_desc *desc;
+ struct fh_desc *first;
+ struct fh_desc *prev;
+ size_t xfer_count;
+ size_t offset;
+ unsigned int src_width;
+ unsigned int dst_width;
+ unsigned int data_width;
+ u32 ctllo;
+
+ dev_vdbg(chan2dev(chan),
+ "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
+ (unsigned long long)dest, (unsigned long long)src,
+ len, flags);
+
+ if (unlikely(!len)) {
+ dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+ return NULL;
+ }
+
+ fhc->direction = DMA_MEM_TO_MEM;
+
+ data_width = min_t(unsigned int, fhd->data_width[fhc->src_master],
+ fhd->data_width[fhc->dst_master]);
+
+ /* Width is capped by the lowest set bit of src|dest|len (alignment) */
+ src_width = dst_width = min_t(unsigned int, data_width,
+ fhc_fast_fls(src | dest | len));
+
+ ctllo = FHC_DEFAULT_CTLLO(chan)
+ | FHC_CTLL_DST_WIDTH(dst_width)
+ | FHC_CTLL_SRC_WIDTH(src_width)
+ | FHC_CTLL_DST_INC
+ | FHC_CTLL_SRC_INC
+ | FHC_CTLL_FC_M2M;
+ prev = first = NULL;
+
+ /* Build the LLI chain, one descriptor per hardware block */
+ for (offset = 0; offset < len; offset += xfer_count << src_width) {
+ xfer_count = min_t(size_t, (len - offset) >> src_width,
+ fhc->block_size);
+
+ desc = fhc_desc_get(fhc);
+ if (!desc)
+ goto err_desc_get;
+
+ desc->lli.sar = src + offset;
+ desc->lli.dar = dest + offset;
+ desc->lli.ctllo = ctllo;
+ desc->lli.ctlhi = xfer_count;
+ desc->len = xfer_count << src_width;
+
+ if (!first) {
+ first = desc;
+ } else {
+ prev->lli.llp = desc->txd.phys;
+ list_add_tail(&desc->desc_node,
+ &first->tx_list);
+ }
+ prev = desc;
+ }
+
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Trigger interrupt after last block */
+ prev->lli.ctllo |= FHC_CTLL_INT_EN;
+
+ prev->lli.llp = 0;
+ first->txd.flags = flags;
+ first->total_len = len;
+
+ return &first->txd;
+
+err_desc_get:
+ /* Release the partially-built chain back to the free list */
+ fhc_desc_put(fhc, first);
+ return NULL;
+}
+
+/*
+ * Prepare a slave (peripheral) scatter-gather transfer. The optional
+ * @context carries a struct fh_dma_extra that overrides the src/dst
+ * increment mode and master selection in CTL_LO (sinc at bit 9, dinc at
+ * bit 7 -- presumably matching the DW CTLx SINC/DINC fields; confirm
+ * against the controller databook). Each sg entry is split into blocks
+ * of at most fhc->block_size elements.
+ */
+static struct dma_async_tx_descriptor *
+fhc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ /*struct fh_dma *fhd = to_fh_dma(chan->device);*/
+ struct dma_slave_config *sconfig = &fhc->dma_sconfig;
+ struct fh_desc *prev;
+ struct fh_desc *first;
+ u32 ctllo;
+ dma_addr_t reg;
+ unsigned int reg_width;
+ unsigned int mem_width;
+ unsigned int data_width;
+ unsigned int i;
+ struct scatterlist *sg;
+ size_t total_len = 0;
+ struct fh_dma_extra *ext_para = (struct fh_dma_extra *)context;
+ dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+ if (unlikely(!is_slave_direction(direction) || !sg_len))
+ return NULL;
+
+ fhc->direction = direction;
+
+ prev = first = NULL;
+ if (ext_para)
+ memcpy(&fhc->ext_para, ext_para, sizeof(struct fh_dma_extra));
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ /* Fixed device register destination, incrementing memory source */
+ reg_width = __fls(sconfig->dst_addr_width);
+ reg = sconfig->dst_addr;
+ if(!ext_para){
+ ctllo = (FHC_DEFAULT_CTLLO(chan)
+ | FHC_CTLL_DST_WIDTH(reg_width)
+ | FHC_CTLL_DST_FIX
+ | FHC_CTLL_SRC_INC);
+ }
+ else{
+ /* Caller-supplied increment modes and master routing */
+ ctllo = (FHC_DEFAULT_CTLLO(chan) | FHC_CTLL_DST_WIDTH(reg_width));
+ ctllo |= ext_para->sinc << 9;
+ ctllo |= ext_para->dinc << 7;
+ ctllo &= ~(FHC_CTLL_SMS(3));
+ ctllo &= ~(FHC_CTLL_DMS(3));
+ ctllo |= FHC_CTLL_SMS(ext_para->src_master);
+ ctllo |= FHC_CTLL_DMS(ext_para->dst_master);
+ }
+
+ ctllo |= sconfig->device_fc ? FHC_CTLL_FC(FH_DMA_FC_P_M2P) :
+ FHC_CTLL_FC(FH_DMA_FC_D_M2P);
+
+ /*data_width = fhd->data_width[fhc->src_master];*/
+ data_width = __fls(sconfig->src_addr_width);
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct fh_desc *desc;
+ u32 len, dlen, mem;
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ mem_width = min_t(unsigned int,
+ data_width, fhc_fast_fls(mem | len));
+
+slave_sg_todev_fill_desc:
+ desc = fhc_desc_get(fhc);
+ if (!desc) {
+ dev_err(chan2dev(chan),
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
+ desc->lli.sar = mem;
+ desc->lli.dar = reg;
+ desc->lli.ctllo = ctllo | FHC_CTLL_SRC_WIDTH(mem_width);
+ /* Split sg entries larger than the hardware block size */
+ if ((len >> mem_width) > fhc->block_size) {
+ dlen = fhc->block_size << mem_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+
+ desc->lli.ctlhi = dlen >> mem_width;
+ desc->len = dlen;
+
+ if (!first) {
+ first = desc;
+ } else {
+ prev->lli.llp = desc->txd.phys;
+ list_add_tail(&desc->desc_node,
+ &first->tx_list);
+ }
+ prev = desc;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_todev_fill_desc;
+ }
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Fixed device register source, incrementing memory destination */
+ reg_width = __fls(sconfig->src_addr_width);
+ reg = sconfig->src_addr;
+
+ if(!ext_para){
+ ctllo = (FHC_DEFAULT_CTLLO(chan)
+ | FHC_CTLL_SRC_WIDTH(reg_width)
+ | FHC_CTLL_DST_INC
+ | FHC_CTLL_SRC_FIX);
+ }
+ else{
+ /* Caller-supplied increment modes and master routing */
+ ctllo = (FHC_DEFAULT_CTLLO(chan) | FHC_CTLL_SRC_WIDTH(reg_width));
+ ctllo |= ext_para->sinc << 9;
+ ctllo |= ext_para->dinc << 7;
+ ctllo &= ~(FHC_CTLL_SMS(3));
+ ctllo &= ~(FHC_CTLL_DMS(3));
+ ctllo |= FHC_CTLL_SMS(ext_para->src_master);
+ ctllo |= FHC_CTLL_DMS(ext_para->dst_master);
+ }
+
+
+ ctllo |= sconfig->device_fc ? FHC_CTLL_FC(FH_DMA_FC_P_P2M) :
+ FHC_CTLL_FC(FH_DMA_FC_D_P2M);
+
+ /*data_width = fhd->data_width[fhc->dst_master];*/
+ data_width = __fls(sconfig->dst_addr_width);
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct fh_desc *desc;
+ u32 len, dlen, mem;
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ mem_width = min_t(unsigned int,
+ data_width, fhc_fast_fls(mem | len));
+
+slave_sg_fromdev_fill_desc:
+ desc = fhc_desc_get(fhc);
+ if (!desc) {
+ dev_err(chan2dev(chan),
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
+ desc->lli.sar = reg;
+ desc->lli.dar = mem;
+ desc->lli.ctllo = ctllo | FHC_CTLL_DST_WIDTH(mem_width);
+ /* Split sg entries larger than the hardware block size */
+ if ((len >> reg_width) > fhc->block_size) {
+ dlen = fhc->block_size << reg_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+ desc->lli.ctlhi = dlen >> reg_width;
+ desc->len = dlen;
+
+ if (!first) {
+ first = desc;
+ } else {
+ prev->lli.llp = desc->txd.phys;
+ list_add_tail(&desc->desc_node,
+ &first->tx_list);
+ }
+ prev = desc;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_fromdev_fill_desc;
+ }
+ break;
+ default:
+ return NULL;
+ }
+
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Trigger interrupt after last block */
+ prev->lli.ctllo |= FHC_CTLL_INT_EN;
+
+ prev->lli.llp = 0;
+ first->total_len = total_len;
+
+ return &first->txd;
+
+err_desc_get:
+ /* Release the partially-built chain back to the free list */
+ fhc_desc_put(fhc, first);
+ return NULL;
+}
+
+/*
+ * Fix sconfig's burst size according to fh_dmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * NOTE: burst size 2 is not supported by controller.
+ *
+ * Implemented with fls(): for a power-of-two burst b > 1,
+ * fls(b) - 2 yields the encoded value above.
+ */
+static inline void convert_burst(u32 *maxburst)
+{
+ if (*maxburst > 1)
+ *maxburst = fls(*maxburst) - 2;
+ else
+ *maxburst = 0;
+}
+
+/*
+ * Store a dmaengine slave configuration on the channel, take the request
+ * line from slave_id if none was set yet, and encode the burst sizes
+ * into the controller's representation. Returns -EINVAL for non-slave
+ * directions.
+ */
+static int
+set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+
+ /* Check if chan will be configured for slave transfers */
+ if (!is_slave_direction(sconfig->direction))
+ return -EINVAL;
+
+ memcpy(&fhc->dma_sconfig, sconfig, sizeof(*sconfig));
+ fhc->direction = sconfig->direction;
+
+ /* Take the request line from slave_id member */
+ if (fhc->request_line == ~0)
+ fhc->request_line = sconfig->slave_id;
+
+ convert_burst(&fhc->dma_sconfig.src_maxburst);
+ convert_burst(&fhc->dma_sconfig.dst_maxburst);
+
+ return 0;
+}
+
+/*
+ * Suspend the channel and busy-wait (up to ~20 * 2us) for its FIFO to
+ * drain; marks the channel paused regardless of whether the FIFO
+ * actually emptied in time.
+ */
+static inline void fhc_chan_pause(struct fh_dma_chan *fhc)
+{
+ u32 cfglo = channel_readl(fhc, CFG_LO);
+ unsigned int count = 20; /* timeout iterations */
+
+ channel_writel(fhc, CFG_LO, cfglo | FHC_CFGL_CH_SUSP);
+ while (!(channel_readl(fhc, CFG_LO) & FHC_CFGL_FIFO_EMPTY) && count--)
+ udelay(2);
+
+ fhc->paused = true;
+}
+
+/* Clear the suspend bit and mark the channel running again. */
+static inline void fhc_chan_resume(struct fh_dma_chan *fhc)
+{
+ u32 cfglo = channel_readl(fhc, CFG_LO);
+
+ channel_writel(fhc, CFG_LO, cfglo & ~FHC_CFGL_CH_SUSP);
+
+ fhc->paused = false;
+}
+
+/*
+ * dmaengine device_control hook: PAUSE/RESUME suspend or release the
+ * channel, TERMINATE_ALL disables it and completes every queued and
+ * active descriptor with failure, SLAVE_CONFIG stores a new runtime
+ * configuration. Unknown commands return -ENXIO.
+ */
+static int fhc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ struct fh_dma *fhd = to_fh_dma(chan->device);
+ struct fh_desc *desc, *_desc;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ if (cmd == DMA_PAUSE) {
+ spin_lock_irqsave(&fhc->lock, flags);
+
+ fhc_chan_pause(fhc);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ } else if (cmd == DMA_RESUME) {
+ if (!fhc->paused)
+ return 0;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+
+ fhc_chan_resume(fhc);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ spin_lock_irqsave(&fhc->lock, flags);
+
+ clear_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags);
+
+ fhc_chan_disable(fhd, fhc);
+
+ fhc_chan_resume(fhc);
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&fhc->queue, &list);
+ list_splice_init(&fhc->active_list, &list);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ fhc_descriptor_complete(fhc, desc, false);
+ } else if (cmd == DMA_SLAVE_CONFIG) {
+ return set_runtime_config(chan, (struct dma_slave_config *)arg);
+ } else {
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Snapshot the channel's residue under the lock; in soft-LLP mode the
+ * bytes already sent by the in-flight block are subtracted.
+ */
+static inline u32 fhc_get_residue(struct fh_dma_chan *fhc)
+{
+ unsigned long flags;
+ u32 residue;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+
+ residue = fhc->residue;
+ if (test_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags) && residue)
+ residue -= fhc_get_sent(fhc);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ return residue;
+}
+
+/*
+ * dmaengine tx_status hook: if the cookie has not completed yet, rescan
+ * the descriptors once and retry, report the residue for incomplete
+ * transfers, and report DMA_PAUSED while the channel is suspended.
+ */
+static enum dma_status
+fhc_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret != DMA_SUCCESS) {
+ fhc_scan_descriptors(to_fh_dma(chan->device), fhc);
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ }
+
+ if (ret != DMA_SUCCESS)
+ dma_set_residue(txstate, fhc_get_residue(fhc));
+
+ if (fhc->paused)
+ return DMA_PAUSED;
+
+ return ret;
+}
+
+/*
+ * dmaengine issue_pending hook: kick a descriptor scan, which starts
+ * the next queued transfer if the channel is idle.
+ */
+static void fhc_issue_pending(struct dma_chan *chan)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+
+ if (!list_empty(&fhc->queue))
+ fhc_scan_descriptors(to_fh_dma(chan->device), fhc);
+}
+
+/*
+ * dmaengine alloc_chan_resources hook: verify the channel is idle,
+ * program its master routing, and fill the free list up to
+ * NR_DESCS_PER_CHANNEL descriptors from the DMA pool. Returns the
+ * number of descriptors available (possibly fewer on pool exhaustion).
+ */
+static int fhc_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ struct fh_dma *fhd = to_fh_dma(chan->device);
+ struct fh_desc *desc;
+ int i;
+ unsigned long flags;
+
+ dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+ /* ASSERT: channel is idle */
+ if (dma_readl(fhd, CH_EN) & fhc->mask) {
+ dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
+ return -EIO;
+ }
+
+ dma_cookie_init(chan);
+
+ /*
+ * NOTE: some controllers may have additional features that we
+ * need to initialize here, like "scatter-gather" (which
+ * doesn't mean what you think it means), and status writeback.
+ */
+
+ fhc_set_masters(fhc);
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ i = fhc->descs_allocated;
+ while (fhc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+ dma_addr_t phys;
+
+ /* Drop the lock for the (possibly sleeping-free) pool alloc */
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ desc = dma_pool_alloc(fhd->desc_pool, GFP_ATOMIC, &phys);
+ if (!desc)
+ goto err_desc_alloc;
+
+ memset(desc, 0, sizeof(struct fh_desc));
+
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.tx_submit = fhc_tx_submit;
+ desc->txd.flags = DMA_CTRL_ACK;
+ desc->txd.phys = phys;
+
+ fhc_desc_put(fhc, desc);
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ i = ++fhc->descs_allocated;
+ }
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
+
+ return i;
+
+err_desc_alloc:
+ dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
+
+ return i;
+}
+
+/*
+ * dmaengine free_chan_resources hook: the channel must already be idle
+ * with empty lists (BUG otherwise); disables its interrupts, resets its
+ * bookkeeping and returns every descriptor to the DMA pool.
+ */
+static void fhc_free_chan_resources(struct dma_chan *chan)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ struct fh_dma *fhd = to_fh_dma(chan->device);
+ struct fh_desc *desc, *_desc;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
+ fhc->descs_allocated);
+
+ /* ASSERT: channel is idle */
+ BUG_ON(!list_empty(&fhc->active_list));
+ BUG_ON(!list_empty(&fhc->queue));
+ BUG_ON(dma_readl(to_fh_dma(chan->device), CH_EN) & fhc->mask);
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ list_splice_init(&fhc->free_list, &list);
+ fhc->descs_allocated = 0;
+ fhc->initialized = false;
+ fhc->request_line = ~0;
+
+ /* Disable interrupts */
+ channel_clear_bit(fhd, MASK.XFER, fhc->mask);
+ channel_clear_bit(fhd, MASK.BLOCK, fhc->mask);
+ channel_clear_bit(fhd, MASK.ERROR, fhc->mask);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &list, desc_node) {
+ dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
+ dma_pool_free(fhd->desc_pool, desc, desc->txd.phys);
+ }
+
+ dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
+}
+
+
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/**
+ * fh_dma_cyclic_start - start the cyclic DMA transfer
+ * @chan: the DMA channel to start
+ *
+ * Must be called with soft interrupts disabled. Returns zero on success or
+ * -errno on failure (-ENODEV without a prior fh_dma_cyclic_prep(),
+ * -EBUSY if the channel is not idle).
+ */
+int fh_dma_cyclic_start(struct dma_chan *chan)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ struct fh_dma *fhd = to_fh_dma(fhc->chan.device);
+ unsigned long flags;
+
+ if (!test_bit(FH_DMA_IS_CYCLIC, &fhc->flags)) {
+ dev_err(chan2dev(&fhc->chan), "missing prep for cyclic DMA\n");
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&fhc->lock, flags);
+
+ /* Assert channel is idle */
+ if (dma_readl(fhd, CH_EN) & fhc->mask) {
+ dev_err(chan2dev(&fhc->chan),
+ "BUG: Attempted to start non-idle channel\n");
+ fhc_dump_chan_regs(fhc);
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ return -EBUSY;
+ }
+
+ /* Ack any stale interrupt state before (re)starting */
+ dma_writel(fhd, CLEAR.ERROR, fhc->mask);
+ dma_writel(fhd, CLEAR.XFER, fhc->mask);
+ dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
+
+ fhc_initialize(fhc);
+
+ /* Setup DMAC channel registers */
+ channel_writel(fhc, LLP, fhc->cdesc->desc[0]->txd.phys);
+ channel_writel(fhc, CTL_LO, FHC_CTLL_LLP_D_EN | FHC_CTLL_LLP_S_EN);
+ channel_writel(fhc, CTL_HI, 0);
+
+ channel_set_bit(fhd, CH_EN, fhc->mask);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(fh_dma_cyclic_start);
+
+/**
+ * fh_dma_cyclic_stop - stop the cyclic DMA transfer
+ * @chan: the DMA channel to stop
+ *
+ * Must be called with soft interrupts disabled. Simply disables the
+ * channel; the prepared cyclic descriptor remains valid.
+ */
+void fh_dma_cyclic_stop(struct dma_chan *chan)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ struct fh_dma *fhd = to_fh_dma(fhc->chan.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+
+ fhc_chan_disable(fhd, fhc);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+}
+EXPORT_SYMBOL(fh_dma_cyclic_stop);
+
+/**
+ * fh_dma_cyclic_prep - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ *
+ * Must be called before trying to start the transfer. Returns a valid struct
+ * fh_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+ *
+ * NOTE(review): chan->private is dereferenced below (fhs->reg_width)
+ * without a NULL check -- callers must set it; confirm.
+ */
+struct fh_cyclic_desc *fh_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ struct fh_dma_slave *fhs = chan->private;
+ struct fh_cyclic_desc *cdesc;
+ struct fh_cyclic_desc *retval = NULL;
+ struct fh_desc *desc;
+ struct fh_desc *last = NULL;
+ unsigned long was_cyclic;
+ unsigned int reg_width;
+ unsigned int periods;
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+ if (fhc->nollp) {
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ dev_dbg(chan2dev(&fhc->chan),
+ "channel doesn't support LLP transfers\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!list_empty(&fhc->queue) || !list_empty(&fhc->active_list)) {
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ dev_dbg(chan2dev(&fhc->chan),
+ "queue and/or active list are not empty\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ was_cyclic = test_and_set_bit(FH_DMA_IS_CYCLIC, &fhc->flags);
+ spin_unlock_irqrestore(&fhc->lock, flags);
+ if (was_cyclic) {
+ dev_dbg(chan2dev(&fhc->chan),
+ "channel already prepared for cyclic DMA\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ retval = ERR_PTR(-EINVAL);
+
+ reg_width = fhs->reg_width;
+
+ if (unlikely(!is_slave_direction(direction)))
+ goto out_err;
+
+ fhc->direction = direction;
+
+ periods = buf_len / period_len;
+
+ /* Check for too big/unaligned periods and unaligned DMA buffer. */
+ if (period_len > (fhc->block_size << reg_width))
+ goto out_err;
+ if (unlikely(period_len & ((1 << reg_width) - 1)))
+ goto out_err;
+ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+ goto out_err;
+
+ retval = ERR_PTR(-ENOMEM);
+
+ if (periods > NR_DESCS_PER_CHANNEL)
+ goto out_err;
+
+ cdesc = kzalloc(sizeof(struct fh_cyclic_desc), GFP_KERNEL);
+ if (!cdesc)
+ goto out_err;
+
+ cdesc->desc = kzalloc(sizeof(struct fh_desc *) * periods, GFP_KERNEL);
+ if (!cdesc->desc)
+ goto out_err_alloc;
+
+ /* Build one descriptor per period, each raising a block interrupt */
+ for (i = 0; i < periods; i++) {
+ desc = fhc_desc_get(fhc);
+ if (!desc)
+ goto out_err_desc_get;
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ desc->lli.dar = fhs->tx_reg;
+ desc->lli.sar = buf_addr + (period_len * i);
+ desc->lli.ctllo = (FHC_DEFAULT_CTLLO_OLD(chan->private)
+ | FHC_CTLL_DST_WIDTH(reg_width)
+ | FHC_CTLL_SRC_WIDTH(reg_width)
+ | FHC_CTLL_DST_FIX
+ | FHC_CTLL_SRC_INC
+ | FHC_CTLL_FC(fhs->fc)
+ | FHC_CTLL_INT_EN);
+
+ break;
+ case DMA_DEV_TO_MEM:
+ desc->lli.dar = buf_addr + (period_len * i);
+ desc->lli.sar = fhs->rx_reg;
+ desc->lli.ctllo = (FHC_DEFAULT_CTLLO_OLD(chan->private)
+ | FHC_CTLL_SRC_WIDTH(reg_width)
+ | FHC_CTLL_DST_WIDTH(reg_width)
+ | FHC_CTLL_DST_INC
+ | FHC_CTLL_SRC_FIX
+ | FHC_CTLL_FC(fhs->fc)
+ | FHC_CTLL_INT_EN);
+
+
+ break;
+ default:
+ break;
+ }
+
+ desc->lli.ctlhi = (period_len >> reg_width);
+ cdesc->desc[i] = desc;
+
+ if (last)
+ {
+ last->lli.llp = desc->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan),
+ last->txd.phys,
+ sizeof(last->lli),
+ DMA_TO_DEVICE);
+ }
+
+ last = desc;
+ }
+
+ /* Let's make a cyclic list */
+ last->lli.llp = cdesc->desc[0]->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+ sizeof(last->lli), DMA_TO_DEVICE);
+
+ dev_dbg(chan2dev(&fhc->chan), "cyclic prepared buf 0x%llx len %zu "
+ "period %zu periods %d\n", (unsigned long long)buf_addr,
+ buf_len, period_len, periods);
+
+ cdesc->periods = periods;
+ fhc->cdesc = cdesc;
+
+ return cdesc;
+
+out_err_desc_get:
+ while (i--)
+ fhc_desc_put(fhc, cdesc->desc[i]);
+out_err_alloc:
+ /* NOTE(review): cdesc->desc is not freed on the out_err_desc_get
+ * path -- the period-pointer array leaks; verify. */
+ kfree(cdesc);
+out_err:
+ clear_bit(FH_DMA_IS_CYCLIC, &fhc->flags);
+ return (struct fh_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(fh_dma_cyclic_prep);
+
+/**
+ * fh_dma_cyclic_free - free a prepared cyclic DMA transfer
+ * @chan: the DMA channel to free
+ *
+ * Disables the channel, acks pending interrupt state, returns all
+ * period descriptors to the pool and clears the cyclic flag.
+ */
+void fh_dma_cyclic_free(struct dma_chan *chan)
+{
+ struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
+ struct fh_dma *fhd = to_fh_dma(fhc->chan.device);
+ struct fh_cyclic_desc *cdesc = fhc->cdesc;
+ int i;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(&fhc->chan), "%s\n", __func__);
+
+ if (!cdesc)
+ return;
+
+ spin_lock_irqsave(&fhc->lock, flags);
+
+ fhc_chan_disable(fhd, fhc);
+
+ dma_writel(fhd, CLEAR.ERROR, fhc->mask);
+ dma_writel(fhd, CLEAR.XFER, fhc->mask);
+ dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
+
+ spin_unlock_irqrestore(&fhc->lock, flags);
+
+ for (i = 0; i < cdesc->periods; i++)
+ fhc_desc_put(fhc, cdesc->desc[i]);
+
+ kfree(cdesc->desc);
+ kfree(cdesc);
+
+ clear_bit(FH_DMA_IS_CYCLIC, &fhc->flags);
+}
+EXPORT_SYMBOL(fh_dma_cyclic_free);
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Turn the whole controller off: clear the global enable, mask every
+ * interrupt source, wait for the enable bit to drop and mark all
+ * channels uninitialized.
+ */
+static void fh_dma_off(struct fh_dma *fhd)
+{
+ int i;
+
+ dma_writel(fhd, CFG, 0);
+
+ channel_clear_bit(fhd, MASK.XFER, fhd->all_chan_mask);
+ channel_clear_bit(fhd, MASK.BLOCK, fhd->all_chan_mask);
+ channel_clear_bit(fhd, MASK.SRC_TRAN, fhd->all_chan_mask);
+ channel_clear_bit(fhd, MASK.DST_TRAN, fhd->all_chan_mask);
+ channel_clear_bit(fhd, MASK.ERROR, fhd->all_chan_mask);
+
+ while (dma_readl(fhd, CFG) & FH_CFG_DMA_EN)
+ cpu_relax();
+
+ for (i = 0; i < fhd->dma.chancnt; i++)
+ fhd->chan[i].initialized = false;
+}
+
+/*
+ * Probe: map the register window, read hardware parameters (autocfg) or
+ * take them from platform data, initialize every channel and register
+ * the device with the dmaengine core.
+ *
+ * NOTE(review): several error returns after request_mem_region()/
+ * ioremap() succeed (pdata/fhd allocation, IRQ request, pool creation)
+ * do not unwind the mapping or the mem region, and err_release_r uses
+ * release_resource() rather than release_mem_region() -- verify.
+ */
+static int fh_dma_probe(struct platform_device *pdev)
+{
+ struct fh_dma_platform_data *pdata;
+ struct resource *io;
+ struct fh_dma *fhd;
+ size_t size;
+ void __iomem *regs;
+ bool autocfg;
+ unsigned int fh_params;
+ unsigned int nr_channels;
+ unsigned int max_blk_size = 0;
+ int irq;
+ int err;
+ int i;
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!io)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ if (!request_mem_region(io->start, FH_REGLEN, pdev->dev.driver->name))
+ return -EBUSY;
+
+ regs = ioremap(io->start, FH_REGLEN);
+ if (!regs) {
+ err = -ENOMEM;
+ goto err_release_r;
+ }
+
+ /* Apply default dma_mask if needed */
+ if (!pdev->dev.dma_mask) {
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ }
+
+ fh_params = dma_read_byaddr(regs, FH_PARAMS);
+ autocfg = fh_params >> FH_PARAMS_EN & 0x1;
+
+ dev_dbg(&pdev->dev, "FH_PARAMS: 0x%08x\n", fh_params);
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ if (!pdata && autocfg) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /* Fill platform data with the default values */
+ pdata->is_private = true;
+ pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
+ pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
+ } else if (!pdata || pdata->nr_channels > FH_DMA_MAX_NR_CHANNELS)
+ return -EINVAL;
+
+ if (autocfg)
+ nr_channels = (fh_params >> FH_PARAMS_NR_CHAN & 0x7) + 1;
+ else
+ nr_channels = pdata->nr_channels;
+
+ size = sizeof(struct fh_dma) + nr_channels * sizeof(struct fh_dma_chan);
+ fhd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!fhd)
+ return -ENOMEM;
+
+ /* NOTE(review): on clk_get() failure fhd->clk holds an ERR_PTR that
+ * shutdown/suspend later pass to clk_disable() -- confirm. */
+ fhd->clk = clk_get(&pdev->dev, "dmac0_hclk_gate");
+ if (IS_ERR(fhd->clk))
+ printk(KERN_INFO "cannot get dmac0_hclk\n");
+ else
+ clk_enable(fhd->clk);
+
+ fhd->regs = regs;
+
+ /* Get hardware configuration parameters */
+ if (autocfg) {
+ max_blk_size = dma_readl(fhd, MAX_BLK_SIZE);
+
+ fhd->nr_masters = (fh_params >> FH_PARAMS_NR_MASTER & 3) + 1;
+ for (i = 0; i < fhd->nr_masters; i++) {
+ fhd->data_width[i] =
+ (fh_params >> FH_PARAMS_DATA_WIDTH(i) & 3) + 2;
+ }
+ } else {
+ fhd->nr_masters = pdata->nr_masters;
+ memcpy(fhd->data_width, pdata->data_width, 4);
+ }
+
+ /* Calculate all channel mask before DMA setup */
+ fhd->all_chan_mask = (1 << nr_channels) - 1;
+
+ /* Force dma off, just in case */
+ fh_dma_off(fhd);
+
+ /* Disable BLOCK interrupts as well */
+ channel_clear_bit(fhd, MASK.BLOCK, fhd->all_chan_mask);
+
+ err = devm_request_irq(&pdev->dev, irq, fh_dma_interrupt, 0,
+ dev_name(&pdev->dev), fhd);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, fhd);
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ fhd->desc_pool = dmam_pool_create("fh_dmac_desc_pool", &pdev->dev,
+ sizeof(struct fh_desc), 4, 0);
+ if (!fhd->desc_pool) {
+ dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+ return -ENOMEM;
+ }
+
+ tasklet_init(&fhd->tasklet, fh_dma_tasklet, (unsigned long)fhd);
+
+ /* Per-channel setup: list membership, priority, registers, state */
+ INIT_LIST_HEAD(&fhd->dma.channels);
+ for (i = 0; i < nr_channels; i++) {
+ struct fh_dma_chan *fhc = &fhd->chan[i];
+ int r = nr_channels - i - 1;
+
+ fhc->chan.device = &fhd->dma;
+ dma_cookie_init(&fhc->chan);
+ if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+ list_add_tail(&fhc->chan.device_node,
+ &fhd->dma.channels);
+ else
+ list_add(&fhc->chan.device_node, &fhd->dma.channels);
+
+ /* 7 is highest priority & 0 is lowest. */
+ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+ fhc->priority = r;
+ else
+ fhc->priority = i;
+
+ fhc->ch_regs = &__fh_regs(fhd)->CHAN[i];
+ spin_lock_init(&fhc->lock);
+ fhc->mask = 1 << i;
+
+ INIT_LIST_HEAD(&fhc->active_list);
+ INIT_LIST_HEAD(&fhc->queue);
+ INIT_LIST_HEAD(&fhc->free_list);
+
+ channel_clear_bit(fhd, CH_EN, fhc->mask);
+
+ fhc->direction = DMA_TRANS_NONE;
+ fhc->request_line = ~0;
+
+ /* Hardware configuration */
+ if (autocfg) {
+ unsigned int fhc_params;
+
+ fhc_params = dma_read_byaddr(regs + r * sizeof(u32),
+ FHC_PARAMS);
+
+ dev_dbg(&pdev->dev, "FHC_PARAMS[%d]: 0x%08x\n", i,
+ fhc_params);
+
+ /* Decode maximum block size for given channel. The
+ * stored 4 bit value represents blocks from 0x00 for 3
+ * up to 0x0a for 4095. */
+ fhc->block_size =
+ (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+ fhc->nollp =
+ (fhc_params >> FHC_PARAMS_MBLK_EN & 0x1) == 0;
+ } else {
+ fhc->block_size = pdata->block_size;
+
+ /* Check if channel supports multi block transfer */
+ channel_writel(fhc, LLP, 0xfffffffc);
+ fhc->nollp =
+ (channel_readl(fhc, LLP) & 0xfffffffc) == 0;
+ channel_writel(fhc, LLP, 0);
+ }
+ }
+
+ /* Clear all interrupts on all channels. */
+ dma_writel(fhd, CLEAR.XFER, fhd->all_chan_mask);
+ dma_writel(fhd, CLEAR.BLOCK, fhd->all_chan_mask);
+ dma_writel(fhd, CLEAR.SRC_TRAN, fhd->all_chan_mask);
+ dma_writel(fhd, CLEAR.DST_TRAN, fhd->all_chan_mask);
+ dma_writel(fhd, CLEAR.ERROR, fhd->all_chan_mask);
+
+ dma_cap_set(DMA_MEMCPY, fhd->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, fhd->dma.cap_mask);
+ if (pdata->is_private)
+ dma_cap_set(DMA_PRIVATE, fhd->dma.cap_mask);
+ fhd->dma.dev = &pdev->dev;
+ fhd->dma.device_alloc_chan_resources = fhc_alloc_chan_resources;
+ fhd->dma.device_free_chan_resources = fhc_free_chan_resources;
+
+ fhd->dma.device_prep_dma_memcpy = fhc_prep_dma_memcpy;
+
+ fhd->dma.device_prep_slave_sg = fhc_prep_slave_sg;
+ fhd->dma.device_control = fhc_control;
+
+ fhd->dma.device_tx_status = fhc_tx_status;
+ fhd->dma.device_issue_pending = fhc_issue_pending;
+
+ dma_writel(fhd, CFG, FH_CFG_DMA_EN);
+
+ err = dma_async_device_register(&fhd->dma);
+
+ if(err)
+ pr_err("dma register failed, ret %d\n", err);
+
+ dev_info(&pdev->dev, "FH DMA Controller, %d channels\n",
+ nr_channels);
+
+ return 0;
+
+err_release_r:
+ release_resource(io);
+ return err;
+}
+
+/*
+ * Remove: switch the controller off, unregister from dmaengine, kill
+ * the tasklet and detach every channel.
+ */
+static int fh_dma_remove(struct platform_device *pdev)
+{
+ struct fh_dma *fhd = platform_get_drvdata(pdev);
+ struct fh_dma_chan *fhc, *_fhc;
+
+ fh_dma_off(fhd);
+ dma_async_device_unregister(&fhd->dma);
+
+ tasklet_kill(&fhd->tasklet);
+
+ list_for_each_entry_safe(fhc, _fhc, &fhd->dma.channels,
+ chan.device_node) {
+ list_del(&fhc->chan.device_node);
+ channel_clear_bit(fhd, CH_EN, fhc->mask);
+ }
+
+ return 0;
+}
+
+/* Shutdown: disable the controller and gate its clock. */
+static void fh_dma_shutdown(struct platform_device *pdev)
+{
+ struct fh_dma *fhd = platform_get_drvdata(pdev);
+
+ fh_dma_off(fhd);
+ clk_disable(fhd->clk);
+}
+
+/* PM noirq suspend: disable the controller and gate its clock. */
+static int fh_dma_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fh_dma *fhd = platform_get_drvdata(pdev);
+
+ fh_dma_off(fhd);
+ clk_disable(fhd->clk);
+
+ return 0;
+}
+
+/* PM noirq resume: ungate the clock and re-enable the controller. */
+static int fh_dma_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fh_dma *fhd = platform_get_drvdata(pdev);
+
+ clk_enable(fhd->clk);
+ dma_writel(fhd, CFG, FH_CFG_DMA_EN);
+
+ return 0;
+}
+
+/* All PM entry points funnel into the same noirq suspend/resume pair. */
+static const struct dev_pm_ops fh_dma_dev_pm_ops = {
+ .suspend_noirq = fh_dma_suspend_noirq,
+ .resume_noirq = fh_dma_resume_noirq,
+ .freeze_noirq = fh_dma_suspend_noirq,
+ .thaw_noirq = fh_dma_resume_noirq,
+ .restore_noirq = fh_dma_resume_noirq,
+ .poweroff_noirq = fh_dma_suspend_noirq,
+};
+
+/* Platform driver glue; matches devices named "fh_dmac". */
+static struct platform_driver fh_dma_driver = {
+ .probe = fh_dma_probe,
+ .remove = fh_dma_remove,
+ .shutdown = fh_dma_shutdown,
+ .driver = {
+ .name = "fh_dmac",
+ .pm = &fh_dma_dev_pm_ops,
+ },
+};
+
+/* Registered at subsys_initcall so DMA is up before client drivers. */
+static int __init fh_dma_init(void)
+{
+ return platform_driver_register(&fh_dma_driver);
+}
+subsys_initcall(fh_dma_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit fh_dma_exit(void)
+{
+ platform_driver_unregister(&fh_dma_driver);
+}
+module_exit(fh_dma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("FH DMA Controller driver");
diff --git a/drivers/dma/fh_dmac_regs.h b/drivers/dma/fh_dmac_regs.h
new file mode 100644
index 00000000..8ca1589f
--- /dev/null
+++ b/drivers/dma/fh_dmac_regs.h
@@ -0,0 +1,236 @@
+/*
+ * Driver for the Synopsys DesignWare AHB DMA Controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <mach/fh_dmac.h>
+
+#define FH_DMA_MAX_NR_CHANNELS 8
+
+/*
+ * Redefine this macro to handle differences between 32- and 64-bit
+ * addressing, big vs. little endian, etc.
+ */
+#define FH_REG(name) u32 name; u32 __pad_##name
+
+/* Hardware register definitions. */
+struct fh_dma_chan_regs {
+ FH_REG(SAR); /* Source Address Register */
+ FH_REG(DAR); /* Destination Address Register */
+ FH_REG(LLP); /* Linked List Pointer */
+ u32 CTL_LO; /* Control Register Low */
+ u32 CTL_HI; /* Control Register High */
+ FH_REG(SSTAT);
+ FH_REG(DSTAT);
+ FH_REG(SSTATAR);
+ FH_REG(DSTATAR);
+ u32 CFG_LO; /* Configuration Register Low */
+ u32 CFG_HI; /* Configuration Register High */
+ FH_REG(SGR);
+ FH_REG(DSR);
+};
+
+struct fh_dma_irq_regs {
+ FH_REG(XFER);
+ FH_REG(BLOCK);
+ FH_REG(SRC_TRAN);
+ FH_REG(DST_TRAN);
+ FH_REG(ERROR);
+};
+
+struct fh_dma_regs {
+ /* per-channel registers */
+ struct fh_dma_chan_regs CHAN[FH_DMA_MAX_NR_CHANNELS];
+
+ /* irq handling */
+ struct fh_dma_irq_regs RAW; /* r */
+ struct fh_dma_irq_regs STATUS; /* r (raw & mask) */
+ struct fh_dma_irq_regs MASK; /* rw (set = irq enabled) */
+ struct fh_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
+
+ FH_REG(STATUS_INT); /* r */
+
+ /* software handshaking */
+ FH_REG(REQ_SRC);
+ FH_REG(REQ_DST);
+ FH_REG(SGL_REQ_SRC);
+ FH_REG(SGL_REQ_DST);
+ FH_REG(LAST_SRC);
+ FH_REG(LAST_DST);
+
+ /* miscellaneous */
+ FH_REG(CFG);
+ FH_REG(CH_EN);
+ FH_REG(ID);
+ FH_REG(TEST);
+
+	/* optional encoded params, 0x3c8..0x3f7 */
+};
+
+/* Bitfields in CTL_LO */
+#define FHC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
+#define FHC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
+#define FHC_CTLL_SRC_WIDTH(n) ((n)<<4)
+#define FHC_CTLL_DST_INC (0<<7) /* DAR update/not */
+#define FHC_CTLL_DST_DEC (1<<7)
+#define FHC_CTLL_DST_FIX (2<<7)
+#define FHC_CTLL_SRC_INC (0<<9) /* SAR update/not */
+#define FHC_CTLL_SRC_DEC (1<<9)
+#define FHC_CTLL_SRC_FIX (2<<9)
+#define FHC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
+#define FHC_CTLL_SRC_MSIZE(n) ((n)<<14)
+#define FHC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
+#define FHC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
+#define FHC_CTLL_FC(n) ((n) << 20)
+#define FHC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
+#define FHC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
+#define FHC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
+#define FHC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
+/* plus 4 transfer types for peripheral-as-flow-controller */
+#define FHC_CTLL_DMS(n) ((n)<<23) /* dst master select */
+#define FHC_CTLL_SMS(n) ((n)<<25) /* src master select */
+#define FHC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
+#define FHC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
+
+/* Bitfields in CTL_HI */
+#define FHC_CTLH_DONE 0x00001000
+#define FHC_CTLH_BLOCK_TS_MASK 0x00000fff
+
+/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/FH_dmac.h> */
+#define FHC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
+#define FHC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
+#define FHC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
+#define FHC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
+#define FHC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
+#define FHC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
+#define FHC_CFGL_MAX_BURST(x) ((x) << 20)
+#define FHC_CFGL_RELOAD_SAR (1 << 30)
+#define FHC_CFGL_RELOAD_DAR (1 << 31)
+
+/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/FH_dmac.h> */
+#define FHC_CFGH_DS_UPD_EN (1 << 5)
+#define FHC_CFGH_SS_UPD_EN (1 << 6)
+
+/* Bitfields in SGR */
+#define FHC_SGR_SGI(x) ((x) << 0)
+#define FHC_SGR_SGC(x) ((x) << 20)
+
+/* Bitfields in DSR */
+#define FHC_DSR_DSI(x) ((x) << 0)
+#define FHC_DSR_DSC(x) ((x) << 20)
+
+/* Bitfields in CFG */
+#define FH_CFG_DMA_EN (1 << 0)
+
+#define FH_REGLEN 0x400
+
+enum fh_dmac_flags {
+ FH_DMA_IS_CYCLIC = 0,
+};
+
+struct fh_dma_chan {
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ u8 mask;
+ u8 priority;
+ bool paused;
+ bool initialized;
+ spinlock_t lock;
+
+ /* these other elements are all protected by lock */
+ unsigned long flags;
+ dma_cookie_t completed;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ struct fh_cyclic_desc *cdesc;
+
+ unsigned int descs_allocated;
+
+};
+
+static inline struct fh_dma_chan_regs __iomem *
+__fhc_regs(struct fh_dma_chan *fhc)
+{
+ return fhc->ch_regs;
+}
+
+#define channel_readl(fhc, name) \
+ readl(&(__fhc_regs(fhc)->name))
+#define channel_writel(fhc, name, val) \
+ writel((val), &(__fhc_regs(fhc)->name))
+
+static inline struct fh_dma_chan *to_fh_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fh_dma_chan, chan);
+}
+
+struct fh_dma {
+ struct dma_device dma;
+ void __iomem *regs;
+ struct tasklet_struct tasklet;
+ struct clk *clk;
+
+ u8 all_chan_mask;
+
+ struct fh_dma_chan chan[0];
+};
+
+static inline struct fh_dma_regs __iomem *__fh_regs(struct fh_dma *fh)
+{
+ return fh->regs;
+}
+
+#define dma_readl(fh, name) \
+ readl(&(__fh_regs(fh)->name))
+#define dma_writel(fh, name, val) \
+ writel((val), &(__fh_regs(fh)->name))
+
+#define channel_set_bit(fh, reg, mask) \
+ dma_writel(fh, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(fh, reg, mask) \
+ dma_writel(fh, reg, ((mask) << 8) | 0)
+
+static inline struct fh_dma *to_fh_dma(struct dma_device *ddev)
+{
+ return container_of(ddev, struct fh_dma, dma);
+}
+
+/* LLI == Linked List Item; a.k.a. DMA block descriptor */
+struct fh_lli {
+ /* values that are not changed by hardware */
+ u32 sar;
+ u32 dar;
+ u32 llp; /* chain to next lli */
+ u32 ctllo;
+ /* values that may get written back: */
+ u32 ctlhi;
+ /* sstat and dstat can snapshot peripheral register state.
+ * silicon config may discard either or both...
+ */
+ u32 sstat;
+ u32 dstat;
+};
+
+struct fh_desc {
+ /* FIRST values the hardware uses */
+ struct fh_lli lli;
+
+ /* THEN values for driver housekeeping */
+ struct list_head desc_node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor txd;
+ size_t len;
+};
+
+static inline struct fh_desc *
+txd_to_fh_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct fh_desc, txd);
+}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 2967002a..3780557d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -85,6 +85,11 @@ config GPIO_IT8761E
tristate "IT8761E GPIO support"
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
+
+config GPIO_FH
+ tristate "FH GPIO support"
+ help
+ Say yes here to support GPIO functionality of FH.
config GPIO_EXYNOS4
def_bool y
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b605f8ec..3562c0f9 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o
obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
obj-$(CONFIG_GPIO_IT8761E) += it8761e_gpio.o
+obj-$(CONFIG_GPIO_FH) += fh_gpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
diff --git a/drivers/gpio/fh_gpio.c b/drivers/gpio/fh_gpio.c
new file mode 100644
index 00000000..11e8eb9c
--- /dev/null
+++ b/drivers/gpio/fh_gpio.c
@@ -0,0 +1,543 @@
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm-generic/gpio.h>
+#include <mach/gpio.h>
+
+struct fh_gpio_chip *fh_gpio0, *fh_gpio1;
+
+static inline void __iomem* gpio_to_base(unsigned int gpio)
+{
+ if (gpio >= fh_gpio1->chip.base && gpio < (fh_gpio1->chip.base + fh_gpio1->chip.ngpio))
+ {
+ return fh_gpio1->base;
+ }
+ else if (gpio < (fh_gpio0->chip.base + fh_gpio0->chip.ngpio)) {
+ return fh_gpio0->base;
+ } else {
+ pr_err("ERROR: incorrect GPIO num\n");
+ return NULL;
+ }
+}
+
+static int _set_gpio_irq_type(unsigned int gpio, unsigned int type)
+{
+ u32 int_type, int_polarity;
+ u32 bit = gpio % 32;
+ void __iomem *base;
+ base = gpio_to_base(gpio);
+
+ switch (type & IRQF_TRIGGER_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ int_type = GPIO_INT_TYPE_EDGE;
+ /* toggle trigger */
+ if (FH_GPIO_GetValue((u32)base, bit))
+ int_polarity = GPIO_INT_POL_LOW;
+ else
+ int_polarity = GPIO_INT_POL_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ int_type = GPIO_INT_TYPE_EDGE;
+ int_polarity = GPIO_INT_POL_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ int_type = GPIO_INT_TYPE_EDGE;
+ int_polarity = GPIO_INT_POL_LOW;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ int_type = GPIO_INT_TYPE_LEVEL;
+ int_polarity = GPIO_INT_POL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ int_type = GPIO_INT_TYPE_LEVEL;
+ int_polarity = GPIO_INT_POL_LOW;
+ break;
+ case IRQ_TYPE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ FH_GPIO_SetInterruptType((u32)base, bit, int_type);
+ FH_GPIO_SetInterruptPolarity((u32)base, bit, int_polarity);
+ return 0;
+}
+
+int fh_set_gpio_irq(struct gpio_irq_info * info)
+{
+ void __iomem* base;
+ base = gpio_to_base(info->irq_gpio);
+
+ return _set_gpio_irq_type(info->irq_gpio, info->irq_type);
+}
+EXPORT_SYMBOL(fh_set_gpio_irq);
+
+void fh_irq_enable(unsigned int gpio)
+{
+ void __iomem* base;
+ int gpio_num = gpio % 32;
+ base = gpio_to_base(gpio);
+
+ FH_GPIO_EnableInterrupt((u32)base, gpio_num, TRUE);
+}
+EXPORT_SYMBOL(fh_irq_enable);
+
+void fh_irq_disable(unsigned int gpio)
+{
+ void __iomem* base;
+ int gpio_num = gpio % 32;
+ base = gpio_to_base(gpio);
+
+ FH_GPIO_EnableInterrupt((u32)base, gpio_num, FALSE);
+}
+EXPORT_SYMBOL(fh_irq_disable);
+
+void fh_clear_gpio_irq(int gpio_id)
+{
+ void __iomem* base;
+ int gpio_num = gpio_id % 32;
+ base = gpio_to_base(gpio_id);
+
+ FH_GPIO_ClearInterrupt((u32)base, gpio_num);
+}
+EXPORT_SYMBOL(fh_clear_gpio_irq);
+
+
+static inline void __iomem* irq_to_controller(struct irq_data* d)
+{
+ struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
+
+ if (likely(d->irq >= NR_INTERNAL_IRQS))
+ return fh_gpio->base;
+ pr_err("irq num: %d is not a gpio irq!\n", d->irq);
+ return 0;
+}
+
+static void gpio_irq_ack(struct irq_data* d)
+{
+ void __iomem* base;
+ struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
+ base = irq_to_controller(d);
+
+ FH_GPIO_ClearInterrupt((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base);
+}
+
+static void gpio_irq_enable(struct irq_data *d)
+{
+ void __iomem* base;
+ struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
+ base = irq_to_controller(d);
+
+ FH_GPIO_EnableInterrupt((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base, TRUE);
+}
+
+static void gpio_irq_disable(struct irq_data *d)
+{
+ void __iomem* base;
+ struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
+ base = irq_to_controller(d);
+
+ FH_GPIO_EnableInterrupt((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base, FALSE);
+}
+
+static void gpio_irq_mask(struct irq_data *d)
+{
+ void __iomem* base;
+ struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
+ base = irq_to_controller(d);
+
+ FH_GPIO_EnableInterruptMask((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base, TRUE);
+}
+
+static void gpio_irq_unmask(struct irq_data *d)
+{
+ void __iomem* base;
+ struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
+ base = irq_to_controller(d);
+
+ FH_GPIO_EnableInterruptMask((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base, FALSE);
+}
+
+static int gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ void __iomem* base;
+ base = irq_to_controller(d);
+
+ return _set_gpio_irq_type(d->irq - NR_INTERNAL_IRQS, type);
+}
+
+#ifdef CONFIG_PM
+
+static int gpio_irq_set_wake(struct irq_data *d, unsigned value)
+{
+ struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
+
+ if (unlikely(d->irq >= NR_IRQS))
+ return -EINVAL;
+
+ if (value)
+ fh_gpio->gpio_wakeups |= (1 << (d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base));
+ else
+ fh_gpio->gpio_wakeups &= ~(1 << (d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base));
+
+ return 0;
+}
+
+void fh_gpio_irq_suspend(void)
+{
+ fh_gpio0->gpio_backups = FH_GPIO_GetEnableInterrupts((u32)fh_gpio0->base);
+ fh_gpio1->gpio_backups = FH_GPIO_GetEnableInterrupts((u32)fh_gpio1->base);
+
+ FH_GPIO_SetEnableInterrupts((u32)fh_gpio0->base, fh_gpio0->gpio_wakeups);
+ FH_GPIO_SetEnableInterrupts((u32)fh_gpio1->base, fh_gpio1->gpio_wakeups);
+}
+
+void fh_gpio_irq_resume(void)
+{
+ FH_GPIO_SetEnableInterrupts((u32)fh_gpio0->base, fh_gpio0->gpio_backups);
+ FH_GPIO_SetEnableInterrupts((u32)fh_gpio1->base, fh_gpio1->gpio_backups);
+}
+
+#else
+#define gpio_irq_set_wake NULL
+#endif
+
+static struct irq_chip gpio_irqchip = {
+ .name = "FH_GPIO_INTC",
+ .irq_ack = gpio_irq_ack,
+ .irq_enable = gpio_irq_enable,
+ .irq_disable = gpio_irq_disable,
+ .irq_mask = gpio_irq_mask,
+ .irq_unmask = gpio_irq_unmask,
+ .irq_set_type = gpio_irq_type,
+ .irq_set_wake = gpio_irq_set_wake,
+};
+
+static void gpio_toggle_trigger(unsigned int gpio, unsigned int offs)
+{
+ u32 int_polarity;
+
+ void __iomem* base = gpio_to_base(gpio);
+ if (FH_GPIO_GetValue((u32)base, offs))
+ int_polarity = GPIO_INT_POL_LOW;
+ else
+ int_polarity = GPIO_INT_POL_HIGH;
+
+ printk(">>>>> do trigger gpio=%d, set polarity=%x\n", gpio, int_polarity);
+ FH_GPIO_SetInterruptPolarity((u32)base, offs, int_polarity);
+}
+
+static inline u32 irq_get_trigger_type(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ return d ? irqd_get_trigger_type(d) : 0;
+}
+
+static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+ struct irq_chip *irqchip = irq_data_get_irq_chip(irqdata);
+ struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(irqdata);
+ u32 irq_status;
+ int gpio_num, gpio;
+
+ irq_status = FH_GPIO_GetInterruptStatus((u32)fh_gpio->base);
+
+ if (unlikely(irq_status == 0)) {
+ pr_err("gpio irq status is zero.\n");
+ return;
+ }
+
+ /* temporarily mask (level sensitive) parent IRQ */
+ irqchip->irq_mask(irqdata);
+
+ gpio_num = fls(irq_status) - 1;
+
+ FH_GPIO_ClearInterrupt((u32)fh_gpio->base, gpio_num);
+
+ gpio = gpio_num + fh_gpio->chip.base;
+
+ generic_handle_irq(gpio_to_irq(gpio));
+
+ if ((irq_get_trigger_type(gpio_to_irq(gpio)) & IRQ_TYPE_SENSE_MASK)
+ == IRQ_TYPE_EDGE_BOTH)
+ gpio_toggle_trigger(gpio, gpio_num);
+
+ irqchip->irq_unmask(irqdata);
+ /* now it may re-trigger */
+}
+
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
+static void fh_gpio_irq_init(struct platform_device *pdev)
+{
+ int i, gpio_irq;
+ struct fh_gpio_chip *plat_data;
+
+ plat_data = pdev->dev.platform_data;
+
+ for (i = 0; i < 32; i++) {
+ gpio_irq = i + NR_INTERNAL_IRQS + 32 * pdev->id;
+ irq_set_lockdep_class(gpio_irq, &gpio_lock_class);
+ irq_set_chip_and_handler(gpio_irq, &gpio_irqchip, handle_simple_irq);
+ set_irq_flags(gpio_irq, IRQF_VALID);
+ irq_set_chip_data(gpio_irq, plat_data);
+ }
+
+ irq_set_chip_data(plat_data->irq, plat_data);
+ irq_set_chained_handler(plat_data->irq, gpio_irq_handler);
+ enable_irq_wake(plat_data->irq);
+}
+
+static int chip_to_irq(struct gpio_chip *c, unsigned offset)
+{
+ struct fh_gpio_chip* chip;
+ chip = container_of(c, struct fh_gpio_chip, chip);
+ return offset + NR_INTERNAL_IRQS + chip->chip.base;
+}
+
+static int chip_gpio_get(struct gpio_chip *c, unsigned offset)
+{
+ u32 bit = offset % 32;
+ struct fh_gpio_chip* chip;
+ chip = container_of(c, struct fh_gpio_chip, chip);
+
+ if(offset / 32)
+ return FH_GPIOB_GetValue((u32)chip->base, bit);
+ else
+ return FH_GPIO_GetValue((u32)chip->base, bit);
+}
+
+static void chip_gpio_set(struct gpio_chip *c, unsigned offset, int val)
+{
+ u32 bit = offset % 32;
+ struct fh_gpio_chip* chip;
+ chip = container_of(c, struct fh_gpio_chip, chip);
+ if(offset / 32)
+ FH_GPIOB_SetValue((u32)chip->base, bit, val);
+ else
+ FH_GPIO_SetValue((u32)chip->base, bit, val);
+}
+
+static int chip_direction_input(struct gpio_chip *c, unsigned offset)
+{
+ u32 bit = offset % 32;
+ unsigned long flags;
+ struct fh_gpio_chip* chip;
+ chip = container_of(c, struct fh_gpio_chip, chip);
+ spin_lock_irqsave(&chip->lock, flags);
+ if(offset / 32)
+ FH_GPIOB_SetDirection((u32)chip->base, bit, GPIO_DIR_INPUT);
+ else
+ FH_GPIO_SetDirection((u32)chip->base, bit, GPIO_DIR_INPUT);
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
+{
+ u32 bit = offset % 32;
+ unsigned long flags;
+ struct fh_gpio_chip* chip;
+ chip = container_of(c, struct fh_gpio_chip, chip);
+
+ spin_lock_irqsave(&chip->lock, flags);
+ if(offset / 32)
+ {
+ FH_GPIOB_SetDirection((u32)chip->base, bit, GPIO_DIR_OUTPUT);
+ FH_GPIOB_SetValue((u32)chip->base, bit, val);
+ }
+ else
+ {
+ FH_GPIO_SetDirection((u32)chip->base, bit, GPIO_DIR_OUTPUT);
+ FH_GPIO_SetValue((u32)chip->base, bit, val);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int chip_gpio_set_debounce(struct gpio_chip *c, unsigned offset,
+ unsigned debounce)
+{
+ u32 bit = offset % 32;
+ unsigned long flags;
+ char db_clk_name[16] = {0};
+ struct clk *gpio_clk = NULL;
+ int ret = 0;
+ struct fh_gpio_chip *chip;
+ bool enabled = !!debounce;
+ unsigned int clk_rate = 0;
+
+ sprintf(db_clk_name, "gpio%d_dbclk", (offset / 32));
+ gpio_clk = clk_get(NULL, db_clk_name);
+ if (IS_ERR(gpio_clk))
+ return PTR_ERR(gpio_clk);
+
+ clk_rate = 1000000UL / debounce;
+
+ ret = clk_set_rate(gpio_clk, clk_rate);
+ if (ret) {
+ pr_err("Set GPIO Debounce Clk fail\n");
+ return ret;
+ }
+
+ ret = clk_enable(gpio_clk);
+ if (ret) {
+ pr_err("Set GPIO Debounce Clk fail\n");
+ return ret;
+ }
+
+ chip = container_of(c, struct fh_gpio_chip, chip);
+ spin_lock_irqsave(&chip->lock, flags);
+ FH_GPIO_EnableDebounce((u32)chip->base, bit, enabled);
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+void fh_gpio_set(int gpio_id, int value)
+{
+ __gpio_set_value(gpio_id, value);
+}
+EXPORT_SYMBOL(fh_gpio_set);
+
+int fh_gpio_get(int gpio_id, int* value)
+{
+ *value = __gpio_get_value(gpio_id);
+ return 0;
+}
+EXPORT_SYMBOL(fh_gpio_get);
+
+int fh_gpio_reset(int gpio_id)
+{
+ return 0;
+}
+EXPORT_SYMBOL(fh_gpio_reset);
+
+static int __devinit fh_gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int err = -EIO;
+ struct fh_gpio_chip *plat_data;
+
+ /* There are two ways to get the GPIO base address; one is by
+ * fetching it from MSR_LBAR_GPIO, the other is by reading the
+ * PCI BAR info. The latter method is easier (especially across
+ * different architectures), so we'll stick with that for now. If
+ * it turns out to be unreliable in the face of crappy BIOSes, we
+ * can always go back to using MSRs.. */
+
+ plat_data = pdev->dev.platform_data;
+ plat_data->chip.get = chip_gpio_get;
+ plat_data->chip.set = chip_gpio_set;
+ plat_data->chip.direction_input = chip_direction_input;
+ plat_data->chip.direction_output = chip_direction_output;
+ plat_data->chip.to_irq = chip_to_irq;
+ plat_data->chip.set_debounce = chip_gpio_set_debounce;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ goto done;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "can't request region\n");
+ goto done;
+ }
+
+ /* set up the driver-specific struct */
+ plat_data->base = ioremap(res->start, resource_size(res));
+
+ if(pdev->id)
+ fh_gpio1 = plat_data;
+ else
+ fh_gpio0 = plat_data;
+
+ plat_data->pdev = pdev;
+ spin_lock_init(&plat_data->lock);
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ pr_err("%s: ERROR: getting resource failed"
+ "cannot get IORESOURCE_MEM\n", __func__);
+ goto release_region;
+ }
+ plat_data->irq = res->start;
+
+ /* finally, register with the generic GPIO API */
+ err = gpiochip_add(&plat_data->chip);
+ if (err) {
+ pr_err("GPIO support load fail.\n");
+ goto release_region;
+ }
+
+ fh_gpio_irq_init(pdev);
+ pr_debug("GPIO support successfully loaded.\n\tBase Addr: 0x%p\n",
+ plat_data->base);
+
+ return 0;
+
+release_region:
+ release_region(res->start, resource_size(res));
+done:
+ return err;
+}
+
+static int __devexit fh_gpio_remove(struct platform_device *pdev)
+{
+ struct resource *r;
+ int err;
+ struct fh_gpio_chip *plat_data;
+
+ plat_data = pdev->dev.platform_data;
+ err = gpiochip_remove(&plat_data->chip);
+ if (err) {
+ dev_err(&pdev->dev, "unable to remove gpio_chip\n");
+ return err;
+ }
+
+ iounmap(plat_data->base);
+
+ r = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ release_region(r->start, resource_size(r));
+ return 0;
+}
+
+static struct platform_driver fh_gpio_driver = {
+ .driver = {
+ .name = GPIO_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = fh_gpio_probe,
+ .remove = __devexit_p(fh_gpio_remove),
+};
+
+static int __init fh_gpio_init(void)
+{
+ return platform_driver_register(&fh_gpio_driver);
+}
+
+static void __exit fh_gpio_exit(void)
+{
+ platform_driver_unregister(&fh_gpio_driver);
+}
+
+module_init(fh_gpio_init);
+module_exit(fh_gpio_exit);
+
+MODULE_AUTHOR("QIN");
+MODULE_DESCRIPTION("FH GPIO Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform: FH");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 646068e5..82347f92 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -298,7 +298,18 @@ config I2C_AT91
documented way to issue repeated START conditions, as needed
to support combined I2C messages. Use the i2c-gpio driver
unless your system can cope with those limitations.
+
+config I2C_FH_INTERRUPT
+ tristate "FH I2C Driver with Interrupt"
+ help
+ This supports the use of the I2C interface on Fullhan
+ processors.
+
+ Only master mode is supported.
+ This driver can also be built as a module. If so, the module
+	  will be called i2c_fh_interrupt.
+
config I2C_AU1550
tristate "Au1550/Au1200 SMBus interface"
depends on SOC_AU1550 || SOC_AU1200
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index e6cf294d..93dbee32 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -82,5 +82,6 @@ obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
+obj-$(CONFIG_I2C_FH_INTERRUPT) += i2c_fh_interrupt.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/busses/i2c_fh_interrupt.c b/drivers/i2c/busses/i2c_fh_interrupt.c
new file mode 100644
index 00000000..fa3daa47
--- /dev/null
+++ b/drivers/i2c/busses/i2c_fh_interrupt.c
@@ -0,0 +1,938 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <mach/i2c.h>
+//#define FH_I2C_DEBUG
+
+#ifdef FH_I2C_DEBUG
+#define PRINT_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define PRINT_DBG(fmt, args...) do { } while (0)
+#endif
+
+/*
+ * Registers offset
+ */
+#define DW_IC_CON 0x0
+#define DW_IC_TAR 0x4
+#define DW_IC_DATA_CMD 0x10
+#define DW_IC_SS_SCL_HCNT 0x14
+#define DW_IC_SS_SCL_LCNT 0x18
+#define DW_IC_FS_SCL_HCNT 0x1c
+#define DW_IC_FS_SCL_LCNT 0x20
+#define DW_IC_INTR_STAT 0x2c
+#define DW_IC_INTR_MASK 0x30
+#define DW_IC_RAW_INTR_STAT 0x34
+#define DW_IC_RX_TL 0x38
+#define DW_IC_TX_TL 0x3c
+#define DW_IC_CLR_INTR 0x40
+#define DW_IC_CLR_RX_UNDER 0x44
+#define DW_IC_CLR_RX_OVER 0x48
+#define DW_IC_CLR_TX_OVER 0x4c
+#define DW_IC_CLR_RD_REQ 0x50
+#define DW_IC_CLR_TX_ABRT 0x54
+#define DW_IC_CLR_RX_DONE 0x58
+#define DW_IC_CLR_ACTIVITY 0x5c
+#define DW_IC_CLR_STOP_DET 0x60
+#define DW_IC_CLR_START_DET 0x64
+#define DW_IC_CLR_GEN_CALL 0x68
+#define DW_IC_ENABLE 0x6c
+#define DW_IC_STATUS 0x70
+#define DW_IC_TXFLR 0x74
+#define DW_IC_RXFLR 0x78
+#define DW_IC_COMP_PARAM_1 0xf4
+#define DW_IC_TX_ABRT_SOURCE 0x80
+
+#define DW_IC_CON_MASTER 0x1
+#define DW_IC_CON_SPEED_STD 0x2
+#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_10BITADDR_MASTER 0x10
+#define DW_IC_CON_RESTART_EN 0x20
+#define DW_IC_CON_SLAVE_DISABLE 0x40
+
+#define DW_IC_INTR_RX_UNDER 0x001
+#define DW_IC_INTR_RX_OVER 0x002
+#define DW_IC_INTR_RX_FULL 0x004
+#define DW_IC_INTR_TX_OVER 0x008
+#define DW_IC_INTR_TX_EMPTY 0x010
+#define DW_IC_INTR_RD_REQ 0x020
+#define DW_IC_INTR_TX_ABRT 0x040
+#define DW_IC_INTR_RX_DONE 0x080
+#define DW_IC_INTR_ACTIVITY 0x100
+#define DW_IC_INTR_STOP_DET 0x200
+#define DW_IC_INTR_START_DET 0x400
+#define DW_IC_INTR_GEN_CALL 0x800
+
+#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \
+ DW_IC_INTR_TX_EMPTY | \
+ DW_IC_INTR_TX_ABRT | \
+ DW_IC_INTR_STOP_DET)
+
+#define DW_IC_STATUS_ACTIVITY 0x1
+#define DW_IC_STATUS_MASTER_ACTIVITY 0x20
+
+#define DW_IC_ERR_TX_ABRT 0x1
+
+/*
+ * status codes
+ */
+#define STATUS_IDLE 0x0
+#define STATUS_WRITE_IN_PROGRESS 0x1
+#define STATUS_READ_IN_PROGRESS 0x2
+
+#define TIMEOUT 20 /* ms */
+
+/*
+ * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
+ *
+ * only expected abort codes are listed here
+ * refer to the datasheet for the full list
+ */
+#define ABRT_7B_ADDR_NOACK 0
+#define ABRT_10ADDR1_NOACK 1
+#define ABRT_10ADDR2_NOACK 2
+#define ABRT_TXDATA_NOACK 3
+#define ABRT_GCALL_NOACK 4
+#define ABRT_GCALL_READ 5
+#define ABRT_SBYTE_ACKDET 7
+#define ABRT_SBYTE_NORSTRT 9
+#define ABRT_10B_RD_NORSTRT 10
+#define ABRT_MASTER_DIS 11
+#define ARB_LOST 12
+
+#define DW_IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK)
+#define DW_IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK)
+#define DW_IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK)
+#define DW_IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK)
+#define DW_IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK)
+#define DW_IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ)
+#define DW_IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET)
+#define DW_IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT)
+#define DW_IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT)
+#define DW_IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS)
+#define DW_IC_TX_ARB_LOST (1UL << ARB_LOST)
+
+#define DW_IC_TX_ABRT_NOACK (DW_IC_TX_ABRT_7B_ADDR_NOACK | \
+ DW_IC_TX_ABRT_10ADDR1_NOACK | \
+ DW_IC_TX_ABRT_10ADDR2_NOACK | \
+ DW_IC_TX_ABRT_TXDATA_NOACK | \
+ DW_IC_TX_ABRT_GCALL_NOACK)
+
+static char *abort_sources[] = {
+ [ABRT_7B_ADDR_NOACK] =
+ "slave address not acknowledged (7bit mode)",
+ [ABRT_10ADDR1_NOACK] =
+ "first address byte not acknowledged (10bit mode)",
+ [ABRT_10ADDR2_NOACK] =
+ "second address byte not acknowledged (10bit mode)",
+ [ABRT_TXDATA_NOACK] =
+ "data not acknowledged",
+ [ABRT_GCALL_NOACK] =
+ "no acknowledgement for a general call",
+ [ABRT_GCALL_READ] =
+ "read after general call",
+ [ABRT_SBYTE_ACKDET] =
+ "start byte acknowledged",
+ [ABRT_SBYTE_NORSTRT] =
+ "trying to send start byte when restart is disabled",
+ [ABRT_10B_RD_NORSTRT] =
+ "trying to read when restart is disabled (10bit mode)",
+ [ABRT_MASTER_DIS] =
+ "trying to use disabled adapter",
+ [ARB_LOST] =
+ "lost arbitration",
+};
+
+/**
+ * struct fh_i2c_dev - private i2c-designware data
+ * @dev: driver model device node
+ * @base: IO registers pointer
+ * @cmd_complete: tx completion indicator
+ * @lock: protect this struct and IO registers
+ * @clk: input reference clock
+ * @cmd_err: run time hardware error code
+ * @msgs: points to an array of messages currently being transferred
+ * @msgs_num: the number of elements in msgs
+ * @msg_write_idx: the element index of the current tx message in the msgs
+ * array
+ * @tx_buf_len: the length of the current tx buffer
+ * @tx_buf: the current tx buffer
+ * @msg_read_idx: the element index of the current rx message in the msgs
+ * array
+ * @rx_buf_len: the length of the current rx buffer
+ * @rx_buf: the current rx buffer
+ * @msg_err: error status of the current transfer
+ * @status: i2c master status, one of STATUS_*
+ * @abort_source: copy of the TX_ABRT_SOURCE register
+ * @irq: interrupt number for the i2c master
+ * @adapter: i2c subsystem adapter node
+ * @tx_fifo_depth: depth of the hardware tx fifo
+ * @rx_fifo_depth: depth of the hardware rx fifo
+ */
+struct fh_i2c_dev {
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct mutex lock;
+ struct clk *clk;
+ int cmd_err;
+ struct i2c_msg *msgs;
+ int msgs_num;
+ int msg_write_idx;
+ u32 tx_buf_len;
+ u8 *tx_buf;
+ int msg_read_idx;
+ u32 rx_buf_len;
+ u8 *rx_buf;
+ int msg_err;
+ unsigned int status;
+ u32 abort_source;
+ int irq;
+ struct i2c_adapter adapter;
+ unsigned int tx_fifo_depth;
+ unsigned int rx_fifo_depth;
+};
+
+
+static int i2c_fh_wait_master_not_active(struct fh_i2c_dev *dev)
+{
+ int timeout = 200; //2000 us
+
+ while (I2c_IsActiveMst( dev->base))
+ {
+ if (timeout <= 0)
+ {
+ dev_warn(dev->dev, "timeout waiting for master not active\n");
+ return -ETIMEDOUT;
+ }
+ timeout--;
+ udelay(10);
+ }
+
+ return 0;
+}
+
+static u32
+i2c_fh_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+{
+ /*
+ * DesignWare I2C core doesn't seem to have solid strategy to meet
+ * the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec
+ * will result in violation of the tHD;STA spec.
+ */
+ if (cond)
+ /*
+ * Conditional expression:
+ *
+ * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH
+ *
+ * This is based on the DW manuals, and represents an ideal
+ * configuration. The resulting I2C bus speed will be
+ * faster than any of the others.
+ *
+ * If your hardware is free from tHD;STA issue, try this one.
+ */
+ return (ic_clk * tSYMBOL + 5000) / 10000 - 8 + offset;
+ else
+ /*
+ * Conditional expression:
+ *
+ * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf)
+ *
+ * This is just experimental rule; the tHD;STA period turned
+		 * out to be proportional to (_HCNT + 3). With this setting,
+ * we could meet both tHIGH and tHD;STA timing specs.
+ *
+ * If unsure, you'd better to take this alternative.
+ *
+ * The reason why we need to take into account "tf" here,
+ * is the same as described in i2c_fh_scl_lcnt().
+ */
+ return (ic_clk * (tSYMBOL + tf) + 5000) / 10000 - 3 + offset;
+}
+
+static u32 i2c_fh_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+{
+ /*
+ * Conditional expression:
+ *
+ * IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf)
+ *
+ * DW I2C core starts counting the SCL CNTs for the LOW period
+ * of the SCL clock (tLOW) as soon as it pulls the SCL line.
+ * In order to meet the tLOW timing spec, we need to take into
+ * account the fall time of SCL signal (tf). Default tf value
+ * should be 0.3 us, for safety.
+ */
+ return ((ic_clk * (tLOW + tf) + 5000) / 10000) - 1 + offset;
+}
+
+/**
+ * i2c_fh_init() - initialize the designware i2c master hardware
+ * @dev: device private data
+ *
+ * This functions configures and enables the I2C master.
+ * This function is called during I2C init function, and in case of timeout at
+ * run time.
+ */
+static void i2c_fh_init(struct fh_i2c_dev *dev)
+{
+ u32 input_clock_khz = clk_get_rate(dev->clk) / 1000;
+ u32 ic_con, hcnt, lcnt;
+
+ /* Disable the adapter */
+ i2c_fh_wait_master_not_active(dev);
+ I2c_DisEnable((unsigned int)dev->base);
+
+	/* set standard and fast speed dividers for high/low periods */
+
+ /* Standard-mode */
+
+ hcnt = i2c_fh_scl_hcnt(input_clock_khz,
+ 40, /* tHD;STA = tHIGH = 4.0 us */
+ 3, /* tf = 0.3 us */
+ 0, /* 0: DW default, 1: Ideal */
+ 0); /* No offset */
+ lcnt = i2c_fh_scl_lcnt(input_clock_khz,
+ 47, /* tLOW = 4.7 us */
+ 3, /* tf = 0.3 us */
+ 0); /* No offset */
+ I2c_SetSsHcnt( dev->base ,hcnt);
+ I2c_SetSsLcnt( dev->base ,lcnt);
+ pr_info("\tClock: %dkhz, Standard-mode HCNT:LCNT = %d:%d\n", input_clock_khz, hcnt, lcnt);
+
+ /* Fast-mode */
+ hcnt = i2c_fh_scl_hcnt(input_clock_khz,
+ 6, /* tHD;STA = tHIGH = 0.6 us */
+ 3, /* tf = 0.3 us */
+ 0, /* 0: DW default, 1: Ideal */
+ 0); /* No offset */
+ lcnt = i2c_fh_scl_lcnt(input_clock_khz,
+ 13, /* tLOW = 1.3 us */
+ 3, /* tf = 0.3 us */
+ 0); /* No offset */
+ I2c_SetFsHcnt( dev->base ,hcnt);
+ I2c_SetFsLcnt( dev->base ,lcnt);
+ //dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+
+ /* Configure Tx/Rx FIFO threshold levels */
+
+ I2c_SetTxRxTl(dev->base ,dev->tx_fifo_depth - 1,0);
+ /* configure the i2c master */
+ ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
+ DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;/* DW_IC_CON_SPEED_STD */;
+ I2c_SetCon(dev->base,ic_con);
+
+}
+
+/*
+ * Waiting for bus not busy
+ */
+static int i2c_fh_wait_bus_not_busy(struct fh_i2c_dev *dev)
+{
+ int timeout = TIMEOUT;
+
+ while (I2c_IsActiveMst( dev->base)) {
+ if (timeout <= 0) {
+ dev_warn(dev->dev, "timeout waiting for bus ready\n");
+ return -ETIMEDOUT;
+ }
+ timeout--;
+ msleep(1);
+ }
+
+ return 0;
+}
+
+static void i2c_fh_xfer_init(struct fh_i2c_dev *dev)
+{
+ struct i2c_msg *msgs = dev->msgs;
+ u32 ic_con;
+
+ /* Disable the adapter */
+ i2c_fh_wait_master_not_active(dev);
+ I2c_DisEnable((unsigned int)dev->base);
+
+ /* set the slave (target) address */
+ I2c_SetDeviceId(dev->base,msgs[dev->msg_write_idx].addr);
+
+ /* if the slave address is ten bit address, enable 10BITADDR */
+ ic_con = I2c_GetCon(dev->base);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ else
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ I2c_SetCon(dev->base,ic_con);
+
+ /* Enable the adapter */
+ I2c_Enable(dev->base);
+
+ /* Enable interrupts */
+ I2c_SetIntrMask(dev->base,DW_IC_INTR_DEFAULT_MASK);
+
+}
+
+/*
+ * Initiate (and continue) low level master read/write transaction.
+ * This function is only called from i2c_fh_isr, and pumping i2c_msg
+ * messages into the tx buffer. Even if the size of i2c_msg data is
+ * longer than the size of the tx buffer, it handles everything.
+ */
+static void
+i2c_fh_xfer_msg(struct fh_i2c_dev *dev)
+{
+ struct i2c_msg *msgs = dev->msgs;
+ u32 intr_mask, cmd;
+ int tx_limit, rx_limit;
+ u32 addr = msgs[dev->msg_write_idx].addr;
+ u32 buf_len = dev->tx_buf_len;
+ u8 *buf = dev->tx_buf;
+
+ PRINT_DBG("i2c_fh_xfer_msg start, dev->msgs_num: %d\n", dev->msgs_num);
+
+ intr_mask = DW_IC_INTR_DEFAULT_MASK;
+
+ for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++)
+ {
+ /*
+ * if target address has changed, we need to
+ * reprogram the target address in the i2c
+ * adapter when we are done with this transfer
+ */
+ if (msgs[dev->msg_write_idx].addr != addr) {
+ dev_err(dev->dev,
+ "%s: invalid target address\n", __func__);
+ dev->msg_err = -EINVAL;
+ break;
+ }
+
+ if (msgs[dev->msg_write_idx].len == 0) {
+ dev_err(dev->dev,
+ "%s: invalid message length\n", __func__);
+ dev->msg_err = -EINVAL;
+ break;
+ }
+
+ if (!(dev->status & STATUS_WRITE_IN_PROGRESS))
+ {
+ /* new i2c_msg */
+ buf = msgs[dev->msg_write_idx].buf;
+ buf_len = msgs[dev->msg_write_idx].len;
+
+ PRINT_DBG("new msg: len: %d, buf: 0x%x\n", buf_len, buf[0]);
+ }
+
+ tx_limit = dev->tx_fifo_depth - I2c_GetTxTl(dev->base );
+ rx_limit = dev->rx_fifo_depth - I2c_GetRxTl(dev->base );
+
+ while (buf_len > 0 && tx_limit > 0 && rx_limit > 0)
+ {
+ if (msgs[dev->msg_write_idx].flags & I2C_M_RD)
+ {
+ cmd = 0x100;
+ rx_limit--;
+ }
+ else
+ {
+ cmd = *buf++;
+ }
+
+ tx_limit--; buf_len--;
+
+ if (!buf_len &&
+ ((dev->msg_write_idx == dev->msgs_num - 1) ||
+ (msgs[dev->msg_write_idx].flags &
+ I2C_M_SPERATE_MSG))) {
+ /* 2015-11-8 ar0130 bug fixed */
+ /* 20*1000 about 2 *0.1 ms
+ * (for i2c send one byte @ 100KHz)
+ * fixme: define MACRO get timeout value; */
+ unsigned int _timeout = 20000;
+ while (I2C_GetTransmitFifoLevel(dev->base) &&
+ _timeout--)
+ ;
+ cmd |= 0x200;
+ }
+ I2c_Write(dev->base, cmd);
+ }
+ PRINT_DBG("\n");
+
+ dev->tx_buf = buf;
+ dev->tx_buf_len = buf_len;
+
+ if (buf_len > 0)
+ {
+ /* more bytes to be written */
+ dev->status |= STATUS_WRITE_IN_PROGRESS;
+ break;
+ }
+ else
+ {
+ dev->status &= ~STATUS_WRITE_IN_PROGRESS;
+ }
+ }
+
+ /*
+ * If i2c_msg index search is completed, we don't need TX_EMPTY
+ * interrupt any more.
+ */
+
+ if (dev->msg_write_idx == dev->msgs_num)
+ intr_mask &= ~DW_IC_INTR_TX_EMPTY;
+
+ if (dev->msg_err)
+ intr_mask = 0;
+
+ I2c_SetIntrMask(dev->base,intr_mask);
+
+}
+
+static void
+i2c_fh_read(struct fh_i2c_dev *dev)
+{
+ struct i2c_msg *msgs = dev->msgs;
+ int rx_valid;
+
+ for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++)
+ {
+ u32 len;
+ u8 *buf;
+
+ if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
+ continue;
+
+ if (!(dev->status & STATUS_READ_IN_PROGRESS))
+ {
+ len = msgs[dev->msg_read_idx].len;
+ buf = msgs[dev->msg_read_idx].buf;
+ }
+ else
+ {
+ PRINT_DBG("STATUS_READ_IN_PROGRESS\n");
+ len = dev->rx_buf_len;
+ buf = dev->rx_buf;
+ }
+
+ rx_valid = I2c_GetRxFLR(dev->base);
+
+ if(rx_valid == 0)
+ {
+ PRINT_DBG("rx_valid == 0\n");
+ }
+
+ for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+ {
+ *buf++ = I2c_Read(dev->base);
+ }
+
+ PRINT_DBG("i2c_fh_read, len: %d, buf[0]: 0x%x\n", msgs[dev->msg_read_idx].len, msgs[dev->msg_read_idx].buf[0]);
+
+ if (len > 0)
+ {
+ PRINT_DBG("len > 0\n");
+ dev->status |= STATUS_READ_IN_PROGRESS;
+ dev->rx_buf_len = len;
+ dev->rx_buf = buf;
+ return;
+ } else
+ dev->status &= ~STATUS_READ_IN_PROGRESS;
+ }
+}
+
+static int i2c_fh_handle_tx_abort(struct fh_i2c_dev *dev)
+{
+ unsigned long abort_source = dev->abort_source;
+ int i;
+
+ if (abort_source & DW_IC_TX_ABRT_NOACK) {
+ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+ {
+ PRINT_DBG(
+ "%s: %s\n", __func__, abort_sources[i]);
+ }
+ return -EREMOTEIO;
+ }
+
+ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+ dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
+
+ if (abort_source & DW_IC_TX_ARB_LOST)
+ return -EAGAIN;
+ else if (abort_source & DW_IC_TX_ABRT_GCALL_READ)
+ return -EINVAL; /* wrong msgs[] data */
+ else
+ return -EIO;
+}
+
+/*
+ * Prepare controller for a transaction and call i2c_fh_xfer_msg
+ */
+static int
+i2c_fh_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct fh_i2c_dev *dev = i2c_get_adapdata(adap);
+ int ret;
+
+ PRINT_DBG("-------i2c, %s: msgs: %d\n", __func__, num);
+
+ mutex_lock(&dev->lock);
+
+ INIT_COMPLETION(dev->cmd_complete);
+ dev->msgs = msgs;
+ dev->msgs_num = num;
+ dev->cmd_err = 0;
+ dev->msg_write_idx = 0;
+ dev->msg_read_idx = 0;
+ dev->msg_err = 0;
+ dev->status = STATUS_IDLE;
+ dev->abort_source = 0;
+
+ ret = i2c_fh_wait_bus_not_busy(dev);
+ if (ret < 0)
+ {
+ goto done;
+ }
+
+ /* start the transfers */
+ i2c_fh_xfer_init(dev);
+
+ /* wait for tx to complete */
+ ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ);
+ if (ret == 0) {
+ dev_err(dev->dev, "controller timed out\n");
+ i2c_fh_init(dev);
+ ret = -ETIMEDOUT;
+ goto done;
+ } else if (ret < 0)
+ goto done;
+
+ if (dev->msg_err)
+ {
+ PRINT_DBG("dev->msg_err\n");
+ ret = dev->msg_err;
+ goto done;
+ }
+
+ /* no error */
+ if (likely(!dev->cmd_err)) {
+ /* Disable the adapter */
+ i2c_fh_wait_master_not_active(dev);
+ I2c_DisEnable(dev->base);
+ ret = num;
+ goto done;
+ }
+
+ /* We have an error */
+ if (dev->cmd_err == DW_IC_ERR_TX_ABRT)
+ {
+ PRINT_DBG("dev->cmd_err == DW_IC_ERR_TX_ABRT\n");
+ ret = i2c_fh_handle_tx_abort(dev);
+ goto done;
+ }
+
+ ret = -EIO;
+
+done:
+ PRINT_DBG("buf: 0x%x\n", dev->msgs[num - 1].buf[0]);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static u32 i2c_fh_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
+}
+
+static u32 i2c_fh_read_clear_intrbits(struct fh_i2c_dev *dev)
+{
+ u32 stat;
+
+ /*
+ * The IC_INTR_STAT register just indicates "enabled" interrupts.
+ * The unmasked raw version of interrupt status bits are available
+ * in the IC_RAW_INTR_STAT register.
+ *
+ * That is,
+ * stat = readl(IC_INTR_STAT);
+ * equals to,
+ * stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
+ *
+ * The raw version might be useful for debugging purposes.
+ */
+ stat = readl(dev->base + DW_IC_INTR_STAT);
+
+ /*
+ * Do not use the IC_CLR_INTR register to clear interrupts, or
+ * you'll miss some interrupts, triggered during the period from
+ * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
+ *
+ * Instead, use the separately-prepared IC_CLR_* registers.
+ */
+ if (stat & DW_IC_INTR_RX_UNDER)
+ I2c_ClrIntr(dev->base,DW_IC_CLR_RX_UNDER);
+ if (stat & DW_IC_INTR_RX_OVER)
+ I2c_ClrIntr(dev->base , DW_IC_CLR_RX_OVER);
+ if (stat & DW_IC_INTR_TX_OVER)
+ I2c_ClrIntr(dev->base , DW_IC_CLR_TX_OVER);
+ if (stat & DW_IC_INTR_RD_REQ)
+ I2c_ClrIntr(dev->base , DW_IC_CLR_RD_REQ);
+ if (stat & DW_IC_INTR_TX_ABRT) {
+ /*
+ * The IC_TX_ABRT_SOURCE register is cleared whenever
+ * the IC_CLR_TX_ABRT is read. Preserve it beforehand.
+ */
+ dev->abort_source = readl(dev->base + DW_IC_TX_ABRT_SOURCE);
+ I2c_ClrIntr(dev->base , DW_IC_CLR_TX_ABRT);
+ }
+ if (stat & DW_IC_INTR_RX_DONE)
+ I2c_ClrIntr(dev->base ,DW_IC_CLR_RX_DONE);
+ if (stat & DW_IC_INTR_ACTIVITY)
+ I2c_ClrIntr(dev->base ,DW_IC_CLR_ACTIVITY);
+ if (stat & DW_IC_INTR_STOP_DET)
+ I2c_ClrIntr(dev->base , DW_IC_CLR_STOP_DET);
+ if (stat & DW_IC_INTR_START_DET)
+ I2c_ClrIntr(dev->base , DW_IC_CLR_START_DET);
+ if (stat & DW_IC_INTR_GEN_CALL)
+ I2c_ClrIntr(dev->base , DW_IC_CLR_GEN_CALL);
+
+ return stat;
+}
+
+/*
+ * Interrupt service routine. This gets called whenever an I2C interrupt
+ * occurs.
+ */
+static irqreturn_t i2c_fh_isr(int this_irq, void *dev_id)
+{
+ struct fh_i2c_dev *dev = dev_id;
+ u32 stat;
+
+ stat = i2c_fh_read_clear_intrbits(dev);
+ PRINT_DBG("-----------i2c, %s: stat=0x%x\n", __func__, stat);
+
+ if (stat & DW_IC_INTR_TX_ABRT)
+ {
+ PRINT_DBG("DW_IC_INTR_TX_ABRT\n");
+ dev->cmd_err |= DW_IC_ERR_TX_ABRT;
+ dev->status = STATUS_IDLE;
+
+ /*
+ * Anytime TX_ABRT is set, the contents of the tx/rx
+ * buffers are flushed. Make sure to skip them.
+ */
+ I2c_SetIntrMask( dev->base,DW_IC_INTR_NONE);
+ goto tx_aborted;
+ }
+
+ if (stat & DW_IC_INTR_RX_FULL)
+ {
+ PRINT_DBG("i2c_fh_read\n");
+ i2c_fh_read(dev);
+ }
+
+ if (stat & DW_IC_INTR_TX_EMPTY)
+ {
+ PRINT_DBG("i2c_fh_xfer_msg\n");
+ i2c_fh_xfer_msg(dev);
+ }
+
+ /*
+ * No need to modify or disable the interrupt mask here.
+ * i2c_fh_xfer_msg() will take care of it according to
+ * the current transmit status.
+ */
+
+tx_aborted:
+ if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
+ complete(&dev->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+static struct i2c_algorithm i2c_fh_algo =
+{
+ .master_xfer = i2c_fh_xfer,
+ .functionality = i2c_fh_func,
+};
+
+static int __devinit fh_i2c_probe(struct platform_device *pdev)
+{
+ struct fh_i2c_dev *dev;
+ struct i2c_adapter *adap;
+ struct resource *mem, *ioarea;
+ int irq, r;
+ char clk_name[32] = {0};
+ pr_info("I2C driver:\n\tplatform registration... ");
+
+ /* NOTE: driver uses the static register mapping */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return irq; /* -ENXIO */
+ }
+
+ ioarea = request_mem_region(mem->start, resource_size(mem),
+ pdev->name);
+ if (!ioarea)
+ {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ dev = kzalloc(sizeof(struct fh_i2c_dev), GFP_KERNEL);
+ if (!dev)
+ {
+ r = -ENOMEM;
+ goto err_release_region;
+ }
+
+ init_completion(&dev->cmd_complete);
+ mutex_init(&dev->lock);
+ dev->dev = get_device(&pdev->dev);
+ dev->irq = irq;
+ platform_set_drvdata(pdev, dev);
+
+ snprintf(clk_name, sizeof(clk_name), "i2c%d_clk", pdev->id);
+ dev->clk = clk_get(NULL, clk_name);
+
+ if (IS_ERR(dev->clk))
+ {
+ r = -ENODEV;
+ goto err_free_mem;
+ }
+ clk_enable(dev->clk);
+
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (dev->base == NULL)
+ {
+ dev_err(&pdev->dev, "failure mapping io resources\n");
+ r = -ENOMEM;
+ goto err_unuse_clocks;
+ }
+ {
+ dev->tx_fifo_depth = I2c_GetTxFifoDepth(dev->base);
+ dev->rx_fifo_depth = I2c_GetRxFifoDepth(dev->base);
+ }
+ i2c_fh_init(dev);
+
+ pr_info("\ttx fifo depth: %d, rx fifo depth: %d\n", dev->tx_fifo_depth, dev->rx_fifo_depth);
+
+ I2c_SetIntrMask( dev->base,DW_IC_INTR_NONE); /* disable IRQ */
+ r = request_irq(dev->irq, i2c_fh_isr, IRQF_DISABLED,\
+ dev_name(&pdev->dev), dev);
+ if (r)
+ {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
+ goto err_iounmap;
+ }
+
+ adap = &dev->adapter;
+ i2c_set_adapdata(adap, dev);
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ strlcpy(adap->name, "FH I2C adapter",
+ sizeof(adap->name));
+ adap->algo = &i2c_fh_algo;
+ adap->dev.parent = &pdev->dev;
+
+ adap->nr = pdev->id;
+ r = i2c_add_numbered_adapter(adap);
+ if (r) {
+ dev_err(&pdev->dev, "failure adding adapter\n");
+ goto err_free_irq;
+ }
+
+ pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
+ "\t\tIO base addr: 0x%p)\n", "I2C", pdev->name,
+ pdev->id, dev->irq, dev->base);
+
+ return 0;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+err_iounmap:
+ iounmap(dev->base);
+err_unuse_clocks:
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+err_free_mem:
+ platform_set_drvdata(pdev, NULL);
+ put_device(&pdev->dev);
+ kfree(dev);
+err_release_region:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return r;
+}
+
+static int __devexit fh_i2c_remove(struct platform_device *pdev)
+{
+ struct fh_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ platform_set_drvdata(pdev, NULL);
+ i2c_del_adapter(&dev->adapter);
+ put_device(&pdev->dev);
+
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+ i2c_fh_wait_master_not_active(dev);
+ writel(0, dev->base + DW_IC_ENABLE);
+ free_irq(dev->irq, dev);
+ kfree(dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+ return 0;
+}
+
+static struct platform_driver fh_i2c_driver =
+{
+ .remove = __devexit_p(fh_i2c_remove),
+ .driver =
+ {
+ .name = "fh_i2c",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init fh_i2c_init_driver(void)
+{
+ return platform_driver_probe(&fh_i2c_driver, fh_i2c_probe);
+}
+module_init(fh_i2c_init_driver);
+
+static void __exit fh_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&fh_i2c_driver);
+}
+module_exit(fh_i2c_exit_driver);
+
+MODULE_AUTHOR("QIN");
+MODULE_ALIAS("platform:fh");
+MODULE_DESCRIPTION("FH I2C bus adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 35464744..6d75ca4f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -157,6 +157,8 @@ config INTEL_MID_PTI
an Intel Atom (non-netbook) mobile device containing a MIPI
P1149.7 standard implementation.
+
+
config SGI_IOC4
tristate "SGI IOC4 Base IO support"
depends on PCI
@@ -404,6 +406,87 @@ config EP93XX_PWM
To compile this driver as a module, choose M here: the module will
be called ep93xx_pwm.
+config FH_DW_I2S
+ tristate "FH DW I2S support"
+ default n
+
+
+
+
+config FH_PINCTRL
+ tristate "FH Pinctrl support"
+ default n
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called.
+
+
+config FH_SADC_V1
+ depends on ARCH_FULLHAN
+ tristate "FH SADC support(v1 version)"
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called fh_sadc.
+
+ HW para:10bits precision, 8 channels, 5M clk in.
+ one conversion need almost (12/5M *1)second
+
+config FH_SADC_V11
+ depends on FH_SADC_V1 && ARCH_FH8856
+ tristate "FH SADC support(v11 version)"
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called fh_sadc.
+
+ HW para:10bits precision, 8 channels, 5M clk in.
+ one conversion need almost (12/5M *1)second
+
+config FH_SADC_V2
+ depends on !FH_SADC_V1
+ tristate "FH SADC support(v2 version)"
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called fh_sadc.
+
+ HW para:12bits precision, 8 channels, 5M clk in.
+ one conversion need almost (12/5M *1)second
+
+config FH_SADC_V21
+ depends on FH_SADC_V2
+ tristate "FH SADC support(v21 version)"
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called fh_sadc.
+
+ HW para:12bits precision, 8 channels, 5M clk in.
+ one conversion need almost (12/5M *1)second
+
+config FH_SADC_V22
+ depends on FH_SADC_V2
+ tristate "FH SADC support(v22 version)"
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called fh_sadc.
+
+ HW para:12bits precision, 8 channels, 5M clk in.
+ one conversion need almost (12/5M *1)second
+
+config FH_EFUSE
+ tristate "FH EFUSE support"
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called fh_efuse.
+
+ HW para:60 bytes could be programmed.
+ the "efuse2aes map" is fixed by hardware..EX. 0~4 : aes key0, 5~8 : aes key1.
+
+config FH_CLK_MISC
+ tristate "FH clk miscdev support"
+ default n
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called.
+
config DS1682
tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
depends on I2C && EXPERIMENTAL
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5f03172c..bf609f91 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
obj-$(CONFIG_INTEL_MID_PTI) += pti.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
+obj-$(CONFIG_ATMEL_ACW) += atmel-acw.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_BMP085) += bmp085.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
@@ -31,6 +32,13 @@ obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_ISL29020) += isl29020.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
+obj-$(CONFIG_FH_PINCTRL) += fh_pinctrl_dev.o
+obj-$(CONFIG_FH_DW_I2S) += fh_dw_i2s.o
+obj-$(CONFIG_FH_I2S) += fh_i2s.o
+obj-$(CONFIG_FH_SADC_V1) += fh_sadc.o
+obj-$(CONFIG_FH_SADC_V2) += fh_sadc_v2.o
+obj-$(CONFIG_FH_EFUSE) += fh_efuse.o
+
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
@@ -46,3 +54,5 @@ obj-y += ti-st/
obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
obj-y += lis3lv02d/
obj-y += carma/
+obj-$(CONFIG_FH_DMAC_MISC) += fh_dma_miscdev.o
+obj-$(CONFIG_FH_CLK_MISC) += fh_clk_miscdev.o
diff --git a/drivers/misc/ac.h b/drivers/misc/ac.h
new file mode 100644
index 00000000..93531a8d
--- /dev/null
+++ b/drivers/misc/ac.h
@@ -0,0 +1,61 @@
+#ifndef __AC_H
+#define __AC_H
+
+/* #define _FHIOWR _IOWR */
+#define _FHIOWR(x, y, z) y
+
+#define FH_AUDIO_IOCTL_BASE 'M'
+#define AC_INIT_CAPTURE_MEM _FHIOWR(FH_AUDIO_IOCTL_BASE, 0, int)
+#define AC_INIT_PLAYBACK_MEM _FHIOWR(FH_AUDIO_IOCTL_BASE, 1, int)
+#define AC_SET_VOL _FHIOWR(FH_AUDIO_IOCTL_BASE, 2, int)
+#define AC_SET_INPUT_MODE _FHIOWR(FH_AUDIO_IOCTL_BASE, 3, int)
+#define AC_SET_OUTPUT_MODE _FHIOWR(FH_AUDIO_IOCTL_BASE, 4, int)
+#define AC_AI_EN _FHIOWR(FH_AUDIO_IOCTL_BASE, 5, int)
+#define AC_AO_EN _FHIOWR(FH_AUDIO_IOCTL_BASE, 6, int)
+#define AC_AI_DISABLE _FHIOWR(FH_AUDIO_IOCTL_BASE, 7, int)
+#define AC_AO_DISABLE _FHIOWR(FH_AUDIO_IOCTL_BASE, 8, int)
+#define AC_AI_PAUSE _FHIOWR(FH_AUDIO_IOCTL_BASE, 9, int)
+#define AC_AI_RESUME _FHIOWR(FH_AUDIO_IOCTL_BASE, 10, int)
+#define AC_AO_PAUSE _FHIOWR(FH_AUDIO_IOCTL_BASE, 11, int)
+#define AC_AO_RESUME _FHIOWR(FH_AUDIO_IOCTL_BASE, 12, int)
+#define AC_AI_READ _FHIOWR(FH_AUDIO_IOCTL_BASE, 13, int)
+#define AC_AO_WRITE _FHIOWR(FH_AUDIO_IOCTL_BASE, 14, int)
+#define AC_AI_SET_VOL _FHIOWR(FH_AUDIO_IOCTL_BASE, 15, int)
+#define AC_AO_SET_VOL _FHIOWR(FH_AUDIO_IOCTL_BASE, 16, int)
+#define AC_AI_MICIN_SET_VOL _FHIOWR(FH_AUDIO_IOCTL_BASE, 17, int)
+#define AC_AEC_SET_CONFIG _FHIOWR(FH_AUDIO_IOCTL_BASE, 18, int)
+#define AC_NR_SET_CONFIG _FHIOWR(FH_AUDIO_IOCTL_BASE, 19, int)
+#define AC_NR2_SET_CONFIG _FHIOWR(FH_AUDIO_IOCTL_BASE, 20, int)
+#define AC_AGC_SET_CONFIG _FHIOWR(FH_AUDIO_IOCTL_BASE, 21, int)
+#define AC_WORK_MODE _FHIOWR(FH_AUDIO_IOCTL_BASE, 22, int)
+#define AC_AI_READ_FRAME_EXT _FHIOWR(FH_AUDIO_IOCTL_BASE, 23, int)
+#define AC_AO_SET_MODE _FHIOWR(FH_AUDIO_IOCTL_BASE, 24, int)
+#define AC_USING_EXTERNAL_CODEC _FHIOWR(FH_AUDIO_IOCTL_BASE, 25, int)
+#define AC_EXT_INTF _FHIOWR(FH_AUDIO_IOCTL_BASE, 100, int)
+
+enum io_select {
+ mic_in = 0,
+ line_in = 1,
+ speaker_out = 2,
+ line_out = 3,
+};
+
+struct fh_audio_cfg_arg {
+ int io_type;
+ int volume;
+ int rate;
+ int frame_bit;
+ int channels;
+ int buffer_size;
+ int period_size;
+ int enc_type; /* encoding type */
+};
+
+
+struct fh_audio_ai_read_frame_ext {
+ unsigned int len;
+ unsigned char *data;
+ unsigned long long pts;
+};
+
+#endif
diff --git a/drivers/misc/fh_clk_miscdev.c b/drivers/misc/fh_clk_miscdev.c
new file mode 100644
index 00000000..d3ea3bef
--- /dev/null
+++ b/drivers/misc/fh_clk_miscdev.c
@@ -0,0 +1,176 @@
+#include <linux/miscdevice.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#include <linux/clk.h>
+
+#include "fh_clk_miscdev.h"
+
+//#define FH_CLK_DEBUG
+
+#if defined(FH_CLK_DEBUG)
+#define PRINT_CLK_DBG(fmt, args...) \
+ do \
+ { \
+ printk("FH_CLK_DEBUG: "); \
+ printk(fmt, ##args); \
+ } while (0)
+#else
+#define PRINT_CLK_DBG(fmt, args...) \
+ do \
+ { \
+ } while (0)
+#endif
+
+static int fh_clk_miscdev_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int fh_clk_miscdev_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+
+static long fh_clk_miscdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENODEV;
+ struct clk *clk;
+ struct clk_usr uclk;
+
+ if (unlikely(_IOC_TYPE(cmd) != CLK_IOCTL_MAGIC))
+ {
+ pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
+ __func__, _IOC_TYPE(cmd), -ENOTTY);
+ return -ENOTTY;
+ }
+
+ if (unlikely(_IOC_NR(cmd) > CLK_IOCTL_MAXNR))
+ {
+ pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
+ __func__, _IOC_NR(cmd), -ENOTTY);
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ {
+ ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
+ }
+ else if(_IOC_DIR(cmd) & _IOC_WRITE)
+ {
+ ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
+ }
+
+ if(ret)
+ {
+ pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
+ __func__, _IOC_NR(cmd), -EACCES);
+ return -EACCES;
+ }
+
+ switch(cmd)
+ {
+ case ENABLE_CLK:
+ if (copy_from_user((void *)&uclk,
+ (void __user *)arg,
+ sizeof(struct clk_usr)))
+ return -EFAULT;
+ clk = clk_get(NULL, uclk.name);
+ if(!IS_ERR(clk)) {
+ clk_enable(clk);
+ ret = 0;
+ }
+ break;
+ case DISABLE_CLK:
+ if (copy_from_user((void *)&uclk,
+ (void __user *)arg,
+ sizeof(struct clk_usr)))
+ return -EFAULT;
+ clk = clk_get(NULL, uclk.name);
+ if (!IS_ERR(clk)) {
+ clk_disable(clk);
+ ret = 0;
+ }
+ break;
+ case SET_CLK_RATE:
+ if (copy_from_user((void *)&uclk,
+ (void __user *)arg,
+ sizeof(struct clk_usr)))
+ return -EFAULT;
+ clk = clk_get(NULL, uclk.name);
+ ret = PTR_ERR(clk);
+ if (!IS_ERR(clk))
+ ret = clk_set_rate(clk, uclk.frequency);
+
+ PRINT_CLK_DBG("%s, set clk: %s, rate: %lu\n",
+ __func__, uclk.name, uclk.frequency);
+ break;
+ case GET_CLK_RATE:
+ if (copy_from_user((void *)&uclk,
+ (void __user *)arg,
+ sizeof(struct clk_usr)))
+ return -EFAULT;
+ clk = clk_get(NULL, uclk.name);
+ ret = PTR_ERR(clk);
+ if (!IS_ERR(clk)) {
+ uclk.frequency = clk_get_rate(clk);
+ ret = 0;
+ }
+ PRINT_CLK_DBG("%s, get clk: %s, rate: %lu\n",
+ __func__, uclk.name, uclk.frequency);
+ }
+
+
+ return ret;
+}
+
+static const struct file_operations fh_clk_fops =
+{
+ .owner = THIS_MODULE,
+ .open = fh_clk_miscdev_open,
+ .release = fh_clk_miscdev_release,
+ .unlocked_ioctl = fh_clk_miscdev_ioctl,
+};
+
+static struct miscdevice fh_clk_miscdev =
+{
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DEVICE_NAME,
+ .fops = &fh_clk_fops,
+};
+
+int __init fh_clk_miscdev_init(void)
+{
+ int err;
+
+ err = misc_register(&fh_clk_miscdev);
+
+ if(err < 0)
+ {
+ pr_err("%s: ERROR: %s registration failed, ret=%d",
+ __func__, DEVICE_NAME, err);
+ return -ENXIO;
+ }
+
+ pr_info("CLK misc driver init successfully\n");
+ return 0;
+}
+
+
+static void __exit fh_clk_miscdev_exit(void)
+{
+ misc_deregister(&fh_clk_miscdev);
+}
+module_init(fh_clk_miscdev_init);
+module_exit(fh_clk_miscdev_exit);
+
+MODULE_AUTHOR("QIN");
+MODULE_DESCRIPTION("Misc Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform: FH");
diff --git a/drivers/misc/fh_clk_miscdev.h b/drivers/misc/fh_clk_miscdev.h
new file mode 100644
index 00000000..ce405554
--- /dev/null
+++ b/drivers/misc/fh_clk_miscdev.h
@@ -0,0 +1,10 @@
+
+#ifndef FH_CLK_MISCDEV_H_
+#define FH_CLK_MISCDEV_H_
+
+#include <mach/clock.h>
+
+#define DEVICE_NAME "fh_clk_miscdev"
+
+
+#endif /* FH_CLK_MISCDEV_H_ */
diff --git a/drivers/misc/fh_dma_miscdev.c b/drivers/misc/fh_dma_miscdev.c
new file mode 100644
index 00000000..7fad61de
--- /dev/null
+++ b/drivers/misc/fh_dma_miscdev.c
@@ -0,0 +1,363 @@
+#include <linux/miscdevice.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+
+
+#include "fh_dma_miscdev.h"
+
+#define MEMCPY_UNIT (4095 * 4 * 64) //4095 xfer * 32-bit * 64 desc
+
+//#define FH_DMA_DEBUG
+
+#ifdef FH_DMA_DEBUG
+#define PRINT_DMA_DBG(fmt, args...) \
+ do \
+ { \
+ printk("FH_DMA_DEBUG: "); \
+ printk(fmt, ## args); \
+ } \
+ while(0)
+#else
+#define PRINT_DMA_DBG(fmt, args...) do { } while (0)
+#endif
+
+
+static void fh_dma_callback(void *data)
+{
+ PRINT_DMA_DBG("dma transfer done, end=%lu\n", jiffies);
+ complete(data);
+}
+
+static int kick_off_dma(struct dma_chan *channel, unsigned int src_offset, unsigned int dst_offset, unsigned int size)
+{
+ int ret;
+ struct completion cmp;
+ struct dma_async_tx_descriptor *dma_tx_desc = NULL;
+ struct dma_device *dma_dev = channel->device;
+ dma_cookie_t cookie;
+ unsigned long timeout;
+ unsigned long flag;
+
+ flag = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
+
+ PRINT_DMA_DBG("try to copy 0x%x bytes: 0x%x --> 0x%x\n", MEMCPY_UNIT, src_offset, dst_offset);
+
+ dma_tx_desc = dma_dev->device_prep_dma_memcpy(channel, dst_offset, src_offset, size, flag);
+
+ PRINT_DMA_DBG("device_prep_dma_memcpy end\n");
+
+ if(!dma_tx_desc)
+ {
+ pr_err("ERROR: %s, device_prep_dma_memcpy fail\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+
+ init_completion(&cmp);
+ dma_tx_desc->callback = fh_dma_callback;
+ dma_tx_desc->callback_param = &cmp;
+ PRINT_DMA_DBG("tx_submit start\n");
+ cookie = dma_tx_desc->tx_submit(dma_tx_desc);
+ PRINT_DMA_DBG("tx_submit end\n");
+ if (dma_submit_error(cookie))
+ {
+ pr_err("ERROR: %s, tx_submit fail\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ PRINT_DMA_DBG("dma_async_issue_pending start\n");
+ dma_async_issue_pending(channel);
+ PRINT_DMA_DBG("dma_async_issue_pending end, %d\n", DMA_MEMCPY_TIMEOUT);
+
+ timeout = wait_for_completion_timeout(&cmp, msecs_to_jiffies(DMA_MEMCPY_TIMEOUT));
+
+ PRINT_DMA_DBG("wait_for_completion_timeout end, timeout: %lu\n", timeout);
+
+ if(!timeout)
+ {
+ pr_err("ERROR: %s, dma transfer fail, timeout\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+
+ ret = dma_async_is_tx_complete(channel, cookie, NULL, NULL);
+
+ if(ret)
+ {
+ pr_err("ERROR: %s, dma transfer fail, incorrect status: %d\n", __func__, ret);
+ ret = -ENODEV;
+ return ret;
+ }
+
+ return 0;
+}
+
+
+static int fh_dma_start_transfer(struct dma_chan *channel, struct dma_memcpy* memcpy)
+{
+ int ret;
+ unsigned int i;
+
+ for(i=0; i<memcpy->size / MEMCPY_UNIT; i++)
+ {
+ ret = kick_off_dma(channel, memcpy->src_addr_phy + MEMCPY_UNIT*i, memcpy->dst_addr_phy + MEMCPY_UNIT*i, MEMCPY_UNIT);
+ if(ret)
+ {
+ return ret;
+ }
+ }
+
+ ret = kick_off_dma(channel, memcpy->src_addr_phy + MEMCPY_UNIT*i, memcpy->dst_addr_phy + MEMCPY_UNIT*i, memcpy->size % MEMCPY_UNIT);
+ return ret;
+}
+
+
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+ struct dma_memcpy* memcpy = param;
+ PRINT_DMA_DBG("chan_filter, channel id: %d\n", memcpy->chan_id);
+ if(memcpy->chan_id < 0)
+ {
+ return false;
+ }
+
+ if(memcpy->chan_id == chan->chan_id)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+
+}
+
+static int fh_dma_memcpy(struct dma_memcpy* memcpy)
+{
+ //fixme: ioctl should be atomic, otherwise channel will be changed.
+ struct dma_chan *dma_channel;
+ dma_cap_mask_t mask;
+ int ret;
+
+ PRINT_DMA_DBG("fh_dma_memcpy start\n");
+ PRINT_DMA_DBG("ioctl, memcpy->size: 0x%x\n", memcpy->size);
+
+
+ PRINT_DMA_DBG("fh_dma_request_channel start\n");
+ dma_cap_zero(mask);
+ PRINT_DMA_DBG("dma_cap_zero end\n");
+ dma_cap_set(DMA_MEMCPY, mask);
+ PRINT_DMA_DBG("dma_cap_set end\n");
+
+ dma_channel = dma_request_channel(mask, chan_filter, memcpy);
+
+ PRINT_DMA_DBG("dma_request_channel finished, channel_addr: 0x%x\n", (u32)dma_channel);
+
+ if(!dma_channel)
+ {
+ pr_err("ERROR: %s, No Channel Available, channel: %d\n", __func__, memcpy->chan_id);
+ return -EBUSY;
+ }
+ memcpy->chan_id = dma_channel->chan_id;
+ PRINT_DMA_DBG("dma channel name: %s\n", dma_chan_name(dma_channel));
+
+ ret = fh_dma_start_transfer(dma_channel, memcpy);
+
+ if(ret)
+ {
+ pr_err("ERROR: %s, DMA Xfer Failed\n", __func__);
+ }
+
+ dma_channel->device->device_free_chan_resources(dma_channel);
+ dma_release_channel(dma_channel);
+
+ return ret;
+}
+
+static long fh_dma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ struct dma_memcpy memcpy;
+
+
+ if (unlikely(_IOC_TYPE(cmd) != DMA_IOCTL_MAGIC))
+ {
+ pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
+ __func__, _IOC_TYPE(cmd), -ENOTTY);
+ return -ENOTTY;
+ }
+
+ if (unlikely(_IOC_NR(cmd) > DMA_IOCTL_MAXNR))
+ {
+ pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
+ __func__, _IOC_NR(cmd), -ENOTTY);
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ {
+ ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
+ }
+ else if(_IOC_DIR(cmd) & _IOC_WRITE)
+ {
+ ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
+ }
+
+ if(ret)
+ {
+ pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
+ __func__, _IOC_NR(cmd), -EACCES);
+ return -EACCES;
+ }
+
+ switch(cmd)
+ {
+
+ case DMA_MEMCOPY:
+ if(copy_from_user((void *)&memcpy,
+ (void __user *)arg,
+ sizeof(struct dma_memcpy)))
+ {
+ return -EFAULT;
+ }
+ ret = fh_dma_memcpy(&memcpy);
+ break;
+ }
+
+ return ret;
+}
+
+static int fh_dma_open(struct inode *inode, struct file *file)
+{
+ PRINT_DMA_DBG("fh_dma_open\n");
+ return 0;
+}
+
+static int fh_dma_release(struct inode *inode, struct file *filp)
+{
+ PRINT_DMA_DBG("fh_dma_release\n");
+ return 0;
+}
+
+
+static void *v_seq_start(struct seq_file *s, loff_t *pos)
+{
+ static unsigned long counter = 0;
+ if (*pos == 0)
+ return &counter;
+ else
+ {
+ *pos = 0;
+ return NULL;
+ }
+}
+
+static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void v_seq_stop(struct seq_file *s, void *v)
+{
+
+}
+
+static int v_seq_show(struct seq_file *sfile, void *v)
+{
+
+ seq_printf(sfile, "\nISP Status\n");
+ seq_printf(sfile, "\nCTRL: \n");
+
+#if 0
+ int i;
+ u32 data;
+ seq_printf(sfile, "ipf reg:\n");
+ for(i=0; i<10; i++)
+ {
+ data = GET_IPF_REG_V(i*4);
+ seq_printf(sfile, "0x%05x, 0x%08x\n", i*4, data);
+ }
+#endif
+
+ return 0;
+}
+
+static const struct seq_operations fh_dma_seq_ops =
+{
+ .start = v_seq_start,
+ .next = v_seq_next,
+ .stop = v_seq_stop,
+ .show = v_seq_show
+};
+
+static int isp_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fh_dma_seq_ops);
+}
+
+static struct file_operations fh_dma_proc_ops =
+{
+ .owner = THIS_MODULE,
+ .open = isp_proc_open,
+ .read = seq_read,
+};
+
+static const struct file_operations fh_dma_fops =
+{
+ .owner = THIS_MODULE,
+ .open = fh_dma_open,
+ .release = fh_dma_release,
+ .unlocked_ioctl = fh_dma_ioctl,
+};
+
+static struct miscdevice fh_dma_device =
+{
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DEVICE_NAME,
+ .fops = &fh_dma_fops,
+};
+
+static int __init fh_dma_init(void)
+{
+ int ret;
+ struct proc_dir_entry *proc_file;
+ ret = misc_register(&fh_dma_device);
+
+ if(ret < 0)
+ {
+ pr_err("%s: ERROR: %s registration failed",
+ __func__, DEVICE_NAME);
+ return -ENXIO;
+ }
+
+ proc_file = create_proc_entry(PROC_FILE, 0644, NULL);
+
+ if (proc_file)
+ proc_file->proc_fops = &fh_dma_proc_ops;
+ else
+ pr_err("%s: ERROR: %s proc file create failed",
+ __func__, DEVICE_NAME);
+
+
+ return ret;
+}
+
+static void __exit fh_dma_exit(void)
+{
+ remove_proc_entry(PROC_FILE, NULL);
+ misc_deregister(&fh_dma_device);
+}
+module_init(fh_dma_init);
+module_exit(fh_dma_exit);
+
+MODULE_AUTHOR("QIN");
+MODULE_DESCRIPTION("Misc Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform: FH");
diff --git a/drivers/misc/fh_dma_miscdev.h b/drivers/misc/fh_dma_miscdev.h
new file mode 100644
index 00000000..c294c331
--- /dev/null
+++ b/drivers/misc/fh_dma_miscdev.h
@@ -0,0 +1,32 @@
+
+#ifndef FH_DMA_MISCDEV_H_
+#define FH_DMA_MISCDEV_H_
+
+#include <linux/dmaengine.h>
+
+
+#define DEVICE_NAME "fh_dma_misc"
+#define PROC_FILE "driver/dma_misc"
+
+#define DMA_IOCTL_MAGIC 'd'
+#define RESERVERD _IO(DMA_IOCTL_MAGIC, 0)
+#define REQUEST_CHANNEL _IOWR(DMA_IOCTL_MAGIC, 1, __u32)
+#define DMA_MEMCOPY _IOWR(DMA_IOCTL_MAGIC, 2, __u32)
+
+#define DMA_IOCTL_MAXNR 14
+
+#define DMA_MEMCPY_TIMEOUT 5000 //msec
+
+/*
+ * Job descriptor exchanged with user space via the DMA_MEMCOPY ioctl.
+ * NOTE(review): field semantics inferred from the _vir/_phy suffixes;
+ * confirm against the ioctl implementation.
+ */
+struct dma_memcpy
+{
+	int chan_id;			/* channel id, presumably from REQUEST_CHANNEL */
+	void *src_addr_vir;		/* source virtual address */
+	void *dst_addr_vir;		/* destination virtual address */
+	unsigned int size;		/* transfer length in bytes */
+	unsigned int src_addr_phy;	/* source physical address */
+	unsigned int dst_addr_phy;	/* destination physical address */
+};
+
+
+
+#endif /* FH_DMA_MISCDEV_H_ */
diff --git a/drivers/misc/fh_dw_i2s.c b/drivers/misc/fh_dw_i2s.c
new file mode 100644
index 00000000..b27c41c3
--- /dev/null
+++ b/drivers/misc/fh_dw_i2s.c
@@ -0,0 +1,1537 @@
+/**@file
+ * @Copyright (c) 2016 Shanghai Fullhan Microelectronics Co., Ltd.
+ * @brief
+ *
+ * @author fullhan
+ * @date 2016-7-15
+ * @version V1.0
+ * @version V1.1 modify code style
+ * @note: misc audio driver for fh8830 embedded audio codec.
+ * @note History:
+ * @note <author> <time> <version > <desc>
+ * @note
+ * @warning: the codec is fixed to 24 bit,
+ * so remember to move the 24 bit data to 16 bit in
+ * application layer, the next version CPU will solve this bug.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/irqreturn.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <linux/ioctl.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <mach/fh_dmac.h>
+#include <mach/pmu.h>
+#include <mach/fh_predefined.h>
+#include <mach/chip.h>
+#include <mach/fh_i2s.h>
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include "ac.h"
+
+#define DEVICE_NAME "fh_audio"
+
+#define NR_DESCS_PER_CHANNEL 64
+#define FIX_SAMPLE_BIT 32
+
+#define AUDIO_DMA_PREALLOC_SIZE (128*1024)
+
+#define I2S_INTR_RX_AVAILABLE 0x1
+#define I2S_INTR_RX_OVERRUN 0x2
+#define I2S_INTR_TX_AVAILABLE 0x10
+#define I2S_INTR_TX_OVERRUN 0x20
+
+#define I2S_REG_IER_OFFSET 0x00 /*i2s enable reg*/
+#define I2S_REG_IRER_OFFSET 0x04 /*i2s receiver block enable*/
+#define I2S_REG_ITER_OFFSET 0x08 /*i2s transmitter block*/
+#define I2S_REG_CER_OFFSET 0x0c /*clk en*/
+#define I2S_REG_CCR_OFFSET 0x10 /*clk cfg reg*/
+#define I2S_REG_RXFFR_OFFSET 0x14 /*reset rx fifo reg*/
+#define I2S_REG_TXFFR_OFFSET 0x18 /*reset tx fifo reg*/
+#define I2S_REG_LRBR0_OFFSET 0x20 /*left rx/tx buf reg*/
+#define I2S_REG_RRBR0_OFFSET 0x24 /*right rx/tx buf reg*/
+#define I2S_REG_RER0_OFFSET 0x28 /*rx en register*/
+#define I2S_REG_TER0_OFFSET 0x2c /*tx en*/
+#define I2S_REG_RCR0_OFFSET 0x30 /*rx config*/
+#define I2S_REG_TCR0_OFFSET 0x34 /*tx config*/
+#define I2S_REG_ISR0_OFFSET 0x38 /*intt status reg*/
+#define I2S_REG_IMR0_OFFSET 0x3c /*intt mask reg*/
+#define I2S_REG_ROR0_OFFSET 0x40 /*rx overrun reg*/
+#define I2S_REG_TOR0_OFFSET 0x44 /*tx overrun reg*/
+#define I2S_REG_RFCR0_OFFSET 0x48 /*rx fifo config reg*/
+#define I2S_REG_TFCR0_OFFSET 0x4c /*tx fifo config reg*/
+#define I2S_REG_RFF0_OFFSET 0x50 /*rx fifo flush reg*/
+#define I2S_REG_TFF0_OFFSET 0x54 /*tx fifo flush reg*/
+
+#define I2S_REG_RXDMA_OFFSET 0x1c0 /*Receiver Block DMA Register*/
+#define I2S_REG_RRXDMA_OFFSET 0x1c4 /*Reset Receiver Block DMA Register*/
+#define I2S_REG_TXDMA_OFFSET 0x1c8 /*Transmitter Block DMA Register*/
+#define I2S_REG_RTXDMA_OFFSET 0x1cc /*Reset Transmitter Block DMA Register*/
+
+#define I2S_REG_DMACR_OFFSET 0x180 /* DMA Control Register */
+#define I2S_REG_DMATDLR_OFFSET 0x184
+#define I2S_REG_DMARDLR_OFFSET 0x188
+
+#define I2S_DMA_RXEN_BIT 0
+#define I2S_DMA_TXEN_BIT 1
+
+#define I2S_DMA_CAP_CHANNEL 2
+#define I2S_DMA_PLAY_CHANNEL 3
+
+/* #define FH_AUDIO_DEBUG*/
+#ifdef FH_AUDIO_DEBUG
+#define PRINT_AUDIO_DBG(fmt, args...) \
+ do { \
+ printk(KERN_INFO "FH_AUDIO_DEBUG: "); \
+ printk(fmt, ## args); \
+ } while (0);
+#else
+#define PRINT_AUDIO_DBG(fmt, args...) do { } while (0)
+#endif
+
+enum audio_type {
+ capture = 0, playback,
+};
+
+enum audio_state {
+ STATE_NORMAL = 0, STATE_INIT, STATE_STOP, STATE_RUN, STATE_PAUSE
+};
+
+/* User-requested stream parameters plus values derived from them. */
+struct audio_config {
+	int rate;		/* sample rate in Hz */
+	int volume;		/* 0..100 (see get_param_from_volume) */
+	enum io_select io_type;	/* input/output routing selection */
+	int frame_bit;		/* bits per sample: 16/24/32 supported */
+	int channels;
+	int buffer_size;	/* ring buffer size, in frames */
+	int period_size;	/* DMA period, in frames */
+	int buffer_bytes;	/* buffer_size converted to bytes */
+	int period_bytes;	/* period_size converted to bytes */
+	int start_threshold;
+	int stop_threshold;
+};
+
+/* Per-direction stream state: config, ring-buffer pointers, DMA area. */
+struct audio_ptr_t {
+	struct audio_config cfg;
+	enum audio_state state;
+	long size;		/* active ring size in bytes (= cfg.buffer_bytes) */
+	int hw_ptr;		/* byte offset last reached by the DMA engine */
+	int appl_ptr;		/* byte offset last reached by the application */
+	spinlock_t lock;	/* protects hw_ptr/appl_ptr */
+	struct device dev;
+	u8 *area; /*virtual pointer*/
+	dma_addr_t addr; /*physical address*/
+	u8 *mmap_addr;
+};
+
+struct fh_audio_cfg {
+ struct audio_ptr_t capture;
+ struct audio_ptr_t playback;
+ wait_queue_head_t readqueue;
+ wait_queue_head_t writequeue;
+ struct semaphore sem_capture;
+ struct semaphore sem_playback;
+};
+
+struct fh_dma_chan {
+ struct dma_chan *chan;
+ void __iomem *ch_regs;
+ u8 mask;
+ u8 priority;
+ bool paused;
+ bool initialized;
+ spinlock_t lock;
+ /* these other elements are all protected by lock */
+ unsigned long flags;
+ dma_cookie_t completed;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ struct fh_cyclic_desc *cdesc;
+ unsigned int descs_allocated;
+};
+
+struct channel_assign {
+ int capture_channel;
+ int playback_channel;
+};
+
+struct audio_dev {
+ struct channel_assign channel_assign;
+ int dma_master;
+ struct fh_audio_cfg audio_config;
+ struct miscdevice fh_audio_miscdev;
+ struct fh_i2s_platform_data *plat_data;
+};
+
+static const struct file_operations fh_i2s_fops;
+
+static struct audio_dev fh_audio_dev = {
+ .channel_assign = {
+ .capture_channel = I2S_DMA_CAP_CHANNEL,
+ .playback_channel = I2S_DMA_PLAY_CHANNEL,
+ },
+ .dma_master = 1,
+ .fh_audio_miscdev = {
+ .fops = &fh_i2s_fops,
+ .name = DEVICE_NAME,
+ .minor = MISC_DYNAMIC_MINOR,
+ }
+};
+
+static struct {
+ spinlock_t lock;
+ unsigned long paddr;
+ void __iomem *vaddr;
+ unsigned long acw_vaddr;
+ struct clk *clk;
+ struct clk *acodec_clk;
+ unsigned long in_use;
+ unsigned long next_heartbeat;
+ struct timer_list timer;
+ int expect_close;
+ int irq;
+} fh_i2s_module;
+
+static struct infor_record_t
+{
+ int record_pid;
+ int play_pid;
+} infor_record;
+
+static struct fh_dma_chan *dma_rx_transfer;
+static struct fh_dma_chan *dma_tx_transfer;
+
+static struct work_struct playback_wq;
+
+static struct audio_param_store {
+ int input_volume;
+ enum io_select input_io_type;
+} audio_param_store;
+
+static void fh_i2s_tx_dma_done(void *arg);
+static void fh_i2s_rx_dma_done(void *arg);
+static bool fh_i2s_dma_chan_filter(struct dma_chan *chan, void *filter_param);
+static void create_proc(void);
+static void remove_proc(void);
+static int config_i2s_clk(u32 rate, u32 bit);
+
+/*
+ * Stop playback and release the cyclic TX DMA resources.
+ *
+ * STATE_INIT: the stream was configured but never started, so only the
+ * semaphore taken at init time has to be released.  STATE_STOP: a stop
+ * already ran -- return without touching the semaphore so it is not
+ * up()'d twice.
+ */
+static void fh_i2s_stop_playback(struct fh_audio_cfg *audio_config)
+{
+	if (audio_config->playback.state == STATE_INIT)
+		goto free;
+	if (audio_config->playback.state == STATE_STOP)
+		return;
+	audio_config->playback.state = STATE_STOP;
+	/* disable the TX DMA handshake first, then the transmitter block */
+	clear_bit(I2S_DMA_TXEN_BIT, fh_i2s_module.vaddr + I2S_REG_DMACR_OFFSET);
+	writel(0, fh_i2s_module.vaddr + I2S_REG_ITER_OFFSET);
+	fh_dma_cyclic_stop(dma_tx_transfer->chan);
+	fh_dma_cyclic_free(dma_tx_transfer->chan);
+
+free:
+	up(&audio_config->sem_playback);
+}
+
+/*
+ * Stop capture and release the cyclic RX DMA resources.
+ * Same state handling as fh_i2s_stop_playback(): STATE_INIT releases
+ * only the semaphore, STATE_STOP is a no-op.
+ */
+static void fh_i2s_stop_capture(struct fh_audio_cfg *audio_config)
+{
+	if (audio_config->capture.state == STATE_INIT)
+		goto free;
+	if (audio_config->capture.state == STATE_STOP)
+		return;
+	audio_config->capture.state = STATE_STOP;
+
+	/* disable the RX DMA handshake first, then the receiver block */
+	clear_bit(I2S_DMA_RXEN_BIT, fh_i2s_module.vaddr + I2S_REG_DMACR_OFFSET);
+	writel(0, fh_i2s_module.vaddr + I2S_REG_IRER_OFFSET);
+
+	fh_dma_cyclic_stop(dma_rx_transfer->chan);
+	fh_dma_cyclic_free(dma_rx_transfer->chan);
+
+free:
+	up(&audio_config->sem_capture);
+}
+
+/* I/O routing hook.  This I2S wrapper has no selectable analog path,
+ * so the function is intentionally a no-op kept for interface parity
+ * with the codec driver. */
+static void switch_io_type(enum audio_type type, enum io_select io_type)
+{
+}
+
+/*
+ * Map a 0..100 user volume onto the 0..63 hardware gain parameter.
+ * Inputs outside 0..100 yield -EINVAL; values that would map below 0
+ * clamp to 0.
+ */
+static int get_param_from_volume(int volume)
+{
+	const int max_volume = 100;
+	const int max_param = 63;
+	int param;
+
+	if (volume < 0 || volume > max_volume) {
+		printk(KERN_ERR "unsupported input volume\n");
+		return -EINVAL;
+	}
+
+	param = max_param - (max_volume - volume);
+	return (param > 0) ? param : 0;
+}
+
+/* Convert a byte count into a frame count for the given frame width
+ * (frame_bit is in bits per frame). */
+static inline long bytes_to_frames(int frame_bit, int bytes)
+{
+	int total_bits = bytes * 8;
+
+	return total_bits / frame_bit;
+}
+
+/* Convert a frame count into a byte count for the given frame width
+ * (frame_bit is in bits per frame). */
+static inline long frames_to_bytes(int frame_bit, int frames)
+{
+	int total_bits = frames * frame_bit;
+
+	return total_bits / 8;
+}
+
+/* Input-gain hook.  The I2S wrapper has no programmable analog gain,
+ * so this is intentionally a no-op kept for interface parity. */
+static void switch_input_volume(int volume)
+{
+}
+
+/* Rewind both ring-buffer positions to the start of the DMA area. */
+static void reset_dma_buff(struct audio_ptr_t *config)
+{
+	config->hw_ptr = 0;
+	config->appl_ptr = 0;
+}
+
+/*
+ * Program the I2S word-select size (CCR) and the RX/TX word length for
+ * the given sample width.  Only 16/24/32-bit frames are supported.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported width.
+ */
+static int config_i2s_frame_bit(int bit)
+{
+	int wss;
+	int wlen;
+
+	switch (bit) {
+	case 16:
+		wss = 0;
+		wlen = 0b010;
+		break;
+	case 24:
+		wss = 1;
+		wlen = 0b100;
+		break;
+	case 32:
+		wss = 2;
+		wlen = 0b101;
+		break;
+	default:
+		return -EINVAL;	/* unreachable break removed */
+	}
+
+	/* config sclk cycles (ws_out) */
+	writel(wss << 3, fh_i2s_module.vaddr + I2S_REG_CCR_OFFSET);
+
+	writel(wlen, fh_i2s_module.vaddr + I2S_REG_RCR0_OFFSET);
+	writel(wlen, fh_i2s_module.vaddr + I2S_REG_TCR0_OFFSET);
+	return 0;
+}
+
+/*
+ * Configure one stream direction from the user-supplied parameters:
+ * copy the config, derive byte sizes, program the I2S clocks and word
+ * length, and record the calling process as the stream owner.
+ *
+ * Returns 0 on success, -EINVAL for an unknown direction (previously a
+ * NULL-pointer dereference), or a clock-configuration error.
+ */
+static int init_audio(enum audio_type type, struct fh_audio_cfg *audio_config,
+		struct fh_audio_cfg_arg *cfg)
+{
+	struct audio_ptr_t *config = NULL;
+	int ret = 0;
+	int pid = current->tgid;
+
+	if (type == capture) {
+		config = &audio_config->capture;
+		init_waitqueue_head(&audio_config->readqueue);
+	} else if (type == playback) {
+		config = &audio_config->playback;
+		init_waitqueue_head(&audio_config->writequeue);
+	} else {
+		/* guard against an invalid direction instead of
+		 * dereferencing the NULL config below */
+		return -EINVAL;
+	}
+
+	config->cfg.io_type = cfg->io_type;
+	config->cfg.volume = cfg->volume;
+	config->cfg.rate = cfg->rate;
+	config->cfg.channels = cfg->channels;
+	config->cfg.buffer_size = cfg->buffer_size;
+	config->cfg.frame_bit = cfg->frame_bit;
+	config->cfg.period_size = cfg->period_size;
+	config->cfg.buffer_bytes =
+		frames_to_bytes(config->cfg.frame_bit, config->cfg.buffer_size);
+	config->cfg.period_bytes =
+		frames_to_bytes(config->cfg.frame_bit, config->cfg.period_size);
+	config->cfg.start_threshold = config->cfg.buffer_bytes;
+	config->cfg.stop_threshold = config->cfg.buffer_bytes;
+	reset_dma_buff(config);
+	spin_lock_init(&config->lock);
+	audio_param_store.input_io_type = config->cfg.io_type;
+	audio_param_store.input_volume = config->cfg.volume;
+
+	ret = config_i2s_clk(cfg->rate, cfg->frame_bit);
+	if (ret) {
+		pr_err("config_i2s_clk error %d\n", ret);
+		return ret;
+	}
+
+	/* * config wrapper work format * */
+	writel(1, fh_i2s_module.vaddr + I2S_REG_RER0_OFFSET);/*rx en*/
+	writel(1, fh_i2s_module.vaddr + I2S_REG_TER0_OFFSET);/*tx en*/
+
+	/*set dma fifo size*/
+	writel(0x1f, fh_i2s_module.vaddr + I2S_REG_DMARDLR_OFFSET);
+	/*set dma fifo size*/
+	writel(0x1f, fh_i2s_module.vaddr + I2S_REG_DMATDLR_OFFSET);
+	writel(0x1, fh_i2s_module.vaddr + I2S_REG_CER_OFFSET);/*en clk*/
+
+	if (type == capture) {
+		infor_record.record_pid = pid;
+		audio_config->capture.state = STATE_INIT;
+	} else {
+		infor_record.play_pid = pid;
+		audio_config->playback.state = STATE_INIT;
+	}
+
+	writel(0x1, fh_i2s_module.vaddr + I2S_REG_IER_OFFSET);
+
+	return 0;
+}
+
+/*
+ * Bytes currently available in the stream's ring buffer: for capture,
+ * recorded data the application may read; for playback, free space the
+ * application may still write.  The pointer delta is sampled under the
+ * stream lock and wrapped modulo the buffer size.
+ */
+static int avail_data_len(enum audio_type type, struct fh_audio_cfg *stream)
+{
+	int delta;
+
+	if (capture == type) {
+		spin_lock(&stream->capture.lock);
+		delta = stream->capture.hw_ptr - stream->capture.appl_ptr;
+		spin_unlock(&stream->capture.lock);
+		if (delta < 0)
+			delta += stream->capture.size;
+		return delta;
+	} else {
+		spin_lock(&stream->playback.lock);
+		delta = stream->playback.appl_ptr - stream->playback.hw_ptr;
+		spin_unlock(&stream->playback.lock);
+		/* <= 0: equal pointers mean "empty", so the whole buffer
+		 * counts as writable */
+		if (delta <= 0)
+			delta += stream->playback.size;
+		return stream->playback.size - delta;
+	}
+}
+
+/*
+ * release() handler: stop whichever stream(s) the calling process owns,
+ * identified by comparing its tgid against the recorded owners.
+ */
+static int fh_audio_close(struct inode *ip, struct file *fp)
+{
+	struct miscdevice *miscdev = fp->private_data;
+	struct audio_dev *dev =
+		container_of(miscdev, struct audio_dev, fh_audio_miscdev);
+	struct fh_audio_cfg *audio_config = &dev->audio_config;
+	int caller = current->tgid;
+
+	if (caller == infor_record.play_pid)
+		fh_i2s_stop_playback(audio_config);
+	if (caller == infor_record.record_pid)
+		fh_i2s_stop_capture(audio_config);
+
+	return 0;
+}
+
+/*
+ * Configure and start the cyclic TX (playback) DMA transfer.
+ *
+ * A temporary fh_dma_slave is attached via chan->private for the prep
+ * call.  It must be freed -- and chan->private cleared -- on every exit
+ * path; the original code leaked it (and left the DMA core a dangling
+ * pointer) on the error paths.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int register_tx_dma(struct fh_audio_cfg *audio_config)
+{
+	int ret = 0;
+	struct fh_dma_slave *tx_config;
+
+	tx_config = kzalloc(sizeof(struct fh_dma_slave), GFP_KERNEL);
+	if (!tx_config)
+		return -ENOMEM;
+
+	tx_config->cfg_hi = FHC_CFGH_DST_PER(11);
+	tx_config->dst_msize = FH_DMA_MSIZE_8;
+	tx_config->src_msize = FH_DMA_MSIZE_8;
+	if (audio_config->playback.cfg.frame_bit == 16)
+		tx_config->reg_width = FH_DMA_SLAVE_WIDTH_16BIT;
+	else
+		tx_config->reg_width = FH_DMA_SLAVE_WIDTH_32BIT;
+	tx_config->fc = FH_DMA_FC_D_M2P;
+	tx_config->tx_reg = (u32)fh_i2s_module.paddr + I2S_REG_TXDMA_OFFSET;
+	tx_config->src_master = 0;
+	tx_config->dst_master = fh_audio_dev.dma_master;
+
+	dma_tx_transfer->chan->private = tx_config;
+	if ((audio_config->playback.cfg.buffer_bytes
+		< audio_config->playback.cfg.period_bytes)
+		|| (audio_config->playback.cfg.buffer_bytes <= 0)
+		|| (audio_config->playback.cfg.period_bytes <= 0)
+		|| (audio_config->playback.cfg.buffer_bytes
+		/ audio_config->playback.cfg.period_bytes
+		> NR_DESCS_PER_CHANNEL)) {
+		printk(KERN_ERR "buffer_size and period_size are invalid\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dma_tx_transfer->cdesc = fh_dma_cyclic_prep(dma_tx_transfer->chan,
+			audio_config->playback.addr,
+			audio_config->playback.cfg.buffer_bytes,
+			audio_config->playback.cfg.period_bytes,
+			DMA_MEM_TO_DEV);
+	if (IS_ERR(dma_tx_transfer->cdesc)) {
+		printk(KERN_ERR "cyclic desc err %ld\n",
+				PTR_ERR(dma_tx_transfer->cdesc));
+		ret = -ENOMEM;
+		goto out;
+	}
+	dma_tx_transfer->cdesc->period_callback = fh_i2s_tx_dma_done;
+	dma_tx_transfer->cdesc->period_callback_param = audio_config;
+	fh_dma_cyclic_start(dma_tx_transfer->chan);
+out:
+	/* the slave config is only needed during prep: clear the pointer
+	 * the DMA core holds, then free it (success and failure alike) */
+	dma_tx_transfer->chan->private = NULL;
+	kfree(tx_config);
+	return ret;
+}
+
+/*
+ * Configure and start the cyclic RX (capture) DMA transfer.
+ *
+ * Mirror of register_tx_dma(): a temporary fh_dma_slave is handed to
+ * the DMA core via chan->private and must be freed with the pointer
+ * cleared on every exit path.  The original error path kfree'd the
+ * config but left chan->private dangling.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int register_rx_dma(struct fh_audio_cfg *audio_config)
+{
+	int ret = 0;
+	struct fh_dma_slave *rx_config;
+
+	rx_config = kzalloc(sizeof(struct fh_dma_slave), GFP_KERNEL);
+	if (!rx_config)
+		return -ENOMEM;
+	rx_config->cfg_hi = FHC_CFGH_SRC_PER(10);
+	rx_config->dst_msize = FH_DMA_MSIZE_8;
+	rx_config->src_msize = FH_DMA_MSIZE_8;
+	if (audio_config->capture.cfg.frame_bit == 16)
+		rx_config->reg_width = FH_DMA_SLAVE_WIDTH_16BIT;
+	else
+		rx_config->reg_width = FH_DMA_SLAVE_WIDTH_32BIT;
+	rx_config->fc = FH_DMA_FC_D_P2M;
+	rx_config->rx_reg = (u32)fh_i2s_module.paddr + I2S_REG_RXDMA_OFFSET;
+	rx_config->src_master = fh_audio_dev.dma_master;
+	rx_config->dst_master = 0;
+	dma_rx_transfer->chan->private = rx_config;
+	if ((audio_config->capture.cfg.buffer_bytes
+		< audio_config->capture.cfg.period_bytes)
+		|| (audio_config->capture.cfg.buffer_bytes <= 0)
+		|| (audio_config->capture.cfg.period_bytes <= 0)
+		|| (audio_config->capture.cfg.buffer_bytes
+		/ audio_config->capture.cfg.period_bytes
+		> NR_DESCS_PER_CHANNEL)) {
+		printk(KERN_ERR "buffer_size and period_size are invalid\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	dma_rx_transfer->cdesc = fh_dma_cyclic_prep(dma_rx_transfer->chan,
+			audio_config->capture.addr,
+			audio_config->capture.cfg.buffer_bytes,
+			audio_config->capture.cfg.period_bytes,
+			DMA_DEV_TO_MEM);
+	if (IS_ERR(dma_rx_transfer->cdesc)) {
+		printk(KERN_ERR "cyclic desc err %ld\n",
+				PTR_ERR(dma_rx_transfer->cdesc));
+		ret = -ENOMEM;
+		goto out;
+	}
+	dma_rx_transfer->cdesc->period_callback = fh_i2s_rx_dma_done;
+	dma_rx_transfer->cdesc->period_callback_param = audio_config;
+	fh_dma_cyclic_start(dma_rx_transfer->chan);
+out:
+	/* clear the DMA core's reference before freeing the slave config */
+	dma_rx_transfer->chan->private = NULL;
+	kfree(rx_config);
+	return ret;
+}
+
+/*
+ * Deferred playback start, run from the system workqueue: poll in 20 ms
+ * steps until the application has nearly filled the ring (free space
+ * drops to at most one period), then enable the transmitter so playback
+ * does not start on an empty buffer.  Bails out if the stream was
+ * stopped in the meantime.
+ */
+static void playback_start_wq_handler(struct work_struct *work)
+{
+	int avail;
+
+	while (1) {
+		if (STATE_STOP == fh_audio_dev.audio_config.playback.state)
+			return;
+		avail = avail_data_len(playback, &fh_audio_dev.audio_config);
+		if (avail
+			> fh_audio_dev.audio_config.playback.cfg.period_bytes)
+			msleep(20);
+		else {
+			/* enable the transmitter block */
+			writel(0x1, fh_i2s_module.vaddr + I2S_REG_ITER_OFFSET);
+			/* reset the TX DMA path */
+			writel(0x1, fh_i2s_module.vaddr
+					+ I2S_REG_RTXDMA_OFFSET);
+			/* flush the TX FIFO */
+			writel(0x1, fh_i2s_module.vaddr + I2S_REG_TXFFR_OFFSET);
+			/* enable TX channel 0 */
+			writel(0x1, fh_i2s_module.vaddr + I2S_REG_TER0_OFFSET);
+			break;
+		}
+	}
+}
+
+/*
+ * Arm and start the playback path: zero the ring, register the cyclic
+ * TX DMA, then let playback_wq enable the transmitter once the ring has
+ * been filled by the application.
+ *
+ * Returns 0 on success (or if already running), negative errno on error.
+ */
+static int fh_i2s_start_playback(struct fh_audio_cfg *audio_config)
+{
+	int ret;
+
+	if (audio_config->playback.state == STATE_RUN)
+		return 0;
+
+	if (audio_config->playback.cfg.buffer_bytes
+		>= AUDIO_DMA_PREALLOC_SIZE) {
+		/* original split literal lacked a separating space */
+		pr_info("DMA prealloc buffer is smaller than "
+			"audio_config->buffer_bytes\n");
+		return -ENOMEM;
+	}
+	memset(audio_config->playback.area, 0,
+			audio_config->playback.cfg.buffer_bytes);
+	audio_config->playback.size = audio_config->playback.cfg.buffer_bytes;
+	audio_config->playback.state = STATE_RUN;
+	ret = register_tx_dma(audio_config);
+	if (ret)
+		return ret;
+	INIT_WORK(&playback_wq, playback_start_wq_handler);
+	schedule_work(&playback_wq);
+
+	/*reset tx dma*/
+	writel(0x1, fh_i2s_module.vaddr + I2S_REG_RTXDMA_OFFSET);
+	/*reset tx fifo*/
+	writel(0x1, fh_i2s_module.vaddr + I2S_REG_TXFFR_OFFSET);
+
+	writel(0x1, fh_i2s_module.vaddr + I2S_REG_ITER_OFFSET);/*tx en*/
+	/*dma en tx*/
+	set_bit(I2S_DMA_TXEN_BIT, fh_i2s_module.vaddr + I2S_REG_DMACR_OFFSET);
+
+	return 0;
+}
+
+/*
+ * Arm and start the capture path: zero the ring, register the cyclic
+ * RX DMA, reset the RX FIFO/DMA and enable the receiver.
+ *
+ * Returns 0 on success (or if already running), negative errno on error.
+ */
+static int fh_i2s_start_capture(struct fh_audio_cfg *audio_config)
+{
+	int ret;
+
+	if (audio_config->capture.state == STATE_RUN)
+		return 0;
+
+	if (audio_config->capture.cfg.buffer_bytes >= AUDIO_DMA_PREALLOC_SIZE) {
+		/* was a bare printk without a log level */
+		printk(KERN_ERR
+		"DMA prealloc buffer is smaller than audio_config->buffer_bytes\n");
+		return -ENOMEM;
+	}
+	memset(audio_config->capture.area, 0,
+			audio_config->capture.cfg.buffer_bytes);
+	audio_config->capture.size = audio_config->capture.cfg.buffer_bytes;
+
+	audio_config->capture.state = STATE_RUN;
+
+	ret = register_rx_dma(audio_config);
+	if (ret)
+		return ret;
+
+	/*reset rx dma*/
+	writel(0x1, fh_i2s_module.vaddr + I2S_REG_RRXDMA_OFFSET);
+	/*reset rx fifo*/
+	writel(0x1, fh_i2s_module.vaddr + I2S_REG_RXFFR_OFFSET);
+
+	writel(0x1, fh_i2s_module.vaddr + I2S_REG_IRER_OFFSET);/*rx en*/
+
+	/*dma en rx*/
+	set_bit(I2S_DMA_RXEN_BIT, fh_i2s_module.vaddr + I2S_REG_DMACR_OFFSET);
+
+	return 0;
+}
+
+/*
+ * Cyclic RX DMA period callback: advance hw_ptr by one period, wrap at
+ * the end of the ring, and wake any sleeping reader once at least one
+ * full period of captured data is available.
+ */
+static void fh_i2s_rx_dma_done(void *arg)
+{
+	struct fh_audio_cfg *audio_config;
+
+	audio_config = (struct fh_audio_cfg *) arg;
+	spin_lock(&audio_config->capture.lock);
+	audio_config->capture.hw_ptr += audio_config->capture.cfg.period_bytes;
+	if (audio_config->capture.hw_ptr > audio_config->capture.size)
+		audio_config->capture.hw_ptr = audio_config->capture.hw_ptr
+				- audio_config->capture.size;
+	spin_unlock(&audio_config->capture.lock);
+	if (waitqueue_active(&audio_config->readqueue)) {
+		int avail = avail_data_len(capture, audio_config);
+		if (avail > audio_config->capture.cfg.period_bytes)
+			wake_up_interruptible(&audio_config->readqueue);
+	}
+
+}
+
+/*
+ * Cyclic TX DMA period callback: advance hw_ptr by one period (with
+ * wrap), zero the period just consumed so an underrun replays silence
+ * instead of stale samples, and wake any writer waiting for free space.
+ */
+static void fh_i2s_tx_dma_done(void *arg)
+{
+	struct fh_audio_cfg *audio_config;
+
+	audio_config = (struct fh_audio_cfg *) arg;
+	spin_lock(&audio_config->playback.lock);
+	audio_config->playback.hw_ptr +=
+			audio_config->playback.cfg.period_bytes;
+	if (audio_config->playback.hw_ptr > audio_config->playback.size) {
+		audio_config->playback.hw_ptr = audio_config->playback.hw_ptr
+				- audio_config->playback.size;
+	}
+	/* clear the period the hardware just played */
+	memset(audio_config->playback.area + audio_config->playback.hw_ptr
+			- audio_config->playback.cfg.period_bytes, 0,
+			audio_config->playback.cfg.period_bytes);
+	spin_unlock(&audio_config->playback.lock);
+	if (waitqueue_active(&audio_config->writequeue)) {
+		int avail = avail_data_len(playback, audio_config);
+
+		if (avail > audio_config->playback.cfg.period_bytes)
+			wake_up_interruptible(&audio_config->writequeue);
+	}
+}
+
+/* dma_request_channel() filter: accept only the channel id that the
+ * platform data assigned to this stream. */
+static bool fh_i2s_dma_chan_filter(struct dma_chan *chan,
+		void *filter_param)
+{
+	int wanted = *(int *) filter_param;
+
+	return chan->chan_id == wanted;
+}
+
+/* Validate a user-supplied stream config; currently only the volume
+ * range is checked.  Returns 0 if acceptable, -EINVAL otherwise. */
+static int arg_config_support(struct fh_audio_cfg_arg *cfg)
+{
+	return (get_param_from_volume(cfg->volume) < 0) ? -EINVAL : 0;
+}
+
+/*
+ * Main control interface.  Ownership model: the process (tgid) that ran
+ * AC_INIT_*_MEM becomes the owner of that stream; every later command
+ * is rejected with -EBUSY unless issued by the owner.  Pause/resume
+ * commands poke the I2S enable/reset registers directly; the exact
+ * write order mirrors the start/stop paths and must be preserved.
+ */
+static long fh_audio_ioctl(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	struct fh_audio_cfg_arg cfg;
+	struct miscdevice *miscdev = filp->private_data;
+	struct audio_dev *dev =
+		container_of(miscdev, struct audio_dev, fh_audio_miscdev);
+	struct fh_audio_cfg *audio_config = &dev->audio_config;
+	int ret;
+	int value, pid;
+	int __user *p = (int __user *)arg;
+
+	pid = current->tgid;
+	switch (cmd) {
+	case AC_INIT_CAPTURE_MEM:
+
+		if (copy_from_user((void *)&cfg, (void __user *)arg,
+				sizeof(struct fh_audio_cfg_arg))) {
+			printk(KERN_ERR "copy err\n");
+			return -EIO;
+		}
+		if (0 == arg_config_support(&cfg)) {
+			/* sem_capture serializes ownership: only one
+			 * thread may hold the capture stream */
+			if (down_trylock(&audio_config->sem_capture)) {
+				pr_info("another thread is running capture.\n");
+				return -EBUSY;
+			}
+			ret = init_audio(capture, audio_config, &cfg);
+			if (ret) {
+				up(&audio_config->sem_capture);
+				return ret;
+			}
+		} else {
+			return -EINVAL;
+		}
+
+		break;
+	case AC_INIT_PLAYBACK_MEM:
+		if (copy_from_user((void *)&cfg, (void __user *)arg,
+				sizeof(struct fh_audio_cfg_arg))) {
+			printk(KERN_ERR "copy err\n");
+			return -EIO;
+		}
+
+		if (0 == arg_config_support(&cfg)) {
+			if (down_trylock(&audio_config->sem_playback)) {
+				printk(KERN_ERR "another thread is running playback.\n");
+				return -EBUSY;
+			}
+			ret = init_audio(playback, audio_config, &cfg);
+			if (ret) {
+				up(&audio_config->sem_playback);
+				return ret;
+			}
+		} else
+			return -EINVAL;
+		break;
+	case AC_AI_EN:
+		/* only the owning process may start its stream */
+		if (infor_record.record_pid != pid)
+			return -EBUSY;
+		return fh_i2s_start_capture(audio_config);
+	case AC_AO_EN:
+		if (infor_record.play_pid != pid)
+			return -EBUSY;
+		return fh_i2s_start_playback(audio_config);
+	case AC_SET_VOL:
+		if (infor_record.record_pid != pid)
+			return -EBUSY;
+		if (get_user(value, p))
+			return -EFAULT;
+		/* validate the 0..100 range before storing */
+		ret = get_param_from_volume(value);
+		if (ret < 0)
+			return -EINVAL;
+		audio_param_store.input_volume = value;
+		switch_input_volume(audio_param_store.input_volume);
+		break;
+	case AC_SET_INPUT_MODE:
+		if (infor_record.record_pid != pid)
+			return -EBUSY;
+		if (get_user(value, p))
+			return -EFAULT;
+		if (value != mic_in && value != line_in)
+			return -EINVAL;
+		audio_param_store.input_io_type = value;
+		switch_io_type(capture,
+				audio_param_store.input_io_type);
+		break;
+	case AC_SET_OUTPUT_MODE:
+		if (infor_record.play_pid != pid)
+			return -EBUSY;
+		if (get_user(value, p))
+			return -EFAULT;
+		if (value != speaker_out && value != line_out)
+			return -EINVAL;
+		switch_io_type(playback, value);
+		break;
+	case AC_AI_DISABLE:
+		pr_info("[ac_driver]AC_AI_DISABLE\n");
+		if (infor_record.record_pid != pid)
+			return -EBUSY;
+		fh_i2s_stop_capture(audio_config);
+		pr_info(" AC_AI_DISABLE\n");
+		break;
+	case AC_AO_DISABLE:
+		pr_info("[ac_driver]AC_AO_DISABLE\n");
+		if (infor_record.play_pid != pid)
+			return -EBUSY;
+		fh_i2s_stop_playback(audio_config);
+		pr_info(" AC_AO_DISABLE\n");
+		break;
+	case AC_AI_PAUSE:
+		if (infor_record.record_pid != pid)
+			return -EBUSY;
+		printk(KERN_INFO "capture pause\n");
+		audio_config->capture.state = STATE_PAUSE;
+		/* disable the receiver block, then RX channel 0 */
+		writel(0, fh_i2s_module.vaddr + I2S_REG_IRER_OFFSET);
+		writel(0, fh_i2s_module.vaddr + I2S_REG_RER0_OFFSET);
+		break;
+	case AC_AI_RESUME:
+		if (infor_record.record_pid != pid)
+			return -EBUSY;
+		printk(KERN_INFO "capture resume\n");
+		audio_config->capture.state = STATE_RUN;
+		/*enable rx*/
+		writel(1, fh_i2s_module.vaddr + I2S_REG_IRER_OFFSET);
+		/*reset rx dma*/
+		writel(1, fh_i2s_module.vaddr + I2S_REG_RRXDMA_OFFSET);
+		/*reset rx fifo*/
+		writel(1, fh_i2s_module.vaddr + I2S_REG_RXFFR_OFFSET);
+		/*rx en*/
+		writel(1, fh_i2s_module.vaddr + I2S_REG_RER0_OFFSET);
+		break;
+	case AC_AO_PAUSE:
+		if (infor_record.play_pid != pid)
+			return -EBUSY;
+		audio_config->playback.state = STATE_PAUSE;
+		writel(0, fh_i2s_module.vaddr + I2S_REG_ITER_OFFSET);
+		writel(0, fh_i2s_module.vaddr + I2S_REG_TER0_OFFSET);
+		printk(KERN_INFO "playback pause\n");
+		break;
+	case AC_AO_RESUME:
+		if (infor_record.play_pid != pid)
+			return -EBUSY;
+		printk(KERN_INFO "playback resume\n");
+		audio_config->playback.state = STATE_RUN;
+		/*enable tx*/
+		writel(1, fh_i2s_module.vaddr + I2S_REG_ITER_OFFSET);
+		/*reset tx dma*/
+		writel(1, fh_i2s_module.vaddr + I2S_REG_RTXDMA_OFFSET);
+		/*reset tx fifo*/
+		writel(1, fh_i2s_module.vaddr + I2S_REG_TXFFR_OFFSET);
+		/*tx en*/
+		writel(1, fh_i2s_module.vaddr + I2S_REG_TER0_OFFSET);
+		break;
+	case AC_AI_SET_VOL:
+	case AC_AO_SET_VOL:
+	case AC_AI_MICIN_SET_VOL:
+		/* accepted but not implemented on this wrapper */
+		return 0;
+
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+/* open(): stash the miscdevice pointer so every other fop can recover
+ * the enclosing audio_dev via container_of(). */
+static int fh_audio_open(struct inode *ip, struct file *fp)
+{
+	fp->private_data = &fh_audio_dev.fh_audio_miscdev;
+	return 0;
+}
+
+/*
+ * poll(): a stream is readable/writable once at least one period of
+ * data (capture) or free space (playback) is available.  Only streams
+ * in STATE_RUN participate.
+ */
+static u32 fh_audio_poll(struct file *filp, poll_table *wait)
+{
+	struct miscdevice *miscdev = filp->private_data;
+	struct audio_dev
+	*dev = container_of(miscdev, struct audio_dev,
+			fh_audio_miscdev);
+	struct fh_audio_cfg *audio_config = &dev->audio_config;
+	u32 mask = 0;
+	long avail;
+	if (STATE_RUN == audio_config->capture.state) {
+		poll_wait(filp, &audio_config->readqueue, wait);
+		avail = avail_data_len(capture, audio_config);
+		if (avail > audio_config->capture.cfg.period_bytes)
+			mask |= POLLIN | POLLRDNORM;
+	}
+	if (STATE_RUN == audio_config->playback.state) {
+		poll_wait(filp, &audio_config->writequeue, wait);
+		avail = avail_data_len(playback, audio_config);
+		if (avail > audio_config->playback.cfg.period_bytes)
+			mask |= POLLOUT | POLLWRNORM;
+	}
+	return mask;
+}
+
+/*
+ * Read captured audio out of the DMA ring buffer.
+ *
+ * Waits up to 5 s for one period of data, trims the request to a whole
+ * number of stereo frames, then copies out of the ring handling the
+ * wrap-around case.  Returns bytes delivered or a negative errno.
+ * The original ignored copy_to_user() failures and an interrupted wait.
+ */
+static int fh_audio_read(struct file *filp, char __user *buf,
+		size_t len, loff_t *off)
+{
+	int ret;
+	struct miscdevice *miscdev = filp->private_data;
+	struct audio_dev *dev = container_of(miscdev, struct audio_dev,
+			fh_audio_miscdev);
+	struct fh_audio_cfg *audio_config = &dev->audio_config;
+	int after, left;
+	int pid, avail;
+
+	pid = current->tgid;
+	if (infor_record.record_pid != pid)
+		return -EBUSY;
+
+	/* wait for enough data*/
+	ret = wait_event_interruptible_timeout(audio_config->readqueue,
+			avail_data_len(capture, audio_config) >
+			audio_config->capture.cfg.period_bytes,
+			msecs_to_jiffies(5000));
+	if (ret < 0)
+		return ret;	/* interrupted by a signal */
+
+	/* trim to a whole number of (stereo) frames */
+	len -= len % (audio_config->capture.cfg.frame_bit / 8 * 2);
+
+	avail = avail_data_len(capture, audio_config);
+	if (avail > len)
+		avail = len;
+	after = avail + audio_config->capture.appl_ptr;
+	if (after > audio_config->capture.size) {
+		/* the span wraps: copy the tail of the ring, then the head */
+		left = avail - (audio_config->capture.size
+				- audio_config->capture.appl_ptr);
+		if (copy_to_user(buf, audio_config->capture.area
+				+ audio_config->capture.appl_ptr,
+				audio_config->capture.size
+				- audio_config->capture.appl_ptr))
+			return -EFAULT;
+		if (copy_to_user(buf+audio_config->capture.size
+				-audio_config->capture.appl_ptr,
+				audio_config->capture.area, left))
+			return -EFAULT;
+		spin_lock(&audio_config->capture.lock);
+		audio_config->capture.appl_ptr = left;
+		spin_unlock(&audio_config->capture.lock);
+	} else {
+		if (copy_to_user(buf,
+			audio_config->capture.area+audio_config->capture.appl_ptr,
+			avail))
+			return -EFAULT;
+		spin_lock(&audio_config->capture.lock);
+		audio_config->capture.appl_ptr += avail;
+		spin_unlock(&audio_config->capture.lock);
+	}
+
+	return avail;
+}
+
+/*
+ * Write playback audio into the DMA ring buffer.
+ *
+ * Waits up to 5 s for one period of free space, trims the request to a
+ * whole number of stereo frames, then copies into the ring handling the
+ * wrap-around case.  Returns bytes consumed, 0 when the ring is full,
+ * or a negative errno.  The original ignored copy_from_user() failures
+ * and an interrupted wait.
+ */
+static int fh_audio_write(struct file *filp, const char __user *buf,
+		size_t len, loff_t *off)
+{
+	struct miscdevice *miscdev = filp->private_data;
+	struct audio_dev *dev = container_of(miscdev, struct audio_dev,
+			fh_audio_miscdev);
+	struct fh_audio_cfg *audio_config = &dev->audio_config;
+	int ret;
+	int after, left;
+	int pid, avail;
+
+	pid = current->tgid;
+	if (infor_record.play_pid != pid)
+		return -EBUSY;
+
+	/* wait for enough free space*/
+	ret = wait_event_interruptible_timeout(audio_config->writequeue,
+			avail_data_len(playback, audio_config) >
+			audio_config->playback.cfg.period_bytes,
+			msecs_to_jiffies(5000));
+	if (ret < 0)
+		return ret;	/* interrupted by a signal */
+
+	/* trim to a whole number of (stereo) frames */
+	len -= len % (audio_config->playback.cfg.frame_bit / 8 * 2);
+
+	avail = avail_data_len(playback, audio_config);
+	if (0 == avail)
+		return 0;
+	if (avail > len)
+		avail = len;
+	after = avail+audio_config->playback.appl_ptr;
+	if (after > audio_config->playback.size) {
+		/* the span wraps: fill the tail of the ring, then the head */
+		left = avail - (audio_config->playback.size-
+				audio_config->playback.appl_ptr);
+		if (copy_from_user(audio_config->playback.area
+				+ audio_config->playback.appl_ptr,
+				buf, audio_config->playback.size
+				- audio_config->playback.appl_ptr))
+			return -EFAULT;
+		if (copy_from_user(audio_config->playback.area,
+				buf+audio_config->playback.size
+				- audio_config->playback.appl_ptr, left))
+			return -EFAULT;
+		spin_lock(&audio_config->playback.lock);
+		audio_config->playback.appl_ptr = left;
+		spin_unlock(&audio_config->playback.lock);
+	} else {
+		if (copy_from_user(audio_config->playback.area
+				+ audio_config->playback.appl_ptr, buf, avail))
+			return -EFAULT;
+		spin_lock(&audio_config->playback.lock);
+		audio_config->playback.appl_ptr += avail;
+		spin_unlock(&audio_config->playback.lock);
+	}
+
+	return avail;
+}
+
+/*
+ * I2S interrupt handler: on FIFO available/overrun conditions the
+ * affected stream is restarted (stop + start) to resynchronize the DMA
+ * ring with the hardware FIFO.
+ *
+ * NOTE(review): fh_i2s_start_* allocates with GFP_KERNEL and the stop
+ * path frees DMA descriptors; doing this from hard-IRQ context looks
+ * unsafe -- confirm whether this IRQ is threaded on this platform.
+ */
+static irqreturn_t fh_audio_interrupt(int irq, void *dev_id)
+{
+	u32 interrupts;
+	struct fh_audio_cfg *audio_config = &fh_audio_dev.audio_config;
+
+	interrupts = readl(fh_i2s_module.vaddr + I2S_REG_ISR0_OFFSET);
+
+	PRINT_AUDIO_DBG("interrupts: 0x%x\n", interrupts);
+
+	if (interrupts & I2S_INTR_RX_AVAILABLE) {
+		fh_i2s_stop_capture(audio_config);
+		fh_i2s_start_capture(audio_config);
+		PRINT_AUDIO_DBG("I2S_INTR_RX_AVAILABLE\n");
+	}
+
+	if (interrupts & I2S_INTR_RX_OVERRUN) {
+		/* only restart a stream that is actually running */
+		if (audio_config->capture.state == STATE_RUN) {
+			fh_i2s_stop_capture(audio_config);
+			fh_i2s_start_capture(audio_config);
+		} else {
+			/* reset rx fifo*/
+		}
+		PRINT_AUDIO_DBG("I2S_INTR_RX_OVERRUN\n");
+	}
+
+	if (interrupts & I2S_INTR_TX_AVAILABLE) {
+		fh_i2s_stop_playback(audio_config);
+		fh_i2s_start_playback(audio_config);
+		PRINT_AUDIO_DBG("I2S_INTR_TX_AVAILABLE\n");
+	}
+
+	if (interrupts & I2S_INTR_TX_OVERRUN) {
+		fh_i2s_stop_playback(audio_config);
+		fh_i2s_start_playback(audio_config);
+		PRINT_AUDIO_DBG("I2S_INTR_TX_OVERRUN\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Character-device ops for /dev/fh_audio (read = capture, write =
+ * playback, ioctl = control, poll = period-granular readiness). */
+static const struct file_operations fh_i2s_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.unlocked_ioctl = fh_audio_ioctl,
+	.release = fh_audio_close,
+	.open = fh_audio_open,
+	.poll = fh_audio_poll,
+	.read = fh_audio_read,
+	.write = fh_audio_write,
+
+};
+
+#define FREQ_12_288_MHz 12288000
+#define FREQ_11_2896_MHz 11289600
+
+/*
+ * Program the audio-codec-wrapper PLL/VCO for one of the two supported
+ * master clocks (12.288 MHz for the 8 kHz rate family, 11.2896 MHz for
+ * the 22.05 kHz family).  The register offsets and values are an
+ * undocumented vendor sequence -- do not reorder the writes.
+ *
+ * NOTE(review): `u32 addr` truncates acw_vaddr (unsigned long); this is
+ * only correct on a 32-bit ARM target -- confirm before reuse.
+ *
+ * Returns 0 on success, -EFAULT if the wrapper is unmapped, -EINVAL
+ * for an unsupported frequency.
+ */
+static int config_pllvco_freq(u32 freq)
+{
+	u32 addr = fh_i2s_module.acw_vaddr;
+	if (addr == 0) {
+		pr_err("acw_vaddr is null\n");
+		return -EFAULT;
+	}
+	if (freq == FREQ_12_288_MHz) {
+		/* config PLLVCO to 12.288M */
+		writel(0x03, addr + 0xac);
+		msleep(20);
+		writel(0x07, addr + 0xb0);
+		writel(0x0f, addr + 0xb4);
+		writel(0x07, addr + 0xb8);
+		writel(0x31, addr + 0xbc);
+		writel(0x26, addr + 0xc0);
+		writel(0xe9, addr + 0xc4);
+	} else if (freq == FREQ_11_2896_MHz) {
+		/* config PLLVCO to 11.2896M */
+		writel(0x03, addr + 0xac);
+		msleep(20);
+		writel(0x06, addr + 0xb0);
+		writel(0x1f, addr + 0xb4);
+		writel(0x0f, addr + 0xb8);
+		writel(0x86, addr + 0xbc);
+		writel(0xc2, addr + 0xc0);
+		writel(0x26, addr + 0xc4);
+	} else {
+		pr_err("unsupport freq %d\n", freq);
+		return -EINVAL;
+	}
+
+	/* common post-divider / enable sequence (vendor-provided) */
+	writel(0x00, addr + 0x9c);
+	writel(0x21, addr + 0xc8);
+	writel(0x07, addr + 0xcc);
+	writel(0x05, addr + 0xd0);
+	writel(0x02, addr + 0x8c);
+	writel(0x10, addr + 0xa0);
+
+	return 0;
+}
+
+/*
+ * config_i2s_clk - derive the I2S bit clock for a given sample rate and
+ * word width.
+ *
+ * @rate: sample rate in Hz; multiples of 8000 use the 12.288 MHz VCO,
+ *        multiples of 22050 use the 11.2896 MHz VCO.
+ * @bit:  bits per sample per channel.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes: ret is now a signed int (it carries negative errnos), and a
+ * missing/failing pd->clk_config is returned immediately — previously
+ * the error was set but then silently overwritten by the following
+ * config_i2s_frame_bit() call.
+ */
+static int config_i2s_clk(u32 rate, u32 bit)
+{
+	int ret;
+	u32 freq, div;
+	struct fh_i2s_platform_data *pd = fh_audio_dev.plat_data;
+
+	if (rate % 8000 == 0) {
+		freq = FREQ_12_288_MHz;
+	} else if (rate % 22050 == 0) {
+		freq = FREQ_11_2896_MHz;
+	} else {
+		pr_err("unsupport rate %d\n", rate);
+		return -EINVAL;
+	}
+
+	ret = config_pllvco_freq(freq);
+	if (ret) {
+		pr_err("config_pllvco_freq err %d\n", ret);
+		return ret;
+	}
+
+	/* bit-clock divider: two channels of 'bit' bits per frame */
+	div = freq / rate / bit / 2;
+
+	/* the divider must be a multiple of 4 and divide evenly */
+	if (div % 4 != 0 || freq % (rate*bit*2) != 0) {
+		pr_err("unsupport rate %d and bit %d\n", rate, bit);
+		return -EINVAL;
+	}
+
+	pr_debug("freq %d, rate %d, bit %d, div %d\n", freq, rate, bit, div);
+
+	if (pd->clk_config) {
+		ret = pd->clk_config((div - 1) << 8 | 0x03);
+		if (ret) {
+			pr_err("pd->clk_config err %d\n", ret);
+			return ret;
+		}
+	} else {
+		pr_err("pd->clk_config is invalid");
+		return -EINVAL;
+	}
+
+	ret = config_i2s_frame_bit(bit);
+	if (ret) {
+		pr_err("config_i2s_frame_bit err %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * fh_i2s_drv_probe - platform probe: map the I2S registers, register the
+ * misc device, request the shared interrupt, map the ACW (codec wrapper)
+ * registers and enable the I2S/ACODEC clocks.
+ *
+ * Fixes: every error path taken after misc_register() now deregisters
+ * the misc device (it was previously leaked on failure), and the IRQ
+ * validity checks were moved before misc_register() so that early
+ * failures need no unwinding.
+ */
+static int fh_i2s_drv_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int irq;
+	u32 acw_virt_addr = 0;
+
+	struct resource *res_mem =
+		platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct resource *res_irq =
+		platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+	fh_audio_dev.plat_data =
+		(struct fh_i2s_platform_data *)pdev->dev.platform_data;
+
+	if (res_mem) {
+		fh_i2s_module.paddr = res_mem->start;
+		fh_i2s_module.vaddr = devm_ioremap(&pdev->dev, res_mem->start,
+				resource_size(res_mem));
+	}
+
+	if (!fh_audio_dev.plat_data) {
+		dev_err(&pdev->dev, "plat_data not configured\n");
+		ret = -EINVAL;
+		goto out_return;
+	}
+
+	fh_audio_dev.channel_assign.capture_channel =
+		fh_audio_dev.plat_data->dma_capture_channel;
+	fh_audio_dev.channel_assign.playback_channel =
+		fh_audio_dev.plat_data->dma_playback_channel;
+	fh_audio_dev.dma_master = fh_audio_dev.plat_data->dma_master;
+
+	if (!fh_i2s_module.vaddr || !fh_i2s_module.paddr) {
+		dev_err(&pdev->dev, "get mem resource error\n");
+		ret = -ENOMEM;
+		goto out_return;
+	}
+
+	if (!res_irq) {
+		pr_err("%s: getting resource failed"
+				"cannot get IORESOURCE_IRQ\n", __func__);
+		ret = -ENXIO;
+		goto out_return;
+	}
+
+	irq = res_irq->start;
+	if (!irq) {
+		dev_err(&pdev->dev, "%s: cannot get irq\n", __func__);
+		ret = -ENXIO;
+		goto out_return;
+	}
+
+	spin_lock_init(&fh_i2s_module.lock);
+
+	ret = misc_register(&fh_audio_dev.fh_audio_miscdev);
+	if (ret)
+		goto out_return;
+
+	fh_i2s_module.irq = irq;
+
+	ret = devm_request_irq(&pdev->dev, irq, fh_audio_interrupt, IRQF_SHARED,
+			DEVICE_NAME, &fh_i2s_module);
+	if (ret)
+		goto out_deregister;
+
+	acw_virt_addr = (unsigned int)devm_ioremap(&pdev->dev,
+			ACW_REG_BASE, SZ_4K);
+	if (!acw_virt_addr) {
+		dev_err(&pdev->dev, "cannot get acw_virt_addr\n");
+		ret = -ENOMEM;
+		goto out_free_irq;
+	}
+	fh_i2s_module.acw_vaddr = acw_virt_addr;
+
+	fh_i2s_module.clk = clk_get(NULL, "i2s_clk");
+	if (IS_ERR(fh_i2s_module.clk)) {
+		ret = PTR_ERR(fh_i2s_module.clk);
+		goto out_free_irq;
+	}
+	clk_enable(fh_i2s_module.clk);
+
+	/* the external codec clock is optional */
+	if (fh_audio_dev.plat_data->acodec_clk_name) {
+		char *clk_name = fh_audio_dev.plat_data->acodec_clk_name;
+
+		fh_i2s_module.acodec_clk = clk_get(NULL, clk_name);
+		if (IS_ERR(fh_i2s_module.acodec_clk))
+			dev_err(&pdev->dev, "cannot find clk: %s\n", clk_name);
+		else
+			clk_enable(fh_i2s_module.acodec_clk);
+	}
+
+	dev_info(&pdev->dev, "FH DW I2S Driver\n");
+	return 0;
+
+out_free_irq:
+	devm_free_irq(&pdev->dev, irq, &fh_i2s_module);
+out_deregister:
+	misc_deregister(&fh_audio_dev.fh_audio_miscdev);
+out_return:
+	dev_err(&pdev->dev, "%s failed\n", __func__);
+	return ret;
+}
+
+/*
+ * fh_i2s_drv_remove - undo probe: deregister the misc device, release
+ * the IRQ and disable/put both clocks.  devm-mapped regions are
+ * released automatically by the driver core.
+ */
+static int fh_i2s_drv_remove(struct platform_device *pdev)
+{
+	misc_deregister(&fh_audio_dev.fh_audio_miscdev);
+
+	devm_free_irq(&pdev->dev, fh_i2s_module.irq, &fh_i2s_module);
+
+	if (fh_i2s_module.clk) {
+		clk_disable(fh_i2s_module.clk);
+		clk_put(fh_i2s_module.clk);
+	}
+	/* acodec_clk is optional and may never have been acquired */
+	if (fh_i2s_module.acodec_clk) {
+		clk_disable(fh_i2s_module.acodec_clk);
+		clk_put(fh_i2s_module.acodec_clk);
+	}
+	dev_info(&pdev->dev, "FH DW I2S Driver Removed\n");
+	return 0;
+}
+
+/* Platform driver glue; matched by name against the board device. */
+static struct platform_driver fh_audio_driver = {
+	.probe = fh_i2s_drv_probe,
+	.remove = fh_i2s_drv_remove,
+	.driver = {
+		.name = DEVICE_NAME,
+		.owner = THIS_MODULE,
+	}
+};
+
+/*
+ * audio_prealloc_dma_buffer - pre-allocate one coherent DMA buffer of
+ * AUDIO_DMA_PREALLOC_SIZE (rounded up to a page order) for each of the
+ * capture and playback directions.
+ *
+ * Returns 0 on success, -ENOMEM on failure.
+ * Fix: when the playback allocation fails the already-allocated capture
+ * buffer is now freed (it was previously leaked), and the error code is
+ * -ENOMEM rather than the untyped -1.
+ */
+static int audio_prealloc_dma_buffer(struct fh_audio_cfg *audio_config)
+{
+	int pg;
+	gfp_t gfp_flags;
+
+	pg = get_order(AUDIO_DMA_PREALLOC_SIZE);
+	gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
+	audio_config->capture.dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	audio_config->capture.area = dma_alloc_coherent(
+			&audio_config->capture.dev, PAGE_SIZE << pg,
+			&audio_config->capture.addr, gfp_flags);
+	if (!audio_config->capture.area) {
+		printk(KERN_ERR"no enough mem for capture buffer alloc\n");
+		return -ENOMEM;
+	}
+	audio_config->playback.dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	audio_config->playback.area = dma_alloc_coherent(
+			&audio_config->playback.dev, PAGE_SIZE << pg,
+			&audio_config->playback.addr, gfp_flags);
+	if (!audio_config->playback.area) {
+		printk(KERN_ERR"no enough mem for playback buffer alloc\n");
+		/* roll back the capture buffer allocated above */
+		dma_free_coherent(&audio_config->capture.dev, PAGE_SIZE << pg,
+				audio_config->capture.area,
+				audio_config->capture.addr);
+		audio_config->capture.area = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/* Free both coherent DMA buffers allocated by audio_prealloc_dma_buffer(). */
+static void audio_free_prealloc_dma_buffer(struct fh_audio_cfg *audio_config)
+{
+	int pg;
+	pg = get_order(AUDIO_DMA_PREALLOC_SIZE);
+	dma_free_coherent(&audio_config->capture.dev, PAGE_SIZE << pg,
+			audio_config->capture.area, audio_config->capture.addr);
+	dma_free_coherent(&audio_config->playback.dev, PAGE_SIZE << pg,
+			audio_config->playback.area,
+			audio_config->playback.addr);
+}
+
+/* Initialise the capture/playback semaphores (binary: one opener each). */
+static void init_audio_mutex(struct fh_audio_cfg *audio_config)
+{
+	sema_init(&audio_config->sem_capture, 1);
+	sema_init(&audio_config->sem_playback, 1);
+}
+
+/*
+ * audio_request_dma_channel - allocate the per-direction DMA bookkeeping
+ * structures and reserve one DMA_SLAVE channel for capture and one for
+ * playback, using the channel numbers assigned by the platform data.
+ *
+ * Returns 0 on success or -EFAULT; on any failure all partially
+ * acquired channels and allocations are rolled back.
+ * Fix: corrected the "failedx" typo in the rx-channel error message.
+ */
+static int audio_request_dma_channel(void)
+{
+	dma_cap_mask_t mask;
+
+	dma_rx_transfer = kzalloc(sizeof(struct fh_dma_chan), GFP_KERNEL);
+	if (!dma_rx_transfer) {
+		printk(KERN_ERR"alloc dma_rx_transfer failed\n");
+		goto mem_fail;
+	}
+
+	dma_tx_transfer = kzalloc(sizeof(struct fh_dma_chan), GFP_KERNEL);
+	if (!dma_tx_transfer) {
+		printk(KERN_ERR"alloc dma_tx_transfer failed\n");
+		goto mem_fail;
+	}
+
+	/* the filter picks the specific channel number out of the mask */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_rx_transfer->chan = dma_request_channel(mask,
+			fh_i2s_dma_chan_filter,
+			&fh_audio_dev.channel_assign.capture_channel);
+	if (!dma_rx_transfer->chan) {
+		printk(KERN_ERR"request audio rx dma channel failed\n");
+		goto channel_fail;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_tx_transfer->chan = dma_request_channel(mask,
+			fh_i2s_dma_chan_filter,
+			&fh_audio_dev.channel_assign.playback_channel);
+	if (!dma_tx_transfer->chan) {
+		printk(KERN_ERR"request tx dma channel failed\n");
+		goto channel_fail;
+	}
+
+	return 0;
+channel_fail:
+	/* release whichever channels were obtained before the failure */
+	if (dma_rx_transfer->chan) {
+		dma_release_channel(dma_rx_transfer->chan);
+		dma_rx_transfer->chan = NULL;
+	}
+	if (dma_tx_transfer->chan) {
+		dma_release_channel(dma_tx_transfer->chan);
+		dma_tx_transfer->chan = NULL;
+	}
+
+mem_fail:
+	/* kfree(NULL) is a no-op, so both branches are safe here */
+	if (dma_rx_transfer != NULL) {
+		kfree(dma_rx_transfer);
+		dma_rx_transfer = NULL;
+	}
+	if (dma_tx_transfer != NULL) {
+		kfree(dma_tx_transfer);
+		dma_tx_transfer = NULL;
+	}
+
+	return -EFAULT;
+}
+
+/*
+ * audio_release_dma_channel - release the playback and capture DMA
+ * channels (if held) and free their bookkeeping structures.  Safe to
+ * call when either side was never allocated.
+ */
+static void audio_release_dma_channel(void)
+{
+	struct fh_dma_chan *xfer;
+
+	/* playback (tx) side first */
+	xfer = dma_tx_transfer;
+	if (xfer) {
+		if (xfer->chan) {
+			dma_release_channel(xfer->chan);
+			xfer->chan = NULL;
+		}
+		kfree(xfer);
+		dma_tx_transfer = NULL;
+	}
+
+	/* then the capture (rx) side */
+	xfer = dma_rx_transfer;
+	if (xfer) {
+		if (xfer->chan) {
+			dma_release_channel(xfer->chan);
+			xfer->chan = NULL;
+		}
+		kfree(xfer);
+		dma_rx_transfer = NULL;
+	}
+}
+
+/*
+ * fh_audio_init - module entry: create the proc node, initialise the
+ * open-count semaphores, register the platform driver and pre-allocate
+ * the DMA buffers and channels.  Unwinds in reverse order on failure.
+ */
+static int __init fh_audio_init(void)
+{
+	int ret = 0;
+
+	create_proc();
+	init_audio_mutex(&fh_audio_dev.audio_config);
+	ret = platform_driver_register(&fh_audio_driver);
+	if (ret)
+		goto failed;
+	ret = audio_prealloc_dma_buffer(&fh_audio_dev.audio_config);
+	if (ret)
+		goto unregister_driver;
+	ret = audio_request_dma_channel();
+	if (ret)
+		goto free_dma_buffer;
+
+	return 0;
+
+free_dma_buffer:
+	audio_free_prealloc_dma_buffer(&fh_audio_dev.audio_config);
+unregister_driver:
+	platform_driver_unregister(&fh_audio_driver);
+failed:
+	remove_proc();
+	pr_err("%s failed\n", __func__);
+	return ret;
+}
+module_init(fh_audio_init);
+
+/* Module exit: tear down in the reverse order of fh_audio_init(). */
+static void __exit fh_audio_exit(void)
+{
+	remove_proc();
+	audio_release_dma_channel();
+	audio_free_prealloc_dma_buffer(&fh_audio_dev.audio_config);
+	platform_driver_unregister(&fh_audio_driver);
+}
+module_exit(fh_audio_exit);
+
+MODULE_AUTHOR("Fullhan");
+MODULE_DESCRIPTION("FH_DW_I2S");
+MODULE_LICENSE("GPL");
+
+/****************************debug proc*****************************/
+#include <linux/proc_fs.h>
+
+#define PROC_NAME "driver/"DEVICE_NAME
+static struct proc_dir_entry *proc_file;
+
+/*
+ * seq_file start: this proc file has exactly one record, so return a
+ * non-NULL token only on the first position and NULL afterwards.
+ */
+static void *v_seq_start(struct seq_file *s, loff_t *pos)
+{
+	static unsigned long counter;
+	if (*pos == 0)
+		return &counter;
+	else {
+		*pos = 0;
+		return NULL;
+	}
+}
+
+/* seq_file next: single-record file, so there is never a next item. */
+static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return NULL;
+}
+
+/* seq_file stop: nothing to release. */
+static void v_seq_stop(struct seq_file *s, void *v)
+{
+
+}
+
+/*
+ * seq_file show: dump the first I2S registers (offsets 0x00..0x20,
+ * word-aligned) for debugging via /proc.
+ */
+static int v_seq_show(struct seq_file *sfile, void *v)
+{
+	int i;
+	u32 data;
+
+	if (fh_i2s_module.vaddr == NULL)
+		seq_printf(sfile, "reg is null\n");
+	else {
+		for (i = 0; i <= 0x20; i += 4) {
+			data = readl(fh_i2s_module.vaddr + i);
+			seq_printf(sfile, "0x%02x reg = 0x%x\n", i, data);
+		}
+	}
+
+	return 0;
+}
+
+/* Iterator for the debug proc file (single record; see v_seq_*). */
+static const struct seq_operations fh_dwi2s_seq_ops = {
+	.start = v_seq_start,
+	.next = v_seq_next,
+	.stop = v_seq_stop,
+	.show = v_seq_show
+};
+
+/* Bind the seq_file iterator on open. */
+static int proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &fh_dwi2s_seq_ops);
+}
+
+/*
+ * Proc-file operations.  Fix: a seq_open()-based file must pair with
+ * seq_release (and seq_lseek) — without .release every open of the proc
+ * file leaked the struct seq_file and its buffer.
+ */
+static const struct file_operations fh_dwi2s_proc_ops = {
+	.owner = THIS_MODULE,
+	.open = proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+/* Create the /proc/driver/<DEVICE_NAME> debug entry (best-effort). */
+static void create_proc(void)
+{
+	proc_file = proc_create(PROC_NAME, 0644, NULL, &fh_dwi2s_proc_ops);
+
+	if (proc_file == NULL)
+		pr_err("%s: ERROR: %s proc file create failed",
+				__func__, DEVICE_NAME);
+
+}
+
+/* Remove the debug proc entry created by create_proc(). */
+static void remove_proc(void)
+{
+	remove_proc_entry(PROC_NAME, NULL);
+}
diff --git a/drivers/misc/fh_efuse.c b/drivers/misc/fh_efuse.c
new file mode 100644
index 00000000..e3ff972a
--- /dev/null
+++ b/drivers/misc/fh_efuse.c
@@ -0,0 +1,613 @@
+/*
+ * fh_efuse.c
+ *
+ * Created on: Mar 13, 2015
+ * Author: duobao
+ */
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+/*****************************************************************************
+ * Include Section
+ * add all #include here
+ *****************************************************************************/
+#include "fh_efuse.h"
+#include <mach/fh_efuse_plat.h>
+
+/*****************************************************************************
+ * Define section
+ * add all #define here
+ *****************************************************************************/
+
+#define wrap_readl(wrap, name) \
+ __raw_readl(&(((struct wrap_efuse_reg *)wrap->regs)->name))
+
+#define wrap_writel(wrap, name, val) \
+ __raw_writel((val), &(((struct wrap_efuse_reg *)wrap->regs)->name))
+
+#define wrap_readw(wrap, name) \
+ __raw_readw(&(((struct wrap_efuse_reg *)wrap->regs)->name))
+
+#define wrap_writew(wrap, name, val) \
+ __raw_writew((val), &(((struct wrap_efuse_reg *)wrap->regs)->name))
+
+#define wrap_readb(wrap, name) \
+ __raw_readb(&(((struct wrap_efuse_reg *)wrap->regs)->name))
+
+#define wrap_writeb(wrap, name, val) \
+ __raw_writeb((val), &(((struct wrap_efuse_reg *)wrap->regs)->name))
+
+#define FH_EFUSE_PLAT_DEVICE_NAME "fh_efuse"
+#define FH_EFUSE_MISC_DEVICE_NAME "fh_efuse"
+#define FH_EFUSE_MISC_DEVICE_NODE_NAME "fh_efuse_node"
+#define EFUSE_HW_BUFFER_POS (4)
+/****************************************************************************
+ * ADT section
+ * add definition of user defined Data Type
+ * that only be used in this file here
+ ***************************************************************************/
+
+/* Memory-mapped eFuse controller register layout (offsets in comments). */
+struct wrap_efuse_reg {
+	u32 efuse_cmd; /*0x0*/
+	u32 efuse_config; /*0x4*/
+	u32 efuse_match_key; /*0x8*/
+	u32 efuse_timing0; /*0xc*/
+	u32 efuse_timing1; /*0x10*/
+	u32 efuse_timing2; /*0x14*/
+	u32 efuse_timing3; /*0x18*/
+	u32 efuse_timing4; /*0x1c*/
+	u32 efuse_timing5; /*0x20*/
+	u32 efuse_timing6; /*0x24*/
+	u32 efuse_dout; /*0x28*/
+	u32 efuse_status0; /*0x2c*/
+	u32 efuse_status1; /*0x30*/
+	u32 efuse_status2; /*0x34*/
+	u32 efuse_status3; /*0x38*/
+	u32 efuse_status4; /*0x3c*/
+	u32 efuse_mem_info; /*0x40*/
+};
+
+
+
+
+/* Command codes written to the efuse_cmd register. */
+enum {
+	CMD_TRANS_AESKEY = 4,
+	CMD_WFLGA_AUTO = 8,
+};
+
+/* Single driver-wide eFuse device state. */
+struct wrap_efuse_obj s_efuse_obj = {0};
+
+#define EFUSE_MAX_ENTRY 60
+
+
+/******************************************************************************
+ * Function prototype section
+ * add prototypes for all functions called by this file, excepting those
+ * declared in header file
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Global variables section - Exported
+ * add declaration of global variables that will be exported here
+ * e.g.
+ * int8_t foo;
+ ****************************************************************************/
+
+/*****************************************************************************
+
+ * static fun;
+ *****************************************************************************/
+static int fh_efuse_open(struct inode *inode, struct file *file);
+static int fh_efuse_release(struct inode *inode, struct file *filp);
+static long fh_efuse_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+/*****************************************************************************
+ * Global variables section - Local
+ * define global variables (referred to only in this file) here;
+ * static keyword should be used to limit scope of local variable to this file
+ * e.g.
+ * static uint8_t ufoo;
+ *****************************************************************************/
+
+/* Character-device operations for the eFuse misc device. */
+static const struct file_operations fh_efuse_fops = {
+	.owner = THIS_MODULE,
+	.open = fh_efuse_open,
+	.release = fh_efuse_release,
+	.unlocked_ioctl = fh_efuse_ioctl,
+};
+
+/* Misc device registered in probe(); all work happens via ioctl. */
+static struct miscdevice fh_efuse_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = FH_EFUSE_MISC_DEVICE_NAME,
+	/*.nodename = FH_EFUSE_MISC_DEVICE_NODE_NAME,*/
+	.fops = &fh_efuse_fops,
+};
+
+/* open: no per-file state to set up. */
+static int fh_efuse_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* release: nothing to tear down. */
+static int fh_efuse_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/*
+ * efuse_detect_complete - busy-wait until bit 'pos' of efuse_status0 is
+ * set, polling every 10 us for up to ~10 ms.  On timeout a message is
+ * logged and the function returns without reporting an error to the
+ * caller (the subsequent register access proceeds regardless).
+ */
+void efuse_detect_complete(struct wrap_efuse_obj *obj, int pos)
+{
+	unsigned int rdata;
+	unsigned int time = 0;
+	/*printk("efuse wait pos %x...\n",pos);*/
+	do {
+		time++;
+		rdata = wrap_readl(obj, efuse_status0);
+		if (time > 1000) {
+			printk("[efuse]:detect time out...pos: 0x%x\n", pos);
+			return;
+		}
+		udelay(10);
+	} while ((rdata&(1<<pos)) != 1<<pos);
+	/*printk("efuse wait pos done...\n",pos);*/
+	udelay(10);
+}
+
+/*
+ * auto_check_efuse_pro_bits - run the hardware auto-check command and
+ * read back the 64 protect bits into buff[0..1].  A set bit means the
+ * corresponding entry may still be written.
+ */
+void auto_check_efuse_pro_bits(struct wrap_efuse_obj *obj, u32 *buff)
+{
+	/* first set auto check cmd, then wait for completion (bit 8) */
+	wrap_writel(obj, efuse_cmd, CMD_WFLGA_AUTO);
+	efuse_detect_complete(obj, 8);
+	/* protect bits land in status1/status2 once the command finishes */
+	buff[0] = wrap_readl(obj, efuse_status1);
+	buff[1] = wrap_readl(obj, efuse_status2);
+}
+
+/*
+ * open_efuse_power - switch on the eFuse macro's independent power rail
+ * by setting bit 27 of efuse_config (read-modify-write).
+ *
+ * Fix: the result of the write macro was pointlessly assigned back to
+ * 'data', which only compiled because the macro expands to an
+ * assignment expression; the write is now a plain statement.
+ */
+void open_efuse_power(struct wrap_efuse_obj *obj)
+{
+	unsigned int data;
+
+	data = wrap_readl(obj, efuse_config);
+	data |= 1<<27;
+	wrap_writel(obj, efuse_config, data);
+}
+
+/*
+ * efuse_buffer_set - program the hardware buffer enable bit
+ * (EFUSE_HW_BUFFER_POS) in efuse_config to 'value' (0 or 1).
+ *
+ * Fix: as in open_efuse_power(), the write macro's result was assigned
+ * back to 'data' for no reason; the write is now a plain statement.
+ */
+void efuse_buffer_set(struct wrap_efuse_obj *obj, unsigned int value)
+{
+	unsigned int data;
+
+	data = wrap_readl(obj, efuse_config);
+	data &= ~(1 << EFUSE_HW_BUFFER_POS);
+	data |= value << EFUSE_HW_BUFFER_POS;
+	wrap_writel(obj, efuse_config, data);
+}
+
+/*
+ * efuse_write_key_byte - burn one byte into eFuse entry 'entry'.
+ *
+ * Command word layout: data in bits [19:12], entry (0..63) in bits
+ * [9:4], opcode 0x02 (write) in bits [3:0].  Waits for completion
+ * status bit 2 before returning.
+ */
+void efuse_write_key_byte(struct wrap_efuse_obj *obj, u32 entry, u8 data)
+{
+	u32 temp = 0;
+	temp = (u32)data;
+	/* keep only the data byte, then shift into bits [19:12] */
+	temp &= ~0xffffff00;
+	temp <<= 12;
+	/* entry index is 6 bits wide (0..63) */
+	entry &= 0x3f;
+	temp |= (entry<<4) | (0x02);
+	/*
+	printk("efuse write entry: %x,data: %x\n",entry,data);
+
+	printk("efuse write data :%x\n",temp);
+	wrap_writel(obj,efuse_cmd,(data<<12) + (entry<<4) + (0x02));
+	*/
+	wrap_writel(obj, efuse_cmd, temp);
+	efuse_detect_complete(obj, 2);
+}
+
+/* Issue command 1 (load user data) and wait for completion bit 1. */
+void efuse_load_usrcmd(struct wrap_efuse_obj *obj)
+{
+	wrap_writel(obj, efuse_cmd, 1);
+	efuse_detect_complete(obj, 1);
+}
+
+/* Re-run the auto-check sequence, then reload the user command state. */
+void refresh_efuse(struct wrap_efuse_obj *obj)
+{
+	wrap_writel(obj, efuse_cmd, CMD_WFLGA_AUTO);
+	efuse_detect_complete(obj, 8);
+	efuse_load_usrcmd(obj);
+}
+
+/*
+ * efuse_check_map_para - sanity-check a user-supplied key map: the size
+ * must not exceed MAX_EX_KEY_MAP_SIZE, each entry must be 4-aligned and
+ * key numbers must be sequential.  Violations are only logged — the
+ * caller proceeds regardless (diagnostic aid, not enforcement).
+ */
+void efuse_check_map_para(unsigned int size, struct ex_key_map *p_key_map)
+{
+	int loop;
+	if (size > MAX_EX_KEY_MAP_SIZE) {
+		printk(KERN_ERR "%s :: size : %d > max size : %d\n",
+				__func__, size, MAX_EX_KEY_MAP_SIZE);
+	}
+	for (loop = 0; loop < size; loop++) {
+		if ((p_key_map[loop].ex_mem_entry % 4 != 0)
+				|| (p_key_map[loop].crypto_key_no != loop)) {
+			printk(KERN_ERR \
+			"map[%d]:entry[0x%x]:aes key[0x%x] para error..\n",
+					loop,
+					p_key_map[loop].ex_mem_entry,
+					p_key_map[loop].crypto_key_no);
+		}
+	}
+}
+
+/*
+ * efuse_check_update_trans_flag - decide whether the eFuse key must be
+ * re-transferred to the AES engine by comparing the caller's parameters
+ * (p_alg) with the cached copy from the previous call.
+ *
+ * Sets obj->efuse_trans_flag = EFUSE_NEED_TRANS when the mode changed,
+ * the key map size changed, or any key-map entry differs; otherwise the
+ * flag is left untouched.  The cached copy (obj->old_usr_def /
+ * obj->old_size) is updated whenever a difference is detected.
+ * NOTE(review): statement order here mirrors the compare/cache protocol
+ * in efuse_trans_key() — kept byte-identical.
+ */
+void efuse_check_update_trans_flag(struct wrap_efuse_obj *obj,
+u32 start_no, u32 size, struct af_alg_usr_def *p_alg){
+
+	int i;
+	struct ex_key_map *p_key_map;
+	struct ex_key_map *old_p_key_map;
+	struct af_alg_usr_def *p_old_usr_def;
+	int key_map_size;
+
+	p_old_usr_def = &obj->old_usr_def;
+	/* first check if use cpu mode */
+	if (p_old_usr_def->mode != p_alg->mode) {
+		obj->efuse_trans_flag = EFUSE_NEED_TRANS;
+		p_old_usr_def->mode = p_alg->mode;
+		/* not return here..maybe para not the same..
+		just update below. */
+	}
+	if (obj->open_flag & CRYPTO_EX_MEM_SWITCH_KEY) {
+		if (p_alg->mode & CRYPTO_EX_MEM_SWITCH_KEY) {
+			if (p_alg->mode & CRYPTO_EX_MEM_4_ENTRY_1_KEY) {
+				key_map_size = p_alg->adv.ex_key_para.map_size;
+				p_key_map = &p_alg->adv.ex_key_para.map[0];
+				old_p_key_map = &p_old_usr_def->adv.ex_key_para.map[0];
+				efuse_check_map_para(key_map_size, p_key_map);
+				/* check map size */
+				if (key_map_size != p_old_usr_def->adv.ex_key_para.map_size) {
+					/* cpy new to old */
+					memcpy(p_old_usr_def, p_alg,
+					sizeof(struct af_alg_usr_def));
+					obj->efuse_trans_flag = EFUSE_NEED_TRANS;
+					return;
+				}
+				/* check para. */
+				for (i = 0; i < key_map_size; i++,
+				p_key_map++, old_p_key_map++) {
+					if (memcmp(p_key_map, old_p_key_map,
+					sizeof(struct ex_key_map))) {
+						/* cmp error */
+						memcpy(p_old_usr_def, p_alg,
+						sizeof(struct af_alg_usr_def));
+						obj->efuse_trans_flag = EFUSE_NEED_TRANS;
+						return;
+					}
+				}
+			}
+		} else {
+			/*chip need set the map...and usr not set */
+			key_map_size = size;
+			if (key_map_size != obj->old_size) {
+				obj->old_size = key_map_size;
+				obj->efuse_trans_flag = EFUSE_NEED_TRANS;
+				return;
+			}
+		}
+	} else {
+		key_map_size = size;
+		if (key_map_size != obj->old_size) {
+			obj->old_size = key_map_size;
+			obj->efuse_trans_flag = EFUSE_NEED_TRANS;
+			return;
+		}
+	}
+}
+
+/*
+ * efuse_trans_key - copy keys from eFuse storage into the AES engine,
+ * but only when efuse_check_update_trans_flag() says the cached
+ * parameters are stale (avoids redundant hardware transfers).
+ *
+ * Three layouts are handled: an explicit user key map (4-entries-per-
+ * key mode), a chip-default map, and the legacy one-key-per-slot mode.
+ * Each transfer issues cmd 0x04 with the key index in bits [23:20] and,
+ * where a map is used, the source entry group in efuse_config[31:28].
+ * NOTE(review): the refresh_efuse()-before-each-transfer ordering is
+ * part of the hardware protocol — kept byte-identical.
+ */
+void efuse_trans_key(struct wrap_efuse_obj *obj,
+u32 start_no, u32 size, struct af_alg_usr_def *p_alg)
+{
+	int i;
+	struct ex_key_map *p_key_map;
+	int key_map_size;
+	unsigned int temp_reg;
+
+	efuse_check_update_trans_flag(obj, start_no, size, p_alg);
+	if (obj->efuse_trans_flag != EFUSE_NEED_TRANS) {
+		/* printk("###DO NOT need to update efuse para to aes...\n"); */
+		return;
+	} else {
+		/* clear update flag. */
+		obj->efuse_trans_flag = 0;
+		/* printk("<<<need to update efuse para to aes...\n"); */
+	}
+	if (obj->open_flag & CRYPTO_EX_MEM_SWITCH_KEY) {
+		if (p_alg->mode & CRYPTO_EX_MEM_SWITCH_KEY) {
+			if (p_alg->mode & CRYPTO_EX_MEM_4_ENTRY_1_KEY) {
+				key_map_size = p_alg->adv.ex_key_para.map_size;
+				p_key_map = &p_alg->adv.ex_key_para.map[0];
+				efuse_check_map_para(key_map_size, p_key_map);
+				for (i = 0; i < key_map_size;
+				i++, p_key_map++) {
+					refresh_efuse(obj);
+					temp_reg =
+					wrap_readl(obj, efuse_config);
+					temp_reg &= ~(0xf<<28);
+					temp_reg |=
+					(p_key_map->ex_mem_entry / 4) << 28;
+					wrap_writel(obj,
+					efuse_config, temp_reg);
+					wrap_writel(obj,
+					efuse_cmd, (i << 20) + 0x04);
+					efuse_detect_complete(obj, 4);
+				}
+			}
+		} else {
+			/*chip need set the map...and usr not set */
+			key_map_size = size;
+			for (i = 0; i < key_map_size; i++) {
+				refresh_efuse(obj);
+				temp_reg = wrap_readl(obj, efuse_config);
+				temp_reg &= ~(0xf<<28);
+				temp_reg |= i << 28;
+				wrap_writel(obj, efuse_config, temp_reg);
+				wrap_writel(obj, efuse_cmd, (i << 20) + 0x04);
+				efuse_detect_complete(obj, 4);
+			}
+		}
+	} else {
+		key_map_size = size;
+		for (i = 0; i < key_map_size; i++) {
+			refresh_efuse(obj);
+			wrap_writel(obj, efuse_cmd, (i << 20) + 0x04);
+			efuse_detect_complete(obj, 4);
+		}
+	}
+
+}
+
+/* Reverse the four bytes of a 32-bit word in place (endianness swap). */
+static void aes_biglittle_swap(u8 *buf)
+{
+	u8 lo = buf[0];
+	u8 mid = buf[1];
+
+	buf[0] = buf[3];
+	buf[3] = lo;
+	buf[1] = buf[2];
+	buf[2] = mid;
+}
+/*
+ * efuse_get_lock_status - read the lock nibbles from efuse_status0:
+ * bits [23:20] = APB (CPU read) lock, bits [27:24] = AES AHB lock.
+ */
+void efuse_get_lock_status(struct wrap_efuse_obj *obj,
+struct efuse_status *status)
+{
+	status->efuse_apb_lock = (wrap_readl(obj, efuse_status0)>>20) & 0x0f;
+	status->aes_ahb_lock = (wrap_readl(obj, efuse_status0)>>24) & 0x0f;
+}
+
+/*
+ * efuse_read_entry - read 'size' consecutive eFuse entries (one byte
+ * each) starting at 'start_entry' into 'buff'.  'key' is written to the
+ * match-key register before every read; cmd 0x03 = read, entry index in
+ * bits [9:4], completion signalled by status bit 3.
+ */
+void efuse_read_entry(struct wrap_efuse_obj *obj,
+u32 key, u32 start_entry, u8 *buff, u32 size)
+{
+	u32 data, i;
+
+	for (i = 0; i < size; i++) {
+		wrap_writel(obj, efuse_match_key, key);
+		wrap_writel(obj, efuse_cmd, ((start_entry + i)<<4) + 0x03);
+		efuse_detect_complete(obj, 3);
+		data = wrap_readl(obj, efuse_dout);
+		*buff++ = (u8)data;
+	}
+}
+
+
+/*
+ * fh_efuse_ioctl - user-space entry point for all eFuse operations.
+ *
+ * The EFUSE_INFO block is copied in, the requested command executed,
+ * and the (possibly updated) status copied back to the user's struct.
+ *
+ * SECURITY FIX: efuse_info.key_size is fully user-controlled but was
+ * used unchecked to size copy_from_user/copy_to_user and the write
+ * loop against the 128-byte on-stack bounce buffer temp_swap_data —
+ * a kernel stack overflow.  The size is now bounded for the two
+ * commands that use it.
+ */
+long fh_efuse_ioctl(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	int i;
+
+	EFUSE_INFO efuse_info = {0};
+	u32 *p_dst = NULL;
+	u8 *p_dst_8 = NULL;
+	unsigned int data;
+	u32 temp_swap_data[32] = {0};
+
+	if (copy_from_user((void *) &efuse_info, (void __user*) arg,
+			sizeof(EFUSE_INFO))) {
+		return -EFAULT;
+	}
+
+	refresh_efuse(&s_efuse_obj);
+	switch (cmd) {
+	case IOCTL_EFUSE_CHECK_PRO:
+		auto_check_efuse_pro_bits(&s_efuse_obj,
+				efuse_info.status.protect_bits);
+		break;
+	case IOCTL_EFUSE_WRITE_KEY:
+		/* bound the user-supplied size to the bounce buffer */
+		if (efuse_info.key_size > sizeof(temp_swap_data))
+			return -EINVAL;
+		if (copy_from_user((void *) &temp_swap_data[0],
+				(void __user *) efuse_info.key_buff,
+				efuse_info.key_size)) {
+			return -EFAULT;
+		}
+
+		p_dst = &temp_swap_data[0];
+
+		/* hardware expects the words byte-reversed */
+		for (i = 0; i < efuse_info.key_size / sizeof(u32); i++)
+			aes_biglittle_swap((u8 *) (p_dst + i));
+		p_dst_8 = (u8 *)&temp_swap_data[0];
+
+		for (i = 0; i < efuse_info.key_size; i++) {
+			efuse_write_key_byte(&s_efuse_obj,
+			efuse_info.efuse_entry_no + i, *(p_dst_8 + i));
+		}
+
+		break;
+	case IOCTL_EFUSE_CHECK_LOCK:
+
+		efuse_get_lock_status(&s_efuse_obj, &efuse_info.status);
+		break;
+	case IOCTL_EFUSE_TRANS_KEY:
+		printk(KERN_WARNING "please use efuse transkey with aes...\n");
+		break;
+	case IOCTL_EFUSE_SWITCH_CPU_KEY_MODE:
+		printk(KERN_WARNING "please use cpu key with aes...\n");
+		break;
+	case IOCTL_EFUSE_SWITCH_EFUSE_KEY_MODE:
+		printk(KERN_WARNING "please use efuse key with aes...\n");
+		break;
+	case IOCTL_EFUSE_CHECK_ERROR:
+		printk(KERN_WARNING "not support...\n");
+		break;
+	case IOCTL_EFUSE_READ_KEY:
+		/* bound the user-supplied size to the bounce buffer */
+		if (efuse_info.key_size > sizeof(temp_swap_data))
+			return -EINVAL;
+		/*printf("match is : %x..\n",match_key);*/
+		efuse_read_entry(&s_efuse_obj, efuse_info.status.error,
+		efuse_info.efuse_entry_no,
+		(u8 *)&temp_swap_data[0], efuse_info.key_size);
+		p_dst = (u32 *)temp_swap_data;
+		for (i = 0; i < efuse_info.key_size / sizeof(u32); i++) {
+			aes_biglittle_swap((u8 *) (p_dst + i));
+			/*printk("swap data is %x\n",*(p_dst + i));*/
+		}
+		if (copy_to_user((void __user *) (efuse_info.key_buff),
+				(void *) &temp_swap_data[0],
+				efuse_info.key_size)) {
+			return -EFAULT;
+		}
+		/*memcpy(efuse_user_info,&efuse_info,sizeof(EFUSE_INFO));*/
+		break;
+	case IOCTL_EFUSE_SET_LOCK:
+		/* pack: AES AHB lock in high nibble, APB lock in low nibble,
+		 * burned into the dedicated lock entry 63 */
+		data = efuse_info.status.aes_ahb_lock;
+		data <<= 4;
+		data &= 0xf0;
+		efuse_info.status.efuse_apb_lock &= 0x0f;
+		data |= efuse_info.status.efuse_apb_lock;
+		efuse_write_key_byte(&s_efuse_obj, 63, (u8)data);
+		break;
+	case IOCTL_EFUSE_SET_MAP_PARA_4_TO_1:
+		printk(KERN_WARNING "not support here..\
+		pls check if chip support this func and set core para\n");
+		break;
+	case IOCTL_EFUSE_SET_MAP_PARA_1_TO_1:
+		printk(KERN_WARNING "not support func here...\n");
+		break;
+	case IOCTL_EFUSE_CLR_MAP_PARA:
+		printk(KERN_WARNING "not support here...\n");
+		break;
+	default:
+		break;
+	}
+	/* mirror the result into the driver state, then back to the user */
+	memcpy((void *)&s_efuse_obj.status, (void *)&efuse_info.status,
+			sizeof(struct efuse_status));
+	if (copy_to_user((void __user *) (&((EFUSE_INFO *)arg)->status),
+			(void *) &efuse_info.status,
+			sizeof(struct efuse_status))) {
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*****************************************************************************
+ *
+ *
+ * function body
+ *
+ *
+ *****************************************************************************/
+/*
+ * fh_efuse_probe - claim and map the controller registers, apply the
+ * board's feature flags (optional power switch, buffer disable) and
+ * register the misc device.
+ *
+ * Fix: when misc_register() failed, the ioremap mapping and the memory
+ * region were leaked; both are now released on that path too.
+ */
+static int __devinit fh_efuse_probe(struct platform_device *pdev)
+{
+	int err;
+	struct resource *res;
+	struct fh_efuse_platform_data *p_efuse_plat;
+
+	p_efuse_plat =
+	(struct fh_efuse_platform_data *) pdev->dev.platform_data;
+	if (p_efuse_plat == NULL) {
+		dev_err(&pdev->dev, "efuse get platform data error..\n");
+		return -ENODEV;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "efuse get platform source error..\n");
+		return -ENODEV;
+	}
+
+	res = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "efuse region already claimed\n");
+		return -EBUSY;
+	}
+
+	s_efuse_obj.regs = ioremap(res->start, resource_size(res));
+	if (s_efuse_obj.regs == NULL) {
+		err = -ENXIO;
+		goto fail_no_ioremap;
+	}
+	s_efuse_obj.open_flag = p_efuse_plat->efuse_support_flag;
+	if (s_efuse_obj.open_flag & CRYPTO_EX_MEM_INDEP_POWER) {
+		/* the eFuse macro has an independent power switch */
+		open_efuse_power(&s_efuse_obj);
+	}
+#ifdef EFUSE_BUFFER_OFF
+	efuse_buffer_set(&s_efuse_obj, 0);
+#endif
+	err = misc_register(&fh_efuse_misc);
+
+	if (err != 0) {
+		dev_err(&pdev->dev, "efuse register misc error\n");
+		goto fail_unmap;
+	}
+
+	platform_set_drvdata(pdev, &fh_efuse_misc);
+
+	return 0;
+
+fail_unmap:
+	iounmap(s_efuse_obj.regs);
+fail_no_ioremap:
+	release_mem_region(res->start, resource_size(res));
+
+	return err;
+}
+
+/*
+ * fh_efuse_remove - undo probe: deregister the misc device, unmap the
+ * registers and release the memory region.
+ */
+static int __exit fh_efuse_remove(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct miscdevice *misc;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	misc = (struct miscdevice *)platform_get_drvdata(pdev);
+	misc_deregister(misc);
+	iounmap(s_efuse_obj.regs);
+	release_mem_region(res->start, resource_size(res));
+
+	return 0;
+}
+
+/*******
+ *
+ *
+ * A platform driver is used because probe() needs the board info.
+ * probe() registers the efuse misc device so that applications can
+ * open it afterwards.
+ *
+ ******************/
+static struct platform_driver fh_efuse_driver = {
+ .driver = {
+ .name = FH_EFUSE_PLAT_DEVICE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = fh_efuse_probe,
+ .remove = __exit_p(fh_efuse_remove),
+};
+
+/* Module entry: register the eFuse platform driver. */
+static int __init fh_efuse_init(void)
+{
+	return platform_driver_register(&fh_efuse_driver);
+}
+
+/* Module exit: unregister the platform driver (probe cleanup follows). */
+static void __exit fh_efuse_exit(void)
+{
+	platform_driver_unregister(&fh_efuse_driver);
+}
+
+module_init(fh_efuse_init);
+module_exit(fh_efuse_exit);
+
+MODULE_DESCRIPTION("fh efuse driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("zhangy@fullhan.com");
+MODULE_ALIAS("platform:FH_efuse");
diff --git a/drivers/misc/fh_efuse.h b/drivers/misc/fh_efuse.h
new file mode 100644
index 00000000..f018b7e0
--- /dev/null
+++ b/drivers/misc/fh_efuse.h
@@ -0,0 +1,102 @@
+/*
+ * fh_efuse.h
+ *
+ * Created on: Aug 9, 2016
+ * Author: duobao
+ */
+
+#ifndef FH_EFUSE_H_
+#define FH_EFUSE_H_
+
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <crypto/if_alg.h>
+/****************************************************************************
+ * #define section
+ * add constant #define here if any
+ ***************************************************************************/
+/*#define FH_EFUSE_PROC_FILE "driver/efuse"*/
+#define MAX_EFUSE_MAP_SIZE 8
+
+#define IOCTL_EFUSE_CHECK_PRO 0
+#define IOCTL_EFUSE_WRITE_KEY 1
+#define IOCTL_EFUSE_CHECK_LOCK 2
+#define IOCTL_EFUSE_TRANS_KEY 3
+#define IOCTL_EFUSE_SWITCH_CPU_KEY_MODE 4
+#define IOCTL_EFUSE_SWITCH_EFUSE_KEY_MODE 5
+#define IOCTL_EFUSE_CHECK_ERROR 6
+#define IOCTL_EFUSE_READ_KEY 7
+#define IOCTL_EFUSE_SET_LOCK 8
+#define IOCTL_EFUSE_SET_MAP_PARA_4_TO_1 9
+#define IOCTL_EFUSE_SET_MAP_PARA_1_TO_1 10
+#define IOCTL_EFUSE_CLR_MAP_PARA 11
+
+#define EFUSE_NEED_TRANS 0x55
+/****************************************************************************
+ * ADT section
+ * add Abstract Data Type definition here
+ ***************************************************************************/
+
+/* Lock/protect state as reported by (or burned into) the controller. */
+struct efuse_status {
+	/*bit 1 means could write..0 not write*/
+	u32 protect_bits[2];
+	/*bit 1 means cpu couldn't read efuse entry data...*/
+	u32 efuse_apb_lock;
+	u32 aes_ahb_lock;
+	u32 error;
+};
+
+/* ioctl exchange block shared with user space (see IOCTL_EFUSE_*). */
+typedef struct {
+	u32 efuse_entry_no; /*from 0 ~ 31*/
+	u8 *key_buff;		/* user-space pointer to the key bytes */
+	u32 key_size;		/* bytes in key_buff */
+	u32 trans_key_start_no; /*from 0 ~ 7*/
+	u32 trans_key_size; /*max 8*/
+	struct efuse_status status;
+} EFUSE_INFO;
+
+/* Driver-side device state (single instance: s_efuse_obj). */
+struct wrap_efuse_obj {
+	void *regs;		/* ioremapped controller registers */
+	/*write key*/
+	u32 efuse_entry_no; /*from 0 ~ 31*/
+	u8 *key_buff;
+	u32 key_size;
+	/*trans key*/
+	u32 trans_key_start_no; /*from 0 ~ 7*/
+	u32 trans_key_size; /*max 8*/
+	/*status*/
+	struct efuse_status status;
+	u32 open_flag;		/* board feature flags (CRYPTO_EX_MEM_*) */
+	/*add check if need efuse transkey*/
+	u32 efuse_trans_flag;
+	/* for old driver just check size.
+	for have mapping func, should check the adv_info para.*/
+	u32 old_size;
+	struct af_alg_usr_def old_usr_def;
+};
+
+
+/****************************************************************************
+ * extern variable declaration section
+ ***************************************************************************/
+
+/****************************************************************************
+ * section
+ * add function prototype here if any
+ ***************************************************************************/
+void efuse_trans_key(struct wrap_efuse_obj *obj, u32 start_no,
+u32 size, struct af_alg_usr_def *p_alg);
+#endif /* FH_EFUSE_H_ */
diff --git a/drivers/misc/fh_i2s.c b/drivers/misc/fh_i2s.c
new file mode 100644
index 00000000..84e6292c
--- /dev/null
+++ b/drivers/misc/fh_i2s.c
@@ -0,0 +1,1346 @@
+/**@file
+ * @Copyright (c) 2016 Shanghai Fullhan Microelectronics Co., Ltd.
+ * @brief
+ *
+ * @author fullhan
+ * @date 2016-7-15
+ * @version V1.0
+ * @version V1.1 modify code style
+ * @note: misc i2s driver for fh8830 embedded i2s codec.
+ * @note History:
+ * @note <author> <time> <version > <desc>
+ * @note
+ * @warning: the codec is fixed to 24 bit, so remember to move the 24 bit data to 16 bit in the
+ * application layer; the next version of the CPU will solve this bug.
+ */
+
+
+#include <linux/irqreturn.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <linux/ioctl.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <mach/fh_dmac.h>
+#include <mach/fh_predefined.h>
+
+#include "fh_i2s.h"
+
+
+#define NR_DESCS_PER_CHANNEL 64
+
+#define FIX_SAMPLE_BIT 32
+
+#define I2S_HW_NUM_RX 0
+#define I2S_HW_NUM_TX 1
+#define I2S_DMA_CAP_CHANNEL 2
+#define I2S_DMA_PAY_CHANNEL 3
+
+#define I2S_CTRL 0x0
+#define I2S_TXFIFO_CTRL 0x4
+#define I2S_RXFIFO_CTRL 0x8
+#define I2S_STATUS 0x0c
+#define I2S_DAT_CTL 0x10
+#define I2S_DBG_CTL 0x14
+#define I2S_STATUS1 0x18
+#define I2S_STATUS2 0x1c
+
+#define I2S_DACL_FIFO 0xf0a00100
+#define I2S_DACR_FIFO 0xf0a00300
+#define I2S_ADC_FIFO 0xf0a00200
+
+#define I2S_DMA_PREALLOC_SIZE 128*1024
+
+#define I2S_INTR_RX_UNDERFLOW 0x10000
+#define I2S_INTR_RX_OVERFLOW 0x20000
+#define I2S_INTR_TX_UNDERFLOW 0x40000
+#define I2S_INTR_TX_OVERFLOW 0x80000
+
+#define I2S_EXT_EN 1<<12
+#define I2S_EN 1<<0
+#define I2S_DACL_CHEN_EN 1<<30
+#define I2S_DACR_CHEN_EN 1<<31
+#define I2S_ADC_CHEN_EN 1<<29
+#define I2S_SHIFT_BIT 6
+#define I2S_DAC_FIFO_CLEAR 1<<4
+#define I2S_ADC_FIFO_CLEAR 1<<4
+#define I2S_ADC_FIFO_EN 1<<0
+#define I2S_DAC_FIFO_EN 1<<0
+#define FH_i2s_DEBUG
+#ifdef FH_i2s_DEBUG
+#define PRINT_i2s_DBG(fmt, args...) \
+ do \
+ { \
+ printk("FH_i2s_DEBUG: "); \
+ printk(fmt, ## args); \
+ } \
+ while(0)
+#else
+#define PRINT_i2s_DBG(fmt, args...) do { } while (0)
+#endif
+
+enum i2s_type
+{
+ capture = 0,
+ playback,
+};
+
+
+enum i2s_state
+{
+ STATE_NORMAL = 0,
+ STATE_XRUN,
+ STATE_STOP,
+ STATE_RUN,
+ STATE_PAUSE
+};
+
+struct i2s_infor_record_t
+{
+ int record_pid;
+ int play_pid;
+}i2s_infor_record;
+
+
+struct fh_dma_chan
+{
+ struct dma_chan *chan;
+ void __iomem *ch_regs;
+ u8 mask;
+ u8 priority;
+ bool paused;
+ bool initialized;
+ spinlock_t lock;
+ /* these other elements are all protected by lock */
+ unsigned long flags;
+ dma_cookie_t completed;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ struct fh_cyclic_desc *cdesc;
+ unsigned int descs_allocated;
+};
+
+struct i2s_config {
+ int rate;
+ int volume;
+ int frame_bit;
+ int channels;
+ int buffer_size;
+ int period_size;
+ int buffer_bytes;
+ int period_bytes;
+ int start_threshold;
+ int stop_threshold;
+};
+
+struct i2s_ptr_t
+{
+ struct i2s_config cfg;
+ enum i2s_state state;
+ long size;
+ int hw_ptr;
+ int appl_ptr;
+ spinlock_t lock;
+ struct device dev;
+ u8 *area; /*virtual pointer*/
+ dma_addr_t addr; /*physical address*/
+ u8 * mmap_addr;
+};
+
+struct fh_i2s_cfg
+{
+ struct i2s_ptr_t capture;
+ struct i2s_ptr_t playback;
+ wait_queue_head_t readqueue;
+ wait_queue_head_t writequeue;
+ struct semaphore sem_capture;
+ struct semaphore sem_playback;
+};
+
+struct fh_i2s_dma_chan
+{
+ struct dma_chan *chan;
+ void __iomem *ch_regs;
+ u8 mask;
+ u8 priority;
+ bool paused;
+ bool initialized;
+ spinlock_t lock;
+ /* these other elements are all protected by lock */
+ unsigned long flags;
+ dma_cookie_t completed;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ struct dma_async_tx_descriptor *cdesc;
+ unsigned int descs_allocated;
+};
+
+struct fh_I2S_dma_transfer
+{
+ struct dma_chan *chan;
+ struct fh_dma_slave cfg;
+ struct scatterlist sgl;
+ struct fh_cyclic_desc *desc;
+};
+
+struct channel_assign
+{
+ int capture_channel;
+ int playback_channel;
+};
+
+struct i2s_dev
+{
+ struct channel_assign channel_assign;
+ struct fh_i2s_cfg i2s_config;
+ struct miscdevice fh_i2s_miscdev;
+};
+
+static const struct file_operations I2S_fops;
+
+static struct i2s_dev fh_i2s_dev =
+{
+ .channel_assign = {
+ .capture_channel = I2S_DMA_CAP_CHANNEL,
+ .playback_channel = I2S_DMA_PAY_CHANNEL,
+ },
+ .fh_i2s_miscdev = {
+ .fops = &I2S_fops,
+ .name = "fh_fh8830_i2s",
+ .minor = MISC_DYNAMIC_MINOR,
+ }
+
+};
+
+static struct
+{
+ spinlock_t lock;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long in_use;
+ unsigned long next_heartbeat;
+ struct timer_list timer;
+ int irq;
+} fh_i2s_module;
+static struct fh_dma_chan *dma_rx_transfer = NULL;
+static struct fh_dma_chan *dma_tx_transfer = NULL;
+static struct work_struct playback_wq;
+
+static struct i2s_param_store
+{
+ int input_volume;
+} i2s_param_store;
+
+
+static void fh_I2S_tx_dma_done(void *arg);
+static void fh_I2S_rx_dma_done(void *arg);
+static bool fh_I2S_dma_chan_filter(struct dma_chan *chan, void *filter_param);
+
+void fh_I2S_stop_playback(struct fh_i2s_cfg *i2s_config)
+{
+
+ if(i2s_config->playback.state == STATE_STOP)
+ {
+ return;
+ }
+ i2s_config->playback.state = STATE_STOP;
+ writel(0, fh_i2s_module.regs + I2S_TXFIFO_CTRL);//tx fifo disable
+ fh_dma_cyclic_stop(dma_tx_transfer->chan);
+ fh_dma_cyclic_free(dma_tx_transfer->chan);
+ up(&i2s_config->sem_playback);
+}
+
+void fh_I2S_stop_capture(struct fh_i2s_cfg *i2s_config)
+{
+
+ u32 rx_status;
+ if(i2s_config->capture.state == STATE_STOP)
+ {
+ return;
+ }
+ rx_status = readl( fh_i2s_module.regs + I2S_RXFIFO_CTRL);//clear rx fifo
+ rx_status = rx_status|I2S_ADC_FIFO_CLEAR;
+ writel(rx_status,fh_i2s_module.regs + I2S_RXFIFO_CTRL);
+
+ i2s_config->capture.state = STATE_STOP;
+
+ writel(0, fh_i2s_module.regs + I2S_RXFIFO_CTRL);//rx fifo disable
+
+ fh_dma_cyclic_stop(dma_rx_transfer->chan);
+ fh_dma_cyclic_free(dma_rx_transfer->chan);
+ up(&i2s_config->sem_capture);
+}
+
+
+int fh_i2s_get_factor_from_table(int rate)
+{
+ return 0;
+}
+
+void fh_switch_input_volume(int volume)
+{
+
+}
+
+void init_i2s(enum i2s_type type,struct fh_i2s_cfg *i2s_config)
+{
+
+}
+
+static inline long bytes_to_frames(int frame_bit, int bytes)
+{
+ return bytes * 8 /frame_bit;
+}
+
+static inline long fh_i2s_frames_to_bytes(int frame_bit, int frames)
+{
+ return frames * frame_bit / 8;
+}
+
+int i2s_avail_data_len(enum i2s_type type,struct fh_i2s_cfg *stream)
+{
+ int delta;
+ if (capture == type)
+ {
+ spin_lock(&stream->capture.lock);
+ delta = stream->capture.hw_ptr - stream->capture.appl_ptr;
+ spin_unlock(&stream->capture.lock);
+ if (delta < 0)
+ {
+ delta += stream->capture.size;
+ }
+ return delta;
+ }
+ else
+ {
+ spin_lock(&stream->playback.lock);
+ delta = stream->playback.appl_ptr - stream->playback.hw_ptr;
+ spin_unlock(&stream->playback.lock);
+ if (delta < 0)
+ {
+ delta += stream->playback.size;
+ }
+ return stream->playback.size - delta;
+ }
+}
+
+static int fh_i2s_close(struct inode *ip, struct file *fp)
+{
+ struct miscdevice *miscdev = fp->private_data;
+ struct i2s_dev *dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
+ struct fh_i2s_cfg *i2s_config = &dev->i2s_config;
+ int pid;
+
+ pid= current->tgid;
+
+ if( i2s_infor_record.play_pid == pid)
+ {
+ fh_I2S_stop_playback(i2s_config);
+
+ }
+ if (i2s_infor_record.record_pid==pid)
+ {
+ fh_I2S_stop_capture(i2s_config);
+ }
+ return 0;
+}
+
+int fh_i2s_register_tx_dma(struct fh_i2s_cfg *i2s_config)
+{
+ int ret;
+ unsigned int reg;
+ struct fh_dma_slave *tx_slave;
+ tx_slave = kzalloc(sizeof(struct fh_dma_slave), GFP_KERNEL);
+ if (!tx_slave)
+ {
+ return -ENOMEM;
+ }
+ tx_slave->cfg_hi = FHC_CFGH_DST_PER(I2S_HW_NUM_TX);
+ tx_slave->dst_msize = FH_DMA_MSIZE_8;
+ tx_slave->src_msize = FH_DMA_MSIZE_8;
+ tx_slave->reg_width = FH_DMA_SLAVE_WIDTH_32BIT;
+ tx_slave->fc = FH_DMA_FC_D_M2P;
+ tx_slave->tx_reg = I2S_DACL_FIFO;
+ dma_tx_transfer->chan->private = tx_slave;
+
+ if ((i2s_config->playback.cfg.buffer_bytes < i2s_config->playback.cfg.period_bytes) ||
+ (i2s_config->playback.cfg.buffer_bytes <= 0) || (i2s_config->playback.cfg.period_bytes <= 0) ||
+ (i2s_config->playback.cfg.buffer_bytes/i2s_config->playback.cfg.period_bytes > NR_DESCS_PER_CHANNEL))
+ {
+ printk(KERN_ERR "buffer_size and period_size are invalid\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dma_tx_transfer->cdesc =
+ fh_dma_cyclic_prep(dma_tx_transfer->chan,i2s_config->playback.addr,
+ i2s_config->playback.cfg.buffer_bytes,i2s_config->playback.cfg.period_bytes, DMA_MEM_TO_DEV);
+ if(dma_tx_transfer->cdesc <= 0)
+ {
+ printk(KERN_ERR "cyclic desc err\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ dma_tx_transfer->cdesc->period_callback = fh_I2S_tx_dma_done;
+ dma_tx_transfer->cdesc->period_callback_param = i2s_config;
+ fh_dma_cyclic_start(dma_tx_transfer->chan);
+ if (tx_slave)
+ {
+ kfree(tx_slave);
+ }
+
+ reg = readl(fh_i2s_module.regs + I2S_CTRL);
+ reg = reg <<I2S_SHIFT_BIT;
+ reg |= I2S_DACL_CHEN_EN;
+ writel(reg,fh_i2s_module.regs + I2S_CTRL);// enable left tx fifo
+
+ writel(0x1<<4,fh_i2s_module.regs + I2S_TXFIFO_CTRL);// clear tx fifo
+	writel(0x20027,fh_i2s_module.regs + I2S_TXFIFO_CTRL);// enable tx fifo
+
+ /*must set NULL to tell DMA driver that we free the DMA slave*/
+ dma_tx_transfer->chan->private = NULL;
+
+ return 0;
+fail:
+ return ret;
+}
+
+int fh_i2s_register_rx_dma( struct fh_i2s_cfg *i2s_config)
+{
+ int ret;
+ unsigned int reg;
+ struct fh_dma_slave *rx_slave;
+ rx_slave = kzalloc(sizeof(struct fh_dma_slave), GFP_KERNEL);
+ if (!rx_slave)
+ {
+ return -ENOMEM;
+ }
+
+ rx_slave->cfg_hi = FHC_CFGH_SRC_PER(I2S_HW_NUM_RX);
+ rx_slave->dst_msize = FH_DMA_MSIZE_8;
+ rx_slave->src_msize = FH_DMA_MSIZE_8;
+ rx_slave->reg_width = FH_DMA_SLAVE_WIDTH_32BIT;
+ rx_slave->fc = FH_DMA_FC_D_P2M;
+ rx_slave->rx_reg = I2S_ADC_FIFO;
+ dma_rx_transfer->chan->private = rx_slave;
+
+ if ((i2s_config->capture.cfg.buffer_bytes < i2s_config->capture.cfg.period_bytes) ||
+ (i2s_config->capture.cfg.buffer_bytes <= 0) ||(i2s_config->capture.cfg.period_bytes <= 0) ||
+ (i2s_config->capture.cfg.buffer_bytes/i2s_config->capture.cfg.period_bytes > NR_DESCS_PER_CHANNEL))
+ {
+ printk(KERN_ERR "buffer_size and period_size are invalid\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dma_rx_transfer->cdesc=
+ fh_dma_cyclic_prep(dma_rx_transfer->chan,i2s_config->capture.addr,
+ i2s_config->capture.cfg.buffer_bytes,i2s_config->capture.cfg.period_bytes, DMA_DEV_TO_MEM);
+ if(dma_rx_transfer->cdesc <= 0)
+ {
+ printk(KERN_ERR" cyclic desc err\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dma_rx_transfer->cdesc->period_callback = fh_I2S_rx_dma_done;
+ dma_rx_transfer->cdesc->period_callback_param = i2s_config;
+ fh_dma_cyclic_start(dma_rx_transfer->chan);
+
+ reg = readl(fh_i2s_module.regs + I2S_CTRL);
+ reg = reg<<I2S_SHIFT_BIT;
+ reg |= I2S_ADC_CHEN_EN ;
+	writel(reg,fh_i2s_module.regs + I2S_CTRL);//enable adc channel
+ writel(0x1<<4,fh_i2s_module.regs + I2S_RXFIFO_CTRL);// clear rx fifo
+ writel(0x20027,fh_i2s_module.regs + I2S_RXFIFO_CTRL);// enable rx fifo
+
+
+ if (rx_slave)
+ {
+ kfree(rx_slave);
+ }
+ /*must set NULL to tell DMA driver that we free the DMA slave*/
+ dma_rx_transfer->chan->private = NULL;
+ return 0;
+fail:
+ kfree(rx_slave);
+ return ret;
+}
+
+
+void fh_i2s_playback_start_wq_handler(struct work_struct *work)
+{
+ int avail;
+ while(1)
+ {
+ if (STATE_STOP == fh_i2s_dev.i2s_config.playback.state)
+ {
+ return;
+ }
+ avail = i2s_avail_data_len(playback, &fh_i2s_dev.i2s_config);
+ if (avail > fh_i2s_dev.i2s_config.playback.cfg.period_bytes)
+ {
+ msleep(0);
+ }
+ else
+ {
+ break;
+ }
+
+ }
+}
+
+int fh_I2S_start_playback(struct fh_i2s_cfg *i2s_config)
+{
+ int ret;
+
+ if(i2s_config->playback.state == STATE_RUN)
+ {
+ return 0;
+ }
+ if (i2s_config->playback.cfg.buffer_bytes >= I2S_DMA_PREALLOC_SIZE)
+ {
+ printk("DMA prealloc buffer is smaller than i2s_config->buffer_bytes\n");
+ return -ENOMEM;
+ }
+ memset(i2s_config->playback.area, 0x0, i2s_config->playback.cfg.buffer_bytes);
+ i2s_config->playback.size = i2s_config->playback.cfg.buffer_bytes;
+ i2s_config->playback.state = STATE_RUN;
+ ret = fh_i2s_register_tx_dma(i2s_config);
+ if (ret < 0)
+ {
+ return ret;
+ }
+ INIT_WORK(&playback_wq, fh_i2s_playback_start_wq_handler);
+ schedule_work(&playback_wq);
+
+ return 0;
+}
+
+int fh_I2S_start_capture(struct fh_i2s_cfg *i2s_config)
+{
+ if(i2s_config->capture.state == STATE_RUN)
+ {
+ return 0;
+ }
+
+ if (i2s_config->capture.cfg.buffer_bytes >= I2S_DMA_PREALLOC_SIZE)
+ {
+ printk("DMA prealloc buffer is smaller than i2s_config->buffer_bytes\n");
+ return -ENOMEM;
+ }
+ memset(i2s_config->capture.area, 0, i2s_config->capture.cfg.buffer_bytes);
+ i2s_config->capture.size = i2s_config->capture.cfg.buffer_bytes;
+
+ i2s_config->capture.state = STATE_RUN;
+ return fh_i2s_register_rx_dma(i2s_config);
+}
+
+
+static void fh_I2S_rx_dma_done(void *arg)
+{
+ struct fh_i2s_cfg *i2s_config;
+ i2s_config = ( struct fh_i2s_cfg *)arg;
+ spin_lock(&i2s_config->capture.lock);
+ i2s_config->capture.hw_ptr += i2s_config->capture.cfg.period_bytes;
+ if (i2s_config->capture.hw_ptr > i2s_config->capture.size )
+ {
+ i2s_config->capture.hw_ptr = i2s_config->capture.hw_ptr - i2s_config->capture.size;
+ }
+ spin_unlock(&i2s_config->capture.lock);
+ if (waitqueue_active(&i2s_config->readqueue))
+ {
+ int avail = i2s_avail_data_len(capture,i2s_config);
+ if (avail > i2s_config->capture.cfg.period_bytes)
+ {
+ wake_up_interruptible(&i2s_config->readqueue);
+ }
+ }
+
+}
+
+
+static void fh_I2S_tx_dma_done(void *arg)
+{
+
+ struct fh_i2s_cfg *i2s_config;
+ i2s_config = ( struct fh_i2s_cfg *)arg;
+ spin_lock(&i2s_config->playback.lock);
+ i2s_config->playback.hw_ptr += i2s_config->playback.cfg.period_bytes;
+ if (i2s_config->playback.hw_ptr > i2s_config->playback.size )
+ {
+ i2s_config->playback.hw_ptr = i2s_config->playback.hw_ptr - i2s_config->playback.size;
+ }
+ spin_unlock(&i2s_config->playback.lock);
+ if (waitqueue_active(&i2s_config->writequeue))
+ {
+ int avail = i2s_avail_data_len(playback,i2s_config);
+ if (avail > i2s_config->playback.cfg.period_bytes)
+ {
+ wake_up_interruptible(&i2s_config->writequeue);
+ }
+ }
+}
+
+bool fh_I2S_dma_chan_filter(struct dma_chan *chan, void *filter_param)
+{
+ int dma_channel = *(int *)filter_param;
+ bool ret = false;
+
+ if (chan->chan_id == dma_channel)
+ {
+ ret = true;
+ }
+ return ret;
+}
+
+int fh_i2s_arg_config_support(struct fh_i2s_cfg_arg * cfg)
+{
+ return 0;
+}
+
+void fh_i2s_reset_dma_buff(enum i2s_type type, struct fh_i2s_cfg *i2s_config)
+{
+ if (capture == type)
+ {
+ i2s_config->capture.appl_ptr = 0;
+ i2s_config->capture.hw_ptr = 0;
+ }
+ else
+ {
+ i2s_config->playback.appl_ptr = 0;
+ i2s_config->playback.hw_ptr = 0;
+ }
+}
+
+static long fh_i2s_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct fh_i2s_cfg_arg cfg;
+ struct miscdevice *miscdev = filp->private_data;
+ struct i2s_dev *dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
+ struct fh_i2s_cfg *i2s_config = &dev->i2s_config;
+ int pid;
+ int rx_status;
+ int tx_status;
+
+ pid = current->tgid;
+ switch (cmd)
+ {
+ case I2S_INIT_CAPTURE_MEM:
+
+ if (copy_from_user((void *)&cfg, (void __user *)arg, sizeof(struct fh_i2s_cfg_arg)))
+ {
+ printk(KERN_ERR "copy err\n");
+ return -EIO;
+ }
+ if (0 == fh_i2s_arg_config_support(&cfg))
+ {
+ if (down_trylock(&i2s_config->sem_capture))
+ {
+ printk(KERN_ERR "another thread is running capture.\n");
+ return -EBUSY;
+ }
+ i2s_infor_record.record_pid = pid;
+ i2s_config->capture.cfg.volume = cfg.volume;
+ i2s_config->capture.cfg.rate = cfg.rate;
+ i2s_config->capture.cfg.channels = cfg.channels;
+ i2s_config->capture.cfg.buffer_size = cfg.buffer_size;
+ i2s_config->capture.cfg.frame_bit = FIX_SAMPLE_BIT;
+ i2s_config->capture.cfg.period_size = cfg.period_size;
+ i2s_config->capture.cfg.buffer_bytes = fh_i2s_frames_to_bytes(i2s_config->capture.cfg.frame_bit,i2s_config->capture.cfg.buffer_size);
+ i2s_config->capture.cfg.period_bytes = fh_i2s_frames_to_bytes(i2s_config->capture.cfg.frame_bit,i2s_config->capture.cfg.period_size);
+ i2s_config->capture.cfg.start_threshold =i2s_config->capture.cfg.buffer_bytes;
+ i2s_config->capture.cfg.stop_threshold = i2s_config->capture.cfg.buffer_bytes;
+ fh_i2s_reset_dma_buff(capture, i2s_config);
+ init_waitqueue_head(&i2s_config->readqueue);
+ spin_lock_init(&i2s_config->capture.lock);
+ init_i2s(capture, i2s_config);
+ i2s_param_store.input_volume = i2s_config->capture.cfg.volume;
+ /* * config sample * */
+
+ }
+ else
+ {
+ return -EINVAL;
+ }
+
+ break;
+ case I2S_INIT_PLAYBACK_MEM:
+ if (copy_from_user((void *)&cfg, (void __user *)arg, sizeof(struct fh_i2s_cfg_arg)))
+ {
+ printk(KERN_ERR "copy err\n");
+ return -EIO;
+ }
+
+ if (0 == fh_i2s_arg_config_support(&cfg))
+ {
+ if (down_trylock(&i2s_config->sem_playback))
+ {
+ printk(KERN_ERR "another thread is running playback.\n");
+ return -EBUSY;
+ }
+ i2s_infor_record.play_pid = pid;
+ i2s_config->playback.cfg.volume = cfg.volume;
+ i2s_config->playback.cfg.rate = cfg.rate;
+ i2s_config->playback.cfg.channels = cfg.channels;
+ i2s_config->playback.cfg.buffer_size = cfg.buffer_size;
+ i2s_config->playback.cfg.frame_bit = FIX_SAMPLE_BIT;
+ i2s_config->playback.cfg.period_size = cfg.period_size;
+ i2s_config->playback.cfg.buffer_bytes = fh_i2s_frames_to_bytes(i2s_config->playback.cfg.frame_bit,i2s_config->playback.cfg.buffer_size);
+ i2s_config->playback.cfg.period_bytes = fh_i2s_frames_to_bytes(i2s_config->playback.cfg.frame_bit,i2s_config->playback.cfg.period_size);
+ i2s_config->playback.cfg.start_threshold =i2s_config->playback.cfg.buffer_bytes;
+ i2s_config->playback.cfg.stop_threshold = i2s_config->playback.cfg.buffer_bytes;
+ fh_i2s_reset_dma_buff(playback, i2s_config);
+ init_waitqueue_head(&i2s_config->writequeue);
+ spin_lock_init(&i2s_config->playback.lock);
+ init_i2s(playback, i2s_config);
+
+ }
+ else
+ {
+ return -EINVAL;
+ }
+ break;
+ case I2S_AI_EN:
+ if (i2s_infor_record.record_pid != pid){
+ return -EBUSY;
+ }
+ return fh_I2S_start_capture(i2s_config);
+ case I2S_AO_EN:
+ if (i2s_infor_record.play_pid != pid) {
+ return -EBUSY;
+ }
+ return fh_I2S_start_playback(i2s_config);
+
+ case I2S_AI_DISABLE:
+ printk("[ac_driver]AC_AI_DISABLE\n");
+ if (i2s_infor_record.record_pid != pid) {
+ return -EBUSY;
+ }
+ fh_I2S_stop_capture(i2s_config);
+ printk(" AC_AI_DISABLE\n");
+ break;
+ case I2S_AO_DISABLE:
+ printk("[ac_driver]AC_AO_DISABLE\n");
+ if (i2s_infor_record.play_pid != pid) {
+ return -EBUSY;
+ }
+ fh_I2S_stop_playback(i2s_config);
+ printk(" AC_AO_DISABLE\n");
+ break;
+ case I2S_AI_PAUSE:
+ if (i2s_infor_record.record_pid != pid) {
+ return -EBUSY;
+ }
+ printk(KERN_INFO "capture pause\n");
+ i2s_config->capture.state = STATE_PAUSE;
+ rx_status = readl(fh_i2s_module.regs + I2S_RXFIFO_CTRL);/*rx fifo disable*/
+ rx_status = rx_status&(~I2S_ADC_FIFO_EN);
+ writel(rx_status, fh_i2s_module.regs + I2S_RXFIFO_CTRL);/*rx fifo disable*/
+ break;
+ case I2S_AI_RESUME:
+ if (i2s_infor_record.record_pid != pid) {
+ return -EBUSY;
+ }
+ printk(KERN_INFO "capture resume\n");
+ i2s_config->capture.state = STATE_RUN;
+ rx_status = readl( fh_i2s_module.regs + I2S_RXFIFO_CTRL);//clear rx fifo
+ rx_status = rx_status|I2S_ADC_FIFO_CLEAR;
+ writel(rx_status,fh_i2s_module.regs + I2S_RXFIFO_CTRL);/*enable rx fifo*/
+ rx_status = rx_status&(~I2S_ADC_FIFO_CLEAR);
+ rx_status = rx_status|I2S_ADC_FIFO_EN;
+ writel(rx_status,fh_i2s_module.regs + I2S_RXFIFO_CTRL);/*enable rx fifo*/
+ break;
+ case I2S_AO_PAUSE:
+ if (i2s_infor_record.play_pid != pid) {
+ return -EBUSY;
+ }
+ i2s_config->playback.state = STATE_PAUSE;
+ printk(KERN_INFO "playback pause\n");
+	tx_status = readl(fh_i2s_module.regs + I2S_TXFIFO_CTRL);/*read tx fifo ctrl*/
+ tx_status = tx_status&(~I2S_DAC_FIFO_EN);
+ writel(tx_status, fh_i2s_module.regs + I2S_TXFIFO_CTRL);/*tx fifo disable*/
+ break;
+ case I2S_AO_RESUME:
+ if (i2s_infor_record.play_pid != pid) {
+ return -EBUSY;
+ }
+ printk(KERN_INFO "playback resume\n");
+ i2s_config->playback.state = STATE_RUN;
+	tx_status = readl( fh_i2s_module.regs + I2S_TXFIFO_CTRL);//read tx fifo ctrl
+ tx_status = tx_status|I2S_DAC_FIFO_EN;
+ writel(tx_status,fh_i2s_module.regs + I2S_TXFIFO_CTRL); //enable tx fifo read enable
+ break;
+ default:
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+static int fh_i2s_open(struct inode *ip, struct file *fp)
+{
+
+ fp->private_data = &fh_i2s_dev.fh_i2s_miscdev;
+
+ return 0;
+}
+
+static u32 fh_i2s_poll(struct file *filp, poll_table *wait)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct i2s_dev *dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
+ struct fh_i2s_cfg *i2s_config = &dev->i2s_config;
+ u32 mask = 0;
+ long avail;
+ if (STATE_RUN == i2s_config->capture.state)
+ {
+ poll_wait(filp,&i2s_config->readqueue,wait);
+ avail = i2s_avail_data_len(capture, i2s_config);
+ if (avail > i2s_config->capture.cfg.period_bytes)
+ {
+ mask |= POLLIN | POLLRDNORM;
+ }
+ }
+ if (STATE_RUN == i2s_config->playback.state)
+ {
+ poll_wait(filp,&i2s_config->writequeue,wait);
+ avail = i2s_avail_data_len(playback, i2s_config);
+ if (avail > i2s_config->playback.cfg.period_bytes)
+ {
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ }
+ return mask;
+}
+
+static int fh_i2s_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+{
+
+ int ret;
+ struct miscdevice *miscdev = filp->private_data;
+ struct i2s_dev *dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
+ struct fh_i2s_cfg *i2s_config = &dev->i2s_config;
+ int after,left;
+ int pid,avail;
+ pid = current->tgid;
+ if (i2s_infor_record.record_pid != pid){
+ return -EBUSY;
+ }
+
+ avail = i2s_avail_data_len(capture, i2s_config);
+ if (avail > len)
+ {
+ avail = len;
+ }
+ after = avail + i2s_config->capture.appl_ptr;
+ if(after > i2s_config->capture.size)
+ {
+ left = avail - (i2s_config->capture.size - i2s_config->capture.appl_ptr);
+ ret = copy_to_user(buf, i2s_config->capture.area+i2s_config->capture.appl_ptr, i2s_config->capture.size-i2s_config->capture.appl_ptr);
+ ret = copy_to_user(buf+i2s_config->capture.size-i2s_config->capture.appl_ptr,i2s_config->capture.area,left);
+ spin_lock(&i2s_config->capture.lock);
+ i2s_config->capture.appl_ptr = left;
+ spin_unlock(&i2s_config->capture.lock);
+ }
+ else
+ {
+ ret = copy_to_user(buf,i2s_config->capture.area+i2s_config->capture.appl_ptr,avail);
+ spin_lock(&i2s_config->capture.lock);
+ i2s_config->capture.appl_ptr += avail;
+ spin_unlock(&i2s_config->capture.lock);
+ }
+
+ return avail;
+
+}
+
+static int fh_i2s_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *off)
+{
+
+ struct miscdevice *miscdev = filp->private_data;
+ struct i2s_dev *dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
+ struct fh_i2s_cfg *i2s_config = &dev->i2s_config;
+ int ret;
+ int after,left;
+ int pid,avail;
+ pid = current->tgid;
+ if (i2s_infor_record.play_pid != pid){
+ return -EBUSY;
+ }
+ avail = i2s_avail_data_len(playback,i2s_config);
+ if (0 == avail)
+ {
+ return 0;
+ }
+ if (avail > len)
+ {
+ avail = len;
+ }
+ after = avail+i2s_config->playback.appl_ptr;
+ if(after > i2s_config->playback.size)
+ {
+ left = avail - (i2s_config->playback.size-i2s_config->playback.appl_ptr);
+ ret = copy_from_user(i2s_config->playback.area+i2s_config->playback.appl_ptr,buf,i2s_config->playback.size-i2s_config->playback.appl_ptr);
+ ret = copy_from_user(i2s_config->playback.area,buf+i2s_config->playback.size-i2s_config->playback.appl_ptr,left);
+ spin_lock(&i2s_config->playback.lock);
+ i2s_config->playback.appl_ptr = left;
+ spin_unlock(&i2s_config->playback.lock);
+ }
+ else
+ {
+ ret = copy_from_user(i2s_config->playback.area+i2s_config->playback.appl_ptr,buf,avail);
+ spin_lock(&i2s_config->playback.lock);
+ i2s_config->playback.appl_ptr += avail;
+ spin_unlock(&i2s_config->playback.lock);
+ }
+
+ return avail;
+}
+
+static irqreturn_t fh_i2s_interrupt(int irq, void *dev_id)
+{
+#if 0
+#ifndef CONFIG_MACH_FH8830_FPGA
+ u32 interrupts, rx_status;
+ struct fh_i2s_cfg *i2s_config = &fh_i2s_dev.i2s_config;
+
+ interrupts = readl(fh_i2s_module.regs + I2S_CTRL);
+ //interrupts &= ~(0x3ff) << 16;
+ writel(interrupts, fh_i2s_module.regs + I2S_CTRL);
+
+ if(interrupts & I2S_INTR_RX_UNDERFLOW)
+ {
+ fh_I2S_stop_capture(i2s_config);
+ fh_I2S_start_capture(i2s_config);
+ PRINT_i2s_DBG("I2S_INTR_RX_UNDERFLOW\n");
+ }
+
+ if(interrupts & I2S_INTR_RX_OVERFLOW)
+ {
+ if (i2s_config->capture.state == STATE_RUN) {
+ fh_I2S_stop_capture(i2s_config);
+ fh_I2S_start_capture(i2s_config);
+ } else {
+ rx_status = readl( fh_i2s_module.regs + I2S_RXFIFO_CTRL);//clear rx fifo
+ rx_status = rx_status|(1<<4);
+ writel(rx_status,fh_i2s_module.regs + I2S_RXFIFO_CTRL);
+ }
+ PRINT_i2s_DBG("I2S_INTR_RX_OVERFLOW\n");
+ }
+
+ if(interrupts & I2S_INTR_TX_UNDERFLOW)
+ {
+ fh_I2S_stop_playback(i2s_config);
+ fh_I2S_start_playback(i2s_config);
+ PRINT_i2s_DBG("I2S_INTR_TX_UNDERFLOW\n");
+ }
+
+ if(interrupts & I2S_INTR_TX_OVERFLOW)
+ {
+ fh_I2S_stop_playback(i2s_config);
+ fh_I2S_start_playback(i2s_config);
+ PRINT_i2s_DBG("I2S_INTR_TX_OVERFLOW\n");
+ }
+
+ PRINT_i2s_DBG("interrupts: 0x%x\n", interrupts);
+#endif
+#endif
+ return IRQ_HANDLED;
+}
+
+static const struct file_operations I2S_fops =
+{
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = fh_i2s_ioctl,
+ .release = fh_i2s_close,
+ .open = fh_i2s_open,
+ .poll = fh_i2s_poll,
+ .read = fh_i2s_read,
+ .write = fh_i2s_write,
+
+};
+
+static int __devinit fh_i2s_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *irq_res, *mem;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -EINVAL;
+ printk("I2S probe\n");
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ "fh_fh8830_i2s_module"))
+ return -ENOMEM;
+ printk("I2S :%d\n",__LINE__);
+ PRINT_i2s_DBG("%d\n",__LINE__);
+ fh_i2s_module.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ PRINT_i2s_DBG("probe: regs %p\n",fh_i2s_module.regs);
+ if (!fh_i2s_module.regs){
+ ret = -ENOMEM;
+ goto remap_fail;
+ }
+
+ fh_i2s_module.clk = clk_get(NULL, "ac_clk");
+ if (!fh_i2s_module.clk) {
+ ret = -EINVAL;
+ goto clk_fail;
+ }
+ clk_enable(fh_i2s_module.clk);
+ PRINT_i2s_DBG("%d\n",__LINE__);
+ spin_lock_init(&fh_i2s_module.lock);
+
+ ret = misc_register(&fh_i2s_dev.fh_i2s_miscdev);
+
+ if (ret)
+ goto out_disable_clk;
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res)
+ {
+ pr_err("%s: ERROR: getting resource failed"
+ "cannot get IORESOURCE_IRQ\n", __func__);
+ ret = -ENXIO;
+ goto out_disable_clk;
+ }
+ fh_i2s_module.irq = irq_res->start;
+ ret = request_irq(fh_i2s_module.irq, fh_i2s_interrupt, IRQF_SHARED, "i2s", &fh_i2s_module);
+ ret = readl(fh_i2s_module.regs + I2S_DBG_CTL);
+ ret = ret | I2S_EXT_EN| I2S_EN;
+ writel(ret,fh_i2s_module.regs + I2S_DBG_CTL);
+ return 0;
+
+out_disable_clk:
+ clk_disable(fh_i2s_module.clk);
+ fh_i2s_module.clk = NULL;
+clk_fail:
+ devm_iounmap(&pdev->dev, fh_i2s_module.regs);
+ fh_i2s_module.regs = NULL;
+remap_fail:
+ devm_release_mem_region(&pdev->dev, mem->start, resource_size(mem));
+ return ret;
+}
+
+static int __devexit fh_I2S_drv_remove(struct platform_device *pdev)
+{
+ struct resource *mem;
+ misc_deregister(&fh_i2s_dev.fh_i2s_miscdev);
+
+ free_irq(fh_i2s_module.irq, &fh_i2s_module);
+
+ if (fh_i2s_module.clk) {
+ clk_disable(fh_i2s_module.clk);
+ clk_put(fh_i2s_module.clk);
+ }
+ if (fh_i2s_module.regs) {
+ devm_iounmap(&pdev->dev, fh_i2s_module.regs);
+ }
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem) {
+ devm_release_mem_region(&pdev->dev, mem->start, resource_size(mem));
+ }
+ printk("I2S remove ok\n");
+ return 0;
+}
+
+static struct platform_driver fh_i2s_driver =
+{
+ .probe = fh_i2s_drv_probe,
+ .remove = __devexit_p(fh_I2S_drv_remove),
+ .driver = {
+ .name = "fh_fh8830_i2s",
+ .owner = THIS_MODULE,
+ }
+};
+
+void i2s_prealloc_dma_buffer(struct fh_i2s_cfg *i2s_config)
+{
+ int pg;
+ gfp_t gfp_flags;
+ pg = get_order(I2S_DMA_PREALLOC_SIZE);
+ gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
+ i2s_config->capture.dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ i2s_config->capture.area = dma_alloc_coherent(&i2s_config->capture.dev, PAGE_SIZE << pg, &i2s_config->capture.addr, gfp_flags );
+ if (!i2s_config->capture.area)
+ {
+ printk(KERN_ERR"no enough mem for capture buffer alloc\n");
+ return ;
+ }
+ i2s_config->playback.dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ i2s_config->playback.area = dma_alloc_coherent(&i2s_config->playback.dev, PAGE_SIZE << pg, &i2s_config->playback.addr, gfp_flags );
+ if (!i2s_config->playback.area)
+ {
+ printk(KERN_ERR"no enough mem for playback buffer alloc\n");
+ return ;
+ }
+}
+
+void i2s_free_prealloc_dma_buffer(struct fh_i2s_cfg *i2s_config)
+{
+ int pg;
+ pg = get_order(I2S_DMA_PREALLOC_SIZE);
+ dma_free_coherent(&i2s_config->capture.dev, PAGE_SIZE<<pg, i2s_config->capture.area, i2s_config->capture.addr);
+ dma_free_coherent(&i2s_config->playback.dev, PAGE_SIZE<<pg, i2s_config->playback.area, i2s_config->playback.addr);
+}
+
+static void init_i2s_mutex(struct fh_i2s_cfg *i2s_config)
+{
+ sema_init(&i2s_config->sem_capture, 1);
+ sema_init(&i2s_config->sem_playback, 1);
+}
+
+int i2s_request_dma_channel(void)
+{
+ dma_cap_mask_t mask;
+ /*request i2s rx dma channel*/
+ dma_rx_transfer = kzalloc(sizeof(struct fh_dma_chan), GFP_KERNEL);
+ if (!dma_rx_transfer)
+ {
+ printk(KERN_ERR"alloc dma_rx_transfer failed\n");
+ goto mem_fail;
+ }
+ memset(dma_rx_transfer, 0, sizeof(struct fh_dma_chan));
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_rx_transfer->chan = dma_request_channel(mask, fh_I2S_dma_chan_filter, &fh_i2s_dev.channel_assign.capture_channel);
+ if (!dma_rx_transfer->chan)
+ {
+ printk(KERN_ERR"request i2s rx dma channel failed \n");
+ goto channel_fail;
+ }
+
+ /*request i2s tx dma channel*/
+ dma_tx_transfer = kzalloc(sizeof(struct fh_dma_chan), GFP_KERNEL);
+ if (!dma_tx_transfer)
+ {
+ printk(KERN_ERR"alloc dma_tx_transfer failed\n");
+ goto mem_fail;
+ }
+ memset(dma_tx_transfer, 0, sizeof(struct fh_dma_chan));
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_tx_transfer->chan = dma_request_channel(mask, fh_I2S_dma_chan_filter, &fh_i2s_dev.channel_assign.playback_channel);
+ if (!dma_tx_transfer->chan)
+ {
+ printk(KERN_ERR"request dma channel failed \n");
+ return -EFAULT;
+ }
+
+ return 0;
+channel_fail:
+ if (!dma_rx_transfer->chan)
+ {
+ dma_release_channel(dma_rx_transfer->chan);
+ dma_rx_transfer->chan = NULL;
+ }
+ if (!dma_tx_transfer->chan)
+ {
+ dma_release_channel(dma_tx_transfer->chan);
+ dma_tx_transfer->chan = NULL;
+ }
+
+mem_fail:
+ if (dma_rx_transfer != NULL)
+ {
+ kfree(dma_rx_transfer);
+ dma_rx_transfer = NULL;
+ }
+ if (dma_tx_transfer != NULL)
+ {
+ kfree(dma_tx_transfer);
+ dma_tx_transfer = NULL;
+ }
+
+ return -EFAULT;
+}
+
+void i2s_release_dma_channel(void)
+{
+ /*release i2s tx dma channel*/
+ if (dma_tx_transfer != NULL)
+ {
+ if (dma_tx_transfer->chan) {
+ dma_release_channel(dma_tx_transfer->chan);
+ dma_tx_transfer->chan = NULL;
+ }
+ kfree(dma_tx_transfer);
+ dma_tx_transfer = NULL;
+ }
+
+ /*release i2s rx dma channel*/
+ if (dma_rx_transfer != NULL)
+ {
+ if (dma_rx_transfer->chan) {
+ dma_release_channel(dma_rx_transfer->chan);
+ dma_rx_transfer->chan = NULL;
+ }
+
+ kfree(dma_rx_transfer);
+ dma_rx_transfer = NULL;
+ }
+
+
+}
+static void create_fh8830_i2s_proc(void);
+static void remove_fh8830_i2s_proc(void);
+static int __init fh_i2s_init(void)
+{
+ int status;
+ init_i2s_mutex(&fh_i2s_dev.i2s_config);
+
+ i2s_prealloc_dma_buffer(&fh_i2s_dev.i2s_config);
+
+ status = i2s_request_dma_channel();
+ if(status)
+ printk("fh i2s init fail status=0x%x\n",status);
+ create_fh8830_i2s_proc();
+ return platform_driver_register(&fh_i2s_driver);
+}
+module_init(fh_i2s_init);
+
+static void __exit fh_i2s_exit(void)
+{
+
+ remove_fh8830_i2s_proc();
+ i2s_release_dma_channel();
+ i2s_free_prealloc_dma_buffer(&fh_i2s_dev.i2s_config);
+ platform_driver_unregister(&fh_i2s_driver);
+}
+module_exit(fh_i2s_exit);
+
+MODULE_AUTHOR("FH_i2s");
+MODULE_DESCRIPTION("FH_i2s");
+MODULE_LICENSE("GPL");
+
+/****************************debug proc*****************************/
+#include <linux/proc_fs.h>
+#include <asm/unistd.h>
+struct proc_dir_entry *proc_ac_entry;
+#define proc_name "fh_fh8830_i2s"
+#define I2S_TEST_LOOP 1
+#define I2S_TEST_OUT 0
+ssize_t proc_ac_read(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ssize_t len = 0;
+ int i;
+ unsigned int reg;
+ unsigned int *data_addr;
+ unsigned int file_len;
+ struct file *fp;
+ loff_t pos;
+ mm_segment_t old_fs;
+ for (i = 0;i <= 0x1C;i += 4) {
+ printk(KERN_INFO"0x%x reg = 0x%x\n",i, readl(fh_i2s_module.regs + i));
+ }
+ for (i = 80;i <= 0xd0;i += 4) {
+ printk(KERN_INFO"0x%x reg = 0x%x\n",i, readl(fh_i2s_module.regs + i));
+ }
+ printk("loop test start __________\n");
+ writel(0xe0000000,fh_i2s_module.regs + I2S_CTRL);// ctrl
+ writel(0x1,fh_i2s_module.regs + I2S_TXFIFO_CTRL);// tx
+ writel(0x1,fh_i2s_module.regs + I2S_RXFIFO_CTRL);// rx
+ writel(0x1 <<12 |0x3,fh_i2s_module.regs + I2S_DBG_CTL);// loop
+
+ writel(0x1000000,0xfe0901ac);
+ i = 0;
+ data_addr = kzalloc(4096*4, GFP_KERNEL);
+ if (!data_addr)
+ {
+ printk("alloc dma_rx_transfer failed\n");
+ }
+ memset(data_addr, 0, 4096*4);
+ mdelay(1000);
+ while (1) {
+#if I2S_TEST_LOOP
+ reg =0xff & readl(fh_i2s_module.regs + 0x0c);
+
+ if(reg >0){
+ reg = readl(fh_i2s_module.regs +0x200);
+ writel(reg,fh_i2s_module.regs + 0x100);
+ }
+
+#endif
+
+
+#if I2S_TEST_OUT
+ reg =0xff00 & readl(fh_i2s_module.regs + 0x0c);
+ reg = reg>>8;
+ // printk("write dac date reg = %x \n ",reg);
+ if (reg < 0x40) {
+ writel(i, fh_i2s_module.regs + 0x100);
+ writel(i, fh_i2s_module.regs + 0x300);
+
+
+ }
+ i = i+0x200;
+ if(i >= 0xffffff)
+ i = 0;
+ // printk("water level 0x%x\n",readl(fh_i2s_module.regs + 0x0c));
+
+#endif
+ }
+//TEST I2S_INPUT TO FILE
+ pos =0;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ fp = filp_open("/home/test",O_RDWR|O_CREAT,0644);
+ if(IS_ERR(fp)){
+ printk("file is error\n");
+ return 1;
+
+ }
+ file_len = fp->f_op->write(fp,(void*)data_addr,4096,&pos);
+ printk("write len is %d\n",len);
+ set_fs(old_fs);
+ filp_close(fp,NULL);
+ printk("loop test stop ___________\n");
+ for (i = 0;i <= 0x1C;i += 4) {
+ printk(KERN_INFO"0x%x reg = 0x%x\n",i, readl(fh_i2s_module.regs + i));
+ }
+ for (i = 80;i <= 0xd0;i += 4) {
+ printk(KERN_INFO"0x%x reg = 0x%x\n",i, readl(fh_i2s_module.regs + i));
+ }
+ return len;
+}
+
+static void create_fh8830_i2s_proc(void)
+{
+ proc_ac_entry = create_proc_entry(proc_name, S_IRUGO, NULL);
+ if (!proc_ac_entry)
+ {
+ printk(KERN_ERR"create proc failed\n");
+ }
+ else
+ {
+ proc_ac_entry->read_proc = proc_ac_read;
+ }
+}
+
+static void remove_fh8830_i2s_proc(void)
+{
+ remove_proc_entry(proc_name, NULL);
+}
diff --git a/drivers/misc/fh_i2s.h b/drivers/misc/fh_i2s.h
new file mode 100644
index 00000000..7f067a76
--- /dev/null
+++ b/drivers/misc/fh_i2s.h
@@ -0,0 +1,35 @@
+#ifndef __FH_LINBA0_I2S_H
+#define __FH_LINBA0_I2S_H
+
+#define FH_I2S_IOCTL_MEM_BASE 'M' /* ioctl magic: capture/playback buffer setup */
+#define I2S_INIT_CAPTURE_MEM _IOWR(FH_I2S_IOCTL_MEM_BASE, 0, int)
+#define I2S_INIT_PLAYBACK_MEM _IOWR(FH_I2S_IOCTL_MEM_BASE, 1, int)
+
+#define FH_I2S_IOCTL_PARAM_BASE 'P' /* ioctl magic: stream parameters */
+#define I2S_SET_VOL _IOWR(FH_I2S_IOCTL_PARAM_BASE, 0, int)
+#define I2S_SET_INPUT_MODE _IOWR(FH_I2S_IOCTL_PARAM_BASE, 1, int)
+#define I2S_SET_OUTPUT_MODE _IOWR(FH_I2S_IOCTL_PARAM_BASE, 2, int)
+
+#define FH_I2S_IOCTL_ENA_BASE 'E' /* ioctl magic: stream start/stop/pause control */
+#define I2S_AI_EN _IOWR(FH_I2S_IOCTL_ENA_BASE, 0, int)
+#define I2S_AO_EN _IOWR(FH_I2S_IOCTL_ENA_BASE, 1, int)
+#define I2S_AI_DISABLE _IOWR(FH_I2S_IOCTL_ENA_BASE, 2, int)
+#define I2S_AO_DISABLE _IOWR(FH_I2S_IOCTL_ENA_BASE, 3, int)
+#define I2S_AI_PAUSE _IOWR(FH_I2S_IOCTL_ENA_BASE, 4, int)
+#define I2S_AI_RESUME _IOWR(FH_I2S_IOCTL_ENA_BASE, 5, int)
+#define I2S_AO_PAUSE _IOWR(FH_I2S_IOCTL_ENA_BASE, 6, int)
+#define I2S_AO_RESUME _IOWR(FH_I2S_IOCTL_ENA_BASE, 7, int)
+
+
+struct fh_i2s_cfg_arg{ /* stream configuration passed from userspace */
+
+ int volume;
+ int rate;
+ int frame_bit;
+ int channels;
+ int buffer_size;
+ int period_size;
+};
+
+#endif
+
+
diff --git a/drivers/misc/fh_pinctrl_dev.c b/drivers/misc/fh_pinctrl_dev.c
new file mode 100644
index 00000000..c8790f40
--- /dev/null
+++ b/drivers/misc/fh_pinctrl_dev.c
@@ -0,0 +1,279 @@
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+#include <linux/printk.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/uaccess.h>
+#include "fh_pinctrl_dev.h"
+
+#define FH_PINCTRL_PROC_FILE "driver/pinctrl"
+
+#undef FH_PINCTRL_DEBUG
+#ifdef FH_PINCTRL_DEBUG
+#define PRINT_DBG(fmt,args...) printk(fmt,##args)
+#else
+#define PRINT_DBG(fmt,args...) do{} while(0)
+#endif
+
+struct proc_dir_entry *pinctrl_proc_file;
+
+static int fh_pinctrl_open(struct inode *inode, struct file *file) /* no per-open state */
+{
+ return 0;
+}
+
+static int fh_pinctrl_release(struct inode *inode, struct file *filp) /* nothing to tear down */
+{
+ return 0;
+}
+
+
+static long fh_pinctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) /* validates magic/nr/user buffer; no commands implemented yet */
+{
+ int ret = 0;
+
+ if (unlikely(_IOC_TYPE(cmd) != PINCTRL_IOCTL_MAGIC)) /* reject foreign ioctl magic */
+ {
+ pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
+ __func__, _IOC_TYPE(cmd), -ENOTTY);
+ return -ENOTTY;
+ }
+
+ if (unlikely(_IOC_NR(cmd) > PINCTRL_IOCTL_MAXNR))
+ {
+ pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
+ __func__, _IOC_NR(cmd), -ENOTTY);
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ) /* _IOC_READ means the kernel writes to user memory */
+ {
+ ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
+ }
+ else if(_IOC_DIR(cmd) & _IOC_WRITE)
+ {
+ ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
+ }
+
+ if(ret)
+ {
+ pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
+ __func__, _IOC_NR(cmd), -EACCES);
+ return -EACCES;
+ }
+
+ switch(cmd)
+ {
+
+ } /* NOTE(review): empty switch — every valid cmd currently falls through and returns 0 */
+
+ return ret;
+}
+
+static const struct file_operations fh_pinctrl_fops = /* char-device ops for the misc node */
+{
+ .owner = THIS_MODULE,
+ .open = fh_pinctrl_open,
+ .release = fh_pinctrl_release,
+ .unlocked_ioctl = fh_pinctrl_ioctl,
+};
+
+static struct miscdevice fh_pinctrl_misc = /* /dev/fh_pinctrl, dynamic minor */
+{
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DEVICE_NAME,
+ .fops = &fh_pinctrl_fops,
+};
+
+static void del_char(char* str,char ch) /* strip every occurrence of ch from str, in place */
+{
+ char *p = str;
+ char *q = str;
+ while(*q)
+ {
+ if (*q !=ch)
+ {
+ *p++ = *q;
+ }
+ q++;
+ }
+ *p='\0';
+}
+
+static ssize_t fh_pinctrl_proc_write(struct file *filp, const char *buf, size_t len, loff_t *off) /* parse "dev,<name>,x,x" or "mux,<dev>,<mux>,<sel>" from userspace */
+{
+ int i;
+ char message[32] = {0};
+ char * const delim = ",";
+ char *cur = message;
+ char* param_str[4];
+ unsigned int param[4];
+
+ len = (len > 31) ? 31 : len; /* clamp to 31 so message keeps a NUL terminator for strsep() */
+
+ if (copy_from_user(message, buf, len))
+ return -EFAULT;
+
+ for(i=0; i<4; i++) /* all four comma-separated fields are mandatory */
+ {
+ param_str[i] = strsep(&cur, delim);
+ if(!param_str[i])
+ {
+ pr_err("%s: ERROR: parameter[%d] is empty\n", __func__, i);
+ pr_err("[dev/mux], [dev name], [mux name], [func sel]\n");
+ return -EINVAL;
+ }
+ else
+ {
+ del_char(param_str[i], ' ');
+ del_char(param_str[i], '\n');
+ }
+ }
+
+ if(!strcmp(param_str[0], "dev"))
+ {
+ fh_pinctrl_sdev(param_str[1], 0);
+ }
+ else if(!strcmp(param_str[0], "mux"))
+ {
+ i = kstrtouint(param_str[3], 10, &param[3]); /* was: unsigned "< 0" test on simple_strtoul — could never fail */
+ if (i < 0)
+ {
+ pr_err("ERROR: parameter[3] is incorrect\n");
+ return -EINVAL;
+ }
+ fh_pinctrl_smux(param_str[1], param_str[2], param[3], 0);
+ }
+ else
+ {
+ pr_err("ERROR: parameter[0] is incorrect\n"
+ "[dev/mux], [dev name], [mux name], [func sel]\n");
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static void *v_seq_start(struct seq_file *s, loff_t *pos) /* single-record sequence: &counter is only an opaque non-NULL token */
+{
+ static unsigned long counter = 0;
+ if (*pos == 0)
+ return &counter;
+ else
+ {
+ *pos = 0;
+ return NULL; /* second call terminates the iteration */
+ }
+}
+
+static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL; /* only one record to show */
+}
+
+static void v_seq_stop(struct seq_file *s, void *v) /* nothing to release */
+{
+
+}
+
+static int v_seq_show(struct seq_file *sfile, void *v) /* dump pinctrl state via the mach-level helper */
+{
+
+ fh_pinctrl_prt(sfile);
+ return 0;
+}
+
+static const struct seq_operations isp_seq_ops =
+{
+ .start = v_seq_start,
+ .next = v_seq_next,
+ .stop = v_seq_stop,
+ .show = v_seq_show
+};
+
+static int fh_pinctrl_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &isp_seq_ops);
+}
+
+static struct file_operations fh_pinctrl_proc_ops = /* read: seq dump; write: command parser above */
+{
+ .owner = THIS_MODULE,
+ .open = fh_pinctrl_proc_open,
+ .read = seq_read,
+ .write = fh_pinctrl_proc_write,
+ .release = seq_release,
+};
+
+
+static int __devinit fh_pinctrl_probe(struct platform_device *pdev) /* register misc device plus /proc control file */
+{
+ int err;
+
+ err = misc_register(&fh_pinctrl_misc);
+
+ if(err < 0)
+ {
+ pr_err("%s: ERROR: %s registration failed",
+ __func__, DEVICE_NAME);
+ return -ENXIO;
+ }
+
+ pinctrl_proc_file = create_proc_entry(FH_PINCTRL_PROC_FILE, 0644, NULL);
+
+ if (pinctrl_proc_file)
+ pinctrl_proc_file->proc_fops = &fh_pinctrl_proc_ops;
+ else
+ pr_err("%s: ERROR: %s proc file create failed",
+ __func__, DEVICE_NAME); /* non-fatal: device node still usable */
+
+ return 0;
+}
+
+static int __exit fh_pinctrl_remove(struct platform_device *pdev) /* NOTE(review): proc entry is never removed here — leaks on driver unbind */
+{
+ misc_deregister(&fh_pinctrl_misc);
+ return 0;
+}
+
+static struct platform_driver fh_pinctrl_driver =
+{
+ .driver =
+ {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = fh_pinctrl_probe,
+ .remove = __exit_p(fh_pinctrl_remove),
+};
+
+static int __init fh_pinctrl_dev_init(void)
+{
+ return platform_driver_register(&fh_pinctrl_driver);
+}
+
+static void __exit fh_pinctrl_dev_exit(void)
+{
+
+ platform_driver_unregister(&fh_pinctrl_driver);
+
+}
+
+module_init(fh_pinctrl_dev_init);
+module_exit(fh_pinctrl_dev_exit);
+
+
+MODULE_AUTHOR("fullhan");
+
+MODULE_DESCRIPTION("FH PINCTRL driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
diff --git a/drivers/misc/fh_pinctrl_dev.h b/drivers/misc/fh_pinctrl_dev.h
new file mode 100644
index 00000000..8fbf6c0e
--- /dev/null
+++ b/drivers/misc/fh_pinctrl_dev.h
@@ -0,0 +1,18 @@
+
+#ifndef FH_PINCTRL_DEV_H_
+#define FH_PINCTRL_DEV_H_
+
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <mach/pinctrl.h>
+
+
+#define DEVICE_NAME "fh_pinctrl" /* misc device and platform driver name */
+
+
+#define PINCTRL_IOCTL_MAGIC 'p' /* ioctl magic checked in fh_pinctrl_ioctl() */
+
+#define PINCTRL_IOCTL_MAXNR 8 /* highest accepted _IOC_NR */
+
+
+#endif /* FH_PINCTRL_DEV_H_ */
diff --git a/drivers/misc/fh_sadc.c b/drivers/misc/fh_sadc.c
new file mode 100644
index 00000000..54925642
--- /dev/null
+++ b/drivers/misc/fh_sadc.c
@@ -0,0 +1,557 @@
+/*
+ * fh_sadc.c
+ *
+ * Created on: Mar 13, 2015
+ * Author: duobao
+ */
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+/*****************************************************************************
+ * Include Section
+ * add all #include here
+ *****************************************************************************/
+#include <mach/fh_sadc.h>
+#include <mach/pmu.h>
+
+/*****************************************************************************
+ * Define section
+ * add all #define here
+ *****************************************************************************/
+#define wrap_readl(wrap, name) \
+ __raw_readl(&(((struct wrap_sadc_reg *)wrap->regs)->name))
+
+#define wrap_writel(wrap, name, val) \
+ __raw_writel((val), &(((struct wrap_sadc_reg *)wrap->regs)->name))
+
+
+#if defined(CONFIG_FH_SADC_V11)
+#define SADC_REF (1800)
+#define SADC_MAX_AD_VALUE (0xfff)
+#else
+#define SADC_REF (3300)
+#define SADC_MAX_AD_VALUE (0x3ff)
+#endif
+
+#define IOCTL_GET_SADC_DATA 1
+#define IOCTL_SADC_POWER_DOWN 0xff
+#define SADC_WRAP_BASE (0xf1200000)
+#define SADC_IRQn (23)
+#define SADC_MAX_CONTROLLER (1)
+#define SADC_STATUS_COLESD (0)
+#define SADC_STATUS_OPEN (1)
+#define FH_SADC_PLAT_DEVICE_NAME "fh_sadc"
+#define FH_SADC_MISC_DEVICE_NAME "fh_sadc"
+/****************************************************************************
+* ADT section
+* add definition of user defined Data Type that only be used in this file here
+***************************************************************************/
+struct sadc_info { /* user<->kernel exchange record for IOCTL_GET_SADC_DATA */
+ int channel;
+ int sadc_data; /* converted value in millivolts (raw * SADC_REF / SADC_MAX_AD_VALUE) */
+};
+/******************************************************************************
+ * Function prototype section
+ * add prototypes for all functions called by this file,execepting those
+ * declared in header file
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Global variables section - Exported
+ * add declaration of global variables that will be exported here
+ * e.g.
+ * int8_t foo;
+ ****************************************************************************/
+
+/*****************************************************************************
+
+ * static fun;
+ *****************************************************************************/
+static u32 fh_sadc_isr_read_data(struct wrap_sadc_obj *sadc,\
+ u32 channel, u16 *buf);
+static int fh_sadc_open(struct inode *inode, struct file *file);
+static int fh_sadc_release(struct inode *inode, struct file *filp);
+static long fh_sadc_ioctl(struct file *filp, unsigned int cmd,\
+ unsigned long arg);
+/*****************************************************************************
+ * Global variables section - Local
+ * define global variables(will be refered only in this file) here,
+ * static keyword should be used to limit scope of local variable to this file
+ * e.g.
+ * static uint8_t ufoo;
+ *****************************************************************************/
+static struct wrap_sadc_obj fh_sadc_obj; /* single controller instance (SADC_MAX_CONTROLLER == 1) */
+
+static const struct file_operations fh_sadc_fops = { /* /dev/fh_sadc ops */
+ .owner = THIS_MODULE,
+ .open = fh_sadc_open,
+ .release = fh_sadc_release,
+ .unlocked_ioctl = fh_sadc_ioctl,
+};
+
+static struct miscdevice fh_sadc_misc = { /* dynamic-minor misc node */
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = FH_SADC_MISC_DEVICE_NAME,
+ .fops = &fh_sadc_fops,
+};
+
+/*****************************************************************************
+ *
+ *
+ * function body
+ *
+ *
+ *****************************************************************************/
+u32 fh_sadc_isr_read_data(struct wrap_sadc_obj *sadc, u32 channel,
+ u16 *buf) { /* one blocking conversion on 'channel'; raw sample -> *buf; returns 0 or SADC_TIMEOUT */
+ u32 xainsel = 1 << channel; /* one-hot analog input select */
+ u32 xversel = 0;
+ u32 xpwdb = 1; /* keep analog power up */
+ /* select -> sample -> deselect phase counters */
+ u32 sel2sam_pre_cnt = 2;
+ u32 sam_cnt = 2;
+ u32 sam2sel_pos_cnt = 2;
+ /* end-of-conversion timeout thresholds */
+ u32 eoc_tos = 0xff;
+ u32 eoc_toe = 0xff;
+ u32 time_out = 0xffff;
+ /* enable the EOC interrupt */
+ u32 sadc_isr = 0x01;
+ /* start a single conversion */
+ u32 sadc_cmd = 0x01;
+ /* raw dout register contents */
+ u32 temp_data = 0;
+ u32 ret_time;
+
+ /* program channel select / vref select / power bit */
+ wrap_writel(sadc, sadc_control, xainsel | (xversel << 8) \
+ | (xpwdb << 12));
+
+ wrap_writel(sadc, sadc_cnt,
+ sel2sam_pre_cnt | (sam_cnt << 8) | \
+ (sam2sel_pos_cnt << 16));
+
+ wrap_writel(sadc, sadc_timeout,
+ eoc_tos | (eoc_toe << 8) | (time_out << 16));
+
+ wrap_writel(sadc, sadc_ier, sadc_isr);
+
+ wrap_writel(sadc, sadc_cmd, sadc_cmd);
+
+ ret_time = wait_for_completion_timeout(&sadc->done, 5000); /* 5000 is in jiffies — NOTE(review): confirm ms was not intended */
+ if (ret_time == 0) {
+ printk(KERN_ERR "sadc timeout..\n");
+ return SADC_TIMEOUT; /* *buf left unwritten on timeout */
+ }
+
+ switch (channel) { /* two channels share each 32-bit dout word */
+ case 0:
+ case 1:
+ /*read channel 0 1*/
+ temp_data = wrap_readl(sadc, sadc_dout0);
+ break;
+
+ case 2:
+ case 3:
+ /*read channel 2 3*/
+ temp_data = wrap_readl(sadc, sadc_dout1);
+ break;
+
+ case 4:
+ case 5:
+ /*read channel 4 5*/
+ temp_data = wrap_readl(sadc, sadc_dout2);
+ break;
+
+ case 6:
+ case 7:
+ /*read channel 6 7*/
+ temp_data = wrap_readl(sadc, sadc_dout3);
+ break;
+ default:
+ break; /* NOTE(review): channel > 7 silently yields 0 — confirm callers validate */
+ }
+ if (channel % 2) {
+ /*read low 16bit*/
+ *buf = (u16) (temp_data & 0xffff);
+ } else {
+ /*read high 16bit*/
+ *buf = (u16) (temp_data >> 16);
+ }
+ return 0;
+
+}
+EXPORT_SYMBOL(fh_sadc_isr_read_data);
+
+
+int fh_sadc_enable(void) /* set XPWDB (control bit 12): analog power up */
+{
+ u32 control_reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ control_reg = wrap_readl(sadc, sadc_control);
+ control_reg |= 1 << 12;
+ wrap_writel(sadc, sadc_control, control_reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_enable);
+
+int fh_sadc_disable(void) /* clear XPWDB (control bit 12): analog power down */
+{
+ u32 control_reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ control_reg = wrap_readl(sadc, sadc_control);
+ control_reg &= ~(1 << 12);
+ wrap_writel(sadc, sadc_control, control_reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_disable);
+
+static irqreturn_t fh_sadc_isr(int irq, void *dev_id) /* EOC interrupt: mask, ack, wake the waiting reader */
+{
+
+ u32 isr_status;
+ struct wrap_sadc_obj *sadc = (struct wrap_sadc_obj *) dev_id;
+
+ isr_status = wrap_readl(sadc, sadc_int_status);
+ if (isr_status & 0x01) {
+
+ u32 sadc_isr = 0x00;
+ wrap_writel(sadc, sadc_ier, sadc_isr); /* mask further EOC irqs until the next conversion */
+
+ wrap_writel(sadc, sadc_int_status, isr_status); /* acknowledge */
+ complete(&(sadc->done));
+ } else {
+
+ printk(KERN_ERR "sadc maybe error!\n"); /* unexpected source; still reported as handled */
+ }
+ return IRQ_HANDLED;
+}
+
+long fh_sadc_get_value(int channel) /* convert once on 'channel' and return millivolts; 0 on error */
+{
+ unsigned int ret;
+ long w = 0;
+ u16 ad_raw_data;
+ if (channel < 0) {
+ printk(KERN_ERR "ERROR: %s, sadc channel no %d is incorrect\n",
+ __func__, channel);
+ return 0; /* NOTE(review): 0 is also a legal reading — callers cannot distinguish failure */
+ }
+
+ fh_sadc_enable();
+
+ ret = fh_sadc_isr_read_data(&fh_sadc_obj, channel, &ad_raw_data);
+
+ if (ret != 0) {
+ printk(KERN_INFO "sadc error code:0x%x\n", ret);
+ } else {
+ w = ad_raw_data * SADC_REF / SADC_MAX_AD_VALUE; /* raw code -> mV */
+ printk(KERN_INFO "the value of sadc is: %ld\n", w);
+ }
+
+ return w;
+}
+EXPORT_SYMBOL(fh_sadc_get_value);
+
+static void del_char(char *str, char ch) /* strip every occurrence of ch from str, in place */
+{
+ char *p = str;
+ char *q = str;
+ while (*q) {
+ if (*q != ch)
+ *p++ = *q;
+ q++;
+ }
+ *p = '\0';
+}
+
+static ssize_t fh_sadc_proc_write(struct file *filp, const char *buf,
+ size_t len, loff_t *off) /* "echo 1/0 > proc file": power the ADC up or down */
+{
+ char message[32] = {0};
+ char * const delim = ",";
+ char *cur = message, *power_str;
+ unsigned int power;
+ int ret;
+ ret = 0;
+ len = (len > 31) ? 31 : len; /* clamp to 31 so message keeps a NUL terminator for strsep() */
+
+ if (copy_from_user(message, buf, len))
+ return -EFAULT;
+
+ power_str = strsep(&cur, delim);
+ if (!power_str) {
+ pr_err("%s: ERROR: parameter is empty\n", __func__);
+ return -EINVAL;
+ } else {
+ del_char(power_str, ' ');
+ del_char(power_str, '\n');
+ ret = kstrtouint(power_str, 0, &power); /* was: return code stored as "power", parsed value discarded */
+ if (ret < 0) {
+ pr_err("%s: ERROR: parameter is incorrect\n",\
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ power ? fh_sadc_enable() : fh_sadc_disable(); /* acts on the parsed value, not the parse status */
+
+ return len;
+}
+
+static void *v_seq_start(struct seq_file *s, loff_t *pos) /* single-record sequence; &counter is only a non-NULL token */
+{
+ static unsigned long counter;
+ counter = 0;
+ if (*pos == 0)
+ return &counter;
+ else {
+ *pos = 0;
+ return NULL; /* second call terminates the iteration */
+ }
+}
+
+static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL; /* only one record to show */
+}
+
+static void v_seq_stop(struct seq_file *s, void *v) /* nothing to release */
+{
+
+}
+
+static int v_seq_show(struct seq_file *sfile, void *v) /* print power state and a fresh reading of all 8 channels */
+{
+ int i;
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ u32 ret;
+
+ reg = (wrap_readl(sadc, sadc_control) & 0x1000); /* XPWDB bit */
+
+ seq_printf(sfile, "\nSADC Status:\n");
+ seq_printf(sfile, "Power %s\n\n", reg ? "up" : "down");
+
+ for (i = 0; i < 8; i++) {
+ u16 ad_raw_data;
+ ret = fh_sadc_isr_read_data(&fh_sadc_obj, i, &ad_raw_data);
+ if (ret != 0)
+ seq_printf(sfile, "sadc error code:0x%x\n", ret);
+ else
+ seq_printf(sfile, "channel: %d \tvalue: %u\n", i,
+ ad_raw_data * SADC_REF / SADC_MAX_AD_VALUE);
+ }
+ return 0;
+}
+
+static const struct seq_operations isp_seq_ops = {
+ .start = v_seq_start,
+ .next = v_seq_next,
+ .stop = v_seq_stop,
+ .show = v_seq_show
+};
+
+static int fh_sadc_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &isp_seq_ops);
+}
+
+static const struct file_operations fh_sadc_proc_ops = { /* read: channel dump; write: power control */
+ .owner = THIS_MODULE,
+ .open = fh_sadc_proc_open,
+ .read = seq_read,
+ .write = fh_sadc_proc_write,
+ .release = seq_release,
+};
+
+static int __devinit fh_sadc_probe(struct platform_device *pdev) /* clk -> mem region -> ioremap -> irq -> misc dev -> proc file */
+{
+ int err;
+ struct resource *res;
+ struct clk *sadc_clk;
+
+ sadc_clk = clk_get(&pdev->dev, "sadc_clk");
+ if (IS_ERR(sadc_clk)) {
+ err = PTR_ERR(sadc_clk);
+ return -EPERM;
+ }
+ clk_enable(sadc_clk); /* NOTE(review): clock never disabled/put on later error paths or in remove */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "sadc get platform source error..\n");
+ return -ENODEV;
+ }
+
+ fh_sadc_obj.irq_no = platform_get_irq(pdev, 0);
+ if (fh_sadc_obj.irq_no < 0) { /* NOTE(review): only meaningful if irq_no is a signed type — confirm */
+ dev_warn(&pdev->dev, "sadc interrupt is not available.\n");
+ return fh_sadc_obj.irq_no;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name); /* res now points at the claimed region */
+ if (res == NULL) {
+ dev_err(&pdev->dev, "sadc region already claimed\n");
+ return -EBUSY;
+ }
+
+ fh_sadc_obj.regs = ioremap(res->start, resource_size(res));
+ if (fh_sadc_obj.regs == NULL) {
+ err = -ENXIO;
+ goto fail_no_ioremap;
+ }
+
+ init_completion(&fh_sadc_obj.done);
+ mutex_init(&fh_sadc_obj.sadc_lock);
+ fh_sadc_obj.active_channel_no = 0;
+
+ err = request_irq(fh_sadc_obj.irq_no, fh_sadc_isr, 0,\
+ dev_name(&pdev->dev),\
+ &fh_sadc_obj);
+ if (err) {
+ dev_dbg(&pdev->dev, "request_irq failed, %d\n", err);
+ err = -ENXIO;
+ goto err_irq;
+ }
+
+ err = misc_register(&fh_sadc_misc);
+
+ if (err < 0) {
+ pr_err("%s: ERROR: %s registration failed", __func__,
+ FH_SADC_MISC_DEVICE_NAME);
+ err = -ENXIO;
+ goto misc_error;
+ }
+
+ fh_sadc_obj.proc_file = create_proc_entry(FH_SADC_PROC_FILE,
+ 0644, NULL);
+
+ if (fh_sadc_obj.proc_file)
+ fh_sadc_obj.proc_file->proc_fops = &fh_sadc_proc_ops;
+ else
+ pr_err("%s: ERROR: %s proc file create failed",
+ __func__, "SADC"); /* non-fatal: device node still usable */
+
+ return 0;
+
+misc_error:
+ free_irq(fh_sadc_obj.irq_no, &fh_sadc_obj);
+
+err_irq:
+ iounmap(fh_sadc_obj.regs);
+
+fail_no_ioremap:
+ release_mem_region(res->start, resource_size(res));
+
+ return err;
+}
+
+static int __exit fh_sadc_remove(struct platform_device *pdev) /* NOTE(review): res is not NULL-checked and the proc entry is never removed */
+{
+
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ misc_deregister(&fh_sadc_misc);
+ free_irq(fh_sadc_obj.irq_no, &fh_sadc_obj);
+ iounmap(fh_sadc_obj.regs);
+ release_mem_region(res->start, resource_size(res));
+ return 0;
+
+}
+
+
+static int fh_sadc_open(struct inode *inode, struct file *file) /* no per-open state */
+{
+ return 0;
+}
+
+static int fh_sadc_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static long fh_sadc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg) { /* IOCTL_GET_SADC_DATA: convert and return mV; IOCTL_SADC_POWER_DOWN: clear XPWDB */
+
+ u32 ad_data;
+ u32 control_reg;
+ u16 ad_raw_data;
+ struct sadc_info sadc_info;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ mutex_lock(&sadc->sadc_lock); /* serialize conversions across callers */
+ if (cmd == IOCTL_GET_SADC_DATA) {
+ if (copy_from_user((void *) &sadc_info, (void __user*) arg,
+ sizeof(sadc_info))) {
+ mutex_unlock(&sadc->sadc_lock);
+ return -EFAULT;
+ }
+ if ((wrap_readl(sadc, sadc_status2) & 0x10)) { /* controller busy */
+ mutex_unlock(&sadc->sadc_lock);
+ return -EBUSY;
+ }
+ fh_sadc_isr_read_data(&fh_sadc_obj, sadc_info.channel,\
+ &ad_raw_data);
+ ad_data = ad_raw_data * SADC_REF; /* NOTE(review): read return unchecked and user channel unvalidated — confirm */
+ ad_data /= SADC_MAX_AD_VALUE;
+ sadc_info.sadc_data = ad_data;
+ if (put_user(sadc_info.sadc_data,
+ (int __user *)(&((struct sadc_info *)arg)\
+ ->sadc_data))) {
+ mutex_unlock(&sadc->sadc_lock);
+ return -EFAULT;
+ }
+ }
+
+ else if (cmd == IOCTL_SADC_POWER_DOWN) {
+ control_reg = wrap_readl(sadc, sadc_control);
+ control_reg &= ~(1 << 12);
+ wrap_writel(sadc, sadc_control, control_reg);
+ }
+ mutex_unlock(&sadc->sadc_lock);
+ return 0;
+}
+
+
+/*******************
+ *
+ *
+ *add platform cause of i need the board info...
+ *in the probe function. i will register the sadc
+ *misc drive...then the app can open the sadc misc device..
+ *
+ ******************/
+static struct platform_driver fh_sadc_driver = { /* platform glue: probe registers the misc device */
+ .driver = {
+ .name = FH_SADC_PLAT_DEVICE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = fh_sadc_probe,
+ .remove = __exit_p(fh_sadc_remove),
+};
+
+
+
+static int __init fh_sadc_init(void)
+{
+ return platform_driver_register(&fh_sadc_driver);
+}
+
+static void __exit fh_sadc_exit(void)
+{
+
+ platform_driver_unregister(&fh_sadc_driver);
+
+}
+
+module_init(fh_sadc_init);
+module_exit(fh_sadc_exit);
+
+MODULE_DESCRIPTION("fh sadc driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("zhangy@fullhan.com");
+MODULE_ALIAS("platform:FH_sadc");
diff --git a/drivers/misc/fh_sadc_v2.c b/drivers/misc/fh_sadc_v2.c
new file mode 100644
index 00000000..482e1ded
--- /dev/null
+++ b/drivers/misc/fh_sadc_v2.c
@@ -0,0 +1,1150 @@
+/*
+ * fh_sadc.c
+ *
+ * Created on: Mar 13, 2015
+ * Author: duobao
+ */
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+/*****************************************************************************
+ * Include Section
+ * add all #include here
+ *****************************************************************************/
+#include <mach/fh_sadc.h>
+#include <mach/pmu.h>
+#include <linux/sched.h>
+
+
+/*****************************************************************************
+ * Define section
+ * add all #define here
+ *****************************************************************************/
+#define wrap_readl(wrap, name) \
+ __raw_readl(&(((struct wrap_sadc_reg *)wrap->regs)->name))
+
+#define wrap_writel(wrap, name, val) \
+ __raw_writel((val), &(((struct wrap_sadc_reg *)wrap->regs)->name))
+
+
+#define IOCTL_GET_SINGLE_SADC_DATA 0x1
+#define IOCTL_CONTINUE_SADC_CFG 0x3
+#define IOCTL_CONTINUE_SADC_START 0x4
+#define IOCTL_CONTINUE_SADC_END 0x5
+#ifdef CONFIG_FH_SADC_V22
+#define IOCTL_HIT_SADC_CFG 0x6
+#endif
+#define IOCTL_SADC_POWER_DOWN 0xff
+
+
+
+
+#define SADC_WRAP_BASE (SADC_REG_BASE)
+#define SADC_IRQn (23)
+#define SADC_MAX_CONTROLLER (1)
+#define SADC_STATUS_COLESD (0)
+#define SADC_STATUS_OPEN (1)
+#define FH_SADC_PLAT_DEVICE_NAME "fh_sadc"
+#define FH_SADC_MISC_DEVICE_NAME "fh_sadc"
+/*void (*g_sadc_get_continue_data)(u8 channel, u32 value) = NULL;*/
+/*void (*g_sadc_get_hit_data)(u8 channel, u32 value) = NULL;*/
+
+/****************************************************************************
+* ADT section
+* add definition of user defined Data Type that only be used in this file here
+***************************************************************************/
+struct sadc_info { /* user<->kernel exchange record for the data ioctls */
+ int channel;
+ int sadc_data; /* converted value in millivolts */
+};
+
+struct sadc_continue_cfg { /* continuous-scan configuration from userspace */
+ u32 channel_cfg;
+ u32 continue_time; /*ms*/
+ u32 glitch_value; /*mv*/
+ u32 glitch_time; /*count*/
+ u32 eq_flag;
+ u32 zero_value; /*ms*/
+ u32 precision; /*0xfff*/
+ u32 powerdown;
+};
+
+struct sadc_continue_ctrl { /* per-channel latest sample plus a valid flag */
+ struct sadc_info usrdata;
+ u32 flag;
+};
+
+
+#ifdef CONFIG_FH_SADC_V22
+struct sadc_hit_data_cfg { /* threshold-hit detection setup (V22 hardware only) */
+ u8 channel_cfg;
+ u8 value_en;
+ u16 hit_gap;/*mv*/
+ u16 value[8];/*mv*/
+};
+#endif
+
+struct sadc_continue_ctrl g_sadc_data[SADC_CHANNEL_NUM]; /* NOTE(review): file-scope but not static — pollutes the kernel namespace */
+u32 g_sadc_debug; /* NOTE(review): likewise not static */
+/******************************************************************************
+ * Function prototype section
+ * add prototypes for all functions called by this file,execepting those
+ * declared in header file
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Global variables section - Exported
+ * add declaration of global variables that will be exported here
+ * e.g.
+ * int8_t foo;
+ ****************************************************************************/
+
+/*****************************************************************************
+
+ * static fun;
+ *****************************************************************************/
+static u32 fh_sadc_isr_read_data(struct wrap_sadc_obj *sadc,\
+ u32 channel, u16 *buf);
+static int fh_sadc_open(struct inode *inode, struct file *file);
+static int fh_sadc_release(struct inode *inode, struct file *filp);
+static long fh_sadc_ioctl(struct file *filp, unsigned int cmd,\
+ unsigned long arg);
+static int fh_sadc_read(struct file *filp, char __user *buf,
+ size_t len, loff_t *off);
+
+/*****************************************************************************
+ * Global variables section - Local
+ * define global variables(will be refered only in this file) here,
+ * static keyword should be used to limit scope of local variable to this file
+ * e.g.
+ * static uint8_t ufoo;
+ *****************************************************************************/
+static struct wrap_sadc_obj fh_sadc_obj; /* single controller instance */
+
+static const struct file_operations fh_sadc_fops = { /* v2 additionally exposes read() (see fh_sadc_read) */
+ .owner = THIS_MODULE,
+ .open = fh_sadc_open,
+ .read = fh_sadc_read,
+ .release = fh_sadc_release,
+ .unlocked_ioctl = fh_sadc_ioctl,
+};
+
+static struct miscdevice fh_sadc_misc = { /* dynamic-minor misc node */
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = FH_SADC_MISC_DEVICE_NAME,
+ .fops = &fh_sadc_fops,
+};
+
+/*****************************************************************************
+ *
+ *
+ * function body
+ *
+ *
+ *****************************************************************************/
+u32 fh_sadc_isr_read_data(struct wrap_sadc_obj *sadc, u32 channel,
+ u16 *buf) { /* one blocking conversion; identical to the v1 implementation */
+ u32 xainsel = 1 << channel; /* one-hot analog input select */
+ u32 xversel = 0;
+ u32 xpwdb = 1; /* keep analog power up */
+ /* select -> sample -> deselect phase counters */
+ u32 sel2sam_pre_cnt = 2;
+ u32 sam_cnt = 2;
+ u32 sam2sel_pos_cnt = 2;
+ /* end-of-conversion timeout thresholds */
+ u32 eoc_tos = 0xff;
+ u32 eoc_toe = 0xff;
+ u32 time_out = 0xffff;
+ /* enable the EOC interrupt */
+ u32 sadc_isr = 0x01;
+ /* start a single conversion */
+ u32 sadc_cmd = 0x01;
+ /* raw dout register contents */
+ u32 temp_data = 0;
+ u32 ret_time;
+
+ /* program channel select / vref select / power bit */
+ wrap_writel(sadc, sadc_control, xainsel | (xversel << 8) \
+ | (xpwdb << 12));
+
+ wrap_writel(sadc, sadc_cnt,
+ sel2sam_pre_cnt | (sam_cnt << 8) | \
+ (sam2sel_pos_cnt << 16));
+
+ wrap_writel(sadc, sadc_timeout,
+ eoc_tos | (eoc_toe << 8) | (time_out << 16));
+
+ wrap_writel(sadc, sadc_ier, sadc_isr);
+
+ wrap_writel(sadc, sadc_cmd, sadc_cmd);
+
+ ret_time = wait_for_completion_timeout(&sadc->done, 5000); /* 5000 is in jiffies — NOTE(review): confirm ms was not intended */
+ if (ret_time == 0) {
+ printk(KERN_ERR "sadc timeout..\n");
+ return SADC_TIMEOUT; /* *buf left unwritten on timeout */
+ }
+
+ switch (channel) { /* two channels share each 32-bit dout word */
+ case 0:
+ case 1:
+ /*read channel 0 1*/
+ temp_data = wrap_readl(sadc, sadc_dout0);
+ break;
+
+ case 2:
+ case 3:
+ /*read channel 2 3*/
+ temp_data = wrap_readl(sadc, sadc_dout1);
+ break;
+
+ case 4:
+ case 5:
+ /*read channel 4 5*/
+ temp_data = wrap_readl(sadc, sadc_dout2);
+ break;
+
+ case 6:
+ case 7:
+ /*read channel 6 7*/
+ temp_data = wrap_readl(sadc, sadc_dout3);
+ break;
+ default:
+ break; /* NOTE(review): channel > 7 silently yields 0 — confirm callers validate */
+ }
+ if (channel % 2) {
+ /*read low 16bit*/
+ *buf = (u16) (temp_data & 0xffff);
+ } else {
+ /*read high 16bit*/
+ *buf = (u16) (temp_data >> 16);
+ }
+ return 0;
+
+}
+EXPORT_SYMBOL(fh_sadc_isr_read_data);
+
+/* Select conversion mode: 0 = single shot, 1 = continuous scan. */
+int fh_sadc_mode_set(u32 mode)
+{
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ reg = wrap_readl(sadc, sadc_model);
+ if (mode)
+ reg |= 1;
+ else
+ reg &= (~1);
+ wrap_writel(sadc, sadc_model, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_mode_set);
+
+int fh_sadc_lpc_en_set(u32 lpc_flag) /* toggle chn_cfg bit 28 ("lpc" enable) */
+{
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ reg = wrap_readl(sadc, sadc_chn_cfg);
+ if (lpc_flag)
+ reg |= (1 << 28);
+ else
+ reg &= (~(1 << 28));
+ wrap_writel(sadc, sadc_chn_cfg, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_lpc_en_set);
+
+u32 fh_sadc_lpc_en_get(void) /* read back chn_cfg bit 28 as 0/1 */
+{
+ u32 reg;
+ u32 ret;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ reg = wrap_readl(sadc, sadc_chn_cfg);
+ reg &= (1 << 28);
+ ret = reg >> 28;
+ return ret;
+}
+EXPORT_SYMBOL(fh_sadc_lpc_en_get);
+
+int fh_sadc_scan_delta(u32 scan_delta) /* program scan delta threshold: mV -> raw code, capped at 0x3f, into chn_cfg1[5:0] */
+{
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ u32 value;
+
+ value = ((u32)scan_delta*SADC_MAX_AD_VALUE)/SADC_REF;
+ if (value > 0x3f)
+ value = 0x3f;
+ reg = wrap_readl(sadc, sadc_chn_cfg1);
+ reg &= (~(0x3f<<0));
+ reg |= value ;
+ wrap_writel(sadc, sadc_chn_cfg1, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_scan_delta);
+
+
+int fh_sadc_glitch_en_set(u32 glitch_flag) /* toggle chn_cfg bit 27 (glitch filter enable) */
+{
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ reg = wrap_readl(sadc, sadc_chn_cfg);
+ if (glitch_flag)
+ reg |= (1 << 27);
+ else
+ reg &= (~(1 << 27));
+ wrap_writel(sadc, sadc_chn_cfg, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_glitch_en_set);
+
+u32 fh_sadc_glitch_en_get(void) /* read back chn_cfg bit 27 as 0/1 */
+{
+ u32 reg;
+ u32 ret;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ reg = wrap_readl(sadc, sadc_chn_cfg);
+ reg &= (1 << 27);
+ ret = reg >> 27;
+ return ret;
+}
+EXPORT_SYMBOL(fh_sadc_glitch_en_get);
+
+int fh_sadc_scan_start(void) /* set scan_en bit 0 to begin a continuous scan */
+{
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ reg = wrap_readl(sadc, sadc_scan_en);
+ reg |= (1 << 0);
+ wrap_writel(sadc, sadc_scan_en, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_scan_start);
+
+int fh_sadc_scan_end(void) /* set scan_en bit 1 to request scan stop */
+{
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ reg = wrap_readl(sadc, sadc_scan_en);
+ reg |= (1 << 1);
+ wrap_writel(sadc, sadc_scan_en, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_scan_end);
+
+int fh_sadc_scan_power_cnt(u8 scan_power_cnt) /* program chn_cfg1[15:8] power-settle count */
+{
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ reg = wrap_readl(sadc, sadc_chn_cfg1);
+ reg &= (~(0xff<<8));
+ reg |= (scan_power_cnt << 8);
+ wrap_writel(sadc, sadc_chn_cfg1, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_scan_power_cnt);
+
+
+int fh_sadc_cons_ch_eq_set(u32 chn_num) /* OR channel mask into glitch_cfg[7:0] — NOTE(review): bits are never cleared */
+{
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ reg = wrap_readl(sadc, sadc_glitch_cfg);
+ reg |= (chn_num&0xff);
+ wrap_writel(sadc, sadc_glitch_cfg, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_cons_ch_eq_set);
+
+int fh_sadc_set_act_bit(u32 act_bit) /* program active-bit mask into glitch_cfg[19:8], act_bit capped at 12 */
+{
+ u32 reg;
+ u32 value;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ if (act_bit > 12)
+ value = 12;
+ else
+ value = act_bit;
+ value = (~(1<<value))&0xfff; /* NOTE(review): clears only bit 'value' (0xfff when value==12) — confirm (1<<value)-1 was not intended */
+ reg = wrap_readl(sadc, sadc_glitch_cfg);
+ reg &= (~(0xfff<<8));
+ reg |= (value<<8);
+ wrap_writel(sadc, sadc_glitch_cfg, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_set_act_bit);
+
+int fh_sadc_set_continuous_time(u32 continuous_time) /* ms -> ticks assuming a 1 MHz SADC clock (FPGA value hard-coded) */
+{
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ struct clk *sadc_clk;
+ u32 value;
+
+ sadc_clk = clk_get(NULL, "sadc_clk"); /* NOTE(review): only an existence check; clock rate unused and clk never put */
+ if (IS_ERR(sadc_clk))
+ return -EPERM;
+
+ /*value = (glitch_time/1000)*sadc_clk->rate;FPGA 1000000HZ*/
+ value = continuous_time*(1000000/1000);
+ wrap_writel(sadc, sadc_continuous_time, value);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_set_continuous_time);
+
+int fh_sadc_set_glitch_time(u32 glitch_time) /* raw write of the glitch-filter time register */
+{
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ wrap_writel(sadc, sadc_glitch_time, glitch_time);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_set_glitch_time);
+
+int fh_sadc_set_zero_value(u32 zero_value) /* mV -> raw code into glitch_cfg[31:20] */
+{
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ u32 value;
+ u32 reg;
+
+ value = (zero_value*SADC_MAX_AD_VALUE)/SADC_REF;
+ value &= 0xfff;
+ reg = wrap_readl(sadc, sadc_glitch_cfg);
+ reg &= (~(0xfff<<20));
+ reg |= (value<<20);
+ wrap_writel(sadc, sadc_glitch_cfg, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_set_zero_value);
+
+#ifdef CONFIG_FH_SADC_V22
+int fh_sadc_hit_data_config(struct sadc_hit_data_cfg *hit_data) /* convert mV thresholds to raw 12-bit codes, pack pairs into hit_value0..3, enable per-value irqs */
+{
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ u32 value;
+ u32 value1;
+ u32 reg;
+
+ value = hit_data->hit_gap;
+ value = (value*SADC_MAX_AD_VALUE)/SADC_REF; /* mV -> raw code */
+ value &= 0xfff;
+ reg = value|(hit_data->value_en << 16)|(hit_data->channel_cfg << 24);
+ wrap_writel(sadc, sadc_hit_cfg, reg);
+
+ value = hit_data->value[0];
+ value = (value*SADC_MAX_AD_VALUE)/SADC_REF;
+ value &= 0xfff;
+
+ value1 = hit_data->value[1];
+ value1 = (value1*SADC_MAX_AD_VALUE)/SADC_REF;
+ value1 &= 0xfff;
+ reg = value | (value1 << 16); /* two thresholds per 32-bit register */
+
+ wrap_writel(sadc, sadc_hit_value0, reg);
+
+ value = hit_data->value[2];
+ value = (value*SADC_MAX_AD_VALUE)/SADC_REF;
+ value &= 0xfff;
+
+ value1 = hit_data->value[3];
+ value1 = (value1*SADC_MAX_AD_VALUE)/SADC_REF;
+ value1 &= 0xfff;
+ reg = value | (value1 << 16);
+
+ wrap_writel(sadc, sadc_hit_value1, reg);
+
+ value = hit_data->value[4];
+ value = (value*SADC_MAX_AD_VALUE)/SADC_REF;
+ value &= 0xfff;
+
+ value1 = hit_data->value[5];
+ value1 = (value1*SADC_MAX_AD_VALUE)/SADC_REF;
+ value1 &= 0xfff;
+ reg = value | (value1 << 16);
+
+ wrap_writel(sadc, sadc_hit_value2, reg);
+
+ value = hit_data->value[6];
+ value = (value*SADC_MAX_AD_VALUE)/SADC_REF;
+ value &= 0xfff;
+
+ value1 = hit_data->value[7];
+ value1 = (value1*SADC_MAX_AD_VALUE)/SADC_REF;
+ value1 &= 0xfff;
+ reg = value | (value1 << 16);
+
+ wrap_writel(sadc, sadc_hit_value3, reg);
+
+ reg = wrap_readl(sadc, sadc_ier);
+ reg &= (~(0xff<<24));
+ reg |= (hit_data->value_en << 24); /* per-threshold interrupt enables in ier[31:24] */
+ wrap_writel(sadc, sadc_ier, reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_hit_data_config);
+#endif
+
+int fh_sadc_enable(void) /* set XPWDB (control bit 12): analog power up */
+{
+ u32 control_reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ control_reg = wrap_readl(sadc, sadc_control);
+ control_reg |= 1 << 12;
+ wrap_writel(sadc, sadc_control, control_reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_enable);
+
+int fh_sadc_disable(void) /* clear XPWDB (control bit 12): analog power down */
+{
+ u32 control_reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+
+ control_reg = wrap_readl(sadc, sadc_control);
+ control_reg &= ~(1 << 12);
+ wrap_writel(sadc, sadc_control, control_reg);
+ return 0;
+}
+EXPORT_SYMBOL(fh_sadc_disable);
+
+
+u32 fh_sadc_default_config(void)
+{
+ u32 xversel = 0;
+ u32 xpwdb = 1;
+ /*cnt*/
+ u32 sel2sam_pre_cnt = 2;
+ u32 sam_cnt = 2;
+ u32 sam2sel_pos_cnt = 2;
+ /*time out*/
+ u32 eoc_tos = 0xff;
+ u32 eoc_toe = 0xff;
+ u32 time_out = 0xffff;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ /*sadc act bit default 12bit*/
+ fh_sadc_set_act_bit(12);
+ /*control...*/
+ wrap_writel(sadc, sadc_control, (xversel << 8) \
+ | (xpwdb << 12));
+
+ wrap_writel(sadc, sadc_cnt,
+ sel2sam_pre_cnt | (sam_cnt << 8) | \
+ (sam2sel_pos_cnt << 16));
+
+ wrap_writel(sadc, sadc_timeout,
+ eoc_tos | (eoc_toe << 8) | (time_out << 16));
+
+
+ return 0;
+}
+
+static irqreturn_t fh_sadc_isr(int irq, void *dev_id)
+{
+
+ u32 isr_status;
+ u32 temp_data = 0;
+ u32 temp_data1 = 0;
+ u32 channel = 0;
+ u32 data = 0;
+ u32 data1 = 0;
+ struct wrap_sadc_obj *sadc = (struct wrap_sadc_obj *) dev_id;
+
+ isr_status = wrap_readl(sadc, sadc_int_status);
+ if (isr_status & 0x01) {
+
+ u32 sadc_isr = 0x00;
+ wrap_writel(sadc, sadc_ier, sadc_isr);
+
+ wrap_writel(sadc, sadc_int_status, isr_status);
+ complete(&(sadc->done));
+ return IRQ_HANDLED;
+ }
+ if (isr_status & 0x10000) {
+ channel = wrap_readl(sadc, sadc_status2);
+ channel = ((channel>>12)&0x7);
+ switch (channel/2) {
+ case 0:
+ /*read channel 0 1*/
+ temp_data = wrap_readl(sadc, sadc_dout0);
+ break;
+ case 1:
+ /*read channel 2 3*/
+ temp_data = wrap_readl(sadc, sadc_dout1);
+ break;
+ case 2:
+ /*read channel 4 5*/
+ temp_data = wrap_readl(sadc, sadc_dout2);
+ break;
+ case 3:
+ /*read channel 6 7*/
+ temp_data = wrap_readl(sadc, sadc_dout3);
+ break;
+ default:
+ break;
+ }
+
+ if (channel % 2)
+ /*read low 16bit*/
+ data = (temp_data & 0xfff);
+ else
+ /*read high 16bit*/
+ data = (temp_data >> 16) & 0xfff;
+ data = (data*SADC_REF)/SADC_MAX_AD_VALUE;
+ if (g_sadc_debug)
+ printk(KERN_ERR "channel:%x data:%dmv\n",
+ channel, data);
+ if (waitqueue_active(&fh_sadc_obj.readqueue)) {
+ if (g_sadc_data[channel].flag) {
+ g_sadc_data[channel].usrdata.sadc_data = data;
+ g_sadc_data[channel].flag = 0;
+ wake_up(&fh_sadc_obj.readqueue);
+ }
+ }
+ wrap_writel(sadc, sadc_int_status, 0x10000);
+ }
+ if (isr_status & 0x100000) {
+ channel = wrap_readl(sadc, sadc_status2);
+ channel = ((channel>>12)&0x7);
+ switch (channel/2) {
+ case 0:
+ /*read channel 0 1*/
+ temp_data1 = wrap_readl(sadc, sadc_dout0_all);
+ break;
+ case 1:
+ /*read channel 2 3*/
+ temp_data1 = wrap_readl(sadc, sadc_dout1_all);
+ break;
+ case 2:
+ /*read channel 4 5*/
+ temp_data1 = wrap_readl(sadc, sadc_dout2_all);
+ break;
+ case 3:
+ /*read channel 6 7*/
+ temp_data1 = wrap_readl(sadc, sadc_dout3_all);
+ break;
+ default:
+ break;
+ }
+
+ if (channel % 2)
+ /*read low 16bit*/
+ data1 = (temp_data1 & 0xfff);
+ else
+ /*read high 16bit*/
+ data1 = (temp_data1 >> 16) & 0xfff;
+ data1 = (data1*SADC_REF)/SADC_MAX_AD_VALUE;
+ if (g_sadc_debug) {
+ printk(KERN_ERR "glitch :%x data:%dmv\n",
+ channel, data1);
+ wrap_writel(sadc, sadc_int_status, 0x100000);
+ }
+ }
+#ifdef CONFIG_FH_SADC_V22
+ if (isr_status & 0xff000000) {
+ channel = wrap_readl(sadc, sadc_status2);
+ channel = ((channel>>12)&0x7);
+ switch (channel) {
+ case 0:
+ case 1:
+ /*read channel 0 1*/
+ temp_data = wrap_readl(sadc, sadc_hit_data0);
+ break;
+
+ case 2:
+ case 3:
+ /*read channel 2 3*/
+ temp_data = wrap_readl(sadc, sadc_hit_data1);
+ break;
+
+ case 4:
+ case 5:
+ /*read channel 4 5*/
+ temp_data = wrap_readl(sadc, sadc_hit_data2);
+ break;
+
+ case 6:
+ case 7:
+ /*read channel 6 7*/
+ temp_data = wrap_readl(sadc, sadc_hit_data3);
+ break;
+ default:
+ break;
+ }
+
+ if (channel % 2) {
+ /*read low 16bit*/
+ data = (temp_data & 0xfff);
+ } else {
+ /*read high 16bit*/
+ data = (temp_data >> 16) & 0xfff;
+ }
+
+ data = (data*SADC_REF)/SADC_MAX_AD_VALUE;
+ if (g_sadc_debug)
+ printk(KERN_ERR "hit value channel:%x data:%dmv isr_status:%x\n",
+ channel, data, isr_status);
+ if (waitqueue_active(&fh_sadc_obj.readqueue)) {
+ if (g_sadc_data[channel].flag) {
+ g_sadc_data[channel].usrdata.sadc_data = data;
+ g_sadc_data[channel].flag = 0;
+ wake_up(&fh_sadc_obj.readqueue);
+ }
+ }
+ wrap_writel(sadc, sadc_int_status, (isr_status & 0xff000000));
+
+ }
+#endif
+ return IRQ_HANDLED;
+}
+
+long fh_sadc_get_value(int channel)
+{
+ unsigned int ret;
+ long w = 0;
+ u16 ad_raw_data;
+ if (channel < 0) {
+ printk(KERN_ERR "ERROR: %s, sadc channel no %d is incorrect\n",
+ __func__, channel);
+ return 0;
+ }
+
+ fh_sadc_enable();
+
+ ret = fh_sadc_isr_read_data(&fh_sadc_obj, channel, &ad_raw_data);
+
+ if (ret != 0) {
+ printk(KERN_INFO "sadc error code:0x%x\n", ret);
+ } else {
+ w = ad_raw_data * SADC_REF / SADC_MAX_AD_VALUE;
+ printk(KERN_INFO "the value of sadc is: %ld\n", w);
+ }
+
+ return w;
+}
+EXPORT_SYMBOL(fh_sadc_get_value);
+
+static void del_char(char *str, char ch)
+{
+ char *p = str;
+ char *q = str;
+ while (*q) {
+ if (*q != ch)
+ *p++ = *q;
+ q++;
+ }
+ *p = '\0';
+}
+
+static ssize_t fh_sadc_proc_write(struct file *filp, const char *buf,
+ size_t len, loff_t *off)
+{
+ char message[32] = {0};
+ char * const delim = ",";
+ char *cur = message;
+ int i;
+ char *param_str[2];
+ int reg;
+ int ret;
+ unsigned long param = 0;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ len = (len > 32) ? 32 : len;
+
+ if (copy_from_user(message, buf, len))
+ return -EFAULT;
+
+ for (i = 0; i < 2; i++) {
+ param_str[i] = strsep(&cur, delim);
+ if (!param_str[i]) {
+ pr_err("%s: ERROR: parameter[%d] is empty\n",
+ __func__, i);
+ pr_err("[debug],[channel],[pwr_down]\n");
+ return -EINVAL;
+ } else {
+ del_char(param_str[i], ' ');
+ del_char(param_str[i], '\n');
+ }
+ }
+ if (!strcmp(param_str[0], "debug")) {
+ ret = (u32)kstrtoul(param_str[1], 10, &param);
+ spin_lock_irq(&fh_sadc_obj.lock);
+ if (param) {
+ g_sadc_debug = 1;
+ reg = wrap_readl(sadc, sadc_ier);
+ reg |= 1<<20;
+ wrap_writel(sadc, sadc_ier, reg);
+ } else {
+ g_sadc_debug = 0;
+ reg = wrap_readl(sadc, sadc_ier);
+ reg &= (~(1<<20));
+ wrap_writel(sadc, sadc_ier, reg);
+ }
+ spin_unlock_irq(&fh_sadc_obj.lock);
+ }
+
+ if (!strcmp(param_str[0], "pwr_down")) {
+ ret = (u32)kstrtoul(param_str[1], 10, &param);
+ param ? fh_sadc_enable() : fh_sadc_disable();
+ }
+
+ return len;
+}
+
+static void *v_seq_start(struct seq_file *s, loff_t *pos)
+{
+ static unsigned long counter;
+ counter = 0;
+ if (*pos == 0)
+ return &counter;
+ else {
+ *pos = 0;
+ return NULL;
+ }
+}
+
+static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void v_seq_stop(struct seq_file *s, void *v)
+{
+
+}
+
+static int v_seq_show(struct seq_file *sfile, void *v)
+{
+ int i;
+ u32 reg;
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ u32 ret;
+
+ reg = (wrap_readl(sadc, sadc_control) & 0x1000);
+
+ seq_printf(sfile, "\nSADC Status:\n");
+ seq_printf(sfile, "Power %s\n\n", reg ? "up" : "down");
+
+ for (i = 0; i < 8; i++) {
+ u16 ad_raw_data;
+ ret = fh_sadc_isr_read_data(&fh_sadc_obj, i, &ad_raw_data);
+ if (ret != 0)
+ seq_printf(sfile, "sadc error code:0x%x\n", ret);
+ else
+ seq_printf(sfile, "channel: %d \tvalue: %u\n", i,
+ ad_raw_data * SADC_REF / SADC_MAX_AD_VALUE);
+ }
+ return 0;
+}
+
+static const struct seq_operations isp_seq_ops = {
+ .start = v_seq_start,
+ .next = v_seq_next,
+ .stop = v_seq_stop,
+ .show = v_seq_show
+};
+
+static int fh_sadc_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &isp_seq_ops);
+}
+
+static const struct file_operations fh_sadc_proc_ops = {
+ .owner = THIS_MODULE,
+ .open = fh_sadc_proc_open,
+ .read = seq_read,
+ .write = fh_sadc_proc_write,
+ .release = seq_release,
+};
+
+static int __devinit fh_sadc_probe(struct platform_device *pdev)
+{
+ int err;
+ struct resource *res;
+ struct clk *sadc_clk;
+
+ sadc_clk = clk_get(&pdev->dev, "sadc_clk");
+ if (IS_ERR(sadc_clk)) {
+ err = PTR_ERR(sadc_clk);
+ return -EPERM;
+ }
+ clk_enable(sadc_clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "sadc get platform source error..\n");
+ return -ENODEV;
+ }
+
+ fh_sadc_obj.irq_no = platform_get_irq(pdev, 0);
+ if (fh_sadc_obj.irq_no < 0) {
+ dev_warn(&pdev->dev, "sadc interrupt is not available.\n");
+ return fh_sadc_obj.irq_no;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "sadc region already claimed\n");
+ return -EBUSY;
+ }
+
+ fh_sadc_obj.regs = ioremap(res->start, resource_size(res));
+ if (fh_sadc_obj.regs == NULL) {
+ err = -ENXIO;
+ goto fail_no_ioremap;
+ }
+
+ init_completion(&fh_sadc_obj.done);
+ init_waitqueue_head(&fh_sadc_obj.readqueue);
+ mutex_init(&fh_sadc_obj.sadc_lock);
+ fh_sadc_obj.active_channel_no = 0;
+
+ err = request_irq(fh_sadc_obj.irq_no, fh_sadc_isr, 0,\
+ dev_name(&pdev->dev),\
+ &fh_sadc_obj);
+ if (err) {
+ dev_dbg(&pdev->dev, "request_irq failed, %d\n", err);
+ err = -ENXIO;
+ goto err_irq;
+ }
+
+ err = misc_register(&fh_sadc_misc);
+
+ if (err < 0) {
+ pr_err("%s: ERROR: %s registration failed", __func__,
+ FH_SADC_MISC_DEVICE_NAME);
+ err = -ENXIO;
+ goto misc_error;
+ }
+
+ fh_sadc_obj.proc_file = create_proc_entry(FH_SADC_PROC_FILE,
+ 0644, NULL);
+
+ if (fh_sadc_obj.proc_file)
+ fh_sadc_obj.proc_file->proc_fops = &fh_sadc_proc_ops;
+ else
+ pr_err("%s: ERROR: %s proc file create failed",
+ __func__, "SADC");
+
+ return 0;
+
+misc_error:
+ free_irq(fh_sadc_obj.irq_no, &fh_sadc_obj);
+
+err_irq:
+ iounmap(fh_sadc_obj.regs);
+
+fail_no_ioremap:
+ release_mem_region(res->start, resource_size(res));
+
+ return err;
+}
+
+static int __exit fh_sadc_remove(struct platform_device *pdev)
+{
+
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ misc_deregister(&fh_sadc_misc);
+ free_irq(fh_sadc_obj.irq_no, &fh_sadc_obj);
+ iounmap(fh_sadc_obj.regs);
+ release_mem_region(res->start, resource_size(res));
+ return 0;
+
+}
+
+
+static int fh_sadc_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int fh_sadc_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+static int fh_sadc_read(struct file *filp, char __user *buf,
+ size_t len, loff_t *off)
+{
+ int ret;
+ struct sadc_info sadc_data;
+
+ ret = copy_from_user((void *) &sadc_data,
+ (void __user *) buf, sizeof(struct sadc_info));
+
+ spin_lock_irq(&fh_sadc_obj.lock);
+ g_sadc_data[sadc_data.channel].usrdata.channel = sadc_data.channel;
+ g_sadc_data[sadc_data.channel].flag = 1;
+ spin_unlock_irq(&fh_sadc_obj.lock);
+
+ /* wait for enough data*/
+ if (!wait_event_timeout(fh_sadc_obj.readqueue,
+ !g_sadc_data[sadc_data.channel].flag, 100)) {
+ pr_err("%s: channel:%d read time out",
+ __func__, sadc_data.channel);
+ return -1;
+ }
+ spin_lock_irq(&fh_sadc_obj.lock);
+ ret = copy_to_user((void __user *)buf,
+ (void *)&g_sadc_data[sadc_data.channel].usrdata,
+ sizeof(struct sadc_info));
+ spin_unlock_irq(&fh_sadc_obj.lock);
+ return len;
+}
+
+static long fh_sadc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg) {
+
+ u32 ad_data;
+ u32 control_reg;
+ u16 ad_raw_data;
+ struct sadc_info sadc_info;
+ struct sadc_continue_cfg sadc_continue_cfg;
+#ifdef CONFIG_FH_SADC_V22
+ struct sadc_hit_data_cfg sadc_hit_data_cfg;
+#endif
+ struct wrap_sadc_obj *sadc = &fh_sadc_obj;
+ mutex_lock(&sadc->sadc_lock);
+ switch (cmd) {
+ case IOCTL_GET_SINGLE_SADC_DATA:
+ {
+ if (copy_from_user((void *) &sadc_info, (void __user*) arg,
+ sizeof(sadc_info))) {
+ mutex_unlock(&sadc->sadc_lock);
+ return -EFAULT;
+ }
+ if ((wrap_readl(sadc, sadc_status2) & 0x10)) {
+ mutex_unlock(&sadc->sadc_lock);
+ return -EBUSY;
+ }
+ fh_sadc_mode_set(0);
+
+ fh_sadc_isr_read_data(&fh_sadc_obj, sadc_info.channel,\
+ &ad_raw_data);
+ ad_data = ad_raw_data * SADC_REF;
+ ad_data /= SADC_MAX_AD_VALUE;
+ sadc_info.sadc_data = ad_data;
+ if (put_user(sadc_info.sadc_data,
+ (int __user *)(&((struct sadc_info *)arg)\
+ ->sadc_data))) {
+ mutex_unlock(&sadc->sadc_lock);
+ return -EFAULT;
+ }
+ break;
+ }
+ case IOCTL_SADC_POWER_DOWN:
+ control_reg = wrap_readl(sadc, sadc_control);
+ control_reg &= ~(1 << 12);
+ wrap_writel(sadc, sadc_control, control_reg);
+ break;
+ case IOCTL_CONTINUE_SADC_CFG:
+ {
+ if (copy_from_user((void *) &sadc_continue_cfg,
+ (void __user *) arg, sizeof(sadc_continue_cfg))) {
+ mutex_unlock(&sadc->sadc_lock);
+ return -EFAULT;
+ }
+ /*config continue model*/
+ fh_sadc_mode_set(1);
+
+ /*config channel cfg */
+ control_reg = wrap_readl(sadc, sadc_chn_cfg);
+ control_reg |= (sadc_continue_cfg.channel_cfg & 0x7ffffff);
+ wrap_writel(sadc, sadc_chn_cfg, control_reg);
+
+ /*config continue time */
+ fh_sadc_set_continuous_time(sadc_continue_cfg.continue_time);
+
+ fh_sadc_glitch_en_set(1);
+
+ /*config glitch time */
+ fh_sadc_set_glitch_time(sadc_continue_cfg.glitch_time);
+
+ /*config scan delta */
+ fh_sadc_scan_delta(sadc_continue_cfg.glitch_value);
+
+ /*config zero value */
+ fh_sadc_set_zero_value(sadc_continue_cfg.zero_value);
+
+ /*config precision*/
+ if (sadc_continue_cfg.precision)
+ fh_sadc_set_act_bit(sadc_continue_cfg.precision);
+
+ /*config eq flag*/
+ fh_sadc_cons_ch_eq_set(sadc_continue_cfg.eq_flag);
+
+ /*default config*/
+ fh_sadc_default_config();
+
+ /*glitch isr enable*/
+ wrap_writel(sadc, sadc_ier, 1<<16);
+ break;
+ }
+ case IOCTL_CONTINUE_SADC_START:
+ fh_sadc_scan_start();
+ break;
+ case IOCTL_CONTINUE_SADC_END:
+ fh_sadc_scan_end();
+ break;
+#ifdef CONFIG_FH_SADC_V22
+ case IOCTL_HIT_SADC_CFG:
+ if (copy_from_user((void *) &sadc_hit_data_cfg,
+ (void __user *) arg, sizeof(sadc_hit_data_cfg))) {
+ mutex_unlock(&sadc->sadc_lock);
+ return -EFAULT;
+ }
+ fh_sadc_hit_data_config(&sadc_hit_data_cfg);
+ break;
+#endif
+ }
+ mutex_unlock(&sadc->sadc_lock);
+ return 0;
+}
+
+
+/*******************
+ *
+ *
+ *add platform cause of i need the board info...
+ *in the probe function. i will register the sadc
+ *misc drive...then the app can open the sadc misc device..
+ *
+ ******************/
+static struct platform_driver fh_sadc_driver = {
+ .driver = {
+ .name = FH_SADC_PLAT_DEVICE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = fh_sadc_probe,
+ .remove = __exit_p(fh_sadc_remove),
+};
+
+
+
+static int __init fh_sadc_init(void)
+{
+ return platform_driver_register(&fh_sadc_driver);
+}
+
+static void __exit fh_sadc_exit(void)
+{
+
+ platform_driver_unregister(&fh_sadc_driver);
+
+}
+
+module_init(fh_sadc_init);
+module_exit(fh_sadc_exit);
+
+MODULE_DESCRIPTION("fh sadc driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("tangyh@fullhan.com");
+MODULE_ALIAS("platform:FH_sadc");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f85e4222..5606661c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -675,6 +675,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
struct mmc_blk_request brq;
int ret = 1, disable_multi = 0;
+ int re_write_count = 0;
/*
* Reliable writes are used to implement Forced Unit Access and
@@ -688,7 +689,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
do {
struct mmc_command cmd = {0};
u32 readcmd, writecmd, status = 0;
-
+re_write:
memset(&brq, 0, sizeof(struct mmc_blk_request));
brq.mrq.cmd = &brq.cmd;
brq.mrq.data = &brq.data;
@@ -894,7 +895,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
spin_unlock_irq(&md->lock);
continue;
}
- goto cmd_err;
+ re_write_count++;
+ if (re_write_count < 4)
+ goto re_write;
+ else
+ goto cmd_err;
}
/*
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 38089b25..2b645fbe 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1557,6 +1557,9 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
}
EXPORT_SYMBOL(mmc_set_blocklen);
+extern struct mmc_host *mmc_sd1;
+extern struct mmc_host *mmc_sd0;
+
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
host->f_init = freq;
@@ -1631,6 +1634,14 @@ void mmc_rescan(struct work_struct *work)
if (host->ops->get_cd && host->ops->get_cd(host) == 0)
goto out;
+ /* rescan 5 times when detect mmc */
+ if (!(host->caps & MMC_CAP_NONREMOVABLE)) {
+ if (host->rescan_count > 5)
+ goto out;
+ else
+ host->rescan_count++;
+ }
+
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index d9411ed2..01b3b3d3 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -13,7 +13,7 @@
#include <linux/delay.h>
-#define MMC_CMD_RETRIES 3
+#define MMC_CMD_RETRIES 0 //FIXME, by PeterJiang, don't retry...
struct mmc_bus_ops {
int (*awake)(struct mmc_host *);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index aa7d1d79..2f002146 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -262,8 +262,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
if (card->ext_csd.rev > 5) {
printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
mmc_hostname(card->host), card->ext_csd.rev);
- err = -EINVAL;
- goto out;
+ /*err = -EINVAL;*/
+ /*goto out;*/
}
card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ff277412..c1649228 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -305,6 +305,8 @@ static int mmc_read_switch(struct mmc_card *card)
goto out;
}
+ if (status[13] & 0x02) //modify by kernel 3.0.101
+ card->sw_caps.hs_max_dtr = 50000000;
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
@@ -348,11 +350,11 @@ static int mmc_read_switch(struct mmc_card *card)
}
card->sw_caps.sd3_curr_limit = status[7];
- } else {
+ } /*else {//modify by kernel 3.0.101
if (status[13] & 0x02)
card->sw_caps.hs_max_dtr = 50000000;
}
-
+ */
out:
kfree(status);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 262fff01..06dbfdde 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -111,7 +111,7 @@ static int sdio_read_cccr(struct mmc_card *card)
cccr_vsn = data & 0x0f;
- if (cccr_vsn > SDIO_CCCR_REV_1_20) {
+ if (cccr_vsn > SDIO_CCCR_REV_3_00) { /* to support SDIO 3.0 (luoc) */
printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n",
mmc_hostname(card->host), cccr_vsn);
return -EINVAL;
@@ -881,3 +881,73 @@ err:
return err;
}
+int sdio_reset_comm(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 ocr;
+ int err;
+ printk("%s():\n", __func__);
+ printk("%s line %d.\n", __FILE__, __LINE__);
+ mmc_claim_host(host);
+ mmc_go_idle(host);
+ mmc_set_clock(host, host->f_min);
+ printk("%s line %d.\n", __FILE__, __LINE__);
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ goto err;
+ printk("%s line %d.\n", __FILE__, __LINE__);
+ host->ocr = mmc_select_voltage(host, ocr);
+ if (!host->ocr) {
+ err = -EINVAL;
+ goto err;
+ }
+ printk("%s line %d.\n", __FILE__, __LINE__);
+ err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ if (err)
+ goto err;
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+ printk("%s line %d.\n", __FILE__, __LINE__);
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto err;
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+ printk("%s line %d.\n", __FILE__, __LINE__);
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto err;
+ }
+ /*
+ * Switch to high-speed (if supported).
+ */
+ printk("%s line %d.\n", __FILE__, __LINE__);
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
+ goto err;
+ /*
+ * Change to the card's maximum speed.
+ */
+ printk("%s line %d.\n", __FILE__, __LINE__);
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0)
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ else if (err)
+ goto err;
+ mmc_release_host(host);
+ return 0;
+err:
+ printk("%s: Error resetting SDIO communications (%d)\n",
+ mmc_hostname(host), err);
+ mmc_release_host(host);
+ return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 56dbf3f6..96361a63 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -518,6 +518,23 @@ config MMC_DW_IDMAC
Designware Mobile Storage IP block. This disables the external DMA
interface.
+config MMC_FH
+ tristate "FH Memory Card Interface"
+ depends on ARM
+ help
+ This selects support for the Synopsys DesignWare Mobile Storage IP
+ block, this provides host support for SD and MMC interfaces, in both
+ PIO and external DMA modes.
+
+config MMC_FH_IDMAC
+ bool "Internal DMAC interface"
+ depends on MMC_FH
+ help
+ This selects support for the internal DMAC block within the Synopsys
+ Designware Mobile Storage IP block. This disables the external DMA
+ interface.
+
+
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 58a5cf73..e56cfa32 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
obj-$(CONFIG_MMC_DW) += dw_mmc.o
+obj-$(CONFIG_MMC_FH) += fhmci/
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
diff --git a/drivers/mmc/host/fh_mmc.c b/drivers/mmc/host/fh_mmc.c
new file mode 100644
index 00000000..151b5278
--- /dev/null
+++ b/drivers/mmc/host/fh_mmc.c
@@ -0,0 +1,2150 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#define DEBUG
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
+#include <linux/proc_fs.h>
+#include <linux/gpio.h>
+
+#include "fh_mmc.h"
+#include "fh_mmc_reg.h"
+
+#define TEST_GPIO 4
+
+#define SDC_DESC_SIZE (PAGE_SIZE * 2)
+#define T_END 10
+
+/* Common flag combinations */
+#define FH_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
+ SDMMC_INT_HTO | SDMMC_INT_SBE | \
+ SDMMC_INT_EBE)
+#define FH_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
+ SDMMC_INT_RESP_ERR)
+#define FH_MCI_ERROR_FLAGS (FH_MCI_DATA_ERROR_FLAGS | \
+ FH_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
+#define FH_MCI_SEND_STATUS 1
+#define FH_MCI_RECV_STATUS 2
+#define FH_MCI_DMA_THRESHOLD 16
+
+#ifdef CONFIG_MMC_FH_IDMAC
+struct idmac_desc {
+ u32 des0; /* Control Descriptor */
+#define IDMAC_DES0_DIC BIT(1)
+#define IDMAC_DES0_LD BIT(2)
+#define IDMAC_DES0_FD BIT(3)
+#define IDMAC_DES0_CH BIT(4)
+#define IDMAC_DES0_ER BIT(5)
+#define IDMAC_DES0_CES BIT(30)
+#define IDMAC_DES0_OWN BIT(31)
+
+ u32 des1; /* Buffer sizes */
+#define IDMAC_SET_BUFFER1_SIZE(d, s) \
+ ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
+
+ u32 des2; /* buffer 1 physical address */
+
+ u32 des3; /* buffer 2 physical address */
+};
+#endif /* CONFIG_MMC_FH_IDMAC */
+
+/**
+ * struct fh_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @ctype: Card type for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ * processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ * &struct fh_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @flags: Random state bits associated with the slot.
+ * @id: Number of this slot.
+ * @last_detect_state: Most recently observed card detect state.
+ */
+struct fh_mci_slot {
+ struct mmc_host *mmc;
+ struct fh_mci *host;
+
+ u32 ctype;
+
+ struct mmc_request *mrq;
+ struct list_head queue_node;
+
+ unsigned int clock;
+ unsigned long flags;
+#define FH_MMC_CARD_PRESENT 0
+#define FH_MMC_CARD_NEED_INIT 1
+ int id;
+ int last_detect_state;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static int fh_mci_req_show(struct seq_file *s, void *v)
+{
+ struct fh_mci_slot *slot = s->private;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_command *stop;
+ struct mmc_data *data;
+
+ /* Make sure we get a consistent snapshot */
+ spin_lock_bh(&slot->host->lock);
+ mrq = slot->mrq;
+
+ if (mrq) {
+ cmd = mrq->cmd;
+ data = mrq->data;
+ stop = mrq->stop;
+
+ if (cmd)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ cmd->opcode, cmd->arg, cmd->flags,
+ cmd->resp[0], cmd->resp[1], cmd->resp[2],
+ cmd->resp[2], cmd->error);
+ if (data)
+ seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
+ data->bytes_xfered, data->blocks,
+ data->blksz, data->flags, data->error);
+ if (stop)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ stop->opcode, stop->arg, stop->flags,
+ stop->resp[0], stop->resp[1], stop->resp[2],
+ stop->resp[2], stop->error);
+ }
+
+ spin_unlock_bh(&slot->host->lock);
+
+ return 0;
+}
+
+static int fh_mci_req_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fh_mci_req_show, inode->i_private);
+}
+
+static const struct file_operations fh_mci_req_fops = {
+ .owner = THIS_MODULE,
+ .open = fh_mci_req_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int fh_mci_regs_show(struct seq_file *s, void *v)
+{
+ seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
+ seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
+ seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
+ seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
+ seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
+ seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
+
+ return 0;
+}
+
+static int fh_mci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fh_mci_regs_show, inode->i_private);
+}
+
+static const struct file_operations fh_mci_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = fh_mci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void fh_mci_init_debugfs(struct fh_mci_slot *slot)
+{
+ struct mmc_host *mmc = slot->mmc;
+ struct fh_mci *host = slot->host;
+ struct dentry *root;
+ struct dentry *node;
+
+ root = mmc->debugfs_root;
+ if (!root)
+ return;
+
+ node = debugfs_create_file("regs", S_IRUSR, root, host,
+ &fh_mci_regs_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_file("req", S_IRUSR, root, slot,
+ &fh_mci_req_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("pending_events", S_IRUSR, root,
+ (u32 *)&host->pending_events);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("completed_events", S_IRUSR, root,
+ (u32 *)&host->completed_events);
+ if (!node)
+ goto err;
+
+ return;
+
+err:
+ dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+
+
+static void fh_mci_set_timeout(struct fh_mci *host)
+{
+ /* timeout (maximum) */
+ mci_writel(host, TMOUT, 0xffffffff);
+}
+
+static u32 fh_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct mmc_data *data;
+ u32 cmdr;
+ cmd->error = -EINPROGRESS;
+
+ cmdr = cmd->opcode;
+
+ if (cmdr == MMC_STOP_TRANSMISSION)
+ cmdr |= SDMMC_CMD_STOP;
+ else
+ cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ /* We expect a response, so set this bit */
+ cmdr |= SDMMC_CMD_RESP_EXP;
+ if (cmd->flags & MMC_RSP_136)
+ cmdr |= SDMMC_CMD_RESP_LONG;
+ }
+
+ if (cmd->flags & MMC_RSP_CRC)
+ cmdr |= SDMMC_CMD_RESP_CRC;
+
+ data = cmd->data;
+ if (data) {
+ cmdr |= SDMMC_CMD_DAT_EXP;
+ if (data->flags & MMC_DATA_STREAM)
+ cmdr |= SDMMC_CMD_STRM_MODE;
+ if (data->flags & MMC_DATA_WRITE)
+ cmdr |= SDMMC_CMD_DAT_WR;
+ }
+ cmdr |= SDMMC_CMD_USE_HOLD_REG;
+ return cmdr;
+}
+
+
+static void fh_mci_start_command(struct fh_mci *host,
+ struct mmc_command *cmd, u32 cmd_flags)
+{
+ host->cmd = cmd;
+ dev_vdbg(&host->pdev->dev,
+ "start command: ARGR=0x%08x CMDR=0x%08x\n",
+ cmd->arg, cmd_flags);
+ mci_writel(host, CMDARG, cmd->arg);
+ wmb();
+
+ mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+}
+
+static void send_stop_cmd(struct fh_mci *host, struct mmc_data *data)
+{
+ fh_mci_start_command(host, data->stop, host->stop_cmdr);
+}
+
+/* DMA interface functions */
+static void fh_mci_stop_dma(struct fh_mci *host)
+{
+ if (host->use_dma) {
+ host->dma_ops->stop(host);
+ host->dma_ops->cleanup(host);
+ }
+
+ /* Data transfer was stopped by the interrupt handler */
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+
+}
+
+#ifdef CONFIG_MMC_FH_IDMAC
+
+
+
+static void fh_mci_idmac_reset(struct fh_mci *host)
+{
+ u32 bmod = mci_readl(host, BMOD);
+ /* Software reset of DMA */
+ bmod |= SDMMC_IDMAC_SWRESET;
+ mci_writel(host, BMOD, bmod);
+
+}
+
+static void fh_mci_dma_cleanup(struct fh_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (data && host->dma_data_mapped)
+ dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+
+ host->dma_data_mapped =0;
+}
+
+static void fh_mci_idmac_stop_dma(struct fh_mci *host)
+{
+ u32 temp;
+
+ /* Disable and reset the IDMAC interface */
+ temp = mci_readl(host, CTRL);
+ temp &= ~SDMMC_CTRL_USE_IDMAC;
+ temp |= SDMMC_CTRL_DMA_RESET;
+ mci_writel(host, CTRL, temp);
+
+ /* Stop the IDMAC running */
+ temp = mci_readl(host, BMOD);
+ temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
+ temp |= SDMMC_IDMAC_SWRESET;
+ mci_writel(host, BMOD, temp);
+}
+
+/*
+ * IDMAC transfer-complete callback: unmap the scatterlist and, if the
+ * request is still alive, flag XFER_COMPLETE and schedule the tasklet.
+ */
+static void fh_mci_idmac_complete_dma(struct fh_mci *host)
+{
+	struct mmc_data *data = host->data;
+
+	dev_vdbg(&host->pdev->dev, "DMA complete\n");
+
+	host->dma_ops->cleanup(host);
+
+	/*
+	 * If the card was removed, data will be NULL. No point in trying to
+	 * send the stop command or waiting for NBUSY in this case.
+	 */
+	if (data) {
+		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+		tasklet_schedule(&host->tasklet);
+	}
+}
+
+/*
+ * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist.
+ * Scatterlist entries larger than DMA_ONE_BUF_SIZE_MAX are split across
+ * several descriptors.  The first descriptor is tagged FD (first), the
+ * last LD (last) with DIC cleared so only the final descriptor raises a
+ * completion interrupt.  The closing wmb() publishes the descriptors
+ * before the caller enables the DMA engine.
+ */
+static void fh_mci_translate_sglist(struct fh_mci *host, struct mmc_data *data,
+				    unsigned int sg_len)
+{
+	#define DMA_ONE_BUF_SIZE_MAX (0x2000 - 16)
+
+	int i;
+	int num = 0;
+	u32 seglen;
+	struct idmac_desc *desc = host->sg_cpu;
+	struct idmac_desc *ldesc = NULL;
+
+	for (i = 0; i < sg_len; i++) {
+		unsigned int length = sg_dma_len(&data->sg[i]);
+		u32 mem_addr = sg_dma_address(&data->sg[i]);
+
+		while (length > 0) {
+			//FIXME
+			//this should not happen
+			/* Overrunning the ring would corrupt descriptors. */
+			if (++num > host->ring_size) {
+				panic("%s, line %d, too long DMA transfer!\n",
+					__FILE__, __LINE__);
+			}
+
+			seglen = length;
+			if (seglen > DMA_ONE_BUF_SIZE_MAX) {
+				seglen = DMA_ONE_BUF_SIZE_MAX;
+			}
+
+			/* Set the OWN bit and disable interrupts for this descriptor */
+			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
+			/* Buffer length */
+			IDMAC_SET_BUFFER1_SIZE(desc, seglen);
+			/* Physical address to DMA to/from */
+			desc->des2 = mem_addr;
+
+			mem_addr += seglen;
+			length -= seglen;
+
+			ldesc = desc++;
+		}
+	}
+
+	/* Set first descriptor */
+	desc = host->sg_cpu;
+	desc->des0 |= IDMAC_DES0_FD;
+
+	/* Set last descriptor */
+	if (ldesc) {
+		ldesc->des0 |= IDMAC_DES0_LD;
+		ldesc->des0 &= ~IDMAC_DES0_DIC;
+	}
+
+	wmb();
+}
+
+/*
+ * Build the descriptor list and start the IDMAC.  CTRL must select the
+ * IDMAC before BMOD enables it; the final PLDMND (poll demand) write
+ * kicks the descriptor fetch.
+ */
+static void fh_mci_idmac_start_dma(struct fh_mci *host, unsigned int sg_len)
+{
+	u32 temp;
+
+	fh_mci_translate_sglist(host, host->data, sg_len);
+
+	/* Select IDMAC interface */
+	temp = mci_readl(host, CTRL);
+	temp |= SDMMC_CTRL_USE_IDMAC;
+	mci_writel(host, CTRL, temp);
+
+	wmb();
+
+	/* Enable the IDMAC */
+	temp = mci_readl(host, BMOD);
+	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
+	mci_writel(host, BMOD, temp);
+	/* Start it running */
+	mci_writel(host, PLDMND, 1);
+}
+
+/*
+ * One-time IDMAC setup: forward-link the descriptor ring (the last entry
+ * wraps back to the head and is marked end-of-ring), soft-reset the
+ * engine, unmask only the normal/TX/RX-complete interrupts, and program
+ * the descriptor ring base address.  Always returns 0.
+ */
+static int fh_mci_idmac_init(struct fh_mci *host)
+{
+	struct idmac_desc *p;
+	int i;
+
+	/* Number of descriptors in the ring buffer */
+	//host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+	host->ring_size = SDC_DESC_SIZE / sizeof(struct idmac_desc);
+
+	/* Forward link the descriptor list */
+	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
+		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
+
+	/* Set the last descriptor as the end-of-ring descriptor */
+	p->des3 = host->sg_dma;
+	p->des0 = IDMAC_DES0_ER;
+	fh_mci_idmac_reset(host);
+	/* Mask out interrupts - get Tx & Rx complete only */
+	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
+		   SDMMC_IDMAC_INT_TI);
+
+	/* Set the descriptor base address */
+	mci_writel(host, DBADDR, host->sg_dma);
+	return 0;
+}
+
+/* IDMAC implementation of the host DMA operations. */
+static struct fh_mci_dma_ops fh_mci_idmac_ops = {
+	.init = fh_mci_idmac_init,
+	.start = fh_mci_idmac_start_dma,
+	.stop = fh_mci_idmac_stop_dma,
+	.complete = fh_mci_idmac_complete_dma,
+	.cleanup = fh_mci_dma_cleanup,
+};
+#endif /* CONFIG_MMC_FH_IDMAC */
+
+/*
+ * Decide whether the transfer is DMA-able and, if so, map the
+ * scatterlist.  Returns the number of mapped entries (> 0) on success,
+ * or a negative errno when the caller must fall back to PIO.
+ * @next is currently unused (kept for interface compatibility).
+ */
+static int fh_mci_pre_dma_transfer(struct fh_mci *host,
+				   struct mmc_data *data,
+				   bool next)
+{
+	struct scatterlist *sg;
+	unsigned int i, direction;
+	int sg_len;
+
+	/*
+	 * We don't do DMA on "complex" transfers, i.e. with
+	 * non-word-aligned buffers or lengths. Also, we don't bother
+	 * with all the DMA setup overhead for short transfers.
+	 */
+	if (data->blocks * data->blksz < FH_MCI_DMA_THRESHOLD)
+		return -EINVAL;
+	if (data->blksz & 3)
+		return -EINVAL;
+	for_each_sg(data->sg, sg, data->sg_len, i) {
+		if (sg->offset & 3 || sg->length & 3)
+			return -EINVAL;
+	}
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
+			    direction);
+	/*
+	 * dma_map_sg() reports failure by returning 0, never a negative
+	 * value; treat 0 as an error so we fall back to PIO instead of
+	 * starting a DMA transfer with no mapped buffers.
+	 */
+	if (sg_len == 0)
+		return -EINVAL;
+
+	/* Only mark mapped on success so cleanup won't unmap nothing. */
+	host->dma_data_mapped = 1;
+	return sg_len;
+}
+
+
+
+/*
+ * Try to set up the current data transfer for DMA.  Returns 0 on
+ * success; a negative value means the caller must fall back to PIO.
+ * On success the RX/TX FIFO watermark interrupts are masked since the
+ * DMA engine moves the data.
+ */
+static int fh_mci_submit_data_dma(struct fh_mci *host, struct mmc_data *data)
+{
+	int sg_len;
+	u32 temp;
+
+	host->using_dma = 0;
+
+	/* If we don't have a channel, we can't do DMA */
+	if (!host->use_dma)
+		return -ENODEV;
+
+	sg_len = fh_mci_pre_dma_transfer(host, data, 0);
+	if (sg_len < 0) {
+		host->dma_ops->stop(host);
+		return sg_len;
+	}
+
+	host->using_dma = 1;
+
+	dev_vdbg(&host->pdev->dev,
+		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
+		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
+		 sg_len);
+
+#if 0
+	//test data blocksize
+	WARN((host->prev_blksz && (host->prev_blksz != data->blksz)),
+		"Block size changed, from %d to %d",
+		host->prev_blksz,
+		data->blksz);
+#endif
+
+	/* Enable the DMA interface */
+	temp = mci_readl(host, CTRL);
+	temp |= SDMMC_CTRL_DMA_ENABLE;
+	mci_writel(host, CTRL, temp);
+
+	/* Disable RX/TX IRQs, let DMA handle it */
+	temp = mci_readl(host, INTMASK);
+	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
+	mci_writel(host, INTMASK, temp);
+	host->dma_ops->start(host, sg_len);
+	return 0;
+}
+
+/*
+ * Prepare host state for a data transfer.  DMA is preferred; if
+ * fh_mci_submit_data_dma() refuses, fall back to PIO by pointing
+ * host->sg at the scatterlist, unmasking the FIFO watermark interrupts
+ * and disabling the DMA interface.
+ */
+static void fh_mci_submit_data(struct fh_mci *host, struct mmc_data *data)
+{
+	u32 temp;
+	int ret;
+	data->error = -EINPROGRESS;
+
+	WARN_ON(host->data);
+	host->sg = NULL;
+	host->data = data;
+
+	if (data->flags & MMC_DATA_READ)
+		host->dir_status = FH_MCI_RECV_STATUS;
+	else
+		host->dir_status = FH_MCI_SEND_STATUS;
+
+	ret = fh_mci_submit_data_dma(host, data);
+	if (ret) {
+		/* PIO fallback path */
+		host->sg = data->sg;
+		host->pio_offset = 0;
+
+		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
+
+		temp = mci_readl(host, INTMASK);
+		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
+		mci_writel(host, INTMASK, temp);
+		temp = mci_readl(host, CTRL);
+		temp &= ~SDMMC_CTRL_DMA_ENABLE;
+		mci_writel(host, CTRL, temp);
+		host->prev_blksz = 0;
+	}else {
+		/*
+		 * Keep the current block size.
+		 * It will be used to decide whether to update
+		 * fifoth register next time.
+		 */
+		host->prev_blksz = data->blksz;
+	}
+}
+
+/*
+ * Synchronously issue a register-update command (e.g. a clock update)
+ * and busy-poll up to 500 ms for the controller to clear the start bit.
+ * Logs an error on timeout; there is no return value for the caller.
+ */
+static void mci_send_cmd(struct fh_mci_slot *slot, u32 cmd, u32 arg)
+{
+	struct fh_mci *host = slot->host;
+	unsigned long timeout = jiffies + msecs_to_jiffies(500);
+	unsigned int cmd_status = 0;
+
+	mci_writel(host, CMDARG, arg);
+	wmb();
+	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
+
+	while (time_before(jiffies, timeout)) {
+		cmd_status = mci_readl(host, CMD);
+		if (!(cmd_status & SDMMC_CMD_START))
+			return;
+	}
+	dev_err(&slot->mmc->class_dev,
+		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
+		cmd, arg, cmd_status);
+}
+
+/*
+ * Program the card clock divider and bus width for @slot.  The clock is
+ * only reprogrammed when the requested rate changed.  Every
+ * CLKENA/CLKSRC/CLKDIV change must be announced to the card interface
+ * unit (CIU) with an UPD_CLK command while the clock is gated.
+ */
+static void fh_mci_setup_bus(struct fh_mci_slot *slot)
+{
+	struct fh_mci *host = slot->host;
+	u32 div;
+
+	if (slot->clock != host->current_speed) {
+		if (host->bus_hz % slot->clock)
+			/*
+			 * move the + 1 after the divide to prevent
+			 * over-clocking the card.
+			 */
+			div = ((host->bus_hz / slot->clock) >> 1) + 1;
+		else
+			div = (host->bus_hz / slot->clock) >> 1;
+
+		dev_info(&slot->mmc->class_dev,
+			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
+			 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
+			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
+
+		/* disable clock */
+		mci_writel(host, CLKENA, 0);
+		mci_writel(host, CLKSRC, 0);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		/* set clock to desired speed */
+		mci_writel(host, CLKDIV, div);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		/* enable clock */
+		mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE |
+			   SDMMC_CLKEN_LOW_PWR);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		host->current_speed = slot->clock;
+	}
+
+	/* Set the current slot bus width */
+	mci_writel(host, CTYPE, slot->ctype);
+}
+
+
+/*
+ * Begin executing @slot's queued mmc_request on the controller: select
+ * the slot, program clock/width, reset per-request state, set up the
+ * data phase (byte/block counts, DMA or PIO) and issue the command.
+ * Caller must hold host->lock.
+ */
+static void fh_mci_start_request(struct fh_mci *host,
+				 struct fh_mci_slot *slot)
+{
+	struct mmc_request *mrq;
+	struct mmc_command *cmd;
+	struct mmc_data *data;
+	u32 cmdflags;
+
+	mrq = slot->mrq;
+	if (host->pdata->select_slot)
+		host->pdata->select_slot(slot->id);
+	/* Slot specific timing and width adjustment */
+	fh_mci_setup_bus(slot);
+	host->cur_slot = slot;
+	host->mrq = mrq;
+
+	host->pending_events = 0;
+	host->completed_events = 0;
+	host->data_status = 0;
+	host->data_error_flag = 0;
+
+	data = mrq->data;
+	if (data) {
+		fh_mci_set_timeout(host);
+		mci_writel(host, BYTCNT, data->blksz*data->blocks);
+		mci_writel(host, BLKSIZ, data->blksz);
+	}
+
+	cmd = mrq->cmd;
+	cmdflags = fh_mci_prepare_command(slot->mmc, cmd);
+	/* this is the first command, send the initialization clock */
+	if (test_and_clear_bit(FH_MMC_CARD_NEED_INIT, &slot->flags))
+		cmdflags |= SDMMC_CMD_INIT;
+
+	/*
+	 * Prepare the stop command *before* starting the transfer: the
+	 * interrupt/tasklet path may call send_stop_cmd() (which uses
+	 * host->stop_cmdr) as soon as the data phase errors out, so it
+	 * must already be valid by the time the command is started.
+	 */
+	if (mrq->stop)
+		host->stop_cmdr = fh_mci_prepare_command(slot->mmc, mrq->stop);
+
+	if (data) {
+		fh_mci_submit_data(host, data);
+		wmb();
+	}
+	fh_mci_start_command(host, cmd, cmdflags);
+}
+
+
+/* must be called with host->lock held */
+/*
+ * Start @mrq immediately if the controller is idle, otherwise park the
+ * slot on the host queue to be picked up by fh_mci_request_end().
+ * Must be called with host->lock held.
+ */
+static void fh_mci_queue_request(struct fh_mci *host, struct fh_mci_slot *slot,
+				 struct mmc_request *mrq)
+{
+	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
+		 host->state);
+	slot->mrq = mrq;
+
+	if (host->state != STATE_IDLE) {
+		list_add_tail(&slot->queue_node, &host->queue);
+		return;
+	}
+
+	host->state = STATE_SENDING_CMD;
+	fh_mci_start_request(host, slot);
+}
+
+/*
+ * mmc_host_ops .request: fail fast with -ENOMEDIUM when no card is
+ * present, otherwise queue (or start) the request.  Presence check and
+ * queueing happen under host->lock so a removal cannot slip in between.
+ */
+static void fh_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct fh_mci_slot *slot = mmc_priv(mmc);
+	struct fh_mci *host = slot->host;
+
+	WARN_ON(slot->mrq);
+	/*
+	 * The check for card presence and queueing of the request must be
+	 * atomic, otherwise the card could be removed in between and the
+	 * request wouldn't fail until another card was inserted.
+	 */
+	spin_lock_bh(&host->lock);
+	if (!test_bit(FH_MMC_CARD_PRESENT, &slot->flags)) {
+		spin_unlock_bh(&host->lock);
+		mrq->cmd->error = -ENOMEDIUM;
+		mmc_request_done(mmc, mrq);
+		return;
+	}
+
+	/* We don't support multiple blocks of weird lengths. */
+	fh_mci_queue_request(host, slot, mrq);
+	spin_unlock_bh(&host->lock);
+}
+
+/*
+ * mmc_host_ops .set_ios: apply bus width, DDR mode, clock frequency and
+ * power-mode changes requested by the MMC core.  The actual clock/width
+ * registers are programmed later by fh_mci_setup_bus(); here we only
+ * record the requested settings.  (A disabled clk_set_rate experiment
+ * that previously lived here as "#if 0" dead code has been removed.)
+ */
+static void fh_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	u32 regs;
+	struct fh_mci_slot *slot = mmc_priv(mmc);
+
+	switch (ios->bus_width) {
+	case MMC_BUS_WIDTH_4:
+		slot->ctype = SDMMC_CTYPE_4BIT;
+		break;
+	case MMC_BUS_WIDTH_8:
+		slot->ctype = SDMMC_CTYPE_8BIT;
+		break;
+	default:
+		/* set default 1 bit mode */
+		slot->ctype = SDMMC_CTYPE_1BIT;
+	}
+	/* DDR mode set */
+	if (ios->ddr) {
+		regs = mci_readl(slot->host, UHS_REG);
+		regs |= (0x1 << slot->id) << 16;
+		mci_writel(slot->host, UHS_REG, regs);
+	}
+
+	if (ios->clock) {
+		/*
+		 * Use mirror of ios->clock to prevent race with mmc
+		 * core ios update when finding the minimum.
+		 */
+		slot->clock = ios->clock;
+	}
+
+	switch (ios->power_mode) {
+	case MMC_POWER_UP:
+		/* First command after power-up must carry the init clock. */
+		set_bit(FH_MMC_CARD_NEED_INIT, &slot->flags);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * mmc_host_ops .get_ro: report write-protect state, preferring the
+ * board-specific callback over the controller's WRTPRT register.
+ */
+static int fh_mci_get_ro(struct mmc_host *mmc)
+{
+	struct fh_mci_slot *slot = mmc_priv(mmc);
+	struct fh_mci_board *brd = slot->host->pdata;
+	int read_only;
+
+	/* Use platform get_ro function, else try on board write protect */
+	if (brd->get_ro) {
+		read_only = brd->get_ro(slot->id);
+	} else {
+		u32 wrtprt = mci_readl(slot->host, WRTPRT);
+
+		read_only = (wrtprt & (1 << slot->id)) ? 1 : 0;
+	}
+
+	dev_dbg(&mmc->class_dev, "card is %s\n",
+		read_only ? "read-only" : "read-write");
+
+	return read_only;
+}
+
+/*
+ * mmc_host_ops .get_cd: report card presence.  Order of preference:
+ * the BROKEN_CARD_DETECTION quirk (always present), the board get_cd
+ * callback (inverted), then the controller CDETECT register, which is
+ * active-low (bit clear == card present).
+ */
+static int fh_mci_get_cd(struct mmc_host *mmc)
+{
+	int present;
+	struct fh_mci_slot *slot = mmc_priv(mmc);
+	struct fh_mci_board *brd = slot->host->pdata;
+	struct fh_mci *host = slot->host;
+
+	/* Use platform get_cd function, else try onboard card detect */
+	if (brd->quirks & FH_MCI_QUIRK_BROKEN_CARD_DETECTION)
+		present = 1;
+	else if (brd->get_cd)
+		present = !brd->get_cd(slot->id);
+	else
+		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
+			== 0 ? 1 : 0;
+
+	/* NOTE(review): lock is only held around debug output here. */
+	spin_lock_bh(&host->lock);
+	if (present)
+		dev_dbg(&mmc->class_dev, "card is present\n");
+	else
+		dev_dbg(&mmc->class_dev, "card is not present\n");
+	spin_unlock_bh(&host->lock);
+
+	return present;
+}
+
+/*
+ * Disable lower power mode.
+ *
+ * Low power mode will stop the card clock when idle. According to the
+ * description of the CLKENA register we should disable low power mode
+ * for SDIO cards if we need SDIO interrupts to work.
+ *
+ * This function is fast if low power mode is already disabled.
+ */
+static void fh_mci_disable_low_power(struct fh_mci_slot *slot)
+{
+	struct fh_mci *host = slot->host;
+	u32 clk_en_a;
+	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+
+	clk_en_a = mci_readl(host, CLKENA);
+
+	if (clk_en_a & clken_low_pwr) {
+		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
+		/* CLKENA changes only take effect after informing the CIU. */
+		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
+			     SDMMC_CMD_PRV_DAT_WAIT, 0);
+	}
+}
+
+/*
+ * mmc_host_ops .enable_sdio_irq: (un)mask the per-slot SDIO interrupt.
+ * Low-power clock gating must be turned off while SDIO IRQs are enabled,
+ * since a gated card clock would keep the card from signalling an IRQ.
+ */
+static void fh_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
+{
+	struct fh_mci_slot *slot = mmc_priv(mmc);
+	struct fh_mci *host = slot->host;
+	u32 int_mask;
+
+	/* Enable/disable Slot Specific SDIO interrupt */
+	int_mask = mci_readl(host, INTMASK);
+	if (enb) {
+		/*
+		 * Turn off low power mode if it was enabled.  This is a bit of
+		 * a heavy operation and we disable / enable IRQs a lot, so
+		 * we'll leave low power mode disabled and it will get
+		 * re-enabled again in fh_mci_setup_bus().
+		 */
+		fh_mci_disable_low_power(slot);
+
+		mci_writel(host, INTMASK,
+			   (int_mask | SDMMC_INT_SDIO(slot->id)));
+	} else {
+		mci_writel(host, INTMASK,
+			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
+	}
+}
+
+
+/* Host operations exported to the MMC core. */
+static const struct mmc_host_ops fh_mci_ops = {
+	.request = fh_mci_request,
+	.set_ios = fh_mci_set_ios,
+	.get_ro = fh_mci_get_ro,
+	.get_cd = fh_mci_get_cd,
+
+
+	.enable_sdio_irq = fh_mci_enable_sdio_irq,
+};
+
+/*
+ * Finish @mrq and start the next queued request, if any.  host->lock is
+ * dropped around mmc_request_done() because the MMC core may re-enter
+ * the driver (e.g. queue a new request) from that callback.
+ */
+static void fh_mci_request_end(struct fh_mci *host, struct mmc_request *mrq)
+	__releases(&host->lock)
+	__acquires(&host->lock)
+{
+	struct fh_mci_slot *slot;
+	struct mmc_host *prev_mmc = host->cur_slot->mmc;
+
+	if(host->data && host->data->error)
+		printk(KERN_ERR "fh SDC : func request_end\n");
+
+	WARN_ON(host->cmd || host->data);
+
+	host->cur_slot->mrq = NULL;
+	host->mrq = NULL;
+	if (!list_empty(&host->queue)) {
+		slot = list_entry(host->queue.next,
+				  struct fh_mci_slot, queue_node);
+		list_del(&slot->queue_node);
+		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
+			 mmc_hostname(slot->mmc));
+		host->state = STATE_SENDING_CMD;
+		fh_mci_start_request(host, slot);
+	} else {
+		dev_vdbg(&host->pdev->dev, "list empty\n");
+		host->state = STATE_IDLE;
+	}
+
+	spin_unlock(&host->lock);
+	mmc_request_done(prev_mmc, mrq);
+	spin_lock(&host->lock);
+}
+
+/*
+ * Copy the response registers into @cmd and translate controller status
+ * bits into an errno.  For 136-bit responses the hardware delivers the
+ * words reversed, hence resp[3] = RESP0 .. resp[0] = RESP3.  On error,
+ * any associated DMA transfer is aborted.
+ */
+static void fh_mci_command_complete(struct fh_mci *host, struct mmc_command *cmd)
+{
+	u32 status = host->cmd_status;
+
+	host->cmd_status = 0;
+
+	/* Read the response from the card (up to 16 bytes) */
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		if (cmd->flags & MMC_RSP_136) {
+			cmd->resp[3] = mci_readl(host, RESP0);
+			cmd->resp[2] = mci_readl(host, RESP1);
+			cmd->resp[1] = mci_readl(host, RESP2);
+			cmd->resp[0] = mci_readl(host, RESP3);
+		} else {
+			cmd->resp[0] = mci_readl(host, RESP0);
+			cmd->resp[1] = 0;
+			cmd->resp[2] = 0;
+			cmd->resp[3] = 0;
+		}
+
+	}
+
+	if (status & SDMMC_INT_RTO)
+		cmd->error = -ETIMEDOUT;
+	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
+		cmd->error = -EILSEQ;
+	else if (status & SDMMC_INT_RESP_ERR)
+		cmd->error = -EIO;
+	else
+		cmd->error = 0;
+
+	if (cmd->error) {
+		/* newer ip versions need a delay between retries */
+		if (host->quirks & FH_MCI_QUIRK_RETRY_DELAY)
+			mdelay(20);
+
+		if (cmd->data) {
+			host->data = NULL;
+			fh_mci_stop_dma(host);
+		}
+	}
+}
+
+/*
+ * Bottom-half state machine.  Driven by bits in host->pending_events set
+ * from the interrupt handler, it walks a request through
+ * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP (each case
+ * intentionally falls through to the next once its event arrives),
+ * diverting through STATE_DATA_ERROR on transfer errors, and finally
+ * completes the request via fh_mci_request_end().
+ */
+static void fh_mci_tasklet_func(unsigned long priv)
+{
+	struct fh_mci *host = (struct fh_mci *)priv;
+	struct mmc_data *data;
+	struct mmc_command *cmd;
+	enum fh_mci_state state;
+	enum fh_mci_state prev_state;
+	u32 status;
+
+	spin_lock(&host->lock);
+
+	state = host->state;
+	data = host->data;
+
+	do {
+		prev_state = state;
+
+		switch (state) {
+		case STATE_IDLE:
+			break;
+
+		case STATE_SENDING_CMD:
+			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+						&host->pending_events))
+				break;
+
+			cmd = host->cmd;
+			host->cmd = NULL;
+			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
+			fh_mci_command_complete(host, host->mrq->cmd);
+
+
+			if (!host->mrq->data || cmd->error) {
+				fh_mci_request_end(host, host->mrq);
+				goto unlock;
+			}
+
+			prev_state = state = STATE_SENDING_DATA;
+			/* fall through */
+
+		case STATE_SENDING_DATA:
+			if (test_and_clear_bit(EVENT_DATA_ERROR,
+					       &host->pending_events)) {
+				printk(KERN_ERR "fh SDC : STATE_SENDING_DATA EVENT_DATA_ERROR\n");
+				fh_mci_stop_dma(host);
+				if (data->stop)
+					send_stop_cmd(host, data);
+				state = STATE_DATA_ERROR;
+				break;
+			}
+
+			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+						&host->pending_events))
+				break;
+
+			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
+			prev_state = state = STATE_DATA_BUSY;
+			/* fall through */
+
+		case STATE_DATA_BUSY:
+			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
+						&host->pending_events))
+				break;
+
+			host->data = NULL;
+			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
+			status = host->data_status;
+
+			if (status & FH_MCI_DATA_ERROR_FLAGS) {
+				printk(KERN_ERR "fh SDC : STATE_DATA_BUSY\n");
+				if (status & SDMMC_INT_DTO) {
+					dev_err(&host->pdev->dev,
+						"data timeout error\n");
+					data->error = -ETIMEDOUT;
+				} else if (status & SDMMC_INT_DCRC) {
+					dev_err(&host->pdev->dev,
+						"data CRC error\n");
+					data->error = -EILSEQ;
+				} else {
+					dev_err(&host->pdev->dev,
+						"data FIFO error "
+						"(status=%08x)\n",
+						status);
+					data->error = -EIO;
+				}
+			} else {
+				data->bytes_xfered = data->blocks * data->blksz;
+				data->error = 0;
+			}
+
+			if (!data->stop) {
+				fh_mci_request_end(host, host->mrq);
+				goto unlock;
+			}
+
+			prev_state = state = STATE_SENDING_STOP;
+			/* On error the stop was already sent by the error path. */
+			if (!data->error)
+				send_stop_cmd(host, data);
+			/* fall through */
+
+		case STATE_SENDING_STOP:
+			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+						&host->pending_events))
+				break;
+
+			host->cmd = NULL;
+			fh_mci_command_complete(host, host->mrq->stop);
+			fh_mci_request_end(host, host->mrq);
+			goto unlock;
+
+		case STATE_DATA_ERROR:
+			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+						&host->pending_events))
+				break;
+			printk(KERN_ERR "fh SDC : STATE_DATA_ERROR\n");
+
+			state = STATE_DATA_BUSY;
+			break;
+		}
+	} while (state != prev_state);
+	host->state = state;
+unlock:
+	spin_unlock(&host->lock);
+	;
+
+}
+
+/* PIO: push @cnt bytes from @buf into the FIFO using 16-bit accesses. */
+static void fh_mci_push_data16(struct fh_mci *host, void *buf, int cnt)
+{
+	u16 *pdata = buf;
+	int i;
+
+	WARN_ON(cnt % 2 != 0);
+
+	for (i = 0; i < cnt / 2; i++)
+		mci_writew(host, DATA, pdata[i]);
+}
+
+/* PIO: pull @cnt bytes from the FIFO into @buf using 16-bit accesses. */
+static void fh_mci_pull_data16(struct fh_mci *host, void *buf, int cnt)
+{
+	u16 *pdata = buf;
+	int i;
+
+	WARN_ON(cnt % 2 != 0);
+
+	for (i = 0; i < cnt / 2; i++)
+		pdata[i] = mci_readw(host, DATA);
+}
+
+/* PIO: push @cnt bytes from @buf into the FIFO using 32-bit accesses. */
+static void fh_mci_push_data32(struct fh_mci *host, void *buf, int cnt)
+{
+	u32 *pdata = buf;
+	int i;
+
+	WARN_ON(cnt % 4 != 0);
+	WARN_ON((unsigned long)pdata & 0x3);
+
+	for (i = 0; i < cnt / 4; i++)
+		mci_writel(host, DATA, pdata[i]);
+}
+
+/* PIO: pull @cnt bytes from the FIFO into @buf using 32-bit accesses. */
+static void fh_mci_pull_data32(struct fh_mci *host, void *buf, int cnt)
+{
+	u32 *pdata = buf;
+	int i;
+
+	WARN_ON(cnt % 4 != 0);
+	WARN_ON((unsigned long)pdata & 0x3);
+
+	for (i = 0; i < cnt / 4; i++)
+		pdata[i] = mci_readl(host, DATA);
+}
+
+/* PIO: push @cnt bytes from @buf into the FIFO using 64-bit accesses. */
+static void fh_mci_push_data64(struct fh_mci *host, void *buf, int cnt)
+{
+	u64 *pdata = buf;
+	int i;
+
+	WARN_ON(cnt % 8 != 0);
+
+	for (i = 0; i < cnt / 8; i++)
+		mci_writeq(host, DATA, pdata[i]);
+}
+
+/* PIO: pull @cnt bytes from the FIFO into @buf using 64-bit accesses. */
+static void fh_mci_pull_data64(struct fh_mci *host, void *buf, int cnt)
+{
+	u64 *pdata = buf;
+	int i;
+
+	WARN_ON(cnt % 8 != 0);
+
+	for (i = 0; i < cnt / 8; i++)
+		pdata[i] = mci_readq(host, DATA);
+}
+
+/*
+ * PIO receive path: drain the controller FIFO into the scatterlist,
+ * crossing sg-entry boundaries as needed.  Keeps reading while the RXDR
+ * (watermark) status stays set; on a data error the tasklet is scheduled
+ * and the function bails out.
+ */
+static void fh_mci_read_data_pio(struct fh_mci *host)
+{
+	struct scatterlist *sg = host->sg;
+	void *buf = sg_virt(sg);
+	unsigned int offset = host->pio_offset;
+	struct mmc_data *data = host->data;
+	int shift = host->data_shift;
+	u32 status;
+	unsigned int nbytes = 0, len=0;
+
+	do {
+		/* Bytes currently available in the FIFO. */
+		len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+		if (offset + len <= sg->length) {
+
+			host->pull_data(host, (void *)(buf + offset), len);
+			offset += len;
+			nbytes += len;
+
+			if (offset == sg->length) {
+				flush_dcache_page(sg_page(sg));
+				host->sg = sg = sg_next(sg);
+				if (!sg)
+					goto done;
+
+				offset = 0;
+				buf = sg_virt(sg);
+			}
+		} else {
+			/* FIFO data straddles an sg boundary: split the read. */
+			unsigned int remaining = sg->length - offset;
+			host->pull_data(host, (void *)(buf + offset),
+					remaining);
+			nbytes += remaining;
+
+			flush_dcache_page(sg_page(sg));
+			host->sg = sg = sg_next(sg);
+			if (!sg)
+				goto done;
+
+			offset = len - remaining;
+			buf = sg_virt(sg);
+			host->pull_data(host, buf, offset);
+			nbytes += offset;
+		}
+
+		status = mci_readl(host, MINTSTS);
+		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+		if (status & FH_MCI_DATA_ERROR_FLAGS) {
+			host->data_status = status;
+			data->bytes_xfered += nbytes;
+			smp_wmb();
+			printk("data error in read pio\n");
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+			tasklet_schedule(&host->tasklet);
+			return;
+		}
+	} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
+	/* NOTE(review): result unused — looks like a leftover debug read. */
+	len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
+	host->pio_offset = offset;
+	data->bytes_xfered += nbytes;
+	return;
+
+done:
+	data->bytes_xfered += nbytes;
+	smp_wmb();
+	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+/*
+ * PIO transmit path: fill the free space of the controller FIFO from the
+ * scatterlist, crossing sg-entry boundaries as needed.  Keeps writing
+ * while the TXDR (watermark) status stays set; on a data error the
+ * tasklet is scheduled and the function bails out.
+ */
+static void fh_mci_write_data_pio(struct fh_mci *host)
+{
+	struct scatterlist *sg = host->sg;
+	void *buf = sg_virt(sg);
+	unsigned int offset = host->pio_offset;
+	struct mmc_data *data = host->data;
+	int shift = host->data_shift;
+	u32 status;
+	unsigned int nbytes = 0, len;
+
+	do {
+		/* Free space in the FIFO, in bytes. */
+		len = SDMMC_FIFO_SZ -
+			(SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+		if (offset + len <= sg->length) {
+			host->push_data(host, (void *)(buf + offset), len);
+
+
+			offset += len;
+			nbytes += len;
+			if (offset == sg->length) {
+				host->sg = sg = sg_next(sg);
+				if (!sg)
+					goto done;
+
+				offset = 0;
+				buf = sg_virt(sg);
+			}
+		} else {
+			/* Write straddles an sg boundary: split the push. */
+			unsigned int remaining = sg->length - offset;
+
+			host->push_data(host, (void *)(buf + offset),
+					remaining);
+			nbytes += remaining;
+			host->sg = sg = sg_next(sg);
+			if (!sg)
+				goto done;
+
+			offset = len - remaining;
+			buf = sg_virt(sg);
+			host->push_data(host, (void *)buf, offset);
+			nbytes += offset;
+		}
+
+		status = mci_readl(host, MINTSTS);
+		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+		if (status & FH_MCI_DATA_ERROR_FLAGS) {
+			host->data_status = status;
+			data->bytes_xfered += nbytes;
+
+			smp_wmb();
+			printk("data error in write pio\n");
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+			tasklet_schedule(&host->tasklet);
+			return;
+		}
+	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
+
+	host->pio_offset = offset;
+	data->bytes_xfered += nbytes;
+	//flag_int = mci_readl(host, INTMASK);
+	//mci_writel(host, INTMASK, flag_int|0x4);
+	return;
+
+done:
+	data->bytes_xfered += nbytes;
+	smp_wmb();
+	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+/*
+ * Record command-done status (the first recorded status wins) and hand
+ * off to the tasklet.  smp_wmb() orders the status store before the
+ * event bit so the tasklet sees a consistent cmd_status.
+ */
+static void fh_mci_cmd_interrupt(struct fh_mci *host, u32 status)
+{
+	if (!host->cmd_status)
+		host->cmd_status = status;
+
+	smp_wmb();
+
+	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+	tasklet_schedule(&host->tasklet);
+}
+
+
+/*
+ * Top-half interrupt handler.  Acks each masked-status bit in RINTSTS,
+ * records command/data status for the tasklet, services PIO watermark
+ * interrupts inline, dispatches per-slot SDIO and card-detect events,
+ * and finally handles IDMAC completion interrupts via IDSTS.
+ */
+static irqreturn_t fh_mci_interrupt(int irq, void *dev_id)
+{
+	struct fh_mci *host = dev_id;
+	u32 pending;
+	int i;
+	u32 cmd, arg, rint, resp0, resp1, resp2, resp3;
+	#ifdef SDC_CRC_TEST
+	struct clk *sdc_clk;
+	#endif
+
+	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+
+	if (pending) {
+		if (pending & FH_MCI_CMD_ERROR_FLAGS) {
+			mci_writel(host, RINTSTS, FH_MCI_CMD_ERROR_FLAGS);
+			host->cmd_status = pending;
+			smp_wmb();
+			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+		}
+
+		if (pending & FH_MCI_DATA_ERROR_FLAGS) {
+#ifdef SDC_CRC_TEST
+			gpio_direction_output(TEST_GPIO, 1);
+			__gpio_set_value(TEST_GPIO, 1);
+#endif
+			host->data_error_flag = 1;
+			rint = mci_readl(host, RINTSTS);
+			/* if there is an error report DATA_ERROR */
+			mci_writel(host, RINTSTS, FH_MCI_DATA_ERROR_FLAGS);
+			host->data_status = pending;
+			smp_wmb();
+			cmd = mci_readl(host, CMD);
+			arg = mci_readl(host, CMDARG);
+			printk("data error in interrupt, cmd=0x%x, args=0x%x, rintsts=0x%x\n",
+				cmd, arg, rint);
+
+			resp0 = mci_readl(host, RESP0);
+			resp1 = mci_readl(host, RESP1);
+			resp2 = mci_readl(host, RESP2);
+			resp3 = mci_readl(host, RESP3);
+
+			printk("resp0=0x%x, resp1=0x%x, resp2=0x%x, resp3=0x%x\n",
+				resp0, resp1, resp2, resp3);
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+#ifdef SDC_CRC_TEST
+			sdc_clk = clk_get(NULL, "sdc0_clk");
+			clk_disable(sdc_clk);
+
+			printk("!!!!!!!!!!!sdc stopped!!!!!!!!!!!!\n");
+			panic("really terrible\n");
+#endif
+			tasklet_schedule(&host->tasklet);
+
+		}
+
+		if (likely(pending & SDMMC_INT_DATA_OVER)) {
+			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
+			if (!host->data_status)
+				host->data_status = pending;
+			smp_wmb();
+			/* Drain whatever is still left in the FIFO (PIO reads). */
+			if (host->dir_status == FH_MCI_RECV_STATUS) {
+				if (host->sg != NULL)
+					fh_mci_read_data_pio(host);
+			}
+			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & SDMMC_INT_RXDR) {
+			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+			if (host->dir_status == FH_MCI_RECV_STATUS && host->sg)
+				fh_mci_read_data_pio(host);
+		}
+
+		if (pending & SDMMC_INT_TXDR) {
+			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+			if (host->dir_status == FH_MCI_SEND_STATUS && host->sg)
+				fh_mci_write_data_pio(host);
+		}
+
+		if (likely(pending & SDMMC_INT_CMD_DONE)) {
+			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
+			fh_mci_cmd_interrupt(host, pending);
+		}
+
+		if (pending & SDMMC_INT_CD) {
+			mci_writel(host, RINTSTS, SDMMC_INT_CD);
+			tasklet_schedule(&host->card_tasklet);
+		}
+
+		/* Handle SDIO Interrupts */
+		for (i = 0; i < host->num_slots; i++) {
+			struct fh_mci_slot *slot = host->slot[i];
+			if (pending & SDMMC_INT_SDIO(i)) {
+				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
+				mmc_signal_sdio_irq(slot->mmc);
+			}
+		}
+
+	}
+#ifdef CONFIG_MMC_FH_IDMAC
+	/* Handle DMA interrupts */
+	pending = mci_readl(host, IDSTS);
+	if (likely(pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))) {
+		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_NI);
+		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+		host->dma_ops->complete(host);
+	}
+#endif
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Card-detect tasklet: for each slot, loop until the presence state is
+ * stable (a simple debounce); on each change, power the slot up or down,
+ * fail any request still in flight with -ENOMEDIUM according to the
+ * current state-machine phase, reset the FIFO/IDMAC on removal, and
+ * finally ask the MMC core to rescan the slot.
+ */
+static void fh_mci_tasklet_card(unsigned long data)
+{
+	struct fh_mci *host = (struct fh_mci *)data;
+	int i;
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct fh_mci_slot *slot = host->slot[i];
+		struct mmc_host *mmc = slot->mmc;
+		struct mmc_request *mrq;
+		int present;
+		u32 ctrl;
+		present = fh_mci_get_cd(mmc);
+		while (present != slot->last_detect_state) {
+			dev_dbg(&slot->mmc->class_dev, "card %s\n",
+				present ? "inserted" : "removed");
+
+			spin_lock(&host->lock);
+
+			/* Card change detected */
+			slot->last_detect_state = present;
+
+			/* Power up slot */
+			if (present != 0) {
+				if (host->pdata->setpower)
+					host->pdata->setpower(slot->id,
+							      mmc->ocr_avail);
+
+				set_bit(FH_MMC_CARD_PRESENT, &slot->flags);
+			}
+
+			/* Clean up queue if present */
+			mrq = slot->mrq;
+			if (mrq) {
+				if (mrq == host->mrq) {
+					/* Request is active: fail it per state. */
+					host->data = NULL;
+					host->cmd = NULL;
+
+					switch (host->state) {
+					case STATE_IDLE:
+						break;
+					case STATE_SENDING_CMD:
+						mrq->cmd->error = -ENOMEDIUM;
+						if (!mrq->data)
+							break;
+						/* fall through */
+					case STATE_SENDING_DATA:
+						mrq->data->error = -ENOMEDIUM;
+						fh_mci_stop_dma(host);
+						break;
+					case STATE_DATA_BUSY:
+					case STATE_DATA_ERROR:
+						printk("STATE_DATA_ERROR in tasklet card\n");
+						if (mrq->data->error == -EINPROGRESS)
+							mrq->data->error = -ENOMEDIUM;
+						if (!mrq->stop)
+							break;
+						/* fall through */
+					case STATE_SENDING_STOP:
+						mrq->stop->error = -ENOMEDIUM;
+						break;
+					}
+
+					fh_mci_request_end(host, mrq);
+				} else {
+					/* Request is still queued: just fail it. */
+					list_del(&slot->queue_node);
+					mrq->cmd->error = -ENOMEDIUM;
+					if (mrq->data)
+						mrq->data->error = -ENOMEDIUM;
+					if (mrq->stop)
+						mrq->stop->error = -ENOMEDIUM;
+
+					spin_unlock(&host->lock);
+					mmc_request_done(slot->mmc, mrq);
+					spin_lock(&host->lock);
+				}
+			}
+
+			/* Power down slot */
+			if (present == 0) {
+				if (host->pdata->setpower)
+					host->pdata->setpower(slot->id, 0);
+				clear_bit(FH_MMC_CARD_PRESENT, &slot->flags);
+
+				/*
+				 * Clear down the FIFO - doing so generates a
+				 * block interrupt, hence setting the
+				 * scatter-gather pointer to NULL.
+				 */
+				host->sg = NULL;
+
+				ctrl = mci_readl(host, CTRL);
+				ctrl |= SDMMC_CTRL_FIFO_RESET;
+				mci_writel(host, CTRL, ctrl);
+
+#ifdef CONFIG_MMC_FH_IDMAC
+				ctrl = mci_readl(host, BMOD);
+				ctrl |= SDMMC_IDMAC_SWRESET; /* Software reset of DMA */
+				mci_writel(host, BMOD, ctrl);
+#endif
+
+			}
+
+			spin_unlock(&host->lock);
+			present = fh_mci_get_cd(mmc);
+		}
+
+		mmc_detect_change(slot->mmc,
+			msecs_to_jiffies(host->pdata->detect_delay_ms));
+	}
+}
+
+/*
+ * Allocate and register the mmc_host for slot @id: install host ops,
+ * derive the clock range from bus_hz, set the OCR mask, bus-width caps
+ * and transfer limits, grab the (optional) vmmc regulator, then add the
+ * host and kick the card-detect tasklet in case a card is already
+ * inserted.  Returns 0 on success or -ENOMEM.
+ */
+static int __init fh_mci_init_slot(struct fh_mci *host, unsigned int id)
+{
+	struct mmc_host *mmc;
+	struct fh_mci_slot *slot;
+
+	mmc = mmc_alloc_host(sizeof(struct fh_mci_slot), &host->pdev->dev);
+	if (!mmc)
+		return -ENOMEM;
+
+	slot = mmc_priv(mmc);
+	slot->id = id;
+	slot->mmc = mmc;
+	slot->host = host;
+
+	mmc->ops = &fh_mci_ops;
+	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
+	mmc->f_max = 50000000;//12500000;
+
+	if (host->pdata->get_ocr)
+		mmc->ocr_avail = host->pdata->get_ocr(id);
+	else
+		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+	/*
+	 * Start with slot power disabled, it will be enabled when a card
+	 * is detected.
+	 */
+	if (host->pdata->setpower)
+		host->pdata->setpower(id, 0);
+
+	if (host->pdata->caps)
+		mmc->caps = host->pdata->caps;
+	else
+		mmc->caps = 0;
+
+	if (host->pdata->get_bus_wd)
+		if (host->pdata->get_bus_wd(slot->id) >= 4)
+			mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+	if (host->pdata->quirks & FH_MCI_QUIRK_HIGHSPEED)
+		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
+#ifdef CONFIG_MMC_FH_IDMAC
+	/* Useful defaults if platform data is unset. */
+	mmc->max_segs = 64;
+	mmc->max_blk_size = 65536;	/* BLKSIZ is 16 bits */
+	mmc->max_blk_count = 512;
+	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+	mmc->max_seg_size = mmc->max_req_size;
+#else
+	if (host->pdata->blk_settings) {
+		mmc->max_segs = host->pdata->blk_settings->max_segs;
+		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
+		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
+		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
+		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
+	} else {
+		/* Useful defaults if platform data is unset. */
+		mmc->max_segs = 64;
+		mmc->max_blk_size = 65536;	/* BLKSIZ is 16 bits */
+		mmc->max_blk_count = 512;
+		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+		mmc->max_seg_size = mmc->max_req_size;
+	}
+#endif /* CONFIG_MMC_FH_IDMAC */
+
+	/* The vmmc regulator is optional; NULL means "not present". */
+	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+	if (IS_ERR(host->vmmc)) {
+		host->vmmc = NULL;
+	} else
+		regulator_enable(host->vmmc);
+
+	if (fh_mci_get_cd(mmc))
+		set_bit(FH_MMC_CARD_PRESENT, &slot->flags);
+	else
+		clear_bit(FH_MMC_CARD_PRESENT, &slot->flags);
+
+	host->slot[id] = slot;
+	mmc_add_host(mmc);
+
+#if defined(CONFIG_DEBUG_FS)
+	fh_mci_init_debugfs(slot);
+#endif
+
+	/* Card initially undetected */
+	slot->last_detect_state = 0;
+
+	/*
+	 * Card may have been plugged in prior to boot so we
+	 * need to run the detect tasklet
+	 */
+	tasklet_schedule(&host->card_tasklet);
+
+	return 0;
+}
+
+static void fh_mci_cleanup_slot(struct fh_mci_slot *slot, unsigned int id)
+{
+ /* Shutdown detect IRQ */
+ if (slot->host->pdata->exit)
+ slot->host->pdata->exit(id);
+
+ /* Debugfs stuff is cleaned up by mmc core */
+ mmc_remove_host(slot->mmc);
+ slot->host->slot[id] = NULL;
+ mmc_free_host(slot->mmc);
+}
+
+static void fh_mci_init_dma(struct fh_mci *host)
+{
+ /* Alloc memory for sg translation */
+ host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, SDC_DESC_SIZE,
+ &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu) {
+ dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
+ __func__);
+ goto no_dma;
+ }
+
+ /* Determine which DMA interface to use */
+#ifdef CONFIG_MMC_FH_IDMAC
+ host->dma_ops = &fh_mci_idmac_ops;
+ dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
+#endif
+
+ if (!host->dma_ops)
+ goto no_dma;
+
+ if (host->dma_ops->init) {
+ if (host->dma_ops->init(host)) {
+ dev_err(&host->pdev->dev, "%s: Unable to initialize "
+ "DMA Controller.\n", __func__);
+ goto no_dma;
+ }
+ } else {
+ dev_err(&host->pdev->dev, "DMA initialization not found.\n");
+ goto no_dma;
+ }
+
+ host->use_dma = 1;
+ return;
+
+no_dma:
+ dev_info(&host->pdev->dev, "Using PIO mode.\n");
+ host->use_dma = 0;
+ return;
+}
+
+static bool mci_wait_reset(struct device *dev, struct fh_mci *host)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ unsigned int ctrl;
+
+ mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+ SDMMC_CTRL_DMA_RESET));
+
+ /* wait till resets clear */
+ do {
+ ctrl = mci_readl(host, CTRL);
+ if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+ SDMMC_CTRL_DMA_RESET)))
+ return true;
+ } while (time_before(jiffies, timeout));
+
+ dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
+
+ return false;
+}
+
+static int fh_mci_probe(struct platform_device *pdev)
+{
+ struct fh_mci *host;
+ struct resource *regs;
+ struct fh_mci_board *pdata;
+ int irq, ret, i, width;
+ u32 fifo_size;
+ u32 reg;
+ struct clk *sdc_clk;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ host = kzalloc(sizeof(struct fh_mci), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->pdev = pdev;
+ host->pdata = pdata = pdev->dev.platform_data;
+ if (!pdata || !pdata->init) {
+ dev_err(&pdev->dev,
+ "Platform data must supply init function\n");
+ ret = -ENODEV;
+ goto err_freehost;
+ }
+
+ if (!pdata->select_slot && pdata->num_slots > 1) {
+ dev_err(&pdev->dev,
+ "Platform data must supply select_slot function\n");
+ ret = -ENODEV;
+ goto err_freehost;
+ }
+
+ if (!pdata->bus_hz) {
+ dev_err(&pdev->dev,
+ "Platform data must supply bus speed\n");
+ ret = -ENODEV;
+ goto err_freehost;
+ }
+
+ host->bus_hz = pdata->bus_hz;
+ host->quirks = pdata->quirks;
+
+ spin_lock_init(&host->lock);
+ INIT_LIST_HEAD(&host->queue);
+
+ pdata->init(pdev->id, NULL, NULL);
+
+ ret = -ENOMEM;
+ //enable clk
+
+ if(pdev->id){
+ ret = gpio_request(6, NULL);
+ if(ret){
+ printk("gpio requset err\n");
+ ret = -ENODEV;
+ return ret;
+ }
+ gpio_direction_output(6,0);//set power on
+ sdc_clk = clk_get(NULL, "sdc1_clk");
+ clk_enable(sdc_clk);
+// *(int *)0xfe900020 =0x100000;//wait for modify
+
+ clk_set_rate(sdc_clk,50000000);
+ reg = clk_get_clk_sel();
+ reg |=1<<12;
+ reg &=~(1<<13);
+ clk_set_clk_sel(reg);
+ }
+ else
+ {
+ ret = gpio_request(5, NULL);
+ if(ret){
+ printk("gpio requset err\n");
+ ret = -ENODEV;
+ return ret;
+ }
+ gpio_direction_output(5,0);//set power on
+ sdc_clk = clk_get(NULL, "sdc0_clk");
+ clk_enable(sdc_clk);
+
+ clk_set_rate(sdc_clk,50000000);
+ reg = clk_get_clk_sel();
+ reg |=1<<20;
+ reg &=~(1<<21);
+
+#define SIMPLE_0
+//#define SIMPLE_90
+//#define SIMPLE_180
+//#define SIMPLE_270
+
+#ifdef SIMPLE_0
+ //0
+ reg &=~(1<<17);
+ reg &=~(1<<16);
+#endif
+#ifdef SIMPLE_90
+ //90
+ reg |=(1<<16);
+ reg &=~(1<<17);
+#endif
+#ifdef SIMPLE_180
+ //180
+ reg &=~(1<<16);
+ reg |=(1<<17);
+#endif
+#ifdef SIMPLE_270
+ //270
+ reg |=(1<<17);
+ reg |=(1<<16);
+#endif
+ clk_set_clk_sel(reg);
+
+ }
+
+ //io_remap
+ host->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ //host->regs = 0xfe700000;
+ if (!host->regs)
+ goto err_freehost;
+
+ //host->dma_ops = pdata->dma_ops;
+ fh_mci_init_dma(host);
+
+ /*
+ * Get the host data width - this assumes that HCON has been set with
+ * the correct values.
+ */
+ i = (mci_readl(host, HCON) >> 7) & 0x7;
+ if (!i) {
+ host->push_data = fh_mci_push_data16;
+ host->pull_data = fh_mci_pull_data16;
+ width = 16;
+ host->data_shift = 1;
+ } else if (i == 2) {
+ host->push_data = fh_mci_push_data64;
+ host->pull_data = fh_mci_pull_data64;
+ width = 64;
+ host->data_shift = 3;
+ } else {
+ /* Check for a reserved value, and warn if it is */
+ WARN((i != 1),
+
+ "HCON reports a reserved host data width!\n"
+ "Defaulting to 32-bit access.\n");
+ host->push_data = fh_mci_push_data32;
+ host->pull_data = fh_mci_pull_data32;
+ width = 32;
+ host->data_shift = 2;
+ }
+
+ /* Reset all blocks */
+ if (!mci_wait_reset(&pdev->dev, host)) {
+ ret = -ENODEV;
+ goto err_dmaunmap;
+ }
+
+ /* Clear the interrupts for the host controller */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+ /* Put in max timeout */
+ mci_writel(host, TMOUT, 0xFFFFFFFF);
+
+ /*
+ * FIFO threshold settings RxMark = fifo_size / 2 - 1,
+ * Tx Mark = fifo_size / 2 DMA Size = 8
+ */
+ fifo_size = mci_readl(host, FIFOTH);
+ fifo_size = 1+((fifo_size >> 16) & 0x7ff);
+ host->fifoth_val =
+ SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
+ mci_writel(host, FIFOTH, host->fifoth_val);
+
+ /* disable clock to CIU */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ tasklet_init(&host->tasklet, fh_mci_tasklet_func, (unsigned long)host);
+ tasklet_init(&host->card_tasklet,
+ fh_mci_tasklet_card, (unsigned long)host);
+
+ ret = request_irq(irq, fh_mci_interrupt, 0, "fh-mci", host);
+ if (ret)
+ goto err_dmaunmap;
+
+ platform_set_drvdata(pdev, host);
+
+ if (host->pdata->num_slots)
+ host->num_slots = host->pdata->num_slots;
+ else
+ host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+
+ /* We need at least one slot to succeed */
+ for (i = 0; i < host->num_slots; i++) {
+ ret = fh_mci_init_slot(host, i);
+ if (ret) {
+ ret = -ENODEV;
+ goto err_init_slot;
+ }
+ }
+
+ /*
+ * Enable interrupts for command done, data over, data empty, card det,
+ * receive ready and error such as transmit, receive timeout, crc error
+ */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |SDMMC_INT_RTO | SDMMC_INT_DTO |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ FH_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+ dev_info(&pdev->dev, "FH MMC controller at irq %d, "
+ "%d bit host data width\n", irq, width);
+ if (host->quirks & FH_MCI_QUIRK_IDMAC_DTO)
+ dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
+#ifdef SDC_CRC_TEST
+ ret = gpio_request(TEST_GPIO, "SDC_TEST");
+
+ if(ret)
+ {
+ printk("!!!!!!!!!!SDC gpio_request failed!!!!!!!!!\n");
+ }
+
+ gpio_direction_output(TEST_GPIO, 1);
+ __gpio_set_value(TEST_GPIO, 0);
+#endif
+ return 0;
+
+err_init_slot:
+ /* De-init any initialized slots */
+ while (i > 0) {
+ if (host->slot[i])
+ fh_mci_cleanup_slot(host->slot[i], i);
+ i--;
+ }
+ free_irq(irq, host);
+
+err_dmaunmap:
+ if (host->use_dma && host->dma_ops->exit)
+ host->dma_ops->exit(host);
+ dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
+ host->sg_cpu, host->sg_dma);
+ //iounmap(host->regs);
+
+ if (host->vmmc) {
+ regulator_disable(host->vmmc);
+ regulator_put(host->vmmc);
+ }
+
+
+err_freehost:
+ kfree(host);
+ return ret;
+}
+
+static int __exit fh_mci_remove(struct platform_device *pdev)
+{
+ struct fh_mci *host = platform_get_drvdata(pdev);
+ int i;
+
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+ platform_set_drvdata(pdev, NULL);
+
+ for (i = 0; i < host->num_slots; i++) {
+ dev_dbg(&pdev->dev, "remove slot %d\n", i);
+ if (host->slot[i])
+ fh_mci_cleanup_slot(host->slot[i], i);
+ }
+
+ /* disable clock to CIU */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ free_irq(platform_get_irq(pdev, 0), host);
+ dma_free_coherent(&pdev->dev, SDC_DESC_SIZE, host->sg_cpu, host->sg_dma);
+
+ if (host->use_dma && host->dma_ops->exit)
+ host->dma_ops->exit(host);
+
+ if (host->vmmc) {
+ regulator_disable(host->vmmc);
+ regulator_put(host->vmmc);
+ }
+
+ //iounmap(host->regs);
+#ifdef SDC_CRC_TEST
+ gpio_free(TEST_GPIO);
+#endif
+
+ kfree(host);
+ return 0;
+}
+
+#if CONFIG_PM
+/*
+ * TODO: we should probably disable the clock to the card in the suspend path.
+ */
+static int fh_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ int i, ret;
+ struct fh_mci *host = platform_get_drvdata(pdev);
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct fh_mci_slot *slot = host->slot[i];
+ if (!slot)
+ continue;
+ ret = mmc_suspend_host(slot->mmc);
+ if (ret < 0) {
+ while (--i >= 0) {
+ slot = host->slot[i];
+ if (slot)
+ mmc_resume_host(host->slot[i]->mmc);
+ }
+ return ret;
+ }
+ }
+
+ if (host->vmmc)
+ regulator_disable(host->vmmc);
+
+ return 0;
+}
+
+static int fh_mci_resume(struct platform_device *pdev)
+{
+ int i, ret;
+ struct fh_mci *host = platform_get_drvdata(pdev);
+
+ if (host->vmmc)
+ regulator_enable(host->vmmc);
+
+ if (host->dma_ops->init)
+ host->dma_ops->init(host);
+
+ if (!mci_wait_reset(&pdev->dev, host)) {
+ ret = -ENODEV;
+ return ret;
+ }
+
+ /* Restore the old value at FIFOTH register */
+ mci_writel(host, FIFOTH, host->fifoth_val);
+
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |SDMMC_INT_RTO | SDMMC_INT_DTO |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ FH_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct fh_mci_slot *slot = host->slot[i];
+ if (!slot)
+ continue;
+ ret = mmc_resume_host(host->slot[i]->mmc);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+#else
+#define fh_mci_suspend NULL
+#define fh_mci_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver fh_mci_driver = {
+ .remove = __exit_p(fh_mci_remove),
+ .suspend = fh_mci_suspend,
+ .resume = fh_mci_resume,
+ .driver = {
+ .name = "fh_mmc",
+ },
+};
+
+static int __init fh_mci_init(void)
+{
+ return platform_driver_probe(&fh_mci_driver, fh_mci_probe);
+}
+
+static void __exit fh_mci_exit(void)
+{
+ platform_driver_unregister(&fh_mci_driver);
+}
+
+module_init(fh_mci_init);
+module_exit(fh_mci_exit);
+
+MODULE_DESCRIPTION("FH Multimedia Card Interface driver");
+MODULE_AUTHOR("NXP Semiconductor VietNam");
+MODULE_AUTHOR("Imagination Technologies Ltd");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/fh_mmc.h b/drivers/mmc/host/fh_mmc.h
new file mode 100644
index 00000000..ddb9a63d
--- /dev/null
+++ b/drivers/mmc/host/fh_mmc.h
@@ -0,0 +1,233 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MMC_FH_MMC_H_
+#define _LINUX_MMC_FH_MMC_H_
+
+#include <linux/scatterlist.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+
+#define MAX_MCI_SLOTS 2
+
+enum fh_mci_state {
+ STATE_IDLE = 0,
+ STATE_SENDING_CMD,
+ STATE_SENDING_DATA,
+ STATE_DATA_BUSY,
+ STATE_SENDING_STOP,
+ STATE_DATA_ERROR,
+};
+
+enum {
+ EVENT_CMD_COMPLETE = 0,
+ EVENT_XFER_COMPLETE,
+ EVENT_DATA_COMPLETE,
+ EVENT_DATA_ERROR,
+ EVENT_XFER_ERROR
+};
+
+struct mmc_data;
+
+/**
+ * struct fh_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @regs: Pointer to MMIO registers.
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @pio_offset: Offset into the current scatterlist entry.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ * or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ * transfer is in progress.
+ * @use_dma: Whether DMA channel is initialized or not.
+ * @sg_dma: Bus address of DMA buffer.
+ * @sg_cpu: Virtual address of DMA buffer.
+ * @dma_ops: Pointer to platform-specific DMA callbacks.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ * command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @data_status: Snapshot of SR taken upon completion of the current
+ * data transfer. Only valid when EVENT_DATA_COMPLETE or
+ * EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ * to be sent.
+ * @dir_status: Direction of current transfer.
+ * @tasklet: Tasklet running the request state machine.
+ * @card_tasklet: Tasklet handling card detect.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ * to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ * processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ * rate and timeout calculations.
+ * @current_speed: Configured rate of the controller.
+ * @num_slots: Number of slots available.
+ * @pdev: Platform device associated with the MMC controller.
+ * @pdata: Platform data associated with the MMC controller.
+ * @slot: Slots sharing this MMC controller.
+ * @data_shift: log2 of FIFO item size.
+ * @push_data: Pointer to FIFO push function.
+ * @pull_data: Pointer to FIFO pull function.
+ * @quirks: Set of quirks that apply to specific versions of the IP.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @cur_slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ *
+ * The @mrq field of struct fh_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
+struct fh_mci {
+ spinlock_t lock;
+ void __iomem *regs;
+
+ struct scatterlist *sg;
+ unsigned int pio_offset;
+
+ struct fh_mci_slot *cur_slot;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ /* DMA interface members*/
+ int use_dma;
+ int using_dma;
+ unsigned int prev_blksz;
+
+ dma_addr_t sg_dma;
+ void *sg_cpu;
+ struct fh_mci_dma_ops *dma_ops;
+#ifdef CONFIG_MMC_FH_IDMAC
+ unsigned int ring_size;
+#else
+ struct fh_mci_dma_data *dma_data;
+#endif
+ u32 cmd_status;
+ u32 data_status;
+ u32 stop_cmdr;
+ u32 dir_status;
+ struct tasklet_struct tasklet;
+ struct tasklet_struct card_tasklet;
+ unsigned long pending_events;
+ unsigned long completed_events;
+ enum fh_mci_state state;
+ struct list_head queue;
+
+ u32 bus_hz;
+ u32 current_speed;
+ u32 num_slots;
+ u32 fifoth_val;
+ struct platform_device *pdev;
+ struct fh_mci_board *pdata;
+ struct fh_mci_slot *slot[MAX_MCI_SLOTS];
+
+ /* FIFO push and pull */
+ int data_shift;
+ void (*push_data)(struct fh_mci *host, void *buf, int cnt);
+ void (*pull_data)(struct fh_mci *host, void *buf, int cnt);
+
+ /* Workaround flags */
+ u32 quirks;
+
+ struct regulator *vmmc; /* Power regulator */
+
+ int dma_data_mapped;
+ int data_error_flag;
+};
+
+/* DMA ops for Internal/External DMAC interface */
+struct fh_mci_dma_ops {
+ /* DMA Ops */
+ int (*init)(struct fh_mci *host);
+ void (*start)(struct fh_mci *host, unsigned int sg_len);
+ void (*complete)(struct fh_mci *host);
+ void (*stop)(struct fh_mci *host);
+ void (*cleanup)(struct fh_mci *host);
+ void (*exit)(struct fh_mci *host);
+};
+
+/* IP Quirks/flags. */
+/* DTO fix for command transmission with IDMAC configured */
+#define FH_MCI_QUIRK_IDMAC_DTO BIT(0)
+/* delay needed between retries on some 2.11a implementations */
+#define FH_MCI_QUIRK_RETRY_DELAY BIT(1)
+/* High Speed Capable - Supports HS cards (up to 50MHz) */
+#define FH_MCI_QUIRK_HIGHSPEED BIT(2)
+/* Unreliable card detection */
+#define FH_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
+
+
+struct dma_pdata;
+
+struct block_settings {
+ unsigned short max_segs; /* see blk_queue_max_segments */
+ unsigned int max_blk_size; /* maximum size of one mmc block */
+ unsigned int max_blk_count; /* maximum number of blocks in one req*/
+ unsigned int max_req_size; /* maximum number of bytes in one req*/
+ unsigned int max_seg_size; /* see blk_queue_max_segment_size */
+};
+
+/* Board platform data */
+struct fh_mci_board {
+ u32 num_slots;
+
+ u32 quirks; /* Workaround / Quirk flags */
+ unsigned int bus_hz; /* Bus speed */
+
+ unsigned int caps; /* Capabilities */
+
+	/* delay in ms before detecting cards after interrupt */
+ u32 detect_delay_ms;
+
+ int (*init)(u32 slot_id,void* irq_handler_t , void *);
+ int (*get_ro)(u32 slot_id);
+ int (*get_cd)(u32 slot_id);
+ int (*get_ocr)(u32 slot_id);
+ int (*get_bus_wd)(u32 slot_id);
+ /*
+ * Enable power to selected slot and set voltage to desired level.
+ * Voltage levels are specified using MMC_VDD_xxx defines defined
+ * in linux/mmc/host.h file.
+ */
+ void (*setpower)(u32 slot_id, u32 volt);
+ void (*exit)(u32 slot_id);
+ void (*select_slot)(u32 slot_id);
+
+ struct fh_mci_dma_ops *dma_ops;
+ struct dma_pdata *data;
+ struct block_settings *blk_settings;
+};
+
+#endif /* _LINUX_MMC_FH_MMC_H_ */
diff --git a/drivers/mmc/host/fh_mmc_reg.h b/drivers/mmc/host/fh_mmc_reg.h
new file mode 100644
index 00000000..8153d4d6
--- /dev/null
+++ b/drivers/mmc/host/fh_mmc_reg.h
@@ -0,0 +1,174 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_H_
+#define _DW_MMC_H_
+
+#define SDMMC_CTRL 0x000
+#define SDMMC_PWREN 0x004
+#define SDMMC_CLKDIV 0x008
+#define SDMMC_CLKSRC 0x00c
+#define SDMMC_CLKENA 0x010
+#define SDMMC_TMOUT 0x014
+#define SDMMC_CTYPE 0x018
+#define SDMMC_BLKSIZ 0x01c
+#define SDMMC_BYTCNT 0x020
+#define SDMMC_INTMASK 0x024
+#define SDMMC_CMDARG 0x028
+#define SDMMC_CMD 0x02c
+#define SDMMC_RESP0 0x030
+#define SDMMC_RESP1 0x034
+#define SDMMC_RESP2 0x038
+#define SDMMC_RESP3 0x03c
+#define SDMMC_MINTSTS 0x040
+#define SDMMC_RINTSTS 0x044
+#define SDMMC_STATUS 0x048
+#define SDMMC_FIFOTH 0x04c
+#define SDMMC_CDETECT 0x050
+#define SDMMC_WRTPRT 0x054
+#define SDMMC_GPIO 0x058
+#define SDMMC_TCBCNT 0x05c
+#define SDMMC_TBBCNT 0x060
+#define SDMMC_DEBNCE 0x064
+#define SDMMC_USRID 0x068
+#define SDMMC_VERID 0x06c
+#define SDMMC_HCON 0x070
+#define SDMMC_UHS_REG 0x074
+#define SDMMC_RST_n 0x78
+#define SDMMC_BMOD 0x080
+#define SDMMC_PLDMND 0x084
+#define SDMMC_DBADDR 0x088
+#define SDMMC_IDSTS 0x08c
+#define SDMMC_IDINTEN 0x090
+#define SDMMC_DSCADDR 0x094
+#define SDMMC_BUFADDR 0x098
+#define SDMMC_DATA 0x200
+
+/* shift bit field */
+#define _SBF(f, v) ((v) << (f))
+
+/* Control register defines */
+#define SDMMC_CTRL_USE_IDMAC BIT(25)
+#define SDMMC_CTRL_CEATA_INT_EN BIT(11)
+#define SDMMC_CTRL_SEND_AS_CCSD BIT(10)
+#define SDMMC_CTRL_SEND_CCSD BIT(9)
+#define SDMMC_CTRL_ABRT_READ_DATA BIT(8)
+#define SDMMC_CTRL_SEND_IRQ_RESP BIT(7)
+#define SDMMC_CTRL_READ_WAIT BIT(6)
+#define SDMMC_CTRL_DMA_ENABLE BIT(5)
+#define SDMMC_CTRL_INT_ENABLE BIT(4)
+#define SDMMC_CTRL_DMA_RESET BIT(2)
+#define SDMMC_CTRL_FIFO_RESET BIT(1)
+#define SDMMC_CTRL_RESET BIT(0)
+/* Clock Enable register defines */
+#define SDMMC_CLKEN_LOW_PWR BIT(16)
+#define SDMMC_CLKEN_ENABLE BIT(0)
+/* time-out register defines */
+#define SDMMC_TMOUT_DATA(n) _SBF(8, (n))
+#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
+#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
+#define SDMMC_TMOUT_RESP_MSK 0xFF
+/* card-type register defines */
+#define SDMMC_CTYPE_8BIT BIT(16)
+#define SDMMC_CTYPE_4BIT BIT(0)
+#define SDMMC_CTYPE_1BIT 0
+/* Interrupt status & mask register defines */
+//#define SDMMC_INT_SDIO BIT(16)
+#define SDMMC_INT_SDIO(n) BIT(16 + (n))
+#define SDMMC_INT_EBE BIT(15)
+#define SDMMC_INT_ACD BIT(14)
+#define SDMMC_INT_SBE BIT(13)
+#define SDMMC_INT_HLE BIT(12)
+#define SDMMC_INT_FRUN BIT(11)
+#define SDMMC_INT_HTO BIT(10)
+#define SDMMC_INT_DTO BIT(9)
+#define SDMMC_INT_RTO BIT(8)
+#define SDMMC_INT_DCRC BIT(7)
+#define SDMMC_INT_RCRC BIT(6)
+#define SDMMC_INT_RXDR BIT(5)
+#define SDMMC_INT_TXDR BIT(4)
+#define SDMMC_INT_DATA_OVER BIT(3)
+#define SDMMC_INT_CMD_DONE BIT(2)
+#define SDMMC_INT_RESP_ERR BIT(1)
+#define SDMMC_INT_CD BIT(0)
+#define SDMMC_INT_ERROR 0xbfc2
+/* Command register defines */
+#define SDMMC_CMD_START BIT(31)
+#define SDMMC_CMD_USE_HOLD_REG BIT(29)
+#define SDMMC_CMD_CCS_EXP BIT(23)
+#define SDMMC_CMD_CEATA_RD BIT(22)
+#define SDMMC_CMD_UPD_CLK BIT(21)
+#define SDMMC_CMD_INIT BIT(15)
+#define SDMMC_CMD_STOP BIT(14)
+#define SDMMC_CMD_PRV_DAT_WAIT BIT(13)
+#define SDMMC_CMD_SEND_STOP BIT(12)
+#define SDMMC_CMD_STRM_MODE BIT(11)
+#define SDMMC_CMD_DAT_WR BIT(10)
+#define SDMMC_CMD_DAT_EXP BIT(9)
+#define SDMMC_CMD_RESP_CRC BIT(8)
+#define SDMMC_CMD_RESP_LONG BIT(7)
+#define SDMMC_CMD_RESP_EXP BIT(6)
+#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
+/* Status register defines */
+#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
+#define SDMMC_FIFO_SZ 32
+/* Internal DMAC interrupt defines */
+#define SDMMC_IDMAC_INT_AI BIT(9)
+#define SDMMC_IDMAC_INT_NI BIT(8)
+#define SDMMC_IDMAC_INT_CES BIT(5)
+#define SDMMC_IDMAC_INT_DU BIT(4)
+#define SDMMC_IDMAC_INT_FBE BIT(2)
+#define SDMMC_IDMAC_INT_RI BIT(1)
+#define SDMMC_IDMAC_INT_TI BIT(0)
+/* Internal DMAC bus mode bits */
+#define SDMMC_IDMAC_ENABLE BIT(7)
+#define SDMMC_IDMAC_FB BIT(1)
+#define SDMMC_IDMAC_SWRESET BIT(0)
+#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \
+ ((r) & 0xFFF) << 16 | \
+ ((t) & 0xFFF))
+
+/* Register access macros */
+#define mci_readl(dev, reg) \
+ __raw_readl(dev->regs + SDMMC_##reg)
+#define mci_writel(dev, reg, value) \
+ __raw_writel((value), dev->regs + SDMMC_##reg)
+
+/* 16-bit FIFO access macros */
+#define mci_readw(dev, reg) \
+ __raw_readw(dev->regs + SDMMC_##reg)
+#define mci_writew(dev, reg, value) \
+ __raw_writew((value), dev->regs + SDMMC_##reg)
+
+/* 64-bit FIFO access macros */
+#ifdef readq
+#define mci_readq(dev, reg) \
+ __raw_readq(dev->regs + SDMMC_##reg)
+#define mci_writeq(dev, reg, value) \
+ __raw_writeq((value), dev->regs + SDMMC_##reg)
+#else
+/*
+ * Dummy readq implementation for architectures that don't define it.
+ *
+ * We would assume that none of these architectures would configure
+ * the IP block with a 64bit FIFO width, so this code will never be
+ * executed on those machines. Defining these macros here keeps the
+ * rest of the code free from ifdefs.
+ */
+#define mci_readq(dev, reg) \
+ (*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
+#define mci_writeq(dev, reg, value) \
+ (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
+#endif
+
+#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/fhmci/Makefile b/drivers/mmc/host/fhmci/Makefile
new file mode 100644
index 00000000..bc3ffd2c
--- /dev/null
+++ b/drivers/mmc/host/fhmci/Makefile
@@ -0,0 +1,3 @@
+
+obj-$(CONFIG_MMC) += fh_mci.o
+fh_mci-y := fhmci.o
diff --git a/drivers/mmc/host/fhmci/fhmci.c b/drivers/mmc/host/fhmci/fhmci.c
new file mode 100644
index 00000000..3f0f1ef6
--- /dev/null
+++ b/drivers/mmc/host/fhmci/fhmci.c
@@ -0,0 +1,1541 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/card.h>
+#include <linux/slab.h>
+
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <linux/freezer.h>
+#include <asm/dma.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/sizes.h>
+#include <linux/uaccess.h>
+#include <mach/hardware.h>
+#include <linux/mmc/card.h>
+#include <linux/clk.h>
+#include "fhmci_reg.h"
+#include <mach/fhmci.h>
+
+#include <mach/pmu.h>
+
+
+#ifndef FALSE
+#define FALSE (0)
+#endif
+
+#ifndef TRUE
+#define TRUE (!(FALSE))
+#endif
+
+#define SD_POWER_ON 1
+#define SD_POWER_OFF 0
+#define DRIVER_NAME "fh_mci"
+
+static unsigned int retry_count = MAX_RETRY_COUNT;
+static unsigned int request_timeout = FH_MCI_REQUEST_TIMEOUT;
+int trace_level = FHMCI_TRACE_LEVEL;
+struct mmc_host *mmc_sd1;
+struct mmc_host *mmc_sd0;
+
+#ifdef MODULE
+
+MODULE_PARM_DESC(detect_timer, "card detect time (default:500ms))");
+
+module_param(retry_count, uint, 0600);
+MODULE_PARM_DESC(retry_count, "retry count times (default:100))");
+
+module_param(request_timeout, uint, 0600);
+MODULE_PARM_DESC(request_timeout, "Request timeout time (default:3s))");
+
+module_param(trace_level, int, 0600);
+MODULE_PARM_DESC(trace_level, "FHMCI_TRACE_LEVEL");
+
+#endif
+
+/* reset MMC host controller */
+static void fh_mci_sys_reset(struct fhmci_host *host)
+{
+ unsigned int reg_value;
+ unsigned long flags;
+
+ fhmci_trace(2, "reset");
+
+ local_irq_save(flags);
+
+ reg_value = fhmci_readl(host->base + MCI_BMOD);
+ reg_value |= BMOD_SWR;
+ fhmci_writel(reg_value, host->base + MCI_BMOD);
+ udelay(50);
+
+ reg_value = fhmci_readl(host->base + MCI_BMOD);
+ reg_value |= BURST_INCR;
+ fhmci_writel(reg_value, host->base + MCI_BMOD);
+
+ reg_value = fhmci_readl(host->base + MCI_CTRL);
+ reg_value |= CTRL_RESET | FIFO_RESET | DMA_RESET;
+ fhmci_writel(reg_value, host->base + MCI_CTRL);
+
+ local_irq_restore(flags);
+}
+
+static void fh_mci_sys_undo_reset(struct fhmci_host *host)
+{
+ unsigned long flags;
+
+ fhmci_trace(2, "undo reset");
+
+ local_irq_save(flags);
+ local_irq_restore(flags);
+}
+
+static void fh_mci_ctrl_power(struct fhmci_host *host, unsigned int flag)
+{
+ fhmci_trace(2, "begin");
+
+}
+
+/**********************************************
+ *1: card off
+ *0: card on
+ ***********************************************/
+static unsigned int fh_mci_sys_card_detect(struct fhmci_host *host)
+{
+ unsigned int card_status = readl(host->base + MCI_CDETECT);
+ return card_status & FHMCI_CARD0;
+}
+
+/**********************************************
+ *1: card readonly
+ *0: card read/write
+ ***********************************************/
+static unsigned int fh_mci_ctrl_card_readonly(struct fhmci_host *host)
+{
+ unsigned int card_value = fhmci_readl(host->base + MCI_WRTPRT);
+ return card_value & FHMCI_CARD0;
+}
+
+static int fh_mci_wait_cmd(struct fhmci_host *host)
+{
+ int wait_retry_count = 0;
+ unsigned int reg_data = 0;
+ unsigned long flags;
+
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+
+ while (1) {
+ /*
+ * Check if CMD::start_cmd bit is clear.
+ * start_cmd = 0 means MMC Host controller has loaded registers
+ * and next command can be loaded in.
+ */
+ reg_data = readl(host->base + MCI_CMD);
+ if ((reg_data & START_CMD) == 0)
+ return 0;
+
+ /* Check if Raw_Intr_Status::HLE bit is set. */
+ spin_lock_irqsave(&host->lock, flags);
+ reg_data = readl(host->base + MCI_RINTSTS);
+ if (reg_data & HLE_INT_STATUS) {
+ reg_data &= (~SDIO_INT_STATUS);
+ fhmci_writel(reg_data, host->base + MCI_RINTSTS);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ fhmci_trace(3, "Other CMD is running," \
+ "please operate cmd again!");
+ return 1;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ udelay(100);
+
+ /* Check if number of retries for this are over. */
+ wait_retry_count++;
+ if (wait_retry_count >= retry_count) {
+ fhmci_trace(3, "send cmd is timeout!");
+ return -1;
+ }
+ }
+}
+
+static void fh_mci_control_cclk(struct fhmci_host *host, unsigned int flag)
+{
+ unsigned int reg;
+ union cmd_arg_s cmd_reg;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+
+ reg = fhmci_readl(host->base + MCI_CLKENA);
+ if (flag == ENABLE)
+ reg |= CCLK_ENABLE;
+ else
+ reg &= 0xffff0000;
+ fhmci_writel(reg, host->base + MCI_CLKENA);
+
+ cmd_reg.cmd_arg = fhmci_readl(host->base + MCI_CMD);
+ cmd_reg.bits.start_cmd = 1;
+ cmd_reg.bits.update_clk_reg_only = 1;
+ fhmci_writel(cmd_reg.cmd_arg, host->base + MCI_CMD);
+ if (fh_mci_wait_cmd(host) != 0)
+ fhmci_trace(3, "disable or enable clk is timeout!");
+}
+
+static void fh_mci_set_cclk(struct fhmci_host *host, unsigned int cclk)
+{
+ unsigned int reg_value;
+ union cmd_arg_s clk_cmd;
+ struct fh_mci_board *pdata;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ fhmci_assert(cclk);
+
+ pdata = host->pdata;
+
+ /*
+ * set card clk divider value,
+ * clk_divider = Fmmcclk/(Fmmc_cclk * 2)
+ */
+
+ if (0 == host->id) {
+ if (pdata->bus_hz <= cclk)
+ reg_value = 0;
+ else {
+ reg_value = pdata->bus_hz / (cclk * 2);
+ if (pdata->bus_hz % (cclk * 2))
+ reg_value++;
+ }
+ } else if (1 == host->id) {
+ if (pdata->bus_hz <= cclk)
+ reg_value = 0;
+ else {
+ reg_value = pdata->bus_hz / (cclk * 2);
+ if (pdata->bus_hz % (cclk * 2))
+ reg_value++;
+ }
+ } else {
+ fhmci_error("fhmci host id error!");
+ return;
+ }
+
+ fhmci_writel(reg_value, host->base + MCI_CLKDIV);
+
+
+ clk_cmd.cmd_arg = fhmci_readl(host->base + MCI_CMD);
+ clk_cmd.bits.start_cmd = 1;
+ clk_cmd.bits.update_clk_reg_only = 1;
+ fhmci_writel(clk_cmd.cmd_arg, host->base + MCI_CMD);
+
+ if (fh_mci_wait_cmd(host) != 0)
+ fhmci_trace(3, "set card clk divider is failed!");
+}
+
+static void fh_mci_init_card(struct fhmci_host *host)
+{
+ unsigned int tmp_reg, tmp;
+ unsigned long flags;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ tmp = fhmci_readl(host->base + MCI_PWREN);
+ fh_mci_sys_reset(host);
+ fh_mci_ctrl_power(host, POWER_OFF);
+ udelay(500);
+ /* card power on */
+ fh_mci_ctrl_power(host, POWER_ON);
+ udelay(200);
+
+ fh_mci_sys_undo_reset(host);
+
+ /* set phase shift */
+ /* set card read threshold */
+
+ /* clear MMC host intr */
+ fhmci_writel(ALL_INT_CLR, host->base + MCI_RINTSTS);
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->pending_events = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* MASK MMC host intr */
+ tmp_reg = fhmci_readl(host->base + MCI_INTMASK);
+ tmp_reg &= ~ALL_INT_MASK;
+ tmp_reg |= DATA_INT_MASK;
+ fhmci_writel(tmp_reg, host->base + MCI_INTMASK);
+
+	/* enable inner DMA mode and close intr of MMC host controller */
+ tmp_reg = fhmci_readl(host->base + MCI_CTRL);
+ tmp_reg &= ~INTR_EN;
+ tmp_reg |= USE_INTERNAL_DMA | INTR_EN;
+ fhmci_writel(tmp_reg, host->base + MCI_CTRL);
+
+ /* set timeout param */
+ fhmci_writel(DATA_TIMEOUT | RESPONSE_TIMEOUT, host->base + MCI_TIMEOUT);
+
+ /* set FIFO param */
+ if (host->pdata->fifo_depth > 15)
+ tmp = 0x5;
+ else
+ tmp = 0x2;
+
+ tmp_reg = ((tmp << 28) | ((host->pdata->fifo_depth / 2) << 16)
+ | (((host->pdata->fifo_depth / 2) + 1) << 0));
+ fhmci_writel(tmp_reg, host->base + MCI_FIFOTH);
+}
+
+int read_mci_ctrl_states(int id_mmc_sd)
+{
+ if ((id_mmc_sd == ID_SD0) && (mmc_sd0 != NULL))
+ return mmc_sd0->rescan_disable;
+ else if ((id_mmc_sd == ID_SD1) && (mmc_sd1 != NULL))
+ return mmc_sd1->rescan_disable;
+
+ return -1;
+}
+
+int storage_dev_set_mmc_rescan(struct mmc_ctrl *m_ctrl)
+{
+ unsigned int tmp;
+ struct mmc_host *mmc_sd = NULL;
+ tmp = m_ctrl->mmc_ctrl_state;
+
+ if (m_ctrl->slot_idx == 1) {
+ if (mmc_sd1 != NULL)
+ mmc_sd = mmc_sd1;
+ } else if (m_ctrl->slot_idx == 0) {
+ if (mmc_sd0 != NULL)
+ mmc_sd = mmc_sd0;
+ }
+ if ((tmp != TRUE) && (tmp != FALSE))
+ return -1;
+
+ if (tmp == TRUE) {
+ if (mmc_sd != NULL) {
+ mmc_sd->rescan_disable = TRUE;
+ mmc_detect_change(mmc_sd, 0);
+ }
+ } else {
+ if (mmc_sd != NULL) {
+ mmc_sd->rescan_disable = FALSE;
+ mmc_detect_change(mmc_sd, 0);
+ }
+ }
+ return 0;
+}
+
+static void fh_mci_idma_start(struct fhmci_host *host)
+{
+ unsigned int tmp;
+
+ fhmci_trace(2, "begin");
+ fhmci_writel(host->dma_paddr, host->base + MCI_DBADDR);
+ tmp = fhmci_readl(host->base + MCI_BMOD);
+ tmp |= BMOD_DMA_EN;
+ tmp |= BURST_INCR;
+ fhmci_writel(tmp, host->base + MCI_BMOD);
+}
+
+static void fh_mci_idma_stop(struct fhmci_host *host)
+{
+ unsigned int tmp_reg;
+
+ fhmci_trace(2, "begin");
+ tmp_reg = fhmci_readl(host->base + MCI_BMOD);
+ tmp_reg &= ~BMOD_DMA_EN;
+ tmp_reg |= BMOD_SWR;
+ fhmci_writel(tmp_reg, host->base + MCI_BMOD);
+}
+
+static int fh_mci_setup_data(struct fhmci_host *host, struct mmc_data *data)
+{
+ unsigned int sg_phyaddr, sg_length;
+ unsigned int i, ret = 0;
+ unsigned int data_size;
+ unsigned int max_des, des_cnt;
+ struct fhmci_des *des;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ fhmci_assert(data);
+
+ host->data = data;
+
+ if (data->flags & MMC_DATA_READ)
+ host->dma_dir = DMA_FROM_DEVICE;
+ else
+ host->dma_dir = DMA_TO_DEVICE;
+
+ host->dma_sg = data->sg;
+ host->dma_sg_num = dma_map_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len, host->dma_dir);
+ fhmci_assert(host->dma_sg_num);
+ fhmci_trace(2, "host->dma_sg_num is %d\n", host->dma_sg_num);
+ data_size = data->blksz * data->blocks;
+
+ if (data_size > (DMA_BUFFER * MAX_DMA_DES)) {
+ fhmci_error("mci request data_size is too big!\n");
+ ret = -1;
+ goto out;
+ }
+
+ fhmci_trace(2, "host->dma_paddr is 0x%08X,host->dma_vaddr is 0x%08X\n",
+ (unsigned int)host->dma_paddr,
+ (unsigned int)host->dma_vaddr);
+
+ max_des = (PAGE_SIZE/sizeof(struct fhmci_des));
+ des = (struct fhmci_des *)host->dma_vaddr;
+ des_cnt = 0;
+
+ for (i = 0; i < host->dma_sg_num; i++) {
+ sg_length = sg_dma_len(&data->sg[i]);
+ sg_phyaddr = sg_dma_address(&data->sg[i]);
+ fhmci_trace(2, "sg[%d] sg_length is 0x%08X, " \
+ "sg_phyaddr is 0x%08X\n", \
+ i, (unsigned int)sg_length, \
+ (unsigned int)sg_phyaddr);
+ while (sg_length) {
+ des[des_cnt].idmac_des_ctrl = DMA_DES_OWN
+ | DMA_DES_NEXT_DES;
+ des[des_cnt].idmac_des_buf_addr = sg_phyaddr;
+ /* idmac_des_next_addr is paddr for dma */
+ des[des_cnt].idmac_des_next_addr = host->dma_paddr
+ + (des_cnt + 1) * sizeof(struct fhmci_des);
+
+ if (sg_length >= 0x1F00) {
+ des[des_cnt].idmac_des_buf_size = 0x1F00;
+ sg_length -= 0x1F00;
+ sg_phyaddr += 0x1F00;
+ } else {
+ /* FIXME:data alignment */
+ des[des_cnt].idmac_des_buf_size = sg_length;
+ sg_length = 0;
+ }
+
+ fhmci_trace(2, "des[%d] vaddr is 0x%08X", i,
+ (unsigned int)&des[i]);
+ fhmci_trace(2, "des[%d].idmac_des_ctrl is 0x%08X",
+ i, (unsigned int)des[i].idmac_des_ctrl);
+ fhmci_trace(2, "des[%d].idmac_des_buf_size is 0x%08X",
+ i, (unsigned int)des[i].idmac_des_buf_size);
+ fhmci_trace(2, "des[%d].idmac_des_buf_addr 0x%08X",
+ i, (unsigned int)des[i].idmac_des_buf_addr);
+ fhmci_trace(2, "des[%d].idmac_des_next_addr is 0x%08X",
+ i, (unsigned int)des[i].idmac_des_next_addr);
+ des_cnt++;
+ }
+
+ fhmci_assert(des_cnt < max_des);
+ }
+ des[0].idmac_des_ctrl |= DMA_DES_FIRST_DES;
+ des[des_cnt - 1].idmac_des_ctrl |= DMA_DES_LAST_DES;
+ des[des_cnt - 1].idmac_des_next_addr = 0;
+out:
+ return ret;
+}
+
+static int fh_mci_exec_cmd(struct fhmci_host *host, struct mmc_command *cmd,
+ struct mmc_data *data)
+{
+ union cmd_arg_s cmd_regs;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ fhmci_assert(cmd);
+
+ host->cmd = cmd;
+
+ fhmci_writel(cmd->arg, host->base + MCI_CMDARG);
+ fhmci_trace(2, "arg_reg 0x%x, val 0x%x\n", MCI_CMDARG, cmd->arg);
+ cmd_regs.cmd_arg = fhmci_readl(host->base + MCI_CMD);
+ if (data) {
+ cmd_regs.bits.data_transfer_expected = 1;
+ if (data->flags & (MMC_DATA_WRITE | MMC_DATA_READ))
+ cmd_regs.bits.transfer_mode = 0;
+
+ if (data->flags & MMC_DATA_STREAM)
+ cmd_regs.bits.transfer_mode = 1;
+
+ if (data->flags & MMC_DATA_WRITE)
+ cmd_regs.bits.read_write = 1;
+ else if (data->flags & MMC_DATA_READ)
+ cmd_regs.bits.read_write = 0;
+ } else {
+ cmd_regs.bits.data_transfer_expected = 0;
+ cmd_regs.bits.transfer_mode = 0;
+ cmd_regs.bits.read_write = 0;
+ }
+ cmd_regs.bits.send_auto_stop = 0;
+#ifdef CONFIG_SEND_AUTO_STOP
+ if ((host->mrq->stop) && (!(host->is_tuning)))
+ cmd_regs.bits.send_auto_stop = 1;
+#endif
+
+ if (cmd == host->mrq->stop) {
+ cmd_regs.bits.stop_abort_cmd = 1;
+ cmd_regs.bits.wait_prvdata_complete = 0;
+ } else {
+ cmd_regs.bits.stop_abort_cmd = 0;
+ cmd_regs.bits.wait_prvdata_complete = 1;
+ }
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ cmd_regs.bits.response_expect = 0;
+ cmd_regs.bits.response_length = 0;
+ cmd_regs.bits.check_response_crc = 0;
+ break;
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ cmd_regs.bits.response_expect = 1;
+ cmd_regs.bits.response_length = 0;
+ cmd_regs.bits.check_response_crc = 1;
+ break;
+ case MMC_RSP_R2:
+ cmd_regs.bits.response_expect = 1;
+ cmd_regs.bits.response_length = 1;
+ cmd_regs.bits.check_response_crc = 1;
+ break;
+ case MMC_RSP_R3:
+ cmd_regs.bits.response_expect = 1;
+ cmd_regs.bits.response_length = 0;
+ cmd_regs.bits.check_response_crc = 0;
+ break;
+ default:
+ fhmci_error("fh_mci: unhandled response type %02x\n",
+ mmc_resp_type(cmd));
+ return -EINVAL;
+ }
+
+ fhmci_trace(2, "send cmd of card is cmd->opcode = %d ", cmd->opcode);
+ if (cmd->opcode == MMC_GO_IDLE_STATE)
+ cmd_regs.bits.send_initialization = 1;
+ else
+ cmd_regs.bits.send_initialization = 0;
+ /* CMD 11 check switch voltage */
+ if (cmd->opcode == SD_SWITCH_VOLTAGE)
+ cmd_regs.bits.volt_switch = 1;
+ else
+ cmd_regs.bits.volt_switch = 0;
+
+
+ cmd_regs.bits.card_number = 0;
+ cmd_regs.bits.cmd_index = cmd->opcode;
+ cmd_regs.bits.start_cmd = 1;
+ cmd_regs.bits.update_clk_reg_only = 0;
+ fhmci_writel(DATA_INT_MASK, host->base + MCI_RINTSTS);
+ fhmci_writel(cmd_regs.cmd_arg, host->base + MCI_CMD);
+ fhmci_trace(2, "cmd_reg 0x%x, val 0x%x\n", MCI_CMD, cmd_regs.cmd_arg);
+
+ if (fh_mci_wait_cmd(host) != 0) {
+ fhmci_trace(3, "send card cmd is failed!");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void fh_mci_finish_request(struct fhmci_host *host,
+ struct mmc_request *mrq)
+{
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ fhmci_assert(mrq);
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void fh_mci_cmd_done(struct fhmci_host *host, unsigned int stat)
+{
+ unsigned int i;
+ struct mmc_command *cmd = host->cmd;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ fhmci_assert(cmd);
+
+
+ for (i = 0; i < 4; i++) {
+ if (mmc_resp_type(cmd) == MMC_RSP_R2) {
+ cmd->resp[i] = fhmci_readl(host->base +
+ MCI_RESP3 - i * 0x4);
+ /* R2 must delay some time here ,when use UHI card,
+ need check why */
+ udelay(1000);
+ } else
+ cmd->resp[i] = fhmci_readl(host->base +
+ MCI_RESP0 + i * 0x4);
+ }
+
+ if (stat & RTO_INT_STATUS) {
+ cmd->error = -ETIMEDOUT;
+ fhmci_trace(3, "irq cmd status stat = 0x%x is timeout error!",
+ stat);
+ } else if (stat & (RCRC_INT_STATUS | RE_INT_STATUS)) {
+ cmd->error = -EILSEQ;
+ fhmci_trace(3, "irq cmd status stat = 0x%x is response error!",
+ stat);
+ }
+ host->cmd = NULL;
+}
+
+
+static void fh_mci_data_done(struct fhmci_host *host, unsigned int stat)
+{
+ struct mmc_data *data = host->data;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ fhmci_assert(data);
+
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
+
+ if (stat & (HTO_INT_STATUS | DRTO_INT_STATUS)) {
+ data->error = -ETIMEDOUT;
+ fhmci_trace(3, "irq data status stat = 0x%x is timeout error!",
+ stat);
+ } else if (stat & (EBE_INT_STATUS | SBE_INT_STATUS | FRUN_INT_STATUS
+ | DCRC_INT_STATUS)) {
+ data->error = -EILSEQ;
+ fhmci_trace(3, "irq data status stat = 0x%x is data error!",
+ stat);
+ }
+
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ host->data = NULL;
+}
+
+
+static int fh_mci_wait_cmd_complete(struct fhmci_host *host)
+{
+ unsigned int cmd_retry_count = 0;
+ unsigned long cmd_jiffies_timeout;
+ unsigned int cmd_irq_reg = 0;
+ struct mmc_command *cmd = host->cmd;
+ unsigned long flags;
+ unsigned int cmd_done = 0;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ fhmci_assert(cmd);
+
+ cmd_jiffies_timeout = jiffies + request_timeout;
+ while (1) {
+
+ do {
+ spin_lock_irqsave(&host->lock, flags);
+ cmd_irq_reg = readl(host->base + MCI_RINTSTS);
+
+ if (cmd_irq_reg & CD_INT_STATUS) {
+ fhmci_writel((CD_INT_STATUS | RTO_INT_STATUS
+ | RCRC_INT_STATUS | RE_INT_STATUS),
+ host->base + MCI_RINTSTS);
+ spin_unlock_irqrestore(&host->lock, flags);
+ cmd_done = 1;
+ break;
+/* fh_mci_cmd_done(host, cmd_irq_reg);
+ return 0;*/
+ } else if (cmd_irq_reg & VOLT_SWITCH_INT_STATUS) {
+ fhmci_writel(VOLT_SWITCH_INT_STATUS, \
+ host->base + MCI_RINTSTS);
+ spin_unlock_irqrestore(&host->lock, flags);
+ cmd_done = 1;
+ break;
+/* fh_mci_cmd_done(host, cmd_irq_reg);
+ return 0;*/
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ cmd_retry_count++;
+ } while (cmd_retry_count < retry_count &&
+ host->get_cd(host) != CARD_UNPLUGED);
+
+ cmd_retry_count = 0;
+
+ if ((host->card_status == CARD_UNPLUGED)
+ || (host->get_cd(host) == CARD_UNPLUGED)) {
+ cmd->error = -ETIMEDOUT;
+ return -1;
+ }
+ if (cmd_done) {
+ fh_mci_cmd_done(host, cmd_irq_reg);
+ return 0;
+ }
+
+ if (!time_before(jiffies, cmd_jiffies_timeout)) {
+ unsigned int i = 0;
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] = fhmci_readl(host->base \
+ + MCI_RESP0 + i * 0x4);
+ printk(KERN_ERR "voltage switch read MCI_RESP");
+ printk(KERN_ERR "%d : 0x%x\n", i, cmd->resp[i]);
+ }
+ cmd->error = -ETIMEDOUT;
+ fhmci_trace(3, "wait cmd request complete is timeout!");
+ return -1;
+ }
+
+ schedule();
+ }
+}
+/*
+ * designware support send stop command automatically when
+ * read or wirte multi blocks
+ */
+#ifdef CONFIG_SEND_AUTO_STOP
+static int fh_mci_wait_auto_stop_complete(struct fhmci_host *host)
+{
+ unsigned int cmd_retry_count = 0;
+ unsigned long cmd_jiffies_timeout;
+ unsigned int cmd_irq_reg = 0;
+ unsigned long flags;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+
+ cmd_jiffies_timeout = jiffies + request_timeout;
+ while (1) {
+
+ do {
+ spin_lock_irqsave(&host->lock, flags);
+ cmd_irq_reg = readl(host->base + MCI_RINTSTS);
+ if (cmd_irq_reg & ACD_INT_STATUS) {
+ fhmci_writel((ACD_INT_STATUS | RTO_INT_STATUS
+ | RCRC_INT_STATUS | RE_INT_STATUS),
+ host->base + MCI_RINTSTS);
+ spin_unlock_irqrestore(&host->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ cmd_retry_count++;
+ } while (cmd_retry_count < retry_count);
+
+ cmd_retry_count = 0;
+ if (host->card_status == CARD_UNPLUGED)
+ return -1;
+
+ if (!time_before(jiffies, cmd_jiffies_timeout)) {
+ fhmci_trace(3, "wait auto stop complete is timeout!");
+ return -1;
+ }
+
+ schedule();
+ }
+
+}
+#endif
+static int fh_mci_wait_data_complete(struct fhmci_host *host)
+{
+ unsigned int tmp_reg;
+ struct mmc_data *data = host->data;
+ long time = request_timeout;
+ unsigned long flags;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ fhmci_assert(data);
+
+ time = wait_event_timeout(host->intr_wait,
+ test_bit(FHMCI_PEND_DTO_b, &host->pending_events),
+ time);
+
+ /* Mask MMC host data intr */
+ spin_lock_irqsave(&host->lock, flags);
+ tmp_reg = fhmci_readl(host->base + MCI_INTMASK);
+ tmp_reg &= ~DATA_INT_MASK;
+ fhmci_writel(tmp_reg, host->base + MCI_INTMASK);
+ host->pending_events &= ~FHMCI_PEND_DTO_m;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (((time <= 0)
+ && (!test_bit(FHMCI_PEND_DTO_b, &host->pending_events)))
+ || (host->card_status == CARD_UNPLUGED)) {
+
+ data->error = -ETIMEDOUT;
+ fhmci_trace(3, "wait data request complete is timeout! 0x%08X",
+ host->irq_status);
+ fh_mci_idma_stop(host);
+ fh_mci_data_done(host, host->irq_status);
+ return -1;
+ }
+
+ fh_mci_idma_stop(host);
+ fh_mci_data_done(host, host->irq_status);
+ return 0;
+}
+
+
+static int fh_mci_wait_card_complete(struct fhmci_host *host,
+ struct mmc_data *data)
+{
+ unsigned int card_retry_count = 0;
+ unsigned long card_jiffies_timeout;
+ unsigned int card_status_reg = 0;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(host);
+ /* fhmci_assert(data); */
+
+ card_jiffies_timeout = jiffies + FH_MCI_DETECT_TIMEOUT;
+ while (1) {
+
+ do {
+ card_status_reg = readl(host->base + MCI_STATUS);
+ if (!(card_status_reg & DATA_BUSY)) {
+ fhmci_trace(2, "end");
+ return 0;
+ }
+ card_retry_count++;
+ } while (card_retry_count < retry_count);
+ card_retry_count = 0;
+
+ if (host->card_status == CARD_UNPLUGED) {
+ data->error = -ETIMEDOUT;
+ return -1;
+ }
+
+ if (!time_before(jiffies, card_jiffies_timeout)) {
+ if (data != NULL)
+ data->error = -ETIMEDOUT;
+ fhmci_trace(3, "wait card ready complete is timeout!");
+ return -1;
+ }
+
+ schedule();
+ }
+}
+
+static unsigned long t;
+static unsigned long cmds;
+static unsigned long long send_byte_count;
+static struct timeval in_cmd, out_cmd;
+static struct timeval *x = &out_cmd, *y = &in_cmd;
+static unsigned long max, sum;
+static unsigned long called, ended;
+
+static void fh_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct fhmci_host *host = mmc_priv(mmc);
+ int byte_cnt = 0;
+ #ifdef CONFIG_SEND_AUTO_STOP
+ int trans_cnt;
+ #endif
+ int fifo_count = 0, tmp_reg;
+ int ret = 0;
+ unsigned long flags;
+
+ if (host->id == 1) {
+ called++;
+ memset(x, 0, sizeof(struct timeval));
+ memset(y, 0, sizeof(struct timeval));
+ do_gettimeofday(y);
+ }
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(mmc);
+ fhmci_assert(mrq);
+ fhmci_assert(host);
+
+ host->mrq = mrq;
+ host->irq_status = 0;
+
+
+ if (host->card_status == CARD_UNPLUGED) {
+ mrq->cmd->error = -ENODEV;
+ goto request_end;
+ }
+#if 1
+ ret = fh_mci_wait_card_complete(host, mrq->data);
+
+ if (ret) {
+ mrq->cmd->error = ret;
+ goto request_end;
+ }
+#endif
+ /* prepare data */
+ if (mrq->data) {
+ ret = fh_mci_setup_data(host, mrq->data);
+ if (ret) {
+ mrq->data->error = ret;
+ fhmci_trace(3, "data setup is error!");
+ goto request_end;
+ }
+
+ byte_cnt = mrq->data->blksz * mrq->data->blocks;
+ fhmci_writel(byte_cnt, host->base + MCI_BYTCNT);
+ fhmci_writel(mrq->data->blksz, host->base + MCI_BLKSIZ);
+
+ tmp_reg = fhmci_readl(host->base + MCI_CTRL);
+ tmp_reg |= FIFO_RESET;
+ fhmci_writel(tmp_reg, host->base + MCI_CTRL);
+
+ do {
+ tmp_reg = fhmci_readl(host->base + MCI_CTRL);
+ fifo_count++;
+ if (fifo_count >= retry_count) {
+ printk(KERN_INFO "fifo reset is timeout!");
+ return;
+ }
+ } while (tmp_reg&FIFO_RESET);
+
+ /* start DMA */
+ fh_mci_idma_start(host);
+ } else {
+ fhmci_writel(0, host->base + MCI_BYTCNT);
+ fhmci_writel(0, host->base + MCI_BLKSIZ);
+ }
+
+ /* send command */
+ ret = fh_mci_exec_cmd(host, mrq->cmd, mrq->data);
+ if (ret) {
+ mrq->cmd->error = ret;
+ fh_mci_idma_stop(host);
+ fhmci_trace(3, "can't send card cmd! ret = %d", ret);
+ goto request_end;
+ }
+
+ /* wait command send complete */
+ ret = fh_mci_wait_cmd_complete(host);
+
+ /* start data transfer */
+ if (mrq->data) {
+ if (!(mrq->cmd->error)) {
+ /* Open MMC host data intr */
+ spin_lock_irqsave(&host->lock, flags);
+ tmp_reg = fhmci_readl(host->base + MCI_INTMASK);
+ tmp_reg |= DATA_INT_MASK;
+ fhmci_writel(tmp_reg, host->base + MCI_INTMASK);
+ spin_unlock_irqrestore(&host->lock, flags);
+ /* wait data transfer complete */
+ ret = fh_mci_wait_data_complete(host);
+ } else {
+ /* CMD error in data command */
+ fh_mci_idma_stop(host);
+ }
+
+ if (mrq->stop) {
+#ifdef CONFIG_SEND_AUTO_STOP
+ trans_cnt = fhmci_readl(host->base + MCI_TCBCNT);
+ /* send auto stop */
+ if ((trans_cnt == byte_cnt) && (!(host->is_tuning))) {
+ fhmci_trace(3, "byte_cnt = %d, trans_cnt = %d",
+ byte_cnt, trans_cnt);
+ ret = fh_mci_wait_auto_stop_complete(host);
+ if (ret) {
+ mrq->stop->error = -ETIMEDOUT;
+ goto request_end;
+ }
+ } else {
+#endif
+ /* send soft stop command */
+ fhmci_trace(3, "this time, send soft stop");
+ ret = fh_mci_exec_cmd(host, host->mrq->stop,
+ host->data);
+ if (ret) {
+ mrq->stop->error = ret;
+ goto request_end;
+ }
+ ret = fh_mci_wait_cmd_complete(host);
+ if (ret)
+ goto request_end;
+#ifdef CONFIG_SEND_AUTO_STOP
+ }
+#endif
+ }
+ }
+
+request_end:
+ /* clear MMC host intr */
+ spin_lock_irqsave(&host->lock, flags);
+ fhmci_writel(ALL_INT_CLR & (~SDIO_INT_STATUS),
+ host->base + MCI_RINTSTS);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ fh_mci_finish_request(host, mrq);
+
+
+ if (host->id == 1) {
+ ended++;
+ do_gettimeofday(x);
+
+ /* Perform the carry for the later subtraction by updating y. */
+ if (x->tv_usec < y->tv_usec) {
+ int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
+ y->tv_usec -= 1000000 * nsec;
+ y->tv_sec += nsec;
+ }
+ if (x->tv_usec - y->tv_usec > 1000000) {
+ int nsec = (x->tv_usec - y->tv_usec) / 1000000;
+ y->tv_usec += 1000000 * nsec;
+ y->tv_sec -= nsec;
+ }
+ /* Compute the time remaining to wait.
+ * tv_usec is certainly positive. */
+ if (((x->tv_sec - y->tv_sec) * 1000
+ + x->tv_usec - y->tv_usec) > max) {
+ max = (x->tv_sec - y->tv_sec)
+ * 1000 + x->tv_usec - y->tv_usec;
+ }
+
+ sum += (x->tv_sec - y->tv_sec) * 1000 + x->tv_usec - y->tv_usec;
+
+ send_byte_count += byte_cnt;
+ cmds++;
+
+ if (jiffies - t > HZ) {
+ /*
+ * pr_info("SDIO HOST send_byte_count:
+ * %llu in %u cmds, max cost time: %lu,
+ * sum: %lu, ave: %lu\ncalled: %lu, ended: %lu\n",
+ * send_byte_count, cmds, max, sum,
+ * sum / cmds, called, ended);
+ */
+ t = jiffies;
+ send_byte_count = 0;
+ cmds = 0;
+ max = 0;
+ sum = 0;
+ called = 0;
+ ended = 0;
+ }
+ }
+}
+
+static void fh_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct fhmci_host *host = mmc_priv(mmc);
+ unsigned int tmp_reg;
+ u32 ctrl;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(mmc);
+ fhmci_assert(ios);
+ fhmci_assert(host);
+
+ fhmci_trace(3, "ios->power_mode = %d ", ios->power_mode);
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ /*
+ * Set controller working voltage as 3.3V before power off.
+ */
+ ctrl = fhmci_readl(host->base + MCI_UHS_REG);
+ ctrl &= ~FH_SDXC_CTRL_VDD_180;
+ fhmci_trace(3, "set voltage %d[addr 0x%x]", ctrl, MCI_UHS_REG);
+ fhmci_writel(ctrl, host->base + MCI_UHS_REG);
+
+ fh_mci_ctrl_power(host, POWER_OFF);
+ break;
+ case MMC_POWER_UP:
+ case MMC_POWER_ON:
+ fh_mci_ctrl_power(host, POWER_ON);
+ break;
+ }
+ fhmci_trace(3, "ios->clock = %d ", ios->clock);
+ if (ios->clock) {
+
+ fh_mci_control_cclk(host, DISABLE);
+ fh_mci_set_cclk(host, ios->clock);
+ fh_mci_control_cclk(host, ENABLE);
+
+ /* speed mode check ,if it is DDR50 set DDR mode*/
+ if ((ios->timing == MMC_TIMING_UHS_DDR50)) {
+ ctrl = fhmci_readl(host->base + MCI_UHS_REG);
+ if (!(FH_SDXC_CTRL_DDR_REG & ctrl)) {
+ ctrl |= FH_SDXC_CTRL_DDR_REG;
+ fhmci_writel(ctrl, host->base + MCI_UHS_REG);
+ }
+ }
+ } else {
+ fh_mci_control_cclk(host, DISABLE);
+ if ((ios->timing != MMC_TIMING_UHS_DDR50)) {
+ ctrl = fhmci_readl(host->base + MCI_UHS_REG);
+ if (FH_SDXC_CTRL_DDR_REG & ctrl) {
+ ctrl &= ~FH_SDXC_CTRL_DDR_REG;
+ fhmci_writel(ctrl, host->base + MCI_UHS_REG);
+ }
+ }
+ }
+
+ /* set bus_width */
+ fhmci_trace(3, "ios->bus_width = %d ", ios->bus_width);
+ if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ tmp_reg = fhmci_readl(host->base + MCI_CTYPE);
+ tmp_reg |= CARD_WIDTH;
+ fhmci_writel(tmp_reg, host->base + MCI_CTYPE);
+ } else {
+ tmp_reg = fhmci_readl(host->base + MCI_CTYPE);
+ tmp_reg &= ~CARD_WIDTH;
+ fhmci_writel(tmp_reg, host->base + MCI_CTYPE);
+ }
+}
+
+static void fhmci_enable_sdio_irq(struct mmc_host *host, int enable)
+{
+ struct fhmci_host *fh_host = mmc_priv(host);
+ unsigned int reg_value;
+ unsigned long flags;
+
+ if (enable) {
+ local_irq_save(flags);
+
+ reg_value = fhmci_readl(fh_host->base + MCI_INTMASK);
+ reg_value |= 0x10000;
+ fhmci_writel(reg_value, fh_host->base + MCI_INTMASK);
+ local_irq_restore(flags);
+ } else {
+ reg_value = fhmci_readl(fh_host->base + MCI_INTMASK);
+ reg_value &= ~0xffff0000;
+ fhmci_writel(reg_value, fh_host->base + MCI_INTMASK);
+ }
+
+}
+
+
+static int fh_mci_get_ro(struct mmc_host *mmc)
+{
+ unsigned ret;
+ struct fhmci_host *host = mmc_priv(mmc);
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(mmc);
+
+ ret = host->get_ro(host);
+
+ return ret;
+}
+
+/**
+ * @brief get the status of SD card's CD pin
+ *
+ * @param [in] mmc host struct
+ *
+ * @return "1": sd card in the slot, "0": sd card is not in the slot, "2":status of sd card no changed
+ */
+static int fh_mci_get_cd(struct mmc_host *mmc)
+{
+ unsigned int i, curr_status, status[3] = {0}, detect_retry_count = 0;
+ struct fhmci_host *host = mmc_priv(mmc);
+
+ while (1) {
+ for (i = 0; i < 3; i++) {
+ status[i] = host->get_cd(host);
+ udelay(10);
+ }
+ if ((status[0] == status[1]) && (status[0] == status[2]))
+ break;
+
+ detect_retry_count++;
+ if (detect_retry_count >= retry_count) {
+ fhmci_error("this is a dithering,card detect error!");
+ goto err;
+ }
+ }
+ curr_status = status[0];
+ if (curr_status != host->card_status) {
+ host->card_status = curr_status;
+ if (curr_status != CARD_UNPLUGED) {
+ fh_mci_init_card(host);
+ printk(KERN_INFO "card%d connected!\n", host->id);
+ mmc->rescan_count = 0;
+ return 1;
+ } else {
+ printk(KERN_INFO "card%d disconnected!\n", host->id);
+ return 0;
+ }
+ }
+ if (mmc->card == NULL)
+ fh_mci_init_card(host);
+err:
+ return 2;
+}
+
+static const struct mmc_host_ops fh_mci_ops = {
+ .request = fh_mci_request,
+ .set_ios = fh_mci_set_ios,
+ .get_ro = fh_mci_get_ro,
+ .enable_sdio_irq = fhmci_enable_sdio_irq,
+ .get_cd = fh_mci_get_cd,
+};
+
+static irqreturn_t hisd_irq(int irq, void *dev_id)
+{
+ struct fhmci_host *host = dev_id;
+ u32 state = 0;
+ int handle = 0;
+
+ state = fhmci_readl(host->base + MCI_RINTSTS);
+
+#ifndef CONFIG_SEND_AUTO_STOP
+ /* bugfix: when send soft stop to SD Card, Host will report
+ sdio interrupt, This situation needs to be avoided */
+ if ((host->mmc->card != NULL)
+ && (host->mmc->card->type == MMC_TYPE_SDIO)) {
+#endif
+ if (state & SDIO_INT_STATUS) {
+ if (fhmci_readl(host->base + MCI_INTMASK)
+ & SDIO_INT_STATUS) {
+ fhmci_writel(SDIO_INT_STATUS,
+ host->base + MCI_RINTSTS);
+ mmc_signal_sdio_irq(host->mmc);
+ handle = 1;
+ }
+ }
+#ifndef CONFIG_SEND_AUTO_STOP
+ }
+#endif
+
+ if (state & DATA_INT_MASK) {
+ handle = 1;
+ host->pending_events |= FHMCI_PEND_DTO_m;
+
+ host->irq_status = fhmci_readl(host->base + MCI_RINTSTS);
+ if (host->irq_status
+ & (DCRC_INT_STATUS|SBE_INT_STATUS|EBE_INT_STATUS)) {
+ printk(KERN_ERR "SDC CRC error:%08x,.\n",
+ host->irq_status);
+ }
+ fhmci_writel(DATA_INT_MASK , host->base + MCI_RINTSTS);
+
+ wake_up(&host->intr_wait);
+ }
+
+ /*if (state & 0x10000) {
+ handle = 1;
+ fhmci_writel(0x10000, host->base + MCI_RINTSTS);
+ mmc_signal_sdio_irq(host->mmc);
+ }*/
+
+ if (handle)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+static int __devinit fh_mci_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct mmc_host *mmc;
+ struct fhmci_host *host = NULL;
+ int ret = 0, irq;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(pdev);
+
+ mmc = mmc_alloc_host(sizeof(struct fhmci_host), &pdev->dev);
+ if (!mmc) {
+ fhmci_error("no mem for hi mci host controller!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ host = mmc_priv(mmc);
+ host->pdata = pdev->dev.platform_data;
+ mmc->ops = &fh_mci_ops;
+ mmc->rescan_disable = FALSE;
+ mmc->f_min = DIV_ROUND_UP(host->pdata->bus_hz, 510);
+ mmc->f_max = host->pdata->bus_hz;
+
+ if (host->pdata->caps)
+ mmc->caps = host->pdata->caps;
+ else
+ mmc->caps = 0;
+
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+ if (0 == pdev->id) {
+ mmc_sd0 = mmc;
+ } else if (1 == pdev->id) {
+ mmc_sd1 = mmc;
+ } else {
+ fhmci_error("fhmci host id error!");
+ goto out;
+ }
+ /* reload by this controller */
+ mmc->max_blk_count = 2048;
+ mmc->max_segs = 1024;
+ mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->ocr = mmc->ocr_avail;
+
+ host->dma_vaddr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &host->dma_paddr, GFP_KERNEL);
+ if (!host->dma_vaddr) {
+ fhmci_error("no mem for fhmci dma!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host->mmc = mmc;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ fhmci_error("request resource error!\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ host->id = pdev->id;
+ host->base = ioremap_nocache(regs->start, regs->end - regs->start + 1);
+ if (!host->base) {
+ fhmci_error("no mem for fhmci base!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (host->pdata->init)
+ host->pdata->init(pdev->id, NULL, NULL);
+
+ if (host->pdata->get_cd)
+ host->get_cd = host->pdata->get_cd;
+ else
+ host->get_cd = fh_mci_sys_card_detect;
+
+ if (host->pdata->get_ro)
+ host->get_ro = host->pdata->get_ro;
+ else
+ host->get_ro = fh_mci_ctrl_card_readonly;
+
+ /* config fifo depth */
+ host->pdata->fifo_depth =
+ (fhmci_readl(host->base + MCI_FIFOTH) & 0xfff0000) >> 16;
+
+ /* enable card */
+ spin_lock_init(&host->lock);
+ platform_set_drvdata(pdev, mmc);
+ mmc_add_host(mmc);
+
+ fhmci_writel(SD_POWER_ON, host->base + MCI_PWREN);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ printk(KERN_ERR "no IRQ defined!\n");
+ goto out;
+ }
+
+ init_waitqueue_head(&host->intr_wait);
+
+ host->irq = irq;
+ ret = request_irq(irq, hisd_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret) {
+ printk(KERN_ERR "request_irq error!\n");
+ goto out;
+ }
+
+
+ return 0;
+out:
+ if (host) {
+
+ if (host->base)
+ iounmap(host->base);
+
+ if (host->dma_vaddr)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ host->dma_vaddr, host->dma_paddr);
+ }
+ if (mmc)
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+/* for wifi Cypress 43438/43455
+ * (Note: sd_id is the sdio index used by wifi)
+ */
+void fh_sdio_card_scan(int sd_id)
+{
+ if ((NULL != mmc_sd1) && (sd_id == 1)) {
+ printk(KERN_ERR "%s-%d(mmc_sd1->caps 0x%lx), enter\n",
+ __func__, __LINE__, mmc_sd1->caps);
+ mmc_sd1->caps &= ~MMC_CAP_NEEDS_POLL;
+ mmc_sd1->caps &= ~MMC_CAP_NONREMOVABLE;
+ /* mmc_host中rescan_count清0以触发 mmc_rescan() */
+ mmc_sd1->rescan_count = 0;
+ mmc_detect_change(mmc_sd1, 0);
+ msleep(100);
+ mmc_sd1->caps |= MMC_CAP_NONREMOVABLE;
+ printk(KERN_ERR "%s-%d mmc_sd1->caps 0x%lx\n", __func__,
+ __LINE__, mmc_sd1->caps);
+ } else if ((NULL != mmc_sd0) && (sd_id == 0)) {
+ printk(KERN_ERR "%s-%d(mmc_sd0->caps 0x%lx), enter\n",
+ __func__, __LINE__, mmc_sd0->caps);
+ mmc_sd0->caps &= ~MMC_CAP_NEEDS_POLL;
+ mmc_sd0->caps &= ~MMC_CAP_NONREMOVABLE;
+ /* mmc_host中rescan_count清0以触发 mmc_rescan() */
+ mmc_sd0->rescan_count = 0;
+ mmc_detect_change(mmc_sd0, 0);
+ msleep(100);
+ mmc_sd0->caps |= MMC_CAP_NONREMOVABLE;
+ printk(KERN_ERR "%s-%d mmc_sd0->caps 0x%lx\n",
+ __func__, __LINE__, mmc_sd0->caps);
+ } else {
+ printk(KERN_ERR "%s-%d, sd_id invalid!\n", __func__, __LINE__);
+ }
+
+ return;
+}
+EXPORT_SYMBOL_GPL(fh_sdio_card_scan);
+
+static int __devexit fh_mci_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (mmc) {
+ struct fhmci_host *host = mmc_priv(mmc);
+
+ free_irq(host->irq, host);
+ mmc_remove_host(mmc);
+ fh_mci_ctrl_power(host, POWER_OFF);
+ fh_mci_control_cclk(host, DISABLE);
+ iounmap(host->base);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->dma_vaddr,
+ host->dma_paddr);
+ mmc_free_host(mmc);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int fh_mci_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct fhmci_host *host;
+ int ret = 0;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(dev);
+
+ if (mmc) {
+ ret = mmc_suspend_host(mmc);
+
+ host = mmc_priv(mmc);
+ }
+
+ fhmci_trace(2, "end");
+
+ return ret;
+}
+
+static int fh_mci_resume(struct platform_device *dev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct fhmci_host *host;
+ int ret = 0;
+
+ fhmci_trace(2, "begin");
+ fhmci_assert(dev);
+
+ if (mmc) {
+ host = mmc_priv(mmc);
+
+ /* enable card */
+ fh_mci_init_card(host);
+
+ ret = mmc_resume_host(mmc);
+ }
+
+ fhmci_trace(2, "end");
+
+ return ret;
+}
+#else
+#define fh_mci_suspend NULL
+#define fh_mci_resume NULL
+#endif
+
+
+static struct platform_driver fh_mci_driver = {
+ .probe = fh_mci_probe,
+ .remove = fh_mci_remove,
+ .suspend = fh_mci_suspend,
+ .resume = fh_mci_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+
+static ssize_t fh_mci_rescan_control(struct class *cls,
+ struct class_attribute *attr, const char *_buf, size_t _count)
+{
+ int cmd = 0;
+ int err = 0;
+
+ err = kstrtoint(_buf, 10, &cmd);
+ if (err)
+ return _count;
+
+ if (cmd) {
+ if (mmc_sd0)
+ mmc_sd0->rescan_count = 0;
+ if (mmc_sd1)
+ mmc_sd1->rescan_count = 0;
+ }
+ return _count;
+}
+
+static struct class *fhmci_rescan_class;
+
+static CLASS_ATTR(mmc_rescan, 0666, NULL, fh_mci_rescan_control);
+
+static void fh_mci_rescan_init(void)
+{
+ int err = 0;
+
+ fhmci_rescan_class = class_create(THIS_MODULE, "fhmci");
+ err = class_create_file(fhmci_rescan_class, &class_attr_mmc_rescan);
+ if (err)
+ fhmci_error("fhmci_rescan_class: create class file failed!");
+}
+
+static int __init fh_mci_init(void)
+{
+ int ret = 0;
+
+ fhmci_trace(2, "mci init begin");
+ fh_mci_rescan_init();
+
+ ret = platform_driver_register(&fh_mci_driver);
+ if (ret)
+ fhmci_error("Platform driver register is failed!");
+
+ return ret;
+}
+
+static void __exit fh_mci_exit(void)
+{
+ fhmci_trace(2, "begin");
+ platform_driver_unregister(&fh_mci_driver);
+}
+module_init(fh_mci_init);
+module_exit(fh_mci_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/fhmci/fhmci_reg.h b/drivers/mmc/host/fhmci/fhmci_reg.h
new file mode 100644
index 00000000..b4a215cf
--- /dev/null
+++ b/drivers/mmc/host/fhmci/fhmci_reg.h
@@ -0,0 +1,182 @@
+#ifndef _FH_MCI_REG_H_
+#define _FH_MCI_REG_H_
+
+#define MCI_CTRL 0x00
+#define MCI_PWREN 0x04
+#define MCI_CLKDIV 0x08
+#define MCI_CLKSRC 0x0C
+#define MCI_CLKENA 0x10
+#define MCI_TIMEOUT 0x14
+#define MCI_CTYPE 0x18
+#define MCI_BLKSIZ 0x1c
+#define MCI_BYTCNT 0x20
+#define MCI_INTMASK 0x24
+#define MCI_CMDARG 0x28
+#define MCI_CMD 0x2C
+#define MCI_RESP0 0x30
+#define MCI_RESP1 0x34
+#define MCI_RESP2 0x38
+#define MCI_RESP3 0x3C
+#define MCI_MINTSTS 0x40
+#define MCI_RINTSTS 0x44
+#define MCI_STATUS 0x48
+#define MCI_FIFOTH 0x4C
+#define MCI_CDETECT 0x50
+#define MCI_WRTPRT 0x54
+#define MCI_GPIO 0x58
+#define MCI_TCBCNT 0x5C
+#define MCI_TBBCNT 0x60
+#define MCI_DEBNCE 0x64
+#define MCI_USRID 0x68
+#define MCI_VERID 0x6C
+#define MCI_HCON 0x70
+#define MCI_UHS_REG 0x74
+#define MCI_BMOD 0x80
+#define MCI_DBADDR 0x88
+#define MCI_IDSTS 0x8C
+#define MCI_IDINTEN 0x90
+#define MCI_DSCADDR 0x94
+#define MCI_BUFADDR 0x98
+#define MCI_READ_THRESHOLD_SIZE 0x100
+#define MCI_UHS_EXT 0x108
+/* MCI_UHS_REG(0x74) details */
+#define FH_SDXC_CTRL_VDD_180 (1<<0)
+#define FH_SDXC_CTRL_DDR_REG (1<<16)
+
+/* MCI_BMOD(0x80) details */
+#define BMOD_SWR (1<<0)
+#define BURST_INCR (1<<1)
+#define BURST_8 (0x2<<8)
+
+/* MCI_CTRL(0x00) details */
+#define CTRL_RESET (1<<0)
+#define FIFO_RESET (1<<1)
+#define DMA_RESET (1<<2)
+#define INTR_EN (1<<4)
+#define USE_INTERNAL_DMA (1<<25)
+
+/* IDMAC DEST1 details */
+#define DMA_BUFFER 0x2000
+#define MAX_DMA_DES (20480)
+
+/* MCI_CDETECT(0x50) details */
+#define FHMCI_CARD0 (1<<0)
+
+/* MCI_TIMEOUT(0x14) details: */
+/* bits 31-8: data read timeout parameter */
+#define DATA_TIMEOUT (0xffffff<<8)
+
+/* bit 7-0: response timeout param */
+#define RESPONSE_TIMEOUT 0xff
+
+/* bit 0: enable of card clk*/
+#define CCLK_ENABLE (1<<0)
+
+/* IDMAC DEST0 details */
+#define DMA_DES_OWN (1<<31)
+#define DMA_DES_NEXT_DES (1<<4)
+#define DMA_DES_FIRST_DES (1<<3)
+#define DMA_DES_LAST_DES (1<<2)
+
+/* MCI_BMOD(0x80) details */
+#define BMOD_DMA_EN (1<<7)
+
+/* MCI_CTYPE(0x18) details */
+#define CARD_WIDTH (0x1<<0)
+
+/* MCI_INTMASK(0x24) details:
+ bits 16-1: mask each MMC host controller interrupt
+*/
+#define ALL_INT_MASK 0x1ffff
+#define DTO_INT_MASK (1<<3)
+
+/* bit[18:16] sampling phase */
+#define CLK_SMPL_PHS_MASK (7<<16)
+
+/* MCI_CMD(0x2c) details:
+ bit 31: start bit; triggers command execution or loads interface clock parameters
+*/
+#define START_CMD (1<<31)
+
+
+/* MCI_INTSTS(0x44) details */
+/***************************************************************/
+/* bit 16: sdio interrupt status */
+#define SDIO_INT_STATUS (0x1<<16)
+
+/* bit 15: end-bit error (read)/write no CRC interrupt status */
+#define EBE_INT_STATUS (0x1<<15)
+
+/* bit 14: auto command done interrupt status */
+#define ACD_INT_STATUS (0x1<<14)
+
+/* bit 13: start bit error interrupt status */
+#define SBE_INT_STATUS (0x1<<13)
+
+/* bit 12: hardware locked write error interrupt status */
+#define HLE_INT_STATUS (0x1<<12)
+
+/* bit 11: FIFO underrun/overrun error interrupt status */
+#define FRUN_INT_STATUS (0x1<<11)
+
+/* bit 10: data starvation-by-host timeout interrupt status */
+#define HTO_INT_STATUS (0x1<<10)
+
+/* bit 10 (shared with HTO): voltage switch to 1.8V done, for SDXC */
+#define VOLT_SWITCH_INT_STATUS (0x1<<10)
+
+/* bit 9: data read timeout interrupt status */
+#define DRTO_INT_STATUS (0x1<<9)
+
+/* bit 8: response timeout interrupt status */
+#define RTO_INT_STATUS (0x1<<8)
+
+/* bit 7: data CRC error interrupt status */
+#define DCRC_INT_STATUS (0x1<<7)
+
+/* bit 6: response CRC error interrupt status */
+#define RCRC_INT_STATUS (0x1<<6)
+
+/* bit 5: receive FIFO data request interrupt status */
+#define RXDR_INT_STATUS (0x1<<5)
+
+/* bit 4: transmit FIFO data request interrupt status */
+#define TXDR_INT_STATUS (0x1<<4)
+
+/* bit 3: data transfer Over interrupt status */
+#define DTO_INT_STATUS (0x1<<3)
+
+/* bit 2: command done interrupt status */
+#define CD_INT_STATUS (0x1<<2)
+
+/* bit 1: response error interrupt status */
+#define RE_INT_STATUS (0x1<<1)
+#define DATA_INT_MASK (DTO_INT_STATUS | DCRC_INT_STATUS \
+ | SBE_INT_STATUS | EBE_INT_STATUS)
+/***************************************************************/
+
+/* MCI_RINTSTS(0x44) details: bits 16-1 clear
+ every MMC host controller interrupt except the
+ hardware locked write error interrupt
+*/
+#define ALL_INT_CLR 0x1efff
+
+#define PHASE_SHIFT 0x1030000
+#define READ_THRESHOLD_SIZE 0x2000001
+
+/* MCI_STATUS(0x48) details */
+#define DATA_BUSY (0x1<<9)
+
+/* MCI_FIFOTH(0x4c) details */
+
+#define BURST_SIZE (0x2<<28)
+#define RX_WMARK (0x7<<16)
+#define TX_WMARK 0x8
+
+/*
+#define BURST_SIZE (0x6<<28)
+#define RX_WMARK (0x7f<<16)
+#define TX_WMARK 0x80
+*/
+
+#endif
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 4be8373d..98f2b8b0 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -330,6 +330,8 @@ source "drivers/mtd/onenand/Kconfig"
source "drivers/mtd/lpddr/Kconfig"
+source "drivers/mtd/spi-nand/Kconfig"
+
source "drivers/mtd/ubi/Kconfig"
endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 39664c42..baf5b647 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -31,4 +31,5 @@ inftl-objs := inftlcore.o inftlmount.o
obj-y += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
+obj-$(CONFIG_MTD_SPI_NAND) += spi-nand/
obj-$(CONFIG_MTD_UBI) += ubi/
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index e790f388..398cd61a 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -48,7 +48,7 @@
#define ERRP "mtd: "
/* debug macro */
-#if 0
+#if 1
#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
#else
#define dbg(x)
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
old mode 100644
new mode 100755
index 35180e47..8536355d
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -47,10 +47,15 @@
#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
#define OPCODE_RDID 0x9f /* Read JEDEC ID */
+#define OPCODE_DUAL_READ_3_ADR 0x3b
+#define OPCODE_QUAD_READ_3_ADR 0x6b
+#define OPCODE_DUAL_READ_4_ADR 0x3c
+#define OPCODE_QUAD_READ_4_ADR 0x6c
+
/* Used for SST flashes only. */
#define OPCODE_BP 0x02 /* Byte program */
#define OPCODE_WRDI 0x04 /* Write disable */
-#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
+#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
/* Used for Macronix flashes only. */
#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
@@ -161,15 +166,103 @@ static inline int write_disable(struct m25p *flash)
return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
}
+
+/*
+ * Enable/disable QE.
+ */
+static inline int set_qe(struct m25p *flash, u32 jedec_id, int enable)
+{
+ int ret = 0;
+ struct spi_device *spi = flash->spi;
+ u8 send_cmd[5] = {0};
+ u8 get_data[5] = {0};
+
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_MACRONIX:
+ send_cmd[0] = 0x05;
+ ret = spi_write_then_read(spi, send_cmd, 1, get_data, 1);
+ if(ret < 0)
+ return -1;
+ get_data[0] &= ~(1 << 6);
+ get_data[0] |= (enable << 6);
+ send_cmd[0] = 0x01;
+ send_cmd[1] = get_data[0];
+ write_enable(flash);
+ ret = spi_write(spi, send_cmd, 2);
+ if (ret < 0)
+ return -1;
+ break;
+ case 0xC8 /* GD */ :
+ send_cmd[0] = 0x35;
+ ret = spi_write_then_read(spi, send_cmd, 1, get_data, 1);
+ if (ret < 0)
+ return -1;
+ get_data[0] &= ~(1 << 1);
+ get_data[0] |= (enable << 1);
+ send_cmd[0] = 0x31;
+ send_cmd[1] = get_data[0];
+ write_enable(flash);
+ ret = spi_write(spi, send_cmd, 2);
+ if (ret < 0)
+ return -1;
+ break;
+ case 0xEF /* winbond */:
+ /* status 0 */
+ send_cmd[0] = 0x05;
+ ret = spi_write_then_read(spi, send_cmd, 1, get_data, 1);
+ if (ret < 0)
+ return -1;
+ /* status 1 */
+ send_cmd[0] = 0x35;
+ ret = spi_write_then_read(spi, send_cmd, 1, &get_data[1], 1);
+ if (ret < 0)
+ return -1;
+ get_data[1] &= ~(1 << 1);
+ get_data[1] |= (enable << 1);
+ send_cmd[0] = 0x01;
+ send_cmd[1] = get_data[0];
+ send_cmd[2] = get_data[1];
+ write_enable(flash);
+ ret = spi_write(spi, send_cmd, 3);
+ if (ret < 0)
+ return -1;
+ break;
+ case 0x20 /* xmc */:
+ /* 16MB and 8MB default support multi wire*/
+ break;
+ default:
+ ret = -1;
+ dev_err(&spi->dev, "%s : %d default not support multi wire..\n", __func__, __LINE__);
+ break;
+ }
+ return ret;
+
+}
/*
* Enable/disable 4-byte addressing mode.
*/
static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
+ int ret;
switch (JEDEC_MFR(jedec_id)) {
case CFI_MFR_MACRONIX:
+ case CFI_MFR_ST: /* Micron, actually */
+ case 0xC8 /* GD */ :
+ flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
+ ret = spi_write(flash->spi, flash->command, 1);
+ return ret;
+ case 0xEF /* winbond */:
flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
- return spi_write(flash->spi, flash->command, 1);
+ ret = spi_write(flash->spi, flash->command, 1);
+ if (!enable)
+ {
+ flash->command[0] = 0x06;
+ spi_write(flash->spi, flash->command, 1);
+ flash->command[0] = 0xc5;
+ flash->command[1] = 0x00;
+ ret = spi_write(flash->spi, flash->command, 2);
+ }
+ return ret;
default:
/* Spansion style */
flash->command[0] = OPCODE_BRWR;
@@ -178,6 +271,7 @@ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
}
}
+
/*
* Service routine to read status register until ready, or timeout occurs.
* Returns non-zero if error.
@@ -192,7 +286,7 @@ static int wait_till_ready(struct m25p *flash)
do {
if ((sr = read_sr(flash)) < 0)
break;
- else if (!(sr & SR_WIP))
+ else if (!(sr & (SR_WIP | SR_WEL)))
return 0;
cond_resched();
@@ -202,6 +296,43 @@ static int wait_till_ready(struct m25p *flash)
return 1;
}
+
+static int reset_chip(struct m25p *flash, u32 jedec_id)
+{
+ int ret;
+ mutex_lock(&flash->lock);
+
+ /* Wait till previous write/erase is done. */
+ if (wait_till_ready(flash)) {
+ mutex_unlock(&flash->lock);
+ return 1;
+ }
+
+ switch (JEDEC_MFR(jedec_id)) {
+ case 0x9F: /* S25FL128/256S spansion */
+ flash->command[0] = 0xFF;
+ ret = spi_write(flash->spi, flash->command, 1);
+ flash->command[0] = 0xF0;
+ ret = spi_write(flash->spi, flash->command, 1);
+ mutex_unlock(&flash->lock);
+ return ret;
+ case 0xef: /*winbond*/
+ case 0xc8: /*GD*/
+ flash->command[0] = 0x66;
+ ret = spi_write(flash->spi, flash->command, 1);
+ flash->command[0] = 0x99;
+ ret = spi_write(flash->spi, flash->command, 1);
+ udelay(100);
+ mutex_unlock(&flash->lock);
+ return ret;
+ case CFI_MFR_MACRONIX:
+ case CFI_MFR_ST: /* Micron, actually */
+ default:
+ mutex_unlock(&flash->lock);
+ return 0;
+ }
+}
+
/*
* Erase the whole flash memory
*
@@ -283,7 +414,7 @@ static int erase_sector(struct m25p *flash, u32 offset)
static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct m25p *flash = mtd_to_m25p(mtd);
- u32 addr,len;
+ u32 addr, len;
uint32_t rem;
DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
@@ -341,13 +472,49 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
* Read an address range from the flash chip. The address range
* may be any size provided it is within the physical boundaries.
*/
+
+void fix_read_cmd(struct m25p *p_m25p, struct spi_transfer *p_trans)
+{
+ struct spi_device *spi;
+ struct m25p *flash;
+ flash = p_m25p;
+ spi = flash->spi;
+	/* if the SPI device was opened in multi-wire mode, pick the matching read opcode */
+	if (spi->dev_open_multi_wire_flag & MULTI_WIRE_SUPPORT) {
+ p_trans[0].xfer_wire_mode = ONE_WIRE_SUPPORT;
+ if (spi->dev_open_multi_wire_flag & QUAD_WIRE_SUPPORT) {
+ p_trans[1].xfer_wire_mode = QUAD_WIRE_SUPPORT;
+ p_trans[1].xfer_dir = SPI_DATA_DIR_IN;
+ if (flash->addr_width == 4)
+ flash->command[0] = OPCODE_QUAD_READ_4_ADR;
+ else
+ flash->command[0] = OPCODE_QUAD_READ_3_ADR;
+ } else if (spi->dev_open_multi_wire_flag & DUAL_WIRE_SUPPORT) {
+ p_trans[1].xfer_wire_mode = DUAL_WIRE_SUPPORT;
+ p_trans[1].xfer_dir = SPI_DATA_DIR_IN;
+ if (flash->addr_width == 4)
+ flash->command[0] = OPCODE_DUAL_READ_4_ADR;
+ else
+ flash->command[0] = OPCODE_DUAL_READ_3_ADR;
+ } else {
+ /*p_trans[0].xfer_wire_mode = ONE_WIRE_SUPPORT;*/
+ p_trans[1].xfer_wire_mode = ONE_WIRE_SUPPORT;
+ p_trans[1].xfer_dir = SPI_DATA_DIR_IN;
+ flash->command[0] = OPCODE_READ; }
+ } else
+ flash->command[0] = OPCODE_READ;
+}
+
static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct m25p *flash = mtd_to_m25p(mtd);
struct spi_transfer t[2];
struct spi_message m;
-
+ struct spi_device *spi;
+ struct spi_master *master;
+ spi = flash->spi;
+ master = spi->master;
DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
dev_name(&flash->spi->dev), __func__, "from",
(u32)from, len);
@@ -392,13 +559,19 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
/* Set up the write data buffer. */
- flash->command[0] = OPCODE_READ;
+ /*fix cmd here...*/
+ fix_read_cmd(flash, t);
m25p_addr2cmd(flash, from, flash->command);
spi_sync(flash->spi, &m);
*retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
-
+ /*back to one wire..*/
+ if (spi->dev_open_multi_wire_flag & MULTI_WIRE_SUPPORT) {
+ /*change to one wire here first.....*/
+ /*printk("back to one wire..\n");*/
+ master->ctl_multi_wire_info.change_to_1_wire(master);
+ }
mutex_unlock(&flash->lock);
return 0;
@@ -425,7 +598,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
/* sanity checks */
if (!len)
- return(0);
+ return 0;
if (to + len > flash->mtd.size)
return -EINVAL;
@@ -630,9 +803,11 @@ struct flash_info {
u16 flags;
#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
#define M25P_NO_ERASE 0x02 /* No erase command needed */
+
+ u32 multi_wire_open;
};
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags, _multi_wire_flag) \
((kernel_ulong_t)&(struct flash_info) { \
.jedec_id = (_jedec_id), \
.ext_id = (_ext_id), \
@@ -640,135 +815,162 @@ struct flash_info {
.n_sectors = (_n_sectors), \
.page_size = 256, \
.flags = (_flags), \
+ .multi_wire_open = (_multi_wire_flag),\
})
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _multi_wire_flag) \
((kernel_ulong_t)&(struct flash_info) { \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
.flags = M25P_NO_ERASE, \
+ .multi_wire_open = (_multi_wire_flag),\
})
/* NOTE: double check command sets and memory organization when you add
* more flash chips. This current list focusses on newer chips, which
* have been converging on command sets which including JEDEC ID.
*/
+#define M25P80_4_WIRE_ALL_SUPPORT (ONE_WIRE_SUPPORT | \
+DUAL_WIRE_SUPPORT | QUAD_WIRE_SUPPORT | MULTI_WIRE_SUPPORT)
+#define M25P80_2_WIRE_ALL_SUPPORT (ONE_WIRE_SUPPORT | \
+DUAL_WIRE_SUPPORT | MULTI_WIRE_SUPPORT)
static const struct spi_device_id m25p_ids[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K, 0) },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K, 0) },
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K, 0) },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K, 0) },
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K, 0) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K, 0) },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K, 0) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K, 0) },
/* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K, 0) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0, 0) },
/* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0, 0) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0, 0) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0, 0) },
/* Macronix */
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K, 0) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K, 0) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0, 0) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0, 0) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0, 0) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0, 0) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
- { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
- { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
-
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0, 0) },
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0, 0) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0, 0) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0, 0) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0, 0) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0, 0) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0, 0) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0, 0) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) },
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K, 0) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K, 0) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K, 0) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K, 0) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K, 0) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K, 0) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K, 0) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K, 0) },
/* ST Microelectronics -- newer production may have feature updates */
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
-
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
-
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0, 0) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0, 0) },
+
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K, 0) },
+
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K, 0) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K, 0) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K, 0) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0, 0) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
-
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K, 0) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K, 0) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K, 0) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K, 0) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K, 0) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K, 0) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K, 0) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K, 0) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K, 0) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K, 0) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K, 0) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
+ { "w25q16", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K,
+ M25P80_4_WIRE_ALL_SUPPORT) },
/* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
- { "cat25c03", CAT25_INFO( 32, 8, 16, 2) },
- { "cat25c09", CAT25_INFO( 128, 8, 32, 2) },
- { "cat25c17", CAT25_INFO( 256, 8, 32, 2) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
+ { "cat25c11", CAT25_INFO(16, 8, 16, 1, 0) },
+ { "cat25c03", CAT25_INFO(32, 8, 16, 2, 0) },
+ { "cat25c09", CAT25_INFO(128, 8, 32, 2, 0) },
+ { "cat25c17", CAT25_INFO(256, 8, 32, 2, 0) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2, 0) },
+
+ /*for GD flash..*/
+ { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
+ { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
+ /*for xmc flash..*/
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, 0,
+ M25P80_4_WIRE_ALL_SUPPORT) },
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
@@ -827,12 +1029,19 @@ static int __devinit m25p_probe(struct spi_device *spi)
unsigned i;
struct mtd_partition *parts = NULL;
int nr_parts = 0;
-
+ struct spi_master *p_master;
+ int ret;
/* Platform data helps sort out which chip type we have, as
* well as how this board partitions it. If we don't have
* a chip ID, try the JEDEC id commands; they'll work for most
* newer chips, even if we don't recognize the particular chip.
*/
+ p_master = spi->master;
+ if (p_master->ctl_multi_wire_info.ctl_wire_support
+ & MULTI_WIRE_SUPPORT) {
+ /*if master support multi wire, set one wire here..*/
+ p_master->ctl_multi_wire_info.change_to_1_wire(p_master);
+ }
data = spi->dev.platform_data;
if (data && data->type) {
const struct spi_device_id *plat_id;
@@ -876,7 +1085,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash = kzalloc(sizeof *flash, GFP_KERNEL);
if (!flash)
return -ENOMEM;
- flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
+ flash->command =
+ kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
if (!flash->command) {
kfree(flash);
return -ENOMEM;
@@ -909,6 +1119,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.size = info->sector_size * info->n_sectors;
flash->mtd.erase = m25p80_erase;
flash->mtd.read = m25p80_read;
+ flash->mtd.priv = (void *)info->jedec_id;
/* sst flash chips use AAI word program */
if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
@@ -941,6 +1152,12 @@ static int __devinit m25p_probe(struct spi_device *spi)
} else
flash->addr_width = 3;
}
+ /* add set qe bit here.. */
+ ret = set_qe(flash, info->jedec_id, 1);
+ if (ret != 0)
+ info->multi_wire_open = 0;
+ spi->dev_open_multi_wire_flag = info->multi_wire_open;
+ spi_dev_set_multi_data(p_master, spi);
dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
(long long)flash->mtd.size >> 10);
@@ -1005,6 +1222,20 @@ static int __devinit m25p_probe(struct spi_device *spi)
-ENODEV : 0;
}
+static void m25p_shutdown(struct spi_device *spi)
+{
+ struct m25p *flash = dev_get_drvdata(&spi->dev);
+ u32 jedec = (u32)flash->mtd.priv;
+ dev_err(&spi->dev, "[m25] shutdown here? \n");
+ if (flash->addr_width == 4) {
+ set_4byte(flash, jedec, 0);
+ flash->addr_width = 3;
+ }
+
+ if (reset_chip(flash, jedec))
+ dev_err(&spi->dev, "[m25] reset chip error...\n");
+}
+
static int __devexit m25p_remove(struct spi_device *spi)
{
@@ -1030,7 +1261,7 @@ static struct spi_driver m25p80_driver = {
.id_table = m25p_ids,
.probe = m25p_probe,
.remove = __devexit_p(m25p_remove),
-
+ .shutdown = m25p_shutdown,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
* And also when they're otherwise idle...
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
old mode 100644
new mode 100755
index 3326615a..757cb7a9
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -268,7 +268,8 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
{
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
- mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
+ mtdblk->cache_data = kmalloc(mtdblk->mbd.mtd->erasesize,
+ GFP_KERNEL);
if (!mtdblk->cache_data)
return -EINTR;
/* -EINTR is not really correct, but it is the best match
@@ -324,7 +325,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
/* It was the last usage. Free the cache */
if (mbd->mtd->sync)
mbd->mtd->sync(mbd->mtd);
- vfree(mtdblk->cache_data);
+ kfree(mtdblk->cache_data);
}
mutex_unlock(&mtdblks_lock);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 630be3e7..6b89835f 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -72,6 +72,12 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
len = mtd->size - from;
res = part->master->read(part->master, from + part->offset,
len, retlen, buf);
+
+ if ((part->master->bitflip_threshold != 0) &&
+ (res >= part->master->bitflip_threshold))
+ res = -EUCLEAN;
+
+
if (unlikely(res)) {
if (res == -EUCLEAN)
mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
diff --git a/drivers/mtd/spi-nand/Kconfig b/drivers/mtd/spi-nand/Kconfig
new file mode 100644
index 00000000..b4da8f5b
--- /dev/null
+++ b/drivers/mtd/spi-nand/Kconfig
@@ -0,0 +1,7 @@
+menuconfig MTD_SPI_NAND
+ tristate "SPI-NAND device Support"
+ depends on MTD_NAND && SPI
+ help
+ This is the framework for the SPI NAND which can be used by the SPI
+ device drivers and the SPI-NAND device drivers.
+
diff --git a/drivers/mtd/spi-nand/Makefile b/drivers/mtd/spi-nand/Makefile
new file mode 100644
index 00000000..971e7a9d
--- /dev/null
+++ b/drivers/mtd/spi-nand/Makefile
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_MTD_SPI_NAND) += spi-nand-base.o spi-nand-bbt.o spi-nand-device.o spi-nand-ids.o
diff --git a/drivers/mtd/spi-nand/spi-nand-base.c b/drivers/mtd/spi-nand/spi-nand-base.c
new file mode 100644
index 00000000..e96cb71d
--- /dev/null
+++ b/drivers/mtd/spi-nand/spi-nand-base.c
@@ -0,0 +1,2150 @@
+/**
+* spi-nand-base.c
+*
+* Copyright (c) 2009-2014 Micron Technology, Inc.
+*
+* Derived from nand_base.c
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version 2
+* of the License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nand.h>
+#include <linux/mtd/bbm.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include "spi-nand-ids.h"
+
+int fh_start_debug = 0;
+/*#define SPINAND_BBT_DEBUG*/
+#ifdef SPINAND_BBT_DEBUG
+#define fh_dev_debug dev_err
+#else
+#define fh_dev_debug(...)
+#endif
+
+static int spi_nand_erase(struct mtd_info *mtd, struct erase_info *einfo);
+
+/**
+ * spi_nand_get_device - [GENERIC] Get chip for selected access
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int spi_nand_get_device(struct mtd_info *mtd, int new_state)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * Grab the lock and see if the device is available
+ */
+ while (1) {
+ spin_lock(&this->chip_lock);
+ if (this->state == FL_READY) {
+ this->state = new_state;
+ spin_unlock(&this->chip_lock);
+ break;
+ }
+ if (new_state == FL_PM_SUSPENDED) {
+ spin_unlock(&this->chip_lock);
+ return (this->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&this->wq, &wait);
+ spin_unlock(&this->chip_lock);
+ schedule();
+ remove_wait_queue(&this->wq, &wait);
+ }
+ return 0;
+}
+
+/**
+ * spi_nand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device
+ */
+static void spi_nand_release_device(struct mtd_info *mtd)
+{
+ struct spi_nand_chip *this = mtd->priv;
+
+ /* Release the chip */
+ spin_lock(&this->chip_lock);
+ this->state = FL_READY;
+ wake_up(&this->wq);
+ spin_unlock(&this->chip_lock);
+}
+
+/**
+ * __spi_nand_do_read_page - [INTERN] read data from flash to buffer
+ * @mtd: MTD device structure
+ * @page_addr: page address/raw address
+ * @column :column address
+ * @raw: without ecc or not
+ * @corrected: how many bit error corrected
+ *
+ * read a page to buffer pointed by chip->buf
+ */
+static int __spi_nand_do_read_page(struct mtd_info *mtd, u32 page_addr,
+ u32 colunm, bool raw, int *corrected)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+ int ret, ecc_error;
+ u8 status;
+
+ fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
+
+ /*read data from chip*/
+ memset(chip->buf, 0, chip->page_size + chip->page_spare_size);
+ if (raw) {
+ ret = chip->disable_ecc(chip);
+ if (ret < 0) {
+ pr_debug("disable ecc failed\n");
+ return ret;
+ }
+ }
+ ret = chip->load_page(chip, page_addr);
+ if (ret < 0) {
+ pr_debug("error %d loading page 0x%x to cache\n",
+ ret, page_addr);
+ return ret;
+ }
+ ret = chip->waitfunc(chip, &status);
+ if (ret < 0) {
+ pr_debug("error %d waiting page 0x%x to cache\n",
+ ret, page_addr);
+ return ret;
+ }
+ chip->get_ecc_status(chip, status, corrected, &ecc_error);
+ /*
+ * If there's an ECC error, print a message and notify MTD
+ * about it. Then complete the read, to load actual data on
+ * the buffer (instead of the status result).
+ */
+ if (ecc_error) {
+ pr_warn("internal ECC error reading page 0x%x with status 0x%02x\n",
+ page_addr, status);
+ mtd->ecc_stats.failed++;
+ } else if (*corrected)
+ mtd->ecc_stats.corrected += *corrected;
+ /* Get page from the device cache into our internal buffer */
+ ret = chip->read_cache(chip, page_addr, colunm,
+ chip->page_size + chip->page_spare_size - colunm,
+ chip->buf + colunm);
+ if (ret < 0) {
+ pr_debug("error %d reading page 0x%x from cache\n",
+ ret, page_addr);
+ return ret;
+ }
+ if (raw) {
+ ret = chip->enable_ecc(chip);
+ if (ret < 0) {
+ pr_debug("enable ecc failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nand_do_read_page - [INTERN] read a page from flash to buffer
+ * @mtd: MTD device structure
+ * @page_addr: page address/raw address
+ * @raw: without ecc or not
+ * @corrected: how many bit error corrected
+ *
+ * read a page to buffer pointed by chip->buf
+ */
+static int spi_nand_do_read_page(struct mtd_info *mtd, u32 page_addr,
+ bool raw, int *corrected)
+{
+ return __spi_nand_do_read_page(mtd, page_addr, 0, raw, corrected);
+}
+
+/**
+ * spi_nand_do_read_page_oob - [INTERN] read page oob from flash to buffer
+ * @mtd: MTD device structure
+ * @page_addr: page address/raw address
+ * @raw: without ecc or not
+ * @corrected: how many bit error corrected
+ *
+ * read page oob to buffer pointed by chip->oobbuf
+ */
+static int spi_nand_do_read_page_oob(struct mtd_info *mtd, u32 page_addr,
+ bool raw, int *corrected)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+
+ return __spi_nand_do_read_page(mtd, page_addr, chip->page_size,
+ raw, corrected);
+}
+
+
+/**
+ * __spi_nand_do_write_page - [INTERN] write data from buffer to flash
+ * @mtd: MTD device structure
+ * @page_addr: page address/raw address
+ * @column: column address
+ * @raw: without ecc or not
+ *
+ * write data from buffer pointed by chip->buf to flash
+ */
+static int __spi_nand_do_write_page(struct mtd_info *mtd, u32 page_addr,
+ u32 column, bool raw)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+ u8 status;
+ bool p_fail = false;
+ bool p_timeout = false;
+ int ret = 0;
+
+ fh_dev_debug(&chip->spi->dev, "Enter %s, with buf \n", __func__);
+
+ if (raw) {
+ ret = chip->disable_ecc(chip);
+ if (ret < 0) {
+ pr_debug("disable ecc failed\n");
+ return ret;
+ }
+ }
+ ret = chip->write_enable(chip);
+ if (ret < 0) {
+ pr_debug("write enable command failed\n");
+ return ret;
+ }
+ /* Store the page to cache */
+ ret = chip->store_cache(chip, page_addr, column,
+ chip->page_size + chip->page_spare_size - column,
+ chip->buf + column);
+ if (ret < 0) {
+ pr_debug("error %d storing page 0x%x to cache\n",
+ ret, page_addr);
+ return ret;
+ }
+	/* Program the data held in the device cache into the page array */
+ ret = chip->write_page(chip, page_addr);
+ if (ret < 0) {
+ pr_debug("error %d reading page 0x%x from cache\n",
+ ret, page_addr);
+ return ret;
+ }
+ ret = chip->waitfunc(chip, &status);
+ if (ret < 0) {
+ pr_info("error %d write page 0x%x timeout\n",
+ ret, page_addr);
+ return ret;
+ }
+ if ((status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL) {
+ pr_debug("program page 0x%x failed\n", page_addr);
+ p_fail = true;
+ }
+
+ if ((status & STATUS_OIP_MASK) == STATUS_BUSY) {
+ pr_debug("program page 0x%x timeout\n", page_addr);
+ p_timeout = true;
+ }
+ if (raw) {
+ ret = chip->enable_ecc(chip);
+ if (ret < 0) {
+ pr_debug("enable ecc failed\n");
+ return ret;
+ }
+ }
+ if ((p_fail == true)||(p_timeout == true))
+ ret = -EIO;
+
+ return ret;
+}
+
+/**
+ * spi_nand_do_write_page - [INTERN] write page from buffer to flash
+ * @mtd: MTD device structure
+ * @page_addr: page address/raw address
+ * @raw: without ecc or not
+ *
+ * write page from buffer pointed by chip->buf to flash
+ */
+static int spi_nand_do_write_page(struct mtd_info *mtd, u32 page_addr,
+ bool raw)
+{
+ return __spi_nand_do_write_page(mtd, page_addr, 0, raw);
+}
+
+/**
+ * spi_nand_do_write_page_oob - [INTERN] write oob from buffer to flash
+ * @mtd: MTD device structure
+ * @page_addr: page address/raw address
+ * @raw: without ecc or not
+ *
+ * write oob from buffer pointed by chip->oobbuf to flash
+ */
+static int spi_nand_do_write_page_oob(struct mtd_info *mtd, u32 page_addr,
+ bool raw)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+
+ return __spi_nand_do_write_page(mtd, page_addr, chip->page_size, raw);
+}
+
+
+/**
+ * spi_nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: SPI-NAND device structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
+ */
+static void spi_nand_transfer_oob(struct spi_nand_chip *chip, u8 *oob,
+ struct mtd_oob_ops *ops, size_t len)
+{
+ switch (ops->mode) {
+
+ case MTD_OOB_PLACE: /*MTD_OPS_PLACE_OOB:*/
+ case MTD_OOB_RAW: /*MTD_OPS_RAW:*/
+ memcpy(oob, chip->oobbuf + ops->ooboffs, len);
+ return;
+
+ case MTD_OOB_AUTO: { /*MTD_OPS_AUTO_OOB:*/
+ struct nand_oobfree *free = chip->ecclayout->oobfree;
+ uint32_t boffs = 0, roffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for (; free->length && len; free++, len -= bytes) {
+ /* Read request not from offset 0? */
+ if (unlikely(roffs)) {
+ if (roffs >= free->length) {
+ roffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + roffs;
+ bytes = min_t(size_t, len,
+ (free->length - roffs));
+ roffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(oob, chip->oobbuf + boffs, bytes);
+ oob += bytes;
+ }
+ return;
+ }
+ default:
+ BUG();
+ }
+}
+
+/**
+ * spi_nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @chip: SPI-NAND device structure
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static void spi_nand_fill_oob(struct spi_nand_chip *chip, uint8_t *oob,
+ size_t len, struct mtd_oob_ops *ops)
+{
+ fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
+ memset(chip->oobbuf, 0xff, chip->page_spare_size);
+
+ switch (ops->mode) {
+
+ case MTD_OOB_PLACE:
+ case MTD_OOB_RAW:
+ memcpy(chip->oobbuf + ops->ooboffs, oob, len);
+ return;
+
+ case MTD_OOB_AUTO: {
+ struct nand_oobfree *free = chip->ecclayout->oobfree;
+ uint32_t boffs = 0, woffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for (; free->length && len; free++, len -= bytes) {
+ /* Write request not from offset 0? */
+ if (unlikely(woffs)) {
+ if (woffs >= free->length) {
+ woffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + woffs;
+ bytes = min_t(size_t, len,
+ (free->length - woffs));
+ woffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(chip->oobbuf + boffs, oob, bytes);
+ oob += bytes;
+ }
+ return;
+ }
+ default:
+ BUG();
+ }
+}
+
+/**
+ * spi_nand_do_read_ops - [INTERN] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int spi_nand_do_read_ops(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+ int page_addr, page_offset, size;
+ int ret;
+ unsigned int corrected = 0;
+ struct mtd_ecc_stats stats;
+ unsigned int max_bitflips = 0;
+ int readlen = ops->len;
+ int oobreadlen = ops->ooblen;
+ int ooblen = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
+ fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size)) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+ stats = mtd->ecc_stats;
+
+ page_addr = from >> chip->page_shift;
+
+ /* for main data */
+ page_offset = from & chip->page_mask;
+ ops->retlen = 0;
+
+ /* for oob */
+ if (oobreadlen > 0) {
+ if (unlikely(ops->ooboffs >= ooblen)) {
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->ooboffs + oobreadlen >
+ ((mtd->size >> chip->page_shift) - (from >> chip->page_shift))
+ * ooblen)) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+ ooblen -= ops->ooboffs;
+ ops->oobretlen = 0;
+ }
+
+ while (1) {
+ if (page_addr != chip->pagebuf || oobreadlen > 0) {
+ ret = spi_nand_do_read_page(mtd, page_addr,
+ ops->mode == MTD_OOB_RAW, &corrected);
+ if (ret) {
+ pr_debug("error %d reading page 0x%x\n",
+ ret, page_addr);
+ return ret;
+ }
+ chip->pagebuf_bitflips = corrected;
+ chip->pagebuf = page_addr;
+ }
+ max_bitflips = max(max_bitflips, chip->pagebuf_bitflips);
+ size = min(readlen, chip->page_size - page_offset);
+ memcpy(ops->datbuf + ops->retlen,
+ chip->buf + page_offset, size);
+
+ ops->retlen += size;
+ readlen -= size;
+ page_offset = 0;
+
+ if (unlikely(ops->oobbuf)) {
+ size = min(oobreadlen, ooblen);
+ spi_nand_transfer_oob(chip,
+ ops->oobbuf + ops->oobretlen, ops, size);
+
+ ops->oobretlen += size;
+ oobreadlen -= size;
+ }
+ if (!readlen)
+ break;
+
+ page_addr++;
+ }
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return max_bitflips;
+}
+
+/**
+ * spi_nand_do_write_ops - [INTERN] SPI-NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ */
+static int spi_nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+ int page_addr, page_offset, size;
+ int writelen = ops->len;
+ int oobwritelen = ops->ooblen;
+ int ret;
+ int ooblen = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
+ fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
+
+ /* Do not allow reads past end of device */
+ if (unlikely(to >= mtd->size)) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ page_addr = to >> chip->page_shift;
+
+ /* for main data */
+ page_offset = to & chip->page_mask;
+ ops->retlen = 0;
+
+ /* for oob */
+ if (oobwritelen > 0) {
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + oobwritelen) > ooblen) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->ooboffs >= ooblen)) {
+ pr_debug("%s: attempt to start write outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->ooboffs + oobwritelen >
+ ((mtd->size >> chip->page_shift) - (to >> chip->page_shift))
+ * ooblen)) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+ ooblen -= ops->ooboffs;
+ ops->oobretlen = 0;
+ }
+
+ chip->pagebuf = -1;
+
+ while (1) {
+ memset(chip->buf, 0xFF,
+ chip->page_size + chip->page_spare_size);
+
+ size = min(writelen, chip->page_size - page_offset);
+ memcpy(chip->buf + page_offset,
+ ops->datbuf + ops->retlen, size);
+
+ ops->retlen += size;
+ writelen -= size;
+ page_offset = 0;
+
+ if (unlikely(ops->oobbuf)) {
+ size = min(oobwritelen, ooblen);
+
+ spi_nand_fill_oob(chip, ops->oobbuf + ops->oobretlen,
+ size, ops);
+
+ ops->oobretlen += size;
+ oobwritelen -= size;
+ }
+ ret = spi_nand_do_write_page(mtd, page_addr,
+ ops->mode == MTD_OOB_RAW);
+ if (ret) {
+ pr_debug("error %d writing page 0x%x\n",
+ ret, page_addr);
+ return ret;
+ }
+ if (!writelen)
+ break;
+ page_addr++;
+ }
+ return 0;
+}
+
+/**
+ * spi_nand_read - [MTD Interface] SPI-NAND read
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
+ *
+ */
+static int spi_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_oob_ops ops = { 0 };
+ int ret;
+
+ spi_nand_get_device(mtd, FL_READING);
+
+ ops.len = len;
+ ops.datbuf = buf;
+ ret = spi_nand_do_read_ops(mtd, from, &ops);
+
+ *retlen = ops.retlen;
+
+ spi_nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * spi_nand_write - [MTD Interface] SPI-NAND write
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ */
+static int spi_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct mtd_oob_ops ops = {0};
+ int ret;
+
+ spi_nand_get_device(mtd, FL_WRITING);
+
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+
+
+ ret = spi_nand_do_write_ops(mtd, to, &ops);
+
+ *retlen = ops.retlen;
+
+ spi_nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * spi_nand_do_read_oob - [INTERN] SPI-NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * SPI-NAND read out-of-band data from the spare area.
+ */
+static int spi_nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+ int page_addr;
+ int corrected = 0;
+ struct mtd_ecc_stats stats;
+ int readlen = ops->ooblen;
+ int len;
+ int ret = 0;
+
+ fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
+
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
+
+ stats = mtd->ecc_stats;
+
+ len = ops->mode == MTD_OOB_AUTO ? mtd->oobavail : mtd->oobsize;
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
+ (from >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Shift to get page */
+ page_addr = (from >> chip->page_shift);
+ len -= ops->ooboffs;
+ ops->oobretlen = 0;
+
+ while (1) {
+ /*read data from chip*/
+ ret = spi_nand_do_read_page_oob(mtd, page_addr,
+ ops->mode == MTD_OOB_RAW, &corrected);
+ if (ret) {
+ pr_debug("error %d reading page 0x%x\n",
+ ret, page_addr);
+ return ret;
+ }
+ if (page_addr == chip->pagebuf)
+ chip->pagebuf = -1;
+
+ len = min(len, readlen);
+ spi_nand_transfer_oob(chip, ops->oobbuf + ops->oobretlen,
+ ops, len);
+
+ readlen -= len;
+ ops->oobretlen += len;
+ if (!readlen)
+ break;
+
+ page_addr++;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
+ * spi_nand_do_write_oob - [MTD Interface] SPI-NAND write out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * SPI-NAND write out-of-band.
+ */
+static int spi_nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int page_addr, len, ret;
+ struct spi_nand_chip *chip = mtd->priv;
+ int writelen = ops->ooblen;
+
+ fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
+
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)writelen);
+
+ len = ops->mode == MTD_OOB_AUTO ? mtd->oobavail : mtd->oobsize;
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + writelen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start write outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow write past end of device */
+ if (unlikely(to >= mtd->size ||
+ ops->ooboffs + writelen >
+ ((mtd->size >> chip->page_shift) -
+ (to >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Shift to get page */
+ page_addr = to >> chip->page_shift;
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page_addr == chip->pagebuf)
+ chip->pagebuf = -1;
+
+ spi_nand_fill_oob(chip, ops->oobbuf, writelen, ops);
+
+ ret = spi_nand_do_write_page_oob(mtd, page_addr,
+ ops->mode == MTD_OOB_RAW);
+ if (ret) {
+ pr_debug("error %d writing page 0x%x\n",
+ ret, page_addr);
+ return ret;
+ }
+ ops->oobretlen = writelen;
+
+ return 0;
+}
+
+/**
+ * spi_nand_read_oob - [MTD Interface] SPI-NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * SPI-NAND read data and/or out-of-band data.
+ */
+static int spi_nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int ret = -ENOTSUPP;
+
+ struct spi_nand_chip *this = mtd->priv;
+ fh_dev_debug(&this->spi->dev, "Enter %s, from 0x%08llx \n", __func__, from);
+ ops->retlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (ops->datbuf && (from + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ spi_nand_get_device(mtd, FL_READING);
+
+ switch (ops->mode) {
+ case MTD_OOB_PLACE:
+ case MTD_OOB_AUTO:
+ case MTD_OOB_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = spi_nand_do_read_oob(mtd, from, ops);
+ else
+ ret = spi_nand_do_read_ops(mtd, from, ops);
+
+out:
+ spi_nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * spi_nand_write_oob - [MTD Interface] SPI-NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int spi_nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret = -ENOTSUPP;
+ struct spi_nand_chip *this = mtd->priv;
+ fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
+
+ ops->retlen = 0;
+
+ /* Do not allow writes past end of device */
+ if (ops->datbuf && (to + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ spi_nand_get_device(mtd, FL_WRITING);
+
+ switch (ops->mode) {
+ case MTD_OOB_PLACE:
+ case MTD_OOB_AUTO:
+ case MTD_OOB_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = spi_nand_do_write_oob(mtd, to, ops);
+ else
+ ret = spi_nand_do_write_ops(mtd, to, ops);
+
+out:
+ spi_nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * spi_nand_block_bad - [INTERN] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int spi_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops = {0};
+ u32 block_addr;
+ u8 bad[2] = {0, 0};
+ u8 ret = 0;
+
+ block_addr = ofs >> chip->block_shift;
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooblen = 2;
+ ops.oobbuf = bad;
+
+ ret = spi_nand_do_read_oob(mtd, block_addr << chip->block_shift, &ops);
+ if (bad[0] != 0xFF || bad[1] != 0xFF)
+ ret = 1;
+
+ return ret;
+
+}
+
+/**
+ * spi_nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @allowbbt: 1, if its allowed to access the bbt area
+ *
+ * Check, if the block is bad. Either by reading the bad block table or
+ * calling of the scan function.
+ */
+static int spi_nand_block_checkbad(struct mtd_info *mtd, loff_t ofs,
+ int allowbbt)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+
+ if (!chip->bbt)
+ return spi_nand_block_bad(mtd, ofs);
+
+ /* Return info from the table */
+ return spi_nand_isbad_bbt(mtd, ofs, allowbbt);
+}
+
+/**
+ * spi_nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int spi_nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+ return chip->block_bad(mtd, offs, 0);
+}
+
+/**
+ * spi_nand_block_markbad_lowlevel - mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This function performs the generic bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->block_markbad).
+ *
+ * We try operations in the following order:
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) write bad block marker to OOB area of affected block (unless flag
+ * NAND_BBT_NO_OOB_BBM is present)
+ * (3) update the BBT
+ * Note that we retain the first error encountered in (2) or (3), finish the
+ * procedures, and dump the error in the end.
+*/
+static int spi_nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops = {0};
+ struct erase_info einfo = {0};
+ u32 block_addr;
+ u8 buf[2] = {0, 0};
+ int res, ret = 0;
+
+ if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
+ /*erase bad block before mark bad block*/
+ einfo.mtd = mtd;
+ einfo.addr = ofs;
+ einfo.len = 1UL << chip->block_shift;
+ spi_nand_erase(mtd, &einfo);
+
+ block_addr = ofs >> chip->block_shift;
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooblen = 2;
+ ops.oobbuf = buf;
+
+ ret = spi_nand_do_write_oob(mtd,
+ block_addr << chip->block_shift, &ops);
+ }
+
+ /* Mark block bad in BBT */
+ if (chip->bbt) {
+ res = spi_nand_markbad_bbt(mtd, ofs);
+ if (!ret)
+ ret = res;
+ }
+
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+/**
+ * spi_nand_block_markbad - [MTD Interface] Mark block at the given offset
+ * as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int spi_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ ret = spi_nand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return spi_nand_block_markbad_lowlevel(mtd, ofs);
+}
+
+/**
+ * __spi_nand_erase - [INTERN] erase block(s)
+ * @mtd: MTD device structure
+ * @einfo: erase instruction
+ * @allowbbt: allow to access bbt
+ *
+ * Erase one or more blocks
+ */
+int __spi_nand_erase(struct mtd_info *mtd, struct erase_info *einfo,
+ int allowbbt)
+{
+ struct spi_nand_chip *chip = mtd->priv;
+ int page_addr, pages_per_block;
+ loff_t len;
+ u8 status;
+ int ret = 0;
+
+
+ /* check address align on block boundary */
+ if (einfo->addr & (chip->block_size - 1)) {
+ pr_debug("%s: Unaligned address\n", __func__);
+ return -EINVAL;
+ }
+
+ if (einfo->len & (chip->block_size - 1)) {
+ pr_debug("%s: Length not block aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow erase past end of device */
+ if ((einfo->len + einfo->addr) > chip->size) {
+ pr_debug("%s: Erase past end of device\n", __func__);
+ return -EINVAL;
+ }
+
+ einfo->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+
+ /* Grab the lock and see if the device is available */
+ spi_nand_get_device(mtd, FL_ERASING);
+
+ pages_per_block = 1 << (chip->block_shift - chip->page_shift);
+ page_addr = einfo->addr >> chip->page_shift;
+ len = einfo->len;
+
+ einfo->state = MTD_ERASING;
+
+ while (len) {
+ /* Check if we have a bad block, we do not erase bad blocks! */
+ if (chip->block_bad(mtd, ((loff_t) page_addr) <<
+ chip->page_shift, allowbbt)) {
+ pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+ __func__, page_addr);
+ einfo->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+ /*
+ * Invalidate the page cache, if we erase the block which
+ * contains the current cached page.
+ */
+ if (page_addr <= chip->pagebuf && chip->pagebuf <
+ (page_addr + pages_per_block))
+ chip->pagebuf = -1;
+
+ ret = chip->write_enable(chip);
+ if (ret < 0) {
+ pr_debug("write enable command failed\n");
+ einfo->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ ret = chip->erase_block(chip, page_addr);
+ if (ret < 0) {
+ pr_debug("block erase command failed\n");
+ einfo->state = MTD_ERASE_FAILED;
+ einfo->fail_addr = (loff_t)page_addr
+ << chip->page_shift;
+ goto erase_exit;
+ }
+ ret = chip->waitfunc(chip, &status);
+ if (ret < 0) {
+ pr_debug("block erase command wait failed\n");
+ einfo->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+ if ((status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL) {
+ pr_debug("erase block 0x%012llx failed\n",
+ ((loff_t) page_addr) << chip->page_shift);
+ einfo->state = MTD_ERASE_FAILED;
+ einfo->fail_addr = (loff_t)page_addr
+ << chip->page_shift;
+ goto erase_exit;
+ }
+
+ /* Increment page address and decrement length */
+ len -= (1ULL << chip->block_shift);
+ page_addr += pages_per_block;
+ }
+
+ einfo->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+ ret = einfo->state == MTD_ERASE_DONE ? 0 : -EIO;
+
+ spi_nand_release_device(mtd);
+
+ /* Do call back function */
+ if (!ret)
+ mtd_erase_callback(einfo);
+
+ /* Return more or less happy */
+ return ret;
+}
+EXPORT_SYMBOL(__spi_nand_erase);
+
+/**
+ * spi_nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @einfo: erase instruction
+ *
+ * Erase one or more blocks
+ */
+static int spi_nand_erase(struct mtd_info *mtd, struct erase_info *einfo)
+{
+ return __spi_nand_erase(mtd, einfo, 0);
+}
+
+/**
+ * spi_nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait for chip ready function
+ */
+static void spi_nand_sync(struct mtd_info *mtd)
+{
+ pr_debug("spi_nand_sync: called\n");
+
+ /* Grab the lock and see if the device is available */
+ spi_nand_get_device(mtd, FL_SYNCING);
+
+ /* Release it and go back */
+ spi_nand_release_device(mtd);
+}
+
+/**
+ * spi_nand_suspend - [MTD Interface] Suspend the SPI-NAND flash
+ * @mtd: MTD device structure
+ */
+static int spi_nand_suspend(struct mtd_info *mtd)
+{
+ return spi_nand_get_device(mtd, FL_PM_SUSPENDED);
+}
+
+/**
+ * spi_nand_resume - [MTD Interface] Resume the SPI-NAND flash
+ * @mtd: MTD device structure
+ */
+static void spi_nand_resume(struct mtd_info *mtd)
+{
+ struct spi_nand_chip *this = mtd->priv;
+
+ if (this->state == FL_PM_SUSPENDED)
+ spi_nand_release_device(mtd);
+ else
+ pr_err("%s is not called in suspended state\n:", __func__);
+}
+
+
+/*
+ * spi_nand_send_cmd - to process a command to send to the SPI-NAND
+ * @spi: spi device structure
+ * @cmd: command structure
+ *
+ * Set up the command buffer to send to the SPI controller.
+ * The command buffer has to initialized to 0.
+ */
+int spi_nand_send_cmd(struct spi_device *spi, struct spi_nand_cmd *cmd)
+{
+ struct spi_message message;
+ struct spi_transfer x[4];
+ u8 buf[8], i;
+ u32 buflen = 0;
+
+ spi_message_init(&message);
+ memset(x, 0, sizeof(x));
+ x[0].len = 1;
+ x[0].tx_buf = &cmd->cmd;
+ x[0].xfer_wire_mode = ONE_WIRE_SUPPORT;
+ spi_message_add_tail(&x[0], &message);
+
+ buf[buflen++] = cmd->cmd;
+
+ if (cmd->n_addr) {
+ x[1].len = cmd->n_addr;
+ x[1].tx_buf = cmd->addr;
+ spi_message_add_tail(&x[1], &message);
+ x[1].xfer_wire_mode = ONE_WIRE_SUPPORT;
+ }
+ for (i = 0; i < cmd->n_addr && buflen < 7; i++)
+ buf[buflen++] = cmd->addr[i];
+
+ if (cmd->n_tx) {
+ x[2].len = cmd->n_tx;
+ /*x[2].tx_nbits = cmd->tx_nbits; always 0 for single future version*/
+ x[2].tx_buf = cmd->tx_buf;
+ x[2].xfer_wire_mode = ONE_WIRE_SUPPORT;
+ spi_message_add_tail(&x[2], &message);
+ }
+ for (i = 0; i < cmd->n_tx && buflen < 7; i++)
+ buf[buflen++] = cmd->tx_buf[i];
+
+ if (cmd->n_rx) {
+ x[3].len = cmd->n_rx;
+ /*x[3].rx_nbits = cmd->rx_nbits;*/
+ x[3].rx_buf = cmd->rx_buf;
+ if (cmd->cmd == SPINAND_CMD_READ_CACHE_X4)
+ x[3].xfer_wire_mode = QUAD_WIRE_SUPPORT;
+ else if (cmd->cmd == SPINAND_CMD_READ_CACHE_X2)
+ x[3].xfer_wire_mode = DUAL_WIRE_SUPPORT;
+ else
+ x[3].xfer_wire_mode = ONE_WIRE_SUPPORT;
+ spi_message_add_tail(&x[3], &message);
+ }
+ for (i = 0; i < cmd->n_rx && buflen < 7; i++)
+ buf[buflen++] = cmd->rx_buf[i];
+
+ buflen = 1 + cmd->n_addr + cmd->n_tx + cmd->n_rx;
+ if (fh_start_debug || cmd->cmd == 0x1f)
+ fh_dev_debug(&spi->dev, " spi%d:%d: send cmd 0x: %02x %02x %02x %02x %02x %02x, size %d\n",
+ spi->master->bus_num, spi->chip_select,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buflen);
+
+ return spi_sync(spi, &message);
+}
+EXPORT_SYMBOL(spi_nand_send_cmd);
+/*
+ * spi_nand_read_status - fetch the status register (command 0x0f, REG_STATUS)
+ * @spi: spi device structure
+ * @status: where to store the register value
+ *
+ * Read, program and erase set the busy (OIP) bit; polling this register
+ * tells the caller when the operation finished.  Once the ready bit shows,
+ * the remaining status bits are valid as well.
+ */
+static int spi_nand_read_status(struct spi_device *spi, uint8_t *status)
+{
+	int err;
+	struct spi_nand_cmd cmd = {
+		.cmd = SPINAND_CMD_READ_REG,
+		.n_addr = 1,
+		.addr = { REG_STATUS },
+		.n_rx = 1,
+		.rx_buf = status,
+	};
+
+	err = spi_nand_send_cmd(spi, &cmd);
+	if (err < 0)
+		dev_err(&spi->dev, "err: %d read status register\n", err);
+
+	return err;
+}
+
+/**
+ * spi_nand_get_otp - read the OTP/feature register (command 0x0f, REG_OTP)
+ * @spi: spi device structure
+ * @otp: where to store the register value
+ *
+ * Bit 0x10 of this register enables (1) or disables (0) the on-die ECC.
+ */
+static int spi_nand_get_otp(struct spi_device *spi, u8 *otp)
+{
+	int err;
+	struct spi_nand_cmd cmd = {
+		.cmd = SPINAND_CMD_READ_REG,
+		.n_addr = 1,
+		.addr = { REG_OTP },
+		.n_rx = 1,
+		.rx_buf = otp,
+	};
+
+	err = spi_nand_send_cmd(spi, &cmd);
+	if (err < 0)
+		dev_err(&spi->dev, "error %d get otp\n", err);
+	return err;
+}
+
+/**
+ * spi_nand_set_otp - write the OTP/feature register (command 0x1f, REG_OTP)
+ * @spi: spi device structure
+ * @otp: buffer holding the value to write
+ *
+ * Bit 0x10 of this register enables (1) or disables (0) the on-die ECC.
+ */
+static int spi_nand_set_otp(struct spi_device *spi, u8 *otp)
+{
+	int err;
+	struct spi_nand_cmd cmd = {
+		.cmd = SPINAND_CMD_WRITE_REG,
+		.n_addr = 1,
+		.addr = { REG_OTP },
+		.n_tx = 1,
+		.tx_buf = otp,
+	};
+
+	err = spi_nand_send_cmd(spi, &cmd);
+	if (err < 0)
+		dev_err(&spi->dev, "error %d set otp\n", err);
+
+	return err;
+}
+
+/**
+ * spi_nand_enable_ecc - switch the chip's internal ECC on
+ * @chip: SPI-NAND device structure
+ *
+ * Read-modify-write of the OTP register's ECC bit (0x10).  If ECC is
+ * already enabled nothing is written.  Finishes with a read-back so the
+ * return value reflects the device's final state.
+ */
+static int spi_nand_enable_ecc(struct spi_nand_chip *chip)
+{
+	struct spi_device *spi = chip->spi;
+	u8 reg = 0;
+	int err;
+
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+
+	err = spi_nand_get_otp(spi, &reg);
+	if (err < 0)
+		return err;
+
+	fh_dev_debug(&spi->dev, "get opt: 0x%02x\n", reg);
+	if ((reg & OTP_ECC_MASK) == OTP_ECC_ENABLE)
+		return 0;	/* already enabled, nothing to do */
+
+	reg |= OTP_ECC_ENABLE;
+	err = spi_nand_set_otp(spi, &reg);
+	if (err < 0)
+		return err;
+	fh_dev_debug(&spi->dev, "set opt: 0x%02x\n", reg);
+	return spi_nand_get_otp(spi, &reg);
+}
+
+/* Read one byte from the quad-enable register at chip->qe_addr (cmd 0x0f). */
+static int spi_nand_get_qe_value(struct spi_nand_chip *chip, u8 *otp)
+{
+	struct spi_nand_cmd cmd = {
+		.cmd = SPINAND_CMD_READ_REG,
+		.n_addr = 1,
+		.addr = { chip->qe_addr },
+		.n_rx = 1,
+		.rx_buf = otp,
+	};
+
+	return spi_nand_send_cmd(chip->spi, &cmd);
+}
+
+/* Write one byte to the quad-enable register at chip->qe_addr (cmd 0x1f). */
+static int spi_nand_set_qe_value(struct spi_nand_chip *chip, u8 *otp)
+{
+	struct spi_nand_cmd cmd = {
+		.cmd = SPINAND_CMD_WRITE_REG,
+		.n_addr = 1,
+		.addr = { chip->qe_addr },
+		.n_tx = 1,
+		.tx_buf = otp,
+	};
+
+	return spi_nand_send_cmd(chip->spi, &cmd);
+}
+
+
+
+/*
+ * spi_nand_set_qe - set or clear the chip's quad-enable bit per chip->qe_flag
+ *
+ * Read-modify-write of the QE register; the final read-back's result is
+ * returned so the caller sees the device's view of the register.
+ */
+static int spi_nand_set_qe(struct spi_nand_chip *chip)
+{
+	u8 reg = 0;
+	int err;
+
+	err = spi_nand_get_qe_value(chip, &reg);
+	if (err < 0)
+		return err;
+
+	if (chip->qe_flag)
+		reg |= chip->qe_mask;
+	else
+		reg &= ~chip->qe_mask;
+
+	err = spi_nand_set_qe_value(chip, &reg);
+	if (err < 0)
+		return err;
+	return spi_nand_get_qe_value(chip, &reg);
+}
+
+
+/**
+ * spi_nand_disable_ecc - switch the chip's internal ECC off
+ * @chip: SPI-NAND device structure
+ *
+ * Counterpart of spi_nand_enable_ecc(): clears the OTP ECC bit (0x10) if
+ * it is currently set, then reads the register back.  Returns 0 when ECC
+ * was already disabled.
+ */
+static int spi_nand_disable_ecc(struct spi_nand_chip *chip)
+{
+	struct spi_device *spi = chip->spi;
+	u8 reg = 0;
+	int err;
+
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+	err = spi_nand_get_otp(spi, &reg);
+	if (err < 0)
+		return err;
+
+	if ((reg & OTP_ECC_MASK) != OTP_ECC_ENABLE)
+		return 0;	/* already disabled */
+
+	reg &= ~OTP_ECC_ENABLE;
+	err = spi_nand_set_otp(spi, &reg);
+	if (err < 0)
+		return err;
+	return spi_nand_get_otp(spi, &reg);
+}
+
+/**
+ * spi_nand_write_enable - send command 0x06 to allow program/erase
+ * @chip: SPI-NAND device structure
+ *
+ * Must precede every program or erase; the chip clears the write-enable
+ * latch (status bit 2) automatically once the operation completes.
+ */
+static int spi_nand_write_enable(struct spi_nand_chip *chip)
+{
+	struct spi_device *spi = chip->spi;
+	struct spi_nand_cmd cmd = { .cmd = SPINAND_CMD_WR_ENABLE };
+
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+	return spi_nand_send_cmd(spi, &cmd);
+}
+
+/*
+ * spi_nand_read_page_to_cache - command 0x13: load a page into the cache
+ * @chip: SPI-NAND device structure
+ * @page_addr: page to read (sent big-endian over three address bytes)
+ */
+static int spi_nand_read_page_to_cache(struct spi_nand_chip *chip,
+				u32 page_addr)
+{
+	struct spi_device *spi = chip->spi;
+	struct spi_nand_cmd cmd = {
+		.cmd = SPINAND_CMD_READ,
+		.n_addr = 3,
+		.addr = {
+			(u8)(page_addr >> 16),
+			(u8)(page_addr >> 8),
+			(u8)page_addr,
+		},
+	};
+
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+	return spi_nand_send_cmd(spi, &cmd);
+}
+
+/*
+ * spi_nand_read_from_cache - read @len bytes out of the chip's cache register
+ * @chip: SPI-NAND device structure
+ * @page_addr: page the cache was loaded from (used only for plane select)
+ * @column: byte offset within page + spare area to start reading at
+ * @len: number of bytes to read (1 to page size + spare size)
+ * @rbuf: destination buffer
+ *
+ * Picks the x4/x2 read-cache opcode when the controller advertises quad or
+ * dual wire support, otherwise falls back to the single-wire 0x03 command.
+ * No tRd delay is needed; the data already sits in the cache register.
+ */
+int spi_nand_read_from_cache(struct spi_nand_chip *chip, u32 page_addr,
+	u32 column, size_t len, u8 *rbuf)
+{
+	struct spi_nand_cmd cmd = {0};
+	struct spi_device *spi = chip->spi;
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+
+	/*cmd.cmd = SPINAND_CMD_READ_RDM;*/
+	cmd.n_addr = 3;
+	if (spi->dev_open_multi_wire_flag & QUAD_WIRE_SUPPORT) {
+		cmd.cmd = SPINAND_CMD_READ_CACHE_X4;
+		/* some parts use a 5-byte command frame (4 address bytes) */
+		if (chip->multi_wire_command_length == 5)
+			cmd.n_addr = 4;
+	} else if (spi->dev_open_multi_wire_flag & DUAL_WIRE_SUPPORT) {
+		cmd.cmd = SPINAND_CMD_READ_CACHE_X2;
+		if (chip->multi_wire_command_length == 5)
+			cmd.n_addr = 4;
+	} else
+		cmd.cmd = SPINAND_CMD_READ_RDM;
+	if (chip->dev_id[0] == 0xC8) {/*FIXME: early GD chips, test 1G*/
+		/* GigaDevice layout: dummy byte first, then the column address */
+		cmd.addr[0] = 0;
+		cmd.addr[1] = (u8)(column >> 8);
+		if (chip->options & SPINAND_NEED_PLANE_SELECT)
+			cmd.addr[0] |= (u8)(((page_addr >>
+			(chip->block_shift - chip->page_shift)) & 0x1) << 4);
+		cmd.addr[2] = (u8)column;
+	}
+	else{
+		/* generic layout: column address first, trailing dummy byte */
+		cmd.addr[0] = (u8)(column >> 8);
+		if (chip->options & SPINAND_NEED_PLANE_SELECT)
+			cmd.addr[0] |= (u8)(((page_addr >>
+			(chip->block_shift - chip->page_shift)) & 0x1) << 4);
+		cmd.addr[1] = (u8)column;
+		cmd.addr[2] = 0;
+	}
+	cmd.n_rx = len;
+	cmd.rx_buf = rbuf;
+
+	return spi_nand_send_cmd(spi, &cmd);
+}
+
+/*
+ * spi_nand_read_from_cache_x2- send command 0x3b to read out the data from the
+ * cache register
+ * Description:
+ * The read can specify 1 to (page size + spare size) bytes of data read at
+ * the corresponding locations.
+ * No tRd delay.
+ */
+/*int spi_nand_read_from_cache_x2(struct spi_nand_chip *chip, u32 page_addr,
+ u32 column, size_t len, u8 *rbuf)
+{
+ struct spi_nand_cmd cmd = {0};
+ struct spi_device *spi = chip->spi;
+
+ cmd.cmd = SPINAND_CMD_READ_CACHE_X2;
+ cmd.n_addr = 3;
+ cmd.addr[0] = (u8)(column >> 8);
+ if (chip->options & SPINAND_NEED_PLANE_SELECT)
+ cmd.addr[0] |= (u8)(((page_addr >>
+ (chip->block_shift - chip->page_shift)) & 0x1) << 4);
+ cmd.addr[1] = (u8)column;
+ cmd.addr[2] = 0;
+ cmd.n_rx = len;
+ cmd.rx_nbits = SPI_NBITS_DUAL;
+ cmd.rx_buf = rbuf;
+
+ return spi_nand_send_cmd(spi, &cmd);
+}*/
+
+/*
+ * spi_nand_read_from_cache_x4- send command 0x6b to read out the data from the
+ * cache register
+ * Description:
+ * The read can specify 1 to (page size + spare size) bytes of data read at
+ * the corresponding locations.
+ * No tRd delay.
+ */
+/*int spi_nand_read_from_cache_x4(struct spi_nand_chip *chip, u32 page_addr,
+ u32 column, size_t len, u8 *rbuf)
+{
+ struct spi_nand_cmd cmd = {0};
+ struct spi_device *spi = chip->spi;
+
+ cmd.cmd = SPINAND_CMD_READ_CACHE_X4;
+ cmd.n_addr = 3;
+ cmd.addr[0] = (u8)(column >> 8);
+ if (chip->options & SPINAND_NEED_PLANE_SELECT)
+ cmd.addr[0] |= (u8)(((page_addr >>
+ (chip->block_shift - chip->page_shift)) & 0x1) << 4);
+ cmd.addr[1] = (u8)column;
+ cmd.addr[2] = 0;
+ cmd.n_rx = len;
+ cmd.rx_nbits = SPI_NBITS_QUAD;
+ cmd.rx_buf = rbuf;
+
+ return spi_nand_send_cmd(spi, &cmd);
+}*/
+
+/*
+ * spi_nand_read_from_cache_snor_protocol - 0x03 cache read, SPI-NOR framing
+ * @chip: SPI-NAND device structure
+ * @page_addr: page the cache was loaded from (used only for plane select)
+ * @column: byte offset within page + spare area to start reading at
+ * @len: number of bytes to read (1 to page size + spare size)
+ * @rbuf: destination buffer
+ *
+ * Same 0x03 opcode as the generic reader, but the address phase follows the
+ * SPI-NOR convention: dummy byte first, then the 16-bit column.  No tRd
+ * delay is needed.
+ */
+int spi_nand_read_from_cache_snor_protocol(struct spi_nand_chip *chip,
+		u32 page_addr, u32 column, size_t len, u8 *rbuf)
+{
+	struct spi_device *spi = chip->spi;
+	struct spi_nand_cmd cmd = {0};
+
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+
+	cmd.cmd = SPINAND_CMD_READ_RDM;
+	cmd.n_addr = 3;
+	cmd.addr[0] = 0;	/* dummy byte, as in the SPI-NOR frame */
+	cmd.addr[1] = (u8)(column >> 8);
+	if (chip->options & SPINAND_NEED_PLANE_SELECT)
+		cmd.addr[1] |= (u8)(((page_addr >>
+			(chip->block_shift - chip->page_shift)) & 0x1) << 4);
+	cmd.addr[2] = (u8)column;
+	cmd.n_rx = len;
+	cmd.rx_buf = rbuf;
+
+	return spi_nand_send_cmd(spi, &cmd);
+}
+EXPORT_SYMBOL(spi_nand_read_from_cache_snor_protocol);
+
+/*
+ * spi_nand_program_data_to_cache - load write data into the chip's cache
+ * @chip: SPI-NAND device structure
+ * @page_addr: destination page (used only to derive the plane-select bit)
+ * @column: byte offset within the page to start loading at
+ * @len: number of bytes to load
+ * @wbuf: buffer holding @len bytes
+ *
+ * Uses command 0x02 (program load), which clears the cache first.  Data
+ * only reaches the cache here, so there is no tPROG wait.
+ */
+static int spi_nand_program_data_to_cache(struct spi_nand_chip *chip,
+	u32 page_addr, u32 column, size_t len, u8 *wbuf)
+{
+	struct spi_nand_cmd cmd = {0};
+	struct spi_device *spi = chip->spi;
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+
+	cmd.cmd = SPINAND_CMD_PROG_LOAD;
+	cmd.n_addr = 2;
+	cmd.addr[0] = (u8)(column >> 8);
+	if (chip->options & SPINAND_NEED_PLANE_SELECT)
+		cmd.addr[0] |= (u8)(((page_addr >>
+		(chip->block_shift - chip->page_shift)) & 0x1) << 4);
+	cmd.addr[1] = (u8)column;
+	cmd.n_tx = len;
+	cmd.tx_buf = wbuf;
+	/* Debug peek at the OOB area.  Was a hard-coded 2048-byte page size
+	 * and a size_t passed to %d; assumes wbuf holds page + spare data */
+	fh_dev_debug(&spi->dev, "see n_tx %d, oob[4] 0x%08x\n",
+		(int)len, *(uint32_t *)(wbuf + chip->page_size));
+
+	return spi_nand_send_cmd(spi, &cmd);
+}
+
+/**
+ * spi_nand_program_execute - command 0x10: commit the cache to the array
+ * @chip: SPI-NAND device structure
+ * @page_addr: physical page the cache contents are written to
+ *
+ * The caller must wait tPROG (via the waitfunc) for the transaction to
+ * finish.
+ */
+static int spi_nand_program_execute(struct spi_nand_chip *chip, u32 page_addr)
+{
+	struct spi_device *spi = chip->spi;
+	struct spi_nand_cmd cmd = {
+		.cmd = SPINAND_CMD_PROG,
+		.n_addr = 3,
+		.addr = {
+			(u8)(page_addr >> 16),
+			(u8)(page_addr >> 8),
+			(u8)page_addr,
+		},
+	};
+
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+	return spi_nand_send_cmd(spi, &cmd);
+}
+
+
+/**
+ * spi_nand_erase_block - command 0xd8: erase the block containing a page
+ * @chip: SPI-NAND device structure
+ * @page_addr: any page inside the block to erase
+ *
+ * The caller must wait tERS (via the waitfunc) for the erase to finish.
+ */
+static int spi_nand_erase_block(struct spi_nand_chip *chip,
+		u32 page_addr)
+{
+	struct spi_device *spi = chip->spi;
+	struct spi_nand_cmd cmd = {
+		.cmd = SPINAND_CMD_ERASE_BLK,
+		.n_addr = 3,
+		.addr = {
+			(u8)(page_addr >> 16),
+			(u8)(page_addr >> 8),
+			(u8)page_addr,
+		},
+	};
+
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+	return spi_nand_send_cmd(spi, &cmd);
+}
+
+/**
+ * spi_nand_wait - [DEFAULT] poll the status register until the chip is ready
+ * @chip: SPI-NAND device structure
+ * @s: buffer to store the last status register value (may be NULL)
+ *
+ * Applies to erase and program: erase can take up to 400ms; the generous
+ * 1000ms program budget covers slow parts (originally bumped for mx2g).
+ * Returns 0 when the ready bit was seen, -ETIMEDOUT otherwise.
+ */
+static int spi_nand_wait(struct spi_nand_chip *chip, u8 *s)
+{
+	unsigned long timeo = jiffies;
+	u8 status = STATUS_OIP_MASK;	/* report "busy" if no poll ever succeeds */
+	u8 state = chip->state;
+	int ret = -ETIMEDOUT;
+
+	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
+
+	if (state == FL_ERASING)
+		timeo += msecs_to_jiffies(400);
+	else
+		timeo += msecs_to_jiffies(1000);
+
+	while (time_before(jiffies, timeo)) {
+		spi_nand_read_status(chip->spi, &status);
+		if ((status & STATUS_OIP_MASK) == STATUS_READY) {
+			ret = 0;
+			goto out;
+		}
+		cond_resched();
+	}
+out:
+	if (s)
+		*s = status;
+
+	/* was "return 0", which silently swallowed timeouts */
+	return ret;
+}
+
+
+/*
+ * spi_nand_reset - send RESET (0xff) and give the die time to recover
+ * @chip: SPI-NAND device structure
+ *
+ * Always returns 0; a transport failure is only logged (best effort, as
+ * callers cannot do anything useful about a failed reset).
+ */
+static int spi_nand_reset(struct spi_nand_chip *chip)
+{
+	struct spi_device *spi = chip->spi;
+	struct spi_nand_cmd cmd = {0};
+
+	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
+
+	cmd.cmd = SPINAND_CMD_RESET;
+
+	if (spi_nand_send_cmd(spi, &cmd) < 0)
+		pr_err("spi_nand reset failed!\n");
+
+	/* elapse 1ms before issuing any other command; mdelay() is the
+	 * documented interface for millisecond busy-waits — udelay(1000)
+	 * can overflow on some architectures */
+	mdelay(1);
+
+	return 0;
+}
+
+
+/**
+ * spi_nand_lock_block - write the block-lock register (command 0x1f)
+ * @spi: spi device structure
+ * @lock: value to store in the block lock register
+ *
+ * All blocks are locked at power-up; writing BL_ALL_UNLOCKED here makes
+ * them writable and erasable.
+ */
+static int spi_nand_lock_block(struct spi_device *spi, u8 lock)
+{
+	int err;
+	struct spi_nand_cmd cmd = {
+		.cmd = SPINAND_CMD_WRITE_REG,
+		.n_addr = 1,
+		.addr = { REG_BLOCK_LOCK },
+		.n_tx = 1,
+		.tx_buf = &lock,
+	};
+
+	err = spi_nand_send_cmd(spi, &cmd);
+	if (err < 0)
+		dev_err(&spi->dev, "error %d lock block\n", err);
+
+	return err;
+}
+
+/* CRC-16 (poly 0x8005, MSB-first) over @len bytes, as ONFI specifies for
+ * parameter-page integrity checks.  @crc carries the initial value. */
+static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+	size_t n;
+	int bit;
+
+	for (n = 0; n < len; n++) {
+		crc ^= (u16)(p[n] << 8);
+		for (bit = 0; bit < 8; bit++) {
+			if (crc & 0x8000)
+				crc = (crc << 1) ^ 0x8005;
+			else
+				crc <<= 1;
+		}
+	}
+
+	return crc;
+}
+
+/*
+ * sanitize_string - make an ONFI name field safe to print
+ * @s: buffer holding the (possibly unterminated) field
+ * @len: field size in bytes
+ *
+ * Forces NUL termination, replaces non-printable bytes with '?', then
+ * trims trailing whitespace in place.
+ */
+static void sanitize_string(uint8_t *s, size_t len)
+{
+	ssize_t i;
+
+	/* Null terminate (the field may use every byte for the name) */
+	s[len - 1] = 0;
+
+	/* Remove non printable chars */
+	for (i = 0; i < len - 1; i++) {
+		if (s[i] < ' ' || s[i] > 127)
+			s[i] = '?';
+	}
+
+	/* Remove trailing spaces; strim() takes char *, hence the cast
+	 * (was passed uint8_t * directly, a pointer-sign mismatch) */
+	strim((char *)s);
+}
+
+/*
+ * spi_nand_detect_onfi - probe for an ONFI parameter page
+ * @chip: SPI-NAND device structure
+ *
+ * Temporarily enables OTP access, loads page 0x01 (which holds three
+ * redundant 256-byte copies of the parameter page), and accepts the first
+ * copy whose signature and CRC check out.  On success the chip geometry
+ * fields are filled from the table.  Returns true if a valid page was
+ * found, false otherwise (including allocation failure).
+ */
+static bool spi_nand_detect_onfi(struct spi_nand_chip *chip)
+{
+	struct spi_device *spi = chip->spi;
+	struct spi_nand_onfi_params *p;
+	char *buffer;
+	bool ret = true;
+	u8 otp;
+	int i;
+
+	/* three 256-byte parameter-page copies */
+	buffer = kmalloc(256 * 3, GFP_KERNEL);
+	if (!buffer)
+		return false;	/* was dereferenced without a check */
+	otp = OTP_ENABLE;
+	spi_nand_set_otp(spi, &otp);
+	chip->load_page(chip, 0x01);
+	chip->waitfunc(chip, NULL);
+	spi_nand_read_from_cache(chip, 0x01, 0x00, 256 * 3, buffer);
+	otp = OTP_ECC_ENABLE;
+	spi_nand_set_otp(spi, &otp);
+
+	/* take the first copy with a valid signature and CRC */
+	p = (struct spi_nand_onfi_params *)buffer;
+	for (i = 0; i < 3; i++, p++) {
+		if (p->sig[0] != 'O' || p->sig[1] != 'N' ||
+			p->sig[2] != 'F' || p->sig[3] != 'I')
+			continue;
+		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
+			le16_to_cpu(p->crc))
+			break;
+	}
+	if (i == 3) {
+		pr_err("Could not find valid ONFI parameter page; aborting\n");
+		ret = false;
+		goto out;
+	}
+
+	memcpy(&chip->onfi_params, p, sizeof(*p));
+
+	p = &chip->onfi_params;
+
+	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+	sanitize_string(p->model, sizeof(p->model));
+
+	/* derive geometry from the parameter page */
+	chip->name = p->model;
+	chip->size = le32_to_cpu(p->byte_per_page) *
+		le32_to_cpu(p->pages_per_block) *
+		le32_to_cpu(p->blocks_per_lun) * p->lun_count;
+	chip->block_size = le32_to_cpu(p->byte_per_page) *
+		le32_to_cpu(p->pages_per_block);
+	chip->page_size = le32_to_cpu(p->byte_per_page);
+	chip->page_spare_size = le16_to_cpu(p->spare_bytes_per_page);
+	chip->block_shift = ilog2(chip->block_size);
+	chip->page_shift = ilog2(chip->page_size);
+	chip->page_mask = chip->page_size - 1;
+	chip->bits_per_cell = p->bits_per_cell;
+	/*FIXME need to find a way to read options from ONFI table*/
+	chip->options = SPINAND_NEED_PLANE_SELECT;
+	if (p->ecc_bits != 0xff) {
+		chip->ecc_strength_ds = p->ecc_bits;
+		chip->ecc_step_ds = 512;
+	}
+
+out:
+	kfree(buffer);
+	return ret;
+}
+
+/*
+ * spi_nand_set_defaults - fill in default ops for hooks left NULL
+ *
+ * read_cache is always (re)set to the generic single/multi-wire reader;
+ * the commented block shows the intended mode-based x2/x4 selection for a
+ * future version.  Every other hook is only installed if the board code
+ * did not provide its own.
+ */
+static void spi_nand_set_defaults(struct spi_nand_chip *chip)
+{
+	/*struct spi_device *spi = chip->spi;*/
+
+	/* if (spi->mode & SPI_RX_QUAD)
+		chip->read_cache = spi_nand_read_from_cache_x4;
+	else if (spi->mode & SPI_RX_DUAL)
+		chip->read_cache = spi_nand_read_from_cache_x2;
+	else*/
+	chip->read_cache = spi_nand_read_from_cache;
+
+	if (!chip->reset)
+		chip->reset = spi_nand_reset;
+	if (!chip->erase_block)
+		chip->erase_block = spi_nand_erase_block;
+	if (!chip->load_page)
+		chip->load_page = spi_nand_read_page_to_cache;
+	if (!chip->store_cache)
+		chip->store_cache = spi_nand_program_data_to_cache;
+	if (!chip->write_page)
+		chip->write_page = spi_nand_program_execute;
+	if (!chip->write_enable)
+		chip->write_enable = spi_nand_write_enable;
+	if (!chip->waitfunc)
+		chip->waitfunc = spi_nand_wait;
+	if (!chip->enable_ecc)
+		chip->enable_ecc = spi_nand_enable_ecc;
+	if (!chip->disable_ecc)
+		chip->disable_ecc = spi_nand_disable_ecc;
+	if (!chip->block_bad)
+		chip->block_bad = spi_nand_block_checkbad;
+	if (!chip->set_qe)
+		chip->set_qe = spi_nand_set_qe;
+}
+
+/*
+ * spi_nand_check - verify that every mandatory hook is populated
+ *
+ * Run after spi_nand_set_defaults() plus any board-specific overrides;
+ * returns -ENODEV if any required op or the ECC layout is still missing.
+ */
+static int spi_nand_check(struct spi_nand_chip *chip)
+{
+	if (!chip->reset || !chip->read_id || !chip->load_page ||
+	    !chip->read_cache || !chip->store_cache || !chip->write_page ||
+	    !chip->erase_block || !chip->waitfunc || !chip->write_enable ||
+	    !chip->get_ecc_status || !chip->enable_ecc ||
+	    !chip->disable_ecc || !chip->ecclayout)
+		return -ENODEV;
+	return 0;
+}
+
+/**
+ * spi_nand_scan_ident - [SPI-NAND Interface] Scan for the SPI-NAND device
+ * @mtd: MTD device structure
+ *
+ * First phase of the normal spi_nand_scan(): reset the chip, read its ID,
+ * match it against the id table (falling back to ONFI discovery), allocate
+ * the page+spare bounce buffer, unlock all blocks and enable on-die ECC.
+ */
+int spi_nand_scan_ident(struct mtd_info *mtd)
+{
+	int ret;
+	u8 id[SPINAND_MAX_ID_LEN] = {0};
+	struct spi_nand_chip *chip = mtd->priv;
+
+	spi_nand_set_defaults(chip);
+	chip->reset(chip);
+
+	chip->read_id(chip, id);
+	if (id[0] == 0 && id[1] == 0) {
+		pr_err("SPINAND: read id error! 0x%02x, 0x%02x!\n",
+			id[0], id[1]);
+		return -ENODEV;
+	}
+
+	pr_err("SPINAND: read id ! 0x%02x, 0x%02x 0x%02x, 0x%02x!\n",
+		id[0], id[1], id[2], id[3]);
+	if (spi_nand_scan_id_table(chip, id))
+		goto ident_done;
+	pr_info("SPI-NAND type mfr_id: %x, dev_id: %x is not in id table.\n",
+		id[0], id[1]);
+
+	/* unknown part: set quad-enable before probing the ONFI page */
+	if (chip->spi->dev_open_multi_wire_flag & QUAD_WIRE_SUPPORT)
+		chip->set_qe(chip);
+
+	if (spi_nand_detect_onfi(chip))
+		goto ident_done;
+
+	return -ENODEV;
+
+ident_done:
+	pr_info("SPI-NAND: %s is found.\n", chip->name);
+
+	chip->buf = kzalloc(chip->page_size + chip->page_spare_size,
+		GFP_KERNEL);
+	if (!chip->buf)
+		return -ENOMEM;
+
+	chip->oobbuf = chip->buf + chip->page_size;
+
+	/* the unlock result was previously discarded; report failures */
+	ret = spi_nand_lock_block(chip->spi, BL_ALL_UNLOCKED);
+	if (ret < 0)
+		goto err_free;
+	ret = chip->enable_ecc(chip);
+	if (ret < 0)
+		goto err_free;
+
+	return 0;
+
+err_free:
+	kfree(chip->buf);
+	chip->buf = NULL;	/* keep a later release call from double-freeing */
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_nand_scan_ident);
+
+/**
+ * spi_nand_scan_tail - [SPI-NAND Interface] Scan for the SPI-NAND device
+ * @mtd: MTD device structure
+ *
+ * This is the second phase of the normal spi_nand_scan() function.  It
+ * verifies the chip ops are complete, initializes chip state, and wires
+ * every mtd_info callback to the spi_nand implementations.
+ */
+int spi_nand_scan_tail(struct mtd_info *mtd)
+{
+	struct spi_nand_chip *chip = mtd->priv;
+	int ret;
+
+	ret = spi_nand_check(chip);
+	if (ret)
+		return ret;
+	/* Initialize state */
+	chip->state = FL_READY;
+	/* Invalidate the pagebuffer reference */
+	chip->pagebuf = -1;
+	chip->bbt_options |= NAND_BBT_USE_FLASH;
+	chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+
+	init_waitqueue_head(&chip->wq);
+	spin_lock_init(&chip->chip_lock);
+
+	/* publish the chip geometry through the mtd_info fields */
+	mtd->name = chip->name;
+	mtd->size = chip->size;
+	mtd->erasesize = chip->block_size;
+	mtd->writesize = chip->page_size;
+	mtd->writebufsize = mtd->writesize;
+	mtd->oobsize = chip->page_spare_size;
+	mtd->owner = THIS_MODULE;
+	mtd->type = MTD_NANDFLASH;
+	mtd->flags = MTD_CAP_NANDFLASH;
+	/*xxx:porting down: if (!mtd->ecc_strength)
+		mtd->ecc_strength = chip->ecc_strength_ds ?
+			chip->ecc_strength_ds : 1;*/
+	mtd->bitflip_threshold = 1;
+	mtd->ecclayout = chip->ecclayout;
+	mtd->oobsize = chip->page_spare_size;
+	mtd->oobavail = chip->ecclayout->oobavail;
+	/* remove _* */
+	mtd->erase = spi_nand_erase;
+	mtd->point = NULL;
+	mtd->unpoint = NULL;
+	mtd->read = spi_nand_read;
+	mtd->write = spi_nand_write;
+	mtd->read_oob = spi_nand_read_oob;
+	mtd->write_oob = spi_nand_write_oob;
+	mtd->sync = spi_nand_sync;
+	mtd->lock = NULL;
+	mtd->unlock = NULL;
+	mtd->suspend = spi_nand_suspend;
+	mtd->resume = spi_nand_resume;
+	mtd->block_isbad = spi_nand_block_isbad;
+	mtd->block_markbad = spi_nand_block_markbad;
+
+#ifndef CONFIG_SPI_NAND_BBT
+	/* Build bad block table */
+	/* NOTE(review): default BBT is built only when CONFIG_SPI_NAND_BBT
+	 * is *not* set — looks inverted; confirm against Kconfig intent */
+	return spi_nand_default_bbt(mtd);
+#else
+	return 0;
+#endif
+}
+EXPORT_SYMBOL_GPL(spi_nand_scan_tail);
+
+/**
+ * spi_nand_scan_ident_release - [SPI-NAND Interface] Free resources
+ * applied by spi_nand_scan_ident
+ * @mtd: MTD device structure
+ *
+ * Frees the page bounce buffer.  Always returns 0.
+ */
+int spi_nand_scan_ident_release(struct mtd_info *mtd)
+{
+	struct spi_nand_chip *chip = mtd->priv;
+
+	kfree(chip->buf);
+	chip->buf = NULL;	/* make repeated release calls safe */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nand_scan_ident_release);
+
+/**
+ * spi_nand_scan_tail_release - [SPI-NAND Interface] Free resources
+ * applied by spi_nand_scan_tail
+ * @mtd: MTD device structure
+ *
+ * spi_nand_scan_tail() allocates nothing, so there is nothing to undo.
+ */
+int spi_nand_scan_tail_release(struct mtd_info *mtd)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nand_scan_tail_release);
+
+/**
+ * spi_nand_release - [SPI-NAND Interface] Free resources held by the SPI-NAND
+ * device
+ * @mtd: MTD device structure
+ *
+ * Unregisters the MTD device and frees the page bounce buffer.
+ */
+int spi_nand_release(struct mtd_info *mtd)
+{
+	struct spi_nand_chip *chip = mtd->priv;
+
+	mtd_device_unregister(mtd);
+	kfree(chip->buf);
+	chip->buf = NULL;	/* guard against double free via other release paths */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nand_release);
+
+MODULE_DESCRIPTION("SPI NAND framework");
+MODULE_AUTHOR("Peter Pan<peterpandong at micron.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/mtd/spi-nand/spi-nand-bbt.c b/drivers/mtd/spi-nand/spi-nand-bbt.c
new file mode 100644
index 00000000..5340606f
--- /dev/null
+++ b/drivers/mtd/spi-nand/spi-nand-bbt.c
@@ -0,0 +1,1357 @@
+/*
+ * drivers/mtd/spi_nand_bbt.c
+ *
+ * Overview:
+ * Bad block table support for the SPI-NAND driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file is derived from nand_base.c
+ *
+ * TODO:
+ * share BBT code with parallel nand
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/bbm.h>
+#include <linux/mtd/spi-nand.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/spi/spi.h>
+#include <asm/string.h>
+
+#define BBT_BLOCK_GOOD 0x00
+#define BBT_BLOCK_WORN 0x01
+#define BBT_BLOCK_RESERVED 0x02
+#define BBT_BLOCK_FACTORY_BAD 0x03
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+#ifdef SPINAND_BBT_DEBUG
+#define fh_dev_debug dev_err
+#define fh_debug_dump(buf,len) do { \
+ unsigned int i; \
+ printk("\t %s:L%d", __func__,__LINE__); \
+ for (i=0;i<len/4;i++) { \
+ if (0 == i % 4 ) \
+ printk("\n\t\t 0x%08x:\t",(unsigned int) buf+i*4 ); \
+ printk("%08x ", *(unsigned int*) (buf + i*4));\
+ } \
+ } while(0)
+#else
+#define fh_dev_debug(...)
+#define fh_debug_dump(buf,len)
+#endif
+
+
+static int spi_nand_update_bbt(struct mtd_info *mtd, loff_t offs);
+
+/* Return the 2-bit BBT_BLOCK_* state recorded for @block (4 entries/byte). */
+static inline uint8_t bbt_get_entry(struct spi_nand_chip *chip, int block)
+{
+	uint8_t cell = chip->bbt[block >> BBT_ENTRY_SHIFT];
+	unsigned int shift = (block & BBT_ENTRY_MASK) * 2;
+
+	return (cell >> shift) & BBT_ENTRY_MASK;
+}
+
+/*
+ * Record @mark (a BBT_BLOCK_* code) for @block in the in-RAM table.
+ * Bits are only OR-ed in: a block can move from GOOD (0b00) to a worse
+ * state, but an entry is never cleared back to GOOD through this helper.
+ */
+static inline void bbt_mark_entry(struct spi_nand_chip *chip, int block,
+		uint8_t mark)
+{
+	uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+
+	chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
+
+/* Match the bare BBT pattern at the start of @buf (no-OOB table layout). */
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+	return memcmp(buf, td->pattern, td->len) ? -1 : 0;
+}
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Look for the descriptor's marker either at the buffer start (no-OOB
+ * tables) or at td->offs inside the OOB that follows @paglen data bytes.
+ * Returns 0 on a match, -1 otherwise.
+ */
+static int check_pattern(uint8_t *buf, int len, int paglen,
+	struct nand_bbt_descr *td)
+{
+	if (td->options & NAND_BBT_NO_OOB)
+		return check_pattern_no_oob(buf, td);
+
+	fh_debug_dump(buf + paglen + td->offs, td->len);
+	return memcmp(buf + paglen + td->offs, td->pattern, td->len) ? -1 : 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
+ *
+ * Like check_pattern() but @buf already points at the OOB area, so only
+ * td->offs is applied.  Returns 0 on a match, -1 otherwise.
+ */
+static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
+{
+	return memcmp(buf + td->offs, td->pattern, td->len) ? -1 : 0;
+}
+
+/**
+ * add_marker_len - compute the length of the marker in the data area
+ * @td: BBT descriptor used for computation
+ *
+ * Returns 0 when the marker lives in OOB; otherwise the marker length,
+ * plus one byte when the table also stores a version.
+ */
+static u32 add_marker_len(struct nand_bbt_descr *td)
+{
+	u32 len = 0;
+
+	if (td->options & NAND_BBT_NO_OOB) {
+		len = td->len;
+		if (td->options & NAND_BBT_VERSION)
+			len++;
+	}
+	return len;
+}
+
+/* -EBADMSG: read path reported an uncorrectable ECC error. */
+static inline int mtd_is_eccerr(int err)
+{
+	return err == -EBADMSG;
+}
+
+/* -EUCLEAN: bitflip(s) occurred but were corrected. */
+static inline int mtd_is_bitflip(int err)
+{
+	return err == -EUCLEAN;
+}
+
+/* True for either ECC condition (corrected or uncorrectable). */
+static inline int mtd_is_bitflip_or_eccerr(int err)
+{
+	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
+}
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: block number offset in the table
+ *
+ * Read the on-flash table and decode its packed per-block entries (bits
+ * per entry come from td->options) into the in-RAM table via
+ * bbt_mark_entry().  Returns 0, a corrected-bitflip code, or a hard error.
+ */
+static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
+	struct nand_bbt_descr *td, int offs)
+{
+	int res, ret = 0, i, j, act = 0;
+	struct spi_nand_chip *this = mtd->priv;
+	size_t retlen, len, totlen;
+	loff_t from;
+	int bits = td->options & NAND_BBT_NRBITS_MSK;
+	uint8_t msk = (uint8_t)((1 << bits) - 1);
+	u32 marker_len;
+	int reserved_block_code = td->reserved_block_code;
+
+	totlen = (num * bits) >> 3;	/* table size in bytes */
+	marker_len = add_marker_len(td);
+	from = ((loff_t)page) << this->page_shift;
+	while (totlen) {
+		len = min(totlen, (size_t)(1 << this->block_shift));
+		if (marker_len) {
+			/*
+			 * In case the BBT marker is not in the OOB area it
+			 * will be just in the first page.
+			 */
+			len -= marker_len;
+			from += marker_len;
+			marker_len = 0;
+		}
+		res = mtd->read(mtd, from, len, &retlen,
+			buf); /*/// mtd_read(mtd, from, len, &retlen, buf);*/
+		if (res < 0) {
+			/* uncorrectable errors abort; corrected ones only
+			 * taint the return value */
+			if (mtd_is_eccerr(res)) {
+				pr_info("spi_nand_bbt: ECC error in BBT at 0x%012llx\n",
+					from & ~mtd->writesize);
+				return res;
+			} else if (mtd_is_bitflip(res)) {
+				pr_info("spi_nand_bbt: corrected error in BBT at 0x%012llx\n",
+					from & ~mtd->writesize);
+				ret = res;
+			} else {
+				pr_info("spi_nand_bbt: error reading BBT\n");
+				return res;
+			}
+		}
+
+		/* Analyse data: walk each byte, @bits bits per block entry */
+		for (i = 0; i < len; i++) {
+			uint8_t dat = buf[i];
+
+			for (j = 0; j < 8; j += bits, act++) {
+				uint8_t tmp = (dat >> j) & msk;
+
+				if (tmp == msk)
+					continue;	/* all-ones = good block */
+				if (reserved_block_code &&
+					(tmp == reserved_block_code)) {
+					pr_info("spi_nand_read_bbt: reserved block at 0x%012llx\n",
+						(loff_t)(offs + act) <<
+						this->block_shift);
+					bbt_mark_entry(this, offs + act,
+						BBT_BLOCK_RESERVED);
+					mtd->ecc_stats.bbtblocks++;
+					continue;
+				}
+				/*
+				 * Leave it for now, if it's matured we can
+				 * move this message to pr_debug.
+				 */
+				pr_info("spi_nand_read_bbt: bad block at 0x%012llx\n",
+					(loff_t)(offs + act) <<
+					this->block_shift);
+				/* Factory marked bad or worn out? */
+				if (tmp == 0)
+					bbt_mark_entry(this, offs + act,
+						BBT_BLOCK_FACTORY_BAD);
+				else
+					bbt_mark_entry(this, offs + act,
+						BBT_BLOCK_WORN);
+				mtd->ecc_stats.badblocks++;
+			}
+		}
+		totlen -= len;
+		from += len;
+	}
+	return ret;
+}
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
+ *
+ * Decode the whole-device table located at td->pages[0]; the bbt bits are
+ * assumed to be in consecutive order.
+ */
+static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf,
+	struct nand_bbt_descr *td, int chip)
+{
+	struct spi_nand_chip *this = mtd->priv;
+
+	return read_bbt(mtd, buf, td->pages[0],
+			mtd->size >> this->block_shift, td, 0);
+}
+
+/* Read the in-data BBT marker (first page, no OOB layout). */
+static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+	struct nand_bbt_descr *td)
+{
+	size_t retlen;
+	size_t len = td->len;
+
+	if (td->options & NAND_BBT_VERSION)
+		len++;	/* version byte follows the marker */
+
+	/*return mtd_read(mtd, offs, len, &retlen, buf);*/
+	return mtd->read(mtd, offs, len, &retlen, buf);
+}
+
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. May traverse multiple pages, interleaving
+ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
+ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
+ */
+static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+	size_t len)
+{
+	struct mtd_oob_ops ops;
+	int res, ret = 0;
+	struct spi_nand_chip *chip = mtd->priv;
+	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
+
+	ops.mode = MTD_OOB_PLACE;
+	ops.ooboffs = 0;
+	ops.ooblen = mtd->oobsize;
+
+	while (len > 0) {
+		ops.datbuf = buf;
+		ops.len = min_t(size_t, len, mtd->writesize);
+		ops.oobbuf = buf + ops.len;	/* OOB lands right after the data */
+
+		/*res = mtd_read_oob(mtd, offs, &ops);*/
+		res = mtd->read_oob(mtd, offs, &ops);
+		if (res) {
+			/* abort on hard errors; keep the worst ECC code seen */
+			if (!mtd_is_bitflip_or_eccerr(res))
+				return res;
+			else if (mtd_is_eccerr(res) || !ret)
+				ret = res;
+		}
+
+		buf += mtd->oobsize + mtd->writesize;
+		len -= mtd->writesize;
+		offs += mtd->writesize;
+	}
+	return ret;
+}
+
+/* Dispatch to the no-OOB or data+OOB scanner depending on @td options. */
+static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+	size_t len, struct nand_bbt_descr *td)
+{
+	if (td->options & NAND_BBT_NO_OOB)
+		return scan_read_data(mtd, buf, offs, td);
+	return scan_read_oob(mtd, buf, offs, len);
+}
+
+/* Scan write data with oob to flash */
+static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
+ uint8_t *buf, uint8_t *oob)
+{
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.datbuf = buf;
+ ops.oobbuf = oob;
+ ops.len = len;
+
+ /*return mtd_write_oob(mtd, offs, &ops);*/
+ return mtd->write_oob(mtd, offs, &ops);
+}
+
+static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ u32 ver_offs = td->veroffs;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ ver_offs += mtd->writesize;
+ return ver_offs;
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
+static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ struct spi_nand_chip *this = mtd->priv;
+
+ /* Read the primary version, if available */
+ if (td->options & NAND_BBT_VERSION) {
+ scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
+ }
+
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+ }
+}
+
+/* Scan a given block partially */
+static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+ loff_t offs, uint8_t *buf, int numpages)
+{
+ struct mtd_oob_ops ops;
+ int j, ret;
+
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ for (j = 0; j < numpages; j++) {
+ /*
+ * Read the full oob until read_oob is fixed to handle single
+ * byte reads for 16 bit buswidth.
+ */
+ ret = mtd->read_oob(mtd, offs, &ops);/*mtd_read_oob(mtd, offs, &ops);*/
+ /* Ignore ECC errors when checking for BBM */
+ if (ret < 0 && !mtd_is_bitflip_or_eccerr(ret))
+ return ret;
+
+ if (mtd_is_eccerr(ret))
+ return 1;
+
+ if (check_short_pattern(buf, bd))
+ return 1;
+
+ offs += mtd->writesize;
+ }
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ int i, numblocks, numpages;
+ int startblock;
+ loff_t from;
+ fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
+
+ pr_info("Scanning device for bad blocks\n");
+
+ if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ numpages = 2;
+ else
+ numpages = 1;
+
+ if (chip == -1) {
+ numblocks = mtd->size >> this->block_shift;
+ startblock = 0;
+ from = 0;
+ } else {
+ numblocks = this->size >> this->block_shift;
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ from = (loff_t)startblock << this->block_shift;
+ }
+
+ if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+ from += mtd->erasesize - (mtd->writesize * numpages);
+
+ for (i = startblock; i < numblocks; i++) {
+ int ret;
+
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+ ret = scan_block_fast(mtd, bd, from, buf, numpages);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i, (unsigned long long)from);
+ mtd->ecc_stats.badblocks++;
+ }
+
+ from += (1 << this->block_shift);
+ }
+ return 0;
+}
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * performed either from the beginning up or from the end of the device
+ * downwards. The search starts always at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page in a block.
+ */
+static int search_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ int i, chips;
+ int startblock, block, dir;
+ int scanlen = mtd->writesize + mtd->oobsize;
+ int bbtblocks;
+ int blocktopage = this->block_shift - this->page_shift;
+
+ fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
+ /* Search direction top -> down? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = (mtd->size >> this->block_shift) - 1;
+ dir = -1;
+ } else {
+ startblock = 0;
+ dir = 1;
+ }
+
+ chips = 1;
+ bbtblocks = mtd->size >> this->block_shift;
+
+ for (i = 0; i < chips; i++) {
+ /* Reset version information */
+ td->version[i] = 0;
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+
+ int actblock = startblock + dir * block;
+ loff_t offs = (loff_t)actblock << this->block_shift;
+
+ /* Read first page */
+ scan_read(mtd, buf, offs, mtd->writesize, td);
+ fh_dev_debug(&this->spi->dev, "read block %d, first v 0x%08x\n ",
+ actblock, *(int *)buf);
+ fh_dev_debug(&this->spi->dev, "td pattern:%s, offset %d, len %d\n ",
+ td->pattern, td->offs,td->len);
+ if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
+ td->pages[i] = actblock << blocktopage;
+ if (td->options & NAND_BBT_VERSION) {
+ offs = bbt_get_ver_offs(mtd, td);
+ td->version[i] = buf[offs];
+ }
+ break;
+ }
+ }
+ startblock += this->size >> this->block_shift;
+ }
+ /* Check, if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ pr_warn("Bad block table not found for chip %d\n", i);
+ else
+ pr_info("Bad block table found at page %d, version 0x%02X\n",
+ td->pages[i], td->version[i]);
+ }
+ return 0;
+}
+
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Search and read the bad block table(s).
+ */
+static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md)
+{
+ /* Search the primary table */
+ search_bbt(mtd, buf, td);
+
+ /* Search the mirror table */
+ if (md)
+ search_bbt(mtd, buf, md);
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.
+ */
+static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+ int chipsel)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ struct erase_info einfo;
+ int i, res, chip = 0;
+ int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+ int nrchips, pageoffs, ooboffs;
+ uint8_t msk[4];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+ extern int fh_start_debug;
+ fh_start_debug = 1;
+ fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (!rcode)
+ rcode = 0xff;
+ numblocks = (int)(mtd->size >> this->block_shift);
+ nrchips = 1;
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+ /*
+ * There was already a version of the table, reuse the page
+ * This applies for absolute placement too, as we have the
+ * page nr. in td->pages.
+ */
+ if (td->pages[chip] != -1) {
+ page = td->pages[chip];
+ goto write;
+ }
+
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ /* Check, if the block is bad */
+ switch (bbt_get_entry(this, block)) {
+ case BBT_BLOCK_WORN:
+ case BBT_BLOCK_FACTORY_BAD:
+ continue;
+ }
+ page = block <<
+ (this->block_shift - this->page_shift);
+ /* Check, if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ goto write;
+ }
+ pr_err("No space left to write bad block table\n");
+ return -ENOSPC;
+write:
+
+ /* Set up shift count and masks for the flash table */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ msk[2] = ~rcode;
+ switch (bits) {
+ case 1:
+ sft = 3;
+ sftmsk = 0x07;
+ msk[0] = 0x00;
+ msk[1] = 0x01;
+ msk[3] = 0x01;
+ break;
+ case 2:
+ sft = 2;
+ sftmsk = 0x06;
+ msk[0] = 0x00;
+ msk[1] = 0x01;
+ msk[3] = 0x03;
+ break;
+ case 4:
+ sft = 1;
+ sftmsk = 0x04;
+ msk[0] = 0x00;
+ msk[1] = 0x0C;
+ msk[3] = 0x0f;
+ break;
+ case 8:
+ sft = 0;
+ sftmsk = 0x00;
+ msk[0] = 0x00;
+ msk[1] = 0x0F;
+ msk[3] = 0xff;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ to = ((loff_t)page) << this->page_shift;
+
+ fh_dev_debug(&this->spi->dev, " td.options 0x08%x\n", td->options);
+
+ /* Must we save the block contents? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+ /* Make it block aligned */
+ to &= ~((loff_t)((1 << this->block_shift) - 1));
+ len = 1 << this->block_shift;
+ res = mtd->read(mtd, to, len, &retlen,
+ buf); // mtd_read(mtd, to, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ pr_info("spi_nand_bbt: error reading block ");
+ pr_info("for writing the bad block table\n");
+ return res;
+ }
+ pr_warn("spi_nand_bbt: ECC error while reading ");
+ pr_warn("block for writing bad block table\n");
+ }
+ /* Read oob data */
+ ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.oobbuf = &buf[len];
+ res = mtd->read_oob(mtd, to + mtd->writesize,
+ &ops); /*mtd_read_oob(mtd, to + mtd->writesize, &ops);*/
+ if (res < 0 || ops.oobretlen != ops.ooblen)
+ goto outerr;
+
+ /* Calc the byte offset in the buffer */
+ pageoffs = page - (int)(to >> this->page_shift);
+ offs = pageoffs << this->page_shift;
+ /* Preset the bbt area with 0xff */
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+ ooboffs = len + (pageoffs * mtd->oobsize);
+
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* The version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ len += offs;
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+ /* Pattern is located at the begin of first page */
+ memcpy(buf, td->pattern, td->len);
+ } else {
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len +
+ (len >> this->page_shift) * mtd->oobsize);
+ offs = 0;
+ ooboffs = len;
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+
+ /*fh_debug_dump(td->pattern, td->len);*/
+ }
+
+ if (td->options & NAND_BBT_VERSION)
+ buf[ooboffs + td->veroffs] = td->version[chip];
+
+ /* Walk through the memory table */
+ for (i = 0; i < numblocks; i++) {
+ uint8_t dat;
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+
+ dat = bbt_get_entry(this, chip * numblocks + i);
+ /* Do not store the reserved bbt blocks! */
+ buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
+ }
+
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = to;
+ einfo.len = 1 << this->block_shift;
+ res = __spi_nand_erase(mtd, &einfo, 1);
+ if (res < 0)
+ goto outerr;
+
+ /*fh_debug_dump(&buf[ooboffs],20);*/
+ res = scan_write_bbt(mtd, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ? NULL :
+ &buf[len]);
+
+ if (res < 0)
+ goto outerr;
+
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
+
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+
+outerr:
+ pr_warn("spi_nand_bbt: error while writing bad block table %d\n", res);
+ return res;
+}
+
+/**
+ * spi_nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
+static inline int spi_nand_memory_bbt(struct mtd_info *mtd,
+ struct nand_bbt_descr *bd)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
+
+ return create_bbt(mtd, this->buf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version nr. of one table is less than the other.
+ */
+static int check_create(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *bd)
+{
+ int i, chips, writeops, create, chipsel, res, res2;
+ struct spi_nand_chip *this = mtd->priv;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+ struct nand_bbt_descr *rd, *rd2;
+
+ chips = 1;
+
+ for (i = 0; i < chips; i++) {
+ writeops = 0;
+ create = 0;
+ rd = NULL;
+ rd2 = NULL;
+ res = res2 = 0;
+ /* Per chip or per device? */
+ chipsel = -1;
+ /* Mirrored table available? */
+ if (md) {
+ if (td->pages[i] == -1 && md->pages[i] == -1) {
+ create = 1;
+ writeops = 0x03;
+ } else if (td->pages[i] == -1) {
+ rd = md;
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
+ rd = td;
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
+ rd = td;
+ if (!(td->options & NAND_BBT_VERSION))
+ rd2 = md;
+ } else if (((int8_t)(td->version[i] - md->version[i]))
+ > 0) {
+ rd = td;
+ writeops = 0x02;
+ } else {
+ rd = md;
+ writeops = 0x01;
+ }
+ } else {
+ if (td->pages[i] == -1) {
+ create = 1;
+ writeops = 0x01;
+ } else
+ rd = td;
+ }
+
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ /*xxx: create it; if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))*/
+ create_bbt(mtd, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(mtd, buf, rd, chipsel);
+ if (mtd_is_eccerr(res)) {
+ /* Mark table as invalid */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+ if (mtd_is_eccerr(res2)) {
+ /* Mark table as invalid */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03;
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ /* Write the bad block table to the device? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ return res;
+ }
+
+ /* Write the mirror bad block table to the device? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, md, td, chipsel);
+ if (res < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
+static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval;
+
+ chips = 1;
+ nrblocks = (int)(mtd->size >> this->block_shift);
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ block = td->pages[i] >>
+ (this->block_shift - this->page_shift);
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if ((oldval != BBT_BLOCK_RESERVED) &&
+ td->reserved_block_code)
+ spi_nand_update_bbt(mtd, (loff_t)block <<
+ this->block_shift);
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK)
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ else
+ block = i * nrblocks;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if (oldval != BBT_BLOCK_RESERVED)
+ update = 1;
+ block++;
+ }
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
+ if (update && td->reserved_block_code)
+ spi_nand_update_bbt(mtd, (loff_t)(block - 1) <<
+ this->block_shift);
+ }
+ fh_dev_debug(&this->spi->dev, "Leave %s\n", __func__);
+}
+
+/**
+ * verify_bbt_descr - verify the bad block description
+ * @mtd: MTD device structure
+ * @bd: the table to verify
+ *
+ * This function performs a few sanity checks on the bad block description
+ * table.
+ */
+static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ u32 pattern_len;
+ u32 bits;
+ u32 table_size;
+
+ if (!bd)
+ return;
+
+ pattern_len = bd->len;
+ bits = bd->options & NAND_BBT_NRBITS_MSK;
+
+ BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+ !(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!bits);
+
+ if (bd->options & NAND_BBT_VERSION)
+ pattern_len++;
+
+ if (bd->options & NAND_BBT_NO_OOB) {
+ BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
+ BUG_ON(bd->offs);
+ if (bd->options & NAND_BBT_VERSION)
+ BUG_ON(bd->veroffs != bd->len);
+ BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+ }
+
+ table_size = mtd->size >> this->block_shift;
+ table_size >>= 3;
+ table_size *= bits;
+ if (bd->options & NAND_BBT_NO_OOB)
+ table_size += pattern_len;
+ BUG_ON(table_size > (1 << this->block_shift));
+}
+
+/**
+ * spi_nand_scan_bbt - [SPI-NAND Interface] scan, find, read and maybe create
+ * bad block table(s)
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks, if a bad block table(s) is/are already available. If
+ * not it scans the device for manufacturer marked good / bad blocks and writes
+ * the bad block table(s) to the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the spi_nand_free_bbt function.
+ */
+int spi_nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ int len, res = 0;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
+ len = mtd->size >> (this->block_shift + 2);
+ /*
+ * Allocate memory (2bit per block) and clear the memory bad block
+ * table.
+ */
+ this->bbt = kzalloc(len, GFP_KERNEL);
+ if (!this->bbt)
+ return -ENOMEM;
+
+ /*
+ * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
+ */
+ if (!td) {
+ res = spi_nand_memory_bbt(mtd, bd);
+ if (res) {
+ pr_err("spi_nand_bbt: can't scan flash and build the RAM-based BBT\n");
+ kfree(this->bbt);
+ this->bbt = NULL;
+ }
+ return res;
+ }
+ verify_bbt_descr(mtd, td);
+ verify_bbt_descr(mtd, md);
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->block_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = vmalloc(len);
+ if (!buf) {
+ kfree(this->bbt);
+ this->bbt = NULL;
+ return -ENOMEM;
+ }
+
+ /* Is the bbt at a given page? */
+ if (td->options & NAND_BBT_ABSPAGE)
+ read_abs_bbts(mtd, buf, td, md);
+ else {
+ /* Search the bad block table using a pattern in oob */
+ search_read_bbts(mtd, buf, td, md);
+ }
+
+ res = check_create(mtd, buf, bd);
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region(mtd, td);
+ if (md)
+ mark_bbt_region(mtd, md);
+
+ vfree(buf);
+ return res;
+}
+EXPORT_SYMBOL(spi_nand_scan_bbt);
+
+/**
+ * spi_nand_update_bbt - update bad block table(s)
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+static int spi_nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->block_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ chip = 0;
+ chipsel = -1;
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE))
+ res = write_bbt(mtd, buf, md, td, chipsel);
+
+out:
+ kfree(buf);
+ return res;
+}
+
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+/* Generic flash bbt descriptors */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 4, //.offs = 8,
+ .len = 4,
+ .veroffs = 2, //.veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 4, //.offs = 8,
+ .len = 4,
+ .veroffs = 2, //.veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
+/**
+ * spi_nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: SPI-NAND chip to create descriptor for
+ *
+ * This function allocates and initializes a nand_bbt_descr for BBM detection
+ * based on the properties of @this. The new descriptor is stored in
+ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
+ * passed to this function.
+ */
+static int spi_nand_create_badblock_pattern(struct spi_nand_chip *this)
+{
+ struct nand_bbt_descr *bd;
+
+ if (this->badblock_pattern) {
+ pr_warn("Bad block pattern already allocated; not replacing\n");
+ return -EINVAL;
+ }
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+ bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
+ bd->offs = this->badblockpos;
+ bd->len = 1;
+ bd->pattern = scan_ff_pattern;
+ bd->options |= NAND_BBT_DYNAMICSTRUCT;
+ this->badblock_pattern = bd;
+ return 0;
+}
+
+/**
+ * spi_nand_default_bbt - [SPI-NAND Interface] Select a default bad block table for the device
+ * @mtd: MTD device structure
+ *
+ * This function selects the default bad block table support for the device and
+ * calls the spi_nand_scan_bbt function.
+ */
+int spi_nand_default_bbt(struct mtd_info *mtd)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ int ret;
+
+ fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
+
+ fh_dev_debug(&this->spi->dev, "\tbbt option %x\n", this->bbt_options);
+ /* Is a flash based bad block table requested? */
+ if (this->bbt_options & NAND_BBT_USE_FLASH) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ if (this->bbt_options & NAND_BBT_NO_OOB) {
+ this->bbt_td = &bbt_main_no_oob_descr;
+ this->bbt_md = &bbt_mirror_no_oob_descr;
+ } else {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ }
+ } else {
+ this->bbt_td = NULL;
+ this->bbt_md = NULL;
+ }
+
+ if (!this->badblock_pattern) {
+ ret = spi_nand_create_badblock_pattern(this);
+ if (ret)
+ return ret;
+ }
+
+ fh_dev_debug(&this->spi->dev, "badblock pattern 0x%02x\n",
+ * this->badblock_pattern->pattern);
+ return spi_nand_scan_bbt(mtd, this->badblock_pattern);
+}
+EXPORT_SYMBOL(spi_nand_default_bbt);
+
+/**
+ * spi_nand_isbad_bbt - [SPI-NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+int spi_nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ int block, res;
+
+ block = (int)(offs >> this->block_shift);
+ res = bbt_get_entry(this, block);
+
+ pr_debug("%s: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ __func__, (unsigned int)offs, block, res);
+
+ switch (res) {
+ case BBT_BLOCK_GOOD:
+ return 0;
+ case BBT_BLOCK_WORN:
+ return 1;
+ case BBT_BLOCK_RESERVED:
+ return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(spi_nand_isbad_bbt);
+/**
+ * spi_nand_markbad_bbt - [SPI-NAND Interface] Mark a block bad in the BBT
+ * @mtd: MTD device structure
+ * @offs: offset of the bad block
+ */
+int spi_nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct spi_nand_chip *this = mtd->priv;
+ int block, ret = 0;
+
+ block = (int)(offs >> this->block_shift);
+
+ /* Mark bad block in memory */
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ /* Update flash-based bad block table */
+ if (this->bbt_options & NAND_BBT_USE_FLASH)
+ ret = spi_nand_update_bbt(mtd, offs);
+
+ return ret;
+}
+EXPORT_SYMBOL(spi_nand_markbad_bbt);
diff --git a/drivers/mtd/spi-nand/spi-nand-device.c b/drivers/mtd/spi-nand/spi-nand-device.c
new file mode 100644
index 00000000..5f7d5160
--- /dev/null
+++ b/drivers/mtd/spi-nand/spi-nand-device.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2009-2014 Micron Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nand.h>
+#include "spi-nand-ids.h"
+
+#ifdef SPINAND_BBT_DEBUG
+#define fh_dev_debug dev_err
+#else
+#define fh_dev_debug(...)
+#endif
+
+static int spi_nand_read_id(struct spi_nand_chip *chip, u8 *buf)
+{
+ struct spi_device *spi = chip->spi;
+ struct spi_nand_cmd cmd = { 0 };
+
+ cmd.cmd = SPINAND_CMD_READ_ID;
+ cmd.n_rx = SPINAND_MAX_ID_LEN;
+ cmd.rx_buf = buf;
+
+ return spi_nand_send_cmd(spi, &cmd);
+}
+
+static void spi_nand_ecc_status(struct spi_nand_chip *chip, unsigned int status,
+ unsigned int *corrected, unsigned int *ecc_error)
+{
+ unsigned int ecc_status = (status >> SPI_NAND_ECC_SHIFT) &
+ chip->ecc_mask;
+
+ *ecc_error = (ecc_status == chip->ecc_uncorr);
+ if (*ecc_error == 0)
+ *corrected = ecc_status;
+}
+
+static void spi_nand_mt29f_ecc_status(unsigned int status,
+ unsigned int *corrected, unsigned int *ecc_error)
+{
+ unsigned int ecc_status = (status >> SPI_NAND_MT29F_ECC_SHIFT) &
+ SPI_NAND_MT29F_ECC_MASK;
+
+ *ecc_error = (ecc_status == SPI_NAND_MT29F_ECC_UNCORR);
+ if (*ecc_error == 0)
+ *corrected = ecc_status;
+}
+
+static void spi_nand_gd5f_ecc_status(unsigned int status,
+ unsigned int *corrected, unsigned int *ecc_error)
+{
+ unsigned int ecc_status = (status >> SPI_NAND_GD5F_ECC_SHIFT) &
+ SPI_NAND_GD5F_ECC_MASK;
+
+ *ecc_error = (ecc_status == SPI_NAND_GD5F_ECC_UNCORR);
+ /*TODO fix corrected bits*/
+ if (*ecc_error == 0)
+ *corrected = ecc_status;
+}
+
+/*static int spi_nand_manufacture_init(struct spi_nand_chip *chip)
+{
+ switch (chip->mfr_id) {
+ case SPINAND_MFR_MICRON:
+ chip->get_ecc_status = spi_nand_mt29f_ecc_status;
+
+ if (chip->page_spare_size == 64)
+ chip->ecclayout = &micron_ecc_layout_64;
+
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ break;
+ case SPINAND_MFR_GIGADEVICE:
+ chip->get_ecc_status = spi_nand_gd5f_ecc_status;
+ chip->read_cache = spi_nand_read_from_cache_snor_protocol;
+ chip->ecc_strength_ds = 8;
+ chip->ecc_step_ds = chip->page_size >> 2;
+ if (chip->page_spare_size == 128)
+ chip->ecclayout = &gd5f_ecc_layout_128;
+ else if (chip->page_spare_size == 256)
+ chip->ecclayout = &gd5f_ecc_layout_256;
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}*/
+
+static int spi_nand_device_probe(struct spi_device *spi)
+{
+ struct spi_nand_chip *chip;
+ enum spi_nand_device_variant variant;
+ struct mtd_info *mtd;
+ /* struct mtd_part_parser_data ppdata;*/
+ struct mtd_partition *parts = NULL;
+ int nr_parts = 0;
+ int ret, i;
+ struct flash_platform_data *data;
+ struct spi_master *p_master;
+
+ fh_dev_debug(&spi->dev, "%s with spi%d:%d \n", __func__,
+ spi->master->bus_num, spi->chip_select);
+
+ p_master = spi->master;
+ if (p_master->ctl_multi_wire_info.ctl_wire_support
+ & (DUAL_WIRE_SUPPORT|QUAD_WIRE_SUPPORT)) {
+ /*if master support multi wire, set one wire here..*/
+ p_master->ctl_multi_wire_info.change_to_1_wire(p_master);
+ }
+
+ data = spi->dev.platform_data;
+ chip = kzalloc(sizeof(struct spi_nand_chip), GFP_KERNEL);
+ if (!chip) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ chip->spi = spi;
+
+ mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+ mtd->priv = chip;
+ chip->mtd = mtd;
+ spi_set_drvdata(spi, chip);
+ /*
+ * read ID command format might be different for different manufactory
+ * such as, Micron SPI NAND need extra one dummy byte after perform
+ * read ID command but Giga device don't need.
+ *
+ * So, specify manufactory of device in device tree is obligatory
+ */
+/* variant = spi_get_device_id(spi)->driver_data;
+ switch (variant) {
+ case SPI_NAND_MT29F:
+ chip->read_id = spi_nand_mt29f_read_id;
+ break;
+ case SPI_NAND_GD5F:
+ chip->read_id = spi_nand_gd5f_read_id;
+ break;
+ default:
+ dev_err(&spi->dev, "unknown device, id %d\n", variant);
+ ret = -ENODEV;
+ goto err3;
+ }*/
+
+ spi->dev_open_multi_wire_flag = p_master->ctl_multi_wire_info.ctl_wire_support;
+ chip->read_id = spi_nand_read_id;
+ ret = spi_nand_scan_ident(mtd);
+ if (ret) {
+ ret = -ENODEV;
+ goto err3;
+ }
+
+/* spi_nand_manufacture_init(chip);*/
+ chip->get_ecc_status = spi_nand_ecc_status;
+
+ ret = spi_nand_scan_tail(mtd);
+ if (ret) {
+ fh_dev_debug(&spi->dev, "goto err4 %s\n", __func__);
+ goto err4;
+ }
+
+ /* partitions should match sector boundaries; and it may be good to
+ * use readonly partitions for writeprotected sectors (BP2..BP0).
+ */
+ mtd->name = "spi0.0";
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[] = { "cmdlinepart", NULL, };
+
+ nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
+ }
+
+ if (nr_parts <= 0 && data && data->parts) {
+ parts = data->parts;
+ nr_parts = data->nr_parts;
+ }
+
+ if (nr_parts > 0) {
+ for (i = 0; i < nr_parts; i++) {
+ DEBUG(MTD_DEBUG_LEVEL2,
+ "partitions[%d] = " "{.name = %s, .offset = 0x%llx, "
+ ".size = 0x%llx (%lldKiB) }\n",
+ i, parts[i].name, (long long)parts[i].offset,
+ (long long)parts[i].size,
+ (long long)(parts[i].size >> 10));
+ }
+ }
+
+ fh_dev_debug(&spi->dev, " mtd_device_register %s\n", __func__);
+ ret = mtd_device_register(mtd, parts, nr_parts);
+ if (!ret)
+ return 0;
+
+ fh_dev_debug(&spi->dev, " spi_nand_scan_tail_release %s\n", __func__);
+ spi_nand_scan_tail_release(mtd);
+ fh_dev_debug(&spi->dev, "Leave %s\n", __func__);
+err4:
+ spi_nand_scan_ident_release(mtd);
+err3:
+ kfree(mtd);
+err2:
+ kfree(chip);
+err1:
+ return ret;
+}
+
+int spi_nand_device_remove(struct spi_device *spi)
+{
+ struct spi_nand_chip *chip = spi_get_drvdata(spi);
+ struct mtd_info *mtd = chip->mtd;
+
+ spi_nand_release(mtd);
+ kfree(mtd);
+ kfree(chip);
+
+ return 0;
+}
+
+const struct spi_device_id spi_nand_id_table[] = {
+ { "spi-nand", SPI_NAND_GENERIC},
+ { "mt29f", SPI_NAND_MT29F },
+ { "gd5f", SPI_NAND_GD5F },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, spi_nand_id_table);
+
+/**
+ * module_spi_driver() - Helper macro for registering a SPI driver
+ * @__spi_driver: spi_driver struct
+ *
+ * Helper macro for SPI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_spi_driver(__spi_driver) \
+ module_driver(__spi_driver, spi_register_driver, \
+ spi_unregister_driver)
+
+static struct spi_driver spi_nand_device_driver = {
+ .driver = {
+ .name = "spi-nand",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .id_table = spi_nand_id_table,
+ .probe = spi_nand_device_probe,
+ .remove = spi_nand_device_remove,
+};
+
+static int __init spi_nand_init(void)
+{
+ return spi_register_driver(&spi_nand_device_driver);
+}
+
+static void __exit spi_nand_exit(void)
+{
+ spi_unregister_driver(&spi_nand_device_driver);
+}
+
+module_init(spi_nand_init);
+module_exit(spi_nand_exit);
+
+MODULE_DESCRIPTION("SPI NAND device");
+MODULE_AUTHOR("Peter Pan<peterpandong at micron.com>");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel.garcia at imgtec.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/mtd/spi-nand/spi-nand-ids.c b/drivers/mtd/spi-nand/spi-nand-ids.c
new file mode 100644
index 00000000..00347a97
--- /dev/null
+++ b/drivers/mtd/spi-nand/spi-nand-ids.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2016 Fullhan, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mtd/spi-nand.h>
+
+/*static struct spi_nand_flash spi_nand_table[] = {
+ SPI_NAND_INFO("MT29F2G01AAAED", 0x2C, 0X22, 2048, 64, 64, 2048,
+ SPINAND_NEED_PLANE_SELECT),
+ SPI_NAND_INFO("MT29F4G01AAADD", 0x2C, 0X32, 2048, 64, 64, 4096,
+ SPINAND_NEED_PLANE_SELECT),
+ SPI_NAND_INFO("GD5F 512MiB 1.8V", 0xC8, 0XA4, 4096, 256, 64, 2048,
+ 0),
+ SPI_NAND_INFO("GD5F 512MiB 3.3V", 0xC8, 0XB4, 4096, 256, 64, 2048,
+ 0),
+ SPI_NAND_INFO("GD5F 256MiB 3.3V", 0xC8, 0XB2, 2048, 128, 64, 2048,
+ 0),
+ SPI_NAND_INFO("GD5F 128MiB 3.3V", 0xC8, 0XB1, 2048, 128, 64, 1024,
+ 0),
+ SPI_NAND_INFO("W25N01GV", 0xEF, 0XAA21, 2048, 64, 64, 1024,
+ 0),
+ {.name = NULL},
+};*/
+
+/**
+* Default OOB area specification layout
+*/
+static struct nand_ecclayout ecc_layout_64 = {
+	.eccbytes = 32,
+	.eccpos = {
+		8, 9, 10, 11, 12, 13, 14, 15,
+		24, 25, 26, 27, 28, 29, 30, 31,
+		40, 41, 42, 43, 44, 45, 46, 47,
+		56, 57, 58, 59, 60, 61, 62, 63
+	},
+	.oobavail = 30,
+	.oobfree = {
+		{
+			.offset = 2,
+			.length = 6
+		}, {
+			.offset = 16,
+			.length = 8
+		}, {
+			.offset = 32,
+			.length = 8
+		}, {
+			.offset = 48,
+			.length = 8
+		},
+	}
+};
+
+static struct nand_ecclayout gd5f_ecc_layout_256 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255
+ },
+ .oobavail = 127,
+ .oobfree = { {1, 127} }
+};
+
+static struct nand_ecclayout gd5f_ecc_layout_128 = {
+	.eccbytes = 64,
+	.eccpos = {
+		64, 65, 66, 67, 68, 69, 70, 71,
+		72, 73, 74, 75, 76, 77, 78, 79,
+		80, 81, 82, 83, 84, 85, 86, 87,
+		88, 89, 90, 91, 92, 93, 94, 95,
+		96, 97, 98, 99, 100, 101, 102, 103,
+		104, 105, 106, 107, 108, 109, 110, 111,
+		112, 113, 114, 115, 116, 117, 118, 119,
+		120, 121, 122, 123, 124, 125, 126, 127,
+	},
+	.oobavail = 62,
+	.oobfree = { {2, 62} }
+};
+
+static struct nand_ecclayout pn26_ecc_layout_128 = {
+ .eccbytes = 52,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14,15,16,17,18,
+ 21,22,23,24,25,26,27,28,29,30,31,32,33,
+ 36,37,38,39,40,41,42,43,44,45,46,47,48,
+ 51,52,53,54,55,56,57,58,59,60,61,62,63
+ },
+ .oobavail = 72,
+ .oobfree = {
+ {
+ .offset = 4,
+ .length = 2
+ }, {
+ .offset = 19,
+ .length = 2
+ }, {
+ .offset = 34,
+ .length = 2
+ }, {
+ .offset = 49,
+ .length = 2
+ },
+ {
+ .offset = 64,
+ .length = 64
+ },
+ }
+};
+
+static struct nand_ecclayout default_ecc_layout = {
+	.eccbytes = 64,
+	.oobavail = 30,
+	.oobfree = { { 2, 30 } }
+};
+
+static struct nand_ecclayout mx35_ecc_layout_64 = {
+ .eccbytes = 0,
+ .oobavail = 62,
+ .oobfree = { {2, 62} }
+};
+
+static struct nand_ecclayout mt29f_ecc_layout_128 = {
+ .eccbytes = 64,
+ .oobavail = 60,
+ .oobfree = { {4, 60} }
+};
+static struct nand_ecclayout tc58_ecc_layout_64 = {
+ .eccbytes = 0,
+ .oobavail = 60,
+ .oobfree = { {4, 60} }
+};
+
+static struct nand_ecclayout ds35_ecc_layout_64 = {
+ .eccbytes = 16,
+ .oobavail = 32,
+ .oobfree = {
+ {
+ .offset = 8,
+ .length = 8
+ }, {
+ .offset = 24,
+ .length = 8
+ }, {
+ .offset = 40,
+ .length = 8
+ }, {
+ .offset = 56,
+ .length = 8
+ },
+ }
+};
+
+
+static struct spi_nand_flash spi_nand_table[] = {
+ {
+ .name = "W25N01GV",
+ .id_info = {
+ .id_addr = 0,
+ .id_len = 3,
+ },
+ .dev_id = {0xEF, 0xAA, 0x21},
+ .page_size = 2048,
+ .page_spare_size = 64,
+ .pages_per_blk = 64,
+ .blks_per_chip = 1024,
+ .options = 0,
+ .ecc_mask = 3,
+ .ecc_uncorr = 2,
+ .ecc_layout = &ecc_layout_64,
+ .qe_addr = 0xa0,
+ .qe_flag = 0x0,
+ .qe_mask = 1<<1,
+ .multi_wire_command_length = 4,
+ },
+ {
+ .name = "MX35LF1GE4AB",
+ .id_info = {
+ .id_addr = 0,
+ .id_len = 2,
+ },
+ .dev_id = {0xC2, 0x12},
+ .page_size = 2048,
+ .page_spare_size = 64,
+ .pages_per_blk = 64,
+ .blks_per_chip = 1024,
+ .options = 0,
+ .ecc_mask = 3,
+ .ecc_uncorr = 2,
+ .ecc_layout = &mx35_ecc_layout_64,
+ .qe_addr = 0xb0,
+ .qe_flag = 0x1,
+ .qe_mask = 1<<0,
+ .multi_wire_command_length = 4,
+ },
+ {
+ .name = "MX35LF2GE4AB",
+ .id_info = {
+ .id_addr = 0,
+ .id_len = 2,
+ },
+ .dev_id = {0xC2, 0x22},
+ .page_size = 2048,
+ .page_spare_size = 64,
+ .pages_per_blk = 64,
+ .blks_per_chip = 2048,
+ .options = SPINAND_NEED_PLANE_SELECT,
+ .ecc_mask = 3,
+ .ecc_uncorr = 2,
+ .ecc_layout = &mx35_ecc_layout_64,
+ .qe_addr = 0xb0,
+ .qe_flag = 0x1,
+ .qe_mask = 1<<0,
+ .multi_wire_command_length = 4,
+ },
+ {
+ .name = "GD5F1GQ4U",
+ .id_info = {
+ .id_addr = SPI_NAND_ID_NO_DUMMY,
+ .id_len = 3,
+ },
+ .dev_id = {0xC8, 0xB1, 0x48},
+ .page_size = 2048,
+ .page_spare_size = 128,
+ .pages_per_blk = 64,
+ .blks_per_chip = 1024,
+ .options = 0,
+ .ecc_mask = 7,
+ .ecc_uncorr = 7,
+ .ecc_layout = &gd5f_ecc_layout_128,
+ .qe_addr = 0xb0,
+ .qe_flag = 0x1,
+ .qe_mask = 1<<0,
+ .multi_wire_command_length = 5,
+ },
+ {
+ .name = "GD5F2GQ4U",
+ .id_info = {
+ .id_addr = SPI_NAND_ID_NO_DUMMY,
+ .id_len = 3,
+ },
+ .dev_id = {0xC8, 0xB2, 0x48},
+ .page_size = 2048,
+ .page_spare_size = 128,
+ .pages_per_blk = 64,
+ .blks_per_chip = 1024,
+ .options = 0,
+ .ecc_mask = 7,
+ .ecc_uncorr = 7,
+ .ecc_layout = &gd5f_ecc_layout_128,
+ .qe_addr = 0xb0,
+ .qe_flag = 0x1,
+ .qe_mask = 1<<0,
+ .multi_wire_command_length = 5,
+ },
+ {
+ .name = "GD5F2GQ4UB",
+ .id_info = {
+ .id_addr = 0,
+ .id_len = 2,
+ },
+ .dev_id = {0xD2, 0xC8},
+ .page_size = 2048,
+ .page_spare_size = 128,
+ .pages_per_blk = 64,
+ .blks_per_chip = 2048,
+ .options = 0,
+ .ecc_mask = 3,
+ .ecc_uncorr = 2,
+ .ecc_layout = &gd5f_ecc_layout_128,
+ .qe_addr = 0xb0,
+ .qe_flag = 0x1,
+ .qe_mask = 1<<0,
+ .multi_wire_command_length = 4,
+ },
+ {
+ .name = "GD5F1GQ4UB",
+ .id_info = {
+ .id_addr = 0,
+ .id_len = 2,
+ },
+ .dev_id = {0xD1, 0xC8},
+ .page_size = 2048,
+ .page_spare_size = 128,
+ .pages_per_blk = 64,
+ .blks_per_chip = 1024,
+ .options = 0,
+ .ecc_mask = 3,
+ .ecc_uncorr = 2,
+ .ecc_layout = &gd5f_ecc_layout_128,
+ .qe_addr = 0xb0,
+ .qe_flag = 0x1,
+ .qe_mask = 1<<0,
+ .multi_wire_command_length = 4,
+ },
+ {
+ .name = "PN26G01A",
+ .id_info = {
+ .id_addr = 0x0,
+ .id_len = 2,
+ },
+ .dev_id = {0xA1, 0xE1},
+ .page_size = 2048,
+ .page_spare_size = 128,
+ .pages_per_blk = 64,
+ .blks_per_chip = 1024,
+ .options = 0,
+ .ecc_mask = 3,
+ .ecc_uncorr = 2,
+ .ecc_layout = &pn26_ecc_layout_128,
+ .qe_addr = 0xb0,
+ .qe_flag = 0x1,
+ .qe_mask = 1<<0,
+ .multi_wire_command_length = 4,
+ },
+ /*MT29F1G01*/
+ {
+ .name = "MT29F1G01",
+ .id_info = {
+ .id_addr = 0,
+ .id_len = 2,
+ },
+ .dev_id = {0x2c, 0x14},
+ .page_size = 2048,
+ .page_spare_size = 128,
+ .pages_per_blk = 64,
+ .blks_per_chip = 1024,
+ .options = 0,
+ .ecc_mask = 3,
+ .ecc_uncorr = 2,
+ .ecc_layout = &mt29f_ecc_layout_128,
+ .qe_addr = 0xa0,
+ .qe_mask = 1<<1,
+ .qe_flag = 0,
+ .multi_wire_command_length = 4,
+ },
+ /*TC58CVG0S3H*/
+ {
+ .name = "TC58CVG0S3H",
+ .id_info = {
+ .id_addr = 0,
+ .id_len = 2,
+ },
+ .dev_id = {0x98, 0xc2},
+ .page_size = 2048,
+ .page_spare_size = 64,
+ .pages_per_blk = 64,
+ .blks_per_chip = 1024,
+ .options = 0,
+ .ecc_mask = 3,
+ .ecc_uncorr = 2,
+ .ecc_layout = &tc58_ecc_layout_64,
+ .qe_addr = 0xa0,
+ .qe_mask = 1<<1,
+ .qe_flag = 0,
+ .multi_wire_command_length = 4,
+ },
+ /*DS35X1GA*/
+ {
+ .name = "DS35X1GA",
+ .id_info = {
+ .id_addr = 0,
+ .id_len = 2,
+ },
+ .dev_id = {0xe5, 0x71},
+ .page_size = 2048,
+ .page_spare_size = 64,
+ .pages_per_blk = 64,
+ .blks_per_chip = 1024,
+ .options = 0,
+ .ecc_mask = 3,
+ .ecc_uncorr = 2,
+ .ecc_layout = &ds35_ecc_layout_64,
+ .qe_addr = 0xb0,
+ .qe_mask = 1<<0,
+ .qe_flag = 1,
+ .multi_wire_command_length = 4,
+ },
+
+
+};
+
+/**
+ * spi_nand_scan_id_table - [INTERN] scan chip info in id table
+ * @chip: SPI-NAND device structure
+ * @id: point to manufacture id and device id
+ */
+bool spi_nand_scan_id_table(struct spi_nand_chip *chip, u8 *id)
+{
+ int i, j = 0;
+ struct spi_nand_flash *type = spi_nand_table;
+ int m=0;
+
+ for (m=0; m<ARRAY_SIZE(spi_nand_table); m++,type++) {
+// if (id[0] == type->mfr_id && id[1] == type->dev_id) {
+ for (j=0, i = (SPI_NAND_ID_NO_DUMMY == type->id_info.id_addr) ? 0 : 1;
+ j < type->id_info.id_len;j++,i++ ) {
+ if (id[i] != type->dev_id[j])
+ break;
+ }
+ if (j == type->id_info.id_len) {
+ chip->name = type->name;
+ chip->size = type->page_size * type->pages_per_blk
+ * type->blks_per_chip;
+ chip->block_size = type->page_size
+ * type->pages_per_blk;
+ chip->page_size = type->page_size;
+ chip->page_spare_size = type->page_spare_size;
+ chip->block_shift = ilog2(chip->block_size);
+ chip->page_shift = ilog2(chip->page_size);
+ chip->page_mask = chip->page_size - 1;
+ chip->options = type->options;
+ if (!type->ecc_layout)
+ chip->ecclayout = &default_ecc_layout;
+ else
+ chip->ecclayout = type->ecc_layout;
+ chip->dev_id_len = type->id_info.id_len;
+ chip->ecc_uncorr = type->ecc_uncorr;
+ chip->ecc_mask = type->ecc_mask;
+ memcpy(chip->dev_id, type->dev_id, chip->dev_id_len);
+ chip->qe_addr = type->qe_addr;
+ chip->qe_flag = type->qe_flag;
+ chip->qe_mask = type->qe_mask;
+ chip->multi_wire_command_length =
+ type->multi_wire_command_length;
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/drivers/mtd/spi-nand/spi-nand-ids.h b/drivers/mtd/spi-nand/spi-nand-ids.h
new file mode 100644
index 00000000..c488eee5
--- /dev/null
+++ b/drivers/mtd/spi-nand/spi-nand-ids.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 Fullhan, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef DRIVERS_MTD_SPI_NAND_SPI_NAND_IDS_H_
+#define DRIVERS_MTD_SPI_NAND_SPI_NAND_IDS_H_
+
+enum spi_nand_device_variant {
+ SPI_NAND_GENERIC,
+ SPI_NAND_MT29F,
+ SPI_NAND_GD5F,
+};
+
+
+bool spi_nand_scan_id_table(struct spi_nand_chip *chip, u8 *id);
+
+#endif /* DRIVERS_MTD_SPI_NAND_SPI_NAND_IDS_H_ */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 93359fab..828c6445 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -984,6 +984,8 @@ config DM9000
To compile this driver as a module, choose M here. The module
will be called dm9000.
+
+source "drivers/net/fh_gmac/Kconfig"
config DM9000_DEBUGLEVEL
int "DM9000 maximum debug level"
@@ -2542,6 +2544,7 @@ config S6GMAC
source "drivers/net/stmmac/Kconfig"
+
config PCH_GBE
tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
depends on PCI
@@ -3450,4 +3453,6 @@ config VMXNET3
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
+
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d5ce0115..7bc4daa1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -254,6 +254,7 @@ obj-$(CONFIG_SMSC911X) += smsc911x.o
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
obj-$(CONFIG_DM9000) += dm9000.o
+obj-$(CONFIG_FH_GMAC) += fh_gmac/
obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
obj-$(CONFIG_MLX4_CORE) += mlx4/
diff --git a/drivers/net/fh_gmac/Kconfig b/drivers/net/fh_gmac/Kconfig
new file mode 100644
index 00000000..55134815
--- /dev/null
+++ b/drivers/net/fh_gmac/Kconfig
@@ -0,0 +1,21 @@
+config FH_GMAC
+ tristate "FH 10/100 Ethernet driver"
+ select MII
+ select PHYLIB
+ select CRC32
+ depends on NETDEVICES && HAS_IOMEM
+ help
+ This is the driver for the Ethernet IPs are built around a
+ Synopsys IP Core.
+
+if FH_GMAC
+
+config FH_GMAC_DA
+ bool "FH GMAC DMA arbitration scheme"
+ default n
+ help
+ Selecting this option, rx has priority over Tx (only for Giga
+ Ethernet device).
+ By default, the DMA arbitration scheme is based on Round-robin
+ (rx:tx priority is 1:1).
+endif
diff --git a/drivers/net/fh_gmac/Makefile b/drivers/net/fh_gmac/Makefile
new file mode 100644
index 00000000..e22c42f1
--- /dev/null
+++ b/drivers/net/fh_gmac/Makefile
@@ -0,0 +1,5 @@
+
+
+obj-$(CONFIG_FH_GMAC) += fh_gmac.o
+
+fh_gmac-objs := fh_gmac_dma.o fh_gmac_main.o fh_gmac_ethtool.o fh_gmac_phyt.o
diff --git a/drivers/net/fh_gmac/fh_gmac.h b/drivers/net/fh_gmac/fh_gmac.h
new file mode 100755
index 00000000..513fd6a1
--- /dev/null
+++ b/drivers/net/fh_gmac/fh_gmac.h
@@ -0,0 +1,245 @@
+/*
+ * fh_gmac.h
+ *
+ * Created on: May 22, 2014
+ * Author: duobao
+ */
+
+#ifndef FH_GMAC_H_
+#define FH_GMAC_H_
+
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+
+#include "fh_gmac_phyt.h"
+#include "fh_gmac_dma.h"
+
+//GMAC-MAC
+#define REG_GMAC_CONFIG (0x0000)
+#define REG_GMAC_FRAME_FILTER (0x0004)
+#define REG_GMAC_HASH_HIGH (0x0008)
+#define REG_GMAC_HASH_LOW (0x000C)
+#define REG_GMAC_GMII_ADDRESS (0x0010)
+#define REG_GMAC_GMII_DATA (0x0014)
+#define REG_GMAC_FLOW_CTRL (0x0018)
+#define REG_GMAC_DEBUG (0x0024)
+#define REG_GMAC_MAC_HIGH (0x0040)
+#define REG_GMAC_MAC_LOW (0x0044)
+//GMAC-DMA
+#define REG_GMAC_BUS_MODE (0x1000)
+#define REG_GMAC_TX_POLL_DEMAND (0x1004)
+#define REG_GMAC_RX_POLL_DEMAND (0x1008)
+#define REG_GMAC_RX_DESC_ADDR (0x100C)
+#define REG_GMAC_TX_DESC_ADDR (0x1010)
+#define REG_GMAC_STATUS (0x1014)
+#define REG_GMAC_OP_MODE (0x1018)
+#define REG_GMAC_INTR_EN (0x101C)
+#define REG_GMAC_ERROR_COUNT (0x1020)
+#define REG_GMAC_AXI_BUS_MODE (0x1028)
+#define REG_GMAC_AXI_STATUS (0x102C)
+#define REG_GMAC_CURR_TX_DESC (0x1048)
+#define REG_GMAC_CURR_RX_DESC (0x104C)
+
+enum tx_dma_irq_status {
+ tx_hard_error = 1,
+ tx_hard_error_bump_tc = 2,
+ handle_tx_rx = 3,
+};
+
+enum rx_frame_status {
+ good_frame = 0,
+ discard_frame = 1,
+ csum_none = 2,
+ llc_snap = 4,
+};
+
+#define GMAC_MIN_ETHPKTSIZE (60) /* Minimum ethernet pkt size */
+#define GMAC_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
+
+#define BUFFER_SIZE_2K 2048
+#define BUFFER_SIZE_4K 4096
+#define BUFFER_SIZE_8K 8192
+#define BUFFER_SIZE_16K 16384
+
+#ifdef FH_GMAC_DMA_DEBUG
+#define GMAC_DMA_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define GMAC_DMA_DBG(fmt, args...) do { } while (0)
+#endif
+
+#ifdef FH_GMAC_XMIT_DEBUG
+#define TX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define TX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#ifdef FH_GMAC_RX_DEBUG
+#define RX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define RX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#define FH_GMAC_DEBUG ( NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL )
+
+enum {
+ gmac_gmii_clock_60_100,
+ gmac_gmii_clock_100_150,
+ gmac_gmii_clock_20_35,
+ gmac_gmii_clock_35_60,
+ gmac_gmii_clock_150_250,
+ gmac_gmii_clock_250_300
+};
+
+enum {
+ gmac_interrupt_all = 0x0001ffff,
+ gmac_interrupt_none = 0x0
+};
+
+typedef struct Gmac_Stats {
+ /* Transmit errors */
+ unsigned long tx_underflow ____cacheline_aligned;
+ unsigned long tx_carrier;
+ unsigned long tx_losscarrier;
+ unsigned long tx_heartbeat;
+ unsigned long tx_deferred;
+ unsigned long tx_vlan;
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ /* Receive errors */
+ unsigned long rx_desc;
+ unsigned long rx_partial;
+ unsigned long rx_runt;
+ unsigned long rx_toolong;
+ unsigned long rx_collision;
+ unsigned long rx_crc;
+ unsigned long rx_length;
+ unsigned long rx_mii;
+ unsigned long rx_multicast;
+ unsigned long rx_gmac_overflow;
+ unsigned long rx_watchdog;
+ unsigned long da_rx_filter_fail;
+ unsigned long sa_rx_filter_fail;
+ unsigned long rx_missed_cntr;
+ unsigned long rx_overflow_cntr;
+ /* Tx/Rx IRQ errors */
+ unsigned long tx_undeflow_irq;
+ unsigned long tx_process_stopped_irq;
+ unsigned long tx_jabber_irq;
+ unsigned long rx_overflow_irq;
+ unsigned long rx_buf_unav_irq;
+ unsigned long rx_process_stopped_irq;
+ unsigned long rx_watchdog_irq;
+ unsigned long tx_early_irq;
+ unsigned long fatal_bus_error_irq;
+ /* Extra info */
+ unsigned long threshold;
+ unsigned long tx_pkt_n;
+ unsigned long rx_pkt_n;
+ unsigned long poll_n;
+ unsigned long sched_timer_n;
+ unsigned long normal_irq_n;
+}Gmac_Stats;
+
+typedef struct Gmac_Object {
+ Gmac_Tx_DMA_Descriptors* tx_dma_descriptors ____cacheline_aligned;
+ Gmac_Rx_DMA_Descriptors* rx_dma_descriptors;
+ int full_duplex; //read only
+ int speed_100m; //read only
+
+ struct sk_buff_head rx_recycle;
+ struct sk_buff** rx_skbuff;
+ struct sk_buff** tx_skbuff;
+ dma_addr_t* rx_skbuff_dma;
+ __u32 cur_rx;
+ __u32 dirty_rx;
+ __u32 cur_tx;
+ __u32 dirty_tx;
+ dma_addr_t tx_bus_addr;
+ dma_addr_t rx_bus_addr;
+ __u32 dma_tx_size;
+ __u32 dma_rx_size;
+ __u32 dma_buf_sz;
+
+ spinlock_t lock;
+
+ void __iomem *remap_addr;
+ __u8 local_mac_address[6];
+ __u32 msg_enable;
+ struct device* dev;
+ struct net_device* ndev;
+ struct platform_device* pdev;
+ struct napi_struct napi;
+ struct mii_bus *mii;
+ struct phy_device *phydev;
+ Gmac_Stats stats;
+
+ int oldlink;
+ int speed;
+ int oldduplex;
+ __u32 flow_ctrl;
+ __u32 pause;
+
+ int wolopts;
+ int wolenabled;
+
+ int phy_interface;
+ struct fh_gmac_platform_data* priv_data;
+
+ struct clk* clk;
+
+}Gmac_Object;
+
+#define TX_TIMEO 5000 /* default 5 seconds */
+#define DMA_RX_SIZE 256
+#define DMA_TX_SIZE 256
+#define FLOW_OFF 0
+#define FLOW_RX 4
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+#define PAUSE_TIME 0x200
+
+int fh_mdio_register(struct net_device *ndev);
+int fh_mdio_unregister(struct net_device *ndev);
+
+void GMAC_DMA_StartTx(Gmac_Object* pGmac);
+void GMAC_DMA_StopTx(Gmac_Object* pGmac);
+void GMAC_DMA_StartRx(Gmac_Object* pGmac);
+void GMAC_DMA_StopRx(Gmac_Object* pGmac);
+
+void fh_gmac_set_ethtool_ops(struct net_device *netdev);
+
+void GMAC_DMA_InitDescRings(struct net_device *ndev);
+int GMAC_DMA_Init(struct net_device *ndev, __u32 dma_tx, __u32 dma_rx);
+void GMAC_DMA_InitRxDesc(Gmac_Rx_DMA_Descriptors* desc, unsigned int size);
+void GMAC_DMA_InitTxDesc(Gmac_Tx_DMA_Descriptors* desc, unsigned int size);
+void GMAC_DMA_OpMode(Gmac_Object* pGmac);
+void GMAC_DMA_FreeDesc(Gmac_Object* pGmac);
+void GMAC_DMA_FreeRxSkbufs(Gmac_Object* pGmac);
+void GMAC_DMA_FreeTxSkbufs(Gmac_Object* pGmac);
+void GMAC_DMA_DisplayRxDesc(Gmac_Rx_DMA_Descriptors* desc, int size);
+void GMAC_DMA_DisplayTxDesc(Gmac_Tx_DMA_Descriptors* desc, int size);
+int GMAC_DMA_Interrupt(Gmac_Object* pGmac);
+int GMAC_DMA_TxStatus(Gmac_Object* pGmac, Gmac_Tx_DMA_Descriptors* desc);
+int GMAC_DMA_RxStatus(Gmac_Object* pGmac, Gmac_Rx_DMA_Descriptors* desc);
+void GMAC_DMA_ReleaseTxDesc(Gmac_Tx_DMA_Descriptors* desc);
+void GMAC_DMA_DiagnosticFrame(void *data, Gmac_Object* pGmac);
+void GMAC_FlowCtrl(Gmac_Object * pGmac, unsigned int duplex, unsigned int fc,
+ unsigned int pause_time);
+
+#endif /* FH_GMAC_H_ */
diff --git a/drivers/net/fh_gmac/fh_gmac_dma.c b/drivers/net/fh_gmac/fh_gmac_dma.c
new file mode 100644
index 00000000..9f11e146
--- /dev/null
+++ b/drivers/net/fh_gmac/fh_gmac_dma.c
@@ -0,0 +1,519 @@
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-mapping.h>
+#include <mach/fh_gmac.h>
+#include "fh_gmac.h"
+#include "fh_gmac_dma.h"
+
+/*
+ * Reset a completed Tx descriptor for reuse, preserving only the
+ * end-of-ring bit so the ring topology is not destroyed.
+ */
+void GMAC_DMA_ReleaseTxDesc(Gmac_Tx_DMA_Descriptors * desc)
+{
+ int ter = desc->desc1.bit.end_of_ring;
+ desc->desc0.dw = 0;
+ desc->desc1.dw = 0;
+ /* set termination field */
+ desc->desc1.bit.end_of_ring = ter;
+}
+
+/* Debug helper: dump the four words of each Rx descriptor in the ring. */
+void GMAC_DMA_DisplayRxDesc(Gmac_Rx_DMA_Descriptors * desc, int size)
+{
+ int i;
+ for (i = 0; i < size; i++) {
+ pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x DES2=0x%x DES3=0x%x",
+ i, (__u32) (&desc[i]), desc[i].desc0.dw,
+ desc[i].desc1.dw, desc[i].desc2.dw, desc[i].desc3.dw);
+ pr_info("\n");
+ }
+}
+
+/* Debug helper: dump the four words of each Tx descriptor in the ring. */
+void GMAC_DMA_DisplayTxDesc(Gmac_Tx_DMA_Descriptors * desc, int size)
+{
+ int i;
+ pr_info("Tx desc:\n");
+ for (i = 0; i < size; i++) {
+ pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+ i, (__u32) & desc[i], desc[i].desc0.dw,
+ desc[i].desc1.dw, desc[i].desc2.dw, desc[i].desc3.dw);
+ pr_info("\n");
+ }
+}
+
+/*
+ * Initialise every Rx descriptor: hand ownership to the DMA engine
+ * (own = 1), program the buffer size and mark the last slot as the
+ * end of the ring.
+ * NOTE(review): BUFFER_SIZE_2K - 1 presumably keeps the value inside
+ * the 11-bit buffer1_size field — confirm against the databook.
+ */
+void GMAC_DMA_InitRxDesc(Gmac_Rx_DMA_Descriptors * desc, __u32 size)
+{
+ int i;
+ for (i = 0; i < size; i++) {
+ desc->desc0.bit.own = 1;
+ desc->desc1.bit.buffer1_size = BUFFER_SIZE_2K - 1;
+ if (i == size - 1) {
+ desc->desc1.bit.end_of_ring = 1;
+ }
+ desc++;
+ }
+}
+
+/*
+ * Initialise every Tx descriptor: owned by the CPU (own = 0) until a
+ * frame is queued; the last slot is flagged end-of-ring.
+ */
+void GMAC_DMA_InitTxDesc(Gmac_Tx_DMA_Descriptors * desc, __u32 size)
+{
+ int i;
+ for (i = 0; i < size; i++) {
+ desc->desc0.bit.own = 0;
+ if (i == size - 1) {
+ desc->desc1.bit.end_of_ring = 1;
+ }
+ desc++;
+ }
+}
+
+/*
+ * Program the DMA operation-mode register (CSR6).
+ * NOTE(review): only bit 21 is actually set by this value; the
+ * store-and-forward comments below list more intent than the write
+ * encodes — confirm the bit layout against the controller databook.
+ */
+void GMAC_DMA_OpMode(Gmac_Object * pGmac)
+{
+
+ //op mode, reg 6
+ //transmit_store_forward
+ //receive_store_forward
+ writel(0 << 25 | 1 << 21 | 0 << 2 | 0 << 14,
+ pGmac->remap_addr + REG_GMAC_OP_MODE);
+}
+
+/*
+ * Allocate the Rx/Tx descriptor rings (coherent DMA memory) plus the
+ * skb bookkeeping arrays, and pre-fill every Rx slot with a freshly
+ * mapped receive skb.
+ * NOTE(review): the kmalloc() results for the skb arrays are used
+ * without a NULL check — confirm an OOM here cannot be reached, or
+ * add error handling.
+ */
+void GMAC_DMA_InitDescRings(struct net_device *ndev)
+{
+ int i;
+ Gmac_Object *pGmac = netdev_priv(ndev);
+ struct sk_buff *skb;
+ __u32 txsize = pGmac->dma_tx_size;
+ __u32 rxsize = pGmac->dma_rx_size;
+ __u32 bfsize = pGmac->dma_buf_sz;
+
+ pGmac->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
+ pGmac->rx_skbuff =
+ kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+ pGmac->rx_dma_descriptors =
+ (Gmac_Rx_DMA_Descriptors *) dma_alloc_coherent(pGmac->dev,
+ rxsize *
+ sizeof
+ (Gmac_Rx_DMA_Descriptors),
+ &pGmac->rx_bus_addr,
+ GFP_KERNEL);
+ pGmac->tx_skbuff =
+ kmalloc(sizeof(struct sk_buff *) * txsize, GFP_KERNEL);
+ pGmac->tx_dma_descriptors =
+ (Gmac_Tx_DMA_Descriptors *) dma_alloc_coherent(pGmac->dev,
+ txsize *
+ sizeof
+ (Gmac_Tx_DMA_Descriptors),
+ &pGmac->tx_bus_addr,
+ GFP_KERNEL);
+
+ if ((pGmac->rx_dma_descriptors == NULL)
+ || (pGmac->tx_dma_descriptors == NULL)) {
+ pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
+ return;
+ }
+
+ pr_debug("fh gmac (%s) DMA desc rings: virt addr (Rx %p, "
+ "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+ ndev->name, pGmac->rx_dma_descriptors,
+ pGmac->tx_dma_descriptors, (__u32) pGmac->rx_bus_addr,
+ (__u32) pGmac->tx_bus_addr);
+
+ /* RX INITIALIZATION: one mapped skb per descriptor */
+ for (i = 0; i < rxsize; i++) {
+ Gmac_Rx_DMA_Descriptors *desc = pGmac->rx_dma_descriptors + i;
+
+ skb = netdev_alloc_skb_ip_align(ndev, bfsize);
+ if (unlikely(skb == NULL)) {
+ pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ break;
+ }
+ pGmac->rx_skbuff[i] = skb;
+ pGmac->rx_skbuff_dma[i] =
+ dma_map_single(pGmac->dev, skb->data, bfsize,
+ DMA_FROM_DEVICE);
+
+ desc->desc2.dw = pGmac->rx_skbuff_dma[i];
+ }
+ pGmac->cur_rx = 0;
+ /* zero when all rxsize buffers were allocated; wraps otherwise */
+ pGmac->dirty_rx = (__u32) (i - rxsize);
+
+ pGmac->dma_buf_sz = bfsize;
+
+ /* TX INITIALIZATION */
+ for (i = 0; i < txsize; i++) {
+ pGmac->tx_skbuff[i] = NULL;
+ pGmac->tx_dma_descriptors[i].desc2.dw = 0;
+ }
+ pGmac->dirty_tx = 0;
+ pGmac->cur_tx = 0;
+
+ /* Clear the Rx/Tx descriptors */
+ GMAC_DMA_InitRxDesc(pGmac->rx_dma_descriptors, rxsize);
+ GMAC_DMA_InitTxDesc(pGmac->tx_dma_descriptors, txsize);
+#ifdef FH_GMAC_DMA_DEBUG
+ if (netif_msg_hw(pGmac)) {
+ pr_info("RX descriptor ring:\n");
+ GMAC_DMA_DisplayRxDesc(pGmac->rx_dma_descriptors, rxsize);
+ pr_info("TX descriptor ring:\n");
+ GMAC_DMA_DisplayTxDesc(pGmac->tx_dma_descriptors, txsize);
+ }
+#endif
+}
+
+/* Unmap and free every receive skb installed in the Rx ring. */
+void GMAC_DMA_FreeRxSkbufs(Gmac_Object * pGmac)
+{
+ int i;
+
+ for (i = 0; i < pGmac->dma_rx_size; i++) {
+ if (pGmac->rx_skbuff[i]) {
+ dma_unmap_single(pGmac->dev, pGmac->rx_skbuff_dma[i],
+ pGmac->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(pGmac->rx_skbuff[i]);
+ }
+ pGmac->rx_skbuff[i] = NULL;
+ }
+}
+
+/*
+ * Unmap and free any transmit skbs still queued in the Tx ring.
+ * A non-zero desc2 (buffer address) indicates the skb data is still
+ * DMA-mapped and must be unmapped before the skb is released.
+ */
+void GMAC_DMA_FreeTxSkbufs(Gmac_Object * pGmac)
+{
+ int i;
+
+ for (i = 0; i < pGmac->dma_tx_size; i++) {
+ if (pGmac->tx_skbuff[i] != NULL) {
+ Gmac_Tx_DMA_Descriptors *desc =
+ pGmac->tx_dma_descriptors + i;
+ if (desc->desc2.dw) {
+ __u32 size;
+ size = desc->desc1.bit.buffer1_size;
+ dma_unmap_single(pGmac->dev, desc->desc2.dw,
+ size, DMA_TO_DEVICE);
+ }
+ dev_kfree_skb_any(pGmac->tx_skbuff[i]);
+ pGmac->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+/*
+ * Tear down both descriptor rings: release all queued skbs, then free
+ * the coherent DMA memory and the bookkeeping arrays allocated in
+ * GMAC_DMA_InitDescRings().
+ */
+void GMAC_DMA_FreeDesc(Gmac_Object * pGmac)
+{
+ /* Release the DMA TX/RX socket buffers */
+ GMAC_DMA_FreeRxSkbufs(pGmac);
+ GMAC_DMA_FreeTxSkbufs(pGmac);
+
+ /* Free the region of consistent memory previously allocated for
+ * the DMA */
+ dma_free_coherent(pGmac->dev,
+ pGmac->dma_tx_size * sizeof(Gmac_Tx_DMA_Descriptors),
+ pGmac->tx_dma_descriptors, pGmac->tx_bus_addr);
+ /* Fix: free the Rx ring with the Rx descriptor size (the original
+ * used sizeof(Gmac_Tx_DMA_Descriptors); both are currently 16
+ * bytes, but the mismatch was a latent bug). */
+ dma_free_coherent(pGmac->dev,
+ pGmac->dma_rx_size * sizeof(Gmac_Rx_DMA_Descriptors),
+ pGmac->rx_dma_descriptors, pGmac->rx_bus_addr);
+ kfree(pGmac->rx_skbuff_dma);
+ kfree(pGmac->rx_skbuff);
+ kfree(pGmac->tx_skbuff);
+}
+
+/*
+ * Initialise the DMA engine: optional software reset (GMAC_RESET),
+ * bus-mode configuration, interrupt enables, and programming of the
+ * Rx/Tx descriptor ring base addresses (CSR3/CSR4).
+ * Returns 0 on success, -EBUSY if the reset bit never self-clears.
+ */
+int GMAC_DMA_Init(struct net_device *ndev, __u32 dma_tx, __u32 dma_rx)
+{
+ Gmac_Object *pGmac = netdev_priv(ndev);
+#ifdef GMAC_RESET
+ int limit;
+
+ /* set the software-reset bit and poll until hardware clears it */
+ __u32 reg = readl(pGmac->remap_addr + REG_GMAC_BUS_MODE);
+ reg |= 0x1;
+ writel(reg, pGmac->remap_addr + REG_GMAC_BUS_MODE);
+
+ limit = 10;
+ while (limit--) {
+ if (!(readl(pGmac->remap_addr + REG_GMAC_BUS_MODE) & 0x1)) {
+ break;
+ }
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+#endif
+ //initialize dma bus mode reg0
+ //8xpbl
+ //no address_aligned_beats
+ //no fixed_burst
+ writel(0 << 25 | 0 << 24 | 1 << 16 | 32 << 8,
+ pGmac->remap_addr + REG_GMAC_BUS_MODE);
+ /* NOTE(review): 0x1a061 interrupt mask — confirm which events this
+ * enables against the DMA interrupt-enable register layout */
+ writel(0x1a061, pGmac->remap_addr + REG_GMAC_INTR_EN);
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_rx, pGmac->remap_addr + REG_GMAC_RX_DESC_ADDR);
+ writel(dma_tx, pGmac->remap_addr + REG_GMAC_TX_DESC_ADDR);
+
+ return 0;
+}
+
+/* Start the transmit DMA engine: set OP_MODE bit 13. */
+void GMAC_DMA_StartTx(Gmac_Object * pGmac)
+{
+ __u32 mode;
+
+ mode = readl(pGmac->remap_addr + REG_GMAC_OP_MODE);
+ writel(mode | (1 << 13), pGmac->remap_addr + REG_GMAC_OP_MODE);
+}
+
+/* Stop the transmit DMA engine: clear OP_MODE bit 13. */
+void GMAC_DMA_StopTx(Gmac_Object * pGmac)
+{
+ __u32 mode;
+
+ mode = readl(pGmac->remap_addr + REG_GMAC_OP_MODE);
+ writel(mode & ~(1 << 13), pGmac->remap_addr + REG_GMAC_OP_MODE);
+}
+
+/* Start the receive DMA engine: set OP_MODE bit 1. */
+void GMAC_DMA_StartRx(Gmac_Object * pGmac)
+{
+ __u32 mode;
+
+ mode = readl(pGmac->remap_addr + REG_GMAC_OP_MODE);
+ writel(mode | (1 << 1), pGmac->remap_addr + REG_GMAC_OP_MODE);
+}
+
+/* Stop the receive DMA engine: clear OP_MODE bit 1. */
+void GMAC_DMA_StopRx(Gmac_Object * pGmac)
+{
+ __u32 mode;
+
+ mode = readl(pGmac->remap_addr + REG_GMAC_OP_MODE);
+ writel(mode & ~(1 << 1), pGmac->remap_addr + REG_GMAC_OP_MODE);
+}
+
+#ifdef FH_GMAC_DMA_DEBUG
+/* Decode and print the DMA transmit process state from CSR5 (debug only). */
+static void GMAC_DMA_ShowTxState(__u32 status)
+{
+ __u32 state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running):Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+}
+
+/* Decode and print the DMA receive process state from CSR5 (debug only). */
+static void GMAC_DMA_ShowRxState(__u32 status)
+{
+ __u32 state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running):Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+/*
+ * DMA interrupt dispatcher: read CSR5, account abnormal events into
+ * the driver stats, and classify the interrupt for the caller.
+ * Returns one of: 0, tx_hard_error, tx_hard_error_bump_tc, or
+ * handle_tx_rx (normal Tx/Rx completion work pending).
+ */
+int GMAC_DMA_Interrupt(Gmac_Object * pGmac)
+{
+ int ret = 0;
+ Gmac_Stats *gmac_stats = &pGmac->stats;
+ /* read the status register (CSR5) */
+ __u32 intr_status;
+ intr_status = readl(pGmac->remap_addr + REG_GMAC_STATUS);
+
+ GMAC_DMA_DBG("%s: [GMAC_STATUS: 0x%08x]\n", __func__, intr_status);
+#ifdef FH_GMAC_DMA_DEBUG
+ /* It displays the DMA process states (CSR5 register) */
+ GMAC_DMA_ShowTxState(intr_status);
+ GMAC_DMA_ShowRxState(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ GMAC_DMA_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ GMAC_DMA_DBG(KERN_INFO "transmit underflow\n");
+ ret = tx_hard_error_bump_tc;
+ gmac_stats->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ GMAC_DMA_DBG(KERN_INFO "transmit jabber\n");
+ gmac_stats->tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ GMAC_DMA_DBG(KERN_INFO "recv overflow\n");
+ gmac_stats->rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ GMAC_DMA_DBG(KERN_INFO "receive buffer unavailable\n");
+ gmac_stats->rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ GMAC_DMA_DBG(KERN_INFO "receive process stopped\n");
+ gmac_stats->rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ GMAC_DMA_DBG(KERN_INFO "receive watchdog\n");
+ gmac_stats->rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ GMAC_DMA_DBG(KERN_INFO "transmit early interrupt\n");
+ gmac_stats->tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ GMAC_DMA_DBG(KERN_INFO "transmit process stopped\n");
+ gmac_stats->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ GMAC_DMA_DBG(KERN_INFO "fatal bus error\n");
+ gmac_stats->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ gmac_stats->normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ ret = handle_tx_rx;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ /* NOTE(review): 0x1ffff covers bits [16:0], i.e. NIS too, while the
+ * comment above says [15-0] — confirm which is intended */
+ writel(intr_status & 0x1ffff, pGmac->remap_addr + REG_GMAC_STATUS);
+ GMAC_DMA_DBG(KERN_INFO "\n\n");
+
+ return ret;
+}
+
+/*
+ * Inspect a completed Tx descriptor's status word and update both the
+ * netdev and driver error counters.
+ * Returns 0 on clean transmission, -1 if the error-summary bit is set.
+ */
+int GMAC_DMA_TxStatus(Gmac_Object * pGmac, Gmac_Tx_DMA_Descriptors * desc)
+{
+ int ret = 0;
+ struct net_device_stats *stats = &pGmac->ndev->stats;
+ Gmac_Stats *gmac_stats = &pGmac->stats;
+
+ if (unlikely(desc->desc0.bit.error_summary)) {
+ if (unlikely(desc->desc0.bit.underflow_error)) {
+ gmac_stats->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(desc->desc0.bit.no_carrier)) {
+ gmac_stats->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(desc->desc0.bit.loss_of_carrier)) {
+ gmac_stats->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((desc->desc0.bit.excessive_deferral) ||
+ (desc->desc0.bit.excessive_collision) ||
+ (desc->desc0.bit.late_collision)))
+ stats->collisions += desc->desc0.bit.collision_count;
+ ret = -1;
+ }
+ if (unlikely(desc->desc0.bit.deferred))
+ gmac_stats->tx_deferred++;
+
+ return ret;
+}
+
+/*
+ * Inspect a received descriptor's status word and update the error
+ * counters. Returns csum_none for a good frame or discard_frame when
+ * the frame must be dropped (multi-buffer, error summary, dribble or
+ * length error).
+ */
+int GMAC_DMA_RxStatus(Gmac_Object * pGmac, Gmac_Rx_DMA_Descriptors * desc)
+{
+ int ret = csum_none;
+ struct net_device_stats *stats = &pGmac->ndev->stats;
+ Gmac_Stats *gmac_stats = &pGmac->stats;
+
+ /* frames spanning several descriptors are not supported */
+ if (unlikely(desc->desc0.bit.last_descriptor == 0)) {
+ pr_warning("ndesc Error: Oversized Ethernet "
+ "frame spanned multiple buffers\n");
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(desc->desc0.bit.error_summary)) {
+ if (unlikely(desc->desc0.bit.descriptor_error))
+ gmac_stats->rx_desc++;
+ if (unlikely(desc->desc0.bit.ipc_chksum_error_giant_frame))
+ gmac_stats->rx_toolong++;
+ if (unlikely(desc->desc0.bit.late_collision)) {
+ gmac_stats->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(desc->desc0.bit.crc_error)) {
+ gmac_stats->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+ if (unlikely(desc->desc0.bit.dribble_error))
+ ret = discard_frame;
+
+ if (unlikely(desc->desc0.bit.length_error)) {
+ gmac_stats->rx_length++;
+ ret = discard_frame;
+ }
+
+ return ret;
+}
+
+/*
+ * Read the hardware missed-frame/overflow counter register (CSR8) and
+ * fold its counts into both the netdev stats (passed via *data) and
+ * the driver's internal stats. The counters are read-to-clear;
+ * overflow flag bits mean the saturated maximum is accumulated.
+ */
+void GMAC_DMA_DiagnosticFrame(void *data, Gmac_Object * pGmac)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ Gmac_Stats *gmac_stats = &pGmac->stats;
+ __u32 csr8 = readl(pGmac->remap_addr + REG_GMAC_ERROR_COUNT);
+
+ if (unlikely(csr8)) {
+ //Overflow bit for FIFO Overflow Counter
+ if (csr8 & 0x10000000) {
+ stats->rx_over_errors += 0x800;
+ gmac_stats->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ //indicates the number of frames missed by the application
+ ove_cntr = ((csr8 & 0x0ffe0000) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ gmac_stats->rx_overflow_cntr += ove_cntr;
+ }
+
+ //Overflow bit for Missed Frame Counter
+ if (csr8 & 0x10000) {
+ stats->rx_missed_errors += 0xffff;
+ gmac_stats->rx_missed_cntr += 0xffff;
+ } else {
+ //indicates the number of frames missed by the controller
+ unsigned int miss_f = (csr8 & 0xffff);
+ stats->rx_missed_errors += miss_f;
+ gmac_stats->rx_missed_cntr += miss_f;
+ }
+ }
+}
diff --git a/drivers/net/fh_gmac/fh_gmac_dma.h b/drivers/net/fh_gmac/fh_gmac_dma.h
new file mode 100755
index 00000000..43c02761
--- /dev/null
+++ b/drivers/net/fh_gmac/fh_gmac_dma.h
@@ -0,0 +1,183 @@
+/*
+ * fh_gmac_dma.h
+ *
+ * Created on: May 22, 2014
+ * Author: duobao
+ */
+
+#ifndef FH_GMAC_DMA_H_
+#define FH_GMAC_DMA_H_
+
+
+
+/* DMA Status register defines */
+/* (duplicate re-definitions of DMA_STATUS_GMI / DMA_STATUS_GLI removed) */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+
+/*
+ * Tx descriptor word 0: status bits written back by the DMA engine
+ * after transmission, plus the ownership bit (bit 31).
+ */
+typedef union
+{
+ struct
+ {
+ __u32 deferred :1; //0~31
+ __u32 underflow_error :1;
+ __u32 excessive_deferral :1;
+ __u32 collision_count :4;
+ __u32 vlan_frame :1;
+ __u32 excessive_collision :1;
+ __u32 late_collision :1;
+ __u32 no_carrier :1;
+ __u32 loss_of_carrier :1;
+ __u32 payload_checksum_error :1;
+ __u32 frame_flushed :1;
+ __u32 jabber_timeout :1;
+ __u32 error_summary :1;
+ __u32 ip_header_error :1;
+ __u32 tx_timestamp_status :1;
+ __u32 reserved_30_18 :13;
+ __u32 own :1;
+ }bit;
+ __u32 dw;
+}Gmac_Tx_Descriptor0;
+
+/* Tx descriptor word 1: buffer sizes and per-frame control flags. */
+typedef union
+{
+ struct
+ {
+ __u32 buffer1_size :11; //0~31
+ __u32 buffer2_size :11;
+ __u32 timestamp_enable :1;
+ __u32 disable_padding :1;
+ __u32 second_address_chained :1;
+ __u32 end_of_ring :1;
+ __u32 disable_crc :1;
+ __u32 checksum_insertion_ctrl :2;
+ __u32 first_segment :1;
+ __u32 last_segment :1;
+ __u32 intr_on_completion :1;
+ }bit;
+ __u32 dw;
+}Gmac_Tx_Descriptor1;
+
+/* Tx descriptor word 2: physical address of buffer 1. */
+typedef union
+{
+ struct
+ {
+ __u32 buffer_address_pointer :32; //0~31
+ }bit;
+ __u32 dw;
+}Gmac_Tx_Descriptor2;
+
+/* Tx descriptor word 3: physical address of buffer 2. */
+typedef union
+{
+ struct
+ {
+ __u32 buffer_address_pointer :32; //0~31
+ }bit;
+ __u32 dw;
+}Gmac_Tx_Descriptor3;
+
+/*
+ * Rx descriptor word 0: status bits written back by the DMA engine
+ * for a received frame, plus the ownership bit (bit 31).
+ */
+typedef union
+{
+ struct
+ {
+ __u32 mac_addr_payload_chksum_error :1; //0
+ __u32 crc_error :1; //1
+ __u32 dribble_error :1; //2
+ __u32 receive_error :1; //3
+ __u32 watchdog_timeout :1; //4
+ __u32 frame_type :1; //5
+ __u32 late_collision :1; //6
+ __u32 ipc_chksum_error_giant_frame :1; //7
+ __u32 last_descriptor :1; //8
+ __u32 first_descriptor :1; //9
+ __u32 vlan_tag :1; //10
+ __u32 overflow_error :1; //11
+ __u32 length_error :1; //12
+ __u32 sa_filter_fail :1; //13
+ __u32 descriptor_error :1; //14
+ __u32 error_summary :1; //15
+ __u32 frame_length :14;//16~29
+ __u32 da_filter_fail :1; //30
+ __u32 own :1; //31
+ }bit;
+ __u32 dw;
+}Gmac_Rx_Descriptor0;
+
+/* Rx descriptor word 1: buffer sizes and ring-control flags. */
+typedef union
+{
+ struct
+ {
+ __u32 buffer1_size :11; //0~10
+ __u32 buffer2_size :11; //11~21
+ __u32 reserved_23_22 :2; //22~23
+ __u32 second_address_chained :1; //24
+ __u32 end_of_ring :1; //25
+ __u32 reserved_30_26 :5; //26~30
+ __u32 disable_intr_on_completion :1; //31
+ }bit;
+ __u32 dw;
+}Gmac_Rx_Descriptor1;
+
+/* Rx descriptor word 2: physical address of buffer 1. */
+typedef union
+{
+ struct
+ {
+ __u32 buffer_address_pointer :32; //0~31
+ }bit;
+ __u32 dw;
+}Gmac_Rx_Descriptor2;
+
+/* Rx descriptor word 3: physical address of buffer 2. */
+typedef union
+{
+ struct
+ {
+ __u32 buffer_address_pointer :32; //0~31
+ }bit;
+ __u32 dw;
+}Gmac_Rx_Descriptor3;
+
+/* Complete 16-byte Tx DMA descriptor as laid out in hardware memory. */
+typedef struct
+{
+ Gmac_Tx_Descriptor0 desc0; /* control and status information of descriptor */
+ Gmac_Tx_Descriptor1 desc1; /* buffer sizes */
+ Gmac_Tx_Descriptor2 desc2; /* physical address of the buffer 1 */
+ Gmac_Tx_Descriptor3 desc3; /* physical address of the buffer 2 */
+}Gmac_Tx_DMA_Descriptors;
+
+/* Complete 16-byte Rx DMA descriptor as laid out in hardware memory. */
+typedef struct
+{
+ Gmac_Rx_Descriptor0 desc0; /* control and status information of descriptor */
+ Gmac_Rx_Descriptor1 desc1; /* buffer sizes */
+ Gmac_Rx_Descriptor2 desc2; /* physical address of the buffer 1 */
+ Gmac_Rx_Descriptor3 desc3; /* physical address of the buffer 2 */
+}Gmac_Rx_DMA_Descriptors;
+
+
+
+#endif /* FH_GMAC_DMA_H_ */
diff --git a/drivers/net/fh_gmac/fh_gmac_ethtool.c b/drivers/net/fh_gmac/fh_gmac_ethtool.c
new file mode 100755
index 00000000..624b3a02
--- /dev/null
+++ b/drivers/net/fh_gmac/fh_gmac_ethtool.c
@@ -0,0 +1,316 @@
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <mach/fh_gmac.h>
+#include "fh_gmac.h"
+
+#define REG_SPACE_SIZE 0x1054
+#define GMAC_ETHTOOL_NAME "fh_gmac"
+
+/* One row of the ethtool statistics table. */
+struct gmac_stats
+{
+ char stat_string[ETH_GSTRING_LEN]; /* name reported to ethtool -S */
+ int sizeof_stat; /* field width: sizeof(__u32) or sizeof(__u64) */
+ int stat_offset; /* byte offset of the field inside Gmac_Object */
+};
+
+/* Build one gmac_stats row for member m of Gmac_Stats. */
+#define FH_GMAC_STAT(m) \
+ { #m, FIELD_SIZEOF(Gmac_Stats, m), \
+ offsetof(Gmac_Object, stats.m)}
+
+/* Table driving ethtool -S output; order defines the reported order. */
+static const struct gmac_stats gmac_gstrings_stats[] =
+{
+ FH_GMAC_STAT(tx_underflow),
+ FH_GMAC_STAT(tx_carrier),
+ FH_GMAC_STAT(tx_losscarrier),
+ FH_GMAC_STAT(tx_heartbeat),
+ FH_GMAC_STAT(tx_deferred),
+ FH_GMAC_STAT(tx_vlan),
+ FH_GMAC_STAT(tx_jabber),
+ FH_GMAC_STAT(tx_frame_flushed),
+ FH_GMAC_STAT(tx_payload_error),
+ FH_GMAC_STAT(tx_ip_header_error),
+ FH_GMAC_STAT(rx_desc),
+ FH_GMAC_STAT(rx_partial),
+ FH_GMAC_STAT(rx_runt),
+ FH_GMAC_STAT(rx_toolong),
+ FH_GMAC_STAT(rx_collision),
+ FH_GMAC_STAT(rx_crc),
+ FH_GMAC_STAT(rx_length),
+ FH_GMAC_STAT(rx_mii),
+ FH_GMAC_STAT(rx_multicast),
+ FH_GMAC_STAT(rx_gmac_overflow),
+ FH_GMAC_STAT(rx_watchdog),
+ FH_GMAC_STAT(da_rx_filter_fail),
+ FH_GMAC_STAT(sa_rx_filter_fail),
+ FH_GMAC_STAT(rx_missed_cntr),
+ FH_GMAC_STAT(rx_overflow_cntr),
+ FH_GMAC_STAT(tx_undeflow_irq),
+ FH_GMAC_STAT(tx_process_stopped_irq),
+ FH_GMAC_STAT(tx_jabber_irq),
+ FH_GMAC_STAT(rx_overflow_irq),
+ FH_GMAC_STAT(rx_buf_unav_irq),
+ FH_GMAC_STAT(rx_process_stopped_irq),
+ FH_GMAC_STAT(rx_watchdog_irq),
+ FH_GMAC_STAT(tx_early_irq),
+ FH_GMAC_STAT(fatal_bus_error_irq),
+ FH_GMAC_STAT(threshold),
+ FH_GMAC_STAT(tx_pkt_n),
+ FH_GMAC_STAT(rx_pkt_n),
+ FH_GMAC_STAT(poll_n),
+ FH_GMAC_STAT(sched_timer_n),
+ FH_GMAC_STAT(normal_irq_n),
+};
+#define FH_GMAC_STATS_LEN ARRAY_SIZE(gmac_gstrings_stats)
+
+/* ethtool -i: report driver name, version and stats count. */
+static void gmac_ethtool_getdrvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ /* Use bounded copies into the fixed-size ethtool_drvinfo buffers
+  * instead of unbounded strcpy(). */
+ strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+ strlcpy(info->version, "0.0.1", sizeof(info->version));
+ info->fw_version[0] = '\0';
+ info->n_stats = FH_GMAC_STATS_LEN;
+}
+
+/*
+ * ethtool get_settings: delegate to the bound PHY under the driver
+ * lock. Fails with -ENODEV when no PHY is attached and -EBUSY when
+ * the interface is down (link state would be stale).
+ */
+static int gmac_ethtool_getsettings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ Gmac_Object* pGmac = netdev_priv(ndev);
+ struct phy_device *phy = pGmac->phydev;
+ int rc;
+ if (phy == NULL)
+ {
+ pr_err("%s: %s: PHY is not registered\n",
+ __func__, ndev->name);
+ return -ENODEV;
+ }
+ if (!netif_running(ndev))
+ {
+ pr_err("%s: interface is disabled: we cannot track "
+ "link speed / duplex setting\n", ndev->name);
+ return -EBUSY;
+ }
+ cmd->transceiver = XCVR_INTERNAL;
+ spin_lock_irq(&pGmac->lock);
+ rc = phy_ethtool_gset(phy, cmd);
+ spin_unlock_irq(&pGmac->lock);
+ return rc;
+}
+
+/*
+ * ethtool set_settings: delegate to the bound PHY under the driver
+ * lock. Returns -ENODEV when no PHY is attached.
+ */
+static int gmac_ethtool_setsettings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ Gmac_Object* pGmac = netdev_priv(ndev);
+ struct phy_device *phy = pGmac->phydev;
+ int rc;
+
+ /* Guard against a missing PHY, matching gmac_ethtool_getsettings();
+  * phy_ethtool_sset() would otherwise dereference NULL. */
+ if (phy == NULL)
+ {
+ pr_err("%s: %s: PHY is not registered\n",
+ __func__, ndev->name);
+ return -ENODEV;
+ }
+
+ spin_lock(&pGmac->lock);
+ rc = phy_ethtool_sset(phy, cmd);
+ spin_unlock(&pGmac->lock);
+
+ return rc;
+}
+
+/* ethtool get_msglevel: return the driver's message-enable mask. */
+static __u32 gmac_ethtool_getmsglevel(struct net_device *ndev)
+{
+ Gmac_Object* priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+/* ethtool set_msglevel: store the new message-enable mask. */
+static void gmac_ethtool_setmsglevel(struct net_device *ndev, __u32 level)
+{
+ Gmac_Object* priv = netdev_priv(ndev);
+
+ priv->msg_enable = level;
+}
+
+/* ethtool .begin hook: reject all ethtool ops while the netif is down. */
+static int gmac_check_if_running(struct net_device *ndev)
+{
+ return netif_running(ndev) ? 0 : -EBUSY;
+}
+
+/* Size in bytes of the register dump produced by gmac_ethtool_gregs(). */
+static int gmac_ethtool_get_regs_len(struct net_device *ndev)
+{
+ return REG_SPACE_SIZE;
+}
+
+/*
+ * ethtool register dump: 55 MAC registers from the base of the mapped
+ * region followed by 22 DMA registers starting at REG_GMAC_BUS_MODE.
+ */
+static void gmac_ethtool_gregs(struct net_device *ndev, struct ethtool_regs *regs, void *space)
+{
+ int i;
+ __u32 *reg_space = (__u32 *) space;
+
+ Gmac_Object* pGmac = netdev_priv(ndev);
+
+ memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+ /* MAC registers */
+ for (i = 0; i < 55; i++)
+ reg_space[i] = readl(pGmac->remap_addr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 22; i++)
+ reg_space[i + 55] = readl(pGmac->remap_addr + (REG_GMAC_BUS_MODE + (i * 4)));
+}
+
+/*
+ * ethtool -a: report pause-frame configuration from the driver's
+ * flow_ctrl mask and the PHY's autoneg setting.
+ * NOTE(review): pGmac->phydev is dereferenced without a NULL check —
+ * confirm this op cannot run before the PHY is attached.
+ */
+static void gmac_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ Gmac_Object* pGmac = netdev_priv(ndev);
+
+ spin_lock(&pGmac->lock);
+
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ pause->autoneg = pGmac->phydev->autoneg;
+
+ if (pGmac->flow_ctrl & FLOW_RX)
+ pause->rx_pause = 1;
+ if (pGmac->flow_ctrl & FLOW_TX)
+ pause->tx_pause = 1;
+
+ spin_unlock(&pGmac->lock);
+}
+
+/*
+ * ethtool -A: update the flow-control mask. With autoneg the new
+ * setting is advertised via phy_start_aneg(); otherwise the MAC
+ * flow-control registers are programmed directly.
+ */
+static int gmac_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ Gmac_Object* pGmac = netdev_priv(ndev);
+ struct phy_device *phy = pGmac->phydev;
+ int new_pause = FLOW_OFF;
+ int ret = 0;
+
+ spin_lock(&pGmac->lock);
+
+ if (pause->rx_pause)
+ new_pause |= FLOW_RX;
+ if (pause->tx_pause)
+ new_pause |= FLOW_TX;
+
+ pGmac->flow_ctrl = new_pause;
+ phy->autoneg = pause->autoneg;
+
+ if (phy->autoneg)
+ {
+ if (netif_running(ndev))
+ ret = phy_start_aneg(phy);
+ }
+ else
+ {
+ GMAC_FlowCtrl(pGmac, phy->duplex, pGmac->flow_ctrl, pGmac->pause);
+ }
+ spin_unlock(&pGmac->lock);
+ return ret;
+}
+
+/*
+ * ethtool -S: refresh the hardware miss/overflow counters, then copy
+ * every field listed in gmac_gstrings_stats out of Gmac_Object into
+ * the caller's __u64 array, widening 32-bit fields as needed.
+ */
+static void gmac_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *dummy, __u64 *data)
+{
+ Gmac_Object* pGmac = netdev_priv(ndev);
+ int i;
+
+ /* Update HW stats if supported */
+ GMAC_DMA_DiagnosticFrame(&ndev->stats, pGmac);
+
+ for (i = 0; i < FH_GMAC_STATS_LEN; i++)
+ {
+ char *p = (char *)pGmac + gmac_gstrings_stats[i].stat_offset;
+ data[i] = (gmac_gstrings_stats[i].sizeof_stat ==
+ sizeof(__u64)) ? (*(__u64 *)p) : (*(__u32 *)p);
+ }
+}
+
+/* Number of strings for a given string set; only ETH_SS_STATS is supported. */
+static int gmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return FH_GMAC_STATS_LEN;
+
+ return -EOPNOTSUPP;
+}
+
+/*
+ * ethtool string table: copy the statistic names, one fixed-width
+ * ETH_GSTRING_LEN entry per counter, in table order.
+ */
+static void gmac_get_strings(struct net_device *ndev, __u32 stringset, __u8 *data)
+{
+ int i;
+ __u8 *p = data;
+
+ switch (stringset)
+ {
+ case ETH_SS_STATS:
+ for (i = 0; i < FH_GMAC_STATS_LEN; i++)
+ {
+ memcpy(p, gmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+/* Currently only support WOL through Magic packet. */
+/* Report Wake-on-LAN capabilities and the currently enabled options. */
+static void gmac_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ Gmac_Object* pGmac = netdev_priv(ndev);
+
+ spin_lock_irq(&pGmac->lock);
+ if (device_can_wakeup(pGmac->dev))
+ {
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ wol->wolopts = pGmac->wolopts;
+ }
+ spin_unlock_irq(&pGmac->lock);
+}
+
+/*
+ * Configure Wake-on-LAN: only WAKE_MAGIC and WAKE_UCAST are accepted.
+ * Enables or disables device wakeup and IRQ wake accordingly.
+ */
+static int gmac_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ Gmac_Object* pGmac = netdev_priv(ndev);
+ __u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+ if (!device_can_wakeup(pGmac->dev))
+ return -EINVAL;
+
+ /* reject any option outside the supported set */
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ if (wol->wolopts)
+ {
+ pr_info("fh_gmac: wakeup enable\n");
+ device_set_wakeup_enable(pGmac->dev, 1);
+ enable_irq_wake(ndev->irq);
+ }
+ else
+ {
+ device_set_wakeup_enable(pGmac->dev, 0);
+ disable_irq_wake(ndev->irq);
+ }
+
+ spin_lock_irq(&pGmac->lock);
+ pGmac->wolopts = wol->wolopts;
+ spin_unlock_irq(&pGmac->lock);
+
+ return 0;
+}
+
+/* ethtool callback table installed by fh_gmac_set_ethtool_ops(). */
+static struct ethtool_ops fh_gmac_ethtool_ops = {
+ .begin = gmac_check_if_running,
+ .get_drvinfo = gmac_ethtool_getdrvinfo,
+ .get_settings = gmac_ethtool_getsettings,
+ .set_settings = gmac_ethtool_setsettings,
+ .get_msglevel = gmac_ethtool_getmsglevel,
+ .set_msglevel = gmac_ethtool_setmsglevel,
+ .get_regs = gmac_ethtool_gregs,
+ .get_regs_len = gmac_ethtool_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = gmac_get_pauseparam,
+ .set_pauseparam = gmac_set_pauseparam,
+ .get_ethtool_stats = gmac_get_ethtool_stats,
+ .get_strings = gmac_get_strings,
+ .get_wol = gmac_get_wol,
+ .set_wol = gmac_set_wol,
+ .get_sset_count = gmac_get_sset_count,
+};
+
+/* Attach this driver's ethtool callback table to the net_device. */
+void fh_gmac_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &fh_gmac_ethtool_ops);
+}
diff --git a/drivers/net/fh_gmac/fh_gmac_main.c b/drivers/net/fh_gmac/fh_gmac_main.c
new file mode 100755
index 00000000..b08fe67b
--- /dev/null
+++ b/drivers/net/fh_gmac/fh_gmac_main.c
@@ -0,0 +1,1300 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+#include <linux/phy.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/irqreturn.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+#include <linux/crc32.h>
+#include <mach/fh_gmac.h>
+#include "fh_gmac.h"
+
+/* Module parameters (sanitised at open time by fh_gmac_verify_args()) */
+static int watchdog = TX_TIMEO;		/* TX watchdog timeout, ms */
+static int debug = 16;			/* -1: default, 0: no output, 16: all */
+static int dma_txsize = DMA_TX_SIZE;	/* number of TX DMA descriptors */
+static int dma_rxsize = DMA_RX_SIZE;	/* number of RX DMA descriptors */
+static int flow_ctrl = FLOW_AUTO;	/* pause-frame flow control mode */
+static int pause = PAUSE_TIME;		/* pause time for flow control */
+
+/* PHY interface, default MII; may be overridden by ATAG_PHYMODE boot tag */
+static unsigned int phymode = PHY_INTERFACE_MODE_MII;
+
+#if defined(FH_GMAC_XMIT_DEBUG) || defined(FH_GMAC_RX_DEBUG)
+/* Debug helper: hex-dump `len` bytes of a packet buffer, 16 per row. */
+static void print_pkt(unsigned char *buf, int len)
+{
+	int j;
+	printk(KERN_DEBUG "len = %d byte, buf addr: 0x%p", len, buf);
+	for (j = 0; j < len; j++) {
+		if ((j % 16) == 0)
+			printk(KERN_DEBUG"\n %03x:", j);
+		printk(KERN_DEBUG" %02x", buf[j]);
+	}
+	printk(KERN_DEBUG"\n");
+}
+#endif
+
+/*
+ * Reverse the bit order of a 32-bit word (bit 0 <-> bit 31, etc.) using
+ * the classic parallel swap of 1-, 2-, 4-, 8- then 16-bit groups.
+ * Used to map CRC-derived hash bits into the multicast hash registers.
+ */
+static __u32 GMAC_BitReverse(register __u32 x)
+{
+	register __u32 y = 0x55555555;
+	x = (((x >> 1) & y) | ((x & y) << 1));
+	y = 0x33333333;
+	x = (((x >> 2) & y) | ((x & y) << 2));
+	y = 0x0f0f0f0f;
+	x = (((x >> 4) & y) | ((x & y) << 4));
+	y = 0x00ff00ff;
+	x = (((x >> 8) & y) | ((x & y) << 8));
+	return (x >> 16) | (x << 16);
+}
+
+
+/*
+ * Program the station MAC address into the hardware address registers.
+ * Bytes 4..5 go into the HIGH register, bytes 0..3 into the LOW register
+ * (little-endian packing, per the GMAC register layout).
+ */
+static void GMAC_SetMacAddress(Gmac_Object *pGmac)
+{
+
+	__u32 macHigh = pGmac->local_mac_address[5]<<8 |
+			pGmac->local_mac_address[4];
+	__u32 macLow = pGmac->local_mac_address[3]<<24 |
+			pGmac->local_mac_address[2]<<16 |
+			pGmac->local_mac_address[1]<<8 |
+			pGmac->local_mac_address[0];
+
+	writel(macHigh, pGmac->remap_addr + REG_GMAC_MAC_HIGH);
+	writel(macLow, pGmac->remap_addr + REG_GMAC_MAC_LOW);
+}
+
+/*
+ * .ndo_set_mac_address: copy the new address into the driver's private
+ * copy, push it to hardware, then let eth_mac_addr() validate it and
+ * update ndev->dev_addr.
+ */
+int gmac_dev_set_mac_addr(struct net_device *dev, void *p)
+{
+	Gmac_Object *pGmac = netdev_priv(dev);
+	struct sockaddr *addr = p;
+	memcpy(pGmac->local_mac_address, addr->sa_data, ETH_ALEN);
+	GMAC_SetMacAddress(pGmac);
+	return eth_mac_addr(dev, p);
+}
+
+/*
+ * Enable the MAC transmitter and receiver: set bits 2 and 3 (mask 0xc)
+ * of the GMAC configuration register.
+ */
+static inline void GMAC_EnableMac(Gmac_Object *pGmac)
+{
+	/* transmitter enable */
+	/* receive enable */
+	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_CONFIG);
+	reg |= 0xc;
+	writel(reg, pGmac->remap_addr + REG_GMAC_CONFIG);
+
+}
+
+
+
+/*
+ * Disable the MAC transmitter and receiver: clear bits 2 and 3
+ * (mask 0xc) of the GMAC configuration register.
+ *
+ * Bug fix: the previous code computed `reg &= ~0xc` but then wrote
+ * back `reg | 0xc`, re-setting the enable bits and leaving the MAC
+ * running.  Write the cleared value instead.
+ */
+static inline void GMAC_DisableMac(Gmac_Object *pGmac)
+{
+	/* transmitter disable */
+	/* receive disable */
+	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_CONFIG);
+	reg &= ~0xc;
+	writel(reg, pGmac->remap_addr + REG_GMAC_CONFIG);
+}
+
+/*
+ * Basic MAC core initialisation: set bit 7 (0x80, automatic pad/CRC
+ * stripping) in the configuration register.
+ *
+ * Bug fix: the write previously OR-ed in 0xc (TX/RX enable), starting
+ * the MAC before the IRQ line is requested in gmac_dev_open().
+ * Enabling TX/RX is the job of GMAC_EnableMac(), which the open path
+ * calls after request_irq(); write only the init value here.
+ */
+static inline void GMAC_CoreInit(Gmac_Object *pGmac)
+{
+	/* FIXME: heartbeat disable */
+	/* auto pad or crc stripping */
+	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_CONFIG);
+	reg |= 0x80;
+	writel(reg, pGmac->remap_addr + REG_GMAC_CONFIG);
+}
+
+/*
+ * Program the flow-control register: `fc` selects the pause modes; in
+ * full-duplex the pause time is placed in the upper 16 bits.
+ */
+void GMAC_FlowCtrl(Gmac_Object *pGmac, unsigned int duplex,
+		unsigned int fc, unsigned int pause_time)
+{
+	__u32 flow = fc;
+
+	if (duplex)
+		flow |= (pause_time << 16);
+	writel(flow, pGmac->remap_addr + REG_GMAC_FLOW_CTRL);
+}
+
+/*
+ * Recover from a fatal TX DMA error: stop the queue and TX DMA, drop
+ * every in-flight skb, re-initialise the TX descriptor ring, reset the
+ * ring indices and restart.  Accounts one tx_error before waking the
+ * queue again.
+ */
+static void gmac_tx_err(Gmac_Object *pGmac)
+{
+
+	netif_stop_queue(pGmac->ndev);
+	GMAC_DMA_StopTx(pGmac);
+	GMAC_DMA_FreeTxSkbufs(pGmac);
+	GMAC_DMA_InitTxDesc(pGmac->tx_dma_descriptors, pGmac->dma_tx_size);
+	pGmac->dirty_tx = 0;
+	pGmac->cur_tx = 0;
+	GMAC_DMA_StartTx(pGmac);
+	pGmac->ndev->stats.tx_errors++;
+	netif_wake_queue(pGmac->ndev);
+}
+
+
+/*
+ * Top-half interrupt handler.  Reads/acks the DMA status; on normal
+ * TX/RX work it masks the interrupt enable register and schedules NAPI,
+ * on a fatal TX error it resets the TX path via gmac_tx_err().
+ *
+ * Fix: the !ndev sanity check previously ran *after* netdev_priv(ndev)
+ * was evaluated; the guard is now first so it actually protects the
+ * dereference path.
+ */
+static irqreturn_t fh_gmac_interrupt(int irq, void *dev_id)
+{
+
+	struct net_device *ndev = (struct net_device *)dev_id;
+	Gmac_Object *pGmac;
+	int status;
+
+	if (unlikely(!ndev)) {
+		pr_err("%s: invalid ndev pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	pGmac = netdev_priv(ndev);
+
+	status = GMAC_DMA_Interrupt(pGmac);
+	if (likely(status == handle_tx_rx)) {
+		if (likely(napi_schedule_prep(&pGmac->napi))) {
+			/* mask all GMAC interrupts until the poll runs */
+			writel(0x0, pGmac->remap_addr + REG_GMAC_INTR_EN);
+			__napi_schedule(&pGmac->napi);
+		}
+	} else if (unlikely(status & tx_hard_error_bump_tc)) {
+		/* FIXME: tx underflow */
+	} else if (unlikely(status == tx_hard_error)) {
+		gmac_tx_err(pGmac);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Clamp the module parameters back to sane defaults before the device
+ * is opened.  Called from gmac_dev_open().
+ *
+ * Fix: the `flow_ctrl < 0` branch was annotated likely(); defaults are
+ * in range, so out-of-range values are the cold path — use unlikely()
+ * as on every other check here.
+ */
+static void fh_gmac_verify_args(void)
+{
+	if (unlikely(watchdog < 0))
+		watchdog = TX_TIMEO;
+	if (unlikely(dma_rxsize < 0))
+		dma_rxsize = DMA_RX_SIZE;
+	if (unlikely(dma_txsize < 0))
+		dma_txsize = DMA_TX_SIZE;
+	if (unlikely(flow_ctrl > 1))
+		flow_ctrl = FLOW_AUTO;
+	else if (unlikely(flow_ctrl < 0))
+		flow_ctrl = FLOW_OFF;
+	if (unlikely((pause < 0) || (pause > 0xffff)))
+		pause = PAUSE_TIME;
+
+}
+
+/*
+ * phylib adjust_link callback: mirror the PHY's negotiated link state
+ * (duplex, pause, speed, link up/down) into the MAC configuration
+ * register, and into the platform RMII speed hook when present.
+ * Runs under pGmac->lock; prints the link state when it changed and
+ * link messages are enabled.
+ */
+static void fh_gmac_adjust_link(struct net_device *ndev)
+{
+	Gmac_Object *pGmac = netdev_priv(ndev);
+	struct phy_device *phydev = pGmac->phydev;
+	unsigned long flags;
+	int new_state = 0;
+
+	if (phydev == NULL)
+		return;
+
+	spin_lock_irqsave(&pGmac->lock, flags);
+	if (phydev->link) {
+		__u32 ctrl = readl(pGmac->remap_addr + REG_GMAC_CONFIG);
+
+		/* Now we make sure that we can be in full duplex mode.
+		 * If not, we operate in half-duplex mode. */
+		if (phydev->duplex != pGmac->oldduplex) {
+			new_state = 1;
+			/* bit 11: duplex mode */
+			if (!(phydev->duplex))
+				ctrl &= ~0x800;
+			else
+				ctrl |= 0x800;
+			pGmac->oldduplex = phydev->duplex;
+		}
+		/* Flow Control operation */
+		if (phydev->pause) {
+			__u32 fc = pGmac->flow_ctrl, pause_time = pGmac->pause;
+			GMAC_FlowCtrl(pGmac, phydev->duplex, fc, pause_time);
+		}
+
+		if (phydev->speed != pGmac->speed) {
+			new_state = 1;
+			/* bit 14: port speed select (100M when set) */
+			switch (phydev->speed) {
+			case 100:
+				ctrl |= 0x4000;
+				if (pGmac->priv_data->set_rmii_speed)
+					pGmac->priv_data->
+					set_rmii_speed(gmac_speed_100m);
+				break;
+			case 10:
+				ctrl &= ~0x4000;
+				if (pGmac->priv_data->set_rmii_speed)
+					pGmac->priv_data->
+					set_rmii_speed(gmac_speed_10m);
+				break;
+			default:
+				if (netif_msg_link(pGmac))
+					pr_warning("%s: Speed (%d) is not 10"
+					" or 100!\n", ndev->name,
+					phydev->speed);
+				break;
+			}
+
+			pGmac->speed = phydev->speed;
+		}
+		writel(ctrl, pGmac->remap_addr + REG_GMAC_CONFIG);
+		if (!pGmac->oldlink) {
+			new_state = 1;
+			pGmac->oldlink = 1;
+		}
+	} else if (pGmac->oldlink) {
+		/* link went down: invalidate the cached state */
+		new_state = 1;
+		pGmac->oldlink = 0;
+		pGmac->speed = 0;
+		pGmac->oldduplex = -1;
+	}
+
+	if (new_state && netif_msg_link(pGmac))
+		phy_print_status(phydev);
+
+	spin_unlock_irqrestore(&pGmac->lock, flags);
+}
+
+
+/*
+ * Replenish RX descriptors consumed by fh_gmac_rx(): for every entry
+ * between dirty_rx and cur_rx, attach a fresh skb (reusing one from the
+ * rx_recycle pool when possible), map it for DMA and hand the
+ * descriptor back to hardware by setting the OWN bit.  Stops early if
+ * skb allocation fails; that entry is then retried on the next refill.
+ */
+static inline void fh_gmac_rx_refill(Gmac_Object *pGmac)
+{
+	__u32 rxsize = pGmac->dma_rx_size;
+	int bfsize = pGmac->dma_buf_sz;
+	Gmac_Rx_DMA_Descriptors *desc = pGmac->rx_dma_descriptors;
+
+	for (; pGmac->cur_rx - pGmac->dirty_rx > 0;
+			pGmac->dirty_rx++) {
+		__u32 entry = pGmac->dirty_rx % rxsize;
+		if (likely(pGmac->rx_skbuff[entry] == NULL)) {
+			struct sk_buff *skb;
+
+			skb = __skb_dequeue(&pGmac->rx_recycle);
+			if (skb == NULL)
+				skb = netdev_alloc_skb_ip_align(pGmac->ndev,
+						bfsize);
+
+			if (unlikely(skb == NULL))
+				break;
+
+			pGmac->rx_skbuff[entry] = skb;
+			pGmac->rx_skbuff_dma[entry] =
+				dma_map_single(pGmac->dev, skb->data, bfsize,
+						DMA_FROM_DEVICE);
+
+			(desc + entry)->desc2.dw = pGmac->rx_skbuff_dma[entry];
+			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
+		}
+		/* barriers order the buffer setup before OWN hand-off */
+		wmb();
+		(desc+entry)->desc0.bit.own = 1;
+		wmb();
+	}
+}
+
+/*
+ * NAPI receive path: walk the RX ring from cur_rx, processing at most
+ * `limit` CPU-owned descriptors.  Good frames are unmapped, passed up
+ * via netif_receive_skb()/napi_gro_receive() (depending on checksum
+ * status) and counted; bad frames only bump rx_errors.  Finally the
+ * consumed entries are refilled.  Returns the number of descriptors
+ * processed (NAPI work done).
+ */
+static int fh_gmac_rx(Gmac_Object *pGmac, int limit)
+{
+	__u32 rxsize = pGmac->dma_rx_size;
+	__u32 entry = pGmac->cur_rx % rxsize;
+	__u32 next_entry;
+	__u32 count = 0;
+	Gmac_Rx_DMA_Descriptors *desc =
+			pGmac->rx_dma_descriptors + entry;
+	Gmac_Rx_DMA_Descriptors *desc_next;
+
+#ifdef FH_GMAC_RX_DEBUG
+	if (netif_msg_hw(pGmac)) {
+		printk(KERN_DEBUG ">>> fh_gmac_rx: descriptor ring:\n");
+		GMAC_DMA_DisplayRxDesc(pGmac->rx_dma_descriptors, rxsize);
+	}
+#endif
+	count = 0;
+	/* stop at the first descriptor still owned by the DMA engine */
+	while (!desc->desc0.bit.own) {
+		int status;
+
+		if (count >= limit)
+			break;
+
+		count++;
+
+		next_entry = (++pGmac->cur_rx) % rxsize;
+		desc_next = pGmac->rx_dma_descriptors + next_entry;
+		prefetch(desc_next);
+
+		/* read the status of the incoming frame */
+		status = (GMAC_DMA_RxStatus(pGmac, desc));
+		if (unlikely(status == discard_frame)) {
+			pGmac->ndev->stats.rx_errors++;
+		} else {
+			struct sk_buff *skb;
+			int frame_len;
+			frame_len = desc->desc0.bit.frame_length;
+			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+			 * Type frames (LLC/LLC-SNAP) */
+			if (unlikely(status != llc_snap))
+				frame_len -= ETH_FCS_LEN;
+#ifdef FH_GMAC_RX_DEBUG
+			if (frame_len > ETH_FRAME_LEN)
+				pr_debug("\tRX frame size %d, COE status: %d\n",
+					frame_len, status);
+
+			if (netif_msg_hw(pGmac))
+				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+					desc, entry, desc->desc2.dw);
+#endif
+			skb = pGmac->rx_skbuff[entry];
+			if (unlikely(!skb)) {
+				pr_err("%s: Inconsistent Rx descriptor chain\n",
+						pGmac->ndev->name);
+				pGmac->ndev->stats.rx_dropped++;
+				break;
+			}
+			prefetch(skb->data - NET_IP_ALIGN);
+			/* ownership of the skb moves to the stack below */
+			pGmac->rx_skbuff[entry] = NULL;
+
+			skb_put(skb, frame_len);
+			dma_unmap_single(pGmac->dev,
+					pGmac->rx_skbuff_dma[entry],
+					pGmac->dma_buf_sz, DMA_FROM_DEVICE);
+#ifdef FH_GMAC_RX_DEBUG
+			if (netif_msg_pktdata(pGmac)) {
+				pr_info(" frame received (%dbytes)", frame_len);
+				print_pkt(skb->data, frame_len);
+			}
+#endif
+			skb->protocol = eth_type_trans(skb, pGmac->ndev);
+
+			if (unlikely(status == csum_none)) {
+				/* always for the old mac 10/100 */
+				skb_checksum_none_assert(skb);
+				netif_receive_skb(skb);
+			} else {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				napi_gro_receive(&pGmac->napi, skb);
+			}
+
+			pGmac->ndev->stats.rx_packets++;
+			pGmac->ndev->stats.rx_bytes += frame_len;
+		}
+		entry = next_entry;
+		desc = desc_next;	/* use prefetched values */
+	}
+
+	/* give the consumed descriptors back to the hardware */
+	fh_gmac_rx_refill(pGmac);
+
+	pGmac->stats.rx_pkt_n += count;
+
+	return count;
+}
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define FH_GMAC_TX_THRESH(x)	(x->dma_tx_size/4)
+
+/* Number of free TX descriptors (ring slots between dirty_tx and
+ * cur_tx, keeping one slot unused to distinguish full from empty). */
+static inline __u32 gmac_tx_avail(Gmac_Object *pGmac)
+{
+	return pGmac->dirty_tx + pGmac->dma_tx_size - pGmac->cur_tx - 1;
+}
+
+
+
+/*
+ * TX completion (called from the NAPI poll): reclaim descriptors the
+ * DMA engine has finished with, record per-packet status on the last
+ * segment, unmap the buffers, recycle or free the skbs, and release
+ * the descriptors.  Wakes the TX queue once enough descriptors are
+ * free again (FH_GMAC_TX_THRESH), re-checking under the tx lock.
+ */
+static void fh_gmac_tx(Gmac_Object *pGmac)
+{
+	__u32 txsize = pGmac->dma_tx_size;
+	while (pGmac->dirty_tx != pGmac->cur_tx) {
+		int last;
+		__u32 entry = pGmac->dirty_tx % txsize;
+		struct sk_buff *skb = pGmac->tx_skbuff[entry];
+		Gmac_Tx_DMA_Descriptors *desc =
+				pGmac->tx_dma_descriptors + entry;
+
+		/* Check if the descriptor is owned by the DMA. */
+		if (desc->desc0.bit.own)
+			break;
+
+		/* Verify tx error by looking at the last segment */
+		last = desc->desc1.bit.last_segment;
+		if (likely(last)) {
+			int tx_error = GMAC_DMA_TxStatus(pGmac, desc);
+			if (likely(tx_error == 0)) {
+				pGmac->ndev->stats.tx_packets++;
+				pGmac->stats.tx_pkt_n++;
+			} else {
+				pGmac->ndev->stats.tx_errors++;
+			}
+		}
+		TX_DBG("%s: curr %d, dirty %d\n", __func__,
+			pGmac->cur_tx, pGmac->dirty_tx);
+
+		if (likely(desc->desc2.dw)) {
+			dma_unmap_single(pGmac->dev, desc->desc2.dw,
+					desc->desc1.bit.buffer1_size,
+					DMA_TO_DEVICE);
+		}
+		if (unlikely(desc->desc3.dw))
+			desc->desc3.dw = 0;
+
+		if (likely(skb != NULL)) {
+			/*
+			 * If there's room in the queue (limit it to size)
+			 * we add this skb back into the pool,
+			 * if it's the right size.
+			 */
+			if ((skb_queue_len(&pGmac->rx_recycle)
+				< pGmac->dma_rx_size)
+				&&
+				skb_recycle_check(skb, pGmac->dma_buf_sz)) {
+				__skb_queue_head(&pGmac->rx_recycle, skb);
+			} else {
+				dev_kfree_skb(skb);
+			}
+
+			pGmac->tx_skbuff[entry] = NULL;
+		}
+		GMAC_DMA_ReleaseTxDesc(desc);
+
+		entry = (++pGmac->dirty_tx) % txsize;
+	}
+	if (unlikely(netif_queue_stopped(pGmac->ndev) &&
+			gmac_tx_avail(pGmac) >
+			FH_GMAC_TX_THRESH(pGmac))) {
+		/* re-check under the tx lock to avoid racing the xmit path */
+		netif_tx_lock(pGmac->ndev);
+		if (netif_queue_stopped(pGmac->ndev) &&
+				gmac_tx_avail(pGmac) >
+				FH_GMAC_TX_THRESH(pGmac)) {
+			TX_DBG("%s: restart transmit\n", __func__);
+			netif_wake_queue(pGmac->ndev);
+		}
+		netif_tx_unlock(pGmac->ndev);
+	}
+}
+
+
+/*
+ * NAPI poll: reclaim finished TX work, then receive up to `budget`
+ * frames.  When the budget is not exhausted the poll is completed and
+ * the GMAC interrupt enable register is restored (0x1a061 = the
+ * driver's normal interrupt mask, cleared by fh_gmac_interrupt()).
+ */
+static int fh_gmac_poll(struct napi_struct *napi, int budget)
+{
+	Gmac_Object *pGmac = container_of(napi, Gmac_Object, napi);
+	int work_done = 0;
+
+	pGmac->stats.poll_n++;
+	fh_gmac_tx(pGmac);
+	work_done = fh_gmac_rx(pGmac, budget);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		writel(0x1a061, pGmac->remap_addr + REG_GMAC_INTR_EN);
+	}
+	return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.  Simply invokes the
+ * normal interrupt handler with the device IRQ masked. */
+static void gmac_poll_controller(struct net_device *ndev)
+{
+	disable_irq(ndev->irq);
+	fh_gmac_interrupt(ndev->irq, ndev);
+	enable_irq(ndev->irq);
+}
+#endif
+
+
+/*
+ * Map an oversized (> 2KB buffer) linear skb onto one or two TX
+ * descriptors and return the index of the last descriptor used.  The
+ * caller (gmac_dev_xmit) sets last_segment/IOC and the first OWN bit.
+ *
+ * NOTE(review): the split sizes look off by one — buffer1_size is set
+ * to BUFFER_SIZE_2K - 1 while buf2_size is len - BUFFER_SIZE_2K + 1,
+ * and desc3 of the second descriptor points 2KB past its own buffer.
+ * This mirrors no obvious datasheet rule visible here; confirm against
+ * the GMAC descriptor spec before relying on jumbo support.
+ */
+static __u32 gmac_handle_jumbo_frames(struct sk_buff *skb,
+		struct net_device *dev, int checksum_insertion)
+{
+	/* FIXME: 8K jumbo frame */
+	Gmac_Object *pGmac = netdev_priv(dev);
+	__u32 nopaged_len = skb_headlen(skb);
+	__u32 txsize = pGmac->dma_tx_size;
+	__u32 entry = pGmac->cur_tx % txsize;
+	Gmac_Tx_DMA_Descriptors *desc = pGmac->tx_dma_descriptors + entry;
+
+	if (nopaged_len > BUFFER_SIZE_2K) {
+
+		int buf2_size = nopaged_len - BUFFER_SIZE_2K + 1;
+
+		/* first descriptor carries the leading 2KB */
+		desc->desc2.dw = dma_map_single(pGmac->dev, skb->data,
+				BUFFER_SIZE_2K, DMA_TO_DEVICE);
+		desc->desc3.dw = desc->desc2.dw + BUFFER_SIZE_2K;
+		desc->desc1.bit.first_segment = 1;
+		desc->desc1.bit.buffer1_size = BUFFER_SIZE_2K - 1;
+		desc->desc1.bit.checksum_insertion_ctrl = 3;
+		/* second descriptor carries the remainder */
+		entry = (++pGmac->cur_tx) % txsize;
+		desc = pGmac->tx_dma_descriptors + entry;
+		desc->desc2.dw = dma_map_single(pGmac->dev,
+				skb->data + BUFFER_SIZE_2K,
+				buf2_size, DMA_TO_DEVICE);
+		desc->desc3.dw = desc->desc2.dw + BUFFER_SIZE_2K;
+		desc->desc1.bit.first_segment = 0;
+		desc->desc1.bit.buffer1_size = buf2_size;
+		desc->desc1.bit.checksum_insertion_ctrl = checksum_insertion;
+		desc->desc0.bit.own = 1;
+		pGmac->tx_skbuff[entry] = NULL;
+	} else {
+		/* fits in a single descriptor after all */
+		desc->desc2.dw = dma_map_single(pGmac->dev, skb->data,
+				nopaged_len, DMA_TO_DEVICE);
+		desc->desc3.dw = desc->desc2.dw + BUFFER_SIZE_2K;
+		desc->desc1.bit.first_segment = 1;
+		desc->desc1.bit.buffer1_size = nopaged_len;
+		desc->desc1.bit.checksum_insertion_ctrl = checksum_insertion;
+	}
+	return entry;
+}
+
+/* Configuration changes (passed on by ifconfig).
+ * .ndo_set_config: only callable while the interface is down; rejects
+ * any attempt to change the I/O base address or IRQ, ignores the rest.
+ */
+static int gmac_dev_set_config(struct net_device *ndev, struct ifmap *map)
+{
+	if (ndev->flags & IFF_UP)	/* can't act on a running interface */
+		return -EBUSY;
+
+	/* Don't allow changing the I/O address */
+	if (map->base_addr != ndev->base_addr) {
+		pr_warning("%s: can't change I/O address\n", ndev->name);
+		return -EOPNOTSUPP;
+	}
+
+	/* Don't allow changing the IRQ */
+	if (map->irq != ndev->irq) {
+		pr_warning("%s: can't change IRQ number %d\n",
+			ndev->name, ndev->irq);
+		return -EOPNOTSUPP;
+	}
+
+	/* ignore other fields */
+	return 0;
+}
+
+
+/*
+ * .ndo_start_xmit: map the linear part (or jumbo split) plus any paged
+ * fragments onto TX descriptors, set last_segment/IOC on the final
+ * descriptor, hand the first descriptor to the DMA last (OWN bit), and
+ * kick the engine via the TX poll-demand register.  Stops the queue
+ * when fewer than MAX_SKB_FRAGS + 1 descriptors remain.
+ *
+ * Fix: removed the dead "#ifdef CONFIG_STMMAC_TIMER" fragment inherited
+ * from the stmmac driver — it referenced `priv`/`priv->hw`, identifiers
+ * that do not exist in this driver, and would break the build whenever
+ * that option was enabled.
+ */
+static int gmac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	Gmac_Object *pGmac = netdev_priv(ndev);
+	__u32 txsize = pGmac->dma_tx_size;
+	__u32 entry;
+	int i, csum_insertion = 0;
+	int nfrags = skb_shinfo(skb)->nr_frags;
+	Gmac_Tx_DMA_Descriptors *desc, *first;
+
+	if (unlikely(gmac_tx_avail(pGmac) < nfrags + 1)) {
+		if (!netif_queue_stopped(ndev)) {
+			netif_stop_queue(ndev);
+			/* This is a hard error, log it. */
+			pr_err("%s: BUG! Tx Ring full when queue awake\n",
+				__func__);
+		}
+		return NETDEV_TX_BUSY;
+	}
+
+	entry = pGmac->cur_tx % txsize;
+
+#ifdef FH_GMAC_XMIT_DEBUG
+	if ((skb->len > ETH_FRAME_LEN) || nfrags)
+		pr_info("fh gmac xmit:\n"
+			"\tskb addr %p - len: %d - nopaged_len: %d\n"
+			"\tn_frags: %d - ip_summed: %d - %s gso\n",
+			skb, skb->len, skb_headlen(skb),
+			nfrags, skb->ip_summed,
+			!skb_is_gso(skb) ? "isn't" : "is");
+#endif
+
+	/* 3 = insert IP header + payload checksums in hardware */
+	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL) ? 3 : 0;
+
+	desc = pGmac->tx_dma_descriptors + entry;
+	first = desc;
+
+#ifdef FH_GMAC_XMIT_DEBUG
+	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
+		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
+			"\t\tn_frags: %d, ip_summed: %d\n",
+			skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+#endif
+	pGmac->tx_skbuff[entry] = skb;
+	if (unlikely(skb->len >= BUFFER_SIZE_2K)) {
+		printk(KERN_ERR "jumbo_frames detected\n");
+		entry = gmac_handle_jumbo_frames(skb, ndev, csum_insertion);
+		desc = pGmac->tx_dma_descriptors + entry;
+	} else {
+		__u32 nopaged_len = skb_headlen(skb);
+		desc->desc2.dw = dma_map_single(pGmac->dev, skb->data,
+				nopaged_len, DMA_TO_DEVICE);
+		desc->desc1.bit.first_segment = 1;
+		desc->desc1.bit.buffer1_size = nopaged_len;
+		desc->desc1.bit.checksum_insertion_ctrl = csum_insertion;
+	}
+
+	/* map each paged fragment to its own descriptor; DMA owns them
+	 * immediately, only the first descriptor is held back */
+	for (i = 0; i < nfrags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		int len = frag->size;
+
+		entry = (++pGmac->cur_tx) % txsize;
+		desc = pGmac->tx_dma_descriptors + entry;
+
+		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
+		desc->desc2.dw = dma_map_page(pGmac->dev, frag->page,
+				frag->page_offset,
+				len, DMA_TO_DEVICE);
+		pGmac->tx_skbuff[entry] = NULL;
+		desc->desc1.bit.first_segment = 0;
+		desc->desc1.bit.buffer1_size = len;
+		wmb();
+		desc->desc1.bit.checksum_insertion_ctrl = csum_insertion;
+		desc->desc0.bit.own = 1;
+		wmb();
+	}
+
+	/* Interrupt on completion only for the latest segment */
+	desc->desc1.bit.last_segment = 1;
+	desc->desc1.bit.intr_on_completion = 1;
+	wmb();
+	/* To avoid a race condition: release the first descriptor to the
+	 * DMA only after the whole chain is set up */
+	first->desc0.bit.own = 1;
+	wmb();
+	pGmac->cur_tx++;
+
+#ifdef FH_GMAC_XMIT_DEBUG
+	if (netif_msg_pktdata(pGmac)) {
+		pr_info("fh gmac xmit: current=%d, dirty=%d, entry=%d, "
+			"first=%p, nfrags=%d\n",
+			(pGmac->cur_tx % txsize), (pGmac->dirty_tx % txsize),
+			entry, first, nfrags);
+		GMAC_DMA_DisplayTxDesc(pGmac->tx_dma_descriptors, 3);
+		pr_info(">>> frame to be transmitted: ");
+		print_pkt(skb->data, skb->len);
+	}
+#endif
+	if (unlikely(gmac_tx_avail(pGmac) <= (MAX_SKB_FRAGS + 1))) {
+		TX_DBG("%s: stop transmitted packets\n", __func__);
+		netif_stop_queue(ndev);
+	}
+
+	ndev->stats.tx_bytes += skb->len;
+	/* kick the TX DMA engine */
+	writel(0x1, pGmac->remap_addr + REG_GMAC_TX_POLL_DEMAND);
+
+	return NETDEV_TX_OK;
+}
+
+/* .ndo_tx_timeout: watchdog fired — reset the TX path. */
+static void gmac_dev_tx_timeout(struct net_device *ndev)
+{
+	Gmac_Object *pGmac = netdev_priv(ndev);
+	gmac_tx_err(pGmac);
+}
+
+/*
+ * .ndo_change_mtu: accept 46..ETH_DATA_LEN and refresh the netdev
+ * features.  The "must be stopped" guard was intentionally left
+ * commented out, so the MTU may be changed while running.
+ */
+static int gmac_dev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	int max_mtu;
+/*
+	if (netif_running(ndev))
+	{
+		pr_err("%s: must be stopped to change its MTU\n", ndev->name);
+		return -EBUSY;
+	}
+*/
+	max_mtu = ETH_DATA_LEN;
+
+	/* 46 is the minimum Ethernet payload size */
+	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
+		pr_err("%s: invalid MTU, max MTU is: %d\n",
+				ndev->name, max_mtu);
+		return -EINVAL;
+	}
+
+	ndev->mtu = new_mtu;
+	netdev_update_features(ndev);
+
+	return 0;
+}
+
+
+/*
+ * Program the RX frame filter from ndev->flags and the multicast list:
+ *  - IFF_PROMISC          -> promiscuous (0x1)
+ *  - >64 groups/ALLMULTI  -> pass-all-multicast (0x10), hash all-ones
+ *  - empty mc list        -> unicast only (0)
+ *  - otherwise            -> perfect unicast + 64-bit multicast hash
+ *                            (0x404), hash built from CRC-32 top bits.
+ */
+static void gmac_set_filter(struct net_device *ndev)
+{
+	Gmac_Object *pGmac = netdev_priv(ndev);
+	__u32 value = readl(pGmac->remap_addr + REG_GMAC_FRAME_FILTER);
+
+	if (ndev->flags & IFF_PROMISC) {
+		/* enable Promiscuous Mode */
+		value = 0x1;
+	} else if ((netdev_mc_count(ndev) > 64)
+			|| (ndev->flags & IFF_ALLMULTI)) {
+		/* enable Pass All Multicast */
+		value = 0x10;
+
+		writel(0xffffffff, pGmac->remap_addr + REG_GMAC_HASH_HIGH);
+		writel(0xffffffff, pGmac->remap_addr + REG_GMAC_HASH_LOW);
+	} else if (netdev_mc_empty(ndev)) {
+		/* no multicast */
+		value = 0;
+	} else {
+		__u32 mc_filter[2];
+		struct netdev_hw_addr *ha;
+
+		/* Perfect filter mode for physical address and Hash
+		   filter for multicast */
+		value = 0x404;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, ndev)
+		{
+			/* The upper 6 bits of the calculated CRC are used to
+			 * index the contens of the hash table */
+			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+			/* The most significant bit determines the register to
+			 * use (H/L) while the other 5 bits determine the bit
+			 * within the register. */
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		writel(GMAC_BitReverse(mc_filter[0]),
+			pGmac->remap_addr + REG_GMAC_HASH_HIGH);
+		writel(GMAC_BitReverse(mc_filter[1]),
+			pGmac->remap_addr + REG_GMAC_HASH_LOW);
+	}
+	writel(value, pGmac->remap_addr + REG_GMAC_FRAME_FILTER);
+}
+
+/* .ndo_set_multicast_list: apply the RX filter under the device lock. */
+static void gmac_dev_mcast_set(struct net_device *ndev)
+{
+	Gmac_Object *pGmac = netdev_priv(ndev);
+
+	spin_lock(&pGmac->lock);
+	gmac_set_filter(ndev);
+	spin_unlock(&pGmac->lock);
+}
+
+/*
+ * .ndo_do_ioctl: forward MII ioctls (SIOCGMIIREG etc.) to the attached
+ * PHY via phylib.  Requires the interface to be up and a PHY bound.
+ */
+static int gmac_dev_ioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
+{
+	Gmac_Object *pGmac = netdev_priv(ndev);
+	int ret;
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
+	if (!pGmac->phydev)
+		return -EINVAL;
+
+	ret = phy_mii_ioctl(pGmac->phydev, ifrq, cmd);
+
+	return ret;
+
+}
+
+
+
+/*
+ * Attach the PHY described by the platform data (bus 0, phyid from
+ * priv_data) to this netdev with fh_gmac_adjust_link() as the link
+ * callback.  Gigabit modes are masked off (10/100 MAC).  Returns 0 on
+ * success or a negative errno.
+ */
+static int fh_gmac_init_phy(struct net_device *ndev)
+{
+	Gmac_Object *pGmac = netdev_priv(ndev);
+	struct phy_device *phydev;
+	char phy_id[MII_BUS_ID_SIZE + 3];
+	char bus_id[MII_BUS_ID_SIZE];
+
+	if (pGmac->phydev == NULL)
+		return -ENODEV;
+
+	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", 0);
+	snprintf(phy_id, MII_BUS_ID_SIZE + 3,
+		PHY_ID_FMT, bus_id, pGmac->priv_data->phyid);
+	pr_debug("fh_gmac_init_phy: trying to attach to %s\n", phy_id);
+
+	phydev = phy_connect(ndev, phy_id, &fh_gmac_adjust_link, 0,
+			pGmac->phy_interface);
+
+	if (IS_ERR(phydev)) {
+		pr_err("%s: Could not attach to PHY\n", ndev->name);
+		return PTR_ERR(phydev);
+	}
+
+	/* 10/100 MAC: never advertise gigabit */
+	phydev->advertising &= ~(SUPPORTED_1000baseT_Half
+				| SUPPORTED_1000baseT_Full);
+
+
+	/*
+	 * Broken HW is sometimes missing the pull-up resistor on the
+	 * MDIO line, which results in reads to non-existent devices returning
+	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+	 * device as well.
+	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
+	 */
+	if (phydev->phy_id == 0) {
+		phy_disconnect(phydev);
+		return -ENODEV;
+	}
+	pr_debug("fh_gmac_init_phy: %s: attached to PHY (UID 0x%x)"
+		" Link = %d\n", ndev->name, phydev->phy_id, phydev->link);
+
+	return 0;
+}
+
+
+/*
+ * .ndo_open: bring the interface up.
+ * Sequence: sanitise module params -> register MDIO bus -> attach PHY
+ * -> allocate/init descriptor rings -> DMA reset/init -> program MAC
+ * address and core config -> request IRQ -> enable MAC -> set DMA op
+ * mode -> start DMA, PHY, NAPI and the TX queue.
+ * On any failure the PHY is disconnected and the error returned.
+ */
+static int gmac_dev_open(struct net_device *ndev)
+{
+	Gmac_Object *pGmac = netdev_priv(ndev);
+	int ret;
+
+	fh_gmac_verify_args();
+
+	/* MDIO bus Registration */
+	ret = fh_mdio_register(ndev);
+	if (ret < 0)
+		goto open_error;
+
+	ret = fh_gmac_init_phy(ndev);
+	if (unlikely(ret)) {
+		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+		goto open_error;
+	}
+	/* Create and initialize the TX/RX descriptors chains. */
+	/* FIXME: STMMAC_ALIGN(buf_sz); */
+	pGmac->dma_tx_size = dma_txsize;
+	pGmac->dma_rx_size = dma_rxsize;
+	pGmac->dma_buf_sz = BUFFER_SIZE_2K;
+
+	GMAC_DMA_InitDescRings(ndev);
+	/* DMA initialization and SW reset */
+	ret = GMAC_DMA_Init(ndev, pGmac->tx_bus_addr, pGmac->rx_bus_addr);
+	if (ret < 0) {
+		pr_err("%s: DMA initialization failed\n", __func__);
+		goto open_error;
+	}
+
+	/* Copy the MAC addr into the HW */
+	GMAC_SetMacAddress(pGmac);
+
+	/* Initialize the MAC Core */
+	GMAC_CoreInit(pGmac);
+
+	netdev_update_features(ndev);
+
+	/* Request the IRQ lines */
+	ret = request_irq(ndev->irq, fh_gmac_interrupt,
+			IRQF_SHARED, ndev->name, ndev);
+	if (unlikely(ret < 0)) {
+		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+			__func__, ndev->irq, ret);
+		goto open_error;
+	}
+
+	/* Enable the MAC Rx/Tx */
+	GMAC_EnableMac(pGmac);
+
+	/* Set the HW DMA mode and the COE */
+	/* FIXME:TTC or SF */
+	GMAC_DMA_OpMode(pGmac);
+
+	/* Extra statistics */
+	memset(&pGmac->stats, 0, sizeof(struct Gmac_Stats));
+	/* FIXME: threshold? */
+	pGmac->stats.threshold = 64;
+
+	/* Start the ball rolling... */
+	pr_debug("%s: DMA RX/TX processes started...\n", ndev->name);
+	GMAC_DMA_StartTx(pGmac);
+	GMAC_DMA_StartRx(pGmac);
+	/* FIXME: dump register */
+
+	if (pGmac->phydev)
+		phy_start(pGmac->phydev);
+
+	napi_enable(&pGmac->napi);
+	skb_queue_head_init(&pGmac->rx_recycle);
+	netif_start_queue(ndev);
+
+	return 0;
+
+open_error:
+	if (pGmac->phydev)
+		phy_disconnect(pGmac->phydev);
+
+	return ret;
+
+}
+
+/*
+ * .ndo_stop: tear down in reverse order of open — stop/disconnect the
+ * PHY, stop the queue and NAPI, drop recycled skbs, free the IRQ, halt
+ * the DMA engines, free descriptor resources, disable the MAC, and
+ * finally unregister the MDIO bus.
+ */
+static int gmac_dev_stop(struct net_device *ndev)
+{
+
+	Gmac_Object *pGmac = netdev_priv(ndev);
+
+	/* Stop and disconnect the PHY */
+	if (pGmac->phydev) {
+		phy_stop(pGmac->phydev);
+		phy_disconnect(pGmac->phydev);
+		pGmac->phydev = NULL;
+
+		pGmac->oldduplex = 0;
+		pGmac->speed = 0;
+	}
+
+	netif_stop_queue(ndev);
+
+	napi_disable(&pGmac->napi);
+	skb_queue_purge(&pGmac->rx_recycle);
+
+	/* Free the IRQ lines */
+	free_irq(ndev->irq, ndev);
+
+	/* Stop TX/RX DMA and clear the descriptors */
+	GMAC_DMA_StopTx(pGmac);
+	GMAC_DMA_StopRx(pGmac);
+
+	/* Release and free the Rx/Tx resources */
+	GMAC_DMA_FreeDesc(pGmac);
+
+	/* Disable the MAC Rx/Tx */
+	GMAC_DisableMac(pGmac);
+
+	netif_carrier_off(ndev);
+
+	fh_mdio_unregister(ndev);
+
+	return 0;
+}
+
+
+/* net_device operations table; registered from fh_gmac_probe(). */
+static const struct net_device_ops fh_gmac_netdev_ops = {
+	.ndo_open = gmac_dev_open,
+	.ndo_stop = gmac_dev_stop,
+	.ndo_start_xmit = gmac_dev_xmit,
+	.ndo_set_multicast_list = gmac_dev_mcast_set,
+	.ndo_set_mac_address = gmac_dev_set_mac_addr,
+	.ndo_do_ioctl = gmac_dev_ioctl,
+	.ndo_tx_timeout = gmac_dev_tx_timeout,
+	.ndo_change_mtu = gmac_dev_change_mtu,
+	.ndo_fix_features = NULL,
+	.ndo_set_config = gmac_dev_set_config,
+#ifdef STMMAC_VLAN_TAG_USED
+	.ndo_vlan_rx_register = stmmac_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = gmac_poll_controller,
+#endif
+};
+
+
+/*
+ * Platform probe: claim and map the MMIO region, fetch the IRQ, enable
+ * the "eth_clk" clock, wire up netdev/ethtool ops and NAPI, pick a
+ * random MAC when none is valid, register the netdev, and finally run
+ * the platform early_init/plat_init hooks.  Errors unwind through the
+ * labelled cleanup chain at the bottom.
+ */
+static int __devinit fh_gmac_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	Gmac_Object *pGmac;
+	struct net_device *ndev;
+	struct resource *mem_res, *irq_res;
+	struct fh_gmac_platform_data *plat_data;
+
+	pr_info("GMAC driver:\n\tplatform registration... ");
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_res) {
+		pr_err("%s: ERROR: getting resource failed"
+			"cannot get IORESOURCE_MEM\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	if (!request_mem_region(mem_res->start,
+			resource_size(mem_res), pdev->name)) {
+		pr_err("%s: ERROR: memory allocation failed"
+			"cannot get the I/O addr 0x%x\n",
+			__func__, (__u32)mem_res->start);
+		return -EBUSY;
+	}
+
+	ndev = alloc_etherdev(sizeof(Gmac_Object));
+
+	if (!ndev) {
+		pr_err("%s: ERROR: allocating the device\n", __func__);
+		ret = -ENOMEM;
+		goto out_release_region;
+	}
+
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq_res) {
+		pr_err("%s: ERROR: getting resource failed"
+			"cannot get IORESOURCE_IRQ\n", __func__);
+		ret = -ENXIO;
+		goto out_free_ndev;
+	}
+	ndev->irq = irq_res->start;
+
+	pGmac = netdev_priv(ndev);
+
+	pGmac->remap_addr = ioremap(mem_res->start, resource_size(mem_res));
+
+	if (!pGmac->remap_addr) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+		ret = -ENOMEM;
+		goto out_free_ndev;
+	}
+
+	pGmac->clk = clk_get(&pdev->dev, "eth_clk");
+	if (IS_ERR(pGmac->clk)) {
+		ret = PTR_ERR(pGmac->clk);
+		goto out_unmap;
+	}
+	clk_enable(pGmac->clk);
+	/* add net_device to platform_device */
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	pGmac->dev = &(pdev->dev);
+	pGmac->pdev = pdev;
+	pGmac->ndev = ndev;
+	plat_data = pdev->dev.platform_data;
+	pGmac->priv_data = plat_data;
+
+	platform_set_drvdata(pdev, ndev);
+	ndev->base_addr = (unsigned long)pGmac->remap_addr;
+
+	ether_setup(ndev);
+	ndev->netdev_ops = &fh_gmac_netdev_ops;
+	fh_gmac_set_ethtool_ops(ndev);
+
+	ndev->hw_features = NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_RXCSUM;
+	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+	pGmac->msg_enable = netif_msg_init(debug, FH_GMAC_DEBUG);
+
+
+	if (flow_ctrl) {
+		/* RX/TX pause on */
+		pGmac->flow_ctrl = FLOW_AUTO;
+	}
+
+	pGmac->pause = pause;
+
+
+	netif_napi_add(ndev, &(pGmac->napi), fh_gmac_poll, 64);
+
+
+	if (!is_valid_ether_addr(pGmac->local_mac_address)) {
+		/* Use random MAC if none passed */
+		random_ether_addr(pGmac->local_mac_address);
+		pr_warning("\tusing random MAC address: %pM\n",
+			pGmac->local_mac_address);
+	}
+	ndev->dev_addr = pGmac->local_mac_address;
+
+	spin_lock_init(&pGmac->lock);
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		pr_err("%s: ERROR %i registering the netdevice\n",
+			__func__, ret);
+		ret = -ENODEV;
+		goto out_plat_exit;
+	}
+
+	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
+		"\t\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
+		pdev->id, ndev->irq, pGmac->remap_addr);
+
+	/* phymode may have been overridden by the ATAG_PHYMODE boot tag */
+	plat_data->interface = pGmac->phy_interface = phymode;
+
+	/* Custom initialisation */
+	if (pGmac->priv_data->early_init)
+		pGmac->priv_data->early_init(plat_data);
+
+	if (pGmac->priv_data->plat_init)
+		pGmac->priv_data->plat_init(plat_data);
+
+	return 0;
+
+out_plat_exit:
+	clk_disable(pGmac->clk);
+out_unmap:
+	iounmap(pGmac->remap_addr);
+out_free_ndev:
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+out_release_region:
+	release_mem_region(mem_res->start, resource_size(mem_res));
+
+	return ret;
+}
+
+/* Boot-tag parser: lets the bootloader select the PHY interface mode
+ * (MII/RMII) via an ATAG_PHYMODE tag, overriding the default. */
+static int __init parse_tag_phymode(const struct tag *tag)
+{
+	phymode = tag->u.phymode.phymode;
+	return 0;
+}
+
+__tagtable(ATAG_PHYMODE, parse_tag_phymode);
+
+/*
+ * Platform remove: stop the DMA engines and MAC, unregister the
+ * netdev, disable the clock, and release the MMIO mapping/region
+ * before freeing the net_device.
+ */
+static int __devexit fh_gmac_remove(struct platform_device *pdev)
+{
+
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	Gmac_Object *pGmac = netdev_priv(ndev);
+	struct resource *res;
+
+	pr_info("%s:\n\tremoving driver", __func__);
+
+	GMAC_DMA_StopTx(pGmac);
+	GMAC_DMA_StopRx(pGmac);
+
+	GMAC_DisableMac(pGmac);
+
+	netif_carrier_off(ndev);
+
+	platform_set_drvdata(pdev, NULL);
+	unregister_netdev(ndev);
+
+	clk_disable(pGmac->clk);
+
+	iounmap((void *)pGmac->remap_addr);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+	free_netdev(ndev);
+
+	return 0;
+}
+
+
+#ifdef CONFIG_PM
+/*
+ * System suspend: detach the interface, stop PHY/NAPI/DMA and reset
+ * the descriptor rings.  The MAC is left powered (for WoL) when the
+ * device may wake the system, otherwise it is disabled outright.
+ */
+static int fh_gmac_suspend(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	Gmac_Object *pGmac = netdev_priv(ndev);
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	spin_lock(&pGmac->lock);
+
+	netif_device_detach(ndev);
+	netif_stop_queue(ndev);
+	if (pGmac->phydev)
+		phy_stop(pGmac->phydev);
+
+	napi_disable(&pGmac->napi);
+
+	/* Stop TX/RX DMA */
+	GMAC_DMA_StopTx(pGmac);
+	GMAC_DMA_StopRx(pGmac);
+	/* Clear the Rx/Tx descriptors */
+	GMAC_DMA_InitRxDesc(pGmac->rx_dma_descriptors, pGmac->dma_rx_size);
+	GMAC_DMA_InitTxDesc(pGmac->tx_dma_descriptors, pGmac->dma_tx_size);
+
+	/* Enable Power down mode by programming the PMT regs */
+	if (device_may_wakeup(pGmac->dev)) {
+		/* no power management required */
+		/* priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); */
+	} else {
+		GMAC_DisableMac(pGmac);
+	}
+
+	spin_unlock(&pGmac->lock);
+	return 0;
+}
+
+/*
+ * System resume: soft-reset the PHY over MDIO (BMCR reset bit), then —
+ * if the interface was running — reattach it, re-enable MAC and DMA,
+ * and restart NAPI, the PHY state machine and the TX queue.
+ *
+ * NOTE(review): the MDIO read/write/reset happen before the
+ * netif_running() check; this presumes pGmac->mii is still valid while
+ * the interface is down (the bus is registered in open) — confirm.
+ */
+static int fh_gmac_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	Gmac_Object *pGmac = netdev_priv(ndev);
+	u32 reg;
+
+	/* set BMCR bit 15: PHY software reset */
+	reg = pGmac->mii->read(pGmac->mii, 0, 0);
+	reg |= 1 << 15;
+	pGmac->mii->write(pGmac->mii, 0, 0, reg);
+
+	pGmac->mii->reset(pGmac->mii);
+
+	if (!netif_running(ndev))
+		return 0;
+
+	spin_lock(&pGmac->lock);
+
+	/* Power Down bit, into the PM register, is cleared
+	 * automatically as soon as a magic packet or a Wake-up frame
+	 * is received. Anyway, it's better to manually clear
+	 * this bit because it can generate problems while resuming
+	 * from another devices (e.g. serial console). */
+	if (device_may_wakeup(pGmac->dev)) {
+		/* no power management required */
+		/* priv->hw->mac->pmt(priv->ioaddr, 0); */
+	}
+
+	netif_device_attach(ndev);
+
+	/* Enable the MAC and DMA */
+	GMAC_EnableMac(pGmac);
+	GMAC_DMA_StartTx(pGmac);
+	GMAC_DMA_StartRx(pGmac);
+
+	napi_enable(&pGmac->napi);
+
+	if (pGmac->phydev)
+		phy_start(pGmac->phydev);
+
+	netif_start_queue(ndev);
+
+	spin_unlock(&pGmac->lock);
+	return 0;
+}
+
+/* Hibernation freeze: a full interface stop when running. */
+static int fh_gmac_freeze(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	return gmac_dev_stop(ndev);
+}
+
+/* Hibernation thaw/restore: a full interface re-open when running. */
+static int fh_gmac_restore(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	return gmac_dev_open(ndev);
+}
+
+/* Power-management callbacks; empty table when CONFIG_PM is off so the
+ * driver struct below can reference it unconditionally. */
+static const struct dev_pm_ops fh_gmac_pm_ops = {
+	.suspend = fh_gmac_suspend,
+	.resume = fh_gmac_resume,
+	.freeze = fh_gmac_freeze,
+	.thaw = fh_gmac_restore,
+	.restore = fh_gmac_restore,
+};
+#else
+static const struct dev_pm_ops fh_gmac_pm_ops;
+#endif /* CONFIG_PM */
+
+
+/* Platform driver binding to the "fh_gmac" platform device. */
+static struct platform_driver fh_gmac_driver = {
+	.driver = {
+		.name = "fh_gmac",
+		.owner = THIS_MODULE,
+		.pm = &fh_gmac_pm_ops,
+	},
+	.probe = fh_gmac_probe,
+	.remove = __devexit_p(fh_gmac_remove),
+};
+
+/* Module init; late_initcall so clocks/platform devices exist first. */
+static int __init fh_gmac_init(void)
+{
+	return platform_driver_register(&fh_gmac_driver);
+}
+late_initcall(fh_gmac_init);
+
+
+/* Module exit: drop the platform driver registration. */
+static void __exit fh_gmac_exit(void)
+{
+	platform_driver_unregister(&fh_gmac_driver);
+}
+module_exit(fh_gmac_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("QIN");
+MODULE_DESCRIPTION("Fullhan Ethernet driver");
diff --git a/drivers/net/fh_gmac/fh_gmac_phyt.c b/drivers/net/fh_gmac/fh_gmac_phyt.c
new file mode 100644
index 00000000..f9aee3d1
--- /dev/null
+++ b/drivers/net/fh_gmac/fh_gmac_phyt.c
@@ -0,0 +1,227 @@
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <mach/fh_gmac.h>
+#include "fh_gmac_phyt.h"
+#include "fh_gmac.h"
+
+static int fh_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ Gmac_Object *pGmac = netdev_priv(ndev);
+ int timeout = 1000;
+
+ if (phyaddr < 0)
+ return -ENODEV;
+
+ writel(phyaddr << 11 | gmac_gmii_clock_100_150 << 2 | phyreg << 6 | 0x1,
+ pGmac->remap_addr + REG_GMAC_GMII_ADDRESS);
+
+ while (readl(pGmac->remap_addr + REG_GMAC_GMII_ADDRESS) & 0x1) {
+ udelay(100);
+ timeout--;
+ if (timeout < 0) {
+ printk(KERN_ERR "ERROR: %s, timeout\n", __func__);
+ break;
+ }
+ }
+
+ return readl(pGmac->remap_addr + REG_GMAC_GMII_DATA);
+}
+
+static int fh_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ Gmac_Object *pGmac = netdev_priv(ndev);
+ int timeout = 1000;
+
+ if (phyaddr < 0)
+ return -ENODEV;
+
+ writel(phydata, pGmac->remap_addr + REG_GMAC_GMII_DATA);
+ writel(0x1 << 1 | phyaddr << 11 | gmac_gmii_clock_100_150 << 2 | phyreg
+ << 6 | 0x1, pGmac->remap_addr + REG_GMAC_GMII_ADDRESS);
+
+ while (readl(pGmac->remap_addr + REG_GMAC_GMII_ADDRESS) & 0x1) {
+ udelay(100);
+ timeout--;
+ if (timeout < 0) {
+ printk(KERN_ERR "ERROR: %s, timeout\n", __func__);
+ break;
+ }
+ }
+ return 0;
+}
+
+int fh_mdio_reset(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ Gmac_Object *pGmac = netdev_priv(ndev);
+ struct fh_gmac_platform_data *plat_data;
+
+ plat_data = pGmac->priv_data;
+
+ if (plat_data && plat_data->phy_reset)
+ plat_data->phy_reset();
+
+ return 0;
+}
+
+int fh_mdio_set_mii(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ Gmac_Object *pGmac = netdev_priv(ndev);
+ __u32 rmii_mode;
+ int phyid = pGmac->priv_data->phyid;
+
+ if (pGmac->phydev == NULL)
+ return -ENODEV;
+
+ if (pGmac->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ switch (pGmac->phydev->phy_id) {
+ case FH_GMAC_PHY_RTL8201:
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_rtl8201_page_select, 7);
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_rtl8201_rmii_mode, 0x1ffa);
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_rtl8201_page_select, 0);
+ break;
+ case FH_GMAC_PHY_IP101G:
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_ip101g_page_select, 16);
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_rtl8201_rmii_mode, 0x1006);
+#if defined(CONFIG_ARCH_FH8856) || defined(CONFIG_ARCH_ZY2) \
+ || defined(CONFIG_ARCH_FH8626V100)
+ /* adjust ip101g rxd0 & rxd1 drv curr */
+ fh_mdio_write(bus, phyid, 26, 0xc5ed);
+#endif
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_ip101g_page_select, 0x10);
+ break;
+ case FH_GMAC_PHY_TI83848:
+ rmii_mode = fh_mdio_read(bus, phyid,
+ gmac_phyt_ti83848_rmii_mode);
+ rmii_mode |= 0x20;
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_ti83848_rmii_mode, rmii_mode);
+ break;
+ default:
+ return -ENODEV;
+ }
+ } else if (pGmac->phy_interface == PHY_INTERFACE_MODE_MII) {
+ switch (pGmac->phydev->phy_id) {
+ case FH_GMAC_PHY_RTL8201:
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_rtl8201_page_select, 7);
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_rtl8201_rmii_mode, 0x6ff3);
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_rtl8201_page_select, 0);
+ break;
+ case FH_GMAC_PHY_IP101G:
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_ip101g_page_select, 16);
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_rtl8201_rmii_mode, 0x2);
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_ip101g_page_select, 0x10);
+ break;
+ case FH_GMAC_PHY_TI83848:
+ rmii_mode = fh_mdio_read(bus, phyid,
+ gmac_phyt_ti83848_rmii_mode);
+ rmii_mode &= ~(0x20);
+ fh_mdio_write(bus, phyid,
+ gmac_phyt_ti83848_rmii_mode, rmii_mode);
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+int fh_mdio_register(struct net_device *ndev)
+{
+ int err = 0, found, addr;
+ struct mii_bus *new_bus;
+ Gmac_Object *pGmac = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
+
+ new_bus = mdiobus_alloc();
+ if (new_bus == NULL)
+ return -ENOMEM;
+ new_bus->name =
+ pGmac->phy_interface ==
+ PHY_INTERFACE_MODE_MII ? "gmac_mii" : "gmac_rmii";
+ new_bus->read = &fh_mdio_read;
+ new_bus->write = &fh_mdio_write;
+ new_bus->reset = &fh_mdio_reset;
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+ new_bus->priv = ndev;
+ new_bus->parent = pGmac->dev;
+ err = mdiobus_register(new_bus);
+ if (err != 0) {
+ pr_err("%s: Cannot register as MDIO bus, error: %d\n",
+ new_bus->name, err);
+ goto bus_register_fail;
+ }
+
+ pGmac->mii = new_bus;
+
+ found = 0;
+ for (addr = 0; addr < 32; addr++) {
+ phydev = new_bus->phy_map[addr];
+ if (phydev) {
+ if (pGmac->priv_data->phyid == -1)
+ pGmac->priv_data->phyid = addr;
+
+ found = 1;
+ pGmac->phydev = phydev;
+ break;
+ }
+ }
+
+ if (pGmac->mii == NULL || phydev == NULL) {
+ pr_warning("%s: MII BUS or phydev is NULL\n", ndev->name);
+ err = -ENXIO;
+ goto bus_register_fail;
+ }
+
+ err = fh_mdio_set_mii(pGmac->mii);
+
+ if (!found || err) {
+ pr_warning("%s: No PHY found\n", ndev->name);
+ err = -ENXIO;
+ goto bus_register_fail;
+ }
+
+ pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
+ ndev->name, pGmac->phydev->phy_id, addr,
+ pGmac->phydev->irq, dev_name(&pGmac->phydev->dev),
+ (addr ==
+ pGmac->priv_data->phyid) ? " active" : "");
+
+ return 0;
+
+bus_register_fail:
+ pGmac->phydev = NULL;
+ mdiobus_unregister(new_bus);
+ kfree(new_bus);
+ return err;
+}
+
+int fh_mdio_unregister(struct net_device *ndev)
+{
+ Gmac_Object *pGmac = netdev_priv(ndev);
+
+ mdiobus_unregister(pGmac->mii);
+ pGmac->mii->priv = NULL;
+ kfree(pGmac->mii);
+ return 0;
+}
diff --git a/drivers/net/fh_gmac/fh_gmac_phyt.h b/drivers/net/fh_gmac/fh_gmac_phyt.h
new file mode 100755
index 00000000..de53c08d
--- /dev/null
+++ b/drivers/net/fh_gmac/fh_gmac_phyt.h
@@ -0,0 +1,83 @@
+/*
+ * fh_gmac_phyt.h
+ *
+ * Created on: May 22, 2014
+ * Author: duobao
+ */
+
+#ifndef FH_GMAC_PHYT_H_
+#define FH_GMAC_PHYT_H_
+
+#define FH_GMAC_PHY_IP101G 0x02430C54
+#define FH_GMAC_PHY_RTL8201 0x001CC816
+#define FH_GMAC_PHY_TI83848 0xFFFFFFFF
+
+enum
+{
+ gmac_phyt_speed_10M_half_duplex = 1,
+ gmac_phyt_speed_100M_half_duplex = 2,
+ gmac_phyt_speed_10M_full_duplex = 5,
+ gmac_phyt_speed_100M_full_duplex = 6
+};
+
+
+typedef union
+{
+ struct
+ {
+ __u32 reserved_6_0 :7;
+ __u32 collision_test :1;
+ __u32 duplex_mode :1;
+ __u32 restart_auto_negotiate :1;
+ __u32 isolate :1;
+ __u32 power_down :1;
+ __u32 auto_negotiate_enable :1;
+ __u32 speed_select :1;
+ __u32 loopback :1;
+ __u32 reset :1;
+ __u32 reserved_31_16 :16;
+ }bit;
+ __u32 dw;
+}Reg_Phyt_Basic_Ctrl;
+
+
+typedef union
+{
+ struct
+ {
+ __u32 extended_capabilities :1;
+ __u32 jabber_detect :1;
+ __u32 link_status :1;
+ __u32 auto_negotiate_ability :1;
+ __u32 remote_fault :1;
+ __u32 auto_negotiate_complete :1;
+ __u32 reserved_10_6 :5;
+ __u32 base_t_half_duplex_10 :1;
+ __u32 base_t_full_duplex_10 :1;
+ __u32 base_tx_half_duplex_100 :1;
+ __u32 base_tx_full_duplex_100 :1;
+ __u32 base_t_4 :1;
+ __u32 reserved_31_16 :16;
+ }bit;
+ __u32 dw;
+}Reg_Phyt_Basic_Status;
+
+typedef union
+{
+ struct
+ {
+ __u32 scramble_disable :1;
+ __u32 reserved_1 :1;
+ __u32 speed_indication :3;
+ __u32 reserved_5 :1;
+ __u32 enable_4b5b :1;
+ __u32 gpo :3;
+ __u32 reserved_11_10 :2;
+ __u32 auto_done :1;
+ __u32 reserved_31_13 :19;
+ }bit;
+ __u32 dw;
+}Reg_Phyt_Special_Status;
+
+
+#endif /* FH_GMAC_PHYT_H_ */
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a4759576..1c6eed4b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -749,7 +749,8 @@ void phy_start(struct phy_device *phydev)
phydev->state = PHY_PENDING;
break;
case PHY_READY:
- phydev->state = PHY_UP;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
+ phydev->state = PHY_AN;
break;
case PHY_HALTED:
phydev->state = PHY_RESUMING;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ff109fe5..6ede872a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -845,7 +845,7 @@ static int genphy_config_init(struct phy_device *phydev)
* all possible port types */
features = (SUPPORTED_TP | SUPPORTED_MII
| SUPPORTED_AUI | SUPPORTED_FIBRE |
- SUPPORTED_BNC);
+ SUPPORTED_BNC | SUPPORTED_Pause);
/* Do we support autonegotiation? */
val = phy_read(phydev, MII_BMSR);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 52502883..3b5025a1 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -20,8 +20,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-// #define DEBUG // error path messages, extra info
-// #define VERBOSE // more; success messages
+/* #define DEBUG */
+/* #define VERBOSE */
#include <linux/module.h>
#include <linux/kmod.h>
@@ -39,6 +39,22 @@
#define DRIVER_VERSION "14-Jun-2006"
static const char driver_name [] = "asix";
+/* Copied from Linux 4.9 by zhangy, 2018-10-31 */
+struct asix_rx_fixup_info {
+ struct sk_buff *ax_skb;
+ u32 header;
+ u16 remaining;
+ bool split_head;
+};
+/* Copied from Linux 4.9 by zhangy, 2018-10-31 */
+struct asix_common_private {
+ void (*resume)(struct usbnet *dev);
+ void (*suspend)(struct usbnet *dev);
+ u16 presvd_phy_advertise;
+ u16 presvd_phy_bmcr;
+ struct asix_rx_fixup_info rx_fixup_info;
+};
+
/* ASIX AX8817X based USB 2.0 Ethernet Devices */
#define AX_CMD_SET_SW_MII 0x06
@@ -135,8 +151,8 @@ static const char driver_name [] = "asix";
#define AX_RX_CTL_MFB_8192 0x0200
#define AX_RX_CTL_MFB_16384 0x0300
-#define AX_DEFAULT_RX_CTL \
- (AX_RX_CTL_SO | AX_RX_CTL_AB )
+
+#define AX_DEFAULT_RX_CTL ( AX_RX_CTL_AB )
/* GPIO 0 .. 2 toggles */
#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
@@ -163,7 +179,7 @@ static const char driver_name [] = "asix";
#define MARVELL_CTRL_TXDELAY 0x0002
#define MARVELL_CTRL_RXDELAY 0x0080
-
+struct asix_common_private * get_asix_private(struct usbnet *dev);
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct asix_data {
u8 multi_filter[AX_MCAST_FILTER_SIZE];
@@ -266,15 +282,19 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
int status;
struct urb *urb;
- netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ netdev_dbg(dev->net, "asix_write_cmd_async()"
+ "cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
cmd, value, index, size);
if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
- netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
+ netdev_err(dev->net,
+ "Error allocating URB in write_cmd_async!\n");
return;
}
- if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
- netdev_err(dev->net, "Failed to allocate memory for control request\n");
+ if ((req = kmalloc(sizeof(struct usb_ctrlrequest),
+ GFP_ATOMIC)) == NULL) {
+ netdev_err(dev->net,
+ "Failed to allocate memory for control request\n");
usb_free_urb(urb);
return;
}
@@ -290,7 +310,7 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
(void *)req, data, size,
asix_async_cmd_callback, req);
- if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
+ if ((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
netdev_err(dev->net, "Error submitting the control message: status=%d\n",
status);
kfree(req);
@@ -298,98 +318,120 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
}
+/* Copied from Linux 4.9 by zhangy, 2018-10-31 */
static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
- u8 *head;
- u32 header;
- char *packet;
- struct sk_buff *ax_skb;
+ struct asix_rx_fixup_info *rx;
+ struct asix_common_private *priv;
+ int offset = 0;
u16 size;
+ priv = get_asix_private(dev);
+ rx = &priv->rx_fixup_info;
+ /* When an Ethernet frame spans multiple URB socket buffers,
+ * do a sanity test for the Data header synchronisation.
+ * Attempt to detect the situation of the previous socket buffer having
+ * been truncated or a socket buffer was missing. These situations
+ * cause a discontinuity in the data stream and therefore need to avoid
+ * appending bad data to the end of the current netdev socket buffer.
+ * Also avoid unnecessarily discarding a good current netdev socket
+ * buffer.
+ */
+ if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
+ offset = ((rx->remaining + 1) & 0xfffe);
+ rx->header = get_unaligned_le32(skb->data + offset);
+ offset = 0;
+
+ size = (u16)(rx->header & 0x7ff);
+ if (size != ((~rx->header >> 16) & 0x7ff)) {
+ netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
+ rx->remaining);
+ if (rx->ax_skb) {
+ kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ /* Discard the incomplete netdev Ethernet frame
+ * and assume the Data header is at the start of
+ * the current URB socket buffer.
+ */
+ }
+ rx->remaining = 0;
+ }
+ }
- head = (u8 *) skb->data;
- memcpy(&header, head, sizeof(header));
- le32_to_cpus(&header);
- packet = head + sizeof(header);
+ while (offset + sizeof(u16) <= skb->len) {
+ u16 copy_length;
+ unsigned char *data;
+
+ if (!rx->remaining) {
+ if (skb->len - offset == sizeof(u16)) {
+ rx->header = get_unaligned_le16(
+ skb->data + offset);
+ rx->split_head = true;
+ offset += sizeof(u16);
+ break;
+ }
- skb_pull(skb, 4);
+ if (rx->split_head == true) {
+ rx->header |= (get_unaligned_le16(
+ skb->data + offset) << 16);
+ rx->split_head = false;
+ offset += sizeof(u16);
+ } else {
+ rx->header = get_unaligned_le32(skb->data +
+ offset);
+ offset += sizeof(u32);
+ }
- while (skb->len > 0) {
- if ((short)(header & 0x0000ffff) !=
- ~((short)((header & 0xffff0000) >> 16))) {
- netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
- }
- /* get the packet length */
- size = (u16) (header & 0x0000ffff);
-
- if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
- u8 alignment = (unsigned long)skb->data & 0x3;
- if (alignment != 0x2) {
- /*
- * not 16bit aligned so use the room provided by
- * the 32 bit header to align the data
- *
- * note we want 16bit alignment as MAC header is
- * 14bytes thus ip header will be aligned on
- * 32bit boundary so accessing ipheader elements
- * using a cast to struct ip header wont cause
- * an unaligned accesses.
- */
- u8 realignment = (alignment + 2) & 0x3;
- memmove(skb->data - realignment,
- skb->data,
- size);
- skb->data -= realignment;
- skb_set_tail_pointer(skb, size);
+ /* take frame length from Data header 32-bit word */
+ size = (u16)(rx->header & 0x7ff);
+ if (size != ((~rx->header >> 16) & 0x7ff)) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
+ rx->header, offset);
+ return 0;
}
- return 2;
- }
+ if (size > dev->net->mtu + ETH_HLEN + 4) {
+ netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+ size);
+ return 0;
+ }
+
+ /* Sometimes may fail to get a netdev socket buffer but
+ * continue to process the URB socket buffer so that
+ * synchronisation of the Ethernet frame Data header
+ * word is maintained.
+ */
+ rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
- if (size > dev->net->mtu + ETH_HLEN) {
- netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
- size);
- return 0;
+ rx->remaining = size;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (ax_skb) {
- u8 alignment = (unsigned long)packet & 0x3;
- ax_skb->len = size;
-
- if (alignment != 0x2) {
- /*
- * not 16bit aligned use the room provided by
- * the 32 bit header to align the data
- */
- u8 realignment = (alignment + 2) & 0x3;
- memmove(packet - realignment, packet, size);
- packet -= realignment;
- }
- ax_skb->data = packet;
- skb_set_tail_pointer(ax_skb, size);
- usbnet_skb_return(dev, ax_skb);
+
+ if (rx->remaining > skb->len - offset) {
+ copy_length = skb->len - offset;
+ rx->remaining -= copy_length;
} else {
- return 0;
+ copy_length = rx->remaining;
+ rx->remaining = 0;
}
- skb_pull(skb, (size + 1) & 0xfffe);
-
- if (skb->len == 0)
- break;
+ if (rx->ax_skb) {
+ data = skb_put(rx->ax_skb, copy_length);
+ memcpy(data, skb->data + offset, copy_length);
+ if (!rx->remaining)
+ usbnet_skb_return(dev, rx->ax_skb);
+ }
- head = (u8 *) skb->data;
- memcpy(&header, head, sizeof(header));
- le32_to_cpus(&header);
- packet = head + sizeof(header);
- skb_pull(skb, 4);
+ offset += (copy_length + 1) & 0xfffe;
}
- if (skb->len < 0) {
- netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
- skb->len);
+ if (skb->len != offset) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
+ skb->len, offset);
return 0;
}
+
return 1;
}
+
static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
@@ -442,7 +484,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
if (netif_carrier_ok(dev->net) != link) {
if (link) {
netif_carrier_on(dev->net);
- usbnet_defer_kevent (dev, EVENT_LINK_RESET );
+ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
} else
netif_carrier_off(dev->net);
netdev_dbg(dev->net, "Link Status is: %d\n", link);
@@ -489,8 +531,7 @@ out:
static int asix_sw_reset(struct usbnet *dev, u8 flags)
{
int ret;
-
- ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
if (ret < 0)
netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
@@ -514,7 +555,6 @@ out:
static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
{
int ret;
-
netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
if (ret < 0)
@@ -562,9 +602,6 @@ static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
value, ret);
- if (sleep)
- msleep(sleep);
-
return ret;
}
@@ -575,7 +612,7 @@ static void asix_set_multicast(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct asix_data *data = (struct asix_data *)&dev->data;
- u16 rx_ctl = AX_DEFAULT_RX_CTL;
+ u16 rx_ctl = AX_DEFAULT_RX_CTL | AX_RX_CTL_SO;
if (net->flags & IFF_PROMISC) {
rx_ctl |= AX_RX_CTL_PRO;
@@ -729,7 +766,7 @@ static int asix_get_eeprom(struct net_device *net,
eeprom->magic = AX_EEPROM_MAGIC;
/* ax8817x returns 2 bytes from eeprom on read */
- for (i=0; i < eeprom->len / 2; i++) {
+ for (i = 0; i < eeprom->len / 2; i++) {
if (asix_read_cmd(dev, AX_CMD_READ_EEPROM,
eeprom->offset + i, 0, 2, &ebuf[i]) < 0)
return -EINVAL;
@@ -757,7 +794,7 @@ static u32 asix_get_link(struct net_device *net)
return mii_link_ok(&dev->mii);
}
-static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
+static int asix_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
{
struct usbnet *dev = netdev_priv(net);
@@ -870,7 +907,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
.ndo_set_multicast_list = ax88172_set_multicast,
@@ -886,7 +923,7 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
data->eeprom_len = AX88172_EEPROM_LEN;
- usbnet_get_endpoints(dev,intf);
+ usbnet_get_endpoints(dev, intf);
/* Toggle the GPIOs in a manufacturer/model specific way */
for (i = 2; i >= 0; i--) {
@@ -894,7 +931,6 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
(gpio_bits >> (i * 8)) & 0xff, 0, 0,
NULL)) < 0)
goto out;
- msleep(5);
}
if ((ret = asix_write_rx_ctl(dev, 0x80)) < 0)
@@ -963,7 +999,7 @@ static int ax88772_link_reset(struct usbnet *dev)
ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
-
+ asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL | AX_RX_CTL_SO);
return 0;
}
@@ -973,12 +1009,46 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_set_mac_address = asix_set_mac_address,
+ .ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
.ndo_set_multicast_list = asix_set_multicast,
};
+#ifdef ASIX_USE_UNBIND
+/* zhangy add unbind. but crash when usb reconnect */
+static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
+{
+ if (rx->ax_skb)
+ {
+ kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
+ rx->remaining = 0;
+ rx->split_head = false;
+ rx->header = 0;
+
+}
+static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct asix_common_private *p_private;
+ p_private = dev->driver_info->driver_priv;
+ if (p_private) {
+ reset_asix_rx_fixup_info(&p_private->rx_fixup_info);
+ /*may be add reset first...*/
+ kfree(p_private);
+ }
+
+}
+#endif
+
+struct asix_common_private *get_asix_private(struct usbnet *dev)
+{
+ if (dev->driver_info->driver_priv)
+ return (struct asix_common_private *)dev->driver_info->driver_priv;
+ return NULL;
+}
+
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret, embd_phy;
@@ -989,7 +1059,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
data->eeprom_len = AX88772_EEPROM_LEN;
- usbnet_get_endpoints(dev,intf);
+ usbnet_get_endpoints(dev, intf);
if ((ret = asix_write_gpio(dev,
AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5)) < 0)
@@ -1002,25 +1072,26 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dbg("Select PHY #1 failed: %d", ret);
goto out;
}
-
+ /* allocate the private info below (added by zhangy) */
+ if (!dev->driver_info->driver_priv) {
+ dev->driver_info->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
+ if (!dev->driver_info->driver_priv)
+ return -ENOMEM;
+ }
if ((ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL)) < 0)
goto out;
- msleep(150);
if ((ret = asix_sw_reset(dev, AX_SWRESET_CLEAR)) < 0)
goto out;
- msleep(150);
if (embd_phy) {
if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL)) < 0)
goto out;
- }
- else {
+ } else {
if ((ret = asix_sw_reset(dev, AX_SWRESET_PRTE)) < 0)
goto out;
}
- msleep(150);
rx_ctl = asix_read_rx_ctl(dev);
dbg("RX_CTL is 0x%04x after software reset", rx_ctl);
if ((ret = asix_write_rx_ctl(dev, 0x0000)) < 0)
@@ -1051,13 +1122,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
if ((ret = asix_sw_reset(dev, AX_SWRESET_PRL)) < 0)
goto out;
- msleep(150);
-
if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL)) < 0)
goto out;
- msleep(150);
-
dev->net->netdev_ops = &ax88772_netdev_ops;
dev->net->ethtool_ops = &ax88772_ethtool_ops;
@@ -1154,13 +1221,13 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
reg &= 0xfc0f;
switch (speed) {
- case SPEED_1000:
+ case SPEED_1000:
reg |= 0x03e0;
break;
- case SPEED_100:
+ case SPEED_100:
reg |= 0x03b0;
break;
- default:
+ default:
reg |= 0x02f0;
}
@@ -1286,7 +1353,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
int gpio0 = 0;
u32 phyid;
- usbnet_get_endpoints(dev,intf);
+ usbnet_get_endpoints(dev, intf);
asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
dbg("GPIO Status: 0x%04x", status);
@@ -1320,10 +1387,8 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
}
asix_sw_reset(dev, 0);
- msleep(150);
asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
- msleep(150);
asix_write_rx_ctl(dev, 0);
@@ -1352,7 +1417,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
if (data->phymode == PHY_MODE_MARVELL) {
marvell_phy_init(dev);
- msleep(60);
}
asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR,
@@ -1425,10 +1489,13 @@ static const struct driver_info hawking_uf200_info = {
static const struct driver_info ax88772_info = {
.description = "ASIX AX88772 USB 2.0 Ethernet",
.bind = ax88772_bind,
+#ifdef ASIX_USE_UNBIND
+ .unbind = ax88772_unbind,
+#endif
.status = asix_status,
.link_reset = ax88772_link_reset,
.reset = ax88772_link_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR ,
.rx_fixup = asix_rx_fixup,
.tx_fixup = asix_tx_fixup,
};
@@ -1444,125 +1511,146 @@ static const struct driver_info ax88178_info = {
.tx_fixup = asix_tx_fixup,
};
-static const struct usb_device_id products [] = {
+static const struct usb_device_id products[] = {
{
- // Linksys USB200M
USB_DEVICE (0x077b, 0x2226),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // Netgear FA120
+},
+{
USB_DEVICE (0x0846, 0x1040),
.driver_info = (unsigned long) &netgear_fa120_info,
-}, {
- // DLink DUB-E100
+},
+{
USB_DEVICE (0x2001, 0x1a00),
.driver_info = (unsigned long) &dlink_dub_e100_info,
-}, {
- // Intellinet, ST Lab USB Ethernet
+},
+{
USB_DEVICE (0x0b95, 0x1720),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // Hawking UF200, TrendNet TU2-ET100
+},
+{
USB_DEVICE (0x07b8, 0x420a),
.driver_info = (unsigned long) &hawking_uf200_info,
-}, {
- // Billionton Systems, USB2AR
+},
+{
USB_DEVICE (0x08dd, 0x90ff),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // ATEN UC210T
+},
+{
USB_DEVICE (0x0557, 0x2009),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // Buffalo LUA-U2-KTX
+},
+{
+
USB_DEVICE (0x0411, 0x003d),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // Buffalo LUA-U2-GT 10/100/1000
+},
+{
+
USB_DEVICE (0x0411, 0x006e),
.driver_info = (unsigned long) &ax88178_info,
-}, {
- // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
+},
+{
+
USB_DEVICE (0x6189, 0x182d),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // corega FEther USB2-TX
+},
+{
+
USB_DEVICE (0x07aa, 0x0017),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // Surecom EP-1427X-2
+},
+{
+
USB_DEVICE (0x1189, 0x0893),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // goodway corp usb gwusb2e
+},
+{
+
USB_DEVICE (0x1631, 0x6200),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // JVC MP-PRX1 Port Replicator
+},
+{
+
USB_DEVICE (0x04f1, 0x3008),
.driver_info = (unsigned long) &ax8817x_info,
-}, {
- // ASIX AX88772B 10/100
+},
+{
+
USB_DEVICE (0x0b95, 0x772b),
.driver_info = (unsigned long) &ax88772_info,
-}, {
- // ASIX AX88772 10/100
+},
+{
+
USB_DEVICE (0x0b95, 0x7720),
.driver_info = (unsigned long) &ax88772_info,
-}, {
- // ASIX AX88178 10/100/1000
+},
+{
+
USB_DEVICE (0x0b95, 0x1780),
.driver_info = (unsigned long) &ax88178_info,
-}, {
- // Logitec LAN-GTJ/U2A
+},
+{
+
USB_DEVICE (0x0789, 0x0160),
.driver_info = (unsigned long) &ax88178_info,
-}, {
- // Linksys USB200M Rev 2
+},
+{
+
USB_DEVICE (0x13b1, 0x0018),
.driver_info = (unsigned long) &ax88772_info,
-}, {
- // 0Q0 cable ethernet
+},
+{
+
USB_DEVICE (0x1557, 0x7720),
.driver_info = (unsigned long) &ax88772_info,
-}, {
- // DLink DUB-E100 H/W Ver B1
+},
+{
+
USB_DEVICE (0x07d1, 0x3c05),
.driver_info = (unsigned long) &ax88772_info,
-}, {
- // DLink DUB-E100 H/W Ver B1 Alternate
+},
+{
+
USB_DEVICE (0x2001, 0x3c05),
.driver_info = (unsigned long) &ax88772_info,
-}, {
- // Linksys USB1000
+},
+{
+
USB_DEVICE (0x1737, 0x0039),
.driver_info = (unsigned long) &ax88178_info,
-}, {
- // IO-DATA ETG-US2
- USB_DEVICE (0x04bb, 0x0930),
+},
+{
+
+ USB_DEVICE(0x04bb, 0x0930),
.driver_info = (unsigned long) &ax88178_info,
-}, {
- // Belkin F5D5055
+},
+{
+
USB_DEVICE(0x050d, 0x5055),
.driver_info = (unsigned long) &ax88178_info,
-}, {
- // Apple USB Ethernet Adapter
+},
+{
+
USB_DEVICE(0x05ac, 0x1402),
.driver_info = (unsigned long) &ax88772_info,
-}, {
- // Cables-to-Go USB Ethernet Adapter
+},
+{
+
USB_DEVICE(0x0b95, 0x772a),
.driver_info = (unsigned long) &ax88772_info,
-}, {
- // ABOCOM for pci
+},
+{
+
USB_DEVICE(0x14ea, 0xab11),
.driver_info = (unsigned long) &ax88178_info,
-}, {
- // ASIX 88772a
+},
+{
+
USB_DEVICE(0x0db0, 0xa877),
.driver_info = (unsigned long) &ax88772_info,
},
- { }, // END
+ { },
};
MODULE_DEVICE_TABLE(usb, products);
@@ -1578,13 +1666,13 @@ static struct usb_driver asix_driver = {
static int __init asix_init(void)
{
- return usb_register(&asix_driver);
+ return usb_register(&asix_driver);
}
module_init(asix_init);
static void __exit asix_exit(void)
{
- usb_deregister(&asix_driver);
+ usb_deregister(&asix_driver);
}
module_exit(asix_exit);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ce395fe5..1e456a82 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -64,19 +64,18 @@
* is required, under load. Jumbograms change the equation.
*/
#define RX_MAX_QUEUE_MEMORY (60 * 1518)
+
#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
(RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
(RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
-// reawaken network queue this soon after stopping; else watchdog barks
+
#define TX_TIMEOUT_JIFFIES (5*HZ)
-// throttle rx/tx briefly after some faults, so khubd might disconnect()
-// us (it polls at HZ/4 usually) before we report too many false errors.
+
#define THROTTLE_JIFFIES (HZ/8)
-// between wakeups
#define UNLINK_TIMEOUT_MS 3
/*-------------------------------------------------------------------------*/
@@ -92,7 +91,7 @@ module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");
/*-------------------------------------------------------------------------*/
-
+static void usbnet_bh (unsigned long param);
/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
@@ -239,13 +238,15 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
skb->len + sizeof (struct ethhdr), skb->protocol);
memset (skb->cb, 0, sizeof (struct skb_data));
status = netif_rx (skb);
- if (status != NET_RX_SUCCESS)
+ if (status != NET_RX_SUCCESS){
netif_dbg(dev, rx_err, dev->net,
"netif_rx status %d\n", status);
+ }
+
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);
-
+
/*-------------------------------------------------------------------------
*
* Network Device Driver (peer link to "Host Device", from USB host)
@@ -283,18 +284,30 @@ EXPORT_SYMBOL_GPL(usbnet_change_mtu);
* completion callbacks. 2.5 should have fixed those bugs...
*/
-static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
+static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+ struct sk_buff_head *list, enum skb_state state)
{
unsigned long flags;
+ enum skb_state old_state;
+ struct skb_data *entry = (struct skb_data *) skb->cb;
spin_lock_irqsave(&list->lock, flags);
+ old_state = entry->state;
+ entry->state = state;
__skb_unlink(skb, list);
- spin_unlock(&list->lock);
- spin_lock(&dev->done.lock);
+
+ /* defer_bh() is never called with list == &dev->done.
+ * spin_lock_nested() tells lockdep that it is OK to take
+ * dev->done.lock here with list->lock held.
+ */
+ spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
+
__skb_queue_tail(&dev->done, skb);
if (dev->done.qlen == 1)
tasklet_schedule(&dev->bh);
- spin_unlock_irqrestore(&dev->done.lock, flags);
+ spin_unlock(&dev->done.lock);
+ spin_unlock_irqrestore(&list->lock, flags);
+ return old_state;
}
/* some work can't be done in tasklets, so we use keventd
@@ -324,12 +337,19 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
unsigned long lockflags;
size_t size = dev->rx_urb_size;
+ /* prevent rx skb allocation when error ratio is high */
+ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+ usb_free_urb(urb);
+ return -ENOLINK;
+ }
+
if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
usb_free_urb (urb);
return -ENOMEM;
}
+ /* NOTE(review): skb_reserve may cause a memleak here -- verify */
skb_reserve (skb, NET_IP_ALIGN);
entry = (struct skb_data *) skb->cb;
@@ -337,10 +357,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
entry->dev = dev;
entry->state = rx_start;
entry->length = 0;
-
usb_fill_bulk_urb (urb, dev->udev, dev->in,
skb->data, size, rx_complete, skb);
-
spin_lock_irqsave (&dev->rxq.lock, lockflags);
if (netif_running (dev->net) &&
@@ -395,44 +413,41 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
}
// else network stack removes extra byte if we forced a short packet
- if (skb->len) {
- /* all data was already cloned from skb inside the driver */
- if (dev->driver_info->flags & FLAG_MULTI_PACKET)
- dev_kfree_skb_any(skb);
- else
- usbnet_skb_return(dev, skb);
+ /* all data was already cloned from skb inside the driver */
+ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+ goto done;
+
+ if (skb->len < ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
+ } else {
+ usbnet_skb_return(dev, skb);
return;
}
- netif_dbg(dev, rx_err, dev->net, "drop\n");
- dev->net->stats.rx_errors++;
done:
skb_queue_tail(&dev->done, skb);
}
/*-------------------------------------------------------------------------*/
+
static void rx_complete (struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct skb_data *entry = (struct skb_data *) skb->cb;
struct usbnet *dev = entry->dev;
int urb_status = urb->status;
+ enum skb_state state;
skb_put (skb, urb->actual_length);
- entry->state = rx_done;
+ state = rx_done;
entry->urb = NULL;
switch (urb_status) {
/* success */
case 0:
- if (skb->len < dev->net->hard_header_len) {
- entry->state = rx_cleanup;
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- netif_dbg(dev, rx_err, dev->net,
- "rx length %d\n", skb->len);
- }
break;
/* stalls need manual reset. this is rare ... except that
@@ -452,9 +467,9 @@ static void rx_complete (struct urb *urb)
"rx shutdown, code %d\n", urb_status);
goto block;
- /* we get controller i/o faults during khubd disconnect() delays.
+ /* we get controller i/o faults during hub_wq disconnect() delays.
* throttle down resubmits, to avoid log floods; just temporarily,
- * so we still recover when the fault isn't a khubd delay.
+ * so we still recover when the fault isn't a hub_wq delay.
*/
case -EPROTO:
case -ETIME:
@@ -466,7 +481,7 @@ static void rx_complete (struct urb *urb)
"rx throttle %d\n", urb_status);
}
block:
- entry->state = rx_cleanup;
+ state = rx_cleanup;
entry->urb = urb;
urb = NULL;
break;
@@ -477,25 +492,37 @@ block:
// FALLTHROUGH
default:
- entry->state = rx_cleanup;
+ state = rx_cleanup;
dev->net->stats.rx_errors++;
netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
break;
}
- defer_bh(dev, skb, &dev->rxq);
+ /* stop rx if packet error rate is high */
+ if (++dev->pkt_cnt > 30) {
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ } else {
+ if (state == rx_cleanup)
+ dev->pkt_err++;
+ if (dev->pkt_err > 20)
+ set_bit(EVENT_RX_KILL, &dev->flags);
+ }
+
+ state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) {
if (netif_running (dev->net) &&
- !test_bit (EVENT_RX_HALT, &dev->flags)) {
+ !test_bit (EVENT_RX_HALT, &dev->flags) &&
+ state != unlink_start) {
rx_submit (dev, urb, GFP_ATOMIC);
+ usb_mark_last_busy(dev->udev);
return;
}
usb_free_urb (urb);
}
netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
-
static void intr_complete (struct urb *urb)
{
struct usbnet *dev = urb->context;
@@ -573,18 +600,34 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
unsigned long flags;
- struct sk_buff *skb, *skbnext;
+ struct sk_buff *skb;
int count = 0;
spin_lock_irqsave (&q->lock, flags);
- skb_queue_walk_safe(q, skb, skbnext) {
+ while (!skb_queue_empty(q)) {
struct skb_data *entry;
struct urb *urb;
int retval;
- entry = (struct skb_data *) skb->cb;
+ skb_queue_walk(q, skb) {
+ entry = (struct skb_data *) skb->cb;
+ if (entry->state != unlink_start)
+ goto found;
+ }
+ break;
+found:
+ entry->state = unlink_start;
urb = entry->urb;
+ /*
+ * Get reference count of the URB to avoid it to be
+ * freed during usb_unlink_urb, which may trigger
+ * use-after-free problem inside usb_unlink_urb since
+ * usb_unlink_urb is always racing with .complete
+ * handler(include defer_bh).
+ */
+ usb_get_urb(urb);
+ spin_unlock_irqrestore(&q->lock, flags);
// during some PM-driven resume scenarios,
// these (async) unlinks complete immediately
retval = usb_unlink_urb (urb);
@@ -592,6 +635,8 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
count++;
+ usb_put_urb(urb);
+ spin_lock_irqsave(&q->lock, flags);
}
spin_unlock_irqrestore (&q->lock, flags);
return count;
@@ -710,7 +755,6 @@ int usbnet_open (struct net_device *net)
goto done_nopm;
}
- // put into "known safe" state
if (info->reset && (retval = info->reset (dev)) < 0) {
netif_info(dev, ifup, dev->net,
"open reset fail (%d) usbnet usb-%s-%s, %s\n",
@@ -750,6 +794,11 @@ int usbnet_open (struct net_device *net)
(dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
"simple");
+ /* reset rx error state */
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
if (info->manage_power) {
@@ -884,7 +933,6 @@ kevent (struct work_struct *work)
container_of(work, struct usbnet, kevent);
int status;
- /* usb_clear_halt() needs a thread context */
if (test_bit (EVENT_TX_HALT, &dev->flags)) {
unlink_urbs (dev, &dev->txq);
status = usb_autopm_get_interface(dev->intf);
@@ -1023,8 +1071,8 @@ static void tx_complete (struct urb *urb)
usb_autopm_put_interface_async(dev->intf);
urb->dev = NULL;
- entry->state = tx_done;
- defer_bh(dev, skb, &dev->txq);
+
+ (void) defer_bh(dev, skb, &dev->txq, tx_done);
}
/*-------------------------------------------------------------------------*/
@@ -1082,7 +1130,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
usb_fill_bulk_urb (urb, dev->udev, dev->out,
skb->data, skb->len, tx_complete, skb);
-
+ //printk("\n [tx] user complete add is %x\n",(unsigned int)tx_complete);
/* don't assume the hardware handles USB_ZERO_PACKET
* NOTE: strictly conforming cdc-ether devices should expect
* the ZLP here, but ignore the one-byte packet.
@@ -1149,7 +1197,10 @@ drop:
not_drop:
if (skb)
dev_kfree_skb_any (skb);
- usb_free_urb (urb);
+ if (urb) {
+ kfree(urb->sg);
+ usb_free_urb(urb);
+ }
} else
netif_dbg(dev, tx_queued, dev->net,
"> tx, len %d, type 0x%x\n", length, skb->protocol);
@@ -1162,7 +1213,7 @@ EXPORT_SYMBOL_GPL(usbnet_start_xmit);
/*-------------------------------------------------------------------------*/
-// tasklet (work deferred from completions, in_irq) or timer
+
static void usbnet_bh (unsigned long param)
{
@@ -1174,11 +1225,14 @@ static void usbnet_bh (unsigned long param)
entry = (struct skb_data *) skb->cb;
switch (entry->state) {
case rx_done:
+
entry->state = rx_cleanup;
rx_process (dev, skb);
continue;
case tx_done:
+ kfree(entry->urb->sg);
case rx_cleanup:
+
usb_free_urb (entry->urb);
dev_kfree_skb (skb);
continue;
@@ -1187,7 +1241,9 @@ static void usbnet_bh (unsigned long param)
}
}
- // waiting for all pending urbs to complete?
+ /* restart RX again after disabling due to high error rate */
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
if (dev->wait) {
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
wake_up (dev->wait);
@@ -1196,6 +1252,7 @@ static void usbnet_bh (unsigned long param)
// or are we maybe short a few urbs?
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
+ netif_carrier_ok(dev->net) &&
!timer_pending (&dev->delay) &&
!test_bit (EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
@@ -1205,7 +1262,6 @@ static void usbnet_bh (unsigned long param)
struct urb *urb;
int i;
- // don't refill the queue all at once
for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
urb = usb_alloc_urb (0, GFP_ATOMIC);
if (urb != NULL) {
@@ -1218,12 +1274,17 @@ static void usbnet_bh (unsigned long param)
netif_dbg(dev, link, dev->net,
"rxqlen %d --> %d\n",
temp, dev->rxq.qlen);
- if (dev->rxq.qlen < qlen)
+ if (dev->rxq.qlen < qlen){
tasklet_schedule (&dev->bh);
+ }
+
}
- if (dev->txq.qlen < TX_QLEN (dev))
+ if (dev->txq.qlen < TX_QLEN (dev)){
netif_wake_queue (dev->net);
+ }
+
}
+
}
@@ -1237,35 +1298,34 @@ static void usbnet_bh (unsigned long param)
void usbnet_disconnect (struct usb_interface *intf)
{
- struct usbnet *dev;
- struct usb_device *xdev;
- struct net_device *net;
+ struct usbnet *dev;
+ struct usb_device *xdev;
+ struct net_device *net;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
if (!dev)
return;
- xdev = interface_to_usbdev (intf);
+ xdev = interface_to_usbdev(intf);
netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
- intf->dev.driver->name,
- xdev->bus->bus_name, xdev->devpath,
- dev->driver_info->description);
+ intf->dev.driver->name, xdev->bus->bus_name,
+ xdev->devpath, dev->driver_info->description);
net = dev->net;
- unregister_netdev (net);
+ unregister_netdev(net);
cancel_work_sync(&dev->kevent);
if (dev->driver_info->unbind)
- dev->driver_info->unbind (dev, intf);
+ dev->driver_info->unbind(dev, intf);
usb_kill_urb(dev->interrupt);
usb_free_urb(dev->interrupt);
free_netdev(net);
- usb_put_dev (xdev);
+ usb_put_dev(xdev);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1325,7 +1385,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
status = -ENOMEM;
- // set up our own records
+
net = alloc_etherdev(sizeof(*dev));
if (!net) {
dbg ("can't kmalloc dev");
@@ -1374,16 +1434,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
net->ethtool_ops = &usbnet_ethtool_ops;
- // allow device-specific bind/init procedures
- // NOTE net->name still not usable ...
+
if (info->bind) {
status = info->bind (dev, udev);
if (status < 0)
goto out1;
- // heuristic: "usb%d" for links we know are two-host,
- // else "eth%d" when there's reasonable doubt. userspace
- // can rename the link if it knows better.
if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
(net->dev_addr [0] & 0x02) == 0))
@@ -1411,11 +1467,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
status = 0;
}
+#if(1)
if (status >= 0 && dev->status)
status = init_status (dev, udev);
if (status < 0)
goto out3;
-
+#endif
if (!dev->rx_urb_size)
dev->rx_urb_size = dev->hard_mtu;
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
@@ -1435,7 +1492,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->driver_info->description,
net->dev_addr);
- // ok, it's ready to go.
+
usb_set_intfdata (udev, dev);
netif_device_attach (net);
@@ -1447,8 +1504,15 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
out3:
if (info->unbind)
- info->unbind (dev, udev);
+ info->unbind(dev, udev);
out1:
+ /* subdrivers must undo all they did in bind() if they
+ * fail it, but we may fail later and a deferred kevent
+ * may trigger an error resubmitting itself and, worse,
+ * schedule a timer. So we kill it all just in case.
+ */
+ cancel_work_sync(&dev->kevent);
+ del_timer_sync(&dev->delay);
free_netdev(net);
out:
usb_put_dev(xdev);
@@ -1465,7 +1529,7 @@ EXPORT_SYMBOL_GPL(usbnet_probe);
int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
{
- struct usbnet *dev = usb_get_intfdata(intf);
+ struct usbnet *dev = usb_get_intfdata(intf);
if (!dev->suspend_count++) {
spin_lock_irq(&dev->txq.lock);
@@ -1481,7 +1545,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
* accelerate emptying of the rx and queues, to avoid
* having everything error out.
*/
- netif_device_detach (dev->net);
+ netif_device_detach(dev->net);
usbnet_terminate_urbs(dev);
usb_kill_urb(dev->interrupt);
@@ -1489,7 +1553,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
* reattach so runtime management can use and
* wake the device
*/
- netif_device_attach (dev->net);
+ netif_device_attach(dev->net);
}
return 0;
}
@@ -1497,10 +1561,10 @@ EXPORT_SYMBOL_GPL(usbnet_suspend);
int usbnet_resume (struct usb_interface *intf)
{
- struct usbnet *dev = usb_get_intfdata(intf);
- struct sk_buff *skb;
- struct urb *res;
- int retval;
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct sk_buff *skb;
+ struct urb *res;
+ int retval;
if (!--dev->suspend_count) {
/* resume interrupt URBs */
@@ -1510,10 +1574,11 @@ int usbnet_resume (struct usb_interface *intf)
spin_lock_irq(&dev->txq.lock);
while ((res = usb_get_from_anchor(&dev->deferred))) {
- skb = (struct sk_buff *)res->context;
+ skb = (struct sk_buff *) res->context;
retval = usb_submit_urb(res, GFP_ATOMIC);
if (retval < 0) {
dev_kfree_skb_any(skb);
+ kfree(res->sg);
usb_free_urb(res);
usb_autopm_put_interface_async(dev->intf);
} else {
@@ -1529,7 +1594,7 @@ int usbnet_resume (struct usb_interface *intf)
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_start_queue(dev->net);
- tasklet_schedule (&dev->bh);
+ tasklet_schedule(&dev->bh);
}
}
return 0;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
old mode 100644
new mode 100755
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
old mode 100644
new mode 100755
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
new file mode 100644
index 00000000..8381f7c0
--- /dev/null
+++ b/drivers/pwm/Kconfig
@@ -0,0 +1,83 @@
+menuconfig PWM
+ bool "Pulse-Width Modulation (PWM) Support"
+ help
+ Generic Pulse-Width Modulation (PWM) support.
+
+ In Pulse-Width Modulation, a variation of the width of pulses
+ in a rectangular pulse signal is used as a means to alter the
+ average power of the signal. Applications include efficient
+ power delivery and voltage regulation. In computer systems,
+ PWMs are commonly used to control fans or the brightness of
+ display backlights.
+
+ This framework provides a generic interface to PWM devices
+ within the Linux kernel. On the driver side it provides an API
+ to register and unregister a PWM chip, an abstraction of a PWM
+ controller, that supports one or more PWM devices. Client
+ drivers can request PWM devices and use the generic framework
+ to configure as well as enable and disable them.
+
+ This generic framework replaces the legacy PWM framework which
+ allows only a single driver implementing the required API. Not
+ all legacy implementations have been ported to the framework
+ yet. The framework provides an API that is backward compatible
+ with the legacy framework so that existing client drivers
+ continue to work as expected.
+
+ If unsure, say no.
+
+if PWM
+
+config PWM_FULLHAN
+ tristate "FH PWM support"
+ help
+ To compile this driver as a module, choose M here: the module will
+ be called fh_pwm.
+
+if PWM_FULLHAN
+if ARCH_FH8626V100
+config FH_PWM_NUM
+ int
+ prompt "Number of PWMs, range: 1~14"
+ default 2
+ range 1 14
+ help
+ Number of PWMs
+
+config PWM_FULLHAN_V21
+ bool
+ default y
+
+endif
+
+if ARCH_FH8856
+config FH_PWM_NUM
+ int
+ prompt "Number of PWMs, range: 1~8"
+ default 2
+ range 1 8
+ help
+ Number of PWMs
+
+config PWM_FULLHAN_V20
+ bool
+ default y
+endif
+
+if ARCH_FH8830
+config FH_PWM_NUM
+ int
+ prompt "Number of PWMs, range: 1~8"
+ default 2
+ range 1 8
+ help
+ Number of PWMs
+
+config PWM_FULLHAN_V20
+ bool
+ default y
+endif
+
+endif
+
+endif
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
new file mode 100644
index 00000000..64d42f52
--- /dev/null
+++ b/drivers/pwm/Makefile
@@ -0,0 +1,20 @@
+obj-$(CONFIG_PWM) += core.o
+obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
+obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
+obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
+obj-$(CONFIG_PWM_IMX) += pwm-imx.o
+obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
+obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
+obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
+obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
+obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
+obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
+obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
+obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
+obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
+obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
+obj-$(CONFIG_PWM_TIPWMSS) += pwm-tipwmss.o
+obj-$(CONFIG_PWM_TWL) += pwm-twl.o
+obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
+obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
+obj-$(CONFIG_PWM_FULLHAN) += pwmv2-fullhan.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
new file mode 100644
index 00000000..01e8aba6
--- /dev/null
+++ b/drivers/pwm/core.c
@@ -0,0 +1,642 @@
+/*
+ * Generic pwmlib implementation
+ *
+ * Copyright (C) 2011 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2011-2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/radix-tree.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define MAX_PWMS 1024
+
+/* flags in the third cell of the DT PWM specifier */
+#define PWM_SPEC_POLARITY (1 << 0)
+
+static DEFINE_MUTEX(pwm_lookup_lock);
+static LIST_HEAD(pwm_lookup_list);
+static DEFINE_MUTEX(pwm_lock);
+static LIST_HEAD(pwm_chips);
+static DECLARE_BITMAP(allocated_pwms, MAX_PWMS);
+static RADIX_TREE(pwm_tree, GFP_KERNEL);
+
+static struct pwm_device *pwm_to_device(unsigned int pwm)
+{
+ return radix_tree_lookup(&pwm_tree, pwm);
+}
+
+static int alloc_pwms(int pwm, unsigned int count)
+{
+ unsigned int from = 0;
+ unsigned int start;
+
+ if (pwm >= MAX_PWMS)
+ return -EINVAL;
+
+ if (pwm >= 0)
+ from = pwm;
+
+ start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from,
+ count, 0);
+
+ if (pwm >= 0 && start != pwm)
+ return -EEXIST;
+
+ if (start + count > MAX_PWMS)
+ return -ENOSPC;
+
+ return start;
+}
+
+static void free_pwms(struct pwm_chip *chip)
+{
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+ radix_tree_delete(&pwm_tree, pwm->pwm);
+ }
+
+ bitmap_clear(allocated_pwms, chip->base, chip->npwm);
+
+ kfree(chip->pwms);
+ chip->pwms = NULL;
+}
+
+static struct pwm_chip *pwmchip_find_by_name(const char *name)
+{
+ struct pwm_chip *chip;
+
+ if (!name)
+ return NULL;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(chip, &pwm_chips, list) {
+ const char *chip_name = dev_name(chip->dev);
+
+ if (chip_name && strcmp(chip_name, name) == 0) {
+ mutex_unlock(&pwm_lock);
+ return chip;
+ }
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ return NULL;
+}
+
+static int pwm_device_request(struct pwm_device *pwm, const char *label)
+{
+ int err;
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags))
+ return -EBUSY;
+
+ if (!try_module_get(pwm->chip->ops->owner))
+ return -ENODEV;
+
+ if (pwm->chip->ops->request) {
+ err = pwm->chip->ops->request(pwm->chip, pwm);
+ if (err) {
+ module_put(pwm->chip->ops->owner);
+ return err;
+ }
+ }
+
+ set_bit(PWMF_REQUESTED, &pwm->flags);
+ pwm->label = label;
+
+ return 0;
+}
+
+/**
+ * pwm_set_chip_data() - set private chip data for a PWM
+ * @pwm: PWM device
+ * @data: pointer to chip-specific data
+ */
+int pwm_set_chip_data(struct pwm_device *pwm, void *data)
+{
+ if (!pwm)
+ return -EINVAL;
+
+ pwm->chip_data = data;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pwm_set_chip_data);
+
+/**
+ * pwm_get_chip_data() - get private chip data for a PWM
+ * @pwm: PWM device
+ */
+void *pwm_get_chip_data(struct pwm_device *pwm)
+{
+ return pwm ? pwm->chip_data : NULL;
+}
+EXPORT_SYMBOL_GPL(pwm_get_chip_data);
+
+/**
+ * pwmchip_add() - register a new PWM chip
+ * @chip: the PWM chip to add
+ *
+ * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
+ * will be used.
+ */
+int pwmchip_add(struct pwm_chip *chip)
+{
+ struct pwm_device *pwm;
+ unsigned int i;
+ int ret;
+
+ if (!chip || !chip->dev || !chip->ops || !chip->ops->config ||
+ !chip->ops->enable || !chip->ops->disable)
+ return -EINVAL;
+
+ mutex_lock(&pwm_lock);
+
+ ret = alloc_pwms(chip->base, chip->npwm);
+ if (ret < 0)
+ goto out;
+
+ chip->pwms = kzalloc(chip->npwm * sizeof(*pwm), GFP_KERNEL);
+ if (!chip->pwms) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ chip->base = ret;
+
+ for (i = 0; i < chip->npwm; i++) {
+ pwm = &chip->pwms[i];
+
+ pwm->chip = chip;
+ pwm->pwm = chip->base + i;
+ pwm->hwpwm = i;
+
+ radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
+ }
+
+ bitmap_set(allocated_pwms, chip->base, chip->npwm);
+
+ INIT_LIST_HEAD(&chip->list);
+ list_add(&chip->list, &pwm_chips);
+
+ ret = 0;
+out:
+ mutex_unlock(&pwm_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pwmchip_add);
+
+/**
+ * pwmchip_remove() - remove a PWM chip
+ * @chip: the PWM chip to remove
+ *
+ * Removes a PWM chip. This function may return busy if the PWM chip provides
+ * a PWM device that is still requested.
+ */
+int pwmchip_remove(struct pwm_chip *chip)
+{
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&pwm_lock);
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ list_del_init(&chip->list);
+
+ free_pwms(chip);
+
+out:
+ mutex_unlock(&pwm_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pwmchip_remove);
+
+/**
+ * pwm_request() - request a PWM device
+ * @pwm_id: global PWM device index
+ * @label: PWM device label
+ *
+ * This function is deprecated, use pwm_get() instead.
+ */
+struct pwm_device *pwm_request(int pwm, const char *label)
+{
+ struct pwm_device *dev;
+ int err;
+
+ if (pwm < 0 || pwm >= MAX_PWMS)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&pwm_lock);
+
+ dev = pwm_to_device(pwm);
+ if (!dev) {
+ dev = ERR_PTR(-ENODEV);
+ goto out;
+ }
+
+ err = pwm_device_request(dev, label);
+ if (err < 0)
+ dev = ERR_PTR(err);
+
+out:
+ mutex_unlock(&pwm_lock);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(pwm_request);
+
+/**
+ * pwm_request_from_chip() - request a PWM device relative to a PWM chip
+ * @chip: PWM chip
+ * @index: per-chip index of the PWM to request
+ * @label: a literal description string of this PWM
+ *
+ * Returns the PWM at the given index of the given PWM chip. A negative error
+ * code is returned if the index is not valid for the specified PWM chip or
+ * if the PWM device cannot be requested.
+ */
+struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label)
+{
+ struct pwm_device *pwm;
+ int err;
+
+ if (!chip || index >= chip->npwm)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&pwm_lock);
+ pwm = &chip->pwms[index];
+
+ err = pwm_device_request(pwm, label);
+ if (err < 0)
+ pwm = ERR_PTR(err);
+
+ mutex_unlock(&pwm_lock);
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_request_from_chip);
+
+/**
+ * pwm_free() - free a PWM device
+ * @pwm: PWM device
+ *
+ * This function is deprecated, use pwm_put() instead.
+ */
+void pwm_free(struct pwm_device *pwm)
+{
+ pwm_put(pwm);
+}
+EXPORT_SYMBOL_GPL(pwm_free);
+
+/**
+ * pwm_config() - change a PWM device configuration
+ * @pwm: PWM device
+ * @duty_ns: "on" time (in nanoseconds)
+ * @period_ns: duration (in nanoseconds) of one cycle
+ */
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
+}
+EXPORT_SYMBOL_GPL(pwm_config);
+
+/**
+ * pwm_set_polarity() - configure the polarity of a PWM signal
+ * @pwm: PWM device
+ * @polarity: new polarity of the PWM signal
+ *
+ * Note that the polarity cannot be configured while the PWM device is enabled
+ */
+int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
+{
+ if (!pwm || !pwm->chip->ops)
+ return -EINVAL;
+
+ if (!pwm->chip->ops->set_polarity)
+ return -ENOSYS;
+
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
+ return -EBUSY;
+
+ return pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
+}
+EXPORT_SYMBOL_GPL(pwm_set_polarity);
+
+/**
+ * pwm_enable() - start a PWM output toggling
+ * @pwm: PWM device
+ */
+int pwm_enable(struct pwm_device *pwm)
+{
+ if (pwm && !test_and_set_bit(PWMF_ENABLED, &pwm->flags))
+ return pwm->chip->ops->enable(pwm->chip, pwm);
+
+ return pwm ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(pwm_enable);
+
+/**
+ * pwm_disable() - stop a PWM output toggling
+ * @pwm: PWM device
+ */
+void pwm_disable(struct pwm_device *pwm)
+{
+ if (pwm && test_and_clear_bit(PWMF_ENABLED, &pwm->flags))
+ pwm->chip->ops->disable(pwm->chip, pwm);
+}
+EXPORT_SYMBOL_GPL(pwm_disable);
+
+/**
+ * pwm_add_table() - register PWM device consumers
+ * @table: array of consumers to register
+ * @num: number of consumers in table
+ */
+void __init pwm_add_table(struct pwm_lookup *table, size_t num)
+{
+ mutex_lock(&pwm_lookup_lock);
+
+ while (num--) {
+ list_add_tail(&table->list, &pwm_lookup_list);
+ table++;
+ }
+
+ mutex_unlock(&pwm_lookup_lock);
+}
+
+/**
+ * pwm_get() - look up and request a PWM device
+ * @dev: device for PWM consumer
+ * @con_id: consumer name
+ *
+ * Lookup is first attempted using DT. If the device was not instantiated from
+ * a device tree, a PWM chip and a relative index is looked up via a table
+ * supplied by board setup code (see pwm_add_table()).
+ *
+ * Once a PWM chip has been found the specified PWM device will be requested
+ * and is ready to be used.
+ */
+struct pwm_device *pwm_get(struct device *dev, const char *con_id)
+{
+ struct pwm_device *pwm = ERR_PTR(-ENODEV);
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct pwm_chip *chip = NULL;
+ unsigned int index = 0;
+ unsigned int best = 0;
+ struct pwm_lookup *p;
+ unsigned int match;
+
+ /*
+ * We look up the provider in the static table typically provided by
+ * board setup code. We first try to lookup the consumer device by
+ * name. If the consumer device was passed in as NULL or if no match
+ * was found, we try to find the consumer by directly looking it up
+ * by name.
+ *
+ * If a match is found, the provider PWM chip is looked up by name
+ * and a PWM device is requested using the PWM device per-chip index.
+ *
+ * The lookup algorithm was shamelessly taken from the clock
+ * framework:
+ *
+ * We do slightly fuzzy matching here:
+ * An entry with a NULL ID is assumed to be a wildcard.
+ * If an entry has a device ID, it must match
+ * If an entry has a connection ID, it must match
+ * Then we take the most specific entry - with the following order
+ * of precedence: dev+con > dev only > con only.
+ */
+ mutex_lock(&pwm_lookup_lock);
+
+ list_for_each_entry(p, &pwm_lookup_list, list) {
+ match = 0;
+
+ if (p->dev_id) {
+ if (!dev_id || strcmp(p->dev_id, dev_id))
+ continue;
+
+ match += 2;
+ }
+
+ if (p->con_id) {
+ if (!con_id || strcmp(p->con_id, con_id))
+ continue;
+
+ match += 1;
+ }
+
+ if (match > best) {
+ chip = pwmchip_find_by_name(p->provider);
+ index = p->index;
+
+ if (match != 3)
+ best = match;
+ else
+ break;
+ }
+ }
+
+ if (chip)
+ pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id);
+
+ mutex_unlock(&pwm_lookup_lock);
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_get);
+
+/**
+ * pwm_put() - release a PWM device
+ * @pwm: PWM device
+ */
+void pwm_put(struct pwm_device *pwm)
+{
+ if (!pwm)
+ return;
+
+ mutex_lock(&pwm_lock);
+
+ if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) {
+ pr_warn("PWM device already freed\n");
+ goto out;
+ }
+
+ if (pwm->chip->ops->free)
+ pwm->chip->ops->free(pwm->chip, pwm);
+
+ pwm->label = NULL;
+
+ module_put(pwm->chip->ops->owner);
+out:
+ mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL_GPL(pwm_put);
+
+static void devm_pwm_release(struct device *dev, void *res)
+{
+ pwm_put(*(struct pwm_device **)res);
+}
+
+/**
+ * devm_pwm_get() - resource managed pwm_get()
+ * @dev: device for PWM consumer
+ * @con_id: consumer name
+ *
+ * This function performs like pwm_get() but the acquired PWM device will
+ * automatically be released on driver detach.
+ */
+struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
+{
+ struct pwm_device **ptr, *pwm;
+
+ ptr = devres_alloc(devm_pwm_release, sizeof(**ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ pwm = pwm_get(dev, con_id);
+ if (!IS_ERR(pwm)) {
+ *ptr = pwm;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(devm_pwm_get);
+
+/**
+ * pwm_can_sleep() - report whether PWM access will sleep
+ * @pwm: PWM device
+ *
+ * It returns true if accessing the PWM can sleep, false otherwise.
+ */
+bool pwm_can_sleep(struct pwm_device *pwm)
+{
+ return pwm->chip->can_sleep;
+}
+EXPORT_SYMBOL_GPL(pwm_can_sleep);
+
+#ifdef CONFIG_DEBUG_FS
+static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
+{
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+
+ seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label);
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags))
+ seq_printf(s, " requested");
+
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
+ seq_printf(s, " enabled");
+
+ seq_printf(s, "\n");
+ }
+}
+
+static void *pwm_seq_start(struct seq_file *s, loff_t *pos)
+{
+ mutex_lock(&pwm_lock);
+ s->private = "";
+
+ return seq_list_start(&pwm_chips, *pos);
+}
+
+static void *pwm_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ s->private = "\n";
+
+ return seq_list_next(v, &pwm_chips, pos);
+}
+
+static void pwm_seq_stop(struct seq_file *s, void *v)
+{
+ mutex_unlock(&pwm_lock);
+}
+
+static int pwm_seq_show(struct seq_file *s, void *v)
+{
+ struct pwm_chip *chip = list_entry(v, struct pwm_chip, list);
+
+ seq_printf(s, "%s%s/%s, %d PWM device%s\n", (char *)s->private,
+ chip->dev->bus ? chip->dev->bus->name : "no-bus",
+ dev_name(chip->dev), chip->npwm,
+ (chip->npwm != 1) ? "s" : "");
+
+ if (chip->ops->dbg_show)
+ chip->ops->dbg_show(chip, s);
+ else
+ pwm_dbg_show(chip, s);
+
+ return 0;
+}
+
+static const struct seq_operations pwm_seq_ops = {
+ .start = pwm_seq_start,
+ .next = pwm_seq_next,
+ .stop = pwm_seq_stop,
+ .show = pwm_seq_show,
+};
+
+static int pwm_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &pwm_seq_ops);
+}
+
+static const struct file_operations pwm_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = pwm_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init pwm_debugfs_init(void)
+{
+ debugfs_create_file("pwm", S_IFREG | S_IRUGO, NULL, NULL,
+ &pwm_debugfs_ops);
+
+ return 0;
+}
+
+subsys_initcall(pwm_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/pwm/pwm-fullhan.c b/drivers/pwm/pwm-fullhan.c
new file mode 100644
index 00000000..b1f3c618
--- /dev/null
+++ b/drivers/pwm/pwm-fullhan.c
@@ -0,0 +1,607 @@
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+#include <linux/printk.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/decompress/mm.h>
+#include <linux/of_address.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/uaccess.h>
+#include "pwm-fullhan.h"
+
+#undef FH_PWM_DEBUG
+#ifdef FH_PWM_DEBUG
+#define PRINT_DBG(fmt,args...) printk(fmt,##args)
+#else
+#define PRINT_DBG(fmt,args...) do{} while(0)
+#endif
+
+/* Driver-wide state: PWM core chip, MMIO base, bus clock, /proc entry. */
+struct fh_pwm_chip {
+	struct pwm_chip chip;
+	void __iomem *base;
+	struct clk *clk;
+	struct proc_dir_entry *proc_file;
+};
+
+/* Single global instance: this SoC has one PWM controller. */
+struct fh_pwm_chip fh_pwm = {
+
+};
+
+
+/*
+ * Read back the duty/period counts of channel @chip_data->id from its
+ * command register and convert them to nanoseconds in @chip_data.
+ *
+ * Register layout (see pwm_set_duty_cycle_ns): bits [11:0] hold the
+ * period count; bits [27:16] hold the *inverted* duty count, except for
+ * the 100%-duty encoding where the raw count sits in the duty field and
+ * the period field is zero.
+ *
+ * Fixes vs. original:
+ *  - "count * 1000000000 / clk_rate" overflowed u32 for any count > 4;
+ *    multiply by the nanoseconds-per-tick instead (mirrors the setter);
+ *  - the 100%-duty decode computed "0 - duty", wrapping to a huge value
+ *    that was then reused as the period;
+ *  - guard against clk_get_rate() returning 0 before dividing by it.
+ */
+static int pwm_get_duty_cycle_ns(struct fh_pwm_chip_data *chip_data)
+{
+	u32 reg, period, duty, ns_per_tick;
+	u32 clk_rate = clk_get_rate(fh_pwm.clk);
+
+	if (!clk_rate) {
+		pr_err("PWM: clock rate is 0\n");
+		return -EINVAL;
+	}
+
+	reg = readl(fh_pwm.base + REG_PWM_CMD(chip_data->id));
+	period = reg & 0x0fff;
+	duty = (reg >> 16) & 0xfff;
+
+	if (period == 0) {
+		/* 100% duty: the setter stored the raw count in the duty
+		 * field and zeroed the period field. */
+		period = duty;
+	} else {
+		/* normal case: the duty field holds the inverted count */
+		duty = period - duty;
+	}
+
+	ns_per_tick = 1000000000 / clk_rate;
+	chip_data->counter_ns = duty * ns_per_tick;
+	chip_data->period_ns = period * ns_per_tick;
+
+	PRINT_DBG("get duty: %d, period: %d, reg: 0x%x\n", duty, period, reg);
+
+	return 0;
+}
+
+/*
+ * Program channel @chip_data->id from the nanosecond values in @chip_data.
+ * The command register packs the period count in bits [11:0] and the
+ * *inverted* duty count in bits [27:16]; 100% duty is encoded with the
+ * raw count in the duty field and a zero period field.
+ *
+ * NOTE(review): "ns / (1000000000 / clk_rate)" loses precision whenever
+ * clk_rate does not divide 1e9 evenly — confirm acceptable for the clock
+ * rates used on this SoC.
+ */
+static int pwm_set_duty_cycle_ns(struct fh_pwm_chip_data *chip_data)
+{
+	u32 period, duty, reg, clk_rate, duty_revert;
+	clk_rate = clk_get_rate(fh_pwm.clk);
+	if (!clk_rate) {
+		pr_err("PWM: clock rate is 0\n");
+		return -EINVAL;
+	}
+	period = chip_data->period_ns / (1000000000 / clk_rate);
+
+	/* hardware minimum period count */
+	if (period < 8) {
+		pr_err("PWM: min period is 8\n");
+		return -EINVAL;
+	}
+
+	duty = chip_data->counter_ns / (1000000000 / clk_rate);
+
+	if (period < duty) {
+		pr_err("PWM: period < duty\n");
+		return -EINVAL;
+	}
+
+	duty_revert = period - duty;
+
+	/* 100% duty: raw count in the duty field, period field zeroed */
+	if (duty == period)
+		reg = (duty & 0xfff) << 16 | (0 & 0xfff);
+	else
+		reg = (duty_revert & 0xfff) << 16 | (period & 0xfff);
+
+	PRINT_DBG("set duty_revert: %d, period: %d, reg: 0x%x\n", duty_revert, period, reg);
+
+	writel(reg, fh_pwm.base + REG_PWM_CMD(chip_data->id));
+	return 0;
+}
+
+
+/*
+ * Program the channel's duty cycle as chip_data->percent (0-100) of
+ * chip_data->period_ns.
+ *
+ * Fix vs. original: the duty count was computed as period * 100 / percent,
+ * which inverts the ratio (50% became 200%, and percent == 0 divided by
+ * zero).  The correct scaling is period * percent / 100.
+ */
+static int pwm_set_duty_cycle_percent(struct fh_pwm_chip_data *chip_data)
+{
+	u32 period, duty, reg, clk_rate, duty_revert;
+	clk_rate = clk_get_rate(fh_pwm.clk);
+	if (!clk_rate) {
+		pr_err("PWM: clock rate is 0\n");
+		return -EINVAL;
+	}
+
+	if (chip_data->percent > 100 || chip_data->percent < 0) {
+		pr_err("PWM: pwm->percent is out of range\n");
+		return -EINVAL;
+	}
+
+	period = chip_data->period_ns / (1000000000 / clk_rate);
+
+	if (period < 8) {
+		pr_err("PWM: min period is 8\n");
+		return -EINVAL;
+	}
+
+	/* period fits in 12 bits, so period * 100 cannot overflow u32 */
+	duty = period * chip_data->percent / 100;
+
+	if (period < duty) {
+		pr_err("PWM: period < duty\n");
+		return -EINVAL;
+	}
+
+	duty_revert = period - duty;
+
+	/* 100% duty: raw count in the duty field, period field zeroed */
+	if (duty == period)
+		reg = (duty & 0xfff) << 16 | (0 & 0xfff);
+	else
+		reg = (duty_revert & 0xfff) << 16 | (period & 0xfff);
+
+	PRINT_DBG("set duty_revert: %d, period: %d, reg: 0x%x\n", duty_revert, period, reg);
+
+	writel(reg, fh_pwm.base + REG_PWM_CMD(chip_data->id));
+	return 0;
+}
+
+
+/*
+ * pwm_ops->config callback: record the requested timing in the channel's
+ * chip data and program the hardware.
+ *
+ * Fix vs. original: the result of pwm_set_duty_cycle_ns() was discarded,
+ * so invalid periods were reported to the PWM core as success.
+ */
+int fh_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+		int duty_ns, int period_ns)
+{
+	struct fh_pwm_chip_data *chip_data;
+
+	chip_data = pwm_get_chip_data(pwm);
+	if (!chip_data) {
+		pr_err("%s: ERROR: PWM %d does NOT exist\n",
+				__func__, pwm->hwpwm);
+		return -ENXIO;
+	}
+	chip_data->counter_ns = duty_ns;
+	chip_data->period_ns = period_ns;
+	return pwm_set_duty_cycle_ns(chip_data);
+}
+
+/*
+ * Mark @chip_data's channel as running, then rebuild the global enable
+ * register (one enable bit per channel) from every channel's 'working'
+ * flag, so enabling one channel never clobbers the others.
+ */
+static int _fh_pwm_enable(struct fh_pwm_chip_data *chip_data)
+{
+	int i;
+	unsigned int reg = 0;
+	chip_data->working = 1;
+
+	/* note: 'chip_data' is deliberately reused as the loop cursor */
+	for (i = 0; i < fh_pwm.chip.npwm; i++) {
+		chip_data = pwm_get_chip_data(&fh_pwm.chip.pwms[i]);
+		reg |= chip_data->working << i;
+	}
+
+	writel(reg, fh_pwm.base + REG_PWM_CTRL);
+	return 0;
+}
+
+/* pwm_ops->enable callback: look up the channel data and set its bit. */
+int fh_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct fh_pwm_chip_data *chip_data;
+
+	chip_data = pwm_get_chip_data(pwm);
+	if (!chip_data) {
+		pr_err("%s: ERROR: PWM %d does NOT exist\n",
+				__func__, pwm->hwpwm);
+		return -ENXIO;
+	}
+
+	_fh_pwm_enable(chip_data);
+
+	return 0;
+}
+
+/*
+ * Mark @chip_data's channel as stopped and rewrite the global enable
+ * register from all channels' 'working' flags (mirror of _fh_pwm_enable).
+ */
+static int _fh_pwm_disable(struct fh_pwm_chip_data *chip_data)
+{
+	int i;
+	unsigned int reg = 0;
+	chip_data->working = 0;
+
+	for (i = 0; i < fh_pwm.chip.npwm; i++) {
+		chip_data = pwm_get_chip_data(&fh_pwm.chip.pwms[i]);
+		reg |= chip_data->working << i;
+	}
+
+	writel(reg, fh_pwm.base + REG_PWM_CTRL);
+	return 0;
+}
+
+/* pwm_ops->disable callback: look up the channel data and clear its bit. */
+void fh_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct fh_pwm_chip_data *chip_data;
+
+	chip_data = pwm_get_chip_data(pwm);
+	if (!chip_data) {
+		pr_err("%s: ERROR: PWM %d does NOT exist\n",
+				__func__, pwm->hwpwm);
+		return;
+	}
+
+	_fh_pwm_disable(chip_data);
+}
+
+/*
+ * Char-device open: (re)initialise per-channel chip data for every PWM
+ * channel, allocating it when absent.
+ *
+ * Fix vs. original: the kzalloc() result was used unchecked; a failed
+ * allocation now returns -ENOMEM instead of dereferencing NULL.
+ */
+static int fh_pwm_open(struct inode *inode, struct file *file)
+{
+	int i;
+	struct fh_pwm_chip_data *chip_data;
+	struct pwm_device *pwm;
+
+	for (i = 0; i < fh_pwm.chip.npwm; i++) {
+		pwm = &fh_pwm.chip.pwms[i];
+
+		if (!pwm) {
+			pr_err("%s: ERROR: PWM %d does NOT exist\n",
+					__func__, i);
+			return -ENXIO;
+		}
+		chip_data = pwm_get_chip_data(pwm);
+		if (!chip_data)
+			chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+		if (!chip_data)
+			return -ENOMEM;
+
+		chip_data->id = pwm->hwpwm;
+		chip_data->working = 0;
+		pwm->chip_data = chip_data;
+	}
+	return 0;
+}
+
+/*
+ * Char-device release: drop every channel's chip data.
+ *
+ * Fix vs. original: the data was allocated with kzalloc() but released
+ * with free() from linux/decompress/mm.h; use kfree() to match the
+ * allocator (kfree(NULL) is a no-op, so no extra check is needed).
+ */
+static int fh_pwm_release(struct inode *inode, struct file *filp)
+{
+	int i;
+	struct fh_pwm_chip_data *chip_data;
+	struct pwm_device *pwm;
+
+	for (i = 0; i < fh_pwm.chip.npwm; i++) {
+		pwm = &fh_pwm.chip.pwms[i];
+
+		if (!pwm) {
+			pr_err("%s: ERROR: PWM %d does NOT exist\n",
+					__func__, i);
+			return -ENOTTY;
+		}
+		chip_data = pwm_get_chip_data(pwm);
+		kfree(chip_data);
+		pwm_set_chip_data(pwm, NULL);
+	}
+	return 0;
+}
+
+
+/*
+ * Misc-device ioctl entry point.  Every command takes a userspace pointer
+ * to a struct fh_pwm_chip_data identifying the channel via ->id.
+ *
+ * Fix vs. original: the userspace pointer was cast and dereferenced
+ * directly in kernel context and its ->id was never validated, allowing
+ * arbitrary kernel reads/writes on behalf of userspace.  The struct is
+ * now copied in with copy_from_user(), ->id is range-checked before it
+ * indexes chip.pwms[], all work happens on the kernel-side per-channel
+ * data, and GET results are copied back with copy_to_user().
+ */
+static long fh_pwm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct fh_pwm_chip_data kdata;
+	struct fh_pwm_chip_data *chip_data;
+
+	if (unlikely(_IOC_TYPE(cmd) != PWM_IOCTL_MAGIC)) {
+		pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
+				__func__, _IOC_TYPE(cmd), -ENOTTY);
+		return -ENOTTY;
+	}
+
+	if (unlikely(_IOC_NR(cmd) > PWM_IOCTL_MAXNR)) {
+		pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
+				__func__, _IOC_NR(cmd), -ENOTTY);
+		return -ENOTTY;
+	}
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
+	else if (_IOC_DIR(cmd) & _IOC_WRITE)
+		ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
+
+	if (ret) {
+		pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
+				__func__, _IOC_NR(cmd), -EACCES);
+		return -EACCES;
+	}
+
+	if (copy_from_user(&kdata, (void __user *)arg, sizeof(kdata)))
+		return -EFAULT;
+
+	/* id comes from userspace and indexes chip.pwms[]: validate it */
+	if (kdata.id < 0 || kdata.id >= fh_pwm.chip.npwm)
+		return -EINVAL;
+
+	chip_data = pwm_get_chip_data(&fh_pwm.chip.pwms[kdata.id]);
+	if (!chip_data)
+		return -ENXIO;
+
+	switch (cmd) {
+	case ENABLE_PWM:
+		ret = _fh_pwm_enable(chip_data);
+		break;
+	case DISABLE_PWM:
+		ret = _fh_pwm_disable(chip_data);
+		break;
+	case SET_PWM_DUTY_CYCLE:
+		chip_data->counter_ns = kdata.counter_ns;
+		chip_data->period_ns = kdata.period_ns;
+		printk("ioctl: pwm->id: %d, pwm->period: %d ns\n",
+				chip_data->id, chip_data->period_ns);
+		ret = pwm_set_duty_cycle_ns(chip_data);
+		break;
+	case GET_PWM_DUTY_CYCLE:
+		ret = pwm_get_duty_cycle_ns(chip_data);
+		printk("ioctl: pwm->id: %d, pwm->counter: %d, pwm->period: %d\n",
+				chip_data->id, chip_data->counter_ns,
+				chip_data->period_ns);
+		if (!ret && copy_to_user((void __user *)arg, chip_data,
+				sizeof(*chip_data)))
+			ret = -EFAULT;
+		break;
+	case SET_PWM_DUTY_CYCLE_PERCENT:
+		chip_data->percent = kdata.percent;
+		chip_data->period_ns = kdata.period_ns;
+		printk("ioctl: pwm->id: %d, pwm->percent: %d, pwm->period: %d\n",
+				chip_data->id, chip_data->percent,
+				chip_data->period_ns);
+		ret = pwm_set_duty_cycle_percent(chip_data);
+		break;
+	}
+
+	return ret;
+}
+
+/* /dev/fh_pwm char-device operations. */
+static const struct file_operations fh_pwm_fops = {
+	.owner = THIS_MODULE,
+	.open = fh_pwm_open,
+	.release = fh_pwm_release,
+	.unlocked_ioctl = fh_pwm_ioctl,
+};
+
+/* Dynamically-numbered misc device exposing the ioctl interface. */
+static struct miscdevice fh_pwm_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = DEVICE_NAME,
+	.fops = &fh_pwm_fops,
+};
+
+/* Callbacks registered with the kernel PWM framework. */
+static const struct pwm_ops fh_pwm_ops = {
+	.config = fh_pwm_config,
+	.enable = fh_pwm_enable,
+	.disable = fh_pwm_disable,
+	.owner = THIS_MODULE,
+};
+
+
+/*
+ * Remove every occurrence of @ch from the NUL-terminated string @str,
+ * compacting the remaining characters in place.
+ */
+static void del_char(char *str, char ch)
+{
+	size_t rd, wr = 0;
+
+	for (rd = 0; str[rd] != '\0'; rd++) {
+		if (str[rd] != ch)
+			str[wr++] = str[rd];
+	}
+	str[wr] = '\0';
+}
+
+/*
+ * /proc write handler.  Input: "<id>,<enable>,<duty_ns>,<period_ns>".
+ *
+ * Fixes vs. original:
+ *  - the copy length is clamped to sizeof(message) - 1 so the buffer is
+ *    always NUL-terminated (a full 32-byte write previously left no
+ *    terminator for strsep to find);
+ *  - the channel index from userspace is validated before it indexes
+ *    chip.pwms[] (out-of-bounds access otherwise);
+ *  - dropped the "param[i] < 0" test, always false for an unsigned value.
+ */
+static ssize_t fh_pwm_proc_write(struct file *filp, const char *buf, size_t len, loff_t *off)
+{
+	int i;
+	char message[32] = {0};
+	char *const delim = ",";
+	char *cur = message;
+	char *param_str[4];
+	unsigned int param[4];
+	struct fh_pwm_chip_data *chip_data;
+
+	if (len > sizeof(message) - 1)
+		len = sizeof(message) - 1;
+
+	if (copy_from_user(message, buf, len))
+		return -EFAULT;
+
+	for (i = 0; i < 4; i++) {
+		param_str[i] = strsep(&cur, delim);
+		if (!param_str[i]) {
+			pr_err("%s: ERROR: parameter[%d] is empty\n", __func__, i);
+			return -EINVAL;
+		}
+		del_char(param_str[i], ' ');
+		del_char(param_str[i], '\n');
+		param[i] = (u32)simple_strtoul(param_str[i], NULL, 10);
+	}
+
+	if (param[0] >= fh_pwm.chip.npwm) {
+		pr_err("%s: ERROR: channel %u does not exist\n",
+				__func__, param[0]);
+		return -EINVAL;
+	}
+
+	chip_data = pwm_get_chip_data(&fh_pwm.chip.pwms[param[0]]);
+	if (!chip_data)
+		return -ENXIO;
+
+	printk("set pwm %d to %s, duty cycle: %u ns, period cycle: %u\n", param[0],
+			param[1] ? "enable" : "disable", param[2], param[3]);
+	chip_data->counter_ns = param[2];
+	chip_data->period_ns = param[3];
+
+	param[1] ? fh_pwm_enable(&fh_pwm.chip, &fh_pwm.chip.pwms[param[0]]) : fh_pwm_disable(&fh_pwm.chip,
+			&fh_pwm.chip.pwms[param[0]]);
+	pwm_set_duty_cycle_ns(chip_data);
+
+	return len;
+}
+
+/*
+ * Minimal single-record seq_file iterator for the /proc status dump:
+ * start() yields one dummy token on the first pass, next() ends the
+ * sequence, stop() has nothing to release.
+ */
+static void *v_seq_start(struct seq_file *s, loff_t *pos)
+{
+	static unsigned long counter = 0;
+	if (*pos == 0)
+		return &counter;
+	else {
+		*pos = 0;
+		return NULL;
+	}
+}
+
+static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return NULL;
+}
+
+static void v_seq_stop(struct seq_file *s, void *v)
+{
+
+}
+
+/*
+ * /proc show: dump every channel's enable state and timing.
+ * NOTE(review): assumes each channel's chip_data is non-NULL; probe's
+ * kzalloc failure path leaves it NULL — confirm before relying on this.
+ */
+static int v_seq_show(struct seq_file *sfile, void *v)
+{
+	int i;
+	seq_printf(sfile, "\nPWM Status:\n");
+
+	for (i = 0; i < fh_pwm.chip.npwm; i++) {
+		struct fh_pwm_chip_data *chip_data;
+
+		chip_data = pwm_get_chip_data(&fh_pwm.chip.pwms[i]);
+		seq_printf(sfile, "id: %d \t%s, duty_ns: %u, period_ns: %u\n",
+				chip_data->id,
+				(chip_data->working) ? "ENABLE" : "DISABLE",
+				chip_data->counter_ns,
+				chip_data->period_ns);
+	}
+	return 0;
+}
+
+/* seq_ops for the /proc file (the "isp_" name was kept from the template
+ * this was copied from; it drives the PWM dump, not an ISP). */
+static const struct seq_operations isp_seq_ops = {
+	.start = v_seq_start,
+	.next = v_seq_next,
+	.stop = v_seq_stop,
+	.show = v_seq_show
+};
+
+static int fh_pwm_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &isp_seq_ops);
+}
+
+
+/* /proc/driver/pwm: read = status dump, write = channel control. */
+static struct file_operations fh_pwm_proc_ops = {
+	.owner = THIS_MODULE,
+	.open = fh_pwm_proc_open,
+	.read = seq_read,
+	.write = fh_pwm_proc_write,
+	.release = seq_release,
+};
+
+/*
+ * Platform probe: map the register window, enable the bus clock, register
+ * the misc char device and the PWM chip, allocate per-channel state and
+ * create the /proc control file.
+ *
+ * Fixes vs. original:
+ *  - a misc_register() failure leaked the clock, the ioremap and the mem
+ *    region (it returned -ENXIO directly);
+ *  - a pwmchip_add() failure additionally leaked the misc device.
+ *  Both paths now unwind through the goto chain (clk_put added so the
+ *  clock reference from clk_get() is dropped too).
+ */
+static int __devinit fh_pwm_probe(struct platform_device *pdev)
+{
+	int err, i;
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		err = -ENXIO;
+		goto fail_no_mem_resource;
+	}
+
+	res = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (res == NULL) {
+		err = -EBUSY;
+		goto fail_no_mem_resource;
+	}
+
+	fh_pwm.base = ioremap(res->start, resource_size(res));
+	if (fh_pwm.base == NULL) {
+		err = -ENXIO;
+		goto fail_no_ioremap;
+	}
+
+	fh_pwm.clk = clk_get(&pdev->dev, "pwm_clk");
+
+	if (IS_ERR(fh_pwm.clk)) {
+		err = PTR_ERR(fh_pwm.clk);
+		goto fail_no_clk;
+	}
+
+	clk_enable(fh_pwm.clk);
+
+	err = misc_register(&fh_pwm_misc);
+	if (err < 0) {
+		pr_err("%s: ERROR: %s registration failed",
+				__func__, DEVICE_NAME);
+		err = -ENXIO;
+		goto fail_no_misc;
+	}
+
+	fh_pwm.chip.dev = &pdev->dev;
+	fh_pwm.chip.ops = &fh_pwm_ops;
+	fh_pwm.chip.base = pdev->id;
+	fh_pwm.chip.npwm = CONFIG_FH_PWM_NUM;
+
+	err = pwmchip_add(&fh_pwm.chip);
+	if (err < 0) {
+		pr_err("%s: ERROR: %s pwmchip_add failed",
+				__func__, DEVICE_NAME);
+		goto fail_no_pwmchip;
+	}
+
+	for (i = 0; i < fh_pwm.chip.npwm; i++) {
+		struct fh_pwm_chip_data *chip_data;
+
+		chip_data = kzalloc(sizeof(struct fh_pwm_chip_data), GFP_KERNEL);
+		if (chip_data == NULL) {
+			pr_err("pwm[%d], chip data malloced failed\n", i);
+			continue;
+		}
+
+		chip_data->id = i;
+		chip_data->working = 0;
+
+		pwm_set_chip_data(&fh_pwm.chip.pwms[i], chip_data);
+	}
+
+	platform_set_drvdata(pdev, &fh_pwm);
+
+	/* disable pwm at startup. Avoids zero value. */
+	writel(0x0, fh_pwm.base + REG_PWM_CTRL);
+
+	pr_info("PWM driver, Number: %d, IO base addr: 0x%p\n",
+			fh_pwm.chip.npwm, fh_pwm.base);
+
+	fh_pwm.proc_file = create_proc_entry(FH_PWM_PROC_FILE, 0644, NULL);
+
+	if (fh_pwm.proc_file)
+		fh_pwm.proc_file->proc_fops = &fh_pwm_proc_ops;
+	else
+		pr_err("%s: ERROR: %s proc file create failed",
+				__func__, DEVICE_NAME);
+
+	dev_dbg(&pdev->dev, "PWM probe successful, IO base addr: %p\n",
+			fh_pwm.base);
+	return 0;
+
+fail_no_pwmchip:
+	misc_deregister(&fh_pwm_misc);
+fail_no_misc:
+	clk_disable(fh_pwm.clk);
+	clk_put(fh_pwm.clk);
+fail_no_clk:
+	iounmap(fh_pwm.base);
+fail_no_ioremap:
+	release_mem_region(res->start, resource_size(res));
+fail_no_mem_resource:
+	return err;
+}
+
+/*
+ * Platform remove: free per-channel data, unregister the chip, quiesce
+ * the hardware and release the resources acquired in probe.
+ *
+ * Fix vs. original: platform_get_resource() can return NULL; guard the
+ * release_mem_region() call instead of dereferencing it blindly.
+ */
+static int __exit fh_pwm_remove(struct platform_device *pdev)
+{
+	int err, i;
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	for (i = 0; i < fh_pwm.chip.npwm; i++)
+		kfree(fh_pwm.chip.pwms[i].chip_data);
+
+	err = pwmchip_remove(&fh_pwm.chip);
+	if (err < 0)
+		return err;
+
+	dev_dbg(&pdev->dev, "pwm driver removed\n");
+
+	/* stop all channels before the clock goes away */
+	writel(0x0, fh_pwm.base + REG_PWM_CTRL);
+	clk_disable(fh_pwm.clk);
+
+	iounmap(fh_pwm.base);
+	if (res)
+		release_mem_region(res->start, resource_size(res));
+	platform_set_drvdata(pdev, NULL);
+	misc_deregister(&fh_pwm_misc);
+	return 0;
+}
+
+/* Platform glue: binds to the "fh_pwm" platform device. */
+static struct platform_driver fh_pwm_driver = {
+	.driver =
+	{
+		.name = DEVICE_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = fh_pwm_probe,
+	.remove = __exit_p(fh_pwm_remove),
+};
+
+static int __init fh_pwm_init(void)
+{
+	return platform_driver_register(&fh_pwm_driver);
+}
+
+static void __exit fh_pwm_exit(void)
+{
+
+	platform_driver_unregister(&fh_pwm_driver);
+
+}
+
+module_init(fh_pwm_init);
+module_exit(fh_pwm_exit);
+
+
+MODULE_AUTHOR("fullhan");
+
+MODULE_DESCRIPTION("FH PWM driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
diff --git a/drivers/pwm/pwm-fullhan.h b/drivers/pwm/pwm-fullhan.h
new file mode 100644
index 00000000..cb81ac09
--- /dev/null
+++ b/drivers/pwm/pwm-fullhan.h
@@ -0,0 +1,31 @@
+/*
+ * pwm-fullhan.h - ioctl interface and register layout of the FH PWM
+ * driver, shared with userspace clients.
+ *
+ * NOTE(review): the guard reads FH_PMU_H_ ("PMU") — presumably a typo
+ * for FH_PWM_H_; left unchanged since other files may depend on it.
+ */
+#ifndef FH_PMU_H_
+#define FH_PMU_H_
+
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+
+#define DEVICE_NAME "fh_pwm"
+#define FH_PWM_PROC_FILE "driver/pwm"
+
+/* global enable register: one enable bit per channel */
+#define REG_PWM_CTRL (0x00)
+/* per-channel command register: period in [11:0], inverted duty in [27:16] */
+#define REG_PWM_CMD(n) (((n) * 4) + REG_PWM_CTRL + 4)
+
+#define PWM_IOCTL_MAGIC 'p'
+#define ENABLE_PWM _IOWR(PWM_IOCTL_MAGIC, 0, __u32)
+#define DISABLE_PWM _IOWR(PWM_IOCTL_MAGIC, 1, __u32)
+
+#define SET_PWM_DUTY_CYCLE _IOWR(PWM_IOCTL_MAGIC, 2, __u32)
+#define GET_PWM_DUTY_CYCLE _IOWR(PWM_IOCTL_MAGIC, 3, __u32)
+#define SET_PWM_DUTY_CYCLE_PERCENT _IOWR(PWM_IOCTL_MAGIC, 4, __u32)
+#define PWM_IOCTL_MAXNR 8
+
+/* Per-channel state exchanged with userspace through the ioctls above. */
+struct fh_pwm_chip_data
+{
+	int id;		/* channel index */
+	int working;	/* nonzero while the channel is enabled */
+	u32 period_ns;
+	u32 counter_ns;	/* duty (high) time in nanoseconds */
+	int percent;	/* duty in percent, for SET_PWM_DUTY_CYCLE_PERCENT */
+};
+
+#endif /* FH_PMU_H_ */
diff --git a/drivers/pwm/pwmv2-fullhan.c b/drivers/pwm/pwmv2-fullhan.c
new file mode 100644
index 00000000..d2dc6bba
--- /dev/null
+++ b/drivers/pwm/pwmv2-fullhan.c
@@ -0,0 +1,1043 @@
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+#include <linux/printk.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/decompress/mm.h>
+#include <linux/of_address.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+#include <linux/uaccess.h>
+#include "pwmv2-fullhan.h"
+#include "mach/fh_predefined.h"
+
+#define FH_PWM_DEBUG
+#ifdef FH_PWM_DEBUG
+#define PRINT_DBG(fmt, args...) printk(fmt, ##args)
+#else
+#define PRINT_DBG(fmt, args...) do {} while (0)
+#endif
+
+#define STATUS_INT (1<<31)
+#define STATUS_FINALL0 (1<<0)
+#define STATUS_FINALL1 (1<<1)
+#define STATUS_FINALL2 (1<<2)
+#define STATUS_FINALL3 (1<<3)
+#define STATUS_FINALL4 (1<<4)
+#define STATUS_FINALL5 (1<<5)
+#define STATUS_FINALL6 (1<<6)
+#define STATUS_FINALL7 (1<<7)
+#define STATUS_FINONCE0 (1<<8)
+#define STATUS_FINONCE1 (1<<9)
+#define STATUS_FINONCE2 (1<<10)
+#define STATUS_FINONCE3 (1<<11)
+#define STATUS_FINONCE4 (1<<12)
+#define STATUS_FINONCE5 (1<<13)
+#define STATUS_FINONCE6 (1<<14)
+#define STATUS_FINONCE7 (1<<15)
+
+#define OFFSET_PWM_BASE(n) (0x100 + 0x100 * n)
+
+#define OFFSET_PWM_GLOBAL_CTRL0 (0x000)
+#define OFFSET_PWM_GLOBAL_CTRL1 (0x004)
+#define OFFSET_PWM_GLOBAL_CTRL2 (0x008)
+#define OFFSET_PWM_INT_ENABLE (0x010)
+#define OFFSET_PWM_INT_STATUS (0x014)
+
+#define OFFSET_PWM_CTRL(n) (0x000 + OFFSET_PWM_BASE(n))
+#define OFFSET_PWM_CFG0(n) (0x004 + OFFSET_PWM_BASE(n))
+#define OFFSET_PWM_CFG1(n) (0x008 + OFFSET_PWM_BASE(n))
+#define OFFSET_PWM_CFG2(n) (0x00c + OFFSET_PWM_BASE(n))
+#define OFFSET_PWM_CFG3(n) (0x010 + OFFSET_PWM_BASE(n))
+#define OFFSET_PWM_CFG4(n) (0x014 + OFFSET_PWM_BASE(n))
+#define OFFSET_PWM_STATUS0(n) (0x020 + OFFSET_PWM_BASE(n))
+#define OFFSET_PWM_STATUS1(n) (0x024 + OFFSET_PWM_BASE(n))
+#define OFFSET_PWM_STATUS2(n) (0x028 + OFFSET_PWM_BASE(n))
+#define OFFSET_PWM_STOPTIME_BIT(n) (8 + 16 * ((n) / 8) + ((n) % 8))
+#define OFFSET_PWM_FINSHALL_BIT(n) (0 + 16 * ((n) / 8) + ((n) % 8))
+#define OFFSET_PWM_FINSHONCE_BIT(n) (8 + 16 * ((n) / 8) + ((n) % 8))
+
+/* v2 driver state: IRQ line, PWM core chip, MMIO base, clock, /proc file. */
+struct fh_pwm_driver {
+	unsigned int irq;
+	struct pwm_chip chip;
+	void __iomem *base;
+	struct clk *clk;
+	struct proc_dir_entry *proc_file;
+};
+
+/* Single controller instance, allocated at probe time. */
+struct fh_pwm_driver *fh_pwm_drv;
+
+/* Write the whole per-channel output-enable mask (GLOBAL_CTRL2) at once. */
+static void fh_pwm_output_mask(unsigned int mask)
+{
+	writel(mask, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
+}
+
+/* Set channel @n's output-enable bit, read-modify-write. */
+static void fh_pwm_output_enable(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
+	reg |= (1 << n);
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
+}
+
+/* Clear channel @n's output-enable bit. */
+static void fh_pwm_output_disable(unsigned int n)
+{
+	unsigned int reg;
+/*
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
+	reg |= 1 << (8 + n);
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
+*/
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
+	reg &= ~(1 << n);
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
+}
+
+/* Let channel @n latch its CFGx registers (GLOBAL_CTRL0 gate bit). */
+static void fh_pwm_config_enable(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0);
+	reg |= (1 << n);
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0);
+}
+
+/* Hold channel @n's config latch while the CFGx registers are rewritten. */
+static void fh_pwm_config_disable(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0);
+	reg &= ~(1 << n);
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0);
+}
+#ifdef DEBUG
+/* Shadow-register control bits in GLOBAL_CTRL1; only used by debug builds. */
+static void fh_pwm_shadow_enable(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
+	reg |= (1 << n);
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
+}
+
+static void fh_pwm_shadow_disable(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
+	reg &= ~(1 << n);
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
+}
+#endif
+
+/* Unmask the "all pulses finished" interrupt of channel @n. */
+static void fh_pwm_interrupt_finishall_enable(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
+
+	reg |= (1 << OFFSET_PWM_FINSHALL_BIT(n));
+
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
+}
+
+/* Mask the "all pulses finished" interrupt of channel @n. */
+static void fh_pwm_interrupt_finishall_disable(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
+
+	reg &= ~(1 << OFFSET_PWM_FINSHALL_BIT(n));
+
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
+}
+
+/* Unmask the "single pulse finished" interrupt of channel @n. */
+static void fh_pwm_interrupt_finishonce_enable(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
+
+	reg |= (1 << OFFSET_PWM_FINSHONCE_BIT(n));
+
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
+}
+
+/* Mask the "single pulse finished" interrupt of channel @n. */
+static void fh_pwm_interrupt_finishonce_disable(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
+
+	reg &= ~(1 << OFFSET_PWM_FINSHONCE_BIT(n));
+
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
+}
+
+/* Return the pending-AND-enabled interrupt bits. */
+static unsigned int fh_pwm_interrupt_get_status(void)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
+
+	reg &= readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
+
+	return reg;
+}
+
+/*
+ * Acknowledge channel @n's "single pulse finished" interrupt.  On V21 the
+ * status register is write-1-to-clear; on older parts the bit is written
+ * back as 0.
+ *
+ * Fix vs. original: "return writel(...);" returned a void expression
+ * from a void function, which is not valid ISO C; drop the 'return'.
+ */
+static void fh_pwm_interrupt_finishonce_clear(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
+#ifdef CONFIG_PWM_FULLHAN_V21
+	reg |= (1 << OFFSET_PWM_FINSHONCE_BIT(n));
+#else
+	reg &= ~(1 << OFFSET_PWM_FINSHONCE_BIT(n));
+#endif
+
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
+}
+
+/*
+ * Acknowledge channel @n's "all pulses finished" interrupt (same V21
+ * write-1-to-clear distinction as the finishonce variant).
+ *
+ * Fix vs. original: dropped the invalid "return writel(...)" — a void
+ * expression must not be returned from a void function in ISO C.
+ */
+static void fh_pwm_interrupt_finishall_clear(unsigned int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
+
+#ifdef CONFIG_PWM_FULLHAN_V21
+	reg |= (1 << OFFSET_PWM_FINSHALL_BIT(n));
+#else
+	reg &= ~(1 << OFFSET_PWM_FINSHALL_BIT(n));
+#endif
+
+	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
+}
+
+
+/*
+ * Snapshot channel @chip_data->id's hardware status: busy and error flags
+ * from STATUS2, total programmed pulses from STATUS1 and completed pulses
+ * from STATUS0.
+ */
+static void fh_pwm_get_status(struct fh_pwm_chip_data *chip_data)
+{
+	unsigned int status0, status1, status2;
+	status0 = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS0(chip_data->id));
+	status1 = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS1(chip_data->id));
+	status2 = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS2(chip_data->id));
+	chip_data->status.busy = (status2 >> 4) & 0x1;
+	chip_data->status.error = (status2 >> 3) & 0x1;
+	chip_data->status.total_cnt = status1;
+	chip_data->status.done_cnt = status0;
+}
+
+/*
+ * Block until channel @chip_data->id has emitted all programmed pulses
+ * and return the completed-pulse count.  With finish_all interrupts
+ * enabled the wait sleeps on the channel's wait queue; otherwise it
+ * polls the status registers every 10 ms.
+ *
+ * Fix vs. original: chip_data was NULL-tested and then dereferenced
+ * unconditionally right after; bail out early instead.
+ */
+static int fh_pwm_wait_done(struct fh_pwm_chip_data *chip_data)
+{
+	if (!chip_data)
+		return -EINVAL;
+
+	fh_pwm_get_status(chip_data);
+
+	if (chip_data->status.busy == 0)
+		return chip_data->status.done_cnt;
+
+	if (chip_data->config.finish_all) {
+		wait_event_interruptible(chip_data->wait_done, chip_data->status.busy == 0);
+	} else {
+		while (chip_data->status.busy == 1) {
+			mdelay(10);
+			fh_pwm_get_status(chip_data);
+		}
+	}
+	return chip_data->status.done_cnt;
+}
+
+/*
+ * Program one channel from chip_data->config: period/duty/phase/delay in
+ * nanoseconds (24-bit counters), pulse count, stop mode and the finish
+ * interrupts.  The config latch is held disabled while the CFG registers
+ * are rewritten and re-enabled at the end; if the channel is busy and
+ * the new settings flip the "output high at period end" condition, the
+ * output is force-disabled first to avoid a glitch.
+ *
+ * NOTE(review): clk_rate == 0 would divide by zero below — confirm the
+ * PWM clock is guaranteed running whenever this is called.
+ */
+static int fh_pwm_set_config(struct fh_pwm_chip_data *chip_data)
+{
+	unsigned int clk_rate = clk_get_rate(fh_pwm_drv->clk);
+	unsigned int ctrl = 0, period, duty, delay, phase, reg;
+	unsigned int period_used, duty_used, phase_used, busy;
+
+	fh_pwm_config_disable(chip_data->id);
+
+	/* remember the currently-latched timing for the glitch check below */
+	period_used = readl(fh_pwm_drv->base + OFFSET_PWM_CFG0(chip_data->id));
+	duty_used = readl(fh_pwm_drv->base + OFFSET_PWM_CFG1(chip_data->id));
+	phase_used = readl(fh_pwm_drv->base + OFFSET_PWM_CFG2(chip_data->id));
+	busy = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS2(chip_data->id)) \
+		& (0x10);
+	period = chip_data->config.period_ns / (NSEC_PER_SEC / clk_rate);
+	duty = chip_data->config.duty_ns / (NSEC_PER_SEC / clk_rate);
+	delay = chip_data->config.delay_ns / (NSEC_PER_SEC / clk_rate);
+	phase = chip_data->config.phase_ns / (NSEC_PER_SEC / clk_rate);
+
+	if (period > 0x1ffffff) {
+		pr_err("PWM: period exceed 24-bit\n");
+		return -EINVAL;
+	}
+
+	if (duty > 0x1ffffff) {
+		pr_err("PWM: duty exceed 24-bit\n");
+		return -EINVAL;
+	}
+
+	if (duty > period) {
+		pr_err("PWM: duty is over period\n");
+		return -EINVAL;
+	}
+
+	PRINT_DBG("set period: 0x%x\n", period);
+	PRINT_DBG("set duty: 0x%x\n", duty);
+	PRINT_DBG("set phase: 0x%x\n", phase);
+	PRINT_DBG("set delay: 0x%x\n", delay);
+
+	writel(period, fh_pwm_drv->base + OFFSET_PWM_CFG0(chip_data->id));
+	writel(duty, fh_pwm_drv->base + OFFSET_PWM_CFG1(chip_data->id));
+	writel(phase, fh_pwm_drv->base + OFFSET_PWM_CFG2(chip_data->id));
+	writel(delay, fh_pwm_drv->base + OFFSET_PWM_CFG3(chip_data->id));
+	writel(chip_data->config.pulses,
+		fh_pwm_drv->base + OFFSET_PWM_CFG4(chip_data->id));
+
+	/* ctrl bit 3: delayed start; bit 0: free-run (no pulse count) */
+	if (chip_data->config.delay_ns)
+		ctrl |= 1 << 3;
+
+	if (!chip_data->config.pulses)
+		ctrl |= 1 << 0;
+
+	ctrl |= (chip_data->config.stop & 0x3) << 1;
+
+	writel(ctrl, fh_pwm_drv->base + OFFSET_PWM_CTRL(chip_data->id));
+
+	PRINT_DBG("set ctrl: 0x%x\n", ctrl);
+
+	ctrl = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
+
+	/* bit 4 of config.stop selects the per-channel stop-time mode */
+	reg = (chip_data->config.stop >> 4) & 0x1;
+	if (reg)
+		ctrl |= (1 << OFFSET_PWM_STOPTIME_BIT(chip_data->id));
+	else
+		ctrl &= ~(1 << OFFSET_PWM_STOPTIME_BIT(chip_data->id));
+
+	writel(ctrl, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
+
+	writel(chip_data->config.pulses,
+		fh_pwm_drv->base + OFFSET_PWM_CFG4(chip_data->id));
+	PRINT_DBG("set pulses: 0x%x\n", chip_data->config.pulses);
+
+	if (chip_data->config.finish_once)
+		fh_pwm_interrupt_finishonce_enable(chip_data->id);
+	else
+		fh_pwm_interrupt_finishonce_disable(chip_data->id);
+
+	if (chip_data->config.finish_all) {
+		init_waitqueue_head(&chip_data->wait_done);
+		fh_pwm_interrupt_finishall_enable(chip_data->id);
+	} else
+		fh_pwm_interrupt_finishall_disable(chip_data->id);
+
+	/* avoid an output glitch when the end-of-period level changes */
+	if (busy && \
+		(((duty + phase) < period) ^ ((duty_used + phase_used) < period_used)))
+		fh_pwm_output_disable(chip_data->id);
+
+	fh_pwm_config_enable(chip_data->id);
+
+	return 0;
+}
+
+/*
+ * Read back channel @chip_data->id's full register state and convert the
+ * counters to nanoseconds / derived fields in @chip_data.
+ *
+ * Fix vs. original: "duty_ns / (period_ns / 100)" divided by zero for
+ * any period below 100 ns; the percent is now guarded and reported as 0
+ * in that case.
+ */
+static void fh_pwm_get_config(struct fh_pwm_chip_data *chip_data)
+{
+	unsigned int clk_rate = clk_get_rate(fh_pwm_drv->clk);
+	unsigned int ctrl = 0, period, duty, delay, phase, pulses,
+			status0, status1, status2;
+
+	period = readl(fh_pwm_drv->base + OFFSET_PWM_CFG0(chip_data->id));
+	duty = readl(fh_pwm_drv->base + OFFSET_PWM_CFG1(chip_data->id));
+	phase = readl(fh_pwm_drv->base + OFFSET_PWM_CFG2(chip_data->id));
+	delay = readl(fh_pwm_drv->base + OFFSET_PWM_CFG3(chip_data->id));
+	pulses = readl(fh_pwm_drv->base + OFFSET_PWM_CFG4(chip_data->id));
+	ctrl = readl(fh_pwm_drv->base + OFFSET_PWM_CTRL(chip_data->id));
+	status0 = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS0(chip_data->id));
+	status1 = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS1(chip_data->id));
+	status2 = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS2(chip_data->id));
+
+
+	PRINT_DBG("==============================\n");
+	PRINT_DBG("pwm%d register config:\n", chip_data->id);
+	PRINT_DBG("\t\tperiod: 0x%x\n", period);
+	PRINT_DBG("\t\tduty: 0x%x\n", duty);
+	PRINT_DBG("\t\tphase: 0x%x\n", phase);
+	PRINT_DBG("\t\tdelay: 0x%x\n", delay);
+	PRINT_DBG("\t\tpulses: 0x%x\n", pulses);
+	PRINT_DBG("\t\tctrl: 0x%x\n", ctrl);
+	PRINT_DBG("\t\tstatus0: 0x%x\n", status0);
+	PRINT_DBG("\t\tstatus1: 0x%x\n", status1);
+	PRINT_DBG("\t\tstatus2: 0x%x\n", status2);
+
+	chip_data->config.period_ns = period * (NSEC_PER_SEC / clk_rate);
+	chip_data->config.duty_ns = duty * (NSEC_PER_SEC / clk_rate);
+
+	PRINT_DBG("\t\tclk_rate: %d\n", clk_rate);
+	PRINT_DBG("\t\tconfig.period_ns: %d\n", chip_data->config.period_ns);
+	PRINT_DBG("\t\tconfig.duty_ns: %d\n", chip_data->config.duty_ns);
+	PRINT_DBG("==============================\n\n");
+
+	chip_data->config.phase_ns = phase * (NSEC_PER_SEC / clk_rate);
+	chip_data->config.delay_ns = delay * (NSEC_PER_SEC / clk_rate);
+	chip_data->config.pulses = pulses;
+	chip_data->config.stop = (ctrl >> 1) & 0x3;
+	if (chip_data->config.period_ns / 100)
+		chip_data->config.percent = chip_data->config.duty_ns /
+				(chip_data->config.period_ns / 100);
+	else
+		chip_data->config.percent = 0;
+
+	chip_data->status.busy = (status2 >> 4) & 0x1;
+	chip_data->status.error = (status2 >> 3) & 0x1;
+	chip_data->status.total_cnt = status1;
+	chip_data->status.done_cnt = status0;
+}
+
+/*
+ * pwm_ops->config callback: build a transient channel description and
+ * program the hardware.
+ *
+ * Fixes vs. original:
+ *  - the scratch struct was heap-allocated and freed on every call; a
+ *    zero-initialised stack object does the same job;
+ *  - the fh_pwm_set_config() result was discarded, so invalid timings
+ *    were reported to the PWM core as success.
+ */
+int fh_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+		int duty_ns, int period_ns)
+{
+	struct fh_pwm_chip_data chip_data = {0};
+
+	chip_data.id = pwm->hwpwm;
+	chip_data.config.duty_ns = duty_ns;
+	chip_data.config.period_ns = period_ns;
+
+	return fh_pwm_set_config(&chip_data);
+}
+
+/* pwm_ops->enable callback: gate the channel's output on. */
+int fh_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	fh_pwm_output_enable(pwm->hwpwm);
+	return 0;
+}
+
+/* pwm_ops->disable callback: gate the channel's output off. */
+void fh_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	fh_pwm_output_disable(pwm->hwpwm);
+}
+
+/* Char-device open/release: no per-file state to manage in the v2 driver. */
+static int fh_pwm_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static int fh_pwm_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+
+/*
+ * Copy a struct fh_pwm_chip_data in from userspace, validate its channel
+ * id and mirror it into the kernel-side per-channel storage.
+ * Returns 0 on success, -EFAULT on bad copy / bad id / missing storage.
+ */
+static int fh_pwm_fetch_user_cfg(unsigned long arg,
+		struct fh_pwm_chip_data *chip_data)
+{
+	if (copy_from_user((void *)chip_data, (void __user *)arg,
+			sizeof(struct fh_pwm_chip_data))) {
+		pr_err("ioctrl:Get chip data from user space failed\n");
+		return -EFAULT;
+	}
+
+	/* id comes from userspace and indexes chip.pwms[]: range-check both
+	 * bounds (the original only tested the upper bound of a signed id) */
+	if (chip_data->id < 0 || chip_data->id >= fh_pwm_drv->chip.npwm) {
+		pr_err("ioctrl:PWM Channel Error\n");
+		return -EFAULT;
+	}
+
+	if (!fh_pwm_drv->chip.pwms[chip_data->id].chip_data) {
+		pr_err("ioctrl:PWM Channel has no chip data\n");
+		return -EFAULT;
+	}
+
+	memcpy(
+		fh_pwm_drv->chip.pwms[chip_data->id].chip_data,
+		(void *)chip_data,
+		sizeof(struct fh_pwm_chip_data));
+	return 0;
+}
+
+/*
+ * Fetch a channel index from userspace and range-check it against the
+ * number of channels.  Returns 0 on success.
+ */
+static int fh_pwm_fetch_user_channel(unsigned long arg, unsigned int *val)
+{
+	if (get_user(*val, (unsigned int __user *)arg))
+		return -EFAULT;
+	if (*val >= fh_pwm_drv->chip.npwm) {
+		pr_err("ioctrl:PWM Channel Error\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Misc-device ioctl entry point for the v2 controller.
+ *
+ * Fixes vs. original:
+ *  - every channel index taken from userspace is range-checked before it
+ *    indexes chip.pwms[] (SET_PWM_ENABLE, the *_INTR commands and
+ *    WAIT_PWM_FINSHALL were previously unchecked, and the struct-based
+ *    commands never rejected negative ids);
+ *  - get_user() results are no longer ignored;
+ *  - the per-channel chip_data pointer is NULL-checked before memcpy;
+ *  - the repeated copy/validate/mirror sequence is factored into
+ *    fh_pwm_fetch_user_cfg() / fh_pwm_fetch_user_channel().
+ */
+static long fh_pwm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct fh_pwm_chip_data chip_data = {0};
+	unsigned int val;
+
+	if (unlikely(_IOC_TYPE(cmd) != PWM_IOCTL_MAGIC)) {
+		pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
+				__func__, _IOC_TYPE(cmd), -ENOTTY);
+		return -ENOTTY;
+	}
+
+	if (unlikely(_IOC_NR(cmd) > PWM_IOCTL_MAXNR)) {
+		pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
+				__func__, _IOC_NR(cmd), -ENOTTY);
+		return -ENOTTY;
+	}
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		ret = !access_ok(VERIFY_WRITE,
+				(void __user *)arg, _IOC_SIZE(cmd));
+	else if (_IOC_DIR(cmd) & _IOC_WRITE)
+		ret = !access_ok(VERIFY_READ,
+				(void __user *)arg, _IOC_SIZE(cmd));
+
+	if (ret) {
+		pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
+				__func__, _IOC_NR(cmd), -EACCES);
+		return -EACCES;
+	}
+
+	switch (cmd) {
+	case SET_PWM_ENABLE:
+		ret = fh_pwm_fetch_user_channel(arg, &val);
+		if (ret)
+			return ret;
+		fh_pwm_output_enable(val);
+		break;
+	case ENABLE_PWM:
+		ret = fh_pwm_fetch_user_cfg(arg, &chip_data);
+		if (ret)
+			return ret;
+		fh_pwm_output_enable(chip_data.id);
+		break;
+	case DISABLE_PWM:
+		ret = fh_pwm_fetch_user_cfg(arg, &chip_data);
+		if (ret)
+			return ret;
+		fh_pwm_output_disable(chip_data.id);
+		break;
+	case SET_PWM_DUTY_CYCLE:
+		ret = fh_pwm_fetch_user_cfg(arg, &chip_data);
+		if (ret)
+			return ret;
+		printk("ioctl: SET_PWM_DUTY_CYCLE, "
+			"pwm->id: %d, pwm->counter: %d, pwm->period: %d ns\n",
+			chip_data.id, chip_data.config.duty_ns,
+			chip_data.config.period_ns);
+
+		ret = fh_pwm_set_config(fh_pwm_drv->chip.pwms[chip_data.id].chip_data);
+		break;
+	case GET_PWM_DUTY_CYCLE:
+		ret = fh_pwm_fetch_user_cfg(arg, &chip_data);
+		if (ret)
+			return ret;
+		pr_info("ioctl: GET_PWM_DUTY_CYCLE, "
+			"pwm->id: %d, pwm->counter: %d, pwm->period: %d ns\n",
+			chip_data.id, chip_data.config.duty_ns,
+			chip_data.config.period_ns);
+
+		fh_pwm_get_config(&chip_data);
+		if (copy_to_user((void __user *)arg,
+				(void *)&chip_data,
+				sizeof(struct fh_pwm_chip_data))) {
+			pr_err("ioctrl:Copy chip data to user space failed\n");
+			return -EFAULT;
+		}
+		break;
+	case SET_PWM_DUTY_CYCLE_PERCENT:
+		if (copy_from_user((void *)&chip_data,
+				(void __user *)arg,
+				sizeof(struct fh_pwm_chip_data))) {
+			pr_err("Get chip data from user space failed\n");
+			return -EFAULT;
+		}
+
+		if (chip_data.id < 0 || chip_data.id >= fh_pwm_drv->chip.npwm) {
+			pr_err("ioctrl:PWM Channel Error\n");
+			return -EFAULT;
+		}
+
+		if (chip_data.config.percent > 100) {
+			pr_err("ERROR: percentage is over 100\n");
+			return -EIO;
+		}
+		chip_data.config.duty_ns = chip_data.config.period_ns *
+				chip_data.config.percent / 100;
+		pr_info("ioctl: SET_PWM_DUTY_CYCLE_PERCENT, "
+			"pwm->id: %d, pwm->counter: %d, pwm->period: %d ns\n",
+			chip_data.id, chip_data.config.duty_ns,
+			chip_data.config.period_ns);
+
+		if (!fh_pwm_drv->chip.pwms[chip_data.id].chip_data) {
+			pr_err("ioctrl:PWM Channel has no chip data\n");
+			return -EFAULT;
+		}
+		memcpy(
+			fh_pwm_drv->chip.pwms[chip_data.id].chip_data,
+			(void *)&chip_data,
+			sizeof(struct fh_pwm_chip_data));
+
+		ret = fh_pwm_set_config(&chip_data);
+		break;
+	case ENABLE_MUL_PWM:
+		/* val is a bit mask here, not a channel index */
+		if (get_user(val, (unsigned int __user *)arg))
+			return -EFAULT;
+		fh_pwm_output_mask(val);
+		break;
+	case ENABLE_FINSHALL_INTR:
+		ret = fh_pwm_fetch_user_channel(arg, &val);
+		if (ret)
+			return ret;
+		fh_pwm_interrupt_finishall_enable(val);
+		break;
+	case ENABLE_FINSHONCE_INTR:
+		ret = fh_pwm_fetch_user_channel(arg, &val);
+		if (ret)
+			return ret;
+		fh_pwm_interrupt_finishonce_enable(val);
+		break;
+	case DISABLE_FINSHALL_INTR:
+		ret = fh_pwm_fetch_user_channel(arg, &val);
+		if (ret)
+			return ret;
+		fh_pwm_interrupt_finishall_disable(val);
+		break;
+	case DISABLE_FINSHONCE_INTR:
+		ret = fh_pwm_fetch_user_channel(arg, &val);
+		if (ret)
+			return ret;
+		fh_pwm_interrupt_finishonce_disable(val);
+		break;
+	case WAIT_PWM_FINSHALL:
+		ret = fh_pwm_fetch_user_channel(arg, &val);
+		if (ret)
+			return ret;
+		ret = fh_pwm_wait_done(fh_pwm_drv->chip.pwms[val].chip_data);
+		break;
+	}
+
+	return ret;
+}
+
+/* Character-device hooks for /dev/fh_pwm; all control goes through ioctl. */
+static const struct file_operations fh_pwm_fops = {
+	.owner = THIS_MODULE,
+	.open = fh_pwm_open,
+	.release = fh_pwm_release,
+	.unlocked_ioctl = fh_pwm_ioctl,
+};
+
+/* Misc device (dynamic minor) exposing the ioctl interface above. */
+static struct miscdevice fh_pwm_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = DEVICE_NAME,
+	.fops = &fh_pwm_fops,
+};
+
+/* Callbacks registered with the generic PWM framework via pwmchip_add(). */
+static const struct pwm_ops fh_pwm_ops = {
+	.config = fh_pwm_config,
+	.enable = fh_pwm_enable,
+	.disable = fh_pwm_disable,
+	.owner = THIS_MODULE,
+};
+
+
+/* Remove every occurrence of 'ch' from the NUL-terminated string in place. */
+static void del_char(char *str, char ch)
+{
+	size_t src, dst = 0;
+
+	for (src = 0; str[src] != '\0'; src++) {
+		if (str[src] != ch)
+			str[dst++] = str[src];
+	}
+	str[dst] = '\0';
+}
+
+/*
+ * /proc/driver/pwm write handler.
+ * Parses 8 comma-separated decimal fields:
+ *   id, switch_mask, duty_ns, period_ns, numofpulses, delay_ns,
+ *   phase_ns, stop_status
+ * then programs the channel and applies the output mask.
+ * Returns the number of bytes consumed or a negative errno.
+ */
+static ssize_t fh_pwm_proc_write(struct file *filp,
+		const char *buf, size_t len, loff_t *off)
+{
+	int i;
+	char message[64] = {0};
+	char *const delim = ",";
+	char *cur = message;
+	char *param_str[8];
+	unsigned int param[8];
+	struct fh_pwm_chip_data *chip_data;
+
+	/* Clamp to sizeof - 1 so the buffer is always NUL-terminated;
+	 * the old clamp to 64 could leave strsep() scanning past the end. */
+	len = (len > sizeof(message) - 1) ? sizeof(message) - 1 : len;
+
+	if (copy_from_user(message, buf, len))
+		return -EFAULT;
+
+	for (i = 0; i < 8; i++) {
+		param_str[i] = strsep(&cur, delim);
+		if (!param_str[i]) {
+			pr_err("%s: ERROR: parameter[%d] is empty\n",
+				__func__, i);
+			pr_err("id, switch_mask, duty_ns, period_ns, "
+				"numofpules, delay_ns, phase_ns, stop_status\n");
+			pr_err("eg. echo '0,1,5000,10000,0,0,1000,0' > /proc/driver/pwm\n");
+			return -EINVAL;
+		}
+		del_char(param_str[i], ' ');
+		del_char(param_str[i], '\n');
+		/* simple_strtoul() cannot return a negative unsigned value,
+		 * so the old 'param[i] < 0' check was dead code. */
+		param[i] = (unsigned int)simple_strtoul(param_str[i], NULL, 10);
+	}
+
+	/* Bound-check the channel id just like the ioctl path does. */
+	if (param[0] >= fh_pwm_drv->chip.npwm) {
+		pr_err("%s: ERROR: PWM Channel %u out of range\n",
+			__func__, param[0]);
+		return -EINVAL;
+	}
+
+	chip_data = kzalloc(sizeof(struct fh_pwm_chip_data), GFP_KERNEL);
+	if (chip_data == NULL) {
+		pr_err("pwm proc: chip data malloc failed\n");
+		return -ENOMEM;
+	}
+
+	chip_data->id = param[0];
+	chip_data->config.duty_ns = param[2];
+	chip_data->config.period_ns = param[3];
+	chip_data->config.pulses = param[4];
+	chip_data->config.delay_ns = param[5];
+	chip_data->config.phase_ns = param[6];
+	chip_data->config.stop = param[7];
+
+	fh_pwm_set_config(chip_data);
+
+	printk("set pwm %d, enable: 0x%x, duty cycle: %u ns, period cycle: %u,"
+		"numofpulse: %d, delay: %d ns, phase: %d ns, stop: %d\n",
+		param[0], param[1], param[2], param[3],
+		param[4], param[5], param[6], param[7]);
+
+	fh_pwm_output_mask(param[1]);
+
+	kfree(chip_data);
+
+	return len;
+}
+
+/* seq_file start: single-record iterator — yields one dummy token on the
+ * first call (*pos == 0) and terminates the walk on any later call. */
+static void *v_seq_start(struct seq_file *s, loff_t *pos)
+{
+	static unsigned long counter;
+	if (*pos == 0)
+		return &counter;
+	else {
+		*pos = 0;
+		return NULL;
+	}
+}
+
+/* Only one record is ever produced, so next() always ends the sequence. */
+static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return NULL;
+}
+
+/* Nothing to release: start() takes no locks and allocates nothing. */
+static void v_seq_stop(struct seq_file *s, void *v)
+{
+
+}
+
+/* Dump the three global control registers, then per-channel config as
+ * re-read from the hardware by fh_pwm_get_config(). */
+static int v_seq_show(struct seq_file *sfile, void *v)
+{
+	int i;
+	seq_printf(sfile, "\nPWM Status:\n");
+
+	seq_printf(sfile, "global_ctrl0: 0x%x\n",
+		readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0));
+	seq_printf(sfile, "global_ctrl1: 0x%x\n",
+		readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1));
+	seq_printf(sfile, "global_ctrl2: 0x%x\n",
+		readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2));
+
+	for (i = 0; i < fh_pwm_drv->chip.npwm; i++) {
+		struct fh_pwm_chip_data *chip_data;
+
+		/* chip_data was allocated in probe; refresh it from HW. */
+		chip_data = pwm_get_chip_data(&fh_pwm_drv->chip.pwms[i]);
+		fh_pwm_get_config(chip_data);
+
+		seq_printf(sfile, "id: %d \t%s, duty_ns: %u, period_ns: %u\n",
+			chip_data->id,
+			(chip_data->status.busy) ? "ENABLE" : "DISABLE",
+			chip_data->config.duty_ns,
+			chip_data->config.period_ns);
+	}
+	return 0;
+}
+
+/* seq_file iterator used by the /proc/driver/pwm read path. */
+static const struct seq_operations pwm_seq_ops = {
+	.start = v_seq_start,
+	.next = v_seq_next,
+	.stop = v_seq_stop,
+	.show = v_seq_show
+};
+
+/* Bind the seq_file iterator to a freshly opened /proc/driver/pwm. */
+static int fh_pwm_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &pwm_seq_ops);
+}
+
+
+/* /proc/driver/pwm: read via seq_file, write via the parser above. */
+static const struct file_operations fh_pwm_proc_ops = {
+	.owner = THIS_MODULE,
+	.open = fh_pwm_proc_open,
+	.read = seq_read,
+	.write = fh_pwm_proc_write,
+	.release = seq_release,
+};
+
+/*
+ * PWM IRQ handler.  The status word packs "finish once" and "finish all"
+ * flags per channel; on a "finish all" event the channel state is
+ * refreshed and anyone sleeping in fh_pwm_wait_done() is woken.
+ * Bit layout is inferred from the shifts below — confirm against the
+ * FH PWM datasheet.
+ */
+static irqreturn_t fh_pwm_interrupt(int this_irq, void *param)
+{
+#ifdef CONFIG_PWM_FULLHAN_V21
+	/* v21: channels 0-7 in the low half-word, channels 8-13 in the high. */
+	unsigned int status, stat_once_l, stat_once_h, stat_all_l, stat_all_h;
+	unsigned int irq;
+	struct fh_pwm_chip_data *fh_chip_data;
+
+	status = fh_pwm_interrupt_get_status();
+	status &= 0xffffffff;
+	stat_once_l = (status >> 8) & 0xff;
+	stat_all_l = status & 0xff;
+	stat_once_h = (status >> 24) & 0x3f;
+	stat_all_h = (status >> 16) & 0x3f;
+
+	/* fls() returns a 1-based bit index, hence the "irq - 1" below. */
+	if (stat_once_l) {
+		irq = fls(stat_once_l);
+		fh_pwm_interrupt_finishonce_clear(irq - 1);
+	}
+	if (stat_once_h) {
+		irq = fls(stat_once_h) + 8;
+		fh_pwm_interrupt_finishonce_clear(irq - 1);
+	}
+	if (stat_all_l) {
+		irq = fls(stat_all_l);
+		fh_pwm_interrupt_finishall_clear(irq - 1);
+		fh_chip_data = (struct fh_pwm_chip_data *)fh_pwm_drv->chip.pwms[irq - 1].chip_data;
+		fh_pwm_get_status(fh_chip_data);
+		wake_up_interruptible(&fh_chip_data->wait_done);
+	}
+	if (stat_all_h) {
+		irq = fls(stat_all_h) + 8;
+		fh_pwm_interrupt_finishall_clear(irq - 1);
+		fh_chip_data = (struct fh_pwm_chip_data *)fh_pwm_drv->chip.pwms[irq - 1].chip_data;
+		fh_pwm_get_status(fh_chip_data);
+		wake_up_interruptible(&fh_chip_data->wait_done);
+	}
+#endif
+#ifdef CONFIG_PWM_FULLHAN_V20
+	/* v20: 8 channels only — "once" flags in bits 15:8, "all" in 7:0. */
+	unsigned int status, stat_once, stat_all;
+	struct fh_pwm_chip_data *chip_data;
+	unsigned int irq;
+
+	status = fh_pwm_interrupt_get_status();
+
+	status &= 0xffff;
+
+	stat_once = (status >> 8) & 0xff;
+	stat_all = status & 0xff;
+
+	if (stat_once) {
+		irq = fls(stat_once);
+		chip_data = pwm_get_chip_data(&fh_pwm_drv->chip.pwms[irq - 1]);
+		fh_pwm_interrupt_finishonce_clear(irq - 1);
+	}
+
+	if (stat_all) {
+		irq = fls(stat_all);
+		chip_data = pwm_get_chip_data(&fh_pwm_drv->chip.pwms[irq - 1]);
+		fh_pwm_interrupt_finishall_clear(irq - 1);
+		fh_pwm_get_status(chip_data);
+		wake_up_interruptible(&chip_data->wait_done);
+	}
+#endif
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: claim the register window, enable the PWM clock, hook the IRQ,
+ * and register both the misc char device and the generic PWM chip.
+ * Every failure path now unwinds exactly what was acquired before it
+ * (the original leaked the irq/clk/mapping on misc_register and
+ * pwmchip_add failures, and never checked the kzalloc result).
+ */
+static int __devinit fh_pwm_probe(struct platform_device *pdev)
+{
+	int err, i;
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		err = -ENXIO;
+		goto fail_no_mem_resource;
+	}
+
+	res = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (res == NULL) {
+		err = -EBUSY;
+		goto fail_no_mem_resource;
+	}
+
+	fh_pwm_drv = kzalloc(sizeof(struct fh_pwm_driver), GFP_KERNEL);
+	if (fh_pwm_drv == NULL) {
+		err = -ENOMEM;
+		goto fail_no_drv;
+	}
+
+	fh_pwm_drv->base = ioremap(res->start, resource_size(res));
+	if (fh_pwm_drv->base == NULL) {
+		err = -ENXIO;
+		goto fail_no_ioremap;
+	}
+
+	fh_pwm_drv->clk = clk_get(&pdev->dev, "pwm_clk");
+	if (IS_ERR(fh_pwm_drv->clk)) {
+		err = PTR_ERR(fh_pwm_drv->clk);
+		goto fail_no_clk;
+	}
+
+	clk_enable(fh_pwm_drv->clk);
+
+	PRINT_DBG("%s: clk_rate: %lu\n", __func__, clk_get_rate(fh_pwm_drv->clk));
+
+	err = platform_get_irq(pdev, 0);
+	if (err < 0) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		goto fail_no_irq;
+	}
+
+	fh_pwm_drv->irq = err;
+
+	err = request_irq(fh_pwm_drv->irq,
+			fh_pwm_interrupt, IRQF_DISABLED,
+			dev_name(&pdev->dev), fh_pwm_drv);
+	if (err) {
+		dev_err(&pdev->dev, "failure requesting irq %i\n", fh_pwm_drv->irq);
+		goto fail_no_irq;
+	}
+
+	err = misc_register(&fh_pwm_misc);
+	if (err < 0) {
+		pr_err("%s: ERROR: %s registration failed",
+			__func__, DEVICE_NAME);
+		err = -ENXIO;
+		goto fail_no_misc;
+	}
+
+	fh_pwm_drv->chip.dev = &pdev->dev;
+	fh_pwm_drv->chip.ops = &fh_pwm_ops;
+	fh_pwm_drv->chip.base = pdev->id;
+	fh_pwm_drv->chip.npwm = CONFIG_FH_PWM_NUM;
+
+	err = pwmchip_add(&fh_pwm_drv->chip);
+	if (err < 0) {
+		pr_err("%s: ERROR: %s pwmchip_add failed",
+			__func__, DEVICE_NAME);
+		goto fail_no_chip;
+	}
+
+	/* Per-channel bookkeeping consumed by the ioctl/proc interfaces. */
+	for (i = 0; i < fh_pwm_drv->chip.npwm; i++) {
+		struct fh_pwm_chip_data *chip_data;
+
+		chip_data = kzalloc(sizeof(struct fh_pwm_chip_data), GFP_KERNEL);
+		if (chip_data == NULL) {
+			pr_err("pwm[%d], chip data malloc failed\n", i);
+			continue;
+		}
+
+		chip_data->id = i;
+
+		pwm_set_chip_data(&fh_pwm_drv->chip.pwms[i], chip_data);
+	}
+
+	/* All outputs off until userspace asks for them. */
+	fh_pwm_output_mask(0);
+
+	platform_set_drvdata(pdev, fh_pwm_drv);
+
+	pr_info("PWM driver, Number: %d, IO base addr: 0x%p\n",
+		fh_pwm_drv->chip.npwm, fh_pwm_drv->base);
+
+	fh_pwm_drv->proc_file = create_proc_entry(FH_PWM_PROC_FILE, 0644, NULL);
+	if (fh_pwm_drv->proc_file)
+		fh_pwm_drv->proc_file->proc_fops = &fh_pwm_proc_ops;
+	else
+		pr_err("%s: ERROR: %s proc file create failed",
+			__func__, DEVICE_NAME);
+
+	dev_dbg(&pdev->dev, "PWM probe successful, IO base addr: %p\n",
+		fh_pwm_drv->base);
+	return 0;
+
+fail_no_chip:
+	misc_deregister(&fh_pwm_misc);
+fail_no_misc:
+	free_irq(fh_pwm_drv->irq, fh_pwm_drv);
+fail_no_irq:
+	clk_disable(fh_pwm_drv->clk);
+	clk_put(fh_pwm_drv->clk);
+fail_no_clk:
+	iounmap(fh_pwm_drv->base);
+fail_no_ioremap:
+	kfree(fh_pwm_drv);
+	fh_pwm_drv = NULL;
+fail_no_drv:
+	release_mem_region(res->start, resource_size(res));
+fail_no_mem_resource:
+	return err;
+}
+
+/*
+ * Tear down everything probe set up.  Two bugs fixed: the driver struct
+ * came from kzalloc() and must be released with kfree(), not userspace
+ * free(); and free_irq() must receive the same dev_id that was passed
+ * to request_irq() (fh_pwm_drv), or the handler is never unhooked.
+ */
+static int __exit fh_pwm_remove(struct platform_device *pdev)
+{
+	int err, i;
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	for (i = 0; i < fh_pwm_drv->chip.npwm; i++)
+		kfree(fh_pwm_drv->chip.pwms[i].chip_data);
+
+	err = pwmchip_remove(&fh_pwm_drv->chip);
+	if (err < 0)
+		return err;
+
+	dev_dbg(&pdev->dev, "pwm driver removed\n");
+
+	fh_pwm_output_mask(0);
+	clk_disable(fh_pwm_drv->clk);
+	clk_put(fh_pwm_drv->clk);
+	free_irq(fh_pwm_drv->irq, fh_pwm_drv);
+	iounmap(fh_pwm_drv->base);
+	release_mem_region(res->start, resource_size(res));
+	platform_set_drvdata(pdev, NULL);
+	misc_deregister(&fh_pwm_misc);
+
+	kfree(fh_pwm_drv);
+	fh_pwm_drv = NULL;
+
+	return 0;
+}
+
+/* Platform driver glue; matched by name against the board's pdev. */
+static struct platform_driver fh_pwm_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = fh_pwm_probe,
+	.remove = __exit_p(fh_pwm_remove),
+};
+
+/* Module entry: register the platform driver (probe runs on match). */
+static int __init fh_pwm_init(void)
+{
+	return platform_driver_register(&fh_pwm_driver);
+}
+
+/* Module teardown: detach the platform driver (remove() does the work). */
+static void __exit fh_pwm_exit(void)
+{
+	platform_driver_unregister(&fh_pwm_driver);
+}
+
+module_init(fh_pwm_init);
+module_exit(fh_pwm_exit);
+
+
+MODULE_AUTHOR("fullhan");
+
+MODULE_DESCRIPTION("FH PWM driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
diff --git a/drivers/pwm/pwmv2-fullhan.h b/drivers/pwm/pwmv2-fullhan.h
new file mode 100644
index 00000000..6d05dcc8
--- /dev/null
+++ b/drivers/pwm/pwmv2-fullhan.h
@@ -0,0 +1,70 @@
+#ifndef FH_PMU_H_
+#define FH_PMU_H_
+
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <linux/wait.h>
+
+#define DEVICE_NAME "fh_pwm"
+#define FH_PWM_PROC_FILE "driver/pwm"
+
+#define REG_PWM_CTRL (0x00)
+#define REG_PWM_CMD(n) (((n) * 4) + REG_PWM_CTRL + 4)
+
+#define PWM_IOCTL_MAGIC 'p'
+#define ENABLE_PWM _IOWR(PWM_IOCTL_MAGIC, 0, __u32)
+#define DISABLE_PWM _IOWR(PWM_IOCTL_MAGIC, 1, __u32)
+
+#define SET_PWM_DUTY_CYCLE _IOWR(PWM_IOCTL_MAGIC, 2, __u32)
+#define GET_PWM_DUTY_CYCLE _IOWR(PWM_IOCTL_MAGIC, 3, __u32)
+#define SET_PWM_DUTY_CYCLE_PERCENT _IOWR(PWM_IOCTL_MAGIC, 4, __u32)
+#define SET_PWM_ENABLE _IOWR(PWM_IOCTL_MAGIC, 5, __u32)
+#define ENABLE_MUL_PWM _IOWR(PWM_IOCTL_MAGIC, 6, __u32)
+#define ENABLE_FINSHALL_INTR _IOWR(PWM_IOCTL_MAGIC, 7, __u32)
+#define ENABLE_FINSHONCE_INTR _IOWR(PWM_IOCTL_MAGIC, 8, __u32)
+#define DISABLE_FINSHALL_INTR _IOWR(PWM_IOCTL_MAGIC, 9, __u32)
+#define DISABLE_FINSHONCE_INTR _IOWR(PWM_IOCTL_MAGIC, 10, __u32)
+#define WAIT_PWM_FINSHALL _IOWR(PWM_IOCTL_MAGIC, 12, __u32)
+
+#define PWM_IOCTL_MAXNR 16
+
+/* One channel's requested waveform.  All times are in nanoseconds. */
+struct fh_pwm_config
+{
+	unsigned int period_ns;
+	unsigned int duty_ns;
+	/* number of pulses to emit; 0 presumably means free-running — confirm */
+	unsigned int pulses;
+#define FH_PWM_STOPLVL_LOW (0x0)
+#define FH_PWM_STOPLVL_HIGH (0x3)
+#define FH_PWM_STOPLVL_KEEP (0x1)
+
+#define FH_PWM_STOPCTRL_ATONCE (0x10)
+#define FH_PWM_STOPCTRL_AFTERFINISH (0x00)
+	/* combination of the FH_PWM_STOPLVL_*/FH_PWM_STOPCTRL_* flags above */
+	unsigned int stop;
+	unsigned int delay_ns;
+	unsigned int phase_ns;
+	/* duty as a 0-100 percentage (SET_PWM_DUTY_CYCLE_PERCENT ioctl) */
+	unsigned int percent;
+	unsigned int finish_once;
+	unsigned int finish_all;
+};
+
+/* Channel status as read back from the hardware. */
+struct fh_pwm_status
+{
+	unsigned int done_cnt;
+	unsigned int total_cnt;
+	unsigned int busy;
+	unsigned int error;
+};
+
+/* Per-channel driver state stored via pwm_set_chip_data(). */
+struct fh_pwm_chip_data
+{
+	int id;
+	struct fh_pwm_config config;
+	struct fh_pwm_status status;
+	/* woken by the IRQ handler on a "finish all" event */
+	wait_queue_head_t wait_done;
+	void (*finishall_callback)(struct fh_pwm_chip_data *data);
+	void (*finishonce_callback)(struct fh_pwm_chip_data *data);
+};
+
+
+
+#endif /* FH_PMU_H_ */
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index ce2aabf5..96fa8993 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1060,4 +1060,37 @@ config RTC_DRV_PUV3
This drive can also be built as a module. If so, the module
will be called rtc-puv3.
+
+config RTC_DRV_FH
+ tristate "FH On-Chip RTC"
+ depends on RTC_CLASS
+ depends on ! (ARCH_FH8856 || ARCH_ZY2 || ARCH_FH8626V100)
+ help
+ Say Y here to enable support for the on-chip RTC found in
+ FH processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rtc-fh.
+
+config RTC_DRV_FH_V2
+ tristate "FH On-Chip RTC"
+ depends on RTC_CLASS
+ depends on ARCH_FH8856 || ARCH_ZY2 || ARCH_FH8626V100
+ help
+ Say Y here to enable support for the on-chip RTC found in
+ FH processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rtc-fh.
+if RTC_DRV_FH_V2
+ config USE_TSENSOR
+ bool 'USE TSENSOR'
+ default n
+
+ config USE_TSENSOR_OFFSET
+ bool 'USE ENHANCEMENT TSENSOR'
+ default n
+endif
+
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0ffefe87..337f688e 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -108,3 +108,6 @@ obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
+
+obj-$(CONFIG_RTC_DRV_FH) += rtc-fh_v1.o
+obj-$(CONFIG_RTC_DRV_FH_V2) += rtc-fh_v2.o
diff --git a/drivers/rtc/rtc-fh_v1.c b/drivers/rtc/rtc-fh_v1.c
new file mode 100644
index 00000000..b9ae1cdf
--- /dev/null
+++ b/drivers/rtc/rtc-fh_v1.c
@@ -0,0 +1,902 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/fh_rtc_v1.h>
+#include <mach/fh_sadc.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+
+/* #define FH_RTC_DEBUG_PRINT */
+
+#ifdef FH_RTC_DEBUG_PRINT
+/* do-while(0) wrapper keeps both printk()s bound together when the macro
+ * is used as the sole statement of an unbraced if/else (the original
+ * expanded to two statements and broke in that position). */
+#define RTC_PRINT_DBG(fmt, args...) do { \
+	printk(KERN_INFO "[FH_RTC_DEBUG]: "); \
+	printk(fmt, ## args); \
+} while (0)
+#else
+#define RTC_PRINT_DBG(fmt, args...) do { } while (0)
+#endif
+
+#define RTC_MAGIC 0x55555555
+#define RTC_PHASE 0x03840384
+
+#define SYNC_LOOP_COUNT 100
+
+/* Driver state for the on-chip RTC; a single global instance is used. */
+struct fh_rtc_controller {
+	void * __iomem regs;		/* mapped register window */
+	unsigned int irq;		/* note: unsigned — error checks need a signed local */
+	unsigned int paddr;		/* physical base, informational */
+	unsigned int base_year;
+	unsigned int base_month;
+	unsigned int base_day;
+	struct rtc_device *rtc;
+	struct clk *clk;
+	struct proc_dir_entry *proc_rtc_entry;
+	int sadc_channel;		/* SADC channel for temperature; < 0 disables */
+
+	/* periodic temperature-compensation work */
+	struct workqueue_struct *wq;
+	struct delayed_work self_adjust;
+};
+
+struct fh_rtc_controller *fh_rtc;
+
+/* Selector for fh_rtc_{get,set}_hw_sec_data: time counter vs alarm reg. */
+enum {
+
+	TIME_FUNC = 0,
+	ALARM_FUNC,
+
+};
+
+/* value of SADC channel for reference to get current temperature */
+long SadcValue[28] = {
+ 260, 293, 332, 375, 426,
+ 483, 548, 621, 706, 800,
+ 906, 1022, 1149, 1287, 1435,
+ 1590, 1750, 1913, 2075, 2233,
+ 2385, 2527, 2656, 2772, 2873,
+ 2960, 3034, 3094
+};
+
+/* value of temperature for reference */
+int Temperature1[28] = {
+ 95000, 90000, 85000, 80000, 75000,
+ 70000, 65000, 60000, 55000, 50000,
+ 45000, 40000, 35000, 30000, 25000,
+ 20000, 15000, 10000, 4000, 0,
+ -5000, -10000, -15000, -20000, -25000,
+ -30000, -35000, -40000
+};
+
+/* value of temperature for reference to get current deviation */
+int Temperature2[136] = {
+ -40000, -39000, -38000, -37000, -36000, -35000,
+ -34000, -33000, -32000, -31000, -30000, -29000,
+ -28000, -27000, -26000, -25000, -24000, -23000,
+ -22000, -21000, -20000, -19000, -18000, -17000,
+ -16000, -15000, -14000, -13000, -12000, -11000,
+ -10000, -9000, -8000, -7000, -6000, -5000, -4000,
+ -3000, -2000, -1000, 0, 1000, 2000, 3000, 4000,
+ 5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000,
+ 13000, 14000, 15000, 16000, 17000, 18000, 19000,
+ 20000, 21000, 22000, 23000, 24000, 25000, 26000,
+ 27000, 28000, 29000, 30000, 31000, 32000, 33000,
+ 34000, 35000, 36000, 37000, 38000, 39000, 40000,
+ 41000, 42000, 43000, 44000, 45000, 46000, 47000,
+ 48000, 49000, 50000, 51000, 52000, 53000, 54000,
+ 55000, 56000, 57000, 58000, 59000, 60000, 61000,
+ 62000, 63000, 64000, 65000, 66000, 67000, 68000,
+ 69000, 70000, 71000, 72000, 73000, 74000, 75000,
+ 76000, 77000, 78000, 79000, 80000, 81000, 82000,
+ 83000, 84000, 85000, 86000, 87000, 88000, 89000,
+ 90000, 91000, 92000, 93000, 94000, 95000
+};
+
+/* the value of deviation to adjust rtc clock */
+long Deviation[136] = {
+ 1690000, 1638400, 1587600, 1537600, 1488400, 1440000,
+ 1392400, 1345600, 1299600, 1254400, 1210000, 1166400,
+ 1123600, 1081600, 1040400, 1000000, 960400, 921600,
+ 883600, 846400, 810000, 774400, 739600, 705600, 672400,
+ 640000, 608400, 577600, 547600, 518400, 490000, 462400,
+ 435600, 409600, 384400, 360000, 336400, 313600, 291600,
+ 270400, 250000, 230400, 211600, 193600, 176400, 160000,
+ 144400, 129600, 115600, 102400, 90000, 78400, 67600, 57600,
+ 48400, 40000, 32400, 25600, 19600, 14400, 10000, 6400,
+ 3600, 1600, 400, 0, 400, 1600, 3600, 6400, 10000, 14400,
+ 19600, 25600, 32400, 40000, 48400, 57600, 67600, 78400,
+ 90000, 102400, 115600, 129600, 144400, 160000, 176400,
+ 193600, 211600, 230400, 250000, 270400, 291600, 313600,
+ 336400, 360000, 384400, 409600, 435600, 462400, 490000,
+ 518400, 547600, 577600, 608400, 640000, 672400, 705600,
+ 739600, 774400, 810000, 846400, 883600, 921600, 960400,
+ 1000000, 1040400, 1081600, 1123600, 1166400, 1210000,
+ 1254400, 1299600, 1345600, 1392400, 1440000, 1488400,
+ 1537600, 1587600, 1638400, 1690000, 1742400, 1795600,
+ 1849600, 1904400, 1960000
+};
+
+/* Program a per-second speed-up offset of n (0-7) into FH_RTC_OFFSET.
+ * Bits [26:24] hold n; 0x30000000 presumably selects "accelerate" mode
+ * in bits [29:28] — confirm against the RTC register manual. */
+static int accelerate_second_rtc(int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_rtc->regs + FH_RTC_OFFSET);
+	reg &= ~(0x7000000);
+	reg |= 0x30000000 | ((n & 0x7) << 24);
+	writel(reg, fh_rtc->regs + FH_RTC_OFFSET);
+	return 0;
+}
+
+/* Program a per-minute speed-up offset of m (0-63) into bits [21:16]
+ * of FH_RTC_OFFSET; mode bits as in accelerate_second_rtc(). */
+static int accelerate_minute_rtc(int m)
+{
+	unsigned int reg;
+
+	reg = readl(fh_rtc->regs + FH_RTC_OFFSET);
+	reg &= ~(0x3F0000);
+	reg |= 0x30000000 | ((m & 0x3f) << 16);
+	writel(reg, fh_rtc->regs + FH_RTC_OFFSET);
+	return 0;
+}
+
+/* Program a per-second slow-down offset of n (0-7); 0x10000000 presumably
+ * selects "slow down" mode — confirm against the RTC register manual.
+ * Currently unused by this file but kept as part of the calibration API. */
+static int slow_down_second_rtc(int n)
+{
+	unsigned int reg;
+
+	reg = readl(fh_rtc->regs + FH_RTC_OFFSET);
+	reg &= ~(0x7000000);
+	reg |= 0x10000000 | ((n & 0x7) << 24);
+	writel(reg, fh_rtc->regs + FH_RTC_OFFSET);
+	return 0;
+}
+
+/* Per-minute slow-down companion of slow_down_second_rtc(). */
+static int slow_down_minute_rtc(int m)
+{
+	unsigned int reg;
+
+	reg = readl(fh_rtc->regs + FH_RTC_OFFSET);
+	reg &= ~(0x3F0000);
+	reg |= 0x10000000 | ((m & 0x3f) << 16);
+	writel(reg, fh_rtc->regs + FH_RTC_OFFSET);
+	return 0;
+}
+
+/* Read the raw day/hour/min/sec fields from either the time counter
+ * (TIME_FUNC) or the alarm register, and collapse them into seconds
+ * since the hardware epoch. */
+static unsigned int fh_rtc_get_hw_sec_data(unsigned int func_switch)
+{
+
+	unsigned int ret_sec, raw_value, sec_value;
+	unsigned int min_value, hour_value, day_value;
+
+	if (func_switch == TIME_FUNC)
+		raw_value = fh_rtc_get_time(fh_rtc->regs);
+	else
+		raw_value = fh_rtc_get_alarm_time(fh_rtc->regs);
+
+	sec_value = FH_GET_RTC_SEC(raw_value);
+	min_value = FH_GET_RTC_MIN(raw_value);
+	hour_value = FH_GET_RTC_HOUR(raw_value);
+	day_value = FH_GET_RTC_DAY(raw_value);
+	/* 86400 s/day, 3600 s/hour */
+	ret_sec = (day_value * 86400) + (hour_value * 3600)
+		+ (min_value * 60) + sec_value;
+
+	return ret_sec;
+
+}
+
+/* Convert a struct rtc_time into the packed day/hour/min/sec register
+ * word (days counted from 1970, leap years included) and write it to
+ * either the time counter or the alarm register. */
+static void fh_rtc_set_hw_sec_data(struct rtc_time *rtc_tm,
+		unsigned int func_switch) {
+
+	unsigned int raw_value, sec_value, min_value;
+	unsigned int hour_value, day_value;
+
+	/* days elapsed in the current year ... */
+	day_value = rtc_year_days(rtc_tm->tm_mday, rtc_tm->tm_mon,
+			rtc_tm->tm_year+1900);
+	/* ... plus whole years since 1970 (tm_year is years since 1900) */
+	day_value += (rtc_tm->tm_year-70)*365
+		+ ELAPSED_LEAP_YEARS(rtc_tm->tm_year);
+
+	hour_value = rtc_tm->tm_hour;
+	min_value = rtc_tm->tm_min;
+	sec_value = rtc_tm->tm_sec;
+
+	raw_value = (day_value << DAY_BIT_START)
+		| (hour_value << HOUR_BIT_START)
+		| (min_value << MIN_BIT_START)
+		| (sec_value << SEC_BIT_START);
+
+	if (func_switch == TIME_FUNC)
+		fh_rtc_set_time(fh_rtc->regs, raw_value);
+	else
+		fh_rtc_set_alarm_time(fh_rtc->regs, raw_value);
+
+}
+
+/* Verify the RTC retained the RTC_MAGIC marker written at set-time.
+ * On mismatch, kick the sync bit and retry up to 10 times (30 ms apart)
+ * before reporting -EAGAIN. */
+static int fh_rtc_exam_magic(void)
+{
+	unsigned int magic, status;
+	int i;
+
+	for (i = 0; i < 10; i++) {
+		magic = GET_REG(fh_rtc->regs + FH_RTC_USER_REG);
+
+		if (magic != RTC_MAGIC) {
+			/* request a fresh sync from the RTC core and retry */
+			status = GET_REG(fh_rtc->regs+FH_RTC_SYNC);
+			status &= 0x2;
+			SET_REG(fh_rtc->regs+FH_RTC_SYNC, status);
+
+			msleep(30);
+		} else {
+			return 0;
+		}
+	}
+
+	printk(KERN_INFO "ERROR: read rtc failed: 0x%x\n", magic);
+
+	return -EAGAIN;
+
+}
+
+/* No per-open state — nothing to do. */
+static int fh_rtc_open(struct device *dev)
+{
+	return 0;
+}
+
+/* Counterpart of fh_rtc_open(); intentionally a no-op. */
+static void fh_rtc_release(struct device *dev)
+{
+	return;
+}
+
+/* Check that a read-back time (tm0) agrees with the written time (tm1)
+ * within a 2-second tolerance (the read may lag the write slightly).
+ * Returns 0 on match, -1 on mismatch. */
+static int fh_rtc_tm_compare(struct rtc_time *tm0, struct rtc_time *tm1)
+{
+	unsigned long read = 0, write = 0;
+
+	rtc_tm_to_time(tm0, &read);
+	rtc_tm_to_time(tm1, &write);
+
+	/* accept write in [read - 2, read] */
+	if (write > read || write < read - 2) {
+		RTC_PRINT_DBG("ERROR: read(%d-%d-%d %d:%d:%d) vs "
+			"write(%d-%d-%d %d:%d:%d)\n",
+			tm0->tm_year + 1900,
+			tm0->tm_mon + 1,
+			tm0->tm_mday,
+			tm0->tm_hour,
+			tm0->tm_min,
+			tm0->tm_sec,
+			tm1->tm_year + 1900,
+			tm1->tm_mon + 1,
+			tm1->tm_mday,
+			tm1->tm_hour,
+			tm1->tm_min,
+			tm1->tm_sec);
+		return -1;
+	}
+	return 0;
+}
+
+/* Fast read path: convert the counter as-is, without forcing a sync
+ * handshake first (used as the rtc_class_ops read_time callback). */
+static int fh_rtc_gettime_nosync(struct device *dev, struct rtc_time *rtc_tm)
+{
+	unsigned int temp;
+
+	temp = fh_rtc_get_hw_sec_data(TIME_FUNC);
+	rtc_time_to_tm(temp, rtc_tm);
+	RTC_PRINT_DBG("rtc read date:0x%x\n", temp);
+	return 0;
+}
+
+
+/* Synchronized read: request a sync (bit 1 of FH_RTC_SYNC), then poll
+ * the done flag (bit 0) up to SYNC_LOOP_COUNT times before converting
+ * the counter.  Returns -EAGAIN if the sync never completes. */
+static int fh_rtc_gettime_sync(struct device *dev, struct rtc_time *rtc_tm)
+{
+	unsigned int status;
+	unsigned int loop_count;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fh_rtc_controller *fh_rtc = platform_get_drvdata(pdev);
+
+	status = GET_REG(fh_rtc->regs+FH_RTC_SYNC);
+	status &= 0x2;
+
+	SET_REG(fh_rtc->regs+FH_RTC_SYNC, status);
+	msleep(30);
+
+	for (loop_count = 0;
+		loop_count <= SYNC_LOOP_COUNT;
+		loop_count++) {
+		udelay(100);
+		status = GET_REG(fh_rtc->regs+FH_RTC_SYNC);
+		status &= 0x1;
+		if(status == 1) {
+			unsigned int temp;
+			temp = fh_rtc_get_hw_sec_data(TIME_FUNC);
+			rtc_time_to_tm(temp, rtc_tm);
+			RTC_PRINT_DBG("rtc read date:0x%x\n", temp);
+			return 0;
+		}
+
+	}
+
+	printk(KERN_INFO "rtc read sync fail!\n");
+	return -EAGAIN;
+}
+
+/* Write a new time to the hardware and verify it stuck.
+ * Protocol (order-sensitive, left untouched): mark the user register
+ * with RTC_MAGIC, then up to 5 outer attempts of { write counter, poll
+ * the sync-busy flag, rewrite up to 5 times if the sync was slow, then
+ * read back 5 times and compare }.  Returns 0 on verified success,
+ * -EAGAIN on sync timeout, -1 if verification never passes. */
+static int fh_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+	struct rtc_time rtc_tm_read0;
+	unsigned int status;
+	unsigned int loop_count;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fh_rtc_controller *fh_rtc = platform_get_drvdata(pdev);
+	int cnt, ret, read_count = 0;
+
+	RTC_PRINT_DBG("rtc write %d-%d-%d %d:%d:%d\n",
+		tm->tm_year + 1900,
+		tm->tm_mon + 1,
+		tm->tm_mday,
+		tm->tm_hour,
+		tm->tm_min,
+		tm->tm_sec);
+
+	/* marker later checked by fh_rtc_exam_magic() */
+	SET_REG(fh_rtc->regs + FH_RTC_USER_REG, RTC_MAGIC);
+	msleep(3);
+
+	for (cnt = 0; cnt < 5; cnt++) {
+		int rewrite_count = 0;
+REWRITE:
+		ret = 0;
+
+		fh_rtc_set_hw_sec_data(tm, TIME_FUNC);
+
+		/*spin_lock_irqsave(&rtc_lock, flag);*/
+
+		/* wait for the write-sync (bit 1) to be acknowledged */
+		for (loop_count = 0;
+			loop_count <= SYNC_LOOP_COUNT;
+			loop_count++) {
+			udelay(100);
+
+			status = GET_REG(fh_rtc->regs+FH_RTC_SYNC);
+			status &= 0x2;
+			if (status == 0x2) {
+				printk(KERN_INFO "rtc write loop_count :%d\n",
+					loop_count);
+				/* a slow ack is treated as suspect: rewrite */
+				if(loop_count > 20) {
+					RTC_PRINT_DBG("error: rewrite: %d, "
+						"rtc write loop_count :%d\n",
+						rewrite_count,
+						loop_count);
+					msleep(3);
+					rewrite_count++;
+					if (rewrite_count < 5) {
+						goto REWRITE;
+					} else {
+						RTC_PRINT_DBG("rtc write retry exceed\n");
+						msleep(3);
+						break;
+					}
+				}
+				/*spin_unlock_irqrestore(&rtc_lock, flag);*/
+				msleep(3);
+				break;
+			}
+		}
+
+		if (loop_count >= SYNC_LOOP_COUNT) {
+			printk(KERN_INFO "rtc write sync fail!\n");
+			return -EAGAIN;
+		}
+
+		/* verify: 5 synchronized read-backs must all match */
+		for (read_count = 0; read_count < 5; read_count++) {
+			fh_rtc_gettime_sync(dev, &rtc_tm_read0);
+			ret += fh_rtc_tm_compare(&rtc_tm_read0, tm);
+		}
+
+		if (!ret) {
+			return 0;
+		}
+
+	}
+
+	return -1;
+}
+
+/* Report the current alarm register as a broken-down time. */
+static int fh_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct rtc_time *rtc_tm = &alrm->time;
+
+	rtc_time_to_tm(fh_rtc_get_hw_sec_data(ALARM_FUNC), rtc_tm);
+
+	return 0;
+}
+
+/* Program the alarm register; alrm->enabled is ignored here (interrupt
+ * gating is handled separately by fh_rtc_irq_enable()). */
+static int fh_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct rtc_time *rtc_tm = &alrm->time;
+
+	fh_rtc_set_hw_sec_data(rtc_tm, ALARM_FUNC);
+
+	return 0;
+}
+
+
+
+/* Gate both the per-second tick and the alarm interrupt together
+ * (the hardware mask is toggled as one unit here). */
+static int fh_rtc_irq_enable(struct device *dev, unsigned int enabled)
+{
+
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fh_rtc_controller *fh_rtc = platform_get_drvdata(pdev);
+
+	if (enabled) {
+		fh_rtc_enable_interrupt(fh_rtc->regs,
+			FH_RTC_ISR_SEC_POS | FH_RTC_ISR_ALARM_POS);
+	}
+	else{
+
+		fh_rtc_disenable_interrupt(fh_rtc->regs,
+			FH_RTC_ISR_SEC_POS | FH_RTC_ISR_ALARM_POS);
+	}
+
+	return 0;
+}
+
+
+
+
+/* RTC interrupt: acknowledge the pending source and forward a tick
+ * (RTC_UF) or alarm (RTC_AF) event to the RTC core. */
+static irqreturn_t fh_rtc_irq(int irq, void *dev_id)
+{
+
+	struct fh_rtc_controller *fh_rtc = (struct fh_rtc_controller *)dev_id;
+	unsigned long events = 0;
+	unsigned int isr_status;
+	struct rtc_device *rtc = fh_rtc->rtc;
+
+	isr_status = fh_rtc_get_enabled_interrupt(fh_rtc->regs);
+
+	/* ack everything we observed before classifying it */
+	fh_rtc_clear_interrupt_status(fh_rtc->regs, isr_status);
+
+	if (isr_status & FH_RTC_ISR_SEC_POS) {
+
+		events |= RTC_IRQF | RTC_UF;
+	}
+	else if(isr_status & FH_RTC_ISR_ALARM_POS){
+		events |= RTC_IRQF | RTC_AF;
+	}
+	else{
+		pr_info("rtc unknown isr...\n");
+		return IRQ_HANDLED;
+	}
+	rtc_update_irq(rtc, 1, events);
+
+	return IRQ_HANDLED;
+
+}
+
+/* rtc_class_ops table; read_time deliberately uses the no-sync path. */
+static const struct rtc_class_ops fh_rtcops = {
+	.open = fh_rtc_open,
+	.release = fh_rtc_release,
+	.read_time = fh_rtc_gettime_nosync,
+	.set_time = fh_rtc_settime,
+	.read_alarm = fh_rtc_getalarm,
+	.set_alarm = fh_rtc_setalarm,
+	.alarm_irq_enable = fh_rtc_irq_enable,
+};
+
+/*get the read of SADC and adjust RTC clock*/
+int fh_adjust_rtc(void)
+{
+ int m, n; /*m:MinuteOffset, n:SecondOffset*/
+ long T = 25000;
+ int i, j, temp;
+ long Ppm = 0;
+ long value[7];
+ int flag = 0;
+ long sum = 0;
+ long v;
+ int num;
+
+ for (i = 0; i < 7; i++) {
+ value[i] = fh_sadc_get_value(fh_rtc->sadc_channel);
+ if(!value[i])
+ {
+ printk("ERROR: %s, sadc value %lu is incorrect\n",
+ __func__, value[i]);
+ return -EIO;
+ }
+ mdelay(100);
+ }
+ for (i = 0; i < 7; i++) {
+ for (j = i + 1; j < 7; j++) {
+ if (value[j] < value[i]) {
+ temp = value[i];
+ value[i] = value[j];
+ value[j] = temp;
+ }
+ }
+ }
+ sum = value[2] + value[3] + value[4];
+ v = sum / 3;
+ printk("the average value of SADC is:%ld\n", v);
+ if(v >= 3094) { /*if temperature is lower than -40℃,adjust by -40℃*/
+ Ppm = 1690000;
+ n = Ppm / 305176;
+ Ppm -= 305176 * n;
+ m = Ppm / 5086;
+ printk("SecondOffset is: %d\n", n);
+ printk("MinuteOffset is: %d\n", m);
+ if ((n <= 7) && (m <= 63)) {
+ accelerate_second_rtc(n);
+ accelerate_minute_rtc(m);
+ printk("rtc clock has been adjusted!\n");
+ } else {
+ printk("beyond range of adjust\n");
+ }
+ return 0;
+ }
+ if(v < 260) { /*if temperature is higher than 95℃,adjust by 95℃*/
+ Ppm = 1960000;
+ n = Ppm / 305176;
+ Ppm -= 305176 * n;
+ m = Ppm / 5086;
+ printk("SecondOffset is: %d\n", n);
+ printk("MinuteOffset is: %d\n", m);
+ if ((n <= 7) && (m <= 63)) {
+ accelerate_second_rtc(n);
+ accelerate_minute_rtc(m);
+ printk("rtc clock has been adjusted!\n");
+ } else {
+ printk("beyond range of adjust\n");
+ }
+
+ return 0;
+ }
+ for (i = 0; i < 27; i++) { /*calculate temperature by voltage*/
+ if ((v >= SadcValue[i]) && (v < SadcValue[i+1])) {
+ T = Temperature1[i] - ((Temperature1[i] - Temperature1[i+1]) *
+ (SadcValue[i] - v) / (SadcValue[i] - SadcValue[i+1]));
+ } else {
+ //printk("the reading of SADC is beyond of voltage range\n");
+ continue;
+ }
+ }
+ for (i = 0; i < 135; i++) { /*get deviation by temperature*/
+ if ((T >= Temperature2[i]) && (T < Temperature2[i+1])) {
+ num = i;
+ flag = 1;
+ if ((Temperature2[num+1] - T) <= 500) {
+ T = Temperature2[num + 1];
+ Ppm = Deviation[num + 1];
+ } else if ((Temperature2[num+1] - T) > 500) {
+ T = Temperature2[num];
+ Ppm = Deviation[num];
+ }
+ printk("current temperature is: %ld\n", T);
+ printk("current deviation of RTC crystal oscillator is: %ld\n", Ppm);
+ }
+ }
+ if (flag == 1) {
+ n = Ppm / 305176;
+ Ppm -= 305176 * n;
+ m = Ppm / 5086;
+ printk("SecondOffset is: %d\n", n);
+ printk("MinuteOffset is: %d\n", m);
+ if ((n <= 7) && (m <= 63)) {
+ accelerate_second_rtc(n);
+ accelerate_minute_rtc(m);
+ printk("rtc clock has been adjusted!\n");
+ } else {
+ printk("beyond range of adjust\n");
+ }
+ }
+ return 0;
+}
+
+/*
+ * Sample the SADC and print the derived temperature (same median-of-3
+ * scheme as fh_adjust_rtc()).
+ * NOTE(review): despite the name and 'long' return type, this always
+ * returns 0 on success (never T, which is only printed) — callers must
+ * not rely on the return value for the temperature; confirm intent.
+ */
+long get_rtc_temperature(void)
+{
+	long T = 0;
+	int i, j, temp;
+	long value[7];
+	long sum = 0;
+	long v;
+	for (i = 0; i < 7; i++) {
+		value[i] = fh_sadc_get_value(fh_rtc->sadc_channel);
+		if(!value[i])
+		{
+			printk("ERROR: %s, sadc value %lu is incorrect\n",
+				__func__, value[i]);
+			return -EIO;
+		}
+		mdelay(100);
+	}
+	for (i = 0; i < 7; i++) {
+		for (j = i + 1; j < 7; j++) {
+			if (value[j] < value[i]) {
+				temp = value[i];
+				value[i] = value[j];
+				value[j] = temp;
+			}
+		}
+	}
+	sum = value[2] + value[3] + value[4];
+	v = sum / 3;
+	printk("the average value of SADC is:%ld\n", v);
+	for (i = 0; i < 27; i++) {
+		if ((v >= SadcValue[i]) && (v < SadcValue[i+1])) {
+			T = Temperature1[i] - ((Temperature1[i] - Temperature1[i+1]) *
+				(SadcValue[i] - v) / (SadcValue[i] - SadcValue[i+1]));
+		} else {
+			//printk("the reading of SADC is beyond of voltage range\n");
+			continue;
+		}
+	}
+	printk("current temperature is: %ld\n", T);
+	return 0;
+}
+
+/* Periodic compensation work: adjust once, then re-arm itself.
+ * The delay is 5000 raw jiffies (not ms) — presumably intentional;
+ * confirm whether msecs_to_jiffies() was meant. */
+void fh_rtc_self_adjustment(struct work_struct *work)
+{
+	fh_adjust_rtc();
+
+	queue_delayed_work(fh_rtc->wq, &fh_rtc->self_adjust, 5000);
+}
+
+
+static void create_proc_rtc(struct fh_rtc_controller *rtc);
+static void remove_proc(void);
+/*
+ * Probe the on-chip RTC: read platform data, claim and map the register
+ * window, register the RTC class device, hook the IRQ, create the proc
+ * entry, and (when a SADC channel is configured) start the periodic
+ * temperature-compensation work.
+ * Fixes vs. the original: platform_get_irq() failure is now detectable
+ * (fh_rtc->irq is unsigned, so the old '< 0' test was always false),
+ * the redundant memset after kzalloc is gone, and every error path
+ * unwinds what was acquired instead of leaking it.
+ */
+static int __devinit fh_rtc_probe(struct platform_device *pdev)
+{
+	int err = 0;
+	int irq;
+	struct resource *ioarea;
+	struct fh_rtc_platform_data *rtc_platform_info;
+	struct rtc_device *rtc;
+	struct resource *res;
+
+	/* kzalloc() already zeroes the allocation */
+	fh_rtc = kzalloc(sizeof(struct fh_rtc_controller), GFP_KERNEL);
+	if (!fh_rtc)
+		return -ENOMEM;
+
+	/* board info below */
+	rtc_platform_info = (struct fh_rtc_platform_data *)pdev->dev.platform_data;
+	if (rtc_platform_info == NULL) {
+		dev_err(&pdev->dev, "%s, rtc platform error.\n",
+			__func__);
+		err = -ENODEV;
+		goto err_free;
+	}
+	fh_rtc->base_year = rtc_platform_info->base_year;
+	fh_rtc->base_month = rtc_platform_info->base_month;
+	fh_rtc->base_day = rtc_platform_info->base_day;
+	fh_rtc->sadc_channel = rtc_platform_info->sadc_channel;
+
+	/* use a signed local: fh_rtc->irq is unsigned and can never be < 0 */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "%s, rtc irq error.\n",
+			__func__);
+		err = irq;
+		goto err_free;
+	}
+	fh_rtc->irq = irq;
+
+	/* get the memory region */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get memory region resource\n");
+		err = -ENOENT;
+		goto err_free;
+	}
+
+	fh_rtc->paddr = res->start;
+	ioarea = request_mem_region(res->start, resource_size(res),
+			pdev->name);
+	if (!ioarea) {
+		dev_err(&pdev->dev, "rtc region already claimed\n");
+		err = -EBUSY;
+		goto err_free;
+	}
+
+	fh_rtc->regs = ioremap(res->start, resource_size(res));
+	if (!fh_rtc->regs) {
+		dev_err(&pdev->dev, "rtc already mapped\n");
+		err = -EINVAL;
+		goto err_release;
+	}
+
+	/* register RTC and exit */
+	platform_set_drvdata(pdev, fh_rtc);
+	rtc = rtc_device_register(rtc_platform_info->dev_name, &pdev->dev,
+			&fh_rtcops, THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		dev_err(&pdev->dev, "cannot attach rtc\n");
+		err = PTR_ERR(rtc);
+		goto err_unmap;
+	}
+	fh_rtc->rtc = rtc;
+
+	err = request_irq(fh_rtc->irq, fh_rtc_irq, 0,
+			dev_name(&pdev->dev), fh_rtc);
+	if (err) {
+		dev_dbg(&pdev->dev, "request_irq failed, %d\n", err);
+		goto err_unregister;
+	}
+
+	create_proc_rtc(fh_rtc);
+
+	SET_REG(fh_rtc->regs + FH_RTC_DEBUG, RTC_PHASE);
+
+	/* a valid SADC channel enables periodic temperature compensation */
+	if (fh_rtc->sadc_channel >= 0) {
+		pr_info("RTC: start self adjustment\n");
+		fh_rtc->wq = create_workqueue("rtc_wq");
+		if (!fh_rtc->wq) {
+			dev_err(&pdev->dev, "no memory to create rtc workqueue\n");
+			err = -ENOMEM;
+			goto err_free_irq;
+		}
+		INIT_DELAYED_WORK(&fh_rtc->self_adjust, fh_rtc_self_adjustment);
+
+		queue_delayed_work(fh_rtc->wq, &fh_rtc->self_adjust, 5000);
+	}
+
+	err = fh_rtc_exam_magic();
+	if (err)
+		goto err_stop_wq;
+
+	return 0;
+
+err_stop_wq:
+	if (fh_rtc->wq) {
+		cancel_delayed_work_sync(&fh_rtc->self_adjust);
+		destroy_workqueue(fh_rtc->wq);
+	}
+err_free_irq:
+	remove_proc();
+	free_irq(fh_rtc->irq, fh_rtc);
+err_unregister:
+	rtc_device_unregister(fh_rtc->rtc);
+err_unmap:
+	iounmap(fh_rtc->regs);
+	platform_set_drvdata(pdev, NULL);
+err_release:
+	release_mem_region(res->start, resource_size(res));
+err_free:
+	kfree(fh_rtc);
+	fh_rtc = NULL;
+	return err;
+}
+
+static int __devexit fh_rtc_remove(struct platform_device *dev)
+{
+ struct fh_rtc_controller *fh_rtc = platform_get_drvdata(dev);
+
+ remove_proc();
+ free_irq(fh_rtc->irq, fh_rtc);
+ rtc_device_unregister(fh_rtc->rtc);
+
+ iounmap(fh_rtc->regs);
+ platform_set_drvdata(dev, NULL);
+ kfree(fh_rtc);
+ return 0;
+}
+
+
+
+#ifdef CONFIG_PM
+
+/* RTC Power management control */
+
+static int fh_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int fh_rtc_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+#else
+#define fh_rtc_suspend NULL
+#define fh_rtc_resume NULL
+#endif
+
+
+
+static struct platform_driver fh_rtc_driver = {
+ .probe = fh_rtc_probe,
+ .remove = __devexit_p(fh_rtc_remove),
+ .suspend = fh_rtc_suspend,
+ .resume = fh_rtc_resume,
+ .driver = {
+ .name = "fh_rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+
+static int __init fh_rtc_init(void) {
+
+ return platform_driver_register(&fh_rtc_driver);
+}
+
+static void __exit fh_rtc_exit(void) {
+ platform_driver_unregister(&fh_rtc_driver);
+}
+
+static void del_char(char *str, char ch)
+{
+ char *p = str;
+ char *q = str;
+ while (*q) {
+ if (*q != ch) {
+ *p++ = *q;
+ }
+ q++;
+ }
+ *p = '\0';
+}
+
+ssize_t proc_read(char *page, char **start, off_t off, int count,
+ int *eof, struct fh_rtc_controller *data) {
+ ssize_t len = 0;
+
+ printk(KERN_INFO "------------- dump register -------------\n");
+ printk(KERN_INFO "cnt:0x%x\n",fh_rtc_get_time(data->regs) );
+ printk(KERN_INFO "offset:0x%x\n",fh_rtc_get_offset(data->regs));
+ printk(KERN_INFO "fail:0x%x\n",fh_rtc_get_power_fail(data->regs));
+ printk(KERN_INFO "alarm_cnt:0x%x\n",fh_rtc_get_alarm_time(data->regs));
+ printk(KERN_INFO "int stat:0x%x\n",fh_rtc_get_int_status(data->regs));
+ printk(KERN_INFO "int en:0x%x\n",fh_rtc_get_enabled_interrupt(data->regs));
+ printk(KERN_INFO "sync:0x%x\n",fh_rtc_get_sync(data->regs));
+ printk(KERN_INFO "debug:0x%x\n",fh_rtc_get_debug(data->regs));
+ printk(KERN_INFO "-------------------------------------------\n");
+
+ return len;
+}
+
+
+static ssize_t fh_rtc_proc_write(struct file *filp, const char *buf, size_t len, loff_t *off)
+{
+	char message[32] = {0};
+	char * const delim = ",";
+	char *cur = message, *power_str;
+	int power;
+	len = (len > 31) ? 31 : len; /* cap at 31 so message[] keeps its NUL terminator for strsep/strtoul */
+	if (copy_from_user(message, buf, len))
+		return -EFAULT;
+	power_str = strsep(&cur, delim);
+	if (!power_str) {
+		pr_err("%s: ERROR: parameter is empty\n", __func__);
+		return -EINVAL;
+	} else {
+		del_char(power_str, ' ');
+		del_char(power_str, '\n');
+		power = (unsigned int)simple_strtoul(power_str, NULL, 10);
+		if (power < 0) {
+			pr_err("%s: ERROR: parameter is incorrect\n", __func__);
+			return -EINVAL;
+		}
+		printk(KERN_INFO "the diff between rtc and sys is %d\n",
+			power);
+		if (power == 0)
+			fh_adjust_rtc();
+		else if (power == 1)
+			get_rtc_temperature();
+	}
+	return len;
+}
+
+static void create_proc_rtc(struct fh_rtc_controller *rtc)
+{
+ fh_rtc->proc_rtc_entry =
+ create_proc_entry(FH_RTC_PROC_FILE,
+ S_IRUGO, NULL);
+
+ if (!fh_rtc->proc_rtc_entry) {
+ printk(KERN_ERR"create proc failed\n");
+ } else {
+ fh_rtc->proc_rtc_entry->read_proc =
+ (read_proc_t *)proc_read;
+ fh_rtc->proc_rtc_entry->write_proc =
+ (write_proc_t *)fh_rtc_proc_write;
+ fh_rtc->proc_rtc_entry->data = rtc;
+ }
+}
+
+static void remove_proc(void) {
+ remove_proc_entry(FH_RTC_PROC_FILE, NULL);
+}
+
+module_init(fh_rtc_init);
+module_exit(fh_rtc_exit);
+
+MODULE_DESCRIPTION("FH SOC RTC Driver");
+MODULE_AUTHOR("yu.zhang <zhangy@fullhan.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:fh-rtc");
diff --git a/drivers/rtc/rtc-fh_v2.c b/drivers/rtc/rtc-fh_v2.c
new file mode 100644
index 00000000..1967c2e4
--- /dev/null
+++ b/drivers/rtc/rtc-fh_v2.c
@@ -0,0 +1,1385 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/fh_rtc_v2.h>
+#include <mach/fh_sadc.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/irqreturn.h>
+/* #define FH_RTC_DEBUG_PRINT */
+
+#ifdef FH_RTC_DEBUG_PRINT
+#define RTC_PRINT_DBG(fmt, args...) \
+	do { \
+		printk(KERN_INFO "[FH_RTC_DEBUG]: "); \
+		printk(fmt, ## args); \
+	} while (0)
+#else
+#define RTC_PRINT_DBG(fmt, args...) do { } while (0)
+#endif
+
+unsigned int rtc_use_efuse_viture_addr;
+
+#ifdef CONFIG_USE_TSENSOR
+#define USE_TSENSOR
+#ifdef CONFIG_USE_TSENSOR_OFFSET
+#define USE_TSENSOR_OFFSET
+#endif
+#endif
+/*
+#define USE_TSENSOR_OFFSET
+#define USE_TSENSOR
+*/
+/* #define USE_DEBUG_REGISTER */
+
+struct fh_rtc_controller {
+ void * __iomem regs;
+ unsigned int v_addr;
+ unsigned int irq;
+ unsigned int paddr;
+ unsigned int base_year;
+ unsigned int base_month;
+ unsigned int base_day;
+ struct rtc_device *rtc;
+ struct clk *clk;
+ struct proc_dir_entry *proc_rtc_entry;
+ int sadc_channel;
+
+ struct workqueue_struct *wq;
+ struct delayed_work self_adjust;
+};
+struct fh_rtc_core_int_status {
+ unsigned int core_int_en;
+ unsigned int core_int_status;
+};
+struct fh_rtc_controller *fh_rtc;
+struct fh_rtc_core_int_status fh_core_int;
+enum {
+
+ TIME_FUNC = 0, ALARM_FUNC,
+
+};
+#ifdef USE_TSENSOR
+#define LUT_COF 58
+#define LUT_OFFSET 0xff
+#define TSENSOR_OFFSET 55
+#define TSENSOR_COF 58
+#define TSENSOR_STEP 3
+#define TEMP_CP 25
+#define TSENSOR_CP_DEFAULT_OUT 0x993
+
+unsigned int TSENSOR_LUT[12] = {0x1b1e2023,
+ 0x11131618,
+ 0x090b0d0f,
+ 0x03040607,
+ 0x00010202,
+ 0x01000000,
+ 0x04030201,
+ 0x0b090706,
+ 0x1713100e,
+ 0x27221e1a,
+ 0x3e37322c,
+ 0x5b534c44};
+#endif
+
+
+
+static int fh_rtc_get_core(unsigned int base_addr, unsigned int reg_num);
+#ifdef USE_TSENSOR_OFFSET
+#define RTC_USE_EFUSE_CMD (0x0000)
+#define RTC_USE_EFUSE_CONFIG (0x0004)
+#define RTC_USE_EFUSE_MATCH_KEY (0x0008)
+#define RTC_USE_EFUSE_TIMING0 (0x000C)
+#define RTC_USE_EFUSE_TIMING1 (0x0010)
+#define RTC_USE_EFUSE_TIMING2 (0x0014)
+#define RTC_USE_EFUSE_TIMING3 (0x0018)
+#define RTC_USE_EFUSE_TIMING4 (0x001C)
+#define RTC_USE_EFUSE_TIMING5 (0x0020)
+#define RTC_USE_EFUSE_TIMING6 (0x0024)
+#define RTC_USE_EFUSE_DOUT (0x0028)
+#define RTC_USE_EFUSE_STATUS0 (0x002C)
+#define RTC_USE_EFUSE_STATUS1 (0x0030)
+#define RTC_USE_EFUSE_STATUS2 (0x0034)
+#define RTC_USE_EFUSE_STATUS3 (0x0038)
+#define RTC_USE_EFUSE_STATUS4 (0x003C)
+#define RTC_USE_EFUSE_MEM_INFO (0x0100)
+static int tsensor_detect_complete(unsigned int i)
+{
+	unsigned int rdata;
+	unsigned int loop_count;
+
+	rdata = 0; /* was missing ';' -- compile error under USE_TSENSOR_OFFSET */
+	loop_count = 0;
+	while ((rdata&(1<<i)) != 1<<i) {
+		rdata = readl(rtc_use_efuse_viture_addr +
+			RTC_USE_EFUSE_STATUS3);
+
+		if (rdata != 0x0) {
+
+			pr_info("detect complet status 0x%x\n", rdata);
+			break;
+		}
+		rdata = readl(rtc_use_efuse_viture_addr +
+			RTC_USE_EFUSE_STATUS0);
+		loop_count++;
+		usleep_range(1000, 2000);
+		if (loop_count >= 100) {
+
+			pr_err("RTC: can't detect completed!\n");
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+int tsensor_load_usrcmd(void)
+{
+ unsigned rdata;
+ int status;
+
+ status = 0;
+ rdata = readl(rtc_use_efuse_viture_addr + RTC_USE_EFUSE_STATUS0);
+ if ((rdata&(1<<31)) != 1<<31) {
+ writel(0x1, rtc_use_efuse_viture_addr+RTC_USE_EFUSE_CMD);
+ status = tsensor_detect_complete(1);
+ if (status < 0) {
+ pr_err("RTC:load usrcmd error!\n");
+ return -EBUSY;
+ }
+
+ rdata = readl(rtc_use_efuse_viture_addr+RTC_USE_EFUSE_DOUT);
+ }
+
+ return 0;
+}
+int tsensor_efuse_read(int entry)
+{
+ unsigned rdata;
+ int status = 0;
+
+ status = tsensor_load_usrcmd();
+ if (status < 0) {
+ pr_err("RTC:tsensor_efuse_read load usrcmd error!\n");
+ return -EBUSY;
+ }
+ writel((entry<<4)+0x3, rtc_use_efuse_viture_addr+RTC_USE_EFUSE_CMD);
+ status = 0;
+ status = tsensor_detect_complete(3);
+ if (status < 0) {
+ pr_err("RTC:tsensor_efuse_read load data error!\n");
+ return -EBUSY;
+ }
+ rdata = readl(rtc_use_efuse_viture_addr+RTC_USE_EFUSE_DOUT);
+
+ return rdata;
+}
+int tsensor_adjust_lut(void)
+{
+ unsigned int tsensor_init_data;
+ unsigned int tsensor_12bit;
+ unsigned int low_data, high_data;
+ int temp_diff;
+ int tsensor_out_value_diff;
+
+ low_data = tsensor_efuse_read(60);
+ high_data = tsensor_efuse_read(61);
+ tsensor_init_data = (high_data<<8)|low_data;
+ tsensor_12bit = tsensor_init_data&0xfff;
+ tsensor_out_value_diff = tsensor_12bit - TSENSOR_CP_DEFAULT_OUT;
+
+ temp_diff = tsensor_out_value_diff*LUT_COF/4096;
+
+ return temp_diff;
+}
+#endif
+#ifdef USE_TSENSOR
+static int fh_rtc_temp_cfg_coef_offset(unsigned int coef, unsigned int offset);
+static int fh_rtc_temp_cfg_thl_thh(unsigned int thl, unsigned int thh);
+#if 0
+static int fh_rtc_temp_cfg_update_time(unsigned int time);
+#endif
+#endif
+static int fh_rtc_set_core(unsigned int base_addr, unsigned int reg_num,\
+ unsigned int value);
+#ifdef USE_TSENSOR
+static void rtc_adjust(void)
+{
+ int i;
+ int offset_index;
+ char offset_lut[48];
+
+ for (i = 0; i < 12; i++)
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_OFFSET_LUT+(i<<4),
+ TSENSOR_LUT[i]);
+
+ for (i = 0; i < 12; i++) {
+ offset_lut[i*4] = TSENSOR_LUT[i]&0xff;
+ offset_lut[i*4+1] = (TSENSOR_LUT[i]>>4);
+ offset_lut[i*4+2] = (TSENSOR_LUT[i]>>8);
+ offset_lut[i*4+3] = (TSENSOR_LUT[i]>>12);
+ }
+
+ offset_index = 0;
+ for (i = 0; i < 46; i++) {
+ if (offset_lut[i] > offset_lut[i+1])
+ offset_index = i + 1;
+ else
+ offset_lut[i+1] = offset_lut[i];
+ }
+
+#ifdef USE_TSENSOR_OFFSET
+ i = tsensor_adjust_lut();
+ if ((i < 0) || (i > 5))
+ i = 0;
+ fh_rtc_temp_cfg_coef_offset(LUT_COF, LUT_OFFSET-i);
+ pr_info("tsensor diff value : %d\n", i);
+#else
+ fh_rtc_temp_cfg_coef_offset(LUT_COF, LUT_OFFSET);
+#endif
+ fh_rtc_temp_cfg_thl_thh(0, 47);
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_OFFSET,
+ (offset_index<<2) | (offset_index<<10)|
+ OFFSET_BK_EN|OFFSET_BK_AUTO);
+}
+#endif
+
+static int fh_rtc_core_idle(unsigned int base_addr)
+{
+ unsigned int status;
+
+ status = GET_REG(base_addr+FH_RTC_INT_STATUS);
+ if (status & FH_RTC_INT_STATUS_CORE_IDLE)
+ return 0;
+ else
+ return -1;
+}
+
+static int fh_rtc_core_wr(unsigned int base_addr)
+{
+ int reg;
+
+ reg = GET_REG(base_addr+FH_RTC_INT_STATUS);
+ reg &= (~FH_RTC_INT_STATUS_CORE_IDLE);
+ SET_REG(base_addr+FH_RTC_INT_STATUS, reg);
+
+ return 0;
+}
+
+static int fh_rtc_get_time(unsigned int base_addr)
+{
+ int reg, count, status;
+
+ if (fh_rtc_core_idle(base_addr) < 0) {
+ pr_info("fh rtc get time error\n");
+ return -1;
+ }
+ fh_rtc_core_wr(base_addr);
+ SET_REG(base_addr+FH_RTC_CMD, RTC_READ);
+ for (count = 0; count < 250; count++) {
+ status = fh_rtc_core_idle(base_addr);
+ if (status == 0) {
+ reg = GET_REG(base_addr+FH_RTC_RD_DATA);
+ return reg;
+ }
+ udelay(100);
+
+ }
+ pr_info("rtc core busy can't get time\n");
+
+ return -1;
+}
+
+static int fh_rtc_set_core(unsigned int base_addr, unsigned int reg_num,\
+ unsigned int value)
+{
+ int count, status;
+
+ if (fh_rtc_core_idle(base_addr) < 0) {
+ pr_info("rtc get time:rtc core busy\n");
+ return -1;
+ }
+ fh_rtc_core_wr(base_addr);
+
+ SET_REG(base_addr+FH_RTC_WR_DATA, value);
+ SET_REG(base_addr+FH_RTC_CMD, reg_num|RTC_WRITE);
+ for (count = 0; count < 250; count++) {
+ status = fh_rtc_core_idle(base_addr);
+ if (status == 0)
+ return 0;
+
+ udelay(100);
+
+ }
+ pr_info("rtc SET CORE REG TIMEOUT\n");
+
+ return -1;
+}
+
+static int fh_rtc_get_core(unsigned int base_addr, unsigned int reg_num)
+{
+ int reg, count, status;
+
+ if (fh_rtc_core_idle(base_addr) < 0) {
+ pr_info("rtc get time:rtc core busy %d\n", __LINE__);
+ return -1;
+ }
+ fh_rtc_core_wr(base_addr);
+ SET_REG(base_addr+FH_RTC_CMD, reg_num|RTC_READ);
+ for (count = 0; count < 150; count++) {
+ status = fh_rtc_core_idle(base_addr);
+ if (status == 0) {
+ reg = GET_REG(base_addr+FH_RTC_RD_DATA);
+ return reg;
+ }
+ udelay(100);
+
+ }
+ pr_info("rtc GET CORE REG TIMEOUT line %d\n", __LINE__);
+
+ return -1;
+}
+
+static int fh_rtc_get_temp(unsigned int base_addr)
+{
+ int reg, count, status;
+
+ if (fh_rtc_core_idle(base_addr) < 0) {
+ pr_info("rtc get time:rtc core busy %d\n", __LINE__);
+
+ return -1;
+ }
+ fh_rtc_core_wr(base_addr);
+ SET_REG(base_addr+FH_RTC_CMD, RTC_TEMP);
+ for (count = 0; count < 150; count++) {
+ status = fh_rtc_core_idle(base_addr);
+ if (status == 0) {
+ reg = GET_REG(base_addr+FH_RTC_RD_DATA);
+
+ return reg;
+ }
+ udelay(100);
+
+ }
+ pr_info("rtc GET CORE REG TIMEOUT line %d\n", __LINE__);
+
+ return -1;
+}
+
+static int fh_rtc_set_time(unsigned int base_addr, unsigned int value)
+{
+ int count;
+
+ if (fh_rtc_core_idle(base_addr) < 0) {
+ pr_info("set time :rtc core busy\n");
+
+ return -1;
+ }
+ fh_rtc_core_wr(base_addr);
+ SET_REG(base_addr+FH_RTC_WR_DATA, value);
+ SET_REG(base_addr+FH_RTC_CMD, RTC_WRITE);
+ for (count = 0; count < 150; count++) {
+ if (fh_rtc_core_idle(base_addr) == 0)
+ return 0;
+
+ udelay(100);
+ }
+ pr_info("rtc core busy can't set time\n");
+
+ return -1;
+}
+#ifdef DRIVER_TEST
+static int fh_rtc_set_time_first(unsigned int base_addr, unsigned int value)
+{
+
+	SET_REG(base_addr+FH_RTC_WR_DATA, value);
+	SET_REG(base_addr+FH_RTC_CMD, RTC_WRITE);
+	return 0; /* function is declared int but had no return statement */
+}
+#endif
+
+static int fh_rtc_alarm_enable_interrupt(unsigned int base_addr)
+{
+ int status;
+
+ status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS,
+ status & (~FH_RTC_ISR_ALARM_POS));
+
+ status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_EN);
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_EN,
+ (status | FH_RTC_CORE_INT_EN_ALM_INT));
+
+ status = GET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS);
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS, status&
+ (~FH_RTC_INT_STATUS_CORE));
+
+ status = GET_REG(fh_rtc->v_addr+FH_RTC_INT_EN);
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_EN, status &
+ (~FH_RTC_INT_CORE_INT_ERR_MASK));
+ return 0;
+}
+
+static int fh_rtc_alarm_disable_interrupt(unsigned int base_addr,
+ unsigned int value)
+{
+ int rtc_core_status;
+
+ rtc_core_status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_EN);
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_EN, (rtc_core_status & (~FH_RTC_CORE_INT_EN_ALM_INT)));
+
+ return 0;
+}
+
+static int fh_rtc_set_alarm_time(unsigned int base_addr, unsigned int value)
+{
+ int status;
+
+#ifdef USE_DEBUG_REGISTER
+ int rtc_core_status, cys_count;
+ rtc_core_status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_COUNTER);
+ if (rtc_core_status > value) {
+ pr_info("alarm time > now time\n");
+ return -ETIME;
+ }
+#endif
+ status = fh_rtc_set_core(base_addr,
+ FH_RTC_CMD_ALARM_CFG, value);
+ if (status < 0) {
+ pr_info("set alarm time failed\n");
+
+ return -1;
+ }
+#ifdef USE_DEBUG_REGISTER
+ if (rtc_core_status == value) {
+ cys_count = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_DEBUG);
+ if (cys_count > 0x3e00)
+ return -ETIME;
+ }
+#endif
+ return 0;
+}
+
+static int fh_rtc_get_alarm_time(unsigned int base_addr)
+{
+ int data;
+
+ data = fh_rtc_get_core(base_addr, FH_RTC_CMD_ALARM_CFG);
+
+ return data;
+}
+static unsigned int fh_rtc_get_hw_sec_data(unsigned int func_switch)
+{
+
+ unsigned int ret_sec, raw_value, sec_value;
+ unsigned int min_value, hour_value, day_value;
+
+ if (func_switch == TIME_FUNC)
+ raw_value = fh_rtc_get_time((unsigned int)fh_rtc->regs);
+ else
+ raw_value = fh_rtc_get_alarm_time((unsigned int)fh_rtc->regs);
+
+ sec_value = FH_GET_RTC_SEC(raw_value);
+ min_value = FH_GET_RTC_MIN(raw_value);
+ hour_value = FH_GET_RTC_HOUR(raw_value);
+ day_value = FH_GET_RTC_DAY(raw_value);
+ ret_sec = (day_value * 86400) + (hour_value * 3600) + (min_value * 60)\
+ + sec_value;
+
+ return ret_sec;
+
+}
+
+#ifdef DRIVER_TEST
+#define TEMP_DATA 0x203
+static unsigned int fh_rtc_get_tmp(void)
+{
+ unsigned int value;
+ value = GET_REG((unsigned int)fh_rtc->regs + FH_RTC_CMD_TEMP_INFO);
+ return value;
+}
+static void fh_rtc_tmp_en(void)
+{
+ fh_rtc_set_core((unsigned int)fh_rtc->regs, FH_RTC_CMD_OFFSET,
+ TEMP_DATA);
+}
+#endif
+
+static int fh_rtc_set_hw_sec_data(struct rtc_time *rtc_tm,
+ unsigned int func_switch)
+{
+
+ unsigned int raw_value, sec_value, min_value;
+ unsigned int hour_value, day_value;
+ unsigned long now;
+ int status;
+
+ if (func_switch == ALARM_FUNC) {
+ rtc_tm_to_time(rtc_tm, &now);
+ now--;
+ rtc_time_to_tm(now, rtc_tm);
+ }
+
+ day_value = rtc_year_days(rtc_tm->tm_mday, rtc_tm->tm_mon,
+ rtc_tm->tm_year + 1900);
+ day_value += (rtc_tm->tm_year - 70)
+ * 365 + ELAPSED_LEAP_YEARS(rtc_tm->tm_year);
+
+ hour_value = rtc_tm->tm_hour;
+ min_value = rtc_tm->tm_min;
+ sec_value = rtc_tm->tm_sec;
+
+ raw_value = (day_value << DAY_BIT_START) | \
+ (hour_value << HOUR_BIT_START)\
+ | (min_value << MIN_BIT_START) | \
+ (sec_value << SEC_BIT_START);
+
+ if (func_switch == TIME_FUNC) {
+ fh_rtc_set_time((unsigned int)fh_rtc->regs, raw_value);
+ return 0;
+ } else {
+ status = fh_rtc_set_alarm_time((unsigned int)fh_rtc->regs,
+ raw_value);
+ return status;
+ }
+}
+
+static int fh_rtc_open(struct device *dev)
+{
+ return 0;
+}
+
+static void fh_rtc_release(struct device *dev)
+{
+ return;
+}
+
+#ifdef DRIVER_TEST
+static int fh_rtc_tm_compare(struct rtc_time *tm0, struct rtc_time *tm1)
+{
+ unsigned long read = 0, write = 0;
+
+ rtc_tm_to_time(tm0, &read);
+ rtc_tm_to_time(tm1, &write);
+
+ if (write > read || write < read - 2) {
+ RTC_PRINT_DBG(
+ "ERROR: read(%d-%d-%d %d:%d:%d) vs "\
+ "write(%d-%d-%d %d:%d:%d)\n",\
+ tm0->tm_year + 1900, tm0->tm_mon + 1, \
+ tm0->tm_mday,\
+ tm0->tm_hour, tm0->tm_min, \
+ tm0->tm_sec, tm1->tm_year + 1900,\
+ tm1->tm_mon + 1, tm1->tm_mday, \
+ tm1->tm_hour, tm1->tm_min,\
+ tm1->tm_sec);
+
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+static int fh_rtc_gettime_nosync(struct device *dev, struct rtc_time *rtc_tm)
+{
+ unsigned int temp;
+
+ temp = fh_rtc_get_hw_sec_data(TIME_FUNC);
+ rtc_time_to_tm(temp, rtc_tm);
+ RTC_PRINT_DBG("rtc read date:0x%x\n", temp);
+
+ return 0;
+}
+
+static int fh_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ RTC_PRINT_DBG("rtc write %d-%d-%d %d:%d:%d\n", tm->tm_year + 1900,
+ tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, \
+ tm->tm_min, tm->tm_sec);
+
+ fh_rtc_set_hw_sec_data(tm, TIME_FUNC);
+
+ return 0;
+}
+
+static int fh_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_time *rtc_tm = &alrm->time;
+
+ rtc_time_to_tm(fh_rtc_get_hw_sec_data(ALARM_FUNC), rtc_tm);
+
+ return 0;
+}
+
+static int fh_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_time *rtc_tm = &alrm->time;
+ int status;
+
+ status = fh_rtc_set_hw_sec_data(rtc_tm, ALARM_FUNC);
+ if (status)
+ return status;
+ if (alrm->enabled)
+ fh_rtc_alarm_enable_interrupt(fh_rtc->v_addr);
+ else
+ fh_rtc_alarm_disable_interrupt(fh_rtc->v_addr,
+ FH_RTC_ISR_ALARM_POS);
+ return 0;
+}
+
+#ifdef DRIVER_TEST
+static int dump_interrput_status(unsigned int base_addr)
+{
+ int status;
+
+ status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+ pr_info("core int status : %x\n", status);
+
+ status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_EN);
+ pr_info("core int en : %x\n", status);
+ status = GET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS);
+ pr_info("wrapper int status : 0x%x\n", status);
+ status = GET_REG(fh_rtc->v_addr+FH_RTC_INT_EN);
+ pr_info("wrapper int en : 0x%x\n", status);
+
+ return 0;
+}
+#endif
+
+static int fh_rtc_irq_enable(struct device *dev, unsigned int enabled)
+{
+
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fh_rtc_controller *fh_rtc = platform_get_drvdata(pdev);
+ int status;
+
+ if (enabled) {
+
+ status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS,
+ status & (~FH_RTC_ISR_ALARM_POS));
+
+ status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_EN);
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_EN,
+ (status | FH_RTC_CORE_INT_EN_ALM_INT));
+
+ status = GET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS);
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS, status&
+ (~FH_RTC_INT_STATUS_CORE));
+
+ status = GET_REG(fh_rtc->v_addr+FH_RTC_INT_EN);
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_EN, status &
+ (~FH_RTC_INT_CORE_INT_ERR_MASK));
+
+ } else
+ fh_rtc_alarm_disable_interrupt(fh_rtc->v_addr,
+ FH_RTC_ISR_ALARM_POS);
+
+ return 0;
+}
+
+static void fh_rtc_clear_interrupt_status(unsigned int base_addr,
+ unsigned int value)
+{
+ unsigned int int_en, int_status;
+
+ int_en = GET_REG((unsigned int)base_addr+FH_RTC_INT_EN);
+ int_en &= 0x7f;
+ SET_REG(base_addr+FH_RTC_INT_EN,\
+ (((value)\
+ & 0x7f)<<16)|int_en|\
+ FH_RTC_INT_CORE_IDLE_ERR_MASK|FH_RTC_INT_CORE_INT_ERR_MASK);
+ int_status = value & 0x10a0;
+ SET_REG(base_addr+FH_RTC_INT_STATUS, int_status);
+}
+static struct work_struct rtc_int_wq;
+
+static void rtc_core_int_handler(struct work_struct *work)
+{
+ unsigned long events = 0;
+ int rtc_int_status, rtc_int_en;
+ unsigned int wr_int_status;
+ unsigned int wr_int_status_clr;
+ int rtc_core_status;
+ struct rtc_device *rtc = fh_rtc->rtc;
+
+ /* get core interrupt status */
+ rtc_int_en = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_EN);
+ wr_int_status_clr = GET_REG(fh_rtc->v_addr +
+ FH_RTC_INT_STATUS);
+ rtc_int_status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+
+ /* get core interrupt failed */
+ if (rtc_int_en < 0) {
+ pr_info("can't get core int en\n");
+ return;
+ }
+ if (rtc_int_status < 0) {
+ pr_info("can't get core int status\n");
+ return;
+ }
+ /* do interrupt */
+ if (rtc_int_en & rtc_int_status & FH_RTC_ISR_ALARM_POS) {
+ pr_info("ALARM INT\n");
+ /* clear core alarm interrupt status */
+ rtc_core_status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS,
+ rtc_core_status &
+ (~FH_RTC_ISR_ALARM_POS));
+ /* eanble interrupt */
+ wr_int_status = GET_REG(fh_rtc->v_addr+
+ FH_RTC_INT_EN);
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_EN,\
+ wr_int_status&
+ (FH_RTC_INT_CORE_INT_ALL_COV));
+ /* updata to RTC CORE alarm work */
+ events |= RTC_IRQF | RTC_AF;
+ rtc_update_irq(rtc, 1, events);
+
+ } else if (rtc_int_en & rtc_int_status & FH_RTC_ISR_SEC_POS) {
+
+ pr_info("FH_RTC_ISR_SEC_POS\n");
+ rtc_core_status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS,
+ rtc_core_status & (~FH_RTC_ISR_SEC_POS));
+ wr_int_status = GET_REG(fh_rtc->v_addr+
+ FH_RTC_INT_EN);
+
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_EN,\
+ wr_int_status&
+ (FH_RTC_INT_CORE_INT_STATUS_COV));
+
+ events |= RTC_IRQF | RTC_UF;
+ rtc_update_irq(rtc, 1, events);
+
+ } else if (rtc_int_en & rtc_int_status & FH_RTC_ISR_MIN_POS) {
+
+ pr_info("FH_RTC_ISR_MIN_POS\n");
+ rtc_core_status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS,
+ rtc_core_status & (~FH_RTC_ISR_MIN_POS));
+ wr_int_status = GET_REG(fh_rtc->regs+
+ FH_RTC_INT_EN);
+
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_EN,
+ wr_int_status&\
+ (FH_RTC_INT_CORE_INT_ERR_MASK_COV));
+
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS,
+ wr_int_status_clr&\
+ (FH_RTC_INT_CORE_INT_STATUS_COV));
+
+ } else if (rtc_int_en & rtc_int_status & FH_RTC_ISR_HOUR_POS) {
+ pr_info("FH_RTC_ISR_HOUR_POS\n");
+ rtc_core_status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS,
+ rtc_core_status & (~FH_RTC_ISR_HOUR_POS));
+ wr_int_status = GET_REG(fh_rtc->v_addr+
+ FH_RTC_INT_EN);
+
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_EN,
+ wr_int_status&\
+ (FH_RTC_INT_CORE_INT_ERR_MASK_COV));
+
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS,
+ wr_int_status_clr&\
+ (FH_RTC_INT_CORE_INT_STATUS_COV));
+ } else if (rtc_int_en & rtc_int_status & FH_RTC_ISR_DAY_POS) {
+ pr_info("FH_RTC_ISR_DAY_POS\n");
+ rtc_core_status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS,
+ rtc_core_status & (~FH_RTC_ISR_DAY_POS));
+ wr_int_status = GET_REG(fh_rtc->v_addr+
+ FH_RTC_INT_EN);
+
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_EN,
+ wr_int_status&\
+ (FH_RTC_INT_CORE_INT_ERR_MASK_COV));
+
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS,
+ wr_int_status_clr&\
+ (FH_RTC_INT_CORE_INT_STATUS_COV));
+ } else if (rtc_int_en & rtc_int_status & FH_RTC_ISR_POWERFAIL_POS) {
+ pr_info("FH_RTC_ISR_POWERFAIL_POS\n");
+ rtc_core_status = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS);
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS,
+ rtc_core_status &
+ (~FH_RTC_ISR_POWERFAIL_POS));
+
+ wr_int_status = GET_REG(fh_rtc->v_addr+
+ FH_RTC_INT_EN);
+
+ SET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS,
+ wr_int_status_clr&\
+ (FH_RTC_INT_CORE_INT_STATUS_COV));
+
+ } else if (rtc_int_en & rtc_int_status & FH_RTC_ISR_RX_CRC_ERR_INT) {
+ pr_info("FH_RTC_ISR_RX_CRC_ERR_INT\n");
+ } else if (rtc_int_en & rtc_int_status & FH_RTC_ISR_RX_COM_ERR_INT) {
+ pr_info("FH_RTC_ISR_RX_COM_ERR_INT\n");
+ } else if (rtc_int_en & rtc_int_status & FH_RTC_LEN_ERR_INT) {
+ pr_info("FH_RTC_LEN_ERR_INT\n");
+ } else {
+ pr_info("unexpect isr\n");
+ fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_INT_STATUS, rtc_int_en & (~rtc_int_status));
+ }
+
+
+}
+
+static irqreturn_t fh_rtc_irq(int irq, void *dev_id)
+{
+
+ struct fh_rtc_controller *fh_rtc = (struct fh_rtc_controller *) dev_id;
+ unsigned int isr_status;
+ /*
+ * 1.clear wrapper interrput status
+ * 2.mask core int
+ * 3.schedule interrput work
+ */
+ isr_status = GET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS);
+ fh_rtc_clear_interrupt_status(fh_rtc->v_addr, isr_status);
+
+ if (isr_status & FH_RTC_INT_STATUS_CORE) {
+ schedule_work(&rtc_int_wq);
+ } else if (isr_status & FH_RTC_INT_STATUS_RX_CRC_ERR)
+ pr_info("FH_RTC_INT_STATUS_RX_CRC_ERR\n");
+ else if (isr_status & FH_RTC_INT_STATUS_RX_COM_ERR) {
+ pr_info("FH_RTC_INT_STATUS_RX_COM_ERR\n");
+ } else if (isr_status & FH_RTC_INT_STATUS_RX_LEN_ERR) {
+ pr_info("FH_RTC_INT_STATUS_RX_LEN_ERR\n");
+ } else if (isr_status & FH_RTC_INT_STATUS_CNT_THL) {
+ pr_info("FH_RTC_INT_STATUS_CNT_THL\n");
+ } else if (isr_status & FH_RTC_INT_STATUS_CNT_THH) {
+ pr_info("FH_RTC_INT_STATUS_CNT_THH\n");
+ } else if (isr_status & FH_RTC_INT_STATUS_CORE_IDLE) {
+ pr_info("FH_RTC_INT_STATUS_CORE_IDLE\n");
+ } else if (isr_status & FH_RTC_INT_STATUS_WRAPPER_BUSY) {
+ pr_info("FH_RTC_INT_STATUS_WRAPPER_BUSY\n");
+ } else if (isr_status & FH_RTC_INT_STATUS_CORE_BUSY) {
+ pr_info("FH_RTC_INT_STATUS_CORE_BUSY\n");
+ } else {
+ pr_info("rtc unknown isr...\n");
+
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops fh_rtcops = {
+ .open = fh_rtc_open,
+ .release = fh_rtc_release,
+ .read_time = fh_rtc_gettime_nosync,
+ .set_time = fh_rtc_settime,
+ .read_alarm = fh_rtc_getalarm,
+ .set_alarm = fh_rtc_setalarm,
+ .alarm_irq_enable = fh_rtc_irq_enable,
+};
+
+/*get the read of SADC and adjust RTC clock*/
+static struct miscdevice fh_rtc_misc;
+static void create_proc_rtc(struct fh_rtc_controller *rtc);
+static void remove_proc(void);
+static int fh_rtc_probe(struct platform_device *pdev)
+{
+	int err = 0;
+	struct fh_rtc_platform_data *rtc_platform_info;
+	struct rtc_device *rtc;
+	struct resource *res;
+	int rtc_core_status;
+
+	/* malloc mem region */
+	fh_rtc = kzalloc(sizeof(struct fh_rtc_controller), GFP_KERNEL);
+	if (!fh_rtc)
+		return -ENOMEM;
+
+	memset(fh_rtc, 0, sizeof(struct fh_rtc_controller));
+
+	/* board info below */
+	rtc_platform_info = (struct fh_rtc_platform_data *)\
+		pdev->dev.platform_data;
+	if (rtc_platform_info == NULL) {
+		dev_err(&pdev->dev, "%s, rtc platform error.\n",
+			__func__);
+		err = -ENODEV;
+		goto err_nores;
+	}
+	fh_rtc->base_year = rtc_platform_info->base_year;
+	fh_rtc->base_month = rtc_platform_info->base_month;
+	fh_rtc->base_day = rtc_platform_info->base_day;
+	fh_rtc->sadc_channel = rtc_platform_info->sadc_channel;
+
+	/* find the IRQs; irq field is unsigned, so compare as int or the error check never fires */
+	fh_rtc->irq = platform_get_irq(pdev, 0);
+	if ((int)fh_rtc->irq < 0) {
+		dev_err(&pdev->dev, "%s, rtc irq error.\n",
+			__func__);
+		err = (int)fh_rtc->irq;
+		goto err_nores;
+	}
+
+	/* get the memory region */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get memory region resource\n");
+		err = -ENOENT;
+		goto err_nores;
+	}
+
+	fh_rtc->paddr = res->start;
+
+	fh_rtc->regs = ioremap(res->start, resource_size(res));
+	fh_rtc->v_addr = (unsigned int)fh_rtc->regs;
+	if (!fh_rtc->regs) {
+		dev_err(&pdev->dev, "rtc already mapped\n");
+		err = -EINVAL;
+		goto err_nores;
+	}
+
+	fh_rtc->clk = clk_get(&pdev->dev, "rtc_pclk_gate");
+
+	if (IS_ERR(fh_rtc->clk))
+		printk(KERN_INFO "cannot get rtc_pclk\n");
+	else
+		clk_enable(fh_rtc->clk);
+
+	/* efuse mem map */
+	rtc_use_efuse_viture_addr = (unsigned int)ioremap(EFUSE_REG_BASE,
+		resource_size(res));
+
+	/* init int status */
+	SET_REG(fh_rtc->v_addr+FH_RTC_INT_EN,
+		FH_RTC_INT_CORE_INT_ERR_EN|\
+		FH_RTC_INT_CORE_IDLE_ERR_EN | \
+		FH_RTC_INT_CORE_INT_ERR_MASK |\
+		FH_RTC_INT_CORE_IDLE_ERR_MASK
+		);
+	SET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS,
+		FH_RTC_INT_STATUS_CORE |\
+		FH_RTC_INT_STATUS_CORE_IDLE);
+
+
+	SET_REG(fh_rtc->v_addr+FH_RTC_INT_STATUS, 0x20);
+	mdelay(1);
+	rtc_core_status = fh_rtc_get_core(fh_rtc->v_addr,
+		FH_RTC_CMD_INT_STATUS);
+	if (rtc_core_status & FH_RTC_ISR_POWERFAIL_POS) {
+		RTC_PRINT_DBG("rtc powerfailed !\n");
+		fh_rtc_set_core(fh_rtc->v_addr,
+			FH_RTC_CMD_INT_STATUS,
+			rtc_core_status &
+			(~FH_RTC_ISR_POWERFAIL_POS));
+	}
+	/* clear all core's irq status an enable */
+	fh_rtc_set_core(fh_rtc->v_addr,
+		FH_RTC_CMD_INT_EN,
+		rtc_core_status &
+		(~FH_RTC_INT_CORE_INT_ALL_COV));
+	fh_rtc_set_core(fh_rtc->v_addr,
+		FH_RTC_CMD_INT_STATUS,
+		rtc_core_status &
+		(~FH_RTC_INT_CORE_INT_ALL_COV));
+
+	platform_set_drvdata(pdev, fh_rtc);
+
+	/* register RTC and exit */
+	rtc = rtc_device_register(rtc_platform_info->dev_name, \
+		&pdev->dev, &fh_rtcops, THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		dev_err(&pdev->dev, "cannot attach rtc\n");
+		err = PTR_ERR(rtc);
+		goto err_nores;
+	}
+	fh_rtc->rtc = rtc;
+
+	INIT_WORK(&rtc_int_wq, rtc_core_int_handler);
+	/* register RTC IRQs */
+	err = request_irq(fh_rtc->irq , fh_rtc_irq, 0,
+		dev_name(&pdev->dev), fh_rtc);
+	if (err) {
+		RTC_PRINT_DBG("request_irq failed, %d\n", err);
+		goto err_nores;
+	}
+
+	/* register RTC MISC device */
+	err = misc_register(&fh_rtc_misc);
+	if (err < 0)
+		RTC_PRINT_DBG("register rtc misc device error\n");
+	/* register RTC PROC FILE */
+	create_proc_rtc(fh_rtc);
+
+#ifdef USE_TSENSOR
+	rtc_adjust();
+#endif
+
+	return 0;
+
+err_nores:
+	return err;
+}
+
+/*
+ * fh_rtc_remove - undo everything fh_rtc_probe() set up, in reverse order.
+ *
+ * Bug fix: probe() registers the "fh_rtc_misc" misc device but the old
+ * remove path never unregistered it, so a rmmod/insmod cycle would fail
+ * with a stale misc minor.  misc_deregister() is added here.
+ */
+static int fh_rtc_remove(struct platform_device *dev)
+{
+	struct fh_rtc_controller *fh_rtc = platform_get_drvdata(dev);
+
+	remove_proc();
+	misc_deregister(&fh_rtc_misc);
+	free_irq(fh_rtc->irq, fh_rtc);
+	rtc_device_unregister(fh_rtc->rtc);
+	/*
+	 * NOTE(review): probe() ioremaps into fh_rtc->v_addr; confirm that
+	 * ->regs aliases the same mapping before relying on this iounmap.
+	 */
+	iounmap(fh_rtc->regs);
+	platform_set_drvdata(dev, NULL);
+	kfree(fh_rtc);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+/* RTC Power management control */
+
+/*
+ * Suspend/resume are intentionally empty: presumably the RTC core keeps
+ * counting on its own supply across suspend, so there is no register
+ * state to save or restore here -- TODO confirm against the SoC manual.
+ */
+static int fh_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	return 0;
+}
+
+static int fh_rtc_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+#else
+/* No CONFIG_PM: register NULL callbacks with the platform bus. */
+#define fh_rtc_suspend NULL
+#define fh_rtc_resume NULL
+#endif
+/* Device node name of the raw-access misc interface (/dev/fh_rtc_misc). */
+#define FH_RTC_MISC_DEVICE_NAME "fh_rtc_misc"
+
+/* Nothing to set up per-open; all state lives in the global fh_rtc. */
+static int fh_rtc_misc_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* Nothing to tear down on close. */
+static int fh_rtc_misc_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/*
+ * fh_rtc_misc_ioctl - raw-access ioctl handler for the fh_rtc misc device.
+ *
+ * Exposes the RTC core to user space: tsensor readout, the currently
+ * applied frequency-offset value/index, the 12-entry offset LUT, and
+ * direct core register peek/poke.
+ *
+ * Returns 0 on success, -EFAULT if the user-space copy fails, -ENOTTY
+ * for an unknown command.
+ *
+ * Bug fix: copy_to_user()/copy_from_user() return the number of bytes
+ * NOT copied (an unsigned value >= 0), so the old "if (status < 0)"
+ * checks could never fire and copy failures were silently ignored.
+ * Any non-zero return is now reported as -EFAULT.
+ */
+long fh_rtc_misc_ioctl(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	int current_tsensor_data, current_offset, current_idx;
+	unsigned long status;
+	int count;
+	int lut[12] = { 0 };
+	int reg[2] = {0};
+
+	switch (cmd) {
+	/* get temperature data from tsensor */
+	case GET_TSENSOR_DATA:
+		current_tsensor_data = fh_rtc_get_core(fh_rtc->v_addr,
+				FH_RTC_CMD_TEMP_INFO);
+		status = copy_to_user((int __user *)(arg),
+				&current_tsensor_data,
+				sizeof(current_tsensor_data));
+		if (status)
+			return -EFAULT;
+		break;
+	/* get current used offset data (bits 23:16 of the OFFSET reg) */
+	case GET_CURRENT_OFFSET_DATA:
+		current_offset = fh_rtc_get_core(fh_rtc->v_addr,
+				FH_RTC_CMD_OFFSET);
+		current_offset = current_offset & (0xff << 16);
+		current_offset = current_offset >> 16;
+		status = copy_to_user((int __user *)(arg),
+				&current_offset, sizeof(current_offset));
+		if (status)
+			return -EFAULT;
+		break;
+	/* get current used offset index of lut (bits 15:10) */
+	case GET_CURRENT_OFFSET_IDX:
+		current_offset = fh_rtc_get_core(fh_rtc->v_addr,
+				FH_RTC_CMD_OFFSET);
+		current_idx = current_offset & (0x3f << 10);
+		current_idx = current_idx >> 10;
+		status = copy_to_user((int __user *)(arg),
+				&current_idx, sizeof(current_idx));
+		if (status)
+			return -EFAULT;
+		break;
+	/* read back the whole 12-entry offset LUT */
+	case RTC_GET_LUT:
+		for (count = 0; count < 12; count++)
+			lut[count] = fh_rtc_get_core(fh_rtc->v_addr,
+				FH_RTC_CMD_OFFSET_LUT + (count << 4));
+
+		status = copy_to_user((int __user *)(arg), &lut[0],
+				count * 4);
+		if (status)
+			return -EFAULT;
+		break;
+	/* rewrite the whole 12-entry offset LUT */
+	case RTC_SET_LUT:
+		status = copy_from_user(&lut[0], (int __user *)(arg),
+				sizeof(lut));
+		if (status)
+			return -EFAULT;
+		for (count = 0; count < 12; count++)
+			fh_rtc_set_core(fh_rtc->v_addr,
+				FH_RTC_CMD_OFFSET_LUT + (count << 4),
+				lut[count]);
+		break;
+	/* dump the first 9 RTC core registers (stride 0x10) */
+	case GET_REG_VALUE:
+		for (count = 0; count < 9; count++)
+			lut[count] = fh_rtc_get_core(fh_rtc->v_addr,
+				(count & 0xf) << 4);
+
+		status = copy_to_user((int __user *)(arg), &lut[0],
+				count * 4);
+		if (status)
+			return -EFAULT;
+		break;
+	/* set one RTC core register: reg[0] = address, reg[1] = value */
+	case SET_REG_VALUE:
+		status = copy_from_user(&reg[0], (int __user *)(arg),
+				sizeof(reg));
+		if (status)
+			return -EFAULT;
+
+		fh_rtc_set_core(fh_rtc->v_addr, reg[0], reg[1]);
+		pr_info("set reg addr :%x, value: %x\n",
+				reg[0], reg[1]);
+		break;
+	/* trigger a temperature conversion, then read it back */
+	case GET_TEMP_VALUE:
+		fh_rtc_get_temp(fh_rtc->v_addr);
+		lut[0] = fh_rtc_get_core(fh_rtc->v_addr,
+				FH_RTC_CMD_TEMP_INFO);
+		pr_info("temp value is %x\n", lut[0]);
+		status = copy_to_user((int __user *)(arg),
+				&lut[0], sizeof(lut[0]));
+		if (status)
+			return -EFAULT;
+		break;
+	default:
+		pr_info("cmd is invalid\n");
+		return -ENOTTY;
+	}
+
+	return 0;
+
+}
+/* File operations for the raw-access misc node; only ioctl does work. */
+static const struct file_operations fh_rtc_misc_fops = {
+	.owner = THIS_MODULE,
+	.open = fh_rtc_misc_open,
+	.release = fh_rtc_misc_release,
+	.unlocked_ioctl = fh_rtc_misc_ioctl,
+};
+
+/* Misc device descriptor; minor number is allocated dynamically. */
+static struct miscdevice fh_rtc_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = FH_RTC_MISC_DEVICE_NAME,
+	/*.nodename = FH_EFUSE_MISC_DEVICE_NODE_NAME,*/
+	.fops = &fh_rtc_misc_fops,
+};
+/* Platform driver binding against the "fh_rtc" platform device. */
+static struct platform_driver fh_rtc_driver = {
+	.probe = fh_rtc_probe,
+	.remove = __devexit_p(fh_rtc_remove),
+	.suspend = fh_rtc_suspend,
+	.resume = fh_rtc_resume,
+	.driver = {
+		.name = "fh_rtc",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Module entry: register the platform driver (probe runs on device match). */
+static int __init fh_rtc_init(void)
+{
+
+	return platform_driver_register(&fh_rtc_driver);
+}
+
+/* Module exit: unregister the platform driver (remove runs per device). */
+static void __exit fh_rtc_exit(void)
+{
+	platform_driver_unregister(&fh_rtc_driver);
+}
+
+#ifdef USE_TSENSOR
+/*
+ * Program the temperature-compensation coefficient and offset fields of
+ * the TEMP_CFG core register (coef in bits 7:0, offset shifted to bit 8)
+ * while preserving the upper half of the register.
+ *
+ * Returns the fh_rtc_set_core() status, or -1 on range error.
+ *
+ * NOTE(review): offset is accepted up to 0xffff but (offset<<8) reaches
+ * bits 23:8 while only bits 15:0 were cleared, so offsets above 0xff OR
+ * into preserved bits -- confirm the intended field width in the SoC
+ * manual.
+ */
+static int fh_rtc_temp_cfg_coef_offset(unsigned int coef, unsigned int offset)
+{
+	unsigned int temp_cfg;
+	int status;
+
+	if (coef > 0xff) {
+		pr_err("coef invalid\n");
+		return -1;
+	}
+	if (offset > 0xffff) {
+		pr_err("offset invalid\n");
+		return -1;
+	}
+	/* read-modify-write: keep bits 31:16, replace bits 15:0 */
+	temp_cfg = fh_rtc_get_core(fh_rtc->v_addr,
+			FH_RTC_CMD_TEMP_CFG);
+	temp_cfg &= 0xffff0000;
+	temp_cfg |= coef;
+	temp_cfg |= (offset<<8);
+
+	status = fh_rtc_set_core(fh_rtc->v_addr,
+			FH_RTC_CMD_TEMP_CFG, temp_cfg);
+
+	return status;
+}
+
+/*
+ * Program the low/high temperature thresholds in TEMP_CFG: thl occupies
+ * bits 21:16, thh bits 27:22 (6 bits each); the 0xf000ffff mask clears
+ * exactly that 27:16 window and preserves everything else.
+ *
+ * Returns the fh_rtc_set_core() status, or -1 on range error.
+ */
+static int fh_rtc_temp_cfg_thl_thh(unsigned int thl, unsigned int thh)
+{
+	unsigned int temp_cfg;
+	int status;
+
+	if (thl > 0x3f) {
+		pr_err("thl invalid\n");
+		return -1;
+	}
+	if (thh > 0x3f) {
+		pr_err("thh invalid\n");
+		return -1;
+	}
+
+	temp_cfg = fh_rtc_get_core(fh_rtc->v_addr,
+			FH_RTC_CMD_TEMP_CFG);
+	temp_cfg &= 0xf000ffff;
+	temp_cfg |= (thl<<16);
+	temp_cfg |= (thh<<22);
+
+	status = fh_rtc_set_core(fh_rtc->v_addr,
+			FH_RTC_CMD_TEMP_CFG,
+			temp_cfg);
+
+	return status;
+}
+#if 0
+static int fh_rtc_temp_cfg_update_time(unsigned int time)
+{
+ unsigned int temp_cfg;
+ int status;
+
+ if (time > 5) {
+ pr_err("update time invalid\n");
+ return -1;
+ }
+
+ temp_cfg = fh_rtc_get_core(fh_rtc->v_addr,
+ FH_RTC_CMD_TEMP_CFG);
+ temp_cfg &= 0xf0000000;
+ temp_cfg |= time;
+
+ status = fh_rtc_set_core(fh_rtc->v_addr,
+ FH_RTC_CMD_TEMP_CFG, temp_cfg);
+
+ return status;
+}
+#endif
+#endif
+/*
+ * zy_rtc_proc_read - /proc read hook: log the live compensation state.
+ *
+ * Prints the currently applied frequency offset, the LUT index it came
+ * from, and the converted tsensor temperature.  Always reports 0 bytes
+ * to the proc layer (output goes to the kernel log only).
+ *
+ * Bug fix: the LUT index (bits 15:10) was previously extracted from a
+ * value already masked down to bits 23:16, so it always read as 0 (and
+ * was never printed).  It is now taken from the raw register value and
+ * logged alongside the offset.
+ */
+ssize_t zy_rtc_proc_read(char *page, char **start, off_t off, \
+		int count, int *eof, struct fh_rtc_controller *data)
+{
+#ifdef USE_TSENSOR
+	int raw_offset, current_offset, current_idx;
+	int current_tsensor_data, current_cfg;
+
+	raw_offset = fh_rtc_get_core(fh_rtc->v_addr,
+			FH_RTC_CMD_OFFSET);
+	current_cfg = fh_rtc_get_core(fh_rtc->v_addr,
+			FH_RTC_CMD_TEMP_CFG);
+	(void)current_cfg;	/* read kept for parity; value unused */
+	current_offset = raw_offset & (0xff << 16);
+	current_idx = (raw_offset & (0x3f << 10)) >> 10;
+	current_tsensor_data = fh_rtc_get_core(fh_rtc->v_addr,
+			FH_RTC_CMD_TEMP_INFO);
+	/* scale the 12-bit tsensor code to degrees: code*210/4096 - 56 */
+	current_tsensor_data = current_tsensor_data*210;
+	current_tsensor_data = (int)(current_tsensor_data/4096);
+	pr_info("[RTC]:current offset:%x\n", current_offset>>16);
+	pr_info("[RTC]:current lut idx:%d\n", current_idx);
+	pr_info("[RTC]:current temp:%d\n", current_tsensor_data-56);
+#endif
+
+	return 0;
+}
+
+/* /proc write hook: writes are accepted and discarded (read-only entry). */
+static ssize_t fh_rtc_proc_write(struct file *filp, \
+		const char *buf, size_t len,\
+loff_t *off)
+{
+	return 0;
+}
+
+/*
+ * create_proc_rtc - create the read-only RTC proc entry and attach the
+ * read/write hooks plus the controller as private data.
+ *
+ * Consistency fix: the old body ignored its @rtc parameter and reached
+ * through the file-scope global instead; the caller passes that same
+ * global, so using the parameter is behavior-identical and makes the
+ * function reusable.
+ */
+static void create_proc_rtc(struct fh_rtc_controller *rtc)
+{
+	rtc->proc_rtc_entry = create_proc_entry(FH_RTC_PROC_FILE,
+			S_IRUGO, NULL);
+
+	if (!rtc->proc_rtc_entry) {
+		pr_err("create proc failed\n");
+	} else {
+		rtc->proc_rtc_entry->read_proc =
+			(read_proc_t *) zy_rtc_proc_read;
+		rtc->proc_rtc_entry->write_proc =
+			(write_proc_t *) fh_rtc_proc_write;
+		rtc->proc_rtc_entry->data = rtc;
+	}
+}
+
+/* Remove the proc entry created by create_proc_rtc(). */
+static void remove_proc(void)
+{
+	remove_proc_entry(FH_RTC_PROC_FILE, NULL);
+}
+
+module_init(fh_rtc_init);
+module_exit(fh_rtc_exit);
+
+
+MODULE_DESCRIPTION("FH SOC RTC Driver");
+MODULE_AUTHOR("yu.zhang <zhangy@fullhan.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:fh-rtc");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index de35c3ad..ca24baf2 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -442,6 +442,15 @@ config SPI_DW_MMIO
tristate "Memory-mapped io interface driver for DW SPI core"
depends on SPI_DESIGNWARE && HAVE_CLK
+
+config SPI_FH
+ tristate "fh spi driver for DW SPI core"
+ depends on SPI_MASTER
+
+config SPI_FH_SLAVE
+ tristate "fh spi slave driver for DW SPI core"
+
+
#
# There are lots of SPI device types, with sensors and memory
# being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 0f8c69b6..463ffaed 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -22,7 +22,13 @@ obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o
obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
obj-$(CONFIG_SPI_DW_PCI) += dw_spi_midpci.o
dw_spi_midpci-objs := dw_spi_pci.o dw_spi_mid.o
+
obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o
+
+obj-$(CONFIG_SPI_FH) += fh_spi.o
+obj-$(CONFIG_SPI_FH_SLAVE) += fh_spi_slave.o
+
+
obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o
obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
obj-$(CONFIG_SPI_IMX) += spi_imx.o
diff --git a/drivers/spi/fh_spi.c b/drivers/spi/fh_spi.c
new file mode 100644
index 00000000..4e4bd2dc
--- /dev/null
+++ b/drivers/spi/fh_spi.c
@@ -0,0 +1,2115 @@
+/** @file fh_spi.c
+ * @note ShangHai FullHan Co., Ltd. All Right Reserved.
+ * @brief fh driver
+ * @author yu.zhang
+ * @date 2015/1/11
+ * @note history
+ * @note 2015-1-11 V1.0.0 create the file.
+ */
+/*****************************************************************************
+ * Include Section
+ * add all #include here
+ *****************************************************************************/
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/gpio.h>
+#include <linux/dmaengine.h>
+#include <mach/spi.h>
+#include <mach/fh_dmac.h>
+#include <linux/dma-mapping.h>
+#include <mach/fh_dmac_regs.h>
+/*****************************************************************************
+ * Define section
+ * add all #define here
+ *****************************************************************************/
+#define lift_shift_bit_num(bit_num) (1<<bit_num)
+#define SPI_IRQ_TXEIS (lift_shift_bit_num(0))
+#define SPI_IRQ_TXOIS (lift_shift_bit_num(1))
+#define SPI_IRQ_RXUIS (lift_shift_bit_num(2))
+#define SPI_IRQ_RXOIS (lift_shift_bit_num(3))
+#define SPI_IRQ_RXFIS (lift_shift_bit_num(4))
+#define SPI_IRQ_MSTIS (lift_shift_bit_num(5))
+#define SPI_STATUS_BUSY (lift_shift_bit_num(0))
+#define SPI_STATUS_TFNF (lift_shift_bit_num(1))
+#define SPI_STATUS_TFE (lift_shift_bit_num(2))
+#define SPI_STATUS_RFNE (lift_shift_bit_num(3))
+#define SPI_STATUS_RFF (lift_shift_bit_num(4))
+#define SPI_STATUS_TXE (lift_shift_bit_num(5))
+#define SPI_STATUS_DCOL (lift_shift_bit_num(6))
+#define CACHE_LINE_SIZE (32)
+#define PUMP_DATA_NONE_MODE (0x00)
+#define PUMP_DATA_DMA_MODE (0x11)
+#define PUMP_DATA_ISR_MODE (0x22)
+#define PUMP_DATA_POLL_MODE (0x33)
+#define DMA_TRANS_GATE_LEVEL 1024
+#define SPI_DATA_REG_OFFSET (0x60)
+#define SPI_RX_ONLY_ONE_TIME_SIZE (0x10000)
+#define MAX_SG_LEN 128
+#if (SPI_RX_ONLY_ONE_TIME_SIZE > 2048)
+#define SG_ONE_TIME_MAX_SIZE 2048
+#else
+#define SG_ONE_TIME_MAX_SIZE SPI_RX_ONLY_ONE_TIME_SIZE
+#endif
+#define SPI_DMA_PROTCTL_DATA 6
+/****************************************************************************
+ * ADT section
+ * add definition of user defined Data Type that only be used in this file here
+ ***************************************************************************/
+enum {
+ CONFIG_OK = 0, CONFIG_PARA_ERROR = lift_shift_bit_num(0),
+ CONFIG_BUSY = lift_shift_bit_num(1),
+ WRITE_READ_OK = 0,
+ WRITE_READ_ERROR = lift_shift_bit_num(2),
+ WRITE_READ_TIME_OUT = lift_shift_bit_num(3),
+ WRITE_ONLY_OK = 0,
+ WRITE_ONLY_ERROR = lift_shift_bit_num(4),
+ WRITE_ONLY_TIME_OUT = lift_shift_bit_num(5),
+ READ_ONLY_OK = 0,
+ READ_ONLY_ERROR = lift_shift_bit_num(6),
+ READ_ONLY_TIME_OUT = lift_shift_bit_num(7),
+ EEPROM_OK = 0,
+ EEPROM_ERROR = lift_shift_bit_num(8),
+ EEPROM_TIME_OUT = lift_shift_bit_num(9),
+ MULTI_MASTER_ERROR = lift_shift_bit_num(10),
+ TX_OVERFLOW_ERROR = lift_shift_bit_num(11),
+ RX_OVERFLOW_ERROR = lift_shift_bit_num(12),
+};
+
+typedef enum enum_spi_enable {
+SPI_DISABLE = 0,
+SPI_ENABLE = (lift_shift_bit_num(0)),
+} spi_enable_e;
+
+typedef enum enum_spi_polarity {
+SPI_POLARITY_LOW = 0,
+SPI_POLARITY_HIGH = (lift_shift_bit_num(7)),
+SPI_POLARITY_RANGE = (lift_shift_bit_num(7)),
+} spi_polarity_e;
+
+typedef enum enum_spi_phase {
+SPI_PHASE_RX_FIRST = 0,
+SPI_PHASE_TX_FIRST = (lift_shift_bit_num(6)),
+SPI_PHASE_RANGE = (lift_shift_bit_num(6)),
+} spi_phase_e;
+
+typedef enum enum_spi_format {
+SPI_MOTOROLA_MODE = 0x00,
+SPI_TI_MODE = 0x10,
+SPI_MICROWIRE_MODE = 0x20,
+SPI_FRAME_FORMAT_RANGE = 0x30,
+} spi_format_e;
+
+
+typedef enum enum_spi_data_size {
+SPI_DATA_SIZE_4BIT = 0x03,
+SPI_DATA_SIZE_5BIT = 0x04,
+SPI_DATA_SIZE_6BIT = 0x05,
+SPI_DATA_SIZE_7BIT = 0x06,
+SPI_DATA_SIZE_8BIT = 0x07,
+SPI_DATA_SIZE_9BIT = 0x08,
+SPI_DATA_SIZE_10BIT = 0x09,
+SPI_DATA_SIZE_16BIT = 0x0f,
+SPI_DATA_SIZE_RANGE = 0x0f,
+} spi_data_size_e;
+
+typedef enum enum_spi_transfer_mode {
+SPI_TX_RX_MODE = 0x000,
+SPI_ONLY_TX_MODE = 0x100,
+SPI_ONLY_RX_MODE = 0x200,
+SPI_EEPROM_MODE = 0x300,
+SPI_TRANSFER_MODE_RANGE = 0x300,
+} spi_transfer_mode_e;
+
+typedef enum enum_spi_baudrate {
+SPI_SCLKIN = 50000000,
+SPI_SCLKOUT_27000000 = (SPI_SCLKIN / 27000000),
+SPI_SCLKOUT_13500000 = (SPI_SCLKIN / 13500000),
+SPI_SCLKOUT_6750000 = (SPI_SCLKIN / 6750000),
+SPI_SCLKOUT_4500000 = (SPI_SCLKIN / 4500000),
+SPI_SCLKOUT_3375000 = (SPI_SCLKIN / 3375000),
+SPI_SCLKOUT_2700000 = (SPI_SCLKIN / 2700000),
+SPI_SCLKOUT_1500000 = (SPI_SCLKIN / 1500000),
+SPI_SCLKOUT_500000 = (SPI_SCLKIN / 500000),
+SPI_SCLKOUT_100000 = (SPI_SCLKIN / 100000),
+} spi_baudrate_e;
+
+typedef enum enum_spi_irq {
+SPI_IRQ_TXEIM = (lift_shift_bit_num(0)),
+SPI_IRQ_TXOIM = (lift_shift_bit_num(1)),
+SPI_IRQ_RXUIM = (lift_shift_bit_num(2)),
+SPI_IRQ_RXOIM = (lift_shift_bit_num(3)),
+SPI_IRQ_RXFIM = (lift_shift_bit_num(4)),
+SPI_IRQ_MSTIM = (lift_shift_bit_num(5)),
+SPI_IRQ_ALL = 0x3f,
+} spi_irq_e;
+
+typedef enum enum_spi_slave {
+SPI_SLAVE_PORT0 = (lift_shift_bit_num(0)),
+SPI_SLAVE_PORT1 = (lift_shift_bit_num(1)),
+} spi_slave_e;
+
+typedef enum enum_spi_dma_control_mode {
+SPI_DMA_RX_POS = (lift_shift_bit_num(0)),
+SPI_DMA_TX_POS = (lift_shift_bit_num(1)),
+SPI_DMA_CONTROL_RANGE = 0x03,
+} spi_dma_control_mode_e;
+
+/*read wire mode*/
+typedef enum enum_spi_read_wire_mode {
+STANDARD_READ = 0x00,
+DUAL_OUTPUT = 0x01,
+DUAL_IO = 0x02,
+QUAD_OUTPUT = 0x03,
+QUAD_IO = 0x04,
+} spi_read_wire_mode_e;
+
+/*program wire mode*/
+typedef enum enum_spi_prog_wire_mode {
+STANDARD_PROG = 0x00,
+QUAD_INPUT = 0x01,
+} spi_prog_wire_mode_e;
+
+/*ahb Xip config*/
+typedef enum enum_spi_xip_config {
+XIP_DISABLE = 0,
+XIP_ENABLE = 1,
+} spi_xip_config_e;
+
+/*ahb DPI config*/
+typedef enum enum_spi_dpi_config {
+DPI_DISABLE = 0,
+DPI_ENABLE = 1,
+} spi_dpi_config_e;
+
+/*ahb QPI config*/
+typedef enum enum_spi_qpi_config {
+QPI_DISABLE = 0,
+QPI_ENABLE = 1,
+} spi_qpi_config_e;
+
+struct fh_spi_reg {
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 ssienr;
+ u32 mwcr;
+ u32 ser;
+ u32 baudr;
+ u32 txfltr;
+ u32 rxfltr;
+ u32 txflr;
+ u32 rxflr;
+ u32 sr;
+ u32 imr;
+ u32 isr;
+ u32 risr;
+ u32 txoicr;
+ u32 rxoicr;
+ u32 rxuicr;
+ u32 msticr;
+ u32 icr;
+ u32 dmacr;
+ u32 dmatdlr;
+ u32 dmardlr;
+ u32 idr;
+ u32 version;
+ u32 dr; /* Currently oper as 32 bits,
+ though only low 16 bits matters */
+ u32 rev[35];
+ u32 rx_sample_dly;
+ u32 ccfgr;
+ u32 opcr;
+ u32 timcr;
+ u32 bbar0;
+ u32 bbar1;
+};
+
+#define dw_readl(dw, name) \
+__raw_readl(&(((struct fh_spi_reg *)dw->regs)->name))
+#define dw_writel(dw, name, val) \
+__raw_writel((val), &(((struct fh_spi_reg *)dw->regs)->name))
+#define dw_readw(dw, name) \
+__raw_readw(&(((struct fh_spi_reg *)dw->regs)->name))
+#define dw_writew(dw, name, val) \
+__raw_writew((val), &(((struct fh_spi_reg *)dw->regs)->name))
+
+struct _fh_spi_dma_transfer {
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ struct scatterlist sgl[MAX_SG_LEN];
+ u32 sgl_data_size[MAX_SG_LEN];
+ u32 actual_sgl_size;
+ struct dma_async_tx_descriptor *desc;
+};
+
+struct fh_spi {
+ void * __iomem regs; /* vaddr of the control registers */
+ u32 id;
+ u32 paddr;
+ u32 slave_port;
+ u32 irq; /* irq no */
+ u32 fifo_len; /* depth of the FIFO buffer */
+ u32 cpol;
+ u32 cpha;
+ u32 isr_flag;
+ /*clk*/
+ u32 apb_clock_in;
+ u32 max_freq; /* max bus freq supported */
+ u32 div;
+ /*use id u32 bus_num;*//*which bus*/
+ u32 num_cs; /* supported slave numbers */
+ u32 data_width;
+ u32 frame_mode;
+ u32 transfer_mode;
+ u32 active_cs_pin;
+ /*copy from the user...*/
+ u32 tx_len;
+ u32 rx_len;
+ void *rx_buff;
+ void *tx_buff;
+ u32 tx_dma_add;
+ u32 rx_dma_add;
+ u32 tx_hs_no;
+ u32 rx_hs_no;
+ u32 tx_dumy_buff[4];
+ u32 rx_dumy_buff[4];
+ u32 tx_dumy_dma_add;
+ u32 rx_dumy_dma_add;
+ struct fh_spi_cs cs_data[SPI_MASTER_CONTROLLER_MAX_SLAVE];
+ u32 pump_data_mode;
+ struct _fh_spi_dma_transfer dma_rx;
+ struct _fh_spi_dma_transfer dma_tx;
+ u32 complete_times;
+ struct fh_spi_platform_data *board_info;
+};
+
+struct fh_spi_controller {
+ struct device *master_dev;
+ struct clk *clk;
+ spinlock_t lock;
+ struct list_head queue;
+ struct platform_device *p_dev;
+ struct work_struct work;
+ struct workqueue_struct *workqueue;
+ struct spi_message *active_message;
+ struct spi_transfer *active_transfer;
+ struct fh_spi dwc;
+ struct completion done;
+ /*add multi*/
+ u32 active_wire_width;
+ u32 dir;
+ struct spi_device *active_spi_dev;
+};
+
+/******************************************************************************
+ * Function prototype section
+ * add prototypes for all functions called by this file,execepting those
+ * declared in header file
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Global variables section - Exported
+ * add declaration of global variables that will be exported here
+ * e.g.
+ * int8_t foo;
+ ****************************************************************************/
+
+/*****************************************************************************
+
+ * static fun;
+ *****************************************************************************/
+
+static u32 Spi_Enable(struct fh_spi *dw, spi_enable_e enable);
+static u32 Spi_SetPolarity(struct fh_spi *dw, spi_polarity_e polarity);
+static u32 Spi_SetPhase(struct fh_spi *dw, spi_phase_e phase);
+static u32 Spi_SetFrameFormat(struct fh_spi *dw, spi_format_e format);
+static u32 Spi_SetBaudrate(struct fh_spi *dw, spi_baudrate_e baudrate);
+static u32 Spi_DisableIrq(struct fh_spi *dw, u32 irq);
+static u32 Spi_ReadStatus(struct fh_spi *dw);
+static u32 Spi_EnableSlaveen(struct fh_spi *dw, spi_slave_e port);
+static u32 Spi_DisableSlaveen(struct fh_spi *dw, spi_slave_e port);
+static u32 Spi_EnableIrq(struct fh_spi *dw, u32 irq);
+static u32 Spi_SetTxlevlel(struct fh_spi *dw, u32 level);
+static u32 Spi_ReadTxfifolevel(struct fh_spi *dw);
+static u32 Spi_ReadRxfifolevel(struct fh_spi *dw);
+static u32 Spi_WriteData(struct fh_spi *dw, u32 data);
+static u32 Spi_ReadData(struct fh_spi *dw);
+static u32 Spi_Isrstatus(struct fh_spi *dw);
+static void Spi_SetDmaTxDataLevel(struct fh_spi *dw, u32 level);
+static void Spi_SetDmaRxDataLevel(struct fh_spi *dw, u32 level);
+static void Spi_SetDmaControlEnable(struct fh_spi *dw,
+spi_dma_control_mode_e enable_pos);
+static bool fh_spi_dma_chan_filter(struct dma_chan *chan, void *param);
+static int fh_spi_setup(struct spi_device *spi);
+static u32 Spi_SetRxdelay(struct fh_spi *dw, u8 data);
+static u32 Spi_RawIsrstatus(struct fh_spi *dw);
+static int rx_only_fix_data_width(struct fh_spi_controller *fh_spi, u32 size);
+/*****************************************************************************
+ * Global variables section - Local
+ * define global variables(will be refered only in this file) here,
+ * static keyword should be used to limit scope of local variable to this file
+ * e.g.
+ * static uint8_t ufoo;
+ *****************************************************************************/
+
+/* function body */
+
+static u32 Spi_Enable(struct fh_spi *dw, spi_enable_e enable)
+{
+ dw_writel(dw, ssienr, enable);
+ return CONFIG_OK;
+}
+
+static u32 Spi_ContinueReadNum(struct fh_spi *dw, u32 num)
+{
+ dw_writel(dw, ctrl1, (num-1));
+ return CONFIG_OK;
+}
+
+static u32 Spi_SetPolarity(struct fh_spi *dw, spi_polarity_e polarity)
+{
+ u32 data;
+
+ data = dw_readl(dw, ctrl0);
+ data &= ~(u32) SPI_POLARITY_RANGE;
+ data |= polarity;
+ dw_writel(dw, ctrl0, data);
+ return CONFIG_OK;
+}
+
+static u32 Spi_SetPhase(struct fh_spi *dw, spi_phase_e phase)
+{
+ u32 data;
+
+ data = dw_readl(dw, ctrl0);
+ data &= ~(u32) SPI_PHASE_RANGE;
+ data |= phase;
+ dw_writel(dw, ctrl0, data);
+ return CONFIG_OK;
+}
+
+static u32 Spi_SetFrameFormat(struct fh_spi *dw, spi_format_e format)
+{
+ u32 data = 0;
+
+ data = dw_readl(dw, ctrl0);
+ data &= ~(u32) SPI_FRAME_FORMAT_RANGE;
+ data |= format;
+ dw_writel(dw, ctrl0, data);
+ return CONFIG_OK;
+}
+
+static u32 Spi_SetTransferMode(struct fh_spi *dw, spi_transfer_mode_e mode)
+{
+ u32 data = 0;
+
+ data = dw_readl(dw, ctrl0);
+ data &= ~(u32) SPI_TRANSFER_MODE_RANGE;
+ data |= mode;
+ dw_writel(dw, ctrl0, data);
+ return CONFIG_OK;
+}
+
+static u32 Spi_SetBaudrate(struct fh_spi *dw, spi_baudrate_e baudrate)
+{
+ dw_writel(dw, baudr, baudrate);
+ return CONFIG_OK;
+}
+
+static u32 Spi_DisableIrq(struct fh_spi *dw, u32 irq)
+{
+ u32 data = 0;
+
+ data = dw_readl(dw, imr);
+ data &= ~irq;
+ dw_writel(dw, imr, data);
+ return CONFIG_OK;
+}
+
+static u32 Spi_EnableIrq(struct fh_spi *dw, u32 irq)
+{
+ u32 data = 0;
+
+ data = dw_readl(dw, imr);
+ data |= irq;
+ dw_writel(dw, imr, data);
+ return CONFIG_OK;
+
+}
+
+static u32 Spi_SetTxlevlel(struct fh_spi *dw, u32 level)
+{
+ dw_writel(dw, txfltr, level);
+ return CONFIG_OK;
+}
+
+static u32 Spi_ReadTxfifolevel(struct fh_spi *dw)
+{
+ return dw_readl(dw, txflr);
+}
+
+static u32 Spi_ReadRxfifolevel(struct fh_spi *dw)
+{
+ return (u32) dw_readl(dw, rxflr);
+}
+
+static u32 Spi_ReadStatus(struct fh_spi *dw)
+{
+ return (u32) dw_readl(dw, sr);
+}
+
+static u32 Spi_EnableSlaveen(struct fh_spi *dw, spi_slave_e port)
+{
+ u32 data = 0;
+
+ gpio_direction_output(dw->active_cs_pin, 0);
+ data = dw_readl(dw, ser);
+ data |= port;
+ dw_writel(dw, ser, data);
+ return CONFIG_OK;
+}
+
+static u32 Spi_DisableSlaveen(struct fh_spi *dw, spi_slave_e port)
+{
+ u32 data = 0;
+ gpio_direction_output(dw->active_cs_pin, 1);
+ data = dw_readl(dw, ser);
+ data &= ~port;
+ dw_writel(dw, ser, data);
+ return CONFIG_OK;
+}
+
+static u32 Spi_WriteData(struct fh_spi *dw, u32 data)
+{
+ dw_writel(dw, dr, data);
+ return WRITE_ONLY_OK;
+}
+
+static u32 Spi_ReadData(struct fh_spi *dw)
+{
+ return dw_readl(dw, dr);
+}
+
+/*
+ * Spi_Clearallerror - clear all combined SPI error interrupts.
+ *
+ * ICR is read-to-clear: the read itself performs the clear and the
+ * returned value is irrelevant.  The old body stored the read into a
+ * local and then dead-stored 0 over it; discard the value explicitly
+ * instead (same bus access, no unused-variable noise).
+ */
+static void Spi_Clearallerror(struct fh_spi *dw)
+{
+	(void) dw_readl(dw, icr);
+}
+
+static u32 Spi_Isrstatus(struct fh_spi *dw)
+{
+ u32 data = dw_readl(dw, isr);
+ return data;
+}
+
+static u32 Spi_RawIsrstatus(struct fh_spi *dw)
+{
+ u32 data = dw_readl(dw, risr);
+ return data;
+}
+
+static void Spi_SetDmaTxDataLevel(struct fh_spi *dw, u32 level)
+{
+ dw_writel(dw, dmatdlr, level);
+}
+
+static void Spi_SetDmaRxDataLevel(struct fh_spi *dw, u32 level)
+{
+ dw_writel(dw, dmardlr, level);
+}
+
+static void Spi_SetDmaControlEnable(struct fh_spi *dw,
+spi_dma_control_mode_e enable_pos)
+{
+ u32 data;
+ data = dw_readl(dw, dmacr);
+ data |= enable_pos;
+ dw_writel(dw, dmacr, data);
+}
+
+static void Spi_SetDmaControlDisable(struct fh_spi *dw,
+spi_dma_control_mode_e enable_pos)
+{
+ u32 data;
+ data = dw_readl(dw, dmacr);
+ data &= ~enable_pos;
+ dw_writel(dw, dmacr, data);
+}
+
+static u32 Spi_SetRxdelay(struct fh_spi *dw, u8 data)
+{
+ dw_writel(dw, rx_sample_dly, data);
+ return CONFIG_OK;
+}
+
+/*
+ * tx_max_tx_only - entries we may push into the tx fifo in tx-only mode:
+ * the smaller of free fifo space and the bytes still left to send.
+ */
+static inline u32 tx_max_tx_only(struct fh_spi_controller *fh_spi)
+{
+	u32 hw_tx_level;
+	hw_tx_level = Spi_ReadTxfifolevel(&fh_spi->dwc);
+	hw_tx_level = fh_spi->dwc.fifo_len - hw_tx_level;
+
+	return min(hw_tx_level, fh_spi->dwc.tx_len);
+}
+
+/*
+ * tx_max - entries we may push into the tx fifo in full-duplex mode.
+ *
+ * Every byte written eventually lands in the rx fifo, so the budget is
+ * limited by rx headroom as well: bytes in-flight in the tx fifo plus
+ * bytes already pending in the rx fifo, plus one extra slot for the
+ * byte in the shift register.  Returns the minimum of free tx space,
+ * remaining tx_len, and that rx headroom.
+ */
+static inline u32 tx_max(struct fh_spi_controller *fh_spi)
+{
+
+	u32 hw_tx_level, hw_rx_level;
+	u32 temp_tx_lev;
+	temp_tx_lev = Spi_ReadTxfifolevel(&fh_spi->dwc);
+	hw_rx_level = temp_tx_lev + Spi_ReadRxfifolevel(&fh_spi->dwc);
+	if (hw_rx_level >= fh_spi->dwc.fifo_len)
+		return 0;
+	/* add shift data... maybe should add apb bus delay */
+	hw_rx_level++;
+
+	hw_tx_level = temp_tx_lev;
+	hw_tx_level = fh_spi->dwc.fifo_len - hw_tx_level;
+	hw_rx_level = fh_spi->dwc.fifo_len - hw_rx_level;
+	/* min(hw_tx_level, fh_spi->dwc.tx_len); */
+	return min(min(hw_tx_level, fh_spi->dwc.tx_len), hw_rx_level);
+}
+
+/* Return the max entries we should read out of rx fifo */
+static inline u32 rx_max(struct fh_spi_controller *fh_spi)
+{
+ u32 hw_rx_level;
+ hw_rx_level = Spi_ReadRxfifolevel(&fh_spi->dwc);
+ return hw_rx_level;
+}
+
+/*add spic new code here below..*/
+/* Read-modify-write helper: clear the bits selected by @mask in *@data,
+ * then or-in @value. */
+static void reg_bit_process(u32 *data, u32 value, u32 mask)
+{
+	u32 tmp = *data;
+
+	tmp &= ~mask;
+	tmp |= value;
+	*data = tmp;
+}
+
+
+/*
+ * Configure the APB read wire mode in CCFGR: the four sub-fields set
+ * per mode are (bits 10:8), (bits 3:2), (bits 1:0) and (bits 6:4),
+ * matching single/dual/quad output encodings.
+ *
+ * NOTE(review): DUAL_IO and QUAD_IO values exist in the enum but have
+ * no case here -- they fall into default, print a warning, and still
+ * write the (unmodified) register with bit 13 forced on.  Also printk
+ * lacks a KERN_ level.  Confirm whether those modes are intentionally
+ * unsupported.
+ */
+static int Spi_SetApbReadWireMode(struct fh_spi *dw, spi_read_wire_mode_e mode)
+{
+
+	u32 data = dw_readl(dw, ccfgr);
+	switch (mode){
+	case STANDARD_READ:
+		reg_bit_process(&data, 0 << 8, 7 << 8);
+		reg_bit_process(&data, 0 << 2, 3 << 2);
+		reg_bit_process(&data, 0 << 0, 3 << 0);
+		reg_bit_process(&data, 0 << 4, 7 << 4);
+		break;
+
+	case DUAL_OUTPUT:
+		reg_bit_process(&data, 0 << 8, 7 << 8);
+		reg_bit_process(&data, 3 << 2, 3 << 2);
+		reg_bit_process(&data, 1 << 0, 3 << 0);
+		reg_bit_process(&data, 0 << 4, 7 << 4);
+		break;
+
+	case QUAD_OUTPUT:
+		reg_bit_process(&data, 0 << 8, 7 << 8);
+		reg_bit_process(&data, 3 << 2, 3 << 2);
+		reg_bit_process(&data, 2 << 0, 3 << 0);
+		reg_bit_process(&data, 0 << 4, 7 << 4);
+		break;
+	default:
+		printk("wrong mode now....\n");
+	}
+	/* bit 13 is set unconditionally, for every mode */
+	data |= 1<<13;
+	dw_writel(dw, ccfgr, data);
+	return CONFIG_OK;
+}
+
+static int Spi_SetXip(struct fh_spi *dw, spi_xip_config_e value)
+{
+
+
+ u32 data = dw_readl(dw, ccfgr);
+ u32 data1 = dw_readl(dw, opcr);
+
+ if (value == XIP_ENABLE) {
+ reg_bit_process(&data, XIP_ENABLE << 11, 1 << 11);
+ reg_bit_process(&data1, 0x20 << 20, 0xff << 20);
+ } else if (value == XIP_DISABLE) {
+ reg_bit_process(&data, XIP_DISABLE << 11, 1 << 11);
+ reg_bit_process(&data1, 0xff << 20, 0xff << 20);
+ }
+ dw_writel(dw, ccfgr, data);
+ dw_writel(dw, opcr, data1);
+
+ return 0;
+}
+
+static int Spi_SetDPI(struct fh_spi *dw, spi_dpi_config_e value)
+{
+
+ u32 data = dw_readl(dw, opcr);
+ reg_bit_process(&data, value << 16, 1 << 16);
+ dw_writel(dw, opcr, data);
+ return 0;
+}
+
+SINT32 Spi_SetSwap(struct fh_spi *dw, unsigned int value)
+{
+ u32 data;
+
+ Spi_Enable(dw, SPI_DISABLE);
+ data = dw_readl(dw, ccfgr);
+ data &= ~(1<<12);
+ data |= (value<<12);
+ dw_writel(dw, ccfgr, data);
+ Spi_Enable(dw, SPI_ENABLE);
+ return 0;
+}
+
+SINT32 Spi_SetWidth(struct fh_spi *dw, unsigned int value)
+{
+ u32 data;
+
+ Spi_Enable(dw, SPI_DISABLE);
+ data = dw_readl(dw, ctrl0);
+ data &= ~(0x0f<<0);
+ data |= ((value-1)<<0);
+ dw_writel(dw, ctrl0, data);
+ Spi_Enable(dw, SPI_ENABLE);
+ return 0;
+}
+
+static int Spi_SetQPI(struct fh_spi *dw, spi_qpi_config_e value)
+{
+
+ u32 data = dw_readl(dw, opcr);
+
+ reg_bit_process(&data, value << 17, 1 << 17);
+ dw_writel(dw, opcr, data);
+
+ return 0;
+}
+
+static int Spi_TimingConfigure(struct fh_spi *dw, u32 value)
+{
+ dw_writel(dw, timcr, value);
+ return 0;
+}
+
+/*
+ * Assert (via BUG_ON) that the controller is fully idle before a wire
+ * width change: both rx fifos empty, both tx fifos empty, and the shift
+ * engine not busy.  Status bit meanings below come from the SR checks
+ * themselves -- bits 10/9 are the AHB fifos, 3/2 the APB fifos, 0 busy.
+ */
+void fh_spic_check_idle(struct fh_spi *dw){
+	u32 status;
+	status = Spi_ReadStatus(dw);
+	/*ahb rx fifo not empty..*/
+	BUG_ON((status & 1<<10) != 0);
+	/*ahb tx fifo empty..*/
+	BUG_ON((status & 1<<9) != 1<<9);
+	/*apb rx fifo*/
+	BUG_ON((status & 1<<3) != 0);
+	/*apb tx fifo*/
+	BUG_ON((status & 1<<2) != 1<<2);
+	/*shift not busy..*/
+	BUG_ON((status & 1) != 0);
+}
+
+
+int spic_wire_init(struct spi_master * p_master){
+
+ struct fh_spi_controller *fh_spi;
+ fh_spi = spi_master_get_devdata(p_master);
+ Spi_SetXip(&fh_spi->dwc, XIP_DISABLE);
+ Spi_SetDPI(&fh_spi->dwc, DPI_DISABLE);
+ Spi_SetQPI(&fh_spi->dwc, QPI_DISABLE);
+ Spi_TimingConfigure(&fh_spi->dwc, 0x0);
+ Spi_SetApbReadWireMode(&fh_spi->dwc,STANDARD_READ);
+ return 0;
+
+}
+
+void spi_bus_change_1_wire(struct spi_master *p_master)
+{
+ struct fh_spi_controller *fh_spi;
+ fh_spi = spi_master_get_devdata(p_master);
+ fh_spi->active_wire_width = ONE_WIRE_SUPPORT;
+ fh_spi->dir = SPI_DATA_DIR_DUOLEX;
+ fh_spic_check_idle(&fh_spi->dwc);
+ Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+ Spi_SetApbReadWireMode(&fh_spi->dwc, STANDARD_READ);
+ Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+
+}
+
+
+void spi_bus_change_2_wire(struct spi_master *p_master,unsigned int dir)
+{
+
+ struct fh_spi_controller *fh_spi;
+ fh_spi = spi_master_get_devdata(p_master);
+ fh_spi->active_wire_width = DUAL_WIRE_SUPPORT;
+ fh_spi->dir = dir;
+ fh_spic_check_idle(&fh_spi->dwc);
+ Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+ Spi_SetApbReadWireMode(&fh_spi->dwc, DUAL_OUTPUT);
+ Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+}
+
+void spi_bus_change_4_wire(struct spi_master *p_master,unsigned int dir)
+{
+ struct fh_spi_controller *fh_spi;
+ fh_spi = spi_master_get_devdata(p_master);
+ fh_spi->active_wire_width = QUAD_WIRE_SUPPORT;
+ fh_spi->dir = dir;
+ fh_spic_check_idle(&fh_spi->dwc);
+ Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+ Spi_SetApbReadWireMode(&fh_spi->dwc, QUAD_OUTPUT);
+ Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+}
+
+
+/*
+ * Busy-poll SR until the controller is no longer busy (bit 0 clear) and
+ * the tx fifo reports empty (bit 2 set).
+ *
+ * NOTE(review): unbounded busy-wait with no timeout or cpu_relax();
+ * a wedged controller would hang this CPU here.
+ */
+static void spi_wait_tx_only_done(struct fh_spi_controller *fh_spi){
+	u32 status;
+	do {
+		status = Spi_ReadStatus(&fh_spi->dwc);
+	} while ((status & 0x01) || (!(status & 0x04)));
+}
+
+/*
+ * fh_spi_init_hw - one-time controller bring-up from platform data.
+ *
+ * Copies board info (bus id, fifo depth, chip-select pins, DMA
+ * handshake numbers) into the driver state, waits for the shift engine
+ * to go idle, optionally registers the multi-wire callbacks on the
+ * master's advanced-info block, then programs frame format and transfer
+ * mode with the controller disabled (required ordering: ctrl0 may only
+ * be written while SSIENR is 0) and re-enables it.
+ *
+ * Always returns 0.
+ */
+static int fh_spi_init_hw(struct fh_spi_controller *fh_spi,
+struct fh_spi_platform_data *board_info,struct spi_master *p_master)
+{
+	int status, i;
+	struct _spi_advanced_info *p_adv_info = NULL;
+	fh_spi->dwc.id = board_info->bus_no;
+	fh_spi->dwc.fifo_len = board_info->fifo_len;
+	fh_spi->dwc.num_cs = board_info->slave_max_num;
+	for (i = 0; i < fh_spi->dwc.num_cs; i++) {
+		fh_spi->dwc.cs_data[i].GPIO_Pin =
+			board_info->cs_data[i].GPIO_Pin;
+		fh_spi->dwc.cs_data[i].name = board_info->cs_data[i].name;
+	}
+
+	fh_spi->dwc.rx_hs_no = board_info->rx_handshake_num;
+	fh_spi->dwc.tx_hs_no = board_info->tx_handshake_num;
+	memset(&fh_spi->dwc.dma_rx, 0, sizeof(struct _fh_spi_dma_transfer));
+	memset(&fh_spi->dwc.dma_tx, 0, sizeof(struct _fh_spi_dma_transfer));
+	fh_spi->dwc.complete_times = 0;
+	/* default data-pump strategy; may be switched per transfer */
+	fh_spi->dwc.pump_data_mode = PUMP_DATA_POLL_MODE;
+	/*bind the platform data here....*/
+	fh_spi->dwc.board_info = board_info;
+
+	fh_spi->dwc.isr_flag = SPI_IRQ_TXEIM | SPI_IRQ_TXOIM | SPI_IRQ_RXUIM
+		| SPI_IRQ_RXOIM;
+	fh_spi->dwc.frame_mode = SPI_MOTOROLA_MODE;
+	fh_spi->dwc.transfer_mode = SPI_TX_RX_MODE;
+
+	/* spin until the shift engine reports idle (SR bit 0) */
+	do {
+		status = Spi_ReadStatus(&fh_spi->dwc);
+	} while (status & 0x01);
+
+	/*add multi wire support..*/
+	if(board_info->ctl_wire_support & MULTI_WIRE_SUPPORT){
+		/*get master adv info and bind the driver on it...*/
+		p_adv_info = spi_master_get_advanced_data(p_master);
+		p_adv_info->ctl_wire_support = board_info->ctl_wire_support;
+		p_adv_info->multi_wire_func_init = spic_wire_init;
+		p_adv_info->change_to_1_wire = spi_bus_change_1_wire;
+		p_adv_info->change_to_2_wire = spi_bus_change_2_wire;
+		p_adv_info->change_to_4_wire = spi_bus_change_4_wire;
+	}
+	Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+	if(board_info->ctl_wire_support & MULTI_WIRE_SUPPORT) {
+		if (p_adv_info)
+			p_adv_info->multi_wire_func_init(p_master);
+	}
+	Spi_SetFrameFormat(&fh_spi->dwc, fh_spi->dwc.frame_mode);
+	Spi_SetTransferMode(&fh_spi->dwc, fh_spi->dwc.transfer_mode);
+	Spi_DisableIrq(&fh_spi->dwc, SPI_IRQ_ALL);
+	Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+	return 0;
+}
+
+/*
+ * fh_spi_irq - interrupt-driven data pump for PIO transfers.
+ *
+ * On error interrupts (tx overflow / rx underflow / rx overflow) the
+ * combined error latch is cleared and a warning is emitted.  Interrupts
+ * are masked for the whole fill/drain pass and re-armed only if the
+ * transfer is not yet complete; completion is signalled through
+ * fh_spi->done.
+ *
+ * Full-duplex mode: drain the rx fifo first, then refill tx within the
+ * tx_max() budget (which accounts for rx headroom).  Tx-only mode: just
+ * refill tx.  A NULL rx_buff/tx_buff means "discard reads"/"send zeros".
+ */
+static irqreturn_t fh_spi_irq(int irq, void *dev_id)
+{
+	u8* txbuf;
+	struct fh_spi_controller *fh_spi;
+	u32 isr_status;
+	u32 rx_fifo_capability, tx_fifo_capability;
+	u16 data;
+	unsigned size;
+	fh_spi = (struct fh_spi_controller *) dev_id;
+	data = 0x00;
+	txbuf = (u8*) fh_spi->dwc.tx_buff;
+	isr_status = Spi_Isrstatus(&fh_spi->dwc);
+	size = fh_spi->active_transfer->len;
+	if (isr_status & (SPI_IRQ_TXOIM | SPI_IRQ_RXUIM | SPI_IRQ_RXOIM)) {
+		Spi_Clearallerror(&fh_spi->dwc);
+		dev_err(&fh_spi->p_dev->dev, "spi isr status:%x\n", isr_status);
+		WARN_ON(1);
+	}
+
+	/* mask while we touch the fifos; re-armed below if not done */
+	Spi_DisableIrq(&fh_spi->dwc, fh_spi->dwc.isr_flag);
+
+	if (fh_spi->dwc.transfer_mode == SPI_TX_RX_MODE) {
+
+		tx_fifo_capability = tx_max(fh_spi);
+		rx_fifo_capability = rx_max(fh_spi);
+		fh_spi->dwc.rx_len += rx_fifo_capability;
+		while (rx_fifo_capability) {
+			data = Spi_ReadData(&fh_spi->dwc);
+			if (fh_spi->dwc.rx_buff != NULL)
+				*(u8 *)fh_spi->dwc.rx_buff++ = (u8) data;
+
+			rx_fifo_capability--;
+		}
+
+		/* done when every byte of the transfer has been read back */
+		if (fh_spi->dwc.rx_len == size) {
+			complete(&(fh_spi->done));
+			return IRQ_HANDLED;
+		}
+
+		fh_spi->dwc.tx_len -= tx_fifo_capability;
+		while (tx_fifo_capability) {
+			data = 0x0;
+			if (fh_spi->dwc.tx_buff != NULL)
+				data = *(u8*) fh_spi->dwc.tx_buff++;
+			Spi_WriteData(&fh_spi->dwc, data);
+			tx_fifo_capability--;
+		}
+		Spi_EnableIrq(&fh_spi->dwc, fh_spi->dwc.isr_flag);
+
+	} else if (fh_spi->dwc.transfer_mode == SPI_ONLY_TX_MODE) {
+		tx_fifo_capability = tx_max(fh_spi);
+
+		fh_spi->dwc.tx_len -= tx_fifo_capability;
+		while (tx_fifo_capability) {
+			Spi_WriteData(&fh_spi->dwc, *txbuf++);
+			fh_spi->dwc.tx_buff++;
+			tx_fifo_capability--;
+		}
+		if (fh_spi->dwc.tx_len == 0) {
+			complete(&(fh_spi->done));
+			return IRQ_HANDLED;
+		}
+		/*reopen tx isr...*/
+		Spi_EnableIrq(&fh_spi->dwc, fh_spi->dwc.isr_flag);
+	}
+	return IRQ_HANDLED;
+
+}
+
+/*
+ * fh_spi_setup_transfer - program clock, polarity and phase for one transfer.
+ * @spi: the target slave device.
+ * @t:   the transfer about to run; may be NULL (device defaults apply).
+ *
+ * Speed selection: start from the controller maximum, clamp by the
+ * device limit, then by the per-transfer limit.  Returns 0 on success
+ * or -EINVAL for a zero clock or unsupported word size.
+ *
+ * Fix: the original divided apb_clock_in by @hz BEFORE checking
+ * hz == 0, which is a potential division by zero; validate first.
+ */
+static int fh_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+    u8 bits_per_word;
+    u32 hz;
+    u32 div;
+    struct fh_spi_controller *fh_spi = spi_master_get_devdata(spi->master);
+
+    bits_per_word = spi->bits_per_word;
+    if (t && t->bits_per_word)
+        bits_per_word = t->bits_per_word;
+
+    /*
+     * Calculate speed:
+     * - by default, use maximum speed from ssp clk
+     * - if device overrides it, use it
+     * - if transfer specifies other speed, use transfer's one
+     */
+    hz = fh_spi->dwc.max_freq;
+    if (spi->max_speed_hz)
+        hz = min(hz, spi->max_speed_hz);
+    if (t && t->speed_hz)
+        hz = min(hz, t->speed_hz);
+
+    /* Validate BEFORE using hz as a divisor. */
+    if (hz == 0) {
+        dev_err(&spi->dev, "Cannot continue with zero clock\n");
+        WARN_ON(1);
+        return -EINVAL;
+    }
+
+    if (bits_per_word != 8) {
+        dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+            __func__, bits_per_word);
+        return -EINVAL;
+    }
+
+    div = fh_spi->dwc.apb_clock_in / hz;
+    fh_spi->dwc.div = div;
+
+    if (spi->mode & SPI_CPOL)
+        fh_spi->dwc.cpol = SPI_POLARITY_HIGH;
+    else
+        fh_spi->dwc.cpol = SPI_POLARITY_LOW;
+
+    if (spi->mode & SPI_CPHA)
+        fh_spi->dwc.cpha = SPI_PHASE_TX_FIRST;
+    else
+        fh_spi->dwc.cpha = SPI_PHASE_RX_FIRST;
+
+    /* The controller only accepts configuration while disabled. */
+    Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+    Spi_SetPolarity(&fh_spi->dwc, fh_spi->dwc.cpol);
+    Spi_SetPhase(&fh_spi->dwc, fh_spi->dwc.cpha);
+    Spi_SetBaudrate(&fh_spi->dwc, fh_spi->dwc.div);
+    Spi_SetRxdelay(&fh_spi->dwc, 1);
+    Spi_DisableIrq(&fh_spi->dwc, SPI_IRQ_ALL);
+    Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+    fh_spi_setup(spi);
+    return 0;
+}
+
+/*
+ * fix_multi_xfer_mode - force a one-way transfer mode for wide-wire I/O.
+ *
+ * Dual/quad wire transfers are unidirectional, so when the active
+ * device has multi-wire enabled and a wide width is selected, the
+ * transfer mode is pinned to TX-only or RX-only by direction.
+ * Single-wire transfers are left untouched.
+ */
+void fix_multi_xfer_mode(struct fh_spi_controller *fh_spi) {
+    struct spi_device *slave = fh_spi->active_spi_dev;
+    u32 wide = fh_spi->active_wire_width &
+        (DUAL_WIRE_SUPPORT | QUAD_WIRE_SUPPORT);
+
+    if (!(slave->dev_open_multi_wire_flag & MULTI_WIRE_SUPPORT))
+        return;
+    if (!wide)
+        return; /* do not parse one wire */
+
+    if (fh_spi->dir == SPI_DATA_DIR_OUT)
+        fh_spi->dwc.transfer_mode = SPI_ONLY_TX_MODE;
+    else if (fh_spi->dir == SPI_DATA_DIR_IN)
+        fh_spi->dwc.transfer_mode = SPI_ONLY_RX_MODE;
+}
+
+/*
+ * spi_ctl_fix_pump_data_mode - derive the HW transfer mode from which
+ * buffers the caller supplied: TX-only, RX-only, or full duplex.
+ */
+static void spi_ctl_fix_pump_data_mode(struct fh_spi_controller *fh_spi) {
+    int has_tx = (fh_spi->dwc.tx_buff != NULL);
+    int has_rx = (fh_spi->dwc.rx_buff != NULL);
+
+    if (has_tx && !has_rx)
+        fh_spi->dwc.transfer_mode = SPI_ONLY_TX_MODE;
+    else if (has_rx && !has_tx)
+        fh_spi->dwc.transfer_mode = SPI_ONLY_RX_MODE;
+    else
+        fh_spi->dwc.transfer_mode = SPI_TX_RX_MODE;
+}
+/*
+ * isr_pump_data - run one transfer in interrupt-driven mode.
+ *
+ * Configures the TX-empty threshold and interrupt mask, kicks the
+ * controller, then sleeps on &fh_spi->done until fh_spi_irq() signals
+ * completion.  RX-only is demoted to full duplex because the ISR path
+ * is driven by the TX-empty interrupt.  Returns 0.
+ */
+static int isr_pump_data(struct fh_spi_controller *fh_spi)
+{
+    struct spi_device *p_spi_dev;
+    p_spi_dev = fh_spi->active_spi_dev;
+    fh_spi->dwc.isr_flag &= ~(SPI_IRQ_TXEIM | SPI_IRQ_RXFIM);
+    /* Controller must be disabled while reconfiguring. */
+    Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+    spi_ctl_fix_pump_data_mode(fh_spi);
+    fh_spi->dwc.isr_flag |= SPI_IRQ_TXEIM;
+    Spi_SetTxlevlel(&fh_spi->dwc, fh_spi->dwc.fifo_len - 1);
+    Spi_EnableIrq(&fh_spi->dwc, fh_spi->dwc.isr_flag);
+    /*add spic multi wire parse*/
+    fix_multi_xfer_mode(fh_spi);
+    /* ISR mode cannot do pure RX: fall back to full duplex. */
+    if (fh_spi->dwc.transfer_mode == SPI_ONLY_RX_MODE)
+        fh_spi->dwc.transfer_mode = SPI_TX_RX_MODE;
+    Spi_SetTransferMode(&fh_spi->dwc, fh_spi->dwc.transfer_mode);
+    Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+    wait_for_completion(&fh_spi->done);
+    /* Wait for the shift register / TX FIFO to drain before returning. */
+    spi_wait_tx_only_done(fh_spi);
+
+    return 0;
+}
+
+/*
+ * fh_spi_dma_chan_filter - dma_request_channel() filter.
+ * @param points at the requested channel id; accept only that channel.
+ */
+static bool fh_spi_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+    int wanted = *(int *)param;
+
+    return chan->chan_id == wanted;
+}
+
+/*
+ * fh_spi_tx_rx_dma_done_rx - RX descriptor callback for duplex DMA.
+ * Both TX and RX descriptors must finish before the transfer is done,
+ * so only the second callback signals the completion.
+ */
+static void fh_spi_tx_rx_dma_done_rx(void *arg)
+{
+    struct fh_spi_controller *ctl = arg;
+
+    ctl->dwc.complete_times++;
+    if (ctl->dwc.complete_times >= 2) {
+        ctl->dwc.complete_times = 0;
+        complete(&ctl->done);
+    }
+}
+
+/*
+ * fh_spi_tx_rx_dma_done_tx - TX descriptor callback for duplex DMA.
+ * Mirror of the RX callback: whichever direction completes second
+ * signals &done and resets the shared counter.
+ */
+static void fh_spi_tx_rx_dma_done_tx(void *arg)
+{
+    struct fh_spi_controller *ctl = arg;
+
+    ctl->dwc.complete_times++;
+    if (ctl->dwc.complete_times >= 2) {
+        ctl->dwc.complete_times = 0;
+        complete(&ctl->done);
+    }
+}
+
+/* TX-only DMA uses a single descriptor; its completion ends the transfer. */
+static void fh_spi_tx_only_dma_done(void *arg)
+{
+    struct fh_spi_controller *ctl = arg;
+
+    complete(&ctl->done);
+}
+
+/* RX-only DMA uses a single descriptor; its completion ends the transfer. */
+static void fh_spi_rx_only_dma_done(void *arg)
+{
+    struct fh_spi_controller *ctl = arg;
+
+    complete(&ctl->done);
+}
+
+/*
+ * dma_set_tx_para - build the TX slave config, scatterlist and descriptor.
+ * @fh_spi:    controller state; the active transfer supplies the buffer.
+ * @call_back: dmaengine completion callback for the TX descriptor.
+ *
+ * Splits dwc.tx_len into SG_ONE_TIME_MAX_SIZE segments (at most
+ * MAX_SG_LEN), maps each one for DMA, and prepares a slave-sg
+ * descriptor targeting the SPI data register.  A NULL tx_buf uses the
+ * pre-mapped dummy buffer with a fixed source address instead.
+ * Returns 0 on success, -1 if the transfer needs too many segments.
+ *
+ * NOTE(review): unlike dma_set_rx_para(), this path never sets
+ * ext_para.protctl_* even when dma_protctl_enable is on — confirm
+ * whether TX intentionally skips protection control.
+ */
+static int dma_set_tx_para(struct fh_spi_controller *fh_spi,
+void (*call_back)(void *arg))
+{
+
+    struct fh_dma_extra ext_para;
+    struct dma_slave_config *tx_config;
+    struct spi_transfer *t;
+    struct dma_chan *txchan;
+    struct scatterlist *p_sca_list;
+    unsigned int sg_size = 0;
+    int i, xfer_len, one_sg_data_len;
+    unsigned char *temp_buf;
+    int one_time_size;
+    t = fh_spi->active_transfer;
+    memset(&fh_spi->dwc.dma_tx.cfg, 0, sizeof(struct dma_slave_config));
+    memset(&ext_para, 0, sizeof(struct fh_dma_extra));
+    txchan = fh_spi->dwc.dma_tx.chan;
+    tx_config = &fh_spi->dwc.dma_tx.cfg;
+    /* Destination is the controller's data register (board override
+     * first, default offset otherwise). */
+    if (fh_spi->dwc.board_info->data_reg_offset != 0)
+        tx_config->dst_addr = fh_spi->dwc.paddr +
+            fh_spi->dwc.board_info->data_reg_offset;
+    else
+        tx_config->dst_addr = fh_spi->dwc.paddr + SPI_DATA_REG_OFFSET;
+
+    /* set the spi data tx reg */
+    tx_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+    tx_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+    tx_config->slave_id = fh_spi->dwc.tx_hs_no;
+    tx_config->src_maxburst = 8;
+    tx_config->dst_maxburst = 8;
+    tx_config->direction = DMA_MEM_TO_DEV;
+    tx_config->device_fc = FALSE;
+    xfer_len = fh_spi->dwc.tx_len;
+    temp_buf = (unsigned char *)t->tx_buf;
+    one_time_size = SG_ONE_TIME_MAX_SIZE;
+    /* Segment count: full chunks plus one partial tail chunk. */
+    if (xfer_len >= one_time_size)
+        sg_size = xfer_len / one_time_size;
+
+    if (xfer_len % one_time_size)
+        sg_size++;
+    if (sg_size > MAX_SG_LEN) {
+        printk("%s_%d :: too large sg size:0x%x\n",
+            __func__, __LINE__, sg_size);
+        return -1;
+    }
+
+    if (fh_spi->dwc.board_info->dma_master_sel_enable ==
+        SPI_DMA_MASTER_SEL_ENABLE) {
+        ext_para.master_flag = MASTER_SEL_ENABLE;
+        ext_para.dst_master =
+            fh_spi->dwc.board_info->dma_master_ctl_sel;
+        ext_para.src_master =
+            fh_spi->dwc.board_info->dma_master_mem_sel;
+    }
+
+    p_sca_list = &fh_spi->dwc.dma_tx.sgl[0];
+    for (i = 0; i < sg_size; i++, p_sca_list++) {
+        one_sg_data_len = min(xfer_len, one_time_size);
+        xfer_len -= one_sg_data_len;
+
+        if (t->tx_buf == NULL) {
+            /* No data to send: clock out the dummy buffer with a
+             * non-incrementing source address. */
+            ext_para.sinc = FH_DMA_SLAVE_FIX;
+            p_sca_list->dma_address = fh_spi->dwc.tx_dumy_dma_add;
+        } else {
+            ext_para.sinc = FH_DMA_SLAVE_INC;
+            p_sca_list->dma_address = dma_map_single(
+                txchan->dev->device.parent,
+                (void *) temp_buf, one_sg_data_len,
+                DMA_TO_DEVICE);
+            /* Remember the mapped size for unmap_dma_tx_sg(). */
+            fh_spi->dwc.dma_tx.sgl_data_size[i] = one_sg_data_len;
+            temp_buf += one_sg_data_len;
+
+        }
+        p_sca_list->length = one_sg_data_len;
+    }
+
+    /* The data register never moves. */
+    ext_para.dinc = FH_DMA_SLAVE_FIX;
+    dmaengine_slave_config(txchan, tx_config);
+
+    fh_spi->dwc.dma_tx.desc = txchan->device->device_prep_slave_sg(txchan,
+        &fh_spi->dwc.dma_tx.sgl[0], sg_size, DMA_MEM_TO_DEV,
+        DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP,
+        &ext_para);
+    fh_spi->dwc.dma_tx.actual_sgl_size = sg_size;
+    fh_spi->dwc.dma_tx.desc->callback = call_back;
+    fh_spi->dwc.dma_tx.desc->callback_param = fh_spi;
+    return 0;
+}
+
+/*
+ * dma_set_rx_para - build the RX slave config, scatterlist and descriptor.
+ * @fh_spi:       controller state; the active transfer supplies the buffer.
+ * @call_back:    dmaengine completion callback for the RX descriptor.
+ * @rx_para_size: number of bytes this descriptor should receive.
+ * @data_width:   register access width in bits (8 or 16).
+ *
+ * Mirrors dma_set_tx_para() for the device-to-memory direction, with
+ * two extras: optional protection-control and master-select words for
+ * the FH DMA engine, and an "increase" mode that requires 4-byte
+ * aligned buffers/lengths in exchange for 4-byte memory bursts.
+ * Returns 0 on success, -1 if the transfer needs too many segments.
+ */
+static int dma_set_rx_para(struct fh_spi_controller *fh_spi,
+void (*call_back)(void *arg), unsigned int rx_para_size, unsigned int data_width)
+{
+    struct fh_dma_extra ext_para;
+    struct dma_slave_config *rx_config;
+    struct spi_transfer *t;
+    struct dma_chan *rxchan;
+    struct scatterlist *p_sca_list;
+    unsigned int sg_size = 0;
+    int i, xfer_len, one_sg_data_len;
+    unsigned char *temp_buf;
+    unsigned int one_time_size;
+    t = fh_spi->active_transfer;
+    rxchan = fh_spi->dwc.dma_rx.chan;
+    memset(&fh_spi->dwc.dma_rx.cfg, 0, sizeof(struct dma_slave_config));
+    memset(&ext_para, 0, sizeof(struct fh_dma_extra));
+    rx_config = &fh_spi->dwc.dma_rx.cfg;
+    /* Source is the controller's data register. */
+    if (fh_spi->dwc.board_info->data_reg_offset != 0)
+        rx_config->src_addr =
+            fh_spi->dwc.paddr + fh_spi->dwc.board_info->data_reg_offset;
+    else
+        rx_config->src_addr = fh_spi->dwc.paddr + SPI_DATA_REG_OFFSET;
+
+    if (data_width == 8)
+        rx_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+    else if (data_width == 16)
+        rx_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+    else
+        dev_err(&fh_spi->p_dev->dev,
+            "error data width...%d\n", data_width);
+
+    /*rx_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;*/
+    rx_config->slave_id = fh_spi->dwc.rx_hs_no;
+    rx_config->src_maxburst = 8;
+    rx_config->dst_maxburst = 8;
+    rx_config->device_fc = FALSE;
+    rx_config->direction = DMA_DEV_TO_MEM;
+    xfer_len = rx_para_size;
+    temp_buf = (unsigned char *)t->rx_buf;
+
+    if (fh_spi->dwc.board_info->dma_protctl_enable ==
+        SPI_DMA_PROTCTL_ENABLE) {
+        ext_para.protctl_flag = PROTCTL_ENABLE;
+        ext_para.protctl_data =
+            fh_spi->dwc.board_info->dma_protctl_data;
+    }
+
+    if (fh_spi->dwc.board_info->dma_master_sel_enable ==
+        SPI_DMA_MASTER_SEL_ENABLE) {
+        ext_para.master_flag = MASTER_SEL_ENABLE;
+        ext_para.src_master =
+            fh_spi->dwc.board_info->dma_master_ctl_sel;
+        ext_para.dst_master =
+            fh_spi->dwc.board_info->dma_master_mem_sel;
+    }
+
+    if (fh_spi->dwc.board_info->data_increase_support == INC_SUPPORT) {
+        /* 4-byte memory writes demand 4-byte aligned buffer and size. */
+        if (t->rx_buf != NULL) {
+            if (((unsigned int)t->rx_buf % 4) || (xfer_len % 4)) {
+                dev_err(&fh_spi->p_dev->dev,
+                    "rx buf:%x should 4B allign, size:%x should 4B allign\n",
+                    (unsigned int)t->rx_buf, xfer_len);
+                BUG_ON(((unsigned int)t->rx_buf % 4) || (xfer_len % 4));
+            }
+        }
+
+        rx_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+        one_time_size = min((u32)SG_ONE_TIME_MAX_SIZE,
+            fh_spi->dwc.board_info->data_field_size);
+    } else {
+        /*if set 1B,do not need mem allign and size allign,
+        but mem access will use single mode and have low optimize*/
+        rx_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+        one_time_size = SG_ONE_TIME_MAX_SIZE;
+    }
+
+    /* Segment count: full chunks plus one partial tail chunk. */
+    if (xfer_len >= one_time_size)
+        sg_size = xfer_len / one_time_size;
+
+    if (xfer_len % one_time_size)
+        sg_size++;
+
+    if (sg_size > MAX_SG_LEN) {
+        printk("%s_%d :: too large sg size:0x%x\n",
+            __func__, __LINE__, sg_size);
+        return -1;
+    }
+    p_sca_list = &fh_spi->dwc.dma_rx.sgl[0];
+    for (i = 0; i < sg_size; i++, p_sca_list++)
+    {
+        one_sg_data_len = min((u32)xfer_len, one_time_size);
+        xfer_len -= one_sg_data_len;
+        if (t->rx_buf == NULL) {
+            /* Caller discards RX data: land it in the dummy buffer
+             * with a non-incrementing destination. */
+            ext_para.dinc = FH_DMA_SLAVE_FIX;
+            p_sca_list->dma_address = fh_spi->dwc.rx_dumy_dma_add;
+        } else {
+            ext_para.dinc = FH_DMA_SLAVE_INC;
+            p_sca_list->dma_address = dma_map_single(
+                rxchan->dev->device.parent,
+                (void *)temp_buf, one_sg_data_len,
+                DMA_FROM_DEVICE);
+            /* Remember the mapped size for unmap_dma_rx_sg(). */
+            fh_spi->dwc.dma_rx.sgl_data_size[i] = one_sg_data_len;
+            temp_buf += one_sg_data_len;
+        }
+        p_sca_list->length = one_sg_data_len;
+    }
+
+    if (fh_spi->dwc.board_info->data_increase_support == INC_SUPPORT)
+        ext_para.sinc = FH_DMA_SLAVE_INC;
+    else
+        ext_para.sinc = FH_DMA_SLAVE_FIX;
+
+    dmaengine_slave_config(rxchan, rx_config);
+    fh_spi->dwc.dma_rx.desc = rxchan->device->device_prep_slave_sg(rxchan,
+        &fh_spi->dwc.dma_rx.sgl[0], sg_size, DMA_DEV_TO_MEM,
+        DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP,
+        &ext_para);
+    fh_spi->dwc.dma_rx.actual_sgl_size = sg_size;
+    fh_spi->dwc.dma_rx.desc->callback = call_back;
+    fh_spi->dwc.dma_rx.desc->callback_param = fh_spi;
+    return 0;
+}
+
+/*
+ * unmap_dma_tx_sg - undo the per-segment dma_map_single() calls made
+ * while building the TX scatterlist.
+ */
+void unmap_dma_tx_sg(struct fh_spi_controller *fh_spi)
+{
+    struct dma_chan *chan = fh_spi->dwc.dma_tx.chan;
+    int idx;
+
+    for (idx = 0; idx < fh_spi->dwc.dma_tx.actual_sgl_size; idx++)
+        dma_unmap_single(chan->dev->device.parent,
+            fh_spi->dwc.dma_tx.sgl[idx].dma_address,
+            fh_spi->dwc.dma_tx.sgl_data_size[idx],
+            DMA_MEM_TO_DEV);
+}
+
+/*
+ * unmap_dma_rx_sg - undo the per-segment dma_map_single() calls made
+ * while building the RX scatterlist.
+ */
+void unmap_dma_rx_sg(struct fh_spi_controller *fh_spi)
+{
+    struct dma_chan *chan = fh_spi->dwc.dma_rx.chan;
+    int idx;
+
+    for (idx = 0; idx < fh_spi->dwc.dma_rx.actual_sgl_size; idx++)
+        dma_unmap_single(chan->dev->device.parent,
+            fh_spi->dwc.dma_rx.sgl[idx].dma_address,
+            fh_spi->dwc.dma_rx.sgl_data_size[idx],
+            DMA_FROM_DEVICE);
+}
+
+
+/*
+ * dma_pump_tx_rx_data - full-duplex transfer via two DMA channels.
+ *
+ * Prepares TX and RX descriptors, submits RX before TX so the receive
+ * side is armed before data starts flowing, then sleeps until both
+ * completion callbacks have fired.  Returns 0 on success or the
+ * descriptor-setup error.
+ */
+static int dma_pump_tx_rx_data(struct fh_spi_controller *fh_spi)
+{
+
+    struct spi_transfer *t;
+    struct dma_chan *rxchan;
+    struct dma_chan *txchan;
+    int ret;
+    t = fh_spi->active_transfer;
+    txchan = fh_spi->dwc.dma_tx.chan;
+    rxchan = fh_spi->dwc.dma_rx.chan;
+    init_completion(&fh_spi->done);
+    ret = dma_set_tx_para(fh_spi, fh_spi_tx_rx_dma_done_tx);
+    if (ret != 0)
+        return ret;
+
+    ret = dma_set_rx_para(fh_spi, fh_spi_tx_rx_dma_done_rx,
+        fh_spi->dwc.tx_len , 8);
+    if (ret != 0) {
+        /* TX side was already mapped; roll it back. */
+        unmap_dma_tx_sg(fh_spi);
+        return ret;
+    }
+    Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+    Spi_SetDmaRxDataLevel(&fh_spi->dwc, 7);
+    Spi_SetDmaTxDataLevel(&fh_spi->dwc, fh_spi->dwc.fifo_len - 8);
+    Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+    /* Arm RX before TX so no incoming byte is missed. */
+    fh_spi->dwc.dma_rx.desc->tx_submit(fh_spi->dwc.dma_rx.desc);
+    fh_spi->dwc.dma_tx.desc->tx_submit(fh_spi->dwc.dma_tx.desc);
+    Spi_SetDmaControlEnable(&fh_spi->dwc, SPI_DMA_RX_POS);
+    Spi_SetDmaControlEnable(&fh_spi->dwc, SPI_DMA_TX_POS);
+    wait_for_completion(&fh_spi->done);
+    /* Dummy-buffer segments were never mapped per-transfer. */
+    if (t->tx_buf != NULL)
+        unmap_dma_tx_sg(fh_spi);
+
+    if (t->rx_buf != NULL)
+        unmap_dma_rx_sg(fh_spi);
+
+    Spi_SetDmaControlDisable(&fh_spi->dwc, SPI_DMA_RX_POS);
+    Spi_SetDmaControlDisable(&fh_spi->dwc, SPI_DMA_TX_POS);
+    return 0;
+}
+
+/*
+ * dma_pump_tx_only_data - transmit-only transfer via DMA.
+ *
+ * Single TX descriptor; after the DMA completion fires the TX FIFO can
+ * still hold data, so spi_wait_tx_only_done() polls until the bus is
+ * idle.  Returns 0 on success or the descriptor-setup error.
+ */
+static int dma_pump_tx_only_data(struct fh_spi_controller *fh_spi)
+{
+    struct spi_transfer *t;
+    struct dma_chan *rxchan;
+    struct dma_chan *txchan;
+    int ret;
+    t = fh_spi->active_transfer;
+    txchan = fh_spi->dwc.dma_tx.chan;
+    rxchan = fh_spi->dwc.dma_rx.chan;
+    init_completion(&fh_spi->done);
+
+    ret = dma_set_tx_para(fh_spi, fh_spi_tx_only_dma_done);
+    if (ret != 0)
+        return ret;
+
+    Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+    Spi_SetDmaTxDataLevel(&fh_spi->dwc, fh_spi->dwc.fifo_len - 8);
+    Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+    fh_spi->dwc.dma_tx.desc->tx_submit(fh_spi->dwc.dma_tx.desc);
+    Spi_SetDmaControlEnable(&fh_spi->dwc, SPI_DMA_TX_POS);
+    wait_for_completion(&fh_spi->done);
+    /* DMA done != bus done: wait for the FIFO to drain. */
+    spi_wait_tx_only_done(fh_spi);
+    if (t->tx_buf != NULL)
+        unmap_dma_tx_sg(fh_spi);
+
+    Spi_SetDmaControlDisable(&fh_spi->dwc, SPI_DMA_TX_POS);
+    return 0;
+}
+
+
+/*
+ * dma_pump_rx_only_data - receive one chunk (up to
+ * SPI_RX_ONLY_ONE_TIME_SIZE) in the controller's RX-only mode.
+ *
+ * A dummy write to the data register starts the clocked read.  When
+ * the buffer and length are 2-byte aligned and the board supports
+ * swap, the register is widened to 16 bits for throughput.  On
+ * success the remaining-length counters and rx_buf pointer are
+ * advanced so the caller can loop until tx_len reaches zero.
+ * Returns 0 on success, -1 on setup failure or a 20 s timeout.
+ */
+static int dma_pump_rx_only_data(struct fh_spi_controller *fh_spi)
+{
+    struct spi_transfer *t;
+    struct dma_chan *rxchan;
+    struct dma_chan *txchan;
+    int ret;
+    unsigned int temp_size;
+    unsigned int data_width;
+
+    u32 raw_isr;
+    t = fh_spi->active_transfer;
+    txchan = fh_spi->dwc.dma_tx.chan;
+    rxchan = fh_spi->dwc.dma_rx.chan;
+    init_completion(&fh_spi->done);
+    temp_size = min(fh_spi->dwc.tx_len, (u32)SPI_RX_ONLY_ONE_TIME_SIZE);
+    data_width = rx_only_fix_data_width(fh_spi, temp_size);
+
+    if (data_width == 16) {
+        Spi_SetSwap(&fh_spi->dwc, 1);
+        Spi_SetWidth(&fh_spi->dwc, 16);
+    }
+
+    ret = dma_set_rx_para(fh_spi, fh_spi_rx_only_dma_done,
+        temp_size, data_width);
+    if (ret != 0) {
+        /* Restore 8-bit mode before bailing out. */
+        if (data_width == 16) {
+            Spi_SetSwap(&fh_spi->dwc, 0);
+            Spi_SetWidth(&fh_spi->dwc, 8);
+        }
+        return ret;
+    }
+
+    Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+    Spi_SetDmaRxDataLevel(&fh_spi->dwc, 7);
+    /* Frame count is in register-width units, not bytes. */
+    Spi_ContinueReadNum(&fh_spi->dwc, temp_size / (data_width / 8));
+    Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+    fh_spi->dwc.dma_rx.desc->tx_submit(fh_spi->dwc.dma_rx.desc);
+    Spi_SetDmaControlEnable(&fh_spi->dwc, SPI_DMA_RX_POS);
+    /* Dummy write kicks off the RX-only clocking. */
+    Spi_WriteData(&fh_spi->dwc, 0xffffffff);
+    if (!(wait_for_completion_timeout(&fh_spi->done, 20*HZ))) {
+        raw_isr = Spi_RawIsrstatus(&fh_spi->dwc);
+        printk("%s %d time out..spi raw status is %x\n",
+            __func__, __LINE__, raw_isr);
+        if (t->rx_buf != NULL)
+            unmap_dma_rx_sg(fh_spi);
+        Spi_SetDmaControlDisable(&fh_spi->dwc, SPI_DMA_RX_POS);
+        if (data_width == 16) {
+            Spi_SetSwap(&fh_spi->dwc, 0);
+            Spi_SetWidth(&fh_spi->dwc, 8);
+        }
+        return -1;
+    }
+    if (t->rx_buf != NULL)
+        unmap_dma_rx_sg(fh_spi);
+
+    Spi_SetDmaControlDisable(&fh_spi->dwc, SPI_DMA_RX_POS);
+    /* Advance the bookkeeping so the caller can issue the next chunk
+     * (dma_pump_data() loops on tx_len). */
+    fh_spi->dwc.rx_len += temp_size;
+    fh_spi->dwc.tx_len -= temp_size;
+    fh_spi->active_transfer->rx_buf += temp_size;
+    if (data_width == 16) {
+        Spi_SetSwap(&fh_spi->dwc, 0);
+        Spi_SetWidth(&fh_spi->dwc, 8);
+    }
+    return 0;
+}
+
+/*
+ * dma_pump_data - dispatch one transfer to the right DMA strategy.
+ *
+ * Picks TX-only / RX-only / duplex from the supplied buffers and the
+ * multi-wire state.  RX-only is only honored when the board supports
+ * the controller's RX_ONLY_MODE; otherwise it is run as full duplex.
+ * RX-only is chunked, so it loops until tx_len (the remaining byte
+ * count) hits zero.  Returns 0.
+ */
+static int dma_pump_data(struct fh_spi_controller *fh_spi)
+{
+    spi_ctl_fix_pump_data_mode(fh_spi);
+    fix_multi_xfer_mode(fh_spi);
+    if (fh_spi->dwc.transfer_mode == SPI_ONLY_RX_MODE) {
+        if (fh_spi->dwc.board_info->spidma_xfer_mode != RX_ONLY_MODE)
+            fh_spi->dwc.transfer_mode = SPI_TX_RX_MODE;
+    }
+    Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+    Spi_SetTransferMode(&fh_spi->dwc, fh_spi->dwc.transfer_mode);
+    Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+    if (fh_spi->dwc.transfer_mode == SPI_ONLY_TX_MODE)
+        dma_pump_tx_only_data(fh_spi);
+    else if (fh_spi->dwc.transfer_mode == SPI_ONLY_RX_MODE) {
+        do {
+            dma_pump_rx_only_data(fh_spi);
+        } while (fh_spi->dwc.tx_len != 0);
+    } else
+        dma_pump_tx_rx_data(fh_spi);
+    return 0;
+}
+
+
+/*
+ * poll_rx_only_with_regwidth - polled RX-only read at 8 or 16 bit
+ * register width.
+ * @fh_spi:    controller state.
+ * @rxbuf:     destination buffer.
+ * @size:      total byte count.
+ * @reg_width: data-register access width in bits (8 or 16).
+ *
+ * Reads in bursts of at most fifo_len frames: program the continue-read
+ * count, issue the dummy write that starts the clocking, then busy-poll
+ * the RX FIFO until the burst is in.  16-bit mode (with swap) is
+ * restored to 8-bit before returning.  Returns 0.
+ */
+static int poll_rx_only_with_regwidth(struct fh_spi_controller *fh_spi,
+u8 *rxbuf, u32 size, u32 reg_width)
+{
+    register u32 rx_fifo_capability;
+    u32 otp_xfer_size;
+    u8 *rxbuf_8;
+    u16 *rxbuf_16;
+
+    rxbuf_8 = (u8 *)rxbuf;
+    rxbuf_16 = (u16 *)rxbuf;
+    if (reg_width == 16) {
+        Spi_SetSwap(&fh_spi->dwc, 1);
+        Spi_SetWidth(&fh_spi->dwc, 16);
+    }
+    /* Convert the byte count into register-width frames. */
+    size = size / (reg_width / 8);
+start:
+    /* All frames read: restore 8-bit mode and finish. */
+    if (size == 0) {
+        if (reg_width == 16) {
+            Spi_SetSwap(&fh_spi->dwc, 0);
+            Spi_SetWidth(&fh_spi->dwc, 8);
+        }
+        return 0;
+    }
+
+    /* One burst is capped by the FIFO depth. */
+    otp_xfer_size = min(fh_spi->dwc.fifo_len, size);
+    size -= otp_xfer_size;
+    Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+    Spi_ContinueReadNum(&fh_spi->dwc, otp_xfer_size);
+    Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+    /* Dummy write starts the RX-only clocking. */
+    Spi_WriteData(&fh_spi->dwc, 0xffffffff);
+    do {
+        rx_fifo_capability = rx_max(fh_spi);
+        otp_xfer_size -= rx_fifo_capability;
+        while (rx_fifo_capability) {
+            if (reg_width == 16)
+                *rxbuf_16++ = (u16)Spi_ReadData(&fh_spi->dwc);
+            else
+                *rxbuf_8++ = (u8)Spi_ReadData(&fh_spi->dwc);
+            rx_fifo_capability--;
+        }
+    } while (otp_xfer_size);
+    goto start;
+}
+
+
+/*
+ * rx_only_fix_data_width - choose the RX register access width.
+ *
+ * 16-bit access is used only when both the destination buffer and the
+ * length are 2-byte aligned and the board supports byte swapping;
+ * anything else falls back to 8-bit.
+ */
+static int rx_only_fix_data_width(struct fh_spi_controller *fh_spi, u32 size)
+{
+    int aligned = !((int)fh_spi->dwc.rx_buff % 2) && !(size % 2);
+
+    if (aligned &&
+        fh_spi->dwc.board_info->swap_support == SWAP_SUPPORT)
+        return 16;
+    return 8;
+}
+
+/*
+ * poll_pump_rx_only_data - run the whole RX-only request in polling
+ * mode at the width chosen by rx_only_fix_data_width().
+ */
+static int poll_pump_rx_only_data(struct fh_spi_controller *fh_spi)
+{
+    u32 total = fh_spi->dwc.tx_len;
+    u32 width = rx_only_fix_data_width(fh_spi, total);
+
+    return poll_rx_only_with_regwidth(fh_spi, fh_spi->dwc.rx_buff,
+        total, width);
+}
+
+
+/*
+ * poll_pump_tx_rx_data - busy-poll a full-duplex transfer.
+ *
+ * Interleaves TX refills and RX drains: the first pass primes the TX
+ * FIFO ("first" label), then the loop alternates read-back and refill
+ * until every byte of the transfer has been received.  A NULL rx_buff
+ * discards read data; a NULL tx_buff clocks out 0xff padding.
+ * Returns 0.
+ */
+static int poll_pump_tx_rx_data(struct fh_spi_controller *fh_spi)
+{
+    register u32 rx_fifo_capability, tx_fifo_capability;
+    u8 *txbuf;
+    u8 *rxbuf;
+    u16 data;
+    u32 size;
+    rxbuf = (u8 *)fh_spi->dwc.rx_buff;
+    txbuf = (u8 *)fh_spi->dwc.tx_buff;
+    size = fh_spi->active_transfer->len;
+
+    /* Prime the TX FIFO before the first drain. */
+    goto first;
+start:
+    rx_fifo_capability = rx_max(fh_spi);
+    fh_spi->dwc.rx_len += rx_fifo_capability;
+    if (rxbuf != NULL) {
+        fh_spi->dwc.rx_buff += rx_fifo_capability;
+        while (rx_fifo_capability) {
+            *rxbuf++ = Spi_ReadData(&fh_spi->dwc);
+            rx_fifo_capability--;
+        }
+
+    } else {
+        /* No RX buffer: read and discard to keep FIFOs in sync. */
+        while (rx_fifo_capability) {
+            data = Spi_ReadData(&fh_spi->dwc);
+            rx_fifo_capability--;
+        }
+    }
+    /* Done once every byte has been clocked back in. */
+    if (fh_spi->dwc.rx_len == size)
+        return 0;
+first:
+    tx_fifo_capability = tx_max(fh_spi);
+    fh_spi->dwc.tx_len -= tx_fifo_capability;
+    if (txbuf != NULL) {
+        fh_spi->dwc.tx_buff += tx_fifo_capability;
+        while (tx_fifo_capability) {
+            Spi_WriteData(&fh_spi->dwc, *txbuf++);
+            tx_fifo_capability--;
+        }
+
+    } else {
+        /* No TX buffer: clock out 0xff padding. */
+        while (tx_fifo_capability) {
+            Spi_WriteData(&fh_spi->dwc, 0xff);
+            tx_fifo_capability--;
+        }
+    }
+
+    goto start;
+}
+
+/*
+ * poll_pump_tx_only_data - busy-poll a transmit-only transfer.
+ *
+ * Feeds the TX FIFO as fast as tx_max_tx_only() allows until tx_len
+ * is exhausted, then waits for the bus to go idle and the FIFO to
+ * empty before returning 0.
+ */
+static int poll_pump_tx_only_data(struct fh_spi_controller *fh_spi)
+{
+    register u32 tx_fifo_capability;
+    u8 *txbuf;
+    txbuf = (u8 *) fh_spi->dwc.tx_buff;
+    /*tx_max_tx_only*/
+    while (fh_spi->dwc.tx_len) {
+        tx_fifo_capability = tx_max_tx_only(fh_spi);
+        fh_spi->dwc.tx_len -= tx_fifo_capability;
+        while (tx_fifo_capability) {
+            Spi_WriteData(&fh_spi->dwc, *txbuf++);
+            fh_spi->dwc.tx_buff++;
+            tx_fifo_capability--;
+        }
+    }
+    /* Writing to the FIFO is not transmission: wait for idle. */
+    spi_wait_tx_only_done(fh_spi);
+    return 0;
+}
+
+/*
+ * poll_pump_data - dispatch one transfer to the right polling strategy
+ * (TX-only, RX-only, or full duplex), after multi-wire adjustment.
+ */
+static int poll_pump_data(struct fh_spi_controller *fh_spi)
+{
+    spi_ctl_fix_pump_data_mode(fh_spi);
+    /* Multi-wire devices may override the buffer-derived mode. */
+    fix_multi_xfer_mode(fh_spi);
+
+    Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
+    Spi_SetTransferMode(&fh_spi->dwc, fh_spi->dwc.transfer_mode);
+    Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
+
+    switch (fh_spi->dwc.transfer_mode) {
+    case SPI_ONLY_TX_MODE:
+        poll_pump_tx_only_data(fh_spi);
+        break;
+    case SPI_ONLY_RX_MODE:
+        poll_pump_rx_only_data(fh_spi);
+        break;
+    default:
+        poll_pump_tx_rx_data(fh_spi);
+        break;
+    }
+    return 0;
+}
+
+/*
+ * fix_fh_spi_xfer_wire_mode - switch the controller to the wire count
+ * requested by this transfer (1/2/4 wires), via the master's
+ * advanced-info callbacks.
+ */
+void fix_fh_spi_xfer_wire_mode(struct spi_device *spi_dev,
+struct spi_transfer *t)
+{
+    struct spi_master *host = spi_dev->master;
+    struct _spi_advanced_info *wire_info = &host->ctl_multi_wire_info;
+
+    /* A device must never request more wires than the controller has. */
+    BUG_ON(spi_dev->dev_open_multi_wire_flag >
+        host->ctl_multi_wire_info.ctl_wire_support);
+
+    switch (t->xfer_wire_mode) {
+    case ONE_WIRE_SUPPORT:
+        wire_info->change_to_1_wire(host);
+        break;
+    case DUAL_WIRE_SUPPORT:
+        wire_info->change_to_2_wire(host, t->xfer_dir);
+        break;
+    case QUAD_WIRE_SUPPORT:
+        wire_info->change_to_4_wire(host, t->xfer_dir);
+        break;
+    default:
+        break;
+    }
+}
+
+
+/*
+ * fh_spi_handle_message - execute every transfer of one spi_message.
+ * @fh_spi: controller state.
+ * @m:      the queued message; m->actual_length is accumulated here.
+ *
+ * For each transfer: reprogram the clock when needed, assert chip
+ * select, pick a pump mode (DMA for large transfers when enabled,
+ * polling otherwise; a failed DMA attempt falls through to polling),
+ * and honor cs_change between transfers.  Returns 0 or the first
+ * non-zero pump status.
+ */
+static int fh_spi_handle_message(struct fh_spi_controller *fh_spi,
+struct spi_message *m)
+{
+    bool first, last;
+    struct spi_transfer *t, *tmp_t;
+    int status = 0;
+    int cs_change;
+    struct spi_device *spi_dev;
+    struct spi_master *spi_master;
+    cs_change = 1;
+    m->actual_length = 0;
+    fh_spi->active_message = m;
+    /*get spi slave from the message bind on it.*/
+    spi_dev = m->spi;
+    spi_master = spi_dev->master;
+    /*bind the active spi dev to the controller..*/
+    fh_spi->active_spi_dev = spi_dev;
+    list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list)
+    {
+        first = (&t->transfer_list == m->transfers.next);
+        last = (&t->transfer_list == m->transfers.prev);
+        /* Reconfigure only when something could have changed. */
+        if (first || t->speed_hz || t->bits_per_word)
+            fh_spi_setup_transfer(m->spi, t);
+
+        if (cs_change)
+            Spi_EnableSlaveen(&fh_spi->dwc, fh_spi->dwc.slave_port);
+        cs_change = t->cs_change;
+        fh_spi->active_transfer = t;
+        fh_spi->dwc.tx_len = t->len;
+        fh_spi->dwc.rx_len = 0;
+        fh_spi->dwc.tx_buff = (void *) t->tx_buf;
+        fh_spi->dwc.rx_buff = t->rx_buf;
+
+        /* DMA only pays off above DMA_TRANS_GATE_LEVEL bytes. */
+        if (fh_spi->dwc.board_info->dma_transfer_enable
+            == SPI_TRANSFER_USE_DMA) {
+            if (fh_spi->dwc.tx_len < DMA_TRANS_GATE_LEVEL)
+                fh_spi->dwc.pump_data_mode = PUMP_DATA_POLL_MODE;
+            else
+                fh_spi->dwc.pump_data_mode = PUMP_DATA_DMA_MODE;
+        } else {
+            fh_spi->dwc.pump_data_mode = PUMP_DATA_POLL_MODE;
+        }
+
+        /* INC-mode DMA needs 4-byte aligned RX buffer and length;
+         * otherwise drop back to polling. */
+        if (fh_spi->dwc.pump_data_mode == PUMP_DATA_DMA_MODE) {
+            if (fh_spi->dwc.board_info->data_increase_support == INC_SUPPORT) {
+                if (t->rx_buf != NULL) {
+                    if (((unsigned int)t->rx_buf % 4) || (t->len % 4)) {
+                        fh_spi->dwc.pump_data_mode = PUMP_DATA_POLL_MODE;
+                    }
+                }
+            }
+        }
+
+        if (spi_dev->dev_open_multi_wire_flag & MULTI_WIRE_SUPPORT)
+            fix_fh_spi_xfer_wire_mode(spi_dev, t);
+
+        switch (fh_spi->dwc.pump_data_mode) {
+        case PUMP_DATA_DMA_MODE:
+            status = dma_pump_data(fh_spi);
+            /* On DMA failure, fall through and retry in a
+             * CPU-driven mode. */
+            if (status == 0) {
+                break;
+            } else {
+                WARN_ON(1);
+                dev_err(&fh_spi->p_dev->dev,
+                    "spi dma pump data error\n");
+                fh_spi->dwc.pump_data_mode
+                    = PUMP_DATA_POLL_MODE;
+            }
+            /* fallthrough */
+        case PUMP_DATA_ISR_MODE:
+            status = isr_pump_data(fh_spi);
+            break;
+
+        case PUMP_DATA_POLL_MODE:
+            status = poll_pump_data(fh_spi);
+            break;
+        default:
+            status = -1;
+            WARN_ON(1);
+            dev_err(&fh_spi->p_dev->dev,
+                "spi pump data mode error..\n");
+        }
+
+        /* Deassert CS at the end unless the last transfer asked to
+         * keep it active. */
+        if (!cs_change && last) {
+            Spi_DisableSlaveen(&fh_spi->dwc,
+                fh_spi->dwc.slave_port);
+        }
+/*
+    if (t->delay_usecs)
+        udelay(t->delay_usecs);
+*/
+        m->actual_length += t->len;
+        if (status)
+            break;
+
+    }
+
+    return status;
+
+}
+
+/*
+ * fh_spi_handle - workqueue handler that drains the message queue.
+ *
+ * The lock protects only the list; it is dropped around the actual
+ * (sleeping) message processing and re-taken for the next list check,
+ * so fh_spi_transfer() may enqueue concurrently.
+ */
+static void fh_spi_handle(struct work_struct *w)
+{
+    struct fh_spi_controller
+    *fh_spi = container_of(w, struct fh_spi_controller, work);
+    unsigned long flags;
+    struct spi_message *m;
+    spin_lock_irqsave(&fh_spi->lock, flags);
+    while (!list_empty(&fh_spi->queue)) {
+        m = list_entry(fh_spi->queue.next, struct spi_message, queue);
+        list_del_init(&m->queue);
+        /* Drop the lock: handling a message sleeps. */
+        spin_unlock_irqrestore(&fh_spi->lock, flags);
+        m->status = fh_spi_handle_message(fh_spi, m);
+        if (m->complete)
+            m->complete(m->context);
+
+        spin_lock_irqsave(&fh_spi->lock, flags);
+    }
+    spin_unlock_irqrestore(&fh_spi->lock, flags);
+
+}
+
+/*
+ * fh_spi_transfer - spi_master .transfer hook: queue a message and
+ * kick the worker; actual I/O happens in fh_spi_handle().
+ */
+static int fh_spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+    struct fh_spi_controller *ctl = spi_master_get_devdata(spi->master);
+    unsigned long irq_state;
+
+    m->status = -EINPROGRESS;
+
+    spin_lock_irqsave(&ctl->lock, irq_state);
+    list_add_tail(&m->queue, &ctl->queue);
+    spin_unlock_irqrestore(&ctl->lock, irq_state);
+
+    queue_work(ctl->workqueue, &ctl->work);
+    return 0;
+}
+
+/*
+ * fh_spi_setup - spi_master .setup hook: validate the device and latch
+ * its chip-select GPIO and slave-port bit.
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range chip select or an
+ * unsupported word size.
+ *
+ * Fix: the original indexed cs_data[spi->chip_select] BEFORE checking
+ * chip_select against num_cs (out-of-bounds read); validate first.
+ */
+static int fh_spi_setup(struct spi_device *spi)
+{
+    struct fh_spi_controller *fh_spi = spi_master_get_devdata(spi->master);
+
+    if (spi->chip_select >= fh_spi->dwc.num_cs) {
+        dev_err(&spi->dev, "%s, unsupported chip select no=%d\n",
+            __func__, spi->chip_select);
+        return -EINVAL;
+    }
+
+    if (spi->bits_per_word != 8) {
+        dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+            __func__, spi->bits_per_word);
+        return -EINVAL;
+    }
+
+    fh_spi->dwc.active_cs_pin =
+        fh_spi->dwc.cs_data[spi->chip_select].GPIO_Pin;
+    fh_spi->dwc.slave_port = 1 << spi->chip_select;
+    return 0;
+}
+
+/* Nothing is allocated per spi_device in setup, so cleanup is a no-op. */
+static void fh_spi_cleanup(struct spi_device *spi)
+{
+}
+
+/*
+ * fh_spi_probe - platform probe: allocate the master, claim IRQ/memory,
+ * set up the work queue, clock, chip-select GPIOs and (optionally) the
+ * DMA channels, then register the SPI master.
+ *
+ * Error handling unwinds in strict reverse order of acquisition.
+ * If DMA channel allocation fails the driver degrades to PIO instead
+ * of failing the probe.
+ *
+ * Fix: the RX dummy buffer is a DMA *destination* (device-to-memory,
+ * used with a fixed dest address when rx_buf is NULL), so it must be
+ * mapped with DMA_FROM_DEVICE, not DMA_TO_DEVICE as before.
+ */
+static int __devinit fh_spi_probe(struct platform_device *dev)
+{
+    int err = 0;
+    struct spi_master *master;
+    struct fh_spi_controller *fh_spi;
+    struct resource *r, *ioarea;
+    int ret, i, j;
+    dma_cap_mask_t mask;
+    int filter_no;
+    /*board info below*/
+    struct fh_spi_platform_data *spi_platform_info;
+
+    spi_platform_info =
+        (struct fh_spi_platform_data *) dev->dev.platform_data;
+    if (spi_platform_info == NULL) {
+
+        err = -ENODEV;
+        goto out0;
+    }
+
+    master = spi_alloc_master(&dev->dev, sizeof(struct fh_spi_controller));
+    if (master == NULL) {
+        err = -ENOMEM;
+        dev_err(&dev->dev, "%s, master malloc failed.\n", __func__);
+        goto out0;
+    }
+
+    /*get the spi private data*/
+    fh_spi = spi_master_get_devdata(master);
+    if (!fh_spi) {
+        dev_err(&dev->dev, "%s, master dev data is null.\n", __func__);
+        err = -ENOMEM;
+        /*free the spi master data*/
+        goto out_put_master;
+    }
+    /*controller's master dev is platform dev~~*/
+    fh_spi->master_dev = &dev->dev;
+    /*bind the platform dev*/
+    fh_spi->p_dev = dev;
+    /*set the platform dev private data*/
+    platform_set_drvdata(dev, master);
+
+    fh_spi->dwc.irq = platform_get_irq(dev, 0);
+    if (fh_spi->dwc.irq < 0) {
+        dev_err(&dev->dev, "%s, spi irq no error.\n", __func__);
+        err = fh_spi->dwc.irq;
+        goto out_set_plat_drvdata_null;
+    }
+
+    err = request_irq(fh_spi->dwc.irq, fh_spi_irq, 0, dev_name(&dev->dev),
+        fh_spi);
+    if (err) {
+        dev_dbg(&dev->dev, "request_irq failed, %d\n", err);
+        goto out_set_plat_drvdata_null;
+    }
+
+    /* Get resources(memory, IRQ) associated with the device */
+    r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+    if (r == NULL) {
+        err = -ENODEV;
+        /*release the irq..*/
+        goto out_free_irq;
+    }
+
+    fh_spi->dwc.paddr = r->start;
+    ioarea = request_mem_region(r->start, resource_size(r), dev->name);
+    if (!ioarea) {
+        dev_err(&dev->dev, "spi region already claimed\n");
+        err = -EBUSY;
+        goto out_free_irq;
+    }
+
+    fh_spi->dwc.regs = ioremap(r->start, resource_size(r));
+    if (!fh_spi->dwc.regs) {
+        dev_err(&dev->dev, "spi region already mapped\n");
+        err = -EINVAL;
+        /*free mem region*/
+        goto out_relase_mem_region;
+    }
+
+    INIT_WORK(&fh_spi->work, fh_spi_handle);
+    init_completion(&fh_spi->done);
+    INIT_LIST_HEAD(&fh_spi->queue);
+    spin_lock_init(&fh_spi->lock);
+
+    fh_spi->workqueue = create_singlethread_workqueue(dev_name(&dev->dev));
+    if (!fh_spi->workqueue) {
+        err = -ENXIO;
+        /*release mem remap*/
+        goto out_iounmap;
+    }
+    /*spi common interface*/
+    master->transfer = fh_spi_transfer;
+    master->setup = fh_spi_setup;
+    master->cleanup = fh_spi_cleanup;
+
+    /* the spi->mode bits understood by this driver: */
+    master->mode_bits = SPI_CPOL | SPI_CPHA;
+    master->bus_num = dev->id;
+    spi_platform_info->bus_no = dev->id;
+    master->num_chipselect = spi_platform_info->slave_max_num;
+    /*parse the controller board info~~~
+    clk enable in the func
+    */
+    ret = fh_spi_init_hw(fh_spi, spi_platform_info, master);
+    if (ret) {
+        err = ret;
+        goto out_destroy_queue;
+    }
+    fh_spi->clk = clk_get(&fh_spi->p_dev->dev, spi_platform_info->clk_name);
+
+    if (IS_ERR(fh_spi->clk)) {
+        dev_err(&fh_spi->p_dev->dev, "cannot find the spi%d clk.\n",
+            fh_spi->dwc.id);
+        err = PTR_ERR(fh_spi->clk);
+        goto out_destroy_queue;
+    }
+
+    clk_enable(fh_spi->clk);
+    fh_spi->dwc.apb_clock_in = clk_get_rate(fh_spi->clk);
+    /* Raise the bus clock if the board asks for more than the default. */
+    if (spi_platform_info->apb_clock_in > fh_spi->dwc.apb_clock_in) {
+        clk_set_rate(fh_spi->clk, spi_platform_info->apb_clock_in);
+        fh_spi->dwc.apb_clock_in = spi_platform_info->apb_clock_in;
+    }
+    if (spi_platform_info->max_speed_support != 0)
+        fh_spi->dwc.max_freq = spi_platform_info->max_speed_support;
+    else
+        fh_spi->dwc.max_freq = fh_spi->dwc.apb_clock_in / 2;
+    /* Claim every chip-select GPIO; on failure free those already
+     * claimed before unwinding. */
+    for (i = 0; i < fh_spi->dwc.num_cs; i++) {
+        ret = gpio_request(fh_spi->dwc.cs_data[i].GPIO_Pin,
+            fh_spi->dwc.cs_data[i].name);
+        if (ret) {
+            dev_err(&dev->dev,
+                "spi failed to request the gpio:%d\n",
+                fh_spi->dwc.cs_data[i].GPIO_Pin);
+            /*release the gpio already request..*/
+            if (i != 0) {
+                for (j = 0; j < i; j++)
+                    gpio_free(fh_spi->dwc.cs_data[j].GPIO_Pin);
+            }
+            err = ret;
+            /*clk disable*/
+            goto out_clk_disable;
+        }
+        /*set the dir*/
+        gpio_direction_output(fh_spi->dwc.cs_data[i].GPIO_Pin,
+            GPIOF_OUT_INIT_HIGH);
+    }
+    /* DMA is optional: any allocation failure falls back to PIO. */
+    if (fh_spi->dwc.board_info->dma_transfer_enable ==
+        SPI_TRANSFER_USE_DMA) {
+        filter_no = fh_spi->dwc.board_info->tx_dma_channel;
+        dma_cap_zero(mask);
+        dma_cap_set(DMA_SLAVE, mask);
+        fh_spi->dwc.dma_tx.chan = dma_request_channel(mask,
+            fh_spi_dma_chan_filter, &filter_no);
+
+        if (!fh_spi->dwc.dma_tx.chan) {
+            dev_err(&fh_spi->p_dev->dev,
+                "spi%d request dma channel error....\n",
+                fh_spi->dwc.id);
+            fh_spi->dwc.board_info->dma_transfer_enable = 0;
+            goto step_register_master;
+        }
+        filter_no = fh_spi->dwc.board_info->rx_dma_channel;
+        fh_spi->dwc.dma_rx.chan = dma_request_channel(mask,
+            fh_spi_dma_chan_filter, &filter_no);
+        if (!fh_spi->dwc.dma_rx.chan) {
+            dev_err(&fh_spi->p_dev->dev,
+                "spi%d request dma channel error....\n",
+                fh_spi->dwc.id);
+            dma_release_channel(fh_spi->dwc.dma_tx.chan);
+            fh_spi->dwc.board_info->dma_transfer_enable = 0;
+            goto step_register_master;
+        }
+
+        /* Dummy TX buffer: read by the device -> DMA_TO_DEVICE. */
+        fh_spi->dwc.tx_dumy_dma_add = dma_map_single(
+            fh_spi->dwc.dma_tx.chan->dev->device.parent,
+            (void *) fh_spi->dwc.tx_dumy_buff,
+            sizeof(fh_spi->dwc.tx_dumy_buff),
+            DMA_TO_DEVICE);
+
+        /* Dummy RX buffer: written by the device -> DMA_FROM_DEVICE
+         * (was incorrectly DMA_TO_DEVICE). */
+        fh_spi->dwc.rx_dumy_dma_add = dma_map_single(
+            fh_spi->dwc.dma_rx.chan->dev->device.parent,
+            (void *) fh_spi->dwc.rx_dumy_buff,
+            sizeof(fh_spi->dwc.rx_dumy_buff),
+            DMA_FROM_DEVICE);
+    }
+
+step_register_master:
+    err = spi_register_master(master);
+    if (err) {
+        dev_dbg(&dev->dev, "cannot register spi master, %d\n", err);
+        goto out_gpio_tree;
+    }
+
+    return 0;
+
+out_gpio_tree:
+    for (i = 0; i < fh_spi->dwc.num_cs; i++)
+        gpio_free(fh_spi->dwc.cs_data[i].GPIO_Pin);
+out_clk_disable:
+    clk_disable(fh_spi->clk);
+out_destroy_queue:
+    destroy_workqueue(fh_spi->workqueue);
+out_iounmap:
+    iounmap(fh_spi->dwc.regs);
+out_relase_mem_region:
+    release_mem_region(r->start, resource_size(r));
+out_free_irq:
+    free_irq(fh_spi->dwc.irq, fh_spi);
+out_set_plat_drvdata_null:
+    memset(fh_spi, 0, sizeof(struct fh_spi_controller));
+    platform_set_drvdata(dev, NULL);
+out_put_master:
+    spi_master_put(master);
+out0:
+    return err;
+
+}
+
+
+/*
+ * fh_spi_remove - platform remove: release everything acquired in
+ * probe in reverse order (master, GPIOs, clock, DMA channels, work
+ * queue, MMIO mapping, memory region, IRQ).  Returns 0.
+ */
+static int __devexit fh_spi_remove(struct platform_device *dev)
+{
+    struct resource *r;
+    struct spi_master *master;
+    struct fh_spi_controller *fh_spi;
+    int i;
+    master = platform_get_drvdata(dev);
+    /* Nothing bound: probe never succeeded. */
+    if (master == NULL)
+        goto out0;
+
+    fh_spi = spi_master_get_devdata(master);
+    spi_unregister_master(master);
+    /*gpio free*/
+    for (i = 0; i < fh_spi->dwc.num_cs; i++)
+        gpio_free(fh_spi->dwc.cs_data[i].GPIO_Pin);
+    /*clk disable*/
+    clk_disable(fh_spi->clk);
+    /*dma free*/
+    if (fh_spi->dwc.board_info->dma_transfer_enable ==
+        SPI_TRANSFER_USE_DMA) {
+        if (fh_spi->dwc.dma_rx.chan) {
+            dma_release_channel(fh_spi->dwc.dma_rx.chan);
+            fh_spi->dwc.dma_rx.chan->private = NULL;
+        }
+        if (fh_spi->dwc.dma_tx.chan) {
+            dma_release_channel(fh_spi->dwc.dma_tx.chan);
+            fh_spi->dwc.dma_tx.chan->private = NULL;
+        }
+    }
+    /*queue free*/
+    destroy_workqueue(fh_spi->workqueue);
+    r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+    /*io map free*/
+    iounmap(fh_spi->dwc.regs);
+    /*mem region free*/
+    release_mem_region(r->start, resource_size(r));
+    /*irq free*/
+    free_irq(fh_spi->dwc.irq, fh_spi);
+    /*clear the spi master dev data*/
+    memset(fh_spi, 0, sizeof(struct fh_spi_controller));
+    /*put master*/
+    platform_set_drvdata(dev, NULL);
+    spi_master_put(master);
+
+out0:
+    return 0;
+
+}
+
+/* Platform glue: binds probe/remove to devices named "fh_spi".
+ * No suspend/resume support. */
+static struct platform_driver fh_spi_driver = {
+    .probe = fh_spi_probe,
+    .remove = __devexit_p(fh_spi_remove),
+    .driver = {
+        .name = "fh_spi",
+        .owner = THIS_MODULE,
+    },
+    .suspend = NULL,
+    .resume = NULL,
+};
+
+/* Module entry: register the platform driver; probe runs per device. */
+static int __init fh_spi_init(void)
+{
+    return platform_driver_register(&fh_spi_driver);
+}
+
+/* Module exit: unregister the platform driver. */
+static void __exit fh_spi_exit(void)
+{
+    platform_driver_unregister(&fh_spi_driver);
+}
+
+/* Standard module plumbing and metadata. */
+module_init(fh_spi_init);
+module_exit(fh_spi_exit);
+MODULE_AUTHOR("yu.zhang <zhangy@fullhan.com>");
+MODULE_DESCRIPTION("DUOBAO SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/fh_spi_slave.c b/drivers/spi/fh_spi_slave.c
new file mode 100644
index 00000000..f3e864a4
--- /dev/null
+++ b/drivers/spi/fh_spi_slave.c
@@ -0,0 +1,979 @@
+/*
+ * fh_spi_slave.c
+ *
+ * Created on: Sep 19, 2016
+ * Author: duobao
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/gpio.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <mach/fh_dmac.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <mach/io.h>
+#include <linux/kfifo.h>
+#include <mach/spi.h>
+
+#define lift_shift_bit_num(bit_num) (1<<(bit_num)) /* arg parenthesized: safe for expression args */
+//interrupt status bits (only readable once the source is unmasked)
+#define SPI_IRQ_TXEIS (lift_shift_bit_num(0))
+#define SPI_IRQ_TXOIS (lift_shift_bit_num(1))
+#define SPI_IRQ_RXUIS (lift_shift_bit_num(2))
+#define SPI_IRQ_RXOIS (lift_shift_bit_num(3))
+#define SPI_IRQ_RXFIS (lift_shift_bit_num(4))
+#define SPI_IRQ_MSTIS (lift_shift_bit_num(5))
+//status register bits
+#define SPI_STATUS_BUSY (lift_shift_bit_num(0))
+#define SPI_STATUS_TFNF (lift_shift_bit_num(1))
+#define SPI_STATUS_TFE (lift_shift_bit_num(2))
+#define SPI_STATUS_RFNE (lift_shift_bit_num(3))
+#define SPI_STATUS_RFF (lift_shift_bit_num(4))
+#define SPI_STATUS_TXE (lift_shift_bit_num(5))
+#define SPI_STATUS_DCOL (lift_shift_bit_num(6))
+#define CACHE_LINE_SIZE (32)
+#define PUMP_DATA_NONE_MODE (0x00)
+#define PUMP_DATA_DMA_MODE (0x11)
+#define PUMP_DATA_ISR_MODE (0x22)
+#define PUMP_DATA_POLL_MODE (0x33)
+#define SPI_DIV_TRANSFER_SIZE (256)
+#define SPI_DATA_REG_OFFSET (0x60)
+#define KFIFO_SIZE 2048
+#define DUMY_DATA 0xff
+#define SPI_SLAVE_MAX_FIFO_SIZE 256
+#define SLAVE_SET_PHASE 1 /* ioctl cmds; parenthesized so the chained "+ 1" cannot re-associate in expressions */
+#define SLAVE_SET_POLARITY (SLAVE_SET_PHASE + 1)
+#define SLAVE_INIT_RX_FIFO (SLAVE_SET_POLARITY + 1)
+#define SLAVE_INIT_TX_FIFO (SLAVE_INIT_RX_FIFO + 1)
+#define SLAVE_GET_ERROR_STATUS (SLAVE_INIT_TX_FIFO + 1)
+
+//#define FH_SPI_SLAVE_DEBUG
+#define MAX_SPI_SLAVES 8
+/****************************************************************************
+ * ADT section
+ * add definition of user defined Data Type that only be used in this file here
+ ***************************************************************************/
+enum {
+ CONFIG_OK = 0, CONFIG_PARA_ERROR = lift_shift_bit_num(0),
+ //only for the set slave en/disable
+ CONFIG_BUSY = lift_shift_bit_num(1),
+ //only for write_read mode
+ WRITE_READ_OK = 0,
+ WRITE_READ_ERROR = lift_shift_bit_num(2),
+ WRITE_READ_TIME_OUT = lift_shift_bit_num(3),
+ //only for write only mode
+ WRITE_ONLY_OK = 0,
+ WRITE_ONLY_ERROR = lift_shift_bit_num(4),
+ WRITE_ONLY_TIME_OUT = lift_shift_bit_num(5),
+ //only for read only mode
+ READ_ONLY_OK = 0,
+ READ_ONLY_ERROR = lift_shift_bit_num(6),
+ READ_ONLY_TIME_OUT = lift_shift_bit_num(7),
+ //eeprom mode
+ EEPROM_OK = 0,
+ EEPROM_ERROR = lift_shift_bit_num(8),
+ EEPROM_TIME_OUT = lift_shift_bit_num(9),
+ //if read/write/eeprom error,the error below could give you more info by reading the 'Spi_ReadTransferError' function
+ MULTI_MASTER_ERROR = lift_shift_bit_num(10),
+ TX_OVERFLOW_ERROR = lift_shift_bit_num(11),
+ RX_OVERFLOW_ERROR = lift_shift_bit_num(12),
+};
+
+//enable spi
+typedef enum enum_spi_enable {
+ SPI_DISABLE = 0,
+ SPI_ENABLE = (lift_shift_bit_num(0)),
+} spi_enable_e;
+
+//polarity
+typedef enum enum_spi_polarity {
+ SPI_POLARITY_LOW = 0,
+ SPI_POLARITY_HIGH = (lift_shift_bit_num(7)),
+ //bit pos
+ SPI_POLARITY_RANGE = (lift_shift_bit_num(7)),
+} spi_polarity_e;
+
+//phase
+typedef enum enum_spi_phase {
+ SPI_PHASE_RX_FIRST = 0,
+ SPI_PHASE_TX_FIRST = (lift_shift_bit_num(6)),
+ //bit pos
+ SPI_PHASE_RANGE = (lift_shift_bit_num(6)),
+} spi_phase_e;
+
+//frame format
+typedef enum enum_spi_format {
+ SPI_MOTOROLA_MODE = 0x00,
+ SPI_TI_MODE = 0x10,
+ SPI_MICROWIRE_MODE = 0x20,
+ //bit pos
+ SPI_FRAME_FORMAT_RANGE = 0x30,
+} spi_format_e;
+
+//data size
+typedef enum enum_spi_data_size {
+ SPI_DATA_SIZE_4BIT = 0x03,
+ SPI_DATA_SIZE_5BIT = 0x04,
+ SPI_DATA_SIZE_6BIT = 0x05,
+ SPI_DATA_SIZE_7BIT = 0x06,
+ SPI_DATA_SIZE_8BIT = 0x07,
+ SPI_DATA_SIZE_9BIT = 0x08,
+ SPI_DATA_SIZE_10BIT = 0x09,
+ SPI_DATA_SIZE_16BIT = 0x0f,
+ //bit pos
+ SPI_DATA_SIZE_RANGE = 0x0f,
+} spi_data_size_e;
+
+//transfer mode
+typedef enum enum_spi_transfer_mode {
+ SPI_TX_RX_MODE = 0x000,
+ SPI_ONLY_TX_MODE = 0x100,
+ SPI_ONLY_RX_MODE = 0x200,
+ SPI_EEPROM_MODE = 0x300,
+ //bit pos
+ SPI_TRANSFER_MODE_RANGE = 0x300,
+} spi_transfer_mode_e;
+
+//spi baudrate (divider values derived from the 50 MHz input clock)
+typedef enum enum_spi_baudrate {
+	SPI_SCLKIN = 50000000,
+	SPI_SCLKOUT_27000000 = (SPI_SCLKIN / 27000000), //27M
+	SPI_SCLKOUT_13500000 = (SPI_SCLKIN / 13500000), //13.5M
+	SPI_SCLKOUT_6750000 = (SPI_SCLKIN / 6750000), //6.75M
+	SPI_SCLKOUT_4500000 = (SPI_SCLKIN / 4500000), //4.5M
+	SPI_SCLKOUT_3375000 = (SPI_SCLKIN / 3375000), //3.375M
+	SPI_SCLKOUT_2700000 = (SPI_SCLKIN / 2700000), //2.7M
+	SPI_SCLKOUT_1500000 = (SPI_SCLKIN / 1500000), //1.5M
+	SPI_SCLKOUT_500000 = (SPI_SCLKIN / 500000), //0.5M (old comment wrongly said 0.1M)
+	SPI_SCLKOUT_100000 = (SPI_SCLKIN / 100000), //0.1M
+} spi_baudrate_e;
+
+//spi_irq
+typedef enum enum_spi_irq {
+ SPI_IRQ_TXEIM = (lift_shift_bit_num(0)),
+ SPI_IRQ_TXOIM = (lift_shift_bit_num(1)),
+ SPI_IRQ_RXUIM = (lift_shift_bit_num(2)),
+ SPI_IRQ_RXOIM = (lift_shift_bit_num(3)),
+ SPI_IRQ_RXFIM = (lift_shift_bit_num(4)),
+ SPI_IRQ_MSTIM = (lift_shift_bit_num(5)),
+ SPI_IRQ_ALL = 0x3f,
+} spi_irq_e;
+
+//spi_slave_port
+typedef enum enum_spi_slave {
+ SPI_SLAVE_PORT0 = (lift_shift_bit_num(0)),
+ SPI_SLAVE_PORT1 = (lift_shift_bit_num(1)),
+} spi_slave_e;
+
+//dma control
+typedef enum enum_spi_dma_control_mode {
+ SPI_DMA_RX_POS = (lift_shift_bit_num(0)),
+ SPI_DMA_TX_POS = (lift_shift_bit_num(1)),
+ //bit pos
+ SPI_DMA_CONTROL_RANGE = 0x03,
+} spi_dma_control_mode_e;
+
+//frame format
+typedef enum enum_spi_slave_mode {
+ SPI_SLAVE_EN = 0x00,
+ SPI_SLAVE_DIS = 1 << 10,
+ //bit pos
+ SPI_SLAVE_MODE_RANGE = 1 << 10,
+} spi_slave_mode_e;
+
+#ifdef FH_SPI_SLAVE_DEBUG
+/* do/while(0) keeps both printk statements bound to an unbraced "if" */
+#define SPI_SLAVE_PRINT_DBG(fmt, args...) \
+	do { printk("[FH_SPI_S_DEBUG]: "); printk(fmt, ## args); } while (0)
+#else
+#define SPI_SLAVE_PRINT_DBG(fmt, args...) do { } while (0)
+#endif
+
+struct fh_spi_reg {
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 ssienr;
+ u32 mwcr;
+ u32 ser;
+ u32 baudr;
+ u32 txfltr;
+ u32 rxfltr;
+ u32 txflr;
+ u32 rxflr;
+ u32 sr;
+ u32 imr;
+ u32 isr;
+ u32 risr;
+ u32 txoicr;
+ u32 rxoicr;
+ u32 rxuicr;
+ u32 msticr;
+ u32 icr;
+ u32 dmacr;
+ u32 dmatdlr;
+ u32 dmardlr;
+ u32 idr;
+ u32 version;
+ u32 dr; /* Currently oper as 32 bits,
+ though only low 16 bits matters */
+};
+
+#define dw_readl(dw, name) \
+ __raw_readl(&(((struct fh_spi_reg *)dw->regs)->name))
+#define dw_writel(dw, name, val) \
+ __raw_writel((val), &(((struct fh_spi_reg *)dw->regs)->name))
+#define dw_readw(dw, name) \
+ __raw_readw(&(((struct fh_spi_reg *)dw->regs)->name))
+#define dw_writew(dw, name, val) \
+ __raw_writew((val), &(((struct fh_spi_reg *)dw->regs)->name))
+
+struct _fh_spi_dma_transfer {
+ //bind to dma channel
+ struct dma_chan *chan;
+ struct dma_slave_config *cfg;
+ struct scatterlist sgl;
+ struct dma_async_tx_descriptor *desc;
+};
+
+struct fh_spi {
+ void * __iomem regs; /* vaddr of the control registers */
+ u32 id;
+ u32 paddr;
+ u32 slave_port;
+ u32 irq; /* irq no */
+ u32 fifo_len; /* depth of the FIFO buffer */
+ u32 cpol;
+ u32 cpha;
+ u32 isr_flag;
+ //clk
+ u32 apb_clock_in;
+ u32 max_freq; /* max bus freq supported */
+ u32 div;
+ /*use id u32 bus_num;*//*which bus*/
+ u32 num_cs; /* supported slave numbers */
+ u32 data_width;
+ u32 frame_mode;
+ u32 transfer_mode;
+ u32 active_cs_pin;
+ //copy from the user...
+ u32 tx_len;
+ u32 rx_len;
+ void *rx_buff;
+ void *tx_buff;
+ u32 tx_dma_add;
+ u32 rx_dma_add;
+ u32 tx_hs_no; //tx handshaking number
+ u32 rx_hs_no; //rx handshaking number
+ u32 *tx_dumy_buff;
+ u32 *rx_dumy_buff;
+ struct fh_spi_cs cs_data[SPI_MASTER_CONTROLLER_MAX_SLAVE];
+ u32 pump_data_mode;
+ struct _fh_spi_dma_transfer dma_rx;
+ struct _fh_spi_dma_transfer dma_tx;
+ u32 complete_times;
+ struct fh_spi_platform_data *board_info;
+};
+
+//private per-device state for this file (one instance per probed slave controller)
+struct fh_spi_slave_controller {
+	struct clk *clk;
+	spinlock_t lock; /* NOTE(review): initialized but never taken in this file -- confirm intended */
+	//message queue
+	struct platform_device *p_dev; /* NOTE(review): never assigned anywhere in this file; dereferencing it is a NULL deref */
+	struct fh_spi dwc;
+	struct completion tx_done;
+
+	u32 cur_rx_len;
+	u32 cur_tx_len;
+
+	//dev interface
+	int major; /* dynamically allocated char-dev major; open() matches on MKDEV(major, 0) */
+	struct class *psClass;
+	struct device *psDev;
+
+	//kfifo interface
+	struct kfifo kfifo_in; /* user write() -> tx-empty ISR */
+	struct kfifo kfifo_out; /* rx-full ISR -> user read() */
+};
+
+/******************************************************************************
+ * Function prototype section
+ * add prototypes for all functions called by this file,execepting those
+ * declared in header file
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Global variables section - Exported
+ * add declaration of global variables that will be exported here
+ * e.g.
+ * int8_t foo;
+ ****************************************************************************/
+
+/*****************************************************************************
+
+ * static fun;
+ *****************************************************************************/
+
+static int fh_spi_slave_init_hw(struct fh_spi_slave_controller *fh_spi_slave,
+ struct fh_spi_platform_data *board_info);
+static u32 Spi_RawIsrstatus(struct fh_spi *dw);
+
+/*****************************************************************************
+ * Global variables section - Local
+ * define global variables(will be refered only in this file) here,
+ * static keyword should be used to limit scope of local variable to this file
+ * e.g.
+ * static uint8_t ufoo;
+ *****************************************************************************/
+static struct fh_spi_slave_controller *priv_array[MAX_SPI_SLAVES] = { NULL, /* one slot per platform-device id; filled by probe, searched by open */
+NULL, NULL, NULL, NULL, NULL, NULL, NULL };
+
+/* function body */
+
+static u32 Spi_Enable(struct fh_spi *dw, spi_enable_e enable) /* gate the whole controller on/off */
+{
+	dw_writel(dw, ssienr, enable);
+	return CONFIG_OK;
+}
+
+static u32 Spi_SetPolarity(struct fh_spi *dw, spi_polarity_e polarity) /* CPOL: read-modify-write of ctrl0 */
+{
+	u32 data;
+
+	data = dw_readl(dw, ctrl0);
+	data &= ~(u32) SPI_POLARITY_RANGE;
+	data |= polarity;
+	dw_writel(dw, ctrl0, data);
+	return CONFIG_OK;
+}
+
+static u32 Spi_SetPhase(struct fh_spi *dw, spi_phase_e phase) /* CPHA: read-modify-write of ctrl0 */
+{
+	u32 data;
+
+	data = dw_readl(dw, ctrl0);
+	data &= ~(u32) SPI_PHASE_RANGE;
+	data |= phase;
+	dw_writel(dw, ctrl0, data);
+	return CONFIG_OK;
+}
+
+static u32 Spi_SetFrameFormat(struct fh_spi *dw, spi_format_e format) /* Motorola / TI / Microwire framing */
+{
+	u32 data = 0;
+
+	data = dw_readl(dw, ctrl0);
+	data &= ~(u32) SPI_FRAME_FORMAT_RANGE;
+	data |= format;
+	dw_writel(dw, ctrl0, data);
+	return CONFIG_OK;
+}
+
+static u32 Spi_SetTransferMode(struct fh_spi *dw, spi_transfer_mode_e mode) /* tx+rx / tx-only / rx-only / eeprom */
+{
+	u32 data = 0;
+
+	data = dw_readl(dw, ctrl0);
+	data &= ~(u32) SPI_TRANSFER_MODE_RANGE;
+	data |= mode;
+	dw_writel(dw, ctrl0, data);
+	return CONFIG_OK;
+}
+
+static u32 Spi_DisableIrq(struct fh_spi *dw, u32 irq) /* clear bit(s) in the interrupt mask register */
+{
+	u32 data = 0;
+
+	data = dw_readl(dw, imr);
+	data &= ~irq;
+	dw_writel(dw, imr, data);
+	return CONFIG_OK;
+}
+
+static u32 Spi_EnableIrq(struct fh_spi *dw, u32 irq) /* set bit(s) in the interrupt mask register */
+{
+	u32 data = 0;
+
+	data = dw_readl(dw, imr);
+	data |= irq;
+	dw_writel(dw, imr, data);
+	return CONFIG_OK;
+
+}
+
+static u32 Spi_SetTxlevlel(struct fh_spi *dw, u32 level) /* tx fifo interrupt threshold */
+{
+	dw_writel(dw, txfltr, level);
+	return CONFIG_OK;
+}
+
+static u32 Spi_SetRxlevlel(struct fh_spi *dw, u32 level) /* rx fifo interrupt threshold */
+{
+	dw_writel(dw, rxfltr, level);
+	return CONFIG_OK;
+}
+
+static u32 Spi_ReadTxfifolevel(struct fh_spi *dw) /* entries currently queued in the tx fifo */
+{
+	return dw_readl(dw, txflr);
+}
+
+static u32 Spi_ReadRxfifolevel(struct fh_spi *dw) /* entries currently queued in the rx fifo */
+{
+	return (u32) dw_readl(dw, rxflr);
+}
+
+static u32 Spi_ReadStatus(struct fh_spi *dw) /* NOTE(review): uint8_t cast truncates sr to its low 8 bits -- confirm no needed bit above bit 7 */
+{
+	return (uint8_t) dw_readl(dw, sr);
+}
+
+static u32 Spi_SetSlaveMode(struct fh_spi *dw, spi_slave_mode_e format) /* slave output enable/disable bit in ctrl0 */
+{
+
+	u32 data = 0;
+	data = dw_readl(dw, ctrl0);
+	data &= ~(u32) SPI_SLAVE_MODE_RANGE;
+	data |= format;
+	dw_writel(dw, ctrl0, data);
+	return CONFIG_OK;
+
+}
+
+static u32 Spi_WriteData(struct fh_spi *dw, u16 data) /* push one frame into the data register */
+{
+	dw_writew(dw, dr, data);
+	return WRITE_ONLY_OK;
+}
+
+static u16 Spi_ReadData(struct fh_spi *dw) /* pop one frame from the data register */
+{
+	return dw_readw(dw, dr);
+}
+
+#if(0) /* dead code, compiled out */
+static void Spi_Clearallerror(struct fh_spi *dw)
+{
+	u32 data = dw_readl(dw, icr);
+	data = 0;
+}
+#endif
+
+static u32 Spi_Isrstatus(struct fh_spi *dw) /* masked interrupt status */
+{
+	u32 data = dw_readl(dw, isr);
+	return data;
+}
+
+static u32 Spi_RawIsrstatus(struct fh_spi *dw) /* raw (unmasked) interrupt status */
+{
+	u32 data = dw_readl(dw, risr);
+	return data;
+}
+
+#if(0) /* dead code: DMA plumbing kept for reference, compiled out */
+static void Spi_SetDmaTxDataLevel(struct fh_spi *dw, u32 level)
+{
+	dw_writel(dw, dmatdlr, level);
+}
+
+static void Spi_SetDmaRxDataLevel(struct fh_spi *dw, u32 level)
+{
+	dw_writel(dw, dmardlr, level);
+}
+
+static void Spi_SetDmaControlEnable(struct fh_spi *dw,
+		spi_dma_control_mode_e enable_pos)
+{
+
+	u32 data;
+
+	data = dw_readl(dw, dmacr);
+	data |= enable_pos;
+	dw_writel(dw, dmacr, data);
+}
+#endif
+
+static int spi_slave_open(struct inode *inode, struct file *filp)
+{
+	int i, ret = 0;
+	struct fh_spi_slave_controller *fh_spi_slave;
+	SPI_SLAVE_PRINT_DBG("%s\n", __func__);
+	/* bind the file to the controller whose chardev major matches */
+	SPI_SLAVE_PRINT_DBG("inode id is %x..\n", inode->i_rdev);
+	for (i = 0; i < MAX_SPI_SLAVES; i++) {
+		/* skip empty slots first (old debug printk dereferenced NULL) */
+		if (!priv_array[i])
+			continue;
+		SPI_SLAVE_PRINT_DBG("register id is %x..\n",
+			MKDEV(priv_array[i]->major, 0));
+		if (MKDEV(priv_array[i]->major, 0) == inode->i_rdev) {
+			filp->private_data = priv_array[i];
+			break;
+		}
+	}
+	if (i == MAX_SPI_SLAVES)
+		return -ENXIO;
+	/* discard any stale fifo contents from a previous session */
+	fh_spi_slave = priv_array[i];
+	kfifo_reset(&fh_spi_slave->kfifo_in);
+	kfifo_reset(&fh_spi_slave->kfifo_out);
+	return ret;
+}
+
+static ssize_t spi_slave_read(struct file *filp, char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	int ret;
+	unsigned int copied;
+	struct fh_spi_slave_controller *fh_spi_slave;
+	fh_spi_slave = (struct fh_spi_slave_controller *) filp->private_data;
+	/* drain bytes the rx ISR parked in kfifo_out */
+	if (kfifo_is_empty(&fh_spi_slave->kfifo_out)) {
+		return -EFAULT; /* NOTE(review): -EAGAIN is the usual "no data" code -- confirm userspace contract */
+	}
+	ret = kfifo_to_user(&fh_spi_slave->kfifo_out, buf, count, &copied);
+	//success: report how much reached userspace
+	if (ret == 0) {
+		fh_spi_slave->cur_rx_len = copied; /* bookkeeping only; not read elsewhere in this file */
+		return copied;
+	}
+	//error..
+	else {
+
+		return ret;
+	}
+
+	//the rx ISR keeps pumping hw data into kfifo_out in the background
+
+}
+
+#if(0) /* dead code: busy-wait-for-idle helper, compiled out */
+static void wait_spi_idle(struct fh_spi_slave_controller *fh_spi_slave) {
+	int status;
+	do {
+		status = Spi_ReadStatus(&fh_spi_slave->dwc);
+		SPI_SLAVE_PRINT_DBG("status is %x\n",status);
+	}while (status & 0x01);
+}
+#endif
+
+static void spi_slave_isr_tx_data(struct fh_spi_slave_controller *fh_spi_slave) /* arm the tx-empty interrupt; the ISR then drains kfifo_in */
+{
+	//fh_spi_slave->dwc.isr_flag = SPI_IRQ_TXEIM;
+	//Spi_SetTxlevlel(&fh_spi_slave->dwc, fh_spi_slave->dwc.fifo_len / 2);
+	Spi_SetTxlevlel(&fh_spi_slave->dwc, fh_spi_slave->dwc.fifo_len - 5); /* near-full threshold -- exact txfltr semantics assumed from hw databook; confirm */
+	SPI_SLAVE_PRINT_DBG("open spi slave isr tx..\n");
+	Spi_EnableIrq(&fh_spi_slave->dwc, SPI_IRQ_TXEIM);
+	//wait_for_completion(&fh_spi_slave->tx_done);
+}
+
+static ssize_t spi_slave_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	int ret;
+	unsigned int copied;
+	struct fh_spi_slave_controller *fh_spi_slave;
+	fh_spi_slave = (struct fh_spi_slave_controller *) filp->private_data;
+	if (kfifo_is_full(&fh_spi_slave->kfifo_in)) {
+		//spin_unlock_irqrestore(&fh_spi_slave->lock, flags);
+		return -EFAULT; /* NOTE(review): -EAGAIN/-ENOSPC would be more conventional -- confirm userspace contract */
+	}
+	ret = kfifo_from_user(&fh_spi_slave->kfifo_in, buf, count, &copied);
+	//start spi hw work...
+	if (ret == 0) {
+		//start spi hw work...
+		/* the tx-empty ISR will drain kfifo_in into the hw fifo */
+		fh_spi_slave->cur_tx_len = copied;
+		spi_slave_isr_tx_data(fh_spi_slave);
+		return copied;
+	}
+	//error..
+	else {
+		return ret;
+	}
+
+
+
+}
+
+long spi_slave_ioctl(struct file *file, unsigned int cmd, unsigned long arg) /* every command is currently a stub */
+{
+	int err = -ENOIOCTLCMD;
+
+	switch (cmd) {
+	case SLAVE_SET_PHASE: /* TODO: all cases unimplemented; they fall out with -ENOIOCTLCMD */
+		break;
+	case SLAVE_SET_POLARITY:
+		break;
+	case SLAVE_INIT_RX_FIFO:
+		break;
+
+	case SLAVE_INIT_TX_FIFO:
+		break;
+
+	case SLAVE_GET_ERROR_STATUS:
+		break;
+
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static int spi_slave_release(struct inode *inode, struct file *filp) /* nothing to tear down on close */
+{
+
+	int ret = 0;
+	return ret; /* fifos are reset on the next open() */
+}
+
+static struct file_operations spi_slave_fops = { /* char-dev entry points registered in probe */
+	.open = spi_slave_open,
+	.read = spi_slave_read,
+	.write = spi_slave_write,
+	.unlocked_ioctl = spi_slave_ioctl,
+	.release = spi_slave_release,
+};
+
+static inline u32 tx_max(struct fh_spi_slave_controller *fh_spi_slave) /* frames we may push into the tx fifo right now */
+{
+	u32 hw_tx_level;
+	hw_tx_level = Spi_ReadTxfifolevel(&fh_spi_slave->dwc);
+	hw_tx_level = fh_spi_slave->dwc.fifo_len - hw_tx_level;
+	hw_tx_level /= 2; /* only fill half the free space per pass -- presumably to bound time in the ISR; confirm */
+	return hw_tx_level; //min(hw_tx_level, fh_spi_slave->dwc.tx_len);
+}
+
+/* Return the max entries we should read out of rx fifo */
+static inline u32 rx_max(struct fh_spi_slave_controller *fh_spi_slave)
+{
+	u32 hw_rx_level;
+
+	hw_rx_level = Spi_ReadRxfifolevel(&fh_spi_slave->dwc);
+	return hw_rx_level; /* everything currently queued in the hw rx fifo */
+}
+
+static void spi_slave_process_tx_isr(
+		struct fh_spi_slave_controller *fh_spi_slave) /* tx-empty ISR body: move kfifo_in bytes into the hw fifo */
+{
+
+	u8 tx_buff[SPI_SLAVE_MAX_FIFO_SIZE] = { 0 };
+	int kfifo_tx_size, hw_tx_size, trans_size;
+	u16 data;
+	int i;
+	int temp;
+	//Spi_DisableIrq(&fh_spi_slave->dwc, SPI_IRQ_TXEIM);
+	//Spi_DisableIrq(&fh_spi_slave->dwc, SPI_IRQ_RXFIM);
+
+	kfifo_tx_size = kfifo_len(&fh_spi_slave->kfifo_in);
+	//kfifo_tx_size = fh_spi_slave->cur_tx_len;
+	hw_tx_size = tx_max(fh_spi_slave);
+	/* transfer MIN(hw tx fifo headroom, queued kfifo bytes) this pass */
+	trans_size = min(kfifo_tx_size, hw_tx_size);
+	temp = kfifo_out(&fh_spi_slave->kfifo_in, tx_buff, trans_size); /* NOTE(review): temp (bytes actually popped) is never checked against trans_size */
+	//transfer data to hw.. and reduce the actual trans data size..
+	SPI_SLAVE_PRINT_DBG("kfifo size :%d, hw size:%d..\n",kfifo_tx_size,hw_tx_size);
+	SPI_SLAVE_PRINT_DBG("tx isr size is %d..\n",trans_size);
+	//printk("**0d%d\n",trans_size);
+	for (i = 0; i < trans_size; i++) {
+		data = tx_buff[i];
+		//SPI_SLAVE_PRINT_DBG("tx data is %x\n",data);
+		Spi_WriteData(&fh_spi_slave->dwc, data);
+	}
+	//SPI_SLAVE_PRINT_DBG("\n");
+	fh_spi_slave->cur_tx_len -= trans_size;
+	if (fh_spi_slave->cur_tx_len == 0) {
+		Spi_DisableIrq(&fh_spi_slave->dwc, SPI_IRQ_TXEIM); /* everything queued was sent: stop tx-empty irqs */
+		//complete(&(fh_spi_slave->tx_done));
+	} else {
+		//Spi_EnableIrq(&fh_spi_slave->dwc, SPI_IRQ_TXEIM);
+	}
+
+}
+
+static void spi_slave_process_rx_isr(
+		struct fh_spi_slave_controller *fh_spi_slave) /* rx-full ISR body: drain the hw rx fifo into kfifo_out */
+{
+
+	int hw_rx_size;
+	int i;
+	u16 data;
+	int status;
+	//here we try to get more data when the the clk is too high...
+	//do {
+	hw_rx_size = rx_max(fh_spi_slave);
+	SPI_SLAVE_PRINT_DBG("rx get size is 0x%d\n",hw_rx_size);
+	for (i = 0; i < hw_rx_size; i++) {
+		data = Spi_ReadData(&fh_spi_slave->dwc);
+		//rx_buff[i] = (u8) data;
+		kfifo_in(&fh_spi_slave->kfifo_out, &data, 1); /* copies 1 byte of a u16 -- little-endian assumption; confirm */
+	}
+	status = Spi_ReadStatus(&fh_spi_slave->dwc);
+	//} while (status & (1 << 3));
+
+}
+
+static irqreturn_t fh_spi_slave_irq(int irq, void *dev_id) /* top-half: dispatch tx-empty / rx-full service routines */
+{
+	struct fh_spi_slave_controller *fh_spi_slave;
+	u32 isr_status;
+	u32 raw_status;
+
+	fh_spi_slave = (struct fh_spi_slave_controller *) dev_id;
+	isr_status = Spi_Isrstatus(&fh_spi_slave->dwc);
+	raw_status = Spi_RawIsrstatus(&fh_spi_slave->dwc);
+	//printk("raw irq status is 0x%x..\n",raw_status);
+	SPI_SLAVE_PRINT_DBG("irq status is 0x%x..\n",isr_status);
+	if(raw_status & (1<<3)){ /* rx overflow seen in the raw status (even if masked): data was lost */
+		printk("[FH_SPI_S_ERROR]: rx overflow....\n");
+	}
+	if (isr_status & SPI_IRQ_TXEIM) {
+		spi_slave_process_tx_isr(fh_spi_slave);
+	}
+	if (isr_status & SPI_IRQ_RXFIM) {
+		spi_slave_process_rx_isr(fh_spi_slave);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit fh_spi_slave_probe(struct platform_device *dev)
+{
+	int err = 0;
+	struct resource *r,*ioarea;
+	int ret;
+	char spi_slave_name[32] = {0};
+	char spi_slave_class_name[32] = {0};
+	int major_id;
+	struct fh_spi_slave_controller *fh_spi_slave;
+	struct fh_spi_platform_data * spi_platform_info;
+	spi_platform_info = (struct fh_spi_platform_data *)dev->dev.platform_data;
+	if(spi_platform_info == NULL){
+		err = -ENODEV;
+		dev_err(&dev->dev, "%s, spi slave platform data null.\n",
+			__func__);
+		BUG();
+
+	}
+	fh_spi_slave =kzalloc(sizeof(struct fh_spi_slave_controller), GFP_KERNEL);
+	if (!fh_spi_slave) {
+		dev_err(&dev->dev, "malloc spi slave control mem not enough\n");
+		BUG();
+	}
+	fh_spi_slave->dwc.irq = platform_get_irq(dev, 0);
+	if ((int)fh_spi_slave->dwc.irq < 0) { /* cast: dwc.irq is u32, the old unsigned compare made this check dead */
+		dev_err(&dev->dev, "%s, spi slave irq no error.\n",
+			__func__);
+		err = fh_spi_slave->dwc.irq;
+		BUG();
+	}
+	err = request_irq(fh_spi_slave->dwc.irq , fh_spi_slave_irq, 0,
+		dev_name(&dev->dev), fh_spi_slave);
+	if (err) {
+		dev_dbg(&dev->dev, "request_irq failed, %d\n", err);BUG();
+	}
+	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (r == NULL) {
+		dev_err(&dev->dev, "%s, spi slave ioresource error. \n",
+			__func__);
+		err = -ENODEV;
+		BUG();
+	}
+	fh_spi_slave->dwc.paddr = r->start;
+	ioarea = request_mem_region(r->start,
+		resource_size(r), dev->name);if(!ioarea) {
+		dev_err(&dev->dev, "spi slave region already claimed\n");
+		err = -EBUSY;
+		BUG();
+
+	}
+	fh_spi_slave->dwc.regs = ioremap(r->start, resource_size(r));
+	if (!fh_spi_slave->dwc.regs) {
+		dev_err(&dev->dev, "spi slave region already mapped\n");
+		err = -EINVAL;
+		BUG();
+	}
+	spi_platform_info->bus_no = dev->id;
+	priv_array[dev->id] = fh_spi_slave; /* NOTE(review): dev->id is not bounds-checked against MAX_SPI_SLAVES */
+	init_completion(&fh_spi_slave->tx_done);
+	spin_lock_init(
+		&fh_spi_slave->lock);
+
+	fh_spi_slave->clk = clk_get(NULL, spi_platform_info->clk_name);
+	if (IS_ERR(fh_spi_slave->clk)) { /* error paths below use &dev->dev: p_dev is never assigned (was a NULL deref) */
+		dev_err(&dev->dev, "cannot find the spi%d clk.\n",
+			fh_spi_slave->dwc.id);
+		err = PTR_ERR(fh_spi_slave->clk);
+		BUG();
+	}
+	clk_enable(fh_spi_slave->clk);
+	clk_set_rate(fh_spi_slave->clk,spi_platform_info->apb_clock_in);
+
+	ret = fh_spi_slave_init_hw(fh_spi_slave,spi_platform_info);
+	if(ret) {
+		err = ret;
+		BUG();
+	}
+	init_completion(&fh_spi_slave->tx_done); /* NOTE(review): second init of tx_done; harmless but redundant */
+	sprintf(spi_slave_name, "fh_spi_slave_%d", dev->id);
+	sprintf(spi_slave_class_name, "fh_spi_slave_class_%d", dev->id);
+	major_id = register_chrdev(0, spi_slave_name, &spi_slave_fops);
+	if (major_id <= 0) {
+		err = -EIO;
+		dev_err(&dev->dev, "cannot register spi slave_%d char dev..\n",
+			fh_spi_slave->dwc.id);
+		BUG();
+	} else {
+		fh_spi_slave->major = major_id;
+	}
+
+	fh_spi_slave->psClass = class_create(THIS_MODULE, spi_slave_class_name);if (IS_ERR(fh_spi_slave->psClass)) {
+		err = -EIO;
+		dev_err(&dev->dev, "%s: Unable to create class\n", __FILE__);
+		BUG();
+	}
+
+	fh_spi_slave->psDev = device_create(fh_spi_slave->psClass, NULL, MKDEV(major_id, 0),
+		fh_spi_slave, spi_slave_name);
+	if (IS_ERR(fh_spi_slave->psDev)) {
+		err = -EIO;
+		dev_err(&dev->dev,"Error: %s: Unable to create device\n", __FILE__);
+		BUG();
+	}
+
+	if(kfifo_alloc(
+		&fh_spi_slave->kfifo_in, KFIFO_SIZE, GFP_KERNEL)){
+		dev_err(&dev->dev,"Error: %s: Unable to alloc kfifo..\n", __FILE__);
+		BUG();
+	}
+
+	if(kfifo_alloc(&fh_spi_slave->kfifo_out, KFIFO_SIZE, GFP_KERNEL)) {
+		dev_err(&dev->dev,"Error: %s: Unable to alloc kfifo..\n", __FILE__);
+		BUG();
+	}
+
+	#if(0)
+
+	//1 :empty 0:not empty
+	//1 :full 0:not full
+	int empty,full,avail;
+	char test_buf_out[20] = {0};
+
+	empty = kfifo_is_empty(&fh_spi_slave->kfifo_in);
+	full = kfifo_is_full(&fh_spi_slave->kfifo_in);
+	avail = kfifo_avail(&fh_spi_slave->kfifo_in);
+	printk("empty: %x, full: %x, avail: %x\n",empty,full,avail);
+	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fh_spi_slave->kfifo_in));
+	kfifo_in(&fh_spi_slave->kfifo_in, "hello", 5);
+	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fh_spi_slave->kfifo_in));
+
+	empty = kfifo_is_empty(&fh_spi_slave->kfifo_in);
+	full = kfifo_is_full(&fh_spi_slave->kfifo_in);
+	avail = kfifo_avail(&fh_spi_slave->kfifo_in);
+	printk("empty: %x, full: %x, avail: %x\n",empty,full,avail);
+
+	/* put values into the fifo */
+	for (i = 0; i !=5; i++)
+		kfifo_put(&fh_spi_slave->kfifo_in, &i);
+
+	i = kfifo_out(&fh_spi_slave->kfifo_in, test_buf_out, 5);
+	printk("data len is %d\n",i);
+	printk(KERN_INFO "buf: %.*s\n", i, test_buf_out);
+
+	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fh_spi_slave->kfifo_in));
+	i = kfifo_out(&fh_spi_slave->kfifo_in, test_buf_out, 10);
+	printk("data len is %d\n",i);
+	printk(KERN_INFO "buf: %.*s\n", i, test_buf_out);
+	#endif
+
+	return err;
+}
+
+static int __devexit fh_spi_slave_remove(struct platform_device *dev) /* TODO(review): frees nothing -- irq, mem region, mapping, clk, chrdev, class and kfifos from probe all leak on unbind */
+{
+	return 0;
+}
+
+static int fh_spi_slave_init_hw(struct fh_spi_slave_controller *fh_spi_slave,
+		struct fh_spi_platform_data *board_info) /* program the controller into slave mode; always returns 0 */
+{
+	int status;
+	fh_spi_slave->dwc.id = board_info->bus_no;
+	fh_spi_slave->dwc.fifo_len = board_info->fifo_len;
+	fh_spi_slave->dwc.rx_hs_no = board_info->rx_handshake_num;
+	fh_spi_slave->dwc.tx_hs_no = board_info->tx_handshake_num;
+	memset(&fh_spi_slave->dwc.dma_rx, 0, sizeof(struct _fh_spi_dma_transfer));
+	memset(&fh_spi_slave->dwc.dma_tx, 0, sizeof(struct _fh_spi_dma_transfer));
+	fh_spi_slave->dwc.pump_data_mode = PUMP_DATA_ISR_MODE;
+	//keep a reference to the board platform data
+	fh_spi_slave->dwc.board_info = board_info;
+
+	fh_spi_slave->dwc.isr_flag = SPI_IRQ_RXFIM;
+	fh_spi_slave->dwc.frame_mode = SPI_MOTOROLA_MODE;
+	fh_spi_slave->dwc.transfer_mode = SPI_TX_RX_MODE;
+	fh_spi_slave->dwc.cpol = SPI_POLARITY_HIGH;
+	fh_spi_slave->dwc.cpha = SPI_PHASE_RX_FIRST;
+	do {
+		status = Spi_ReadStatus(&fh_spi_slave->dwc);
+	} while (status & 0x01); /* spin until BUSY (bit 0) clears -- NOTE(review): no timeout */
+	//disable the controller before touching the config registers
+	Spi_Enable(&fh_spi_slave->dwc, SPI_DISABLE);
+	//frame format, transfer mode and clock mode
+	Spi_SetFrameFormat(&fh_spi_slave->dwc, fh_spi_slave->dwc.frame_mode);
+	Spi_SetTransferMode(&fh_spi_slave->dwc, fh_spi_slave->dwc.transfer_mode);
+	Spi_SetPolarity(&fh_spi_slave->dwc, fh_spi_slave->dwc.cpol);
+	Spi_SetPhase(&fh_spi_slave->dwc, fh_spi_slave->dwc.cpha);
+	//Spi_SetRxlevlel(&fh_spi_slave->dwc, fh_spi_slave->dwc.fifo_len / 2);
+	Spi_SetRxlevlel(&fh_spi_slave->dwc, 0); /* threshold 0: presumably interrupts as soon as data arrives -- confirm vs databook */
+	Spi_SetSlaveMode(&fh_spi_slave->dwc, SPI_SLAVE_EN);
+	//mask everything, then enable only the rx interrupt
+	Spi_DisableIrq(&fh_spi_slave->dwc, SPI_IRQ_ALL);
+	Spi_EnableIrq(&fh_spi_slave->dwc, fh_spi_slave->dwc.isr_flag); /* rx only; tx irq is armed on write() */
+	//re-enable the controller with the new configuration
+	Spi_Enable(&fh_spi_slave->dwc, SPI_ENABLE);
+
+	return 0;
+
+}
+
+static struct platform_driver fh_spi_slave_driver = {
+	.probe = fh_spi_slave_probe,
+	.remove = __devexit_p(fh_spi_slave_remove),
+	.driver = {
+		.name = "fh_spi_slave", /* must match the platform device name registered by the board code */
+		.owner = THIS_MODULE,
+	},
+	.suspend =NULL, /* no power-management support */
+	.resume = NULL,
+};
+
+static int __init fh_spi_slave_init(void)
+{
+	return platform_driver_register(&fh_spi_slave_driver); /* probe runs for each matching platform device */
+}
+
+static void __exit fh_spi_slave_exit(void)
+{
+	platform_driver_unregister(&fh_spi_slave_driver); /* NOTE(review): remove() leaks most probe resources */
+}
+
+module_init(fh_spi_slave_init);
+module_exit(fh_spi_slave_exit);
+MODULE_AUTHOR("yu.zhang <zhangy@fullhan.com>");
+MODULE_DESCRIPTION("DUOBAO SPI SLAVE driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2e13a14b..b423fe92 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -318,7 +318,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
}
spi->master = master;
- spi->dev.parent = dev;
+ spi->dev.parent = &master->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
device_initialize(&spi->dev);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index b3692e6e..925c7cff 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1635,4 +1635,32 @@ config SERIAL_XILINX_PS_UART_CONSOLE
help
Enable a Xilinx PS UART port to be the system console.
+config SERIAL_FH
+ tristate "FH UART support"
+ select SERIAL_CORE
+ help
+ This driver supports the FH UART port.
+
+config SERIAL_FH_CONSOLE
+ bool "FH UART console support"
+ depends on SERIAL_FH=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable a FH UART port to be the system console.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
endmenu
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index cb2628fe..4cae882c 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -96,3 +96,7 @@ obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
+obj-$(CONFIG_SERIAL_FH) += fh_serial.o
+
+
+
diff --git a/drivers/tty/serial/fh_serial.c b/drivers/tty/serial/fh_serial.c
new file mode 100644
index 00000000..a1d4bdc4
--- /dev/null
+++ b/drivers/tty/serial/fh_serial.c
@@ -0,0 +1,1617 @@
+#if defined(CONFIG_SERIAL_FH_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/moduleparam.h>
+#include <linux/ratelimit.h>
+#include <linux/serial_reg.h>
+#include <linux/nmi.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/timex.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+#include "fh_serial.h"
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <mach/pmu.h>
+#include <linux/scatterlist.h>
+#include <linux/syslog.h>
+#include <mach/fh_dmac_regs.h>
+#include <mach/chip.h>
+/*********************************
+ *
+ * fh private
+ *
+ *********************************/
+#define REG_UART_RBR (0x0000)
+#define REG_UART_THR (0x0000)
+#define REG_UART_DLL (0x0000)
+#define REG_UART_DLH (0x0004)
+#define REG_UART_IER (0x0004)
+#define REG_UART_IIR (0x0008)
+#define REG_UART_FCR (0x0008)
+#define REG_UART_LCR (0x000c)
+#define REG_UART_MCR (0x0010)
+#define REG_UART_LSR (0x0014)
+#define REG_UART_MSR (0x0018)
+#define REG_UART_SCR (0x001c)
+#define REG_UART_FAR (0x0070)
+#define REG_UART_TFR (0x0074)
+#define REG_UART_RFW (0x0078)
+#define REG_UART_USR (0x007c)
+#define REG_UART_TFL (0x0080)
+#define REG_UART_RFL (0x0084)
+#define REG_UART_SRR (0x0088)
+#define REG_UART_SFE (0x0098)
+#define REG_UART_SRT (0x009c)
+#define REG_UART_STET (0x00a0)
+#define REG_UART_HTX (0x00a4)
+#define REG_UART_DMASA (0x00a8)
+#define REG_UART_CPR (0x00f4)
+#define REG_UART_UCV (0x00f8)
+#define REG_UART_CTR (0x00fc)
+
+#define DBGLINE() printk(KERN_DEBUG \
+ "file: %s\tfunc:%s\tline:%d\n",\
+ __FILE__, __func__, __LINE__)
+#define FH_SERIAL_NAME "ttyS"
+#define FH_DRIVE_NAME "ttyS"
+#define FH_DEV_NAME "ttyS"
+
+#define UART_DMA_TRANSFER_LEN (8)
+
+#define UART_READ_RX_DW_FIFO_OK 0
+#define UART_READ_RX_DW_FIFO_TIME_OUT 0xcc
+#define MAP_SIZE 0x80000
+
+#ifdef CONFIG_SERIAL_FH_CONSOLE
+static struct console fh_serial_console;
+#define FH_SERIAL_CONSOLE (&fh_serial_console)
+#else
+#define FH_SERIAL_CONSOLE NULL
+#endif
+
+#define tx_enabled(port) ((port)->unused[0])
+#define rx_enabled(port) ((port)->unused[1])
+#define FH_TYPE (99)
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+#define fh_dev_to_port(__dev) (struct uart_port *)dev_get_drvdata(__dev)
+
+
+#define fh_uart_readl(addr) \
+ __raw_readl(addr)
+#define fh_uart_writel(addr, val) \
+ __raw_writel((val), addr)
+
+#define fh_uart_readw(addr) \
+ __raw_readw(addr)
+#define fh_uart_writew(addr, val) \
+ __raw_writew((val), addr)
+
+/******************************************************************************
+ * Function prototype section
+ * add prototypes for all functions called by this file,execepting those
+ * declared in header file
+ *****************************************************************************/
+static void fh_uart_pm(struct uart_port *port, unsigned int level,
+ unsigned int old);
+static void fh_uart_stop_tx(struct uart_port *port);
+static void fh_uart_start_tx(struct uart_port *port);
+static void fh_uart_stop_rx(struct uart_port *port);
+static void fh_uart_start_rx(struct uart_port *port);
+static void fh_uart_enable_ms(struct uart_port *port);
+static unsigned int fh_uart_tx_empty(struct uart_port *port);
+static unsigned int fh_uart_get_mctrl(struct uart_port *port);
+static void fh_uart_set_mctrl(struct uart_port *port, unsigned int mctrl);
+static void fh_uart_break_ctl(struct uart_port *port, int break_state);
+static irqreturn_t fh_uart_rx_chars(int irq, void *dev_id);
+static irqreturn_t fh_uart_tx_chars(int irq, void *dev_id);
+static irqreturn_t fh_uart_isr(int irq, void *dev_id);
+static void fh_serial_shutdown(struct uart_port *port);
+static int fh_serial_startup(struct uart_port *port);
+static void fh_serial_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old);
+static const char *fh_serial_type(struct uart_port *port);
+static void fh_serial_release_port(struct uart_port *port);
+static int fh_serial_request_port(struct uart_port *port);
+static void fh_serial_config_port(struct uart_port *port, int flags);
+static int fh_uart_set_wake(struct uart_port *, unsigned int state);
+
+struct fh_uart_port *to_fh_uart_port(struct uart_port *port);
+struct fh_uart_port *info_to_fh_uart_port(fh_uart_info *info);
+static bool fh_uart_dma_chan_filter(struct dma_chan *chan, void *param);
+static void fh_serial_dma_tx_char_done(void *arg);
+static void fh_serial_dma_rx_callback(void *arg);
+static void fh_serial_dma_rx_char(void *arg);
+static int uart_dma_set_rx_para(struct fh_uart_port *fh_uart_dma, int xfer_len,
+ void (*call_back)(void *arg));
+static int uart_dma_set_tx_para(struct fh_uart_port *fh_uart_dma, int xfer_len,
+ void (*call_back)(void *arg));
+static void fh_serial_dma_tx_char(struct fh_uart_port *uart_dma);
+/*****************************************************************************
+ * Global variables section - Local
+ * define global variables(will be refered only in this file) here,
+ * static keyword should be used to limit scope of local variable to this file
+ * e.g.
+ * static uint8_t ufoo;
+ *****************************************************************************/
+static struct uart_ops fh_serial_ops = {
+ .pm = fh_uart_pm,
+ .tx_empty = fh_uart_tx_empty,
+ .get_mctrl = fh_uart_get_mctrl,
+ .set_mctrl = fh_uart_set_mctrl,
+ .stop_tx = fh_uart_stop_tx,
+ .start_tx = fh_uart_start_tx,
+ .stop_rx = fh_uart_stop_rx,
+ .enable_ms = fh_uart_enable_ms,
+ .break_ctl = fh_uart_break_ctl,
+ .startup = fh_serial_startup,
+ .shutdown = fh_serial_shutdown,
+ .set_termios = fh_serial_set_termios,
+ .type = fh_serial_type,
+ .release_port = fh_serial_release_port,
+ .request_port = fh_serial_request_port,
+ .config_port = fh_serial_config_port,
+ .set_wake = fh_uart_set_wake,
+ .verify_port = NULL,
+};
+
+static struct uart_driver fh_uart_drv = {
+ .owner = THIS_MODULE,
+ .driver_name = FH_DRIVE_NAME,
+ .nr = FH_UART_NUMBER,
+ .cons = FH_SERIAL_CONSOLE,
+ .dev_name = FH_DEV_NAME,
+ .major = 4,
+ .minor = 64,
+};
+
+#if (defined(CONFIG_ARCH_FH8810) || defined(CONFIG_ARCH_WUDANG))
+#define UART_PORT0_ISR ISR_NUMBER1
+#define UART_PORT1_ISR ISR_NUMBER0
+#define UART_PORT0_BASE UART1_REG_BASE
+#define UART_PORT1_BASE UART0_REG_BASE
+#define UART_PORT0(x) VUART1(x)
+#define UART_PORT1(x) VUART0(x)
+#else
+#define UART_PORT0_ISR ISR_NUMBER0
+#define UART_PORT0(x) VUART0(x)
+#define UART_PORT0_BASE UART0_REG_BASE
+#if FH_UART_NUMBER >= 2
+#define UART_PORT1_ISR ISR_NUMBER1
+#define UART_PORT1_BASE UART1_REG_BASE
+#define UART_PORT1(x) VUART1(x)
+#endif
+#if FH_UART_NUMBER >= 3
+#define UART_PORT2_ISR ISR_NUMBER2
+#define UART_PORT2_BASE UART2_REG_BASE
+#define UART_PORT2(x) VUART2(x)
+#endif
+#if FH_UART_NUMBER >= 4
+#define UART_PORT3_ISR ISR_NUMBER3
+#define UART_PORT3_BASE UART3_REG_BASE
+#define UART_PORT3(x) VUART3(x + 0x100)
+#endif
+#endif
+
+static struct fh_uart_port fh_own_ports[FH_UART_NUMBER] = {
+ [0] = {
+ .port = {
+ .lock = __SPIN_LOCK_UNLOCKED(fh_own_ports[0].port.lock),
+ .iotype = UPIO_MEM,
+ .irq = UART_PORT0_ISR,
+ .uartclk = UART_CLOCK_FREQ,
+ .fifosize = UART0_FIFO_SIZE,
+ .ops = &fh_serial_ops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ .mapbase = UART_PORT0_BASE,
+ .membase = (unsigned char __iomem *)UART_PORT0(UART_PORT0_BASE),
+ },
+ .fh_info = {
+ .name = "FH UART0",
+ .irq_num = UART_PORT0_ISR,
+ .base_add = (unsigned char __iomem *)UART_PORT0(UART_PORT0_BASE),
+ .baudrate = BAUDRATE_115200,
+ .line_ctrl = Uart_line_8n2,
+ .fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER,
+ }
+ },
+#if FH_UART_NUMBER >= 2
+ [1] = {
+ .port = {
+ .lock = __SPIN_LOCK_UNLOCKED(fh_own_ports[1].port.lock),
+ .iotype = UPIO_MEM,
+ .irq = UART_PORT1_ISR,
+ .uartclk = UART_CLOCK_FREQ,
+ .fifosize = UART1_FIFO_SIZE,
+ .ops = &fh_serial_ops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 1,
+ .mapbase = UART_PORT1_BASE,
+ .membase = (unsigned char __iomem *)UART_PORT1(UART_PORT1_BASE),
+
+ },
+ .fh_info = {
+ .name = "FH UART1",
+ .irq_num = UART_PORT1_ISR,
+ .base_add = (unsigned char __iomem *)UART_PORT1(UART_PORT1_BASE),
+ .baudrate = BAUDRATE_115200,
+ .line_ctrl = Uart_line_8n2,
+ .fifo_ctrl = UART_DMA_RXFIFO_DEPTH_HALF,
+ .use_dma = 0,
+ },
+ .uart_dma = {
+ .tx_hs_no = UART1_TX_HW_HANDSHAKE,
+ .rx_hs_no = UART1_RX_HW_HANDSHAKE,
+ .tx_dma_channel = UART1_DMA_TX_CHAN,
+ .rx_dma_channel = UART1_DMA_RX_CHAN,
+ .rx_xmit_len = UART_DMA_TRANSFER_LEN,
+ }
+ },
+#endif
+#if FH_UART_NUMBER >= 3
+ [2] = {
+ .port = {
+ .lock = __SPIN_LOCK_UNLOCKED(fh_own_ports[2].port.lock),
+ .iotype = UPIO_MEM,
+ .irq = UART_PORT2_ISR,
+ .uartclk = UART_CLOCK_FREQ,
+ .fifosize = UART2_FIFO_SIZE,
+ .ops = &fh_serial_ops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 2,
+ .mapbase = UART_PORT2_BASE,
+ .membase = (unsigned char __iomem *)UART_PORT2(UART_PORT2_BASE),
+
+ },
+ .fh_info = {
+ .name = "FH UART2",
+ .irq_num = UART_PORT2_ISR,
+ .base_add = (unsigned char __iomem *)UART_PORT2(UART_PORT2_BASE),
+ .baudrate = BAUDRATE_115200,
+ .line_ctrl = Uart_line_8n2,
+ .fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER,
+ }
+ },
+#endif
+#if FH_UART_NUMBER >= 4
+ [3] = {
+ .port = {
+ .lock = __SPIN_LOCK_UNLOCKED(fh_own_ports[3].port.lock),
+ .iotype = UPIO_MEM,
+ .irq = UART_PORT3_ISR,
+ .uartclk = UART_CLOCK_FREQ,
+ .fifosize = UART3_FIFO_SIZE,
+ .ops = &fh_serial_ops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 3,
+ .mapbase = UART_PORT3_BASE,
+ .membase = (unsigned char __iomem *)UART_PORT3(UART_PORT3_BASE),
+
+ },
+ .fh_info = {
+ .name = "FH UART3",
+ .irq_num = UART_PORT3_ISR,
+ .base_add = (unsigned char __iomem *)UART_PORT3(UART_PORT3_BASE),
+ .baudrate = BAUDRATE_115200,
+ .line_ctrl = Uart_line_8n2,
+ .fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER,
+ }
+ },
+#endif
+};
+
+/* Recover the driver-private fh_uart_port from its embedded uart_port. */
+struct fh_uart_port *to_fh_uart_port(struct uart_port *port)
+{
+	return container_of(port, struct fh_uart_port, port);
+
+}
+
+/* Recover the driver-private fh_uart_port from its embedded fh_info. */
+struct fh_uart_port *info_to_fh_uart_port(fh_uart_info *info)
+{
+	return container_of(info, struct fh_uart_port, fh_info);
+
+}
+
+/*
+ * Clear the given interrupt-enable bits in the UART IER register
+ * (read-modify-write).  Always returns UART_CONFIG_OK.
+ */
+s32 Uart_Disable_Irq(fh_uart_info *desc, uart_irq_e interrupts)
+{
+	u32 ret;
+	u32 base = (u32)desc->base_add;
+
+	ret = fh_uart_readl(base + REG_UART_IER);
+	ret &= ~interrupts;
+	fh_uart_writel(base+REG_UART_IER, ret);
+
+	return UART_CONFIG_OK;
+}
+
+/*
+ * Set the given interrupt-enable bits in the UART IER register
+ * (read-modify-write).  Always returns UART_CONFIG_OK.
+ */
+s32 Uart_Enable_Irq(fh_uart_info *desc, uart_irq_e interrupts)
+{
+	u32 ret;
+	u32 base = (u32)desc->base_add;
+
+	/*
+	 * BUGFIX: the original read "base" (offset 0 = RBR/DLL), which
+	 * returns the wrong value for the IER mask and, with DLAB clear,
+	 * pops a byte off the RX FIFO as a side effect.  Read the IER
+	 * itself, matching Uart_Disable_Irq().
+	 */
+	ret = fh_uart_readl(base + REG_UART_IER);
+	ret |= interrupts;
+	fh_uart_writel(base + REG_UART_IER, ret);
+
+	return UART_CONFIG_OK;
+
+}
+
+/*
+ * Program the FIFO control register from desc->fifo_ctrl, then read
+ * back the IIR register to confirm the FIFOs actually enabled (FCR is
+ * write-only; the FIFO-enabled state is reflected in IIR).
+ */
+s32 Uart_Fifo_Config(fh_uart_info *desc)
+{
+	u32 ret;
+	u32 base = (u32)desc->base_add;
+
+	fh_uart_writel(base + REG_UART_FCR, desc->fifo_ctrl);
+	ret = fh_uart_readl(base + REG_UART_IIR);
+
+	if (ret & UART_FIFO_IS_ENABLE)
+		return UART_CONFIG_FIFO_OK;
+	else
+		return UART_CONFIG_FIFO_ERROR;
+}
+
+/* Return the raw UART status register (USR: busy / FIFO level flags). */
+s32 Uart_Read_Control_Status(fh_uart_info *desc)
+{
+	u32 base = (u32)desc->base_add;
+	return fh_uart_readl(base + REG_UART_USR);
+}
+
+/*
+ * Write desc->line_ctrl (word length / stop bits / parity) to the LCR.
+ * Fails with UART_IS_BUSY if the UART is mid-transfer, because the LCR
+ * must not be changed while the controller is busy.
+ */
+s32 Uart_Set_Line_Control(fh_uart_info *desc)
+{
+	u32 ret;
+	u32 base = (u32)desc->base_add;
+
+	ret = Uart_Read_Control_Status(desc);
+	if (ret & UART_STATUS_BUSY)
+		return UART_IS_BUSY;
+
+	fh_uart_writel(base + REG_UART_LCR, desc->line_ctrl);
+	return UART_CONFIG_LINE_OK;
+}
+
+/* Return the raw line status register (LSR: DR/OE/PE/FE/BI/THRE/TEMT). */
+s32 Uart_Read_Line_Status(fh_uart_info *desc)
+{
+	u32 base = (u32)desc->base_add;
+	return fh_uart_readl(base + REG_UART_LSR);
+}
+
+/*
+ * Program the baud-rate divisor latch from desc->baudrate (low byte to
+ * DLL, high byte to DLH).  The divisor registers are only reachable
+ * with LCR.DLAB set, so DLAB is raised around the write and cleared
+ * afterwards.  Fails with UART_IS_BUSY if the UART is mid-transfer.
+ */
+s32 Uart_Set_Clock_Divisor(fh_uart_info *desc)
+{
+	u32 low, high, ret;
+	u32 base = (u32)desc->base_add;
+
+	low = desc->baudrate & 0x00ff;
+	high = (desc->baudrate & 0xff00) >> 8;
+
+	ret = Uart_Read_Control_Status(desc);
+	if (ret & UART_STATUS_BUSY)
+		return UART_IS_BUSY;
+
+	ret = fh_uart_readl(base + REG_UART_LCR);
+	/* if DLAB not set */
+	if (!(ret & UART_LCR_DLAB_POS)) {
+		ret |= UART_LCR_DLAB_POS;
+		fh_uart_writel(base + REG_UART_LCR, ret);
+	}
+	fh_uart_writel(base + REG_UART_DLL, low);
+	fh_uart_writel(base + REG_UART_DLH, high);
+
+	/* clear DLAB (bit 7) while keeping the other LCR bits */
+	ret = ret & 0x7f;
+	fh_uart_writel(base + REG_UART_LCR, ret);
+
+	return UART_CONFIG_DIVISOR_OK;
+}
+
+/* Return the raw interrupt identification register (IIR). */
+s32 Uart_Read_iir(fh_uart_info *desc)
+{
+	u32 base = (u32)desc->base_add;
+	return fh_uart_readl(base + REG_UART_IIR);
+}
+
+/*
+ * dmaengine channel filter: accept only the channel whose id matches
+ * the requested channel number passed via @param (int *).
+ */
+static bool fh_uart_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+	int dma_channel = *(int *) param;
+	bool ret = false;
+	if (chan->chan_id == dma_channel)
+		ret = true;
+	return ret;
+}
+/*
+ * DMA TX completion callback: account the transmitted bytes and mark
+ * the TX path idle so the next chunk may be submitted.
+ */
+static void fh_serial_dma_tx_char_done(void *arg)
+{
+	struct fh_uart_port *uart_dma = (struct fh_uart_port *)arg;
+	uart_dma->port.icount.tx += uart_dma->uart_dma.tx_count;
+	uart_dma->uart_dma.tx_done = 1;
+}
+/*
+ * Push one completed DMA RX chunk (rx_xmit_len bytes starting at
+ * rx_rd_ptr inside rx_dumy_buff) into the TTY layer, classifying any
+ * line-status errors the hardware flagged for the chunk.
+ */
+static void fh_serial_dma_rx_char(void *arg)
+{
+	struct fh_uart_port *uart_dma = (struct fh_uart_port *)arg;
+	struct uart_port *port = &uart_dma->port;
+	struct tty_struct *tty = uart_dma->port.state->port.tty;
+	unsigned int uerstat;
+	unsigned int flag;
+	unsigned int xmit_len;
+	unsigned int i;
+	xmit_len = uart_dma->uart_dma.rx_xmit_len;
+	uart_dma->port.icount.rx += xmit_len;
+	uerstat = Uart_Read_Line_Status(&uart_dma->fh_info);
+	flag = TTY_NORMAL;
+	if (unlikely(uerstat & UART_LINE_STATUS_RFE)) {
+		printk(KERN_INFO "rxerr: port rxs=0x%08x\n", uerstat);
+		if (uerstat & UART_LINE_STATUS_BI) {
+			port->icount.brk++;
+			if (uart_handle_break(port))
+				goto dma_ignore_char;
+		}
+	}
+	if (uerstat & UART_LINE_STATUS_PE)
+		port->icount.parity++;
+	/*
+	 * BUGFIX: framing errors are reported by the FE bit, not BI.
+	 * Break (BI) is already accounted as icount.brk above; the
+	 * original double-counted breaks as frame errors and never
+	 * counted real framing errors.
+	 */
+	if (uerstat & UART_LINE_STATUS_FE)
+		port->icount.frame++;
+	if (uerstat & UART_LINE_STATUS_OE)
+		port->icount.overrun++;
+	uerstat &= port->read_status_mask;
+	if (uerstat & UART_LINE_STATUS_BI)
+		flag = TTY_BREAK;
+	else if (uerstat & UART_LINE_STATUS_PE)
+		flag = TTY_PARITY;
+	else if (uerstat & (UART_LINE_STATUS_FE |
+		UART_LINE_STATUS_OE))
+		flag = TTY_FRAME;
+	/* Feed the chunk to the TTY, honouring sysrq interception. */
+	for (i = uart_dma->uart_dma.rx_rd_ptr; \
+		i < uart_dma->uart_dma.rx_rd_ptr + xmit_len; i++) {
+		if (!uart_handle_sysrq_char(&uart_dma->port, \
+			uart_dma->uart_dma.rx_dumy_buff[i])) {
+			uart_insert_char(&uart_dma->port, uerstat, \
+				UART_LINE_STATUS_OE,\
+				uart_dma->uart_dma.rx_dumy_buff[i], flag);
+		}
+	}
+dma_ignore_char:
+	tty_flip_buffer_push(tty);
+}
+/*
+ * Build and arm one DMA RX descriptor: peripheral (RBR, fixed source
+ * address) to memory (rx_dumy_buff ring at rx_wr_ptr, incrementing
+ * destination), single scatterlist entry capped at 128 bytes.
+ * The completion callback is hard-wired to fh_serial_dma_rx_callback;
+ * the @call_back parameter is currently unused.
+ */
+static int uart_dma_set_rx_para(struct fh_uart_port *fh_uart_dma,
+	int xfer_len,
+	void (*call_back)(void *arg))
+{
+	struct fh_dma_extra ext_para;
+	struct dma_slave_config *rx_config;
+	struct dma_chan *rxchan;
+	struct scatterlist *p_sca_list;
+	unsigned int sg_size = 1;
+	memset(&fh_uart_dma->uart_dma.dma_rx.cfg,
+		0,
+		sizeof(struct dma_slave_config));
+	memset(&ext_para, 0, sizeof(struct fh_dma_extra));
+	p_sca_list = &fh_uart_dma->uart_dma.dma_rx.sgl[0];
+	rxchan = fh_uart_dma->uart_dma.dma_rx.chan;
+	rx_config = &fh_uart_dma->uart_dma.dma_rx.cfg;
+	rx_config->src_addr = fh_uart_dma->uart_dma.paddr + REG_UART_RBR;
+	rx_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	rx_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	rx_config->slave_id = fh_uart_dma->uart_dma.rx_hs_no;
+	rx_config->src_maxburst = 8;
+	rx_config->dst_maxburst = 8;
+	rx_config->direction = DMA_DEV_TO_MEM;
+	rx_config->device_fc = 0;
+	/* source is a FIFO register: do not increment the source address */
+	ext_para.sinc = FH_DMA_SLAVE_FIX;
+	p_sca_list->dma_address = fh_uart_dma->uart_dma.rx_dma_add \
+		+ fh_uart_dma->uart_dma.rx_wr_ptr;
+	p_sca_list->length = min(xfer_len, 128);
+	ext_para.dinc = FH_DMA_SLAVE_INC;
+	dmaengine_slave_config(rxchan, rx_config);
+	fh_uart_dma->uart_dma.dma_rx.desc =
+		rxchan->device->device_prep_slave_sg(rxchan,
+		&fh_uart_dma->uart_dma.dma_rx.sgl[0], sg_size, DMA_DEV_TO_MEM,
+		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP,
+		&ext_para);
+	fh_uart_dma->uart_dma.dma_rx.actual_sgl_size = sg_size;
+	fh_uart_dma->uart_dma.dma_rx.desc->callback = fh_serial_dma_rx_callback;
+	fh_uart_dma->uart_dma.dma_rx.desc->callback_param = fh_uart_dma;
+	return 0;
+}
+/*
+ * DMA RX completion callback: advance the write pointer around the
+ * 128-byte ring, hand the finished chunk to the TTY layer, then
+ * re-arm the next RX descriptor so reception runs continuously.
+ */
+static void fh_serial_dma_rx_callback(void *arg)
+{
+	struct fh_uart_port *port = (struct fh_uart_port *)arg;
+	port->uart_dma.rx_wr_ptr =
+	(port->uart_dma.rx_wr_ptr + port->uart_dma.rx_xmit_len) & (128 - 1);
+	fh_serial_dma_rx_char(port);
+	port->uart_dma.rx_rd_ptr = port->uart_dma.rx_wr_ptr;
+	uart_dma_set_rx_para(port,
+		port->uart_dma.rx_xmit_len,
+		fh_serial_dma_rx_callback);
+	port->uart_dma.dma_rx.desc->tx_submit(port->uart_dma.dma_rx.desc);
+}
+/*
+ * Build one DMA TX descriptor: memory (tx_dumy_buff) to peripheral
+ * (THR, fixed destination address), single scatterlist entry capped at
+ * 128 bytes.  Completion is reported via fh_serial_dma_tx_char_done;
+ * the @call_back parameter is currently unused.
+ *
+ * NOTE(review): sinc/dinc look swapped versus the RX path — for
+ * mem-to-dev one would expect an incrementing source and fixed
+ * destination; presumably the fh_dma backend interprets these per
+ * direction.  TODO confirm against mach/fh_dmac_regs.h.
+ */
+static int uart_dma_set_tx_para(struct fh_uart_port *fh_uart_dma,
+	int xfer_len,
+	void (*call_back)(void *arg))
+{
+	struct fh_dma_extra ext_para;
+	struct dma_slave_config *tx_config;
+	struct dma_chan *txchan;
+	struct scatterlist *p_sca_list;
+	unsigned int sg_size = 1;
+	memset(&fh_uart_dma->uart_dma.dma_tx.cfg,
+		0,
+		sizeof(struct dma_slave_config));
+	memset(&ext_para, 0, sizeof(struct fh_dma_extra));
+	p_sca_list = &fh_uart_dma->uart_dma.dma_tx.sgl[0];
+	txchan = fh_uart_dma->uart_dma.dma_tx.chan;
+	tx_config = &fh_uart_dma->uart_dma.dma_tx.cfg;
+	tx_config->dst_addr = fh_uart_dma->uart_dma.paddr + REG_UART_THR;
+	tx_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	tx_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	tx_config->slave_id = fh_uart_dma->uart_dma.tx_hs_no;
+	tx_config->src_maxburst = 1;
+	tx_config->dst_maxburst = 1;
+	tx_config->direction = DMA_MEM_TO_DEV;
+	tx_config->device_fc = 0;
+	ext_para.sinc = FH_DMA_SLAVE_FIX;
+	p_sca_list->dma_address = fh_uart_dma->uart_dma.tx_dma_add;
+	p_sca_list->length = min(xfer_len, 128);
+	ext_para.dinc = FH_DMA_SLAVE_INC;
+	dmaengine_slave_config(txchan, tx_config);
+	fh_uart_dma->uart_dma.dma_tx.desc =
+		txchan->device->device_prep_slave_sg(txchan,
+		&fh_uart_dma->uart_dma.dma_tx.sgl[0], sg_size, DMA_MEM_TO_DEV,
+		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP,
+		&ext_para);
+	fh_uart_dma->uart_dma.dma_tx.actual_sgl_size = sg_size;
+	fh_uart_dma->uart_dma.dma_tx.desc->callback = fh_serial_dma_tx_char_done;
+	fh_uart_dma->uart_dma.dma_tx.desc->callback_param = fh_uart_dma;
+	return 0;
+}
+/*
+ * Kick one DMA TX transfer: copy up to 128 bytes out of the circular
+ * xmit buffer (handling wrap-around) into the bounce buffer, program a
+ * TX descriptor and submit it.  Only runs when the previous transfer
+ * has completed (tx_done set by fh_serial_dma_tx_char_done).
+ */
+static void fh_serial_dma_tx_char(struct fh_uart_port *uart_dma)
+{
+	unsigned int xfer_len;
+	struct circ_buf *xmit = &uart_dma->port.state->xmit;
+	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart_dma->port)) {
+		/*
+		 * NOTE(review): the first assignment is dead — tx_done
+		 * ends up 0 here, which looks like it could leave the TX
+		 * path stuck with no completion callback pending to set
+		 * it back to 1.  Verify intended idle value.
+		 */
+		uart_dma->uart_dma.tx_done = 1;
+		uart_dma->uart_dma.tx_done = 0;
+		return;
+	}
+	if (uart_dma->uart_dma.tx_done) {
+		xfer_len =
+			uart_circ_chars_pending(xmit); /* Check xfer length */
+		if (xfer_len > 128)
+			xfer_len = 128;
+		if (xmit->tail < xmit->head) {
+			/* contiguous region: single copy */
+			memcpy(uart_dma->uart_dma.tx_dumy_buff,
+				&xmit->buf[xmit->tail],
+				xfer_len);
+		} else {
+			/* wrapped region: copy tail..end, then start..head */
+			int first = UART_XMIT_SIZE - xmit->tail;
+			int second = xmit->head;
+			memcpy(uart_dma->uart_dma.tx_dumy_buff,
+				&xmit->buf[xmit->tail],
+				first);
+			if (second)
+				memcpy(&uart_dma->uart_dma.tx_dumy_buff[first],
+					&xmit->buf[0],
+					second);
+		}
+		xmit->tail = (xmit->tail + xfer_len) & (UART_XMIT_SIZE - 1);
+		uart_dma->uart_dma.tx_count = xfer_len;
+		uart_dma_set_tx_para(
+			uart_dma,
+			xfer_len,
+			fh_serial_dma_tx_char_done);
+		uart_dma->uart_dma.dma_tx.desc->tx_submit(uart_dma->uart_dma.dma_tx.desc);
+		uart_dma->uart_dma.tx_done = 0;
+	}
+}
+/* Arm and submit the first DMA RX descriptor; after this, reception
+ * is self-sustaining via fh_serial_dma_rx_callback re-arming. */
+void fh_uart_rx_dma_start(struct fh_uart_port *port)
+{
+	uart_dma_set_rx_para(port,
+		port->uart_dma.rx_xmit_len,
+		fh_serial_dma_rx_callback);
+	port->uart_dma.dma_rx.desc->tx_submit(port->uart_dma.dma_rx.desc);
+}
+
+/*
+ * (Re)initialise the UART hardware from the settings in @desc:
+ * reset FIFOs, program divisor, line control and FIFO control, then
+ * mask all interrupts and start reception (IRQ path, or one-shot DMA
+ * arm on first call).  Returns 0 on success or an ORed set of the
+ * UART_* error codes from the individual config steps.
+ */
+s32 Uart_Init(fh_uart_info *desc)
+{
+
+	u32 base = (u32)desc->base_add;
+	struct fh_uart_port *port = info_to_fh_uart_port(desc);
+	u8 test_init_status = 0;
+
+	/* reset fifo */
+	fh_uart_writel(base + REG_UART_FCR, 6);
+	test_init_status |= Uart_Set_Clock_Divisor(desc);
+	test_init_status |= Uart_Set_Line_Control(desc);
+	test_init_status |= Uart_Fifo_Config(desc);
+	if (test_init_status != 0)
+		return test_init_status;
+
+	Uart_Disable_Irq(desc, UART_INT_ALL);
+	if (!port->fh_info.use_dma)
+		fh_uart_start_rx(&port->port);
+	else {
+		/* DMA RX is armed exactly once; afterwards the completion
+		 * callback keeps it running. */
+		if (!port->uart_dma.HasInit) {
+			port->uart_dma.HasInit = 1;
+			fh_uart_rx_dma_start(port);
+		}
+	}
+	return 0;
+}
+
+
+
+/*********************************
+ *
+ *
+ * FH CONSOLE
+ *
+ *
+ *********************************/
+#ifdef CONFIG_SERIAL_FH_CONSOLE
+static struct uart_port *cons_uart;
+
+/*
+ * Console putchar: busy-wait until the TX FIFO has room (USR.TFNF
+ * set), then write the character to the THR.  Note the do/while body
+ * only refreshes the status; the wait condition follows the comment.
+ */
+static void
+fh_serial_console_putchar(struct uart_port *port, int ch)
+{
+	u32 ret;
+	struct fh_uart_port *myown_port = to_fh_uart_port(port);
+
+	do {
+		ret = (u8)Uart_Read_Control_Status(&myown_port->fh_info);
+	}
+	/* wait txfifo is full
+	 * 0 means full.
+	 * 1 means not full
+	 */
+	while (!(ret & UART_STATUS_TFNF))
+		;
+
+	fh_uart_writel(myown_port->fh_info.base_add + REG_UART_THR, ch);
+}
+
+/* Console write hook: emit @count bytes through the polled putchar,
+ * letting serial_core handle LF -> CRLF expansion. */
+static void
+fh_serial_console_write(struct console *co, const char *s,
+	unsigned int count)
+{
+	struct fh_uart_port *myown_port = &fh_own_ports[co->index];
+	uart_console_write(&myown_port->port,
+		s,
+		count,
+		fh_serial_console_putchar);
+}
+
+
+/*
+ * Console setup hook: validate the requested console index, remember
+ * the port, and apply "console=ttySx,..." options (defaulting to
+ * 115200 8n1 no-flow).
+ */
+static int __init
+fh_serial_console_setup(struct console *co,
+	char *options)
+{
+	struct uart_port *port;
+	int baud = 115200;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+	/* is this a valid port */
+	if ((co->index == -1) || (co->index >= FH_UART_NUMBER)) {
+		pr_err("ERROR: co->index invaild: %d\n", co->index);
+		/*
+		 * BUGFIX: the original fell through and indexed
+		 * fh_own_ports[co->index] out of bounds.  Refuse the
+		 * console instead.
+		 */
+		return -ENODEV;
+	}
+
+	port = &fh_own_ports[co->index].port;
+
+	/* is the port configured? */
+	if (port->mapbase == 0x0)
+		pr_err("ERROR: port->mapbase == 0x0\n");
+
+	cons_uart = port;
+	/*
+	 * Check whether an invalid uart number has been specified, and
+	 * if so, search for the first available port that does have
+	 * console support.
+	 */
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+	uart_set_options(port, co, baud, parity, bits, flow);
+
+	/* must true for setup ok, see printk.c line:1463 */
+	return 1;
+}
+
+
+/* console_initcall hook: bind the console to the uart driver and
+ * register it early so boot messages reach the UART. */
+int fh_serial_initconsole(void)
+{
+	fh_serial_console.data = &fh_uart_drv;
+	register_console(&fh_serial_console);
+	return 0;
+}
+
+static struct console fh_serial_console = {
+ .name = FH_SERIAL_NAME,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+ .index = -1,
+ .write = fh_serial_console_write,
+ .setup = fh_serial_console_setup,
+ .data = &fh_uart_drv,
+};
+
+#endif /* CONFIG_SERIAL_FH_CONSOLE */
+
+/*
+ * Stop transmission: in IRQ mode, write IER=0x01 to keep only the
+ * RX-data interrupt enabled (TX-empty off); in DMA mode, consume the
+ * in-flight chunk's accounting and mark the DMA TX path idle.
+ *
+ * NOTE(review): the final IER write is executed unconditionally, so in
+ * the non-DMA case the register is written twice and in the DMA case
+ * the RX interrupt is (re)enabled — verify this is intended.
+ */
+static void fh_uart_stop_tx(struct uart_port *port)
+{
+	/* close tx isr */
+	struct fh_uart_port *myown_port = to_fh_uart_port(port);
+	struct circ_buf *xmit = &myown_port->port.state->xmit;
+	u32 base = (u32)myown_port->fh_info.base_add;
+	if (!myown_port->fh_info.use_dma)
+		fh_uart_writel(base + REG_UART_IER, 0x01);
+	else{
+		xmit->tail =
+		(xmit->tail + myown_port->uart_dma.tx_count) & (UART_XMIT_SIZE - 1);
+		myown_port->port.icount.tx += myown_port->uart_dma.tx_count;
+		myown_port->uart_dma.tx_count = 0;
+		myown_port->uart_dma.tx_done = 1;
+	}
+	tx_enabled(port) = 0;
+	fh_uart_writel(base + REG_UART_IER, 0x01);
+}
+
+/*
+ * Start transmission: in IRQ mode enable the TX-empty interrupt
+ * (IER=0x03 keeps RX enabled too); in DMA mode submit the next chunk
+ * if no transfer is currently in flight.
+ */
+static void fh_uart_start_tx(struct uart_port *port)
+{
+	/* open tx isr */
+	struct fh_uart_port *myown_port = to_fh_uart_port(port);
+
+	if (!myown_port->fh_info.use_dma) {
+		u32 base = (u32)myown_port->fh_info.base_add;
+		fh_uart_writel(base + REG_UART_IER, 0x03);
+		tx_enabled(port) = 1;
+	} else {
+		if (myown_port->uart_dma.tx_done) {
+			fh_serial_dma_tx_char(myown_port);
+			tx_enabled(port) = 1;
+		}
+	}
+}
+
+/* Stop reception: mask the received-data-available interrupt. */
+static void fh_uart_stop_rx(struct uart_port *port)
+{
+	struct fh_uart_port *myown_port = to_fh_uart_port(port);
+	rx_enabled(port) = 0;
+	Uart_Disable_Irq(&myown_port->fh_info, UART_INT_ERBFI_POS);
+}
+
+/* Start reception: unmask the received-data-available interrupt. */
+static void fh_uart_start_rx(struct uart_port *port)
+{
+	struct fh_uart_port *myown_port = to_fh_uart_port(port);
+	rx_enabled(port) = 1;
+	Uart_Enable_Irq(&myown_port->fh_info, UART_INT_ERBFI_POS);
+}
+
+/* No-op: per-port power management is not implemented by this driver. */
+static void fh_uart_pm(struct uart_port *port, unsigned int level,
+	unsigned int old)
+{
+
+}
+
+/* No-op: wakeup configuration is not supported; always succeeds. */
+static int fh_uart_set_wake(struct uart_port *port, unsigned int state)
+{
+	return 0;
+}
+
+
+/* No-op: modem-status interrupts are not wired up in this driver. */
+static void fh_uart_enable_ms(struct uart_port *port)
+{
+
+}
+
+
+/*
+ * Report whether the TX FIFO is empty, based on the USR.TFE flag.
+ * Returns 1 (empty) or 0 (not empty), as serial_core expects a
+ * non-zero value for "empty".
+ */
+static unsigned int fh_uart_tx_empty(struct uart_port *port)
+{
+	struct fh_uart_port *myown_port = to_fh_uart_port(port);
+	/*
+	 * 1 means empty
+	 * 0:means no empty
+	 */
+	int ret = 1;
+	int ret_status;
+
+	ret_status = (u8)Uart_Read_Control_Status(&myown_port->fh_info);
+	if (ret_status & UART_STATUS_TFE)
+		ret = 1;
+	else
+		ret = 0;
+	return ret;
+}
+
+
+/* No modem-control lines on this UART: report none asserted. */
+static unsigned int fh_uart_get_mctrl(struct uart_port *port)
+{
+	return 0;
+}
+
+
+/* No-op: modem-control lines are not supported by this hardware. */
+static void fh_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+
+}
+
+/* No-op: break signalling is not implemented by this driver. */
+static void fh_uart_break_ctl(struct uart_port *port, int break_state)
+{
+
+}
+
+/*
+ * Interrupt-mode RX path: drain up to 64 characters from the RX FIFO,
+ * classify per-character line-status errors, feed each byte to the TTY
+ * layer (honouring sysrq interception), then push the flip buffer.
+ */
+static irqreturn_t
+fh_uart_rx_chars(int irq, void *dev_id)
+{
+	struct fh_uart_port *myown_port = dev_id;
+	struct uart_port *port = &myown_port->port;
+	struct tty_struct *tty = port->state->port.tty;
+	unsigned int ch = 0;
+	unsigned int flag;
+	unsigned int uerstat;
+	int max_count = 64;
+	int ret_status;
+
+	while (max_count-- > 0) {
+		/* check if rx fifo is empty */
+		ret_status = (u8)Uart_Read_Control_Status(&myown_port->fh_info);
+		if (!(ret_status & UART_STATUS_RFNE))
+			break;
+		/* read error in the rx process */
+		uerstat = Uart_Read_Line_Status(&myown_port->fh_info);
+		/* read data in the rxfifo */
+		if (uerstat & UART_LINE_STATUS_DR)
+			ch = fh_uart_readl(myown_port->fh_info.base_add + REG_UART_RBR);
+		/* insert the character into the buffer */
+		flag = TTY_NORMAL;
+		port->icount.rx++;
+		/* if at least one error in rx process */
+		if (unlikely(uerstat & UART_LINE_STATUS_RFE)) {
+			pr_err("rxerr: port ch=0x%02x, rxs=0x%08x\n",
+				ch, uerstat);
+			/* check for break */
+			if (uerstat & UART_LINE_STATUS_BI) {
+				port->icount.brk++;
+				if (uart_handle_break(port))
+					goto ignore_char;
+			}
+
+			/*
+			 * BUGFIX: framing errors are flagged by FE, not
+			 * BI.  The original re-tested BI here, double
+			 * counting breaks and never counting real
+			 * framing errors.
+			 */
+			if (uerstat & UART_LINE_STATUS_FE)
+				port->icount.frame++;
+			if (uerstat & UART_LINE_STATUS_OE)
+				port->icount.overrun++;
+
+			uerstat &= port->read_status_mask;
+
+			if (uerstat & UART_LINE_STATUS_BI)
+				flag = TTY_BREAK;
+			else if (uerstat & UART_LINE_STATUS_PE)
+				flag = TTY_PARITY;
+			else if (uerstat & (UART_LINE_STATUS_FE |
+					UART_LINE_STATUS_OE))
+				flag = TTY_FRAME;
+		}
+
+		if (uart_handle_sysrq_char(port, ch))
+			goto ignore_char;
+
+		uart_insert_char(port, uerstat, UART_LINE_STATUS_OE,
+			ch, flag);
+
+	ignore_char:
+		continue;
+	}
+	tty_flip_buffer_push(tty);
+	return IRQ_HANDLED;
+}
+
+
+
+/*
+ * Interrupt-mode TX path: drain the circular xmit buffer into the TX
+ * FIFO (at most 256 bytes per interrupt, stopping when the FIFO
+ * fills), wake writers when the buffer drops below WAKEUP_CHARS, and
+ * stop TX when there is nothing left to send.
+ */
+static irqreturn_t
+fh_uart_tx_chars(int irq, void *dev_id)
+{
+	struct fh_uart_port* myown_port = dev_id;
+	struct uart_port *port = &myown_port->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	int count = 256;
+	int ret_status;
+
+	/* if there isn't anything more to transmit, or the uart is now
+	 * stopped, disable the uart and exit
+	 */
+	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+		fh_uart_stop_tx(port);
+		goto out;
+	}
+	/* try and drain the buffer... */
+	while (!uart_circ_empty(xmit) && count-- > 0) {
+
+		/*
+		 * check the tx fifo full?
+		 * full then break
+		 */
+		ret_status = (u8)Uart_Read_Control_Status(&myown_port->fh_info);
+		if(!(ret_status & UART_STATUS_TFNF))
+			break;
+		/* write data to the hw fifo */
+		fh_uart_writel(myown_port->fh_info.base_add + REG_UART_THR, \
+			xmit->buf[xmit->tail]);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		port->icount.tx++;
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+
+	if (uart_circ_empty(xmit))
+		fh_uart_stop_tx(port);
+out:
+	return IRQ_HANDLED;
+}
+
+/*
+ * Top-level interrupt handler: decode the IIR and dispatch to the RX
+ * or TX worker.  IIR values tested: 0x06 = receiver line status
+ * (error), 0x04 = received data available, 0x0c = character timeout,
+ * 0x02 = THR empty.
+ *
+ * NOTE(review): 0x06 covers all line-status errors (OE/PE/FE/BI), not
+ * only overrun as the message suggests — confirm against the UART
+ * databook.
+ */
+static irqreturn_t
+fh_uart_isr(int irq, void *dev_id)
+{
+	irqreturn_t ret_isr;
+	struct fh_uart_port* myown_port = dev_id;
+	int ret_iir;
+
+	/* check if the tx empty isr */
+	ret_iir = Uart_Read_iir(&myown_port->fh_info);
+	if (ret_iir == 0x06)
+		pr_err("uart overrun\n");
+
+	if ((ret_iir & 0x04)||(ret_iir & 0x0c))
+		ret_isr = fh_uart_rx_chars(irq,dev_id);
+
+	if (ret_iir & 0x02)
+		ret_isr = fh_uart_tx_chars(irq,dev_id);
+	else
+		ret_isr = IRQ_HANDLED;
+
+	return ret_isr;
+}
+
+/*
+ * Port shutdown: mask all UART interrupts, reset both FIFOs (FCR=6),
+ * release the IRQ and mark TX/RX disabled.
+ */
+static void fh_serial_shutdown(struct uart_port *port)
+{
+	struct fh_uart_port* myown_port = to_fh_uart_port(port);
+
+	Uart_Disable_Irq(&myown_port->fh_info,UART_INT_ALL);
+	fh_uart_writel( myown_port->fh_info.base_add + REG_UART_FCR, 6);
+	free_irq(myown_port->fh_info.irq_num, myown_port);
+	tx_enabled(port) = 0;
+	rx_enabled(port) = 0;
+}
+
+/*
+ * Port startup: wait for the transmitter to drain, (re)initialise the
+ * hardware, then request the port's IRQ and allow it to wake the
+ * system.  Returns 0 on success or the request_irq() error.
+ *
+ * NOTE(review): the drain loop below has no timeout — a stuck
+ * transmitter would hang startup.  Confirm acceptable for this SoC.
+ */
+static int fh_serial_startup(struct uart_port *port)
+{
+	struct fh_uart_port* myown_port = to_fh_uart_port(port);
+	int ret;
+	int status;
+
+	do {
+		status = Uart_Read_Line_Status(&myown_port->fh_info);
+
+	} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+
+	Uart_Init(&myown_port->fh_info);
+	if ((ret = request_irq(myown_port->fh_info.irq_num,
+		fh_uart_isr,
+		0,
+		FH_DEV_NAME,
+		(void*)myown_port))) {
+		pr_err("cannot get irq %d\n", myown_port->fh_info.irq_num);
+		return ret;
+	}
+
+	enable_irq_wake(myown_port->fh_info.irq_num);
+
+	return 0;
+}
+
+/*
+ * Apply new termios settings: translate c_cflag into LCR bits (word
+ * length, stop bits, parity), compute the baud divisor, build the
+ * read/ignore status masks, and reprogram the hardware via Uart_Init.
+ * The transmitter is drained before touching LCR/divisor registers,
+ * which must not change while the UART is busy.
+ */
+static void fh_serial_set_termios(struct uart_port *port,
+	struct ktermios *termios,
+	struct ktermios *old)
+{
+	struct fh_uart_port* myown_port = to_fh_uart_port(port);
+
+	unsigned long flags;
+	unsigned int baud, quot;
+	unsigned int line_data = 0,status;
+
+	/* wait until both TX FIFO and shift register are empty */
+	do {
+		status = Uart_Read_Line_Status(&myown_port->fh_info);
+
+	} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+
+	/* word length */
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		line_data |= UART_LCR_WLEN5;
+		break;
+	case CS6:
+		line_data |= UART_LCR_WLEN6;
+		break;
+	case CS7:
+		line_data |= UART_LCR_WLEN7;
+		break;
+	case CS8:
+		line_data |= UART_LCR_WLEN8;
+		break;
+	default:
+		line_data |= UART_LCR_WLEN8;
+		break;
+	}
+	/* stop bits */
+	if (termios->c_cflag & CSTOPB)
+		line_data |= UART_LCR_STOP;
+
+	if (termios->c_cflag & PARENB) {
+		line_data |= UART_LCR_PARITY;
+
+		if (!(termios->c_cflag & PARODD))
+			line_data |= UART_LCR_EPAR;
+	}
+	/*
+	 * baud cal.
+	 * baud is the uart will be out.
+	 * the quot is the div
+	 */
+	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+	quot = uart_get_divisor(port, baud);
+
+	do{
+		status = Uart_Read_Line_Status(&myown_port->fh_info);
+	} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+
+	uart_update_timeout(port, termios->c_cflag, baud);
+	spin_lock_irqsave(&myown_port->port.lock, flags);
+
+	/* which LSR bits we act on when receiving */
+	myown_port->port.read_status_mask =
+		UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+	if (termios->c_iflag & INPCK)
+		myown_port->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		myown_port->port.read_status_mask |= UART_LSR_BI;
+
+	/*
+	 * Characters to ignore
+	 */
+	myown_port->port.ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		myown_port->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+	if (termios->c_iflag & IGNBRK) {
+		myown_port->port.ignore_status_mask |= UART_LSR_BI;
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR)
+			myown_port->port.ignore_status_mask |= UART_LSR_OE;
+	}
+
+	/*
+	 * ignore all characters if CREAD is not set
+	 */
+	if ((termios->c_cflag & CREAD) == 0)
+		myown_port->port.ignore_status_mask |= UART_LSR_DR;
+
+
+	myown_port->fh_info.line_ctrl = (uart_line_e)line_data;
+	myown_port->fh_info.baudrate = quot;
+	myown_port->fh_info.fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER;
+	Uart_Init(&myown_port->fh_info);
+	spin_unlock_irqrestore(&myown_port->port.lock, flags);
+}
+
+
+
+/* Return the human-readable port type shown in /proc/tty/driver. */
+static const char *fh_serial_type(struct uart_port *port)
+{
+	return FH_SERIAL_NAME;
+}
+
+/* Release the MMIO region claimed in fh_serial_request_port(). */
+static void fh_serial_release_port(struct uart_port *port)
+{
+	release_mem_region(port->mapbase, MAP_SIZE);
+}
+
+/* Claim the port's MMIO region; -EBUSY if already taken. */
+static int fh_serial_request_port(struct uart_port *port)
+{
+	struct fh_uart_port* myown_port = to_fh_uart_port(port);
+	const char* name = myown_port->fh_info.name;
+	return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY;
+}
+
+
+/* Autoconfiguration hook: claim resources and stamp the port type. */
+static void fh_serial_config_port(struct uart_port *port, int flags)
+{
+	if (flags & UART_CONFIG_TYPE) {
+		fh_serial_request_port(port);
+		port->type = FH_TYPE;
+	}
+}
+
+static int fh_serial_init_port(struct fh_uart_port *myown_port,
+
+ struct platform_device *platdev)
+{
+ struct uart_port *port = &myown_port->port;
+ struct resource *res;
+
+ if (platdev == NULL)
+ return -ENODEV;
+
+
+ myown_port->fh_info.dev = platdev;
+ /* setup info for port */
+ port->dev = &platdev->dev;
+
+ /* sort our the physical and virtual addresses for each UART */
+ res = platform_get_resource(platdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ printk(KERN_ERR "failed to find memory resource for uart\n");
+ return -EINVAL;
+ }
+
+ myown_port->uart_dma.paddr = res->start;
+ myown_port->fh_info.baudrate = BAUDRATE_115200;
+ if (myown_port->fh_info.use_dma)
+ myown_port->fh_info.fifo_ctrl = 0x8f;
+ else
+ myown_port->fh_info.fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER;
+ myown_port->fh_info.line_ctrl = Uart_line_8n2;
+
+ Uart_Init(&myown_port->fh_info);
+ return 0;
+}
+
+static inline int fh_serial_cpufreq_register(struct fh_uart_port* myown_port)
+{
+ return 0;
+}
+
+static ssize_t fh_serial_show_clksrc(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "* %s\n", FH_SERIAL_NAME);
+}
+
+static DEVICE_ATTR(clock_source, S_IRUGO, fh_serial_show_clksrc, NULL);
+
+#ifdef CONFIG_PM
+static int fh_serial_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+
+ uart_suspend_port(&fh_uart_drv, port);
+
+ return 0;
+}
+
+static int fh_serial_resume(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+ int may_wakeup;
+
+ may_wakeup = device_may_wakeup(&pdev->dev);
+
+ uart_resume_port(&fh_uart_drv, port);
+ device_set_wakeup_enable(&pdev->dev, may_wakeup);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static unsigned int SysrqParseHex(char *buff)
+{
+ unsigned int data;
+
+ data = (unsigned int)simple_strtoul(buff, NULL, 16);
+ return data;
+}
+
+static int SysrqGetSingle(void)
+{
+ struct fh_uart_port* myown_port;
+ unsigned int ch;
+
+ myown_port = &fh_own_ports[0];
+ while (1) {
+ if ((fh_uart_readl(myown_port->fh_info.base_add + REG_UART_LSR) & 0x01) == 0x01) {
+ ch = fh_uart_readl(myown_port->fh_info.base_add + REG_UART_RBR) & 0xff;
+ return ch;
+ }
+ }
+}
+
+static int SysrqGetValue(void)
+{
+ unsigned char internal_buffer[11] = {0};
+ unsigned char loop = 0;
+ unsigned char data;
+ unsigned int value;
+ struct fh_uart_port *myown_port;
+
+ myown_port = &fh_own_ports[0];
+ while (1) {
+ if ((fh_uart_readl(myown_port->fh_info.base_add + REG_UART_LSR) & 0x01) == 0x01) {
+ data = fh_uart_readl(myown_port->fh_info.base_add + REG_UART_RBR) & 0xff;
+ /*Get Next Param*/
+ if (data == ' ' || data == '\r' || data == '\n') {
+ internal_buffer[loop] = '\0';
+ value = SysrqParseHex(internal_buffer);
+ return value;
+ } else {
+ internal_buffer[loop] = data;
+ loop++;
+ if (loop >= sizeof(internal_buffer)) {
+ printk(KERN_INFO "Address or length Error!\n");
+ return 0;
+ }
+ }
+ }
+ }
+}
+
+static int SysrqGetDirection(void)
+{
+ unsigned int ch;
+
+ ch = SysrqGetSingle();
+ if (ch == 'w' || ch == 'W')
+ return 1;
+ else if (ch == 'r' || ch == 'R')
+ return 0;
+ else
+ return -1;
+}
+
+static int SysrqGetMode(void)
+{
+ unsigned int ch;
+
+ ch = SysrqGetSingle();
+ if (ch == 'v' || ch == 'V')
+ return 1;
+ else if (ch == 'p' || ch == 'P')
+ return 0;
+ else
+ return -1;
+}
+
+static void sysrq_handle_fh_mem(int key)
+{
+ unsigned int ch;
+ unsigned int mode;
+ unsigned int direction;
+ unsigned int vir_address;
+ unsigned int phy_address;
+ unsigned int len;
+ unsigned int start = 0;
+
+ direction = SysrqGetDirection();
+ if (direction == 1) {
+ printk(KERN_INFO "Write Mode(p or v)\n");
+ mode = SysrqGetMode();
+ if (mode == 0) {
+ printk(KERN_INFO \
+ "Physical Address Write:\n");
+ phy_address = SysrqGetValue();
+ len = SysrqGetValue();
+ printk(KERN_INFO \
+ "Start Physical Address : 0x%x\n", phy_address);
+ printk(KERN_INFO "Write Data : 0x%x\n", len);
+ vir_address = __phys_to_virt(phy_address);
+ writel(len, vir_address);
+ printk(KERN_INFO \
+ "Address:0x%x at 0x%x\n", vir_address, len);
+ } else if (mode == 1) {
+ printk(KERN_INFO \
+ "Virtual Address Write:\n");
+ vir_address = SysrqGetValue();
+ len = SysrqGetValue();
+ writel(len, vir_address);
+ printk(KERN_INFO \
+ "Address:0x%x at 0x%x\n", vir_address, len);
+ } else {
+ printk(KERN_INFO "Select Wrong Mode!\n");
+ return;
+ }
+ } else if (direction == 0) {
+ printk(KERN_INFO "Read Mode(p or v)\n");
+ mode = SysrqGetMode();
+ if (mode == 0) {
+ printk(KERN_INFO \
+ "Physical Address Read:\n");
+ phy_address = SysrqGetValue();
+ len = SysrqGetValue();
+ printk(KERN_INFO \
+ "Start Physical Address : 0x%x\n", phy_address);
+ printk(KERN_INFO \
+ "Read Length : 0x%x\n", len);
+ vir_address = __phys_to_virt(phy_address);
+ for (; start < len; start++) {
+ if (start % 8 == 0)
+ printk(KERN_INFO "\n%02x:",
+ (vir_address + start * 4) & 0xff);
+ ch = readl((vir_address + start * 4));
+ printk("%08x ", ch);
+ }
+ } else if (mode == 1) {
+ printk(KERN_INFO \
+ "Virtual Address Read:\n");
+ vir_address = SysrqGetValue();
+ len = SysrqGetValue();
+ printk(KERN_INFO \
+ "Read Length : 0x%x\n", len);
+ for (; start < len; start++) {
+ if (start % 8 == 0)
+ printk(KERN_INFO "\n%02x:",
+ (vir_address + start * 4) & 0xff);
+ ch = readl((vir_address + start * 4));
+ printk("%08x ", ch);
+ }
+ } else {
+ printk(KERN_INFO "Select Wrong Mode!\n");
+ return;
+ }
+ } else {
+ printk(KERN_INFO "Select Wrong Direction!\n");
+ return;
+ }
+
+}
+
+static void fh_show_syslog(char *log_buf, int len)
+{
+ for (; len; len--) {
+ if (*log_buf == 0x0a) {
+ fh_serial_console_putchar(&fh_own_ports[0].port, '\r');
+ fh_serial_console_putchar(&fh_own_ports[0].port, '\n');
+ log_buf++;
+ }
+ fh_serial_console_putchar(&fh_own_ports[0].port, *log_buf);
+ log_buf++;
+ }
+}
+
+
+static void sysrq_handle_fh_dmesg(int key)
+{
+ int len;
+ char *log_buf = NULL;
+
+ /* Just Dummy */
+ len = 16 * 1024;
+ len = do_syslog(SYSLOG_ACTION_SIZE_UNREAD, \
+ log_buf, len, SYSLOG_FROM_CALL);
+ log_buf = kmalloc(len, GFP_KERNEL);
+ if (log_buf == NULL) {
+ pr_info("Get Syslog Buffer failed!\n");
+ return;
+ }
+ memset((void *)log_buf, 0x0, len);
+ do_syslog(SYSLOG_ACTION_READ, log_buf, len, SYSLOG_FROM_CALL);
+ fh_show_syslog(log_buf, len);
+ kfree(log_buf);
+}
+
+
+static struct sysrq_key_op sysrq_fh_debug_mem_op = {
+ .handler = sysrq_handle_fh_mem,
+ .help_msg = "Fullhan Debug Dump Mem(G)",
+ .action_msg = "Fullhan Dump Mem(w or r)",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static struct sysrq_key_op sysrq_fh_dmesg_op = {
+ .handler = sysrq_handle_fh_dmesg,
+ .help_msg = "Fullhan DMSG(x)",
+ .action_msg = "Fullhan DMSG",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+
+#endif
+
+int fh_serial_probe(struct platform_device *dev)
+{
+	int ret = 0;
+	struct fh_uart_port *myown_port;
+	int filter_no;
+	dma_cap_mask_t mask;
+	struct clk *uart_clk;
+	char clk_name[16];
+
+	if (dev->id >= (sizeof(fh_own_ports)/sizeof(struct fh_uart_port)))
+		{ ret = -EINVAL; goto probe_err; }
+
+	sprintf(clk_name, "uart%d_clk", dev->id);
+	uart_clk = clk_get(NULL, clk_name);
+	if (IS_ERR(uart_clk))
+		return PTR_ERR(uart_clk);
+	ret = clk_enable(uart_clk);
+	if (ret)
+		goto probe_err;
+
+	myown_port = &fh_own_ports[dev->id];
+	if (myown_port->fh_info.use_dma) {
+		filter_no = myown_port->uart_dma.tx_dma_channel;
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		myown_port->uart_dma.dma_tx.chan = dma_request_channel(mask,
+			fh_uart_dma_chan_filter, &filter_no);
+		if (!myown_port->uart_dma.dma_tx.chan)
+			{ ret = -EBUSY; goto probe_err; }
+		filter_no = myown_port->uart_dma.rx_dma_channel;
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		myown_port->uart_dma.dma_rx.chan = dma_request_channel(mask,
+			fh_uart_dma_chan_filter, &filter_no);
+		if (!myown_port->uart_dma.dma_rx.chan) {
+			dma_release_channel(myown_port->uart_dma.dma_tx.chan);
+			myown_port->fh_info.use_dma = 0;
+			ret = -EBUSY; goto probe_err;
+		}
+		myown_port->uart_dma.tx_dma_add = dma_map_single(
+			myown_port->uart_dma.dma_tx.chan->dev->device.parent,
+			(void *) myown_port->uart_dma.tx_dumy_buff,
+			sizeof(myown_port->uart_dma.tx_dumy_buff),
+			DMA_TO_DEVICE);
+		myown_port->uart_dma.rx_dma_add = dma_map_single(
+			myown_port->uart_dma.dma_rx.chan->dev->device.parent,
+			(void *) myown_port->uart_dma.rx_dumy_buff,
+			sizeof(myown_port->uart_dma.rx_dumy_buff),
+			DMA_FROM_DEVICE); /* rx buffer: device -> memory */
+		myown_port->uart_dma.HasInit = 0;
+		myown_port->uart_dma.tx_done = 1;
+		myown_port->uart_dma.tx_count = 0;
+		myown_port->uart_dma.rx_wr_ptr = 0;
+		myown_port->uart_dma.rx_rd_ptr = 0;
+	}
+
+	ret = fh_serial_init_port(myown_port, dev);
+	if (ret < 0)
+		goto probe_err;
+
+	ret = uart_add_one_port(&fh_uart_drv, &myown_port->port);
+	if (ret != 0)
+		printk(KERN_ERR "%s: failed to add one port.\n", __func__);
+
+	platform_set_drvdata(dev, &myown_port->port);
+	ret = device_create_file(&dev->dev, &dev_attr_clock_source);
+
+	if (ret < 0)
+		printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__);
+
+	ret = fh_serial_cpufreq_register(myown_port);
+	if (ret < 0)
+		dev_err(&dev->dev, "failed to add cpufreq notifier\n");
+#ifdef CONFIG_MAGIC_SYSRQ
+	register_sysrq_key('g', &sysrq_fh_debug_mem_op);
+	register_sysrq_key('x', &sysrq_fh_dmesg_op);
+#endif
+	printk(KERN_DEBUG "fh serial probe done\n");
+	return 0;
+
+ probe_err: /* NOTE(review): uart_clk stays enabled here - confirm intent */
+	printk(KERN_ERR "%s: fh serial probe error.\n", __func__);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(fh_serial_probe);
+
+static inline void fh_serial_cpufreq_deregister(struct fh_uart_port* myown_port)
+{
+
+}
+
+
+int __devexit fh_serial_remove(struct platform_device *dev)
+{
+	struct uart_port *port = fh_dev_to_port(&dev->dev);
+	struct fh_uart_port *myown_port = to_fh_uart_port(port);
+	struct device *dev_tx = myown_port->fh_info.use_dma ?
+		myown_port->uart_dma.dma_tx.chan->dev->device.parent : NULL;
+	struct device *dev_rx = myown_port->fh_info.use_dma ?
+		myown_port->uart_dma.dma_rx.chan->dev->device.parent : NULL;
+
+	if (port) {
+		if (myown_port->fh_info.use_dma) {
+			dma_unmap_single(dev_tx,
+				myown_port->uart_dma.tx_dma_add,
+				sizeof(myown_port->uart_dma.tx_dumy_buff),
+				DMA_TO_DEVICE);
+			dma_unmap_single(dev_rx,
+				myown_port->uart_dma.rx_dma_add,
+				sizeof(myown_port->uart_dma.rx_dumy_buff),
+				DMA_FROM_DEVICE);
+			dma_release_channel(myown_port->uart_dma.dma_tx.chan);
+			dma_release_channel(myown_port->uart_dma.dma_rx.chan);
+		}
+		fh_serial_cpufreq_deregister(myown_port);
+		device_remove_file(&dev->dev, &dev_attr_clock_source);
+		uart_remove_one_port(&fh_uart_drv, port);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fh_serial_remove);
+
+int fh_serial_init(struct platform_driver *drv)
+{
+ return platform_driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(fh_serial_init);
+
+static int __init fh_serial_modinit(void)
+{
+	int ret;
+	ret = uart_register_driver(&fh_uart_drv);
+	if (ret < 0) {
+		printk(KERN_ERR "failed to register UART driver\n");
+		return ret; /* propagate the real error, not -1 (-EPERM) */
+	}
+	return 0;
+}
+
+static void __exit fh_serial_modexit(void)
+{
+ uart_unregister_driver(&fh_uart_drv);
+}
+
+
+static int _fh_serial_probe(struct platform_device *dev)
+{
+ return fh_serial_probe(dev);
+}
+
+static struct platform_driver fh_serial_driver = {
+ .probe = _fh_serial_probe,
+ .remove = __devexit_p(fh_serial_remove),
+#ifdef CONFIG_PM
+ .suspend = fh_serial_suspend,
+ .resume = fh_serial_resume,
+#endif
+ .driver = {
+ .name = FH_SERIAL_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init _fh_serial_init(void)
+{
+ return fh_serial_init(&fh_serial_driver);
+}
+
+static void __exit _fh_serial_exit(void)
+{
+ platform_driver_unregister(&fh_serial_driver);
+}
+
+module_init(fh_serial_modinit);
+module_exit(fh_serial_modexit);
+
+module_init(_fh_serial_init);
+module_exit(_fh_serial_exit);
diff --git a/drivers/tty/serial/fh_serial.h b/drivers/tty/serial/fh_serial.h
new file mode 100644
index 00000000..888c8000
--- /dev/null
+++ b/drivers/tty/serial/fh_serial.h
@@ -0,0 +1,200 @@
+/*
+ * fh_serial.h
+ *
+ * Created on: Jul 29, 2014
+ * Author: duobao
+ */
+#ifndef FH_SERIAL_H_
+#define FH_SERIAL_H_
+/****************************************************************************
+ * #include section
+ * add #include here if any
+ ***************************************************************************/
+#include <linux/dmaengine.h>
+#include <linux/scatterlist.h>
+#include <linux/completion.h>
+/****************************************************************************
+ * #define section
+ * add constant #define here if any
+ ***************************************************************************/
+
+#define lift_shift_bit_num(bit_num) (1<<bit_num)
+#define ISR_NUMBER0 (UART0_IRQ)
+#define ISR_NUMBER1 (UART1_IRQ)
+#define ISR_NUMBER2 (UART2_IRQ)
+#define ISR_NUMBER3 (UART3_IRQ)
+#define UART0_PORT 0
+#define UART1_PORT 1
+#define UART2_PORT 2
+#define UART3_PORT 3
+#define UART_MAX_NUM 4
+#define UART_DATA_ARRIVED 1
+#define UART_LCR_DLAB_POS (lift_shift_bit_num(7))
+
+#define UART0_FIFO_SIZE 64
+#define UART1_FIFO_SIZE 64
+#define UART2_FIFO_SIZE 64
+#define UART3_FIFO_SIZE 64
+/****************************************************************************
+* ADT section
+* add Abstract Data Type definition here
+***************************************************************************/
+//error status
+enum {
+ UART_CONFIG_OK = 0,
+ UART_CONFIG_FIFO_OK = 0,
+ UART_CONFIG_LINE_OK = 0,
+ UART_CONFIG_DIVISOR_OK = 0,
+ UART_WRITE_DATA_OK = 0,
+ UART_READ_DATA_OK = 0,
+ UART_CLEAR_ERROR_OK = 0,
+ UART_RESET_RX_POOL_OK = 0,
+ UART_CLEAR_RX_DATA_READY_OK = 0,
+ UART_INIT_OK = 0,
+ UART_CONFIG_PARA_ERROR = lift_shift_bit_num(0),
+ UART_CONFIG_FIFO_ERROR = lift_shift_bit_num(1),
+ UART_IS_BUSY = lift_shift_bit_num(2),
+ UART_DW_FIFO_OVERFLOW = lift_shift_bit_num(3), //dw rxfifo overflow ,maybe rxisr is closed or main clock is too low
+ UART_SW_FIFO_OVERFLOW = lift_shift_bit_num(4), //soft rxfifo overflow , maybe main clk is too low
+ UART_PARITY_ERROR = lift_shift_bit_num(5),
+ UART_FRAME_ERROR = lift_shift_bit_num(6),
+ UART_BREAK_ERROR = lift_shift_bit_num(7),
+ UART_FIFO_EMPTY = lift_shift_bit_num(8),
+};
+
+//interrupt enable
+typedef enum enum_uart_irq {
+ UART_INT_PTIME_POS = (lift_shift_bit_num(7)),
+ UART_INT_EDSSI_POS = (lift_shift_bit_num(3)),
+ UART_INT_ELSI_POS = (lift_shift_bit_num(2)),
+ UART_INT_ETBEI_POS = (lift_shift_bit_num(1)),
+ UART_INT_ERBFI_POS = (lift_shift_bit_num(0)),
+ UART_INT_ALL = 0x0f,
+}uart_irq_e;
+
+//interrupt id
+enum {
+ UART_INT_ID_MODEM = 0,
+ UART_INT_ID_NO_INT = 1,
+ UART_INT_ID_THR_EMPTY = 2,
+ UART_INT_ID_RECEIVE_DATA = 4,
+ UART_INT_ID_RECEIVE_LINE = 6,
+ UART_INT_ID_BUSY = 7,
+ UART_INT_ID_TIME_OUT = 12,
+ UART_FIFO_IS_ENABLE = 0xc0,
+};
+
+typedef enum enum_uart_line {
+ Uart_line_5n1 = 0x00, // 5 data bits, no parity, 1 stop bit
+ Uart_line_5n1_5 = 0x04, // 5 data bits, no parity, 1.5 stop bits
+ Uart_line_5e1 = 0x18, // 5 data bits, even parity, 1 stop bit
+ Uart_line_5e1_5 = 0x1c, // 5 data bits, even parity, 1.5 stop bits
+ Uart_line_5o1 = 0x08, // 5 data bits, odd parity, 1 stop bit
+ Uart_line_5o1_5 = 0x0c, // 5 data bits, odd parity, 1.5 stop bits
+ Uart_line_6n1 = 0x01, // 6 data bits, no parity, 1 stop bit
+ Uart_line_6n2 = 0x05, // 6 data bits, no parity, 2 stop bits
+ Uart_line_6e1 = 0x19, // 6 data bits, even parity, 1 stop bit
+ Uart_line_6e2 = 0x1d, // 6 data bits, even parity, 2 stop bits
+ Uart_line_6o1 = 0x09, // 6 data bits, odd parity, 1 stop bit
+ Uart_line_6o2 = 0x0d, // 6 data bits, odd parity, 2 stop bits
+ Uart_line_7n1 = 0x02, // 7 data bits, no parity, 1 stop bit
+ Uart_line_7n2 = 0x06, // 7 data bits, no parity, 2 stop bits
+ Uart_line_7e1 = 0x1a, // 7 data bits, even parity, 1 stop bit
+ Uart_line_7e2 = 0x1e, // 7 data bits, even parity, 2 stop bits
+ Uart_line_7o1 = 0x0a, // 7 data bits, odd parity, 1 stop bit
+ Uart_line_7o2 = 0x0e, // 7 data bits, odd parity, 2 stop bits
+ Uart_line_8n1 = 0x03, // 8 data bits, no parity, 1 stop bit
+ Uart_line_8n2 = 0x07, // 8 data bits, no parity, 2 stop bits
+ Uart_line_8e1 = 0x1b, // 8 data bits, even parity, 1 stop bit
+ Uart_line_8e2 = 0x1f, // 8 data bits, even parity, 2 stop bits
+ Uart_line_8o1 = 0x0b, // 8 data bits, odd parity, 1 stop bit
+ Uart_line_8o2 = 0x0f // 8 data bits, odd parity, 2 stop bits
+}uart_line_e;
+
+//rx & tx fifo config
+typedef enum enum_uart_fifo {
+ UART_INT_RXFIFO_DEPTH_1 = 0x01, //fifo enable, rx 1 byte, set rx int
+ UART_INT_RXFIFO_DEPTH_QUARTER = 0x41, //fifo enable, rx 1/4 fifo, set rx int
+ UART_INT_RXFIFO_DEPTH_HALF =0x81, //fifo enable, rx 1/2 fifo, set rx int
+ UART_INT_RXFIFO_2LESS_THAN_FULL =0xc1, //fifo enable, rx 2 less than full, set rx int
+ UART_DMA_RXFIFO_DEPTH_HALF = 0x89, /* fifo enable, rx 1/2 fifo, set tx/rx dma */
+}uart_fifo_e;
+
+//line status
+enum {
+ UART_LINE_STATUS_RFE = (lift_shift_bit_num(7)),
+ UART_LINE_STATUS_TEMT = (lift_shift_bit_num(6)),
+ UART_LINE_STATUS_THRE = (lift_shift_bit_num(5)),
+ UART_LINE_STATUS_BI = (lift_shift_bit_num(4)),
+ UART_LINE_STATUS_FE = (lift_shift_bit_num(3)),
+ UART_LINE_STATUS_PE = (lift_shift_bit_num(2)),
+ UART_LINE_STATUS_OE = (lift_shift_bit_num(1)),
+ UART_LINE_STATUS_DR = (lift_shift_bit_num(0)),
+};
+
+//uart status
+enum {
+ UART_STATUS_RFF = (lift_shift_bit_num(4)),
+ UART_STATUS_RFNE = (lift_shift_bit_num(3)),
+ UART_STATUS_TFE = (lift_shift_bit_num(2)),
+ UART_STATUS_TFNF = (lift_shift_bit_num(1)),
+ UART_STATUS_BUSY = (lift_shift_bit_num(0)),
+
+};
+
+#define UART_CLOCK_FREQ (16666667) /* 16.666667 MHz */
+typedef enum enum_uart_baudrate{
+ BAUDRATE_9600 = (((UART_CLOCK_FREQ/9600)+8)/16),
+ BAUDRATE_19200 = (((UART_CLOCK_FREQ/19200)+8)/16),
+ BAUDRATE_38400 = (((UART_CLOCK_FREQ/38400)+8)/16),
+ BAUDRATE_57600 = (((UART_CLOCK_FREQ/57600)+8)/16),
+ BAUDRATE_115200 = (((UART_CLOCK_FREQ/115200)+8)/16),
+ BAUDRATE_194000 = (((UART_CLOCK_FREQ/194000)+8)/16),
+}uart_baudrate_e;
+
+typedef struct _fh_uart_info {
+ const char * name;
+ unsigned int irq_num;
+ unsigned char __iomem *base_add;
+ uart_baudrate_e baudrate;
+ uart_line_e line_ctrl;
+ uart_fifo_e fifo_ctrl;
+ unsigned int use_dma;
+ struct platform_device *dev;
+
+}fh_uart_info;
+
+struct fh_uart_dma_transfer {
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ struct scatterlist sgl[128];
+ unsigned int sgl_data_size[128];
+ unsigned int actual_sgl_size;
+ struct dma_async_tx_descriptor *desc;
+};
+struct fh_uart_dma {
+ u32 tx_dumy_buff[128];
+ u8 rx_dumy_buff[128];
+ u32 rx_wr_ptr;
+ u32 rx_rd_ptr;
+ u32 tx_dma_add;
+ u32 rx_dma_add;
+ u32 tx_hs_no;
+ u32 rx_hs_no;
+ u32 tx_dma_channel;
+ u32 rx_dma_channel;
+ u32 tx_count;
+ u32 tx_done;
+ u32 paddr;
+ u32 rx_xmit_len;
+ u32 HasInit;
+ struct fh_uart_dma_transfer dma_rx;
+ struct fh_uart_dma_transfer dma_tx;
+};
+struct fh_uart_port {
+ fh_uart_info fh_info;
+ struct uart_port port;
+ struct fh_uart_dma uart_dma;
+};
+
+#endif /* FH_SERIAL_H_ */
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 48f17813..f67a21c4 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -42,6 +42,7 @@ config USB_ARCH_HAS_OHCI
default y if ARCH_DAVINCI_DA8XX
default y if ARCH_CNS3XXX
default y if PLAT_SPEAR
+ default y if ARCH_FULLHAN
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 30ddf8dc..7e05b632 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -1,13 +1,16 @@
#
-# Makefile for the kernel USB device drivers.
+# Makefile for the kernel USB device drivers.
#
# Object files in subdirectories
obj-$(CONFIG_USB) += core/
+#obj-$(CONFIG_USB_DWC2) += dwc2/
+
obj-$(CONFIG_USB_MON) += mon/
+obj-$(CONFIG_USB_FH_OTG) += host/
obj-$(CONFIG_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
obj-$(CONFIG_USB_ISP116X_HCD) += host/
@@ -23,7 +26,7 @@ obj-$(CONFIG_USB_HWA_HCD) += host/
obj-$(CONFIG_USB_ISP1760_HCD) += host/
obj-$(CONFIG_USB_IMX21_HCD) += host/
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/
-
+obj-$(CONFIG_USB_S3C_OTG_HOST) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
obj-$(CONFIG_USB_WUSB) += wusbcore/
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ace9f844..d0b57e01 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -393,7 +393,6 @@ rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
char const *s;
static char const langids[4] = {4, USB_DT_STRING, 0x09, 0x04};
- // language ids
switch (id) {
case 0:
/* Array of LANGID codes (0x0409 is MSFT-speak for "en-us") */
@@ -562,7 +561,6 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
case DeviceOutRequest | USB_REQ_SET_INTERFACE:
break;
case DeviceOutRequest | USB_REQ_SET_ADDRESS:
- // wValue == urb->dev->devaddr
dev_dbg (hcd->self.controller, "root hub device address %d\n",
wValue);
break;
@@ -572,7 +570,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
/* ENDPOINT REQUESTS */
case EndpointRequest | USB_REQ_GET_STATUS:
- // ENDPOINT_HALT flag
+
tbuf [0] = 0;
tbuf [1] = 0;
len = 2;
@@ -1582,8 +1580,10 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
usb_unanchor_urb(urb);
/* pass ownership to the completion handler */
+ //printk("god...hand add is %x\n",(unsigned int)urb->complete);
urb->status = status;
urb->complete (urb);
+
atomic_dec (&urb->use_count);
if (unlikely(atomic_read(&urb->reject)))
wake_up (&usb_kill_urb_queue);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a428aa08..769bc355 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2939,6 +2939,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
/* FALL THROUGH */
default:
if (r == 0)
+			/* treat as protocol error (r = -EPROTO below) */
r = -EPROTO;
break;
}
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 4fe92b18..743db7f1 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -3,7 +3,7 @@
#
ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG
-obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
+#obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
obj-$(CONFIG_USB_NET2280) += net2280.o
obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o
obj-$(CONFIG_USB_PXA25X) += pxa25x_udc.o
@@ -11,7 +11,6 @@ obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o
obj-$(CONFIG_USB_IMX) += imx_udc.o
obj-$(CONFIG_USB_GOKU) += goku_udc.o
obj-$(CONFIG_USB_OMAP) += omap_udc.o
-obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
obj-$(CONFIG_USB_AT91) += at91_udc.o
obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
@@ -21,8 +20,6 @@ obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
obj-$(CONFIG_USB_CI13XXX_PCI) += ci13xxx_pci.o
-obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o
-obj-$(CONFIG_USB_S3C_HSUDC) += s3c-hsudc.o
obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
obj-$(CONFIG_USB_EG20T) += pch_udc.o
obj-$(CONFIG_USB_PXA_U2O) += mv_udc.o
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index 93b999e4..6b6be1d9 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -33,8 +33,9 @@
#include "config.c"
#include "epautoconf.c"
-#include "u_audio.c"
+/* #include "u_audio.c" */
#include "f_audio.c"
+#include "audio_poll.c"
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/audio_poll.c b/drivers/usb/gadget/audio_poll.c
new file mode 100644
index 00000000..c2506417
--- /dev/null
+++ b/drivers/usb/gadget/audio_poll.c
@@ -0,0 +1,173 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/semaphore.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+
+#include "f_uac.h"
+
+
+#define init_MUTEX(LOCKNAME) sema_init(LOCKNAME, 1)
+#define DEVICE_NAME "uac_audio"
+#define UAC_STREAM_OFF 0
+#define UAC_STREAM_ON 1
+#define UAC_IOC_SEND_AUDIO 0x499
+#define UAC_IOC_GET_STREAM_STATUS 0x599
+
+
+
+static struct class *cdev_class;
+static struct cdev dev_c;
+static dev_t dev;
+
+
+static struct semaphore sem;
+static wait_queue_head_t outq;
+
+static int gStreamFlag;
+static int gStreamStatus = UAC_STREAM_OFF;
+static int gStaFlag;
+
+
+/*#define UAC_DEV_USE_LOCK*/
+static int lock(void)
+{
+#ifdef UAC_DEV_USE_LOCK
+ return down_interruptible(&sem);
+#else
+ return 0;
+#endif
+}
+
+static void unlock(void)
+{
+#ifdef UAC_DEV_USE_LOCK
+ up(&sem);
+#endif
+}
+
+
+void wake_up_app(void)
+{
+ if (gStreamStatus == UAC_STREAM_ON) {
+ lock();
+ gStreamFlag = 1;
+ unlock();
+ wake_up_interruptible(&outq);
+ }
+}
+
+
+void uac_stream(int on)
+{
+ lock();
+ gStreamStatus = on;
+ gStaFlag = 1;
+ unlock();
+ wake_up_interruptible(&outq);
+}
+
+
+static int audio_ops_open(struct inode *node, struct file *filp)
+{
+ audio_dev_open();
+ return 0;
+}
+
+
+
+static long audio_ops_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case UAC_IOC_SEND_AUDIO:
+ return audio_send_data((void *)arg);
+ break;
+
+ case UAC_IOC_GET_STREAM_STATUS:
+ return gStreamStatus;
+ break;
+ }
+ return 0;
+}
+
+
+static unsigned int audio_ops_poll(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ poll_wait(filp, &outq, wait);
+ lock();
+ if (gStreamFlag) {
+ if (gStreamStatus == UAC_STREAM_ON) {
+ /*Write Stream*/
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ gStreamFlag = 0;
+ }
+
+ if (gStaFlag) {
+ /*Read Status*/
+ mask |= POLLIN | POLLRDNORM;
+ gStaFlag = 0;
+ }
+ unlock();
+ return mask;
+}
+
+
+
+struct file_operations audio_device_fops = {
+poll:
+ audio_ops_poll,
+open :
+ audio_ops_open,
+unlocked_ioctl :
+ audio_ops_ioctl,
+};
+
+
+
+int __init audio_device_init(void)
+{
+ int ret, err;
+ ret = alloc_chrdev_region(&dev, 0, 1, DEVICE_NAME);
+ if (ret)
+ printk("audio device register failure");
+ else {
+ cdev_init(&dev_c, &audio_device_fops);
+ err = cdev_add(&dev_c, dev, 1);
+ if (err) {
+ printk(KERN_NOTICE "error %d adding FC_dev\n", err);
+ unregister_chrdev_region(dev, 1);
+ return err;
+ } else
+ printk("device register success!\n");
+
+ cdev_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(cdev_class)) {
+ printk("ERR:cannot create a cdev_class\n");
+			cdev_del(&dev_c);
+			unregister_chrdev_region(dev, 1); return -1;
+ }
+ device_create(cdev_class, NULL, dev, 0, DEVICE_NAME);
+
+ init_MUTEX(&sem);
+ init_waitqueue_head(&outq);
+ }
+ return ret;
+}
+
+
+void __exit audio_device_exit(void)
+{
+ device_destroy(cdev_class, dev);
+ class_destroy(cdev_class);
+ unregister_chrdev_region(dev, 1);
+ /*printk("device");*/
+}
+
+
diff --git a/drivers/usb/gadget/f_uac.c b/drivers/usb/gadget/f_uac.c
new file mode 100644
index 00000000..3145bff8
--- /dev/null
+++ b/drivers/usb/gadget/f_uac.c
@@ -0,0 +1,915 @@
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/video.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <linux/usb/audio.h>
+#include "f_uac.h"
+
+/* when defined, the function starts deactivated and is activated from
+ * audio_dev_open() */
+/*#define UAC_BIND_DEACTIVATE*/
+static void f_audio_complete(struct usb_ep *ep, struct usb_request *req);
+
+/* logging helpers; note DBG and INFO currently share KERN_INFO and
+ * ERR uses KERN_EMERG — presumably intentional for bring-up, confirm */
+#define UAC_ERR(msg...) printk(KERN_EMERG "UAC, " msg)
+#define UAC_DBG(msg...) printk(KERN_INFO "UAC, " msg)
+#define UAC_INFO(msg...) printk(KERN_INFO "UAC, " msg)
+
+
+
+/* indices into uac_en_us_strings[] */
+#define UAC_STR_ASSOCIATION_IDX 0
+#define UAC_STR_CONTROL_IDX 1
+#define UAC_STR_STREAMING_IDX 2
+
+
+/* USB string descriptors; the .id fields are filled in at bind time
+ * (only the association string is actually allocated, see
+ * uac_bind_config()) */
+static struct usb_string uac_en_us_strings[] = {
+	[UAC_STR_ASSOCIATION_IDX].s = "Fullhan Audio",
+	[UAC_STR_CONTROL_IDX].s = "Audio Control",
+	[UAC_STR_STREAMING_IDX].s = "Audio Streaming",
+	{ }
+};
+
+static struct usb_gadget_strings uac_stringtab = {
+	.language = 0x0409, /* en-us */
+	.strings = uac_en_us_strings,
+};
+
+static struct usb_gadget_strings *uac_function_strings[] = {
+	&uac_stringtab,
+	NULL,
+};
+
+
+
+
+#ifdef ENABLE_SPEAKER_DESC
+
+/* mic + speaker: control intf + two streaming intfs, two entries in
+ * the AC header's baInterfaceNr[] */
+#define UAC_IF_COUNT 3
+#define UAC_COLLECTION_NUM 2
+#define AC_HEADER_TOTAL_LENGTH 0x44
+
+#else
+
+/* mic only */
+#define UAC_IF_COUNT 2
+#define UAC_COLLECTION_NUM 1
+#define AC_HEADER_TOTAL_LENGTH 0x26
+
+
+#endif
+
+/*
+ * Interface Association Descriptor grouping the AudioControl and
+ * AudioStreaming interfaces; bFirstInterface is patched to the real
+ * interface number in uac_function_bind().
+ */
+static struct usb_interface_assoc_descriptor audio_iad_desc = {
+	.bLength = 0x08,
+	.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+	.bFirstInterface = 2,
+	.bInterfaceCount = UAC_IF_COUNT,
+	.bFunctionClass = USB_CLASS_AUDIO,
+	.bFunctionSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+	.bFunctionProtocol = 0x00,
+	.iFunction = 0,
+};
+
+
+/* Standard AudioControl interface; bInterfaceNumber patched at bind */
+static struct usb_interface_descriptor ac_intf_desc = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bInterfaceNumber = 2,
+	.bAlternateSetting = 0,
+	.bNumEndpoints = 0,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+	.bInterfaceProtocol = 0x00,
+	.iInterface = 0,
+};
+
+
+/* NOTE(review): AC_SIZE_NUM appears unused — confirm before removing */
+#define AC_SIZE_NUM 2
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+/* B.3.2 Class-Specific AC Interface Descriptor */
+/* baInterfaceNr[] entries are patched to the real streaming interface
+ * numbers in uac_function_bind() */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+	.bLength = UAC_DT_AC_HEADER_SIZE(UAC_COLLECTION_NUM),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_HEADER,
+	.bcdADC = __constant_cpu_to_le16(0x0100),
+	.wTotalLength = __constant_cpu_to_le16(AC_HEADER_TOTAL_LENGTH),
+	.bInCollection = UAC_COLLECTION_NUM,
+	.baInterfaceNr[0] = 3,
+	.baInterfaceNr[1] = 4,
+};
+
+
+
+/* entity IDs for the microphone path: IT(1) -> FU(5) -> OT(3) */
+#define INPUT_TERMINAL_ID 1
+#define AUDIO_CHN_NUM 1
+static struct uac_input_terminal_descriptor audio_mic_it_desc = {
+	.bLength = UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = INPUT_TERMINAL_ID,
+	.wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE,
+	.bAssocTerminal = 0,
+	.bNrChannels = 1,
+	.wChannelConfig = 0x0,
+};
+
+
+#define OUTPUT_TERMINAL_ID 3
+#define FEATURE_UNIT_ID 5
+
+/* streaming output terminal fed by the mic feature unit */
+static struct uac1_output_terminal_descriptor audio_mic_ot_desc = {
+	.bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = OUTPUT_TERMINAL_ID,
+	.wTerminalType = UAC_TERMINAL_STREAMING,
+	.bAssocTerminal = 0,
+	.bSourceID = FEATURE_UNIT_ID,
+	.iTerminal = 0,
+};
+
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+
+/* mic feature unit exposing mute + volume controls */
+static struct uac_feature_unit_descriptor_0 audio_mic_fu_desc = {
+	.bLength = 0x08,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_FEATURE_UNIT,
+	.bUnitID = FEATURE_UNIT_ID,
+	.bSourceID = INPUT_TERMINAL_ID,
+	.bControlSize = 1,
+	.bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME),
+};
+
+
+/* entity IDs for the speaker path: IT(4) -> FU(6) -> OT(8); the IDs
+ * are defined unconditionally so uac_function_setup() can match
+ * SPEAKER_FU_ID even without ENABLE_SPEAKER_DESC */
+#define SPEAKER_IT_ID 4
+#define SPEAKER_FU_ID 6
+#define SPEAKER_OT_ID 8
+
+#ifdef ENABLE_SPEAKER_DESC
+/* streaming input terminal: host -> device audio */
+static struct uac_input_terminal_descriptor audio_spk_it_desc = {
+	.bLength = UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = SPEAKER_IT_ID,
+	.wTerminalType = UAC_TERMINAL_STREAMING,
+	.bAssocTerminal = 0,
+	.bNrChannels = 1,
+	.wChannelConfig = 0x0,
+};
+
+
+
+/* speaker feature unit exposing mute + volume controls */
+static struct uac_feature_unit_descriptor_0 audio_spk_fu_desc = {
+	.bLength = 0x08,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_FEATURE_UNIT,
+	.bUnitID = SPEAKER_FU_ID,
+	.bSourceID = SPEAKER_IT_ID,
+	.bControlSize = 1,
+	.bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME),
+};
+
+static struct uac1_output_terminal_descriptor audio_spk_ot_desc = {
+	.bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = SPEAKER_OT_ID,
+	.wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER,
+	.bAssocTerminal = 0,
+	.bSourceID = SPEAKER_FU_ID,
+	.iTerminal = 0,
+};
+
+#endif
+
+
+/* Standard AS Interface Descriptor */
+/* alt 0: zero-bandwidth idle setting; bInterfaceNumber patched at
+ * bind time */
+static struct usb_interface_descriptor as_mic_if_alt_0_desc = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bInterfaceNumber = 3,
+	.bAlternateSetting = 0,
+	.bNumEndpoints = 0,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* alt 1: active setting with one ISO IN endpoint */
+static struct usb_interface_descriptor as_mic_if_alt_1_desc = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bInterfaceNumber = 3,
+	.bAlternateSetting = 1,
+	.bNumEndpoints = 1,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+
+/* B.4.2 Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_mic_header_desc = {
+	.bLength = UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_AS_GENERAL,
+	.bTerminalLink = OUTPUT_TERMINAL_ID,
+	.bDelay = 0xff,
+	.wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+};
+
+
+/* single discrete rate: 8 kHz, 16-bit mono PCM */
+#define SAMPLE_FREQ (8000)
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_mic_type_i_desc = {
+	.bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_FORMAT_TYPE,
+	.bFormatType = UAC_FORMAT_TYPE_I,
+	.bNrChannels = AUDIO_CHN_NUM,
+	.bSubframeSize = 2,
+	.bBitResolution = 16,
+	.bSamFreqType = 1,
+	/* 24-bit little-endian sample rate */
+	.tSamFreq[0][0] = (SAMPLE_FREQ & 0xff),
+	.tSamFreq[0][1] = (SAMPLE_FREQ >> 8) & 0xff,
+	.tSamFreq[0][2] = 0,
+};
+
+
+/* ISO IN endpoint; address is reassigned by usb_ep_autoconfig() at
+ * bind.  bmAttributes 0x05 = isochronous + asynchronous sync. */
+static struct usb_endpoint_descriptor audio_mic_streaming_ep = {
+	.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN|5,
+	.bmAttributes = 0x05,/*USB_ENDPOINT_XFER_ISOC,*/
+	.wMaxPacketSize = cpu_to_le16(68),
+	.bInterval = 4,
+};
+
+
+/* Class-specific AS ISO OUT Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_mic_iso_out_desc = {
+	.bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType = USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype = UAC_EP_GENERAL,
+	.bmAttributes = 1,
+	.bLockDelayUnits = 0,
+	.wLockDelay = 0,
+};
+
+
+#ifdef ENABLE_SPEAKER_DESC
+/*speaker*/
+/* Standard AS Interface Descriptor */
+/* alt 0: zero-bandwidth idle setting */
+static struct usb_interface_descriptor as_spk_if_alt_0_desc = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bInterfaceNumber = 4,
+	.bAlternateSetting = 0,
+	.bNumEndpoints = 0,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* alt 1: active setting with one ISO OUT endpoint */
+static struct usb_interface_descriptor as_spk_if_alt_1_desc = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bInterfaceNumber = 4,
+	.bAlternateSetting = 1,
+	.bNumEndpoints = 1,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+
+/* B.4.2 Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_spk_header_desc = {
+	.bLength = UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_AS_GENERAL,
+	.bTerminalLink = SPEAKER_IT_ID,
+	.bDelay = 0xff,
+	.wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+};
+
+
+/* same format as the mic: 8 kHz, 16-bit mono PCM */
+static struct uac_format_type_i_discrete_descriptor_1 as_spk_type_i_desc = {
+	.bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_FORMAT_TYPE,
+	.bFormatType = UAC_FORMAT_TYPE_I,
+	.bNrChannels = AUDIO_CHN_NUM,
+	.bSubframeSize = 2,
+	.bBitResolution = 16,
+	.bSamFreqType = 1,
+	.tSamFreq[0][0] = (SAMPLE_FREQ & 0xff),
+	.tSamFreq[0][1] = (SAMPLE_FREQ >> 8) & 0xff,
+	.tSamFreq[0][2] = 0,
+
+};
+
+
+/* ISO OUT endpoint; address reassigned by usb_ep_autoconfig() */
+static struct usb_endpoint_descriptor audio_spk_streaming_ep = {
+	.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_OUT|6,
+	.bmAttributes = 0x05,/*USB_ENDPOINT_XFER_ISOC,*/
+	.wMaxPacketSize = cpu_to_le16(68),
+	.bInterval = 4,
+};
+
+
+/* Class-specific AS ISO OUT Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_spk_iso_out_desc = {
+	.bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType = USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype = UAC_EP_GENERAL,
+	.bmAttributes = 0,
+	.bLockDelayUnits = 0,
+	.wLockDelay = 0,
+};
+
+#endif
+
+
+
+/* Full descriptor set, in wire order; copied into the function's
+ * descriptor arrays by uac_copy_descriptors() */
+static struct usb_descriptor_header *f_audio_descs[] __initdata = {
+	(struct usb_descriptor_header *)&audio_iad_desc,
+	(struct usb_descriptor_header *)&ac_intf_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&audio_mic_it_desc,
+	(struct usb_descriptor_header *)&audio_mic_ot_desc,
+	(struct usb_descriptor_header *)&audio_mic_fu_desc,
+
+#ifdef ENABLE_SPEAKER_DESC
+	(struct usb_descriptor_header *)&audio_spk_it_desc,
+	(struct usb_descriptor_header *)&audio_spk_ot_desc,
+	(struct usb_descriptor_header *)&audio_spk_fu_desc,
+#endif
+
+
+	(struct usb_descriptor_header *)&as_mic_if_alt_0_desc,
+	(struct usb_descriptor_header *)&as_mic_if_alt_1_desc,
+	(struct usb_descriptor_header *)&as_mic_header_desc,
+	(struct usb_descriptor_header *)&as_mic_type_i_desc,
+	(struct usb_descriptor_header *)&audio_mic_streaming_ep,
+	(struct usb_descriptor_header *)&as_mic_iso_out_desc,
+
+#ifdef ENABLE_SPEAKER_DESC
+	(struct usb_descriptor_header *)&as_spk_if_alt_0_desc,
+	(struct usb_descriptor_header *)&as_spk_if_alt_1_desc,
+	(struct usb_descriptor_header *)&as_spk_header_desc,
+	(struct usb_descriptor_header *)&as_spk_type_i_desc,
+	(struct usb_descriptor_header *)&audio_spk_streaming_ep,
+	(struct usb_descriptor_header *)&as_spk_iso_out_desc,
+#endif
+	NULL,
+};
+
+
+
+
+#define UAC_STREAM_OFF 0
+#define UAC_STREAM_ON 1
+/* size of the ISO IN request pool */
+#define AUDIO_REQ_NUM 32
+
+/* Per-function runtime state for the UAC gadget. */
+struct uac_audio {
+	struct usb_function *func;
+	struct usb_ep *ep;	/* mic ISO IN endpoint */
+	struct usb_request *req[AUDIO_REQ_NUM];	/* ISO request pool */
+	void *req_buf[AUDIO_REQ_NUM];	/* one buffer per request */
+	int req_valid[AUDIO_REQ_NUM];	/* 1 = slot free to refill */
+	int req_buf_size;
+
+	int ac_connected;	/* control intf was set_alt'ed by host */
+	int stream_sta;	/* UAC_STREAM_ON / UAC_STREAM_OFF */
+	int ac_intf;	/* interface numbers assigned at bind */
+	int as_mic_intf;
+	int as_spk_intf;
+
+	int mute_set;	/* last SET_CUR mute value from host */
+	int vol_set;	/* last SET_CUR volume value from host */
+	int set_cmd;	/* control selector of pending SET_CUR, or 0 */
+
+};
+
+
+/* single global instance, allocated in uac_bind_config() */
+static struct uac_audio *g_audio;
+
+
+/* Non-zero while the host has selected the active alt setting. */
+static int uac_is_stream_on(void)
+{
+	return (g_audio->stream_sta == UAC_STREAM_ON);
+}
+
+/*
+ * Turn streaming on/off in response to set_alt on the mic interface.
+ * Notifies the char-device side via uac_stream() first, then — on
+ * disable — dequeues every pooled request before disabling the
+ * endpoint (the order matters: dequeue requires an enabled ep).
+ * Always returns 0.
+ */
+static int uac_audio_enable(int enable)
+{
+	unsigned int i;
+
+	uac_stream(enable);
+	if (!enable) {
+		if (uac_is_stream_on()) {
+			for (i = 0; i < AUDIO_REQ_NUM; ++i)
+				usb_ep_dequeue(g_audio->ep, g_audio->req[i]);
+
+			usb_ep_disable(g_audio->ep);
+			g_audio->stream_sta = UAC_STREAM_OFF;
+			UAC_INFO("STREAM OFF\n");
+		}
+		return 0;
+	}
+
+
+	usb_ep_enable(g_audio->ep, &audio_mic_streaming_ep);
+	g_audio->stream_sta = UAC_STREAM_ON;
+	UAC_INFO("STREAM ON\n");
+	return 0;
+}
+
+
+
+
+
+/*
+ * Arm and queue all AUDIO_REQ_NUM ISO IN requests with zeroed,
+ * zero-length buffers, then wake the userspace feeder.  Called from
+ * uac_function_setup() when the host first addresses the function.
+ * Cleanup over the original: the redundant "req->dma = ~0" (it was
+ * immediately overwritten by DMA_ADDR_INVALID), the "#if 1" wrapper
+ * and the dead "err = 0" were removed.
+ */
+void uac_begin_transfer_data(void)
+{
+	int i, err;
+	struct usb_request *req;
+
+	for (i = 0; i < AUDIO_REQ_NUM; i++) {
+		req = g_audio->req[i];
+		req->buf = g_audio->req_buf[i];
+
+		/* slot is busy until its completion callback fires */
+		g_audio->req_valid[i] = 0;
+		memset(req->buf, 0, g_audio->req_buf_size);
+		req->length = 0;
+		/* the slot index is recovered from req->context in
+		 * f_audio_complete() */
+		req->context = (void *)i;
+		req->dma = DMA_ADDR_INVALID;
+		req->complete = f_audio_complete;
+		err = usb_ep_queue(g_audio->ep, req, GFP_ATOMIC);
+		if (err)
+			UAC_ERR("transfer_data usb_ep_queue error, %s req: %d\n", g_audio->ep->name, err);
+	}
+
+
+	wake_up_app();
+	UAC_INFO("sendMsg audio!\n");
+}
+
+
+/*
+ * Completion handler shared by the ep0 control requests queued from
+ * uac_function_setup() and the ISO IN streaming requests.
+ *
+ * ep0 completions (ep != g_audio->ep) capture SET_CUR payloads for the
+ * mute (1 byte) / volume (2 bytes LE) feature-unit controls.
+ * Streaming completions mark the slot free again and wake the
+ * userspace feeder once more than 3 slots are available.
+ *
+ * Fix: the free-slot counting loop indexed req_valid[] with the
+ * completed slot ("a") instead of the loop variable, so it counted the
+ * same entry AUDIO_REQ_NUM times and woke the feeder after any single
+ * completion.  The dead "requeue: a = 5;" tail was removed.
+ */
+static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	int slot;
+	int i, avlidCount = 0;
+	unsigned char *pData = NULL;
+
+	switch (req->status) {
+	case 0:
+		UAC_INFO("f_audio_complete.\n");
+		if (ep != g_audio->ep) {
+			if (g_audio->set_cmd) {
+				pData = req->buf;
+				if (UAC_FU_MUTE == g_audio->set_cmd) {
+					if (1 == req->actual) {
+						g_audio->mute_set = pData[0];
+						UAC_INFO("mute set data = %02x\n", pData[0]);
+					}
+				} else if (UAC_FU_VOLUME == g_audio->set_cmd) {
+					if (2 == req->actual) {
+						g_audio->vol_set = pData[0] + pData[1]*256;
+						UAC_INFO("vol set data = %02x, %02x\n", pData[0], pData[1]);
+					}
+				}
+			}
+			g_audio->set_cmd = 0;
+			return;
+		}
+		slot = (int)req->context;
+		g_audio->req_valid[slot] = 1;
+		for (i = 0; i < AUDIO_REQ_NUM; i++) {
+			if (g_audio->req_valid[i])
+				avlidCount++;
+			if (avlidCount > 3) {
+				UAC_INFO("f_audio_complete wake_up_app.\n");
+				wake_up_app();
+				break;
+			}
+		}
+		break;
+
+	case -ESHUTDOWN:
+		UAC_INFO("f_audio_complete ESHUTDOWN.\n");
+		break;
+
+	default:
+		UAC_INFO("request completed with status %d.\n", req->status);
+		break;
+	}
+}
+
+
+/* Ring buffer handed in through UAC_IOC_SEND_AUDIO: pData..pEnd is the
+ * sample buffer, pPos the current read cursor (wrapped to pData). */
+struct audio_data {
+	unsigned char *pData;
+	unsigned char *pPos;
+	unsigned char *pEnd;
+};
+
+/*
+ * Drain audio from the userspace-provided ring buffer into every free
+ * ISO request slot and queue them.  Returns the number of bytes
+ * queued (0 when the stream is off).
+ *
+ * NOTE(review): @args originates from the raw ioctl arg and is
+ * dereferenced directly in kernel context — this relies on the struct
+ * and the pData/pEnd pointers being kernel-accessible; expected to
+ * need copy_from_user() for a userspace caller — confirm.
+ */
+int audio_send_data(void *args)
+{
+	struct audio_data *pAudio = args;
+	int i;
+	struct usb_request *req = NULL;
+
+	int len = g_audio->req_buf_size;
+	int send_len = 0;
+	int ret = 0;	/* set once the ring wraps: stop filling slots */
+	int err = 0;
+	if (!uac_is_stream_on())
+		return 0;
+
+	for (i = 0; i < AUDIO_REQ_NUM; i++) {
+		if (g_audio->req_valid[i]) {
+			g_audio->req_valid[i] = 0;
+			req = g_audio->req[i];
+			req->buf = g_audio->req_buf[i];
+			req->dma = DMA_ADDR_INVALID;
+			req->context = (void *)i;
+			req->complete = f_audio_complete;
+
+			/* normalize the cursor: wrap at the end, clamp
+			 * below the start */
+			if (pAudio->pPos >= pAudio->pEnd) {
+				pAudio->pPos = pAudio->pData;
+				ret = 1;
+				break;
+			} else if (pAudio->pPos < pAudio->pData)
+				pAudio->pPos = pAudio->pData;
+
+			if (pAudio->pPos + g_audio->req_buf_size >= pAudio->pEnd) {
+				/* partial tail chunk, then wrap */
+				len = pAudio->pEnd-pAudio->pPos;
+				memcpy(req->buf, pAudio->pPos, len);
+				send_len += len;
+
+				pAudio->pPos = pAudio->pData;
+				ret = 1;
+			} else {
+				len = g_audio->req_buf_size;
+				memcpy(req->buf, pAudio->pPos, g_audio->req_buf_size);
+				pAudio->pPos += g_audio->req_buf_size;
+				send_len += g_audio->req_buf_size;
+			}
+			req->length = len;
+
+			/*if (g_audio->mute_set)
+				memset(req->buf, 0, len);
+			*/
+			err = usb_ep_queue(g_audio->ep, req, GFP_ATOMIC);
+			if (err)
+				UAC_ERR("audio usb_ep_queue error, %s req: %d\n", g_audio->ep->name, err);
+
+			if (ret)
+				break;
+		}
+	}
+
+	if (send_len > 0)
+		UAC_INFO("audio send = %d\n", send_len);
+	return send_len;
+
+}
+
+
+
+/*
+ * Class-request handler for the audio function.
+ *
+ * Requests not addressed to an interface are treated as the "start
+ * streaming" trigger: all ISO requests are queued and the feeder is
+ * woken.  Interface-recipient requests address the mic/speaker
+ * feature units: reads are answered with fixed mute=0 / volume=0x0100
+ * values; the data stage of a SET_CUR is captured later in
+ * f_audio_complete() via g_audio->set_cmd.
+ *
+ * Fix: the control selector and entity id are now derived from the
+ * le16_to_cpu()-converted copies (w_value/w_index); the original read
+ * the raw little-endian wValue/wIndex fields, which is wrong on a
+ * big-endian CPU.
+ */
+static int
+uac_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+	int cs, id;
+	int sendAudioReq = 0;
+	u8 *pData;
+	u8 is_cmd = 1;
+
+	UAC_INFO("setup, req: %02x.%02x value: %04x index: %04x length: %d\n",
+		ctrl->bRequestType, ctrl->bRequest,
+		w_value, w_index, w_length);
+
+	g_audio->set_cmd = 0;
+
+	if ((ctrl->bRequestType & USB_RECIP_MASK) != USB_RECIP_INTERFACE) {
+		value = 3;
+		sendAudioReq = 1;
+	} else {
+		/* high byte of wValue = control selector, high byte of
+		 * wIndex = entity (unit/terminal) id */
+		cs = w_value >> 8;
+		id = w_index >> 8;
+		UAC_INFO("id = %d, cs = %d\n", id, cs);
+		pData = req->buf;
+		if (FEATURE_UNIT_ID == id || SPEAKER_FU_ID == id) {
+			switch (cs) {
+			case UAC_FU_MUTE:
+				value = 1;
+				pData[0] = 0;
+				break;
+
+			case UAC_FU_VOLUME:
+				value = 2;
+				pData[0] = 0;
+				pData[1] = 1;
+				break;
+
+			default:
+				is_cmd = 0;
+				break;
+			}
+		}
+
+		/* OUT transfer: remember which control the coming data
+		 * stage belongs to */
+		if (!(ctrl->bRequestType & USB_DIR_IN)) {
+			if (is_cmd)
+				g_audio->set_cmd = cs;
+		}
+	}
+	if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) {
+		req->length = value;
+		req->zero = value < w_length;
+		req->complete = f_audio_complete;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DBG(cdev, "ep_queue --> %d\n", value);
+			req->status = 0;
+			composite_setup_complete(cdev->gadget->ep0, req);
+		}
+	}
+	if (sendAudioReq)
+		uac_begin_transfer_data();
+	return 0;
+}
+
+
+/*
+ * get_alt callback.
+ * NOTE(review): always reports alt 0, even while the streaming
+ * interface is in alt 1 — a host that round-trips GET_INTERFACE may
+ * see stale state; confirm whether this is intentional.
+ */
+static int
+uac_function_get_alt(struct usb_function *f, unsigned interface)
+{
+	UAC_INFO("get_alt\n");
+	return 0;
+}
+
+
+
+
+/*
+ * set_alt callback: alt != 0 on the mic streaming interface starts
+ * streaming, alt 0 stops it; selecting the control interface marks
+ * the host as connected.  Always returns success.
+ */
+static int
+uac_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
+{
+	UAC_INFO("set_alt, intf = %d, alt = %d\n", interface, alt);
+	if (g_audio->as_mic_intf == interface)
+		uac_audio_enable(0 != alt);
+	else if (g_audio->ac_intf == interface)
+		g_audio->ac_connected = 1;
+	return 0;
+}
+
+
+/*
+ * Called from the char device's open(): if the host has not yet
+ * selected the control interface and the function was bound
+ * deactivated (UAC_BIND_DEACTIVATE), re-activate it so the host
+ * enumerates the audio function.  No-op otherwise.
+ */
+void audio_dev_open(void)
+{
+	UAC_DBG("audio_dev_open!\n");
+	if (!g_audio->ac_connected) {
+	#ifdef UAC_BIND_DEACTIVATE
+		int ret;
+		ret = usb_function_activate(g_audio->func);
+		if (ret < 0)
+			UAC_DBG("audio_dev_open conn error!\n");
+		else
+			UAC_DBG("audio_dev_open conn ok!\n");
+	#endif
+	}
+}
+
+/*
+ * disable callback: only logs.
+ * NOTE(review): does not stop an active stream or reset state —
+ * confirm uac_audio_enable(0) is reached some other way on disconnect.
+ */
+static void
+uac_function_disable(struct usb_function *f)
+{
+
+	INFO(f->config->cdev, "uac_function_disable\n");
+}
+
+/*
+ * Copy one descriptor (or a NULL-terminated descriptor list) into
+ * @mem, recording each destination pointer in @dst and advancing both.
+ * Borrowed from the UVC gadget.  The single-descriptor variant had a
+ * stray ';' after "while (0)", which defeats the do/while(0) idiom
+ * and breaks compilation when used as an if/else branch — removed.
+ */
+#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \
+	do { \
+		memcpy(mem, desc, (desc)->bLength); \
+		*(dst)++ = mem; \
+		mem += (desc)->bLength; \
+	} while (0)
+
+#define UVC_COPY_DESCRIPTORS(mem, dst, src) \
+	do { \
+		const struct usb_descriptor_header * const *__src; \
+		for (__src = src; *__src; ++__src) { \
+			memcpy(mem, *__src, (*__src)->bLength); \
+			*dst++ = mem; \
+			mem += (*__src)->bLength; \
+		} \
+	} while (0)
+
+
+
+/*
+ * Build a flat copy of f_audio_descs[]: a NULL-terminated pointer
+ * array followed by the descriptor bytes, in a single allocation
+ * (same layout the UVC gadget uses).  @speed is currently unused —
+ * the same full-speed set is copied for every speed.
+ *
+ * Fixes: kmalloc() is now checked (the original dereferenced the
+ * result unconditionally) and the unexplained "+20" slop on the
+ * allocation size was dropped — the computed size is exact.
+ * Returns NULL on allocation failure.
+ */
+static struct usb_descriptor_header ** __init
+uac_copy_descriptors(enum usb_device_speed speed)
+{
+	const struct usb_descriptor_header * const *src;
+	struct usb_descriptor_header **dst;
+	struct usb_descriptor_header **hdr;
+	unsigned int n_desc = 0;
+	unsigned int bytes = 0;
+	void *mem;
+
+	for (src = (const struct usb_descriptor_header **)f_audio_descs; *src; ++src) {
+		bytes += (*src)->bLength;
+		n_desc++;
+	}
+
+	UAC_INFO("copy_descriptors bytes = %d, ndesc = %d\n", bytes, n_desc);
+	mem = kmalloc((n_desc + 1) * sizeof(*src) + bytes, GFP_KERNEL);
+	if (!mem)
+		return NULL;
+	hdr = mem;
+	dst = mem;
+	mem += (n_desc + 1) * sizeof(*src);
+
+	UVC_COPY_DESCRIPTORS(mem, dst, (const struct usb_descriptor_header * const *)f_audio_descs);
+	*dst = (struct usb_descriptor_header *)NULL;
+	return hdr;
+
+}
+
+/*
+ * unbind callback: tears down the char device.
+ * NOTE(review): the request pool, buffers and descriptor copies
+ * allocated in uac_function_bind() are not freed here — confirm
+ * whether that leak is acceptable for this platform.
+ */
+static void
+uac_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	audio_device_exit();
+}
+
+/*
+ * Bind callback: grab the streaming endpoint(s), claim interface
+ * numbers (patching them into the descriptors and the AC header's
+ * baInterfaceNr[]), build the descriptor copies and pre-allocate the
+ * ISO request pool.
+ *
+ * Fixes in the speaker path: the original logged ep->name *before*
+ * checking ep for NULL (a NULL dereference on autoconfig failure),
+ * and jumped to error with a non-negative ret.
+ */
+static int __init
+uac_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct usb_ep *ep;
+	int ret = -EINVAL;
+	int i;
+
+	UAC_DBG("bind~\n");
+
+	ep = usb_ep_autoconfig(cdev->gadget, &audio_mic_streaming_ep);
+	if (!ep) {
+		UAC_ERR("Unable to allocate audio EP\n");
+		goto error;
+	}
+
+	g_audio->ep = ep;
+
+
+	/* Allocate interface IDs. */
+	ret = usb_interface_id(c, f);
+	if (ret < 0)
+		goto error;
+
+	UAC_INFO("control_intf = %d\n", ret);
+	audio_iad_desc.bFirstInterface = ret;
+	ac_intf_desc.bInterfaceNumber = ret;
+	g_audio->ac_intf = ret;
+
+	ret = usb_interface_id(c, f);
+	if (ret < 0)
+		goto error;
+
+	UAC_INFO("streaming_intf = %d\n", ret);
+	as_mic_if_alt_0_desc.bInterfaceNumber = ret;
+	as_mic_if_alt_1_desc.bInterfaceNumber = ret;
+	g_audio->as_mic_intf = ret;
+	ac_header_desc.baInterfaceNr[0] = ret;
+
+
+#ifdef ENABLE_SPEAKER_DESC
+
+	ep = usb_ep_autoconfig(cdev->gadget, &audio_spk_streaming_ep);
+	if (!ep) {
+		UAC_ERR("Unable to allocate audio EP\n");
+		ret = -EINVAL;
+		goto error;
+	}
+	UAC_DBG("audio_in_ep_id = %s\n", ep->name);
+
+	ret = usb_interface_id(c, f);
+	if (ret < 0)
+		goto error;
+
+	UAC_INFO("streaming_intf2 = %d\n", ret);
+	as_spk_if_alt_0_desc.bInterfaceNumber = ret;
+	as_spk_if_alt_1_desc.bInterfaceNumber = ret;
+	g_audio->as_spk_intf = ret;
+	ac_header_desc.baInterfaceNr[1] = ret;
+#endif
+
+	/* Copy descriptors (same set for full and high speed). */
+	f->descriptors = uac_copy_descriptors(USB_SPEED_FULL);
+	f->hs_descriptors = uac_copy_descriptors(USB_SPEED_FULL);
+
+
+	g_audio->req_buf_size = 16;
+	for (i = 0; i < AUDIO_REQ_NUM; i++) {
+		g_audio->req[i] = usb_ep_alloc_request(g_audio->ep, GFP_ATOMIC);
+		if (g_audio->req[i]) {
+			g_audio->req_buf[i] = kzalloc(g_audio->req_buf_size, GFP_ATOMIC);
+			g_audio->req[i]->buf = g_audio->req_buf[i];
+		}
+	}
+
+
+#ifdef UAC_BIND_DEACTIVATE
+	ret = usb_function_deactivate(f);
+	if (ret < 0)
+		goto error;
+#endif
+
+	return 0;
+
+error:
+	uac_function_unbind(c, f);
+	return ret;
+}
+
+
+
+
+
+
+/*
+ * Add the UAC function to configuration @c: allocate the association
+ * string id, the global state and the usb_function, register the
+ * function and bring up the control char device.
+ *
+ * Fixes: both kzalloc() results are now checked (the original
+ * dereferenced them unconditionally), usb_add_function() failure is
+ * propagated instead of ignored, and the dead "#if 0" string-id block
+ * (which contained a syntax error) was removed.
+ */
+int __init
+uac_bind_config(struct usb_configuration *c)
+{
+
+	int ret = 0;
+	struct usb_function *func;
+
+	/* Allocate string descriptor numbers. */
+	ret = usb_string_id(c->cdev);
+	if (ret < 0)
+		goto error;
+	uac_en_us_strings[UAC_STR_ASSOCIATION_IDX].id = ret;
+	audio_iad_desc.iFunction = ret;
+
+	g_audio = kzalloc(sizeof(*g_audio), GFP_KERNEL);
+	if (!g_audio) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	func = kzalloc(sizeof(*func), GFP_KERNEL);
+	if (!func) {
+		ret = -ENOMEM;
+		goto error_free_audio;
+	}
+
+	/* Register the function. */
+	func->name = "uac";
+	func->strings = uac_function_strings;
+	func->bind = uac_function_bind;
+	func->unbind = uac_function_unbind;
+	func->get_alt = uac_function_get_alt;
+	func->set_alt = uac_function_set_alt;
+	func->disable = uac_function_disable;
+	func->setup = uac_function_setup;
+
+	g_audio->func = func;
+	ret = usb_add_function(c, func);
+	if (ret < 0)
+		goto error_free_func;
+
+	audio_device_init();
+
+	return 0;
+
+error_free_func:
+	kfree(func);
+error_free_audio:
+	kfree(g_audio);
+	g_audio = NULL;
+error:
+	return ret;
+}
+
+
diff --git a/drivers/usb/gadget/f_uac.h b/drivers/usb/gadget/f_uac.h
new file mode 100644
index 00000000..100a9c80
--- /dev/null
+++ b/drivers/usb/gadget/f_uac.h
@@ -0,0 +1,17 @@
+#ifndef _F_UAC_H_
+#define _F_UAC_H_
+
+#include <linux/usb/composite.h>
+#include <linux/poll.h>
+#include <linux/usb/audio.h>
+
+
+/* char-device glue (uac device file) */
+extern void wake_up_app(void);
+extern int __init audio_device_init(void);
+extern void __exit audio_device_exit(void);
+/* NOTE(review): uac_stream() is not defined in f_uac.c or the device
+ * glue shown here — confirm which unit provides it */
+extern void uac_stream(int on);
+extern void audio_dev_open(void);
+extern int audio_send_data(void *args);
+
+
+#endif
\ No newline at end of file
diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c
index be446b7e..974097c4 100644
--- a/drivers/usb/gadget/f_uvc.c
+++ b/drivers/usb/gadget/f_uvc.c
@@ -29,6 +29,14 @@
#include "uvc.h"
unsigned int uvc_gadget_trace_param;
+/* per-stream endpoint handles shared with the rest of the UVC gadget */
+struct usb_ep *video_stream1_ep1;
+struct usb_ep *video_stream1_ep2;
+struct usb_ep *video_stream2_ep1;
+struct usb_ep *video_stream2_ep2;
+
+/* routing info of the last class request, recorded in
+ * uvc_function_setup() */
+static unsigned int gLastIntf;
+static unsigned int gLastDirOut;
+
/* --------------------------------------------------------------------------
* Function descriptors
@@ -41,7 +49,7 @@ unsigned int uvc_gadget_trace_param;
#define UVC_STRING_STREAMING_IDX 2
static struct usb_string uvc_en_us_strings[] = {
- [UVC_STRING_ASSOCIATION_IDX].s = "UVC Camera",
+ [UVC_STRING_ASSOCIATION_IDX].s = "Fullhan Webcam",
[UVC_STRING_CONTROL_IDX].s = "Video Control",
[UVC_STRING_STREAMING_IDX].s = "Video Streaming",
{ }
@@ -60,7 +68,7 @@ static struct usb_gadget_strings *uvc_function_strings[] = {
#define UVC_INTF_VIDEO_CONTROL 0
#define UVC_INTF_VIDEO_STREAMING 1
-static struct usb_interface_assoc_descriptor uvc_iad __initdata = {
+static struct usb_interface_assoc_descriptor uvc_iad = {
.bLength = sizeof(uvc_iad),
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
.bFirstInterface = 0,
@@ -71,7 +79,7 @@ static struct usb_interface_assoc_descriptor uvc_iad __initdata = {
.iFunction = 0,
};
-static struct usb_interface_descriptor uvc_control_intf __initdata = {
+static struct usb_interface_descriptor uvc_control_intf = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = UVC_INTF_VIDEO_CONTROL,
@@ -83,7 +91,7 @@ static struct usb_interface_descriptor uvc_control_intf __initdata = {
.iInterface = 0,
};
-static struct usb_endpoint_descriptor uvc_control_ep __initdata = {
+static struct usb_endpoint_descriptor uvc_control_ep = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -92,14 +100,14 @@ static struct usb_endpoint_descriptor uvc_control_ep __initdata = {
.bInterval = 8,
};
-static struct uvc_control_endpoint_descriptor uvc_control_cs_ep __initdata = {
+static struct uvc_control_endpoint_descriptor uvc_control_cs_ep = {
.bLength = UVC_DT_CONTROL_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubType = UVC_EP_INTERRUPT,
.wMaxTransferSize = cpu_to_le16(16),
};
-static struct usb_interface_descriptor uvc_streaming_intf_alt0 __initdata = {
+static struct usb_interface_descriptor uvc_streaming1_intf_alt0 = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
@@ -111,7 +119,7 @@ static struct usb_interface_descriptor uvc_streaming_intf_alt0 __initdata = {
.iInterface = 0,
};
-static struct usb_interface_descriptor uvc_streaming_intf_alt1 __initdata = {
+static struct usb_interface_descriptor uvc_streaming1_intf_alt1 = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
@@ -123,7 +131,28 @@ static struct usb_interface_descriptor uvc_streaming_intf_alt1 __initdata = {
.iInterface = 0,
};
-static struct usb_endpoint_descriptor uvc_streaming_ep = {
+static struct usb_endpoint_descriptor uvc_streaming1_ep = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = cpu_to_le16(1020),/*1020*/
+ .bInterval = 1,
+};
+
+static struct usb_interface_descriptor uvc_streaming1_intf_alt2 = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
+ .bAlternateSetting = 2,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = UVC_SC_VIDEOSTREAMING,
+ .bInterfaceProtocol = 0x00,
+ .iInterface = 0,
+};
+
+static struct usb_endpoint_descriptor uvc_streaming1_ep2 = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -132,18 +161,96 @@ static struct usb_endpoint_descriptor uvc_streaming_ep = {
.bInterval = 1,
};
-static const struct usb_descriptor_header * const uvc_fs_streaming[] = {
- (struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
- (struct usb_descriptor_header *) &uvc_streaming_ep,
+static const struct usb_descriptor_header * const uvc_fs_streaming1[] = {
+ (struct usb_descriptor_header *) &uvc_streaming1_intf_alt1,
+ (struct usb_descriptor_header *) &uvc_streaming1_ep,
+ (struct usb_descriptor_header *) &uvc_streaming1_intf_alt2,
+ (struct usb_descriptor_header *) &uvc_streaming1_ep2,
NULL,
};
-static const struct usb_descriptor_header * const uvc_hs_streaming[] = {
- (struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
- (struct usb_descriptor_header *) &uvc_streaming_ep,
+static const struct usb_descriptor_header * const uvc_hs_streaming1[] = {
+ (struct usb_descriptor_header *) &uvc_streaming1_intf_alt1,
+ (struct usb_descriptor_header *) &uvc_streaming1_ep,
+ (struct usb_descriptor_header *) &uvc_streaming1_intf_alt2,
+ (struct usb_descriptor_header *) &uvc_streaming1_ep2,
NULL,
};
+
+
+
+/* Second video streaming interface (UVC_DOUBLE_STREAM): alt 0 idle,
+ * alt 1 with a 1020-byte ISO IN endpoint, alt 2 with a 512-byte one.
+ * bInterfaceNumber is presumably patched at bind like the first
+ * stream's — confirm against the bind hunk. */
+static struct usb_interface_descriptor uvc_streaming2_intf_alt0 = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
+	.bAlternateSetting = 0,
+	.bNumEndpoints = 0,
+	.bInterfaceClass = USB_CLASS_VIDEO,
+	.bInterfaceSubClass = UVC_SC_VIDEOSTREAMING,
+	.bInterfaceProtocol = 0x00,
+	.iInterface = 0,
+};
+
+static struct usb_interface_descriptor uvc_streaming2_intf_alt1 = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
+	.bAlternateSetting = 1,
+	.bNumEndpoints = 1,
+	.bInterfaceClass = USB_CLASS_VIDEO,
+	.bInterfaceSubClass = UVC_SC_VIDEOSTREAMING,
+	.bInterfaceProtocol = 0x00,
+	.iInterface = 0,
+};
+
+static struct usb_endpoint_descriptor uvc_streaming2_ep = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize = cpu_to_le16(1020), /*1020*/
+	.bInterval = 1,
+};
+
+static struct usb_interface_descriptor uvc_streaming2_intf_alt2 = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
+	.bAlternateSetting = 2,
+	.bNumEndpoints = 1,
+	.bInterfaceClass = USB_CLASS_VIDEO,
+	.bInterfaceSubClass = UVC_SC_VIDEOSTREAMING,
+	.bInterfaceProtocol = 0x00,
+	.iInterface = 0,
+};
+
+static struct usb_endpoint_descriptor uvc_streaming2_ep2 = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize = cpu_to_le16(512),
+	.bInterval = 1,
+};
+
+/* full-speed / high-speed descriptor lists for stream 2 (identical) */
+static const struct usb_descriptor_header * const uvc_fs_streaming2[] = {
+	(struct usb_descriptor_header *) &uvc_streaming2_intf_alt1,
+	(struct usb_descriptor_header *) &uvc_streaming2_ep,
+	(struct usb_descriptor_header *) &uvc_streaming2_intf_alt2,
+	(struct usb_descriptor_header *) &uvc_streaming2_ep2,
+	NULL,
+};
+
+static const struct usb_descriptor_header * const uvc_hs_streaming2[] = {
+	(struct usb_descriptor_header *) &uvc_streaming2_intf_alt1,
+	(struct usb_descriptor_header *) &uvc_streaming2_ep,
+	(struct usb_descriptor_header *) &uvc_streaming2_intf_alt2,
+	(struct usb_descriptor_header *) &uvc_streaming2_ep2,
+	NULL,
+};
+
+
/* --------------------------------------------------------------------------
* Control requests
*/
@@ -169,14 +276,14 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
static int
uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
- struct uvc_device *uvc = to_uvc(f);
+/* struct uvc_device *uvc = to_uvc(f);*/
+ struct uvc_common *comm = to_common(f);
struct v4l2_event v4l2_event;
struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+ unsigned int intf = ctrl->wIndex & 0xff;
- /* printk(KERN_INFO "setup request %02x %02x value %04x index %04x %04x\n",
- * ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue),
- * le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength));
- */
+ gLastIntf = intf;
+ gLastDirOut = (ctrl->bRequestType & USB_DIR_OUT) == USB_DIR_OUT;
if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
INFO(f->config->cdev, "invalid request type\n");
@@ -190,8 +297,15 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_SETUP;
memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
- v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+#ifdef UVC_DOUBLE_STREAM
+ if (intf == comm->uvc2->streaming_intf) {
+ v4l2_event_queue(comm->uvc2->vdev, &v4l2_event);
+ }
+ else
+#endif
+ v4l2_event_queue(comm->uvc1->vdev, &v4l2_event);
return 0;
}
@@ -199,58 +313,82 @@ static int
uvc_function_get_alt(struct usb_function *f, unsigned interface)
{
struct uvc_device *uvc = to_uvc(f);
+ struct uvc_common *comm = to_common(f);
INFO(f->config->cdev, "uvc_function_get_alt(%u)\n", interface);
- if (interface == uvc->control_intf)
+ if (interface == uvc->comm->control_intf) {
return 0;
- else if (interface != uvc->streaming_intf)
- return -EINVAL;
+ } else if (interface == comm->uvc1->streaming_intf)
+ return comm->uvc1->state ==
+ UVC_STATE_STREAMING ? 1 : 0;
+
+#ifdef UVC_DOUBLE_STREAM
+ else if (interface == comm->uvc2->streaming_intf)
+ return comm->uvc2->state == UVC_STATE_STREAMING ? 1 : 0;
+
+#endif
else
- return uvc->state == UVC_STATE_STREAMING ? 1 : 0;
+ return -EINVAL;
+
}
static int
uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
{
struct uvc_device *uvc = to_uvc(f);
+ struct uvc_common *comm = to_common(f);
struct v4l2_event v4l2_event;
struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+ struct uvc_device *uvc1 = comm->uvc1;
+ struct uvc_device *uvc2 = comm->uvc2;
+
+
+ if (interface == uvc1->streaming_intf)
+ uvc = uvc1;
+
+#ifdef UVC_DOUBLE_STREAM
+ else if (interface == uvc2->streaming_intf)
+ uvc = uvc2;
+
+#endif
+
INFO(f->config->cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt);
- if (interface == uvc->control_intf) {
+ if (interface == uvc->comm->control_intf) {
if (alt)
return -EINVAL;
- if (uvc->state == UVC_STATE_DISCONNECTED) {
+ if (uvc1->state == UVC_STATE_DISCONNECTED) {
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_CONNECT;
uvc_event->speed = f->config->cdev->gadget->speed;
- v4l2_event_queue(uvc->vdev, &v4l2_event);
-
- uvc->state = UVC_STATE_CONNECTED;
+ v4l2_event_queue(uvc1->vdev, &v4l2_event);
+ uvc1->state = UVC_STATE_CONNECTED;
}
+#ifdef UVC_DOUBLE_STREAM
+ if (uvc2->state == UVC_STATE_DISCONNECTED) {
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_CONNECT;
+ uvc_event->speed = f->config->cdev->gadget->speed;
+ v4l2_event_queue(uvc2->vdev, &v4l2_event);
+
+ uvc2->state = UVC_STATE_CONNECTED;
+ }
+#endif
return 0;
}
- if (interface != uvc->streaming_intf)
+ if (interface != uvc1->streaming_intf && interface !=
+ uvc2->streaming_intf)
return -EINVAL;
- /* TODO
- if (usb_endpoint_xfer_bulk(&uvc->desc.vs_ep))
- return alt ? -EINVAL : 0;
- */
-
switch (alt) {
case 0:
if (uvc->state != UVC_STATE_STREAMING)
return 0;
-
- if (uvc->video.ep)
- usb_ep_disable(uvc->video.ep);
-
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_STREAMOFF;
v4l2_event_queue(uvc->vdev, &v4l2_event);
@@ -259,16 +397,65 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
break;
case 1:
+ case 2:
if (uvc->state != UVC_STATE_CONNECTED)
return 0;
- if (uvc->video.ep)
- usb_ep_enable(uvc->video.ep, &uvc_streaming_ep);
-
memset(&v4l2_event, 0, sizeof(v4l2_event));
+ if (interface == uvc1->streaming_intf) {
+ if (alt == 2) {
+ uvc->video.ep = video_stream1_ep2;
+ v4l2_event.u.data[0] =
+ uvc_streaming1_ep2.wMaxPacketSize&0xff;
+ v4l2_event.u.data[1] =
+ uvc_streaming1_ep2.wMaxPacketSize>>8;
+ } else {
+ uvc->video.ep = video_stream1_ep1;
+ v4l2_event.u.data[0] =
+ uvc_streaming1_ep.wMaxPacketSize&0xff;
+ v4l2_event.u.data[1] =
+ uvc_streaming1_ep.wMaxPacketSize>>8;
+
+ }
+
+ if (uvc->video.ep) {
+ if (alt == 2)
+ usb_ep_enable(uvc->video.ep,\
+ &uvc_streaming1_ep2);
+ else
+ usb_ep_enable(uvc->video.ep,\
+ &uvc_streaming1_ep);
+ }
+ }
+#ifdef UVC_DOUBLE_STREAM
+ else if (interface == uvc2->streaming_intf) {
+ if (alt == 2) {
+ uvc->video.ep = video_stream2_ep2;
+ v4l2_event.u.data[0] =
+ uvc_streaming2_ep2.wMaxPacketSize&0xff;
+ v4l2_event.u.data[1] =
+ uvc_streaming2_ep2.wMaxPacketSize>>8;
+ } else {
+ uvc->video.ep = video_stream2_ep1;
+ v4l2_event.u.data[0] =
+ uvc_streaming2_ep.wMaxPacketSize&0xff;
+ v4l2_event.u.data[1] =
+ uvc_streaming2_ep.wMaxPacketSize>>8;
+
+ }
+
+ if (uvc->video.ep) {
+ if (alt == 2)
+ usb_ep_enable(uvc->video.ep,\
+ &uvc_streaming2_ep2);
+ else
+ usb_ep_enable(uvc->video.ep,\
+ &uvc_streaming2_ep);
+ }
+ }
+#endif
v4l2_event.type = UVC_EVENT_STREAMON;
v4l2_event_queue(uvc->vdev, &v4l2_event);
-
uvc->state = UVC_STATE_STREAMING;
break;
@@ -282,16 +469,22 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
static void
uvc_function_disable(struct usb_function *f)
{
- struct uvc_device *uvc = to_uvc(f);
+ /*struct uvc_device *uvc = to_uvc(f);*/
+ struct uvc_common *comm = to_common(f);
struct v4l2_event v4l2_event;
INFO(f->config->cdev, "uvc_function_disable\n");
+ printk(KERN_EMERG "######### uvc disable #########\n");
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_DISCONNECT;
- v4l2_event_queue(uvc->vdev, &v4l2_event);
+ v4l2_event_queue(comm->uvc1->vdev, &v4l2_event);
+ comm->uvc1->state = UVC_STATE_DISCONNECTED;
- uvc->state = UVC_STATE_DISCONNECTED;
+#ifdef UVC_DOUBLE_STREAM
+ v4l2_event_queue(comm->uvc2->vdev, &v4l2_event);
+ comm->uvc2->state = UVC_STATE_DISCONNECTED;
+#endif
}
/* --------------------------------------------------------------------------
@@ -301,20 +494,20 @@ uvc_function_disable(struct usb_function *f)
void
uvc_function_connect(struct uvc_device *uvc)
{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct usb_composite_dev *cdev = uvc->comm->func.config->cdev;
int ret;
-
- if ((ret = usb_function_activate(&uvc->func)) < 0)
+ ret = usb_function_activate(&uvc->comm->func);
+ if (ret < 0)
INFO(cdev, "UVC connect failed with %d\n", ret);
}
void
uvc_function_disconnect(struct uvc_device *uvc)
{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct usb_composite_dev *cdev = uvc->comm->func.config->cdev;
int ret;
-
- if ((ret = usb_function_deactivate(&uvc->func)) < 0)
+ ret = usb_function_deactivate(&uvc->comm->func);
+ if (ret < 0)
INFO(cdev, "UVC disconnect failed with %d\n", ret);
}
@@ -325,7 +518,7 @@ uvc_function_disconnect(struct uvc_device *uvc)
static int
uvc_register_video(struct uvc_device *uvc)
{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct usb_composite_dev *cdev = uvc->comm->func.config->cdev;
struct video_device *video;
/* TODO reference counting. */
@@ -362,12 +555,12 @@ uvc_register_video(struct uvc_device *uvc)
} \
} while (0)
-static struct usb_descriptor_header ** __init
+static struct usb_descriptor_header** /*__init*/
uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
{
struct uvc_input_header_descriptor *uvc_streaming_header;
struct uvc_header_descriptor *uvc_control_header;
- const struct uvc_descriptor_header * const *uvc_streaming_cls;
+ struct uvc_descriptor_header **uvc_streaming_cls;
const struct usb_descriptor_header * const *uvc_streaming_std;
const struct usb_descriptor_header * const *src;
struct usb_descriptor_header **dst;
@@ -377,11 +570,30 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
unsigned int n_desc;
unsigned int bytes;
void *mem;
+ int needBufLen;
+
+
+ unsigned int stream2_avalid = 0;
+ struct uvc_descriptor_header **uvc_streaming2_cls = NULL;
+ const struct usb_descriptor_header * const *uvc_streaming2_std = NULL;
+ unsigned int streaming2_size = 0;
+
+ stream2_avalid = (uvc->comm->desc.fs_streaming2 != NULL);
uvc_streaming_cls = (speed == USB_SPEED_FULL)
- ? uvc->desc.fs_streaming : uvc->desc.hs_streaming;
+ ? uvc->comm->desc.fs_streaming : uvc->comm->desc.hs_streaming;
uvc_streaming_std = (speed == USB_SPEED_FULL)
- ? uvc_fs_streaming : uvc_hs_streaming;
+ ? uvc_fs_streaming1 : uvc_hs_streaming1;
+
+
+ if (stream2_avalid) {
+ uvc_streaming2_cls = (speed == USB_SPEED_FULL)
+ ? uvc->comm->desc.fs_streaming2 : uvc->comm->desc.hs_streaming2;
+ uvc_streaming2_std = (speed == USB_SPEED_FULL)
+ ? uvc_fs_streaming2 : uvc_hs_streaming2;
+
+ uvc_iad.bInterfaceCount = 3;
+ }
/* Descriptors layout
*
@@ -399,16 +611,17 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
control_size = 0;
streaming_size = 0;
bytes = uvc_iad.bLength + uvc_control_intf.bLength
- + uvc_control_ep.bLength + uvc_control_cs_ep.bLength
- + uvc_streaming_intf_alt0.bLength;
+ + uvc_control_ep.bLength + uvc_control_cs_ep.bLength
+ + uvc_streaming1_intf_alt0.bLength;
n_desc = 5;
- for (src = (const struct usb_descriptor_header**)uvc->desc.control; *src; ++src) {
+
+ for (src = (const struct usb_descriptor_header **)uvc->comm->desc.control; *src; ++src) {
control_size += (*src)->bLength;
bytes += (*src)->bLength;
n_desc++;
}
- for (src = (const struct usb_descriptor_header**)uvc_streaming_cls; *src; ++src) {
+ for (src = (const struct usb_descriptor_header **)uvc_streaming_cls; *src; ++src) {
streaming_size += (*src)->bLength;
bytes += (*src)->bLength;
n_desc++;
@@ -418,7 +631,28 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
n_desc++;
}
- mem = kmalloc((n_desc + 1) * sizeof(*src) + bytes, GFP_KERNEL);
+ if (stream2_avalid) {
+ bytes += uvc_streaming2_intf_alt0.bLength;
+ n_desc++;
+ for (src = (const struct usb_descriptor_header **)uvc_streaming2_cls; *src; ++src) {
+ streaming2_size += (*src)->bLength;
+ bytes += (*src)->bLength;
+ n_desc++;
+ }
+
+ for (src = uvc_streaming2_std; *src; ++src) {
+ bytes += (*src)->bLength;
+ n_desc++;
+ }
+ }
+
+
+
+/* printk(KERN_EMERG "#########uvc_copy_descriptors 44\n");*/
+
+ needBufLen = (n_desc + 1) * sizeof(*src) + bytes;
+
+ mem = kmalloc(needBufLen, GFP_KERNEL);
if (mem == NULL)
return NULL;
@@ -432,24 +666,51 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
uvc_control_header = mem;
UVC_COPY_DESCRIPTORS(mem, dst,
- (const struct usb_descriptor_header**)uvc->desc.control);
+ (const struct usb_descriptor_header **)uvc->comm->desc.control);
uvc_control_header->wTotalLength = cpu_to_le16(control_size);
- uvc_control_header->bInCollection = 1;
+
uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf;
+ if (stream2_avalid) {
+ uvc_control_header->baInterfaceNr[1] =
+ uvc->comm->uvc2->streaming_intf;
+ uvc_control_header->bInCollection = 2;
+
+ } else {
+ uvc_control_header->bInCollection = 1;
+ }
+
UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_ep);
UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep);
- UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0);
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming1_intf_alt0);
uvc_streaming_header = mem;
UVC_COPY_DESCRIPTORS(mem, dst,
- (const struct usb_descriptor_header**)uvc_streaming_cls);
+ (const struct usb_descriptor_header **)uvc_streaming_cls);
uvc_streaming_header->wTotalLength = cpu_to_le16(streaming_size);
- uvc_streaming_header->bEndpointAddress = uvc_streaming_ep.bEndpointAddress;
+ uvc_streaming_header->bEndpointAddress =
+ uvc_streaming1_ep.bEndpointAddress;
UVC_COPY_DESCRIPTORS(mem, dst, uvc_streaming_std);
+ if (stream2_avalid) {
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming2_intf_alt0);
+ uvc_streaming_header = mem;
+ UVC_COPY_DESCRIPTORS(mem, dst,
+ (const struct usb_descriptor_header **)uvc_streaming2_cls);
+ uvc_streaming_header->wTotalLength =
+ cpu_to_le16(streaming2_size);
+ uvc_streaming_header->bEndpointAddress =
+ uvc_streaming2_ep.bEndpointAddress;
+#ifdef UVC_DOUBLE_STREAM
+ uvc_streaming_header->bTerminalLink++;
+#endif
+
+ UVC_COPY_DESCRIPTORS(mem, dst, uvc_streaming2_std);
+ }
+
*dst = NULL;
+
return hdr;
}
@@ -460,6 +721,7 @@ uvc_function_unbind(struct usb_configuration *c, struct usb_function *f)
struct uvc_device *uvc = to_uvc(f);
INFO(cdev, "uvc_function_unbind\n");
+ printk(KERN_EMERG "######### uvc unbind #########\n");
if (uvc->vdev) {
if (uvc->vdev->minor == -1)
@@ -469,14 +731,14 @@ uvc_function_unbind(struct usb_configuration *c, struct usb_function *f)
uvc->vdev = NULL;
}
- if (uvc->control_ep)
- uvc->control_ep->driver_data = NULL;
+ if (uvc->comm->control_ep)
+ uvc->comm->control_ep->driver_data = NULL;
if (uvc->video.ep)
uvc->video.ep->driver_data = NULL;
- if (uvc->control_req) {
- usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
- kfree(uvc->control_buf);
+ if (uvc->comm->control_req) {
+ usb_ep_free_request(cdev->gadget->ep0, uvc->comm->control_req);
+ kfree(uvc->comm->control_buf);
}
kfree(f->descriptors);
@@ -490,9 +752,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
+ struct uvc_common *comm = to_common(f);
+ struct uvc_device *uvc1, *uvc2;
struct usb_ep *ep;
int ret = -EINVAL;
+ uvc1 = comm->uvc1;
+ uvc2 = comm->uvc2;
+
INFO(cdev, "uvc_function_bind\n");
/* Allocate endpoints. */
@@ -501,64 +768,119 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
INFO(cdev, "Unable to allocate control EP\n");
goto error;
}
- uvc->control_ep = ep;
- ep->driver_data = uvc;
- ep = usb_ep_autoconfig(cdev->gadget, &uvc_streaming_ep);
+ comm->control_ep = ep;
+ ep->driver_data = uvc;
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_streaming1_ep);
if (!ep) {
INFO(cdev, "Unable to allocate streaming EP\n");
goto error;
}
+ video_stream1_ep1 = ep;
uvc->video.ep = ep;
- ep->driver_data = uvc;
-
+ ep->driver_data = uvc1;
+ video_stream1_ep2 =
+ usb_ep_autoconfig(cdev->gadget, &uvc_streaming1_ep2);
+ if (!video_stream1_ep2) {
+ INFO(cdev, "Unable to allocate streaming EP2\n");
+ goto error;
+ }
+ video_stream1_ep2->driver_data = uvc1;
/* Allocate interface IDs. */
- if ((ret = usb_interface_id(c, f)) < 0)
+ ret = usb_interface_id(c, f);
+ if (ret < 0)
goto error;
uvc_iad.bFirstInterface = ret;
uvc_control_intf.bInterfaceNumber = ret;
- uvc->control_intf = ret;
-
- if ((ret = usb_interface_id(c, f)) < 0)
+ comm->control_intf = ret;
+ ret = usb_interface_id(c, f);
+ if (ret < 0)
goto error;
- uvc_streaming_intf_alt0.bInterfaceNumber = ret;
- uvc_streaming_intf_alt1.bInterfaceNumber = ret;
- uvc->streaming_intf = ret;
+
+ uvc_streaming1_intf_alt0.bInterfaceNumber = ret;
+ uvc_streaming1_intf_alt1.bInterfaceNumber = ret;
+ uvc_streaming1_intf_alt2.bInterfaceNumber = ret;
+ uvc1->streaming_intf = ret;
+
+
+ if (NULL != comm->desc.fs_streaming2) {
+
+ video_stream2_ep1 =
+ usb_ep_autoconfig(cdev->gadget, &uvc_streaming2_ep);
+ if (!video_stream2_ep1) {
+ INFO(cdev, "Unable to allocate streaming EP\n");
+ goto error;
+ }
+
+ uvc2->video.ep = video_stream2_ep1;
+ video_stream2_ep1->driver_data = uvc2;
+
+ video_stream2_ep2 =
+ usb_ep_autoconfig(cdev->gadget, &uvc_streaming2_ep2);
+ if (!video_stream2_ep2) {
+ INFO(cdev, "Unable to allocate streaming EP2\n");
+ goto error;
+ }
+
+ video_stream2_ep2->driver_data = uvc2;
+ ret = usb_interface_id(c, f);
+ if (ret < 0)
+ goto error;
+ uvc_streaming2_intf_alt0.bInterfaceNumber = ret;
+ uvc_streaming2_intf_alt1.bInterfaceNumber = ret;
+ uvc_streaming2_intf_alt2.bInterfaceNumber = ret;
+ uvc2->streaming_intf = ret;
+ }
/* Copy descriptors. */
f->descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
/* Preallocate control endpoint request. */
- uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
- uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL);
- if (uvc->control_req == NULL || uvc->control_buf == NULL) {
+ comm->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
+ comm->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL);
+ if (uvc->comm->control_req == NULL || uvc->comm->control_buf == NULL) {
ret = -ENOMEM;
goto error;
}
- uvc->control_req->buf = uvc->control_buf;
- uvc->control_req->complete = uvc_function_ep0_complete;
- uvc->control_req->context = uvc;
+ comm->control_req->buf = uvc->comm->control_buf;
+ comm->control_req->complete = uvc_function_ep0_complete;
+ comm->control_req->context = uvc;
/* Avoid letting this gadget enumerate until the userspace server is
* active.
*/
- if ((ret = usb_function_deactivate(f)) < 0)
+ ret = usb_function_deactivate(f);
+ if (ret < 0)
goto error;
/* Initialise video. */
- ret = uvc_video_init(&uvc->video);
+ ret = uvc_video_init(&uvc1->video);
if (ret < 0)
goto error;
-
/* Register a V4L2 device. */
- ret = uvc_register_video(uvc);
+ ret = uvc_register_video(uvc1);
if (ret < 0) {
printk(KERN_INFO "Unable to register video device\n");
goto error;
}
+ if (NULL != comm->desc.fs_streaming2) {
+ ret = usb_function_deactivate(f);
+ if (ret < 0)
+ goto error;
+ /* Initialise video. */
+ ret = uvc_video_init(&uvc2->video);
+ if (ret < 0)
+ goto error;
+ /* Register a V4L2 device. */
+ ret = uvc_register_video(uvc2);
+ if (ret < 0) {
+ printk(KERN_INFO "Unable to register video2 device\n");
+ goto error;
+ }
+ }
return 0;
error:
@@ -587,6 +909,8 @@ uvc_bind_config(struct usb_configuration *c,
const struct uvc_descriptor_header * const *hs_streaming)
{
struct uvc_device *uvc;
+ struct uvc_device *uvc2;
+ struct uvc_common *comm;
int ret = 0;
/* TODO Check if the USB device controller supports the required
@@ -601,54 +925,92 @@ uvc_bind_config(struct usb_configuration *c,
uvc->state = UVC_STATE_DISCONNECTED;
+
+ comm = kzalloc(sizeof(*comm), GFP_KERNEL);
+ if (comm == NULL)
+ return -ENOMEM;
+
+ uvc->comm = comm;
+ comm->uvc1 = uvc;
+
+
+ uvc2 = kzalloc(sizeof(*uvc), GFP_KERNEL);
+ if (uvc2 == NULL)
+ return -ENOMEM;
+
+ uvc2->state = UVC_STATE_DISCONNECTED;
+ uvc2->comm = comm;
+ comm->uvc2 = uvc2;
+
/* Validate the descriptors. */
if (control == NULL || control[0] == NULL ||
- control[0]->bDescriptorSubType != UVC_VC_HEADER)
+ control[0]->bDescriptorSubType != UVC_VC_HEADER)
goto error;
if (fs_streaming == NULL || fs_streaming[0] == NULL ||
- fs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER)
+ fs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER)
goto error;
if (hs_streaming == NULL || hs_streaming[0] == NULL ||
- hs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER)
+ hs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER)
goto error;
- uvc->desc.control = control;
- uvc->desc.fs_streaming = fs_streaming;
- uvc->desc.hs_streaming = hs_streaming;
+ uvc->comm->desc.control = (struct uvc_descriptor_header **)control;
+ uvc->comm->desc.fs_streaming =
+ (struct uvc_descriptor_header **)fs_streaming;
+ uvc->comm->desc.hs_streaming =
+ (struct uvc_descriptor_header **)hs_streaming;
+
+#ifdef UVC_DOUBLE_STREAM
+ uvc->comm->desc.fs_streaming2 =
+ (struct uvc_descriptor_header **)fs_streaming;
+ uvc->comm->desc.hs_streaming2 =
+ (struct uvc_descriptor_header **)hs_streaming;
+#endif
/* Allocate string descriptor numbers. */
- if ((ret = usb_string_id(c->cdev)) < 0)
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
goto error;
uvc_en_us_strings[UVC_STRING_ASSOCIATION_IDX].id = ret;
uvc_iad.iFunction = ret;
- if ((ret = usb_string_id(c->cdev)) < 0)
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
goto error;
uvc_en_us_strings[UVC_STRING_CONTROL_IDX].id = ret;
uvc_control_intf.iInterface = ret;
- if ((ret = usb_string_id(c->cdev)) < 0)
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
goto error;
uvc_en_us_strings[UVC_STRING_STREAMING_IDX].id = ret;
- uvc_streaming_intf_alt0.iInterface = ret;
- uvc_streaming_intf_alt1.iInterface = ret;
+ uvc_streaming1_intf_alt0.iInterface = ret;
+ uvc_streaming1_intf_alt1.iInterface = ret;
+ uvc_streaming1_intf_alt2.iInterface = ret;
+
+ uvc_streaming2_intf_alt0.iInterface = ret;
+ uvc_streaming2_intf_alt1.iInterface = ret;
+ uvc_streaming2_intf_alt2.iInterface = ret;
/* Register the function. */
- uvc->func.name = "uvc";
- uvc->func.strings = uvc_function_strings;
- uvc->func.bind = uvc_function_bind;
- uvc->func.unbind = uvc_function_unbind;
- uvc->func.get_alt = uvc_function_get_alt;
- uvc->func.set_alt = uvc_function_set_alt;
- uvc->func.disable = uvc_function_disable;
- uvc->func.setup = uvc_function_setup;
-
- ret = usb_add_function(c, &uvc->func);
+ uvc->comm->func.name = "uvc";
+ uvc->comm->func.strings = uvc_function_strings;
+ uvc->comm->func.bind = uvc_function_bind;
+ uvc->comm->func.unbind = uvc_function_unbind;
+ uvc->comm->func.get_alt = uvc_function_get_alt;
+ uvc->comm->func.set_alt = uvc_function_set_alt;
+ uvc->comm->func.disable = uvc_function_disable;
+ uvc->comm->func.setup = uvc_function_setup;
+
+ ret = usb_add_function(c, &uvc->comm->func);
if (ret)
kfree(uvc);
-
+#ifdef UVC_DOUBLE_STREAM
+ printk(KERN_EMERG "######### uvc bind 2 streams#########\n");
+#else
+ printk(KERN_EMERG "######### uvc bind 1 stream#########\n");
+#endif
return 0;
error:
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index bcdac7c7..4541dce4 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -136,6 +136,12 @@
#define gadget_is_s3c_hsotg(g) 0
#endif
+#ifdef CONFIG_USB_FH_OTG
+#define gadget_is_fhotg(g) (!strcmp("fh_otg", (g)->name))
+#else
+#define gadget_is_fhotg(g) 0
+#endif
+
#ifdef CONFIG_USB_S3C_HSUDC
#define gadget_is_s3c_hsudc(g) (!strcmp("s3c-hsudc", (g)->name))
#else
@@ -223,6 +229,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x29;
else if (gadget_is_s3c_hsudc(gadget))
return 0x30;
+ else if (gadget_is_fhotg(gadget))
+ return 0x31;
return -ENOENT;
}
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
deleted file mode 100644
index 0dfee282..00000000
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ /dev/null
@@ -1,3481 +0,0 @@
-/* linux/drivers/usb/gadget/s3c-hsotg.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C USB2.0 High-speed / OtG driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-
-#include <mach/map.h>
-
-#include <plat/regs-usb-hsotg-phy.h>
-#include <plat/regs-usb-hsotg.h>
-#include <mach/regs-sys.h>
-#include <plat/udc-hs.h>
-#include <plat/cpu.h>
-
-#define DMA_ADDR_INVALID (~((dma_addr_t)0))
-
-/* EP0_MPS_LIMIT
- *
- * Unfortunately there seems to be a limit of the amount of data that can
- * be transferred by IN transactions on EP0. This is either 127 bytes or 3
- * packets (which practically means 1 packet and 63 bytes of data) when the
- * MPS is set to 64.
- *
- * This means if we are wanting to move >127 bytes of data, we need to
- * split the transactions up, but just doing one packet at a time does
- * not work (this may be an implicit DATA0 PID on first packet of the
- * transaction) and doing 2 packets is outside the controller's limits.
- *
- * If we try to lower the MPS size for EP0, then no transfers work properly
- * for EP0, and the system will fail basic enumeration. As no cause for this
- * has currently been found, we cannot support any large IN transfers for
- * EP0.
- */
-#define EP0_MPS_LIMIT 64
-
-struct s3c_hsotg;
-struct s3c_hsotg_req;
-
-/**
- * struct s3c_hsotg_ep - driver endpoint definition.
- * @ep: The gadget layer representation of the endpoint.
- * @name: The driver generated name for the endpoint.
- * @queue: Queue of requests for this endpoint.
- * @parent: Reference back to the parent device structure.
- * @req: The current request that the endpoint is processing. This is
- * used to indicate an request has been loaded onto the endpoint
- * and has yet to be completed (maybe due to data move, or simply
- * awaiting an ack from the core all the data has been completed).
- * @debugfs: File entry for debugfs file for this endpoint.
- * @lock: State lock to protect contents of endpoint.
- * @dir_in: Set to true if this endpoint is of the IN direction, which
- * means that it is sending data to the Host.
- * @index: The index for the endpoint registers.
- * @name: The name array passed to the USB core.
- * @halted: Set if the endpoint has been halted.
- * @periodic: Set if this is a periodic ep, such as Interrupt
- * @sent_zlp: Set if we've sent a zero-length packet.
- * @total_data: The total number of data bytes done.
- * @fifo_size: The size of the FIFO (for periodic IN endpoints)
- * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
- * @last_load: The offset of data for the last start of request.
- * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
- *
- * This is the driver's state for each registered enpoint, allowing it
- * to keep track of transactions that need doing. Each endpoint has a
- * lock to protect the state, to try and avoid using an overall lock
- * for the host controller as much as possible.
- *
- * For periodic IN endpoints, we have fifo_size and fifo_load to try
- * and keep track of the amount of data in the periodic FIFO for each
- * of these as we don't have a status register that tells us how much
- * is in each of them. (note, this may actually be useless information
- * as in shared-fifo mode periodic in acts like a single-frame packet
- * buffer than a fifo)
- */
-struct s3c_hsotg_ep {
- struct usb_ep ep;
- struct list_head queue;
- struct s3c_hsotg *parent;
- struct s3c_hsotg_req *req;
- struct dentry *debugfs;
-
- spinlock_t lock;
-
- unsigned long total_data;
- unsigned int size_loaded;
- unsigned int last_load;
- unsigned int fifo_load;
- unsigned short fifo_size;
-
- unsigned char dir_in;
- unsigned char index;
-
- unsigned int halted:1;
- unsigned int periodic:1;
- unsigned int sent_zlp:1;
-
- char name[10];
-};
-
-#define S3C_HSOTG_EPS (8+1) /* limit to 9 for the moment */
-
-/**
- * struct s3c_hsotg - driver state.
- * @dev: The parent device supplied to the probe function
- * @driver: USB gadget driver
- * @plat: The platform specific configuration data.
- * @regs: The memory area mapped for accessing registers.
- * @regs_res: The resource that was allocated when claiming register space.
- * @irq: The IRQ number we are using
- * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
- * @debug_root: root directrory for debugfs.
- * @debug_file: main status file for debugfs.
- * @debug_fifo: FIFO status file for debugfs.
- * @ep0_reply: Request used for ep0 reply.
- * @ep0_buff: Buffer for EP0 reply data, if needed.
- * @ctrl_buff: Buffer for EP0 control requests.
- * @ctrl_req: Request for EP0 control packets.
- * @eps: The endpoints being supplied to the gadget framework
- */
-struct s3c_hsotg {
- struct device *dev;
- struct usb_gadget_driver *driver;
- struct s3c_hsotg_plat *plat;
-
- void __iomem *regs;
- struct resource *regs_res;
- int irq;
- struct clk *clk;
-
- unsigned int dedicated_fifos:1;
-
- struct dentry *debug_root;
- struct dentry *debug_file;
- struct dentry *debug_fifo;
-
- struct usb_request *ep0_reply;
- struct usb_request *ctrl_req;
- u8 ep0_buff[8];
- u8 ctrl_buff[8];
-
- struct usb_gadget gadget;
- struct s3c_hsotg_ep eps[];
-};
-
-/**
- * struct s3c_hsotg_req - data transfer request
- * @req: The USB gadget request
- * @queue: The list of requests for the endpoint this is queued for.
- * @in_progress: Has already had size/packets written to core
- * @mapped: DMA buffer for this request has been mapped via dma_map_single().
- */
-struct s3c_hsotg_req {
- struct usb_request req;
- struct list_head queue;
- unsigned char in_progress;
- unsigned char mapped;
-};
-
-/* conversion functions */
-static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
-{
- return container_of(req, struct s3c_hsotg_req, req);
-}
-
-static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
-{
- return container_of(ep, struct s3c_hsotg_ep, ep);
-}
-
-static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
-{
- return container_of(gadget, struct s3c_hsotg, gadget);
-}
-
-static inline void __orr32(void __iomem *ptr, u32 val)
-{
- writel(readl(ptr) | val, ptr);
-}
-
-static inline void __bic32(void __iomem *ptr, u32 val)
-{
- writel(readl(ptr) & ~val, ptr);
-}
-
-/* forward decleration of functions */
-static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
-
-/**
- * using_dma - return the DMA status of the driver.
- * @hsotg: The driver state.
- *
- * Return true if we're using DMA.
- *
- * Currently, we have the DMA support code worked into everywhere
- * that needs it, but the AMBA DMA implementation in the hardware can
- * only DMA from 32bit aligned addresses. This means that gadgets such
- * as the CDC Ethernet cannot work as they often pass packets which are
- * not 32bit aligned.
- *
- * Unfortunately the choice to use DMA or not is global to the controller
- * and seems to be only settable when the controller is being put through
- * a core reset. This means we either need to fix the gadgets to take
- * account of DMA alignment, or add bounce buffers (yuerk).
- *
- * Until this issue is sorted out, we always return 'false'.
- */
-static inline bool using_dma(struct s3c_hsotg *hsotg)
-{
- return false; /* support is not complete */
-}
-
-/**
- * s3c_hsotg_en_gsint - enable one or more of the general interrupt
- * @hsotg: The device state
- * @ints: A bitmask of the interrupts to enable
- */
-static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
-{
- u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
- u32 new_gsintmsk;
-
- new_gsintmsk = gsintmsk | ints;
-
- if (new_gsintmsk != gsintmsk) {
- dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
- writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
- }
-}
-
-/**
- * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
- * @hsotg: The device state
- * @ints: A bitmask of the interrupts to enable
- */
-static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
-{
- u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
- u32 new_gsintmsk;
-
- new_gsintmsk = gsintmsk & ~ints;
-
- if (new_gsintmsk != gsintmsk)
- writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
-}
-
-/**
- * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
- * @hsotg: The device state
- * @ep: The endpoint index
- * @dir_in: True if direction is in.
- * @en: The enable value, true to enable
- *
- * Set or clear the mask for an individual endpoint's interrupt
- * request.
- */
-static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
- unsigned int ep, unsigned int dir_in,
- unsigned int en)
-{
- unsigned long flags;
- u32 bit = 1 << ep;
- u32 daint;
-
- if (!dir_in)
- bit <<= 16;
-
- local_irq_save(flags);
- daint = readl(hsotg->regs + S3C_DAINTMSK);
- if (en)
- daint |= bit;
- else
- daint &= ~bit;
- writel(daint, hsotg->regs + S3C_DAINTMSK);
- local_irq_restore(flags);
-}
-
-/**
- * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
- * @hsotg: The device instance.
- */
-static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
-{
- unsigned int ep;
- unsigned int addr;
- unsigned int size;
- int timeout;
- u32 val;
-
- /* the ryu 2.6.24 release ahs
- writel(0x1C0, hsotg->regs + S3C_GRXFSIZ);
- writel(S3C_GNPTXFSIZ_NPTxFStAddr(0x200) |
- S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
- hsotg->regs + S3C_GNPTXFSIZ);
- */
-
- /* set FIFO sizes to 2048/1024 */
-
- writel(2048, hsotg->regs + S3C_GRXFSIZ);
- writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) |
- S3C_GNPTXFSIZ_NPTxFDep(1024),
- hsotg->regs + S3C_GNPTXFSIZ);
-
- /* arange all the rest of the TX FIFOs, as some versions of this
- * block have overlapping default addresses. This also ensures
- * that if the settings have been changed, then they are set to
- * known values. */
-
- /* start at the end of the GNPTXFSIZ, rounded up */
- addr = 2048 + 1024;
- size = 768;
-
- /* currently we allocate TX FIFOs for all possible endpoints,
- * and assume that they are all the same size. */
-
- for (ep = 0; ep <= 15; ep++) {
- val = addr;
- val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT;
- addr += size;
-
- writel(val, hsotg->regs + S3C_DPTXFSIZn(ep));
- }
-
- /* according to p428 of the design guide, we need to ensure that
- * all fifos are flushed before continuing */
-
- writel(S3C_GRSTCTL_TxFNum(0x10) | S3C_GRSTCTL_TxFFlsh |
- S3C_GRSTCTL_RxFFlsh, hsotg->regs + S3C_GRSTCTL);
-
- /* wait until the fifos are both flushed */
- timeout = 100;
- while (1) {
- val = readl(hsotg->regs + S3C_GRSTCTL);
-
- if ((val & (S3C_GRSTCTL_TxFFlsh | S3C_GRSTCTL_RxFFlsh)) == 0)
- break;
-
- if (--timeout == 0) {
- dev_err(hsotg->dev,
- "%s: timeout flushing fifos (GRSTCTL=%08x)\n",
- __func__, val);
- }
-
- udelay(1);
- }
-
- dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
-}
-
-/**
- * @ep: USB endpoint to allocate request for.
- * @flags: Allocation flags
- *
- * Allocate a new USB request structure appropriate for the specified endpoint
- */
-static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
- gfp_t flags)
-{
- struct s3c_hsotg_req *req;
-
- req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
- if (!req)
- return NULL;
-
- INIT_LIST_HEAD(&req->queue);
-
- req->req.dma = DMA_ADDR_INVALID;
- return &req->req;
-}
-
-/**
- * is_ep_periodic - return true if the endpoint is in periodic mode.
- * @hs_ep: The endpoint to query.
- *
- * Returns true if the endpoint is in periodic mode, meaning it is being
- * used for an Interrupt or ISO transfer.
- */
-static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
-{
- return hs_ep->periodic;
-}
-
-/**
- * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
- * @hsotg: The device state.
- * @hs_ep: The endpoint for the request
- * @hs_req: The request being processed.
- *
- * This is the reverse of s3c_hsotg_map_dma(), called for the completion
- * of a request to ensure the buffer is ready for access by the caller.
-*/
-static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *hs_ep,
- struct s3c_hsotg_req *hs_req)
-{
- struct usb_request *req = &hs_req->req;
- enum dma_data_direction dir;
-
- dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- /* ignore this if we're not moving any data */
- if (hs_req->req.length == 0)
- return;
-
- if (hs_req->mapped) {
- /* we mapped this, so unmap and remove the dma */
-
- dma_unmap_single(hsotg->dev, req->dma, req->length, dir);
-
- req->dma = DMA_ADDR_INVALID;
- hs_req->mapped = 0;
- } else {
- dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
- }
-}
-
-/**
- * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
- * @hsotg: The controller state.
- * @hs_ep: The endpoint we're going to write for.
- * @hs_req: The request to write data for.
- *
- * This is called when the TxFIFO has some space in it to hold a new
- * transmission and we have something to give it. The actual setup of
- * the data size is done elsewhere, so all we have to do is to actually
- * write the data.
- *
- * The return value is zero if there is more space (or nothing was done)
- * otherwise -ENOSPC is returned if the FIFO space was used up.
- *
- * This routine is only needed for PIO
-*/
-static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *hs_ep,
- struct s3c_hsotg_req *hs_req)
-{
- bool periodic = is_ep_periodic(hs_ep);
- u32 gnptxsts = readl(hsotg->regs + S3C_GNPTXSTS);
- int buf_pos = hs_req->req.actual;
- int to_write = hs_ep->size_loaded;
- void *data;
- int can_write;
- int pkt_round;
-
- to_write -= (buf_pos - hs_ep->last_load);
-
- /* if there's nothing to write, get out early */
- if (to_write == 0)
- return 0;
-
- if (periodic && !hsotg->dedicated_fifos) {
- u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
- int size_left;
- int size_done;
-
- /* work out how much data was loaded so we can calculate
- * how much data is left in the fifo. */
-
- size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
-
- /* if shared fifo, we cannot write anything until the
- * previous data has been completely sent.
- */
- if (hs_ep->fifo_load != 0) {
- s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
- return -ENOSPC;
- }
-
- dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
- __func__, size_left,
- hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
-
- /* how much of the data has moved */
- size_done = hs_ep->size_loaded - size_left;
-
- /* how much data is left in the fifo */
- can_write = hs_ep->fifo_load - size_done;
- dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
- __func__, can_write);
-
- can_write = hs_ep->fifo_size - can_write;
- dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
- __func__, can_write);
-
- if (can_write <= 0) {
- s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
- return -ENOSPC;
- }
- } else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
- can_write = readl(hsotg->regs + S3C_DTXFSTS(hs_ep->index));
-
- can_write &= 0xffff;
- can_write *= 4;
- } else {
- if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
- dev_dbg(hsotg->dev,
- "%s: no queue slots available (0x%08x)\n",
- __func__, gnptxsts);
-
- s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
- return -ENOSPC;
- }
-
- can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
- can_write *= 4; /* fifo size is in 32bit quantities. */
- }
-
- dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
- __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);
-
- /* limit to 512 bytes of data, it seems at least on the non-periodic
- * FIFO, requests of >512 cause the endpoint to get stuck with a
- * fragment of the end of the transfer in it.
- */
- if (can_write > 512)
- can_write = 512;
-
- /* limit the write to one max-packet size worth of data, but allow
- * the transfer to return that it did not run out of fifo space
- * doing it. */
- if (to_write > hs_ep->ep.maxpacket) {
- to_write = hs_ep->ep.maxpacket;
-
- s3c_hsotg_en_gsint(hsotg,
- periodic ? S3C_GINTSTS_PTxFEmp :
- S3C_GINTSTS_NPTxFEmp);
- }
-
- /* see if we can write data */
-
- if (to_write > can_write) {
- to_write = can_write;
- pkt_round = to_write % hs_ep->ep.maxpacket;
-
- /* Not sure, but we probably shouldn't be writing partial
- * packets into the FIFO, so round the write down to an
- * exact number of packets.
- *
- * Note, we do not currently check to see if we can ever
- * write a full packet or not to the FIFO.
- */
-
- if (pkt_round)
- to_write -= pkt_round;
-
- /* enable correct FIFO interrupt to alert us when there
- * is more room left. */
-
- s3c_hsotg_en_gsint(hsotg,
- periodic ? S3C_GINTSTS_PTxFEmp :
- S3C_GINTSTS_NPTxFEmp);
- }
-
- dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
- to_write, hs_req->req.length, can_write, buf_pos);
-
- if (to_write <= 0)
- return -ENOSPC;
-
- hs_req->req.actual = buf_pos + to_write;
- hs_ep->total_data += to_write;
-
- if (periodic)
- hs_ep->fifo_load += to_write;
-
- to_write = DIV_ROUND_UP(to_write, 4);
- data = hs_req->req.buf + buf_pos;
-
- writesl(hsotg->regs + S3C_EPFIFO(hs_ep->index), data, to_write);
-
- return (to_write >= can_write) ? -ENOSPC : 0;
-}
-
-/**
- * get_ep_limit - get the maximum data legnth for this endpoint
- * @hs_ep: The endpoint
- *
- * Return the maximum data that can be queued in one go on a given endpoint
- * so that transfers that are too long can be split.
- */
-static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
-{
- int index = hs_ep->index;
- unsigned maxsize;
- unsigned maxpkt;
-
- if (index != 0) {
- maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1;
- maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1;
- } else {
- maxsize = 64+64;
- if (hs_ep->dir_in)
- maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1;
- else
- maxpkt = 2;
- }
-
- /* we made the constant loading easier above by using +1 */
- maxpkt--;
- maxsize--;
-
- /* constrain by packet count if maxpkts*pktsize is greater
- * than the length register size. */
-
- if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
- maxsize = maxpkt * hs_ep->ep.maxpacket;
-
- return maxsize;
-}
-
-/**
- * s3c_hsotg_start_req - start a USB request from an endpoint's queue
- * @hsotg: The controller state.
- * @hs_ep: The endpoint to process a request for
- * @hs_req: The request to start.
- * @continuing: True if we are doing more for the current request.
- *
- * Start the given request running by setting the endpoint registers
- * appropriately, and writing any data to the FIFOs.
- */
-static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *hs_ep,
- struct s3c_hsotg_req *hs_req,
- bool continuing)
-{
- struct usb_request *ureq = &hs_req->req;
- int index = hs_ep->index;
- int dir_in = hs_ep->dir_in;
- u32 epctrl_reg;
- u32 epsize_reg;
- u32 epsize;
- u32 ctrl;
- unsigned length;
- unsigned packets;
- unsigned maxreq;
-
- if (index != 0) {
- if (hs_ep->req && !continuing) {
- dev_err(hsotg->dev, "%s: active request\n", __func__);
- WARN_ON(1);
- return;
- } else if (hs_ep->req != hs_req && continuing) {
- dev_err(hsotg->dev,
- "%s: continue different req\n", __func__);
- WARN_ON(1);
- return;
- }
- }
-
- epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
- epsize_reg = dir_in ? S3C_DIEPTSIZ(index) : S3C_DOEPTSIZ(index);
-
- dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
- __func__, readl(hsotg->regs + epctrl_reg), index,
- hs_ep->dir_in ? "in" : "out");
-
- /* If endpoint is stalled, we will restart request later */
- ctrl = readl(hsotg->regs + epctrl_reg);
-
- if (ctrl & S3C_DxEPCTL_Stall) {
- dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
- return;
- }
-
- length = ureq->length - ureq->actual;
-
- if (0)
- dev_dbg(hsotg->dev,
- "REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
- ureq->buf, length, ureq->dma,
- ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
-
- maxreq = get_ep_limit(hs_ep);
- if (length > maxreq) {
- int round = maxreq % hs_ep->ep.maxpacket;
-
- dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
- __func__, length, maxreq, round);
-
- /* round down to multiple of packets */
- if (round)
- maxreq -= round;
-
- length = maxreq;
- }
-
- if (length)
- packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
- else
- packets = 1; /* send one packet if length is zero. */
-
- if (dir_in && index != 0)
- epsize = S3C_DxEPTSIZ_MC(1);
- else
- epsize = 0;
-
- if (index != 0 && ureq->zero) {
- /* test for the packets being exactly right for the
- * transfer */
-
- if (length == (packets * hs_ep->ep.maxpacket))
- packets++;
- }
-
- epsize |= S3C_DxEPTSIZ_PktCnt(packets);
- epsize |= S3C_DxEPTSIZ_XferSize(length);
-
- dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
- __func__, packets, length, ureq->length, epsize, epsize_reg);
-
- /* store the request as the current one we're doing */
- hs_ep->req = hs_req;
-
- /* write size / packets */
- writel(epsize, hsotg->regs + epsize_reg);
-
- if (using_dma(hsotg)) {
- unsigned int dma_reg;
-
- /* write DMA address to control register, buffer already
- * synced by s3c_hsotg_ep_queue(). */
-
- dma_reg = dir_in ? S3C_DIEPDMA(index) : S3C_DOEPDMA(index);
- writel(ureq->dma, hsotg->regs + dma_reg);
-
- dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
- __func__, ureq->dma, dma_reg);
- }
-
- ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
- ctrl |= S3C_DxEPCTL_USBActEp;
- ctrl |= S3C_DxEPCTL_CNAK; /* clear NAK set by core */
-
- dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
- writel(ctrl, hsotg->regs + epctrl_reg);
-
- /* set these, it seems that DMA support increments past the end
- * of the packet buffer so we need to calculate the length from
- * this information. */
- hs_ep->size_loaded = length;
- hs_ep->last_load = ureq->actual;
-
- if (dir_in && !using_dma(hsotg)) {
- /* set these anyway, we may need them for non-periodic in */
- hs_ep->fifo_load = 0;
-
- s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
- }
-
- /* clear the INTknTXFEmpMsk when we start request, more as a aide
- * to debugging to see what is going on. */
- if (dir_in)
- writel(S3C_DIEPMSK_INTknTXFEmpMsk,
- hsotg->regs + S3C_DIEPINT(index));
-
- /* Note, trying to clear the NAK here causes problems with transmit
- * on the S3C6400 ending up with the TXFIFO becoming full. */
-
- /* check ep is enabled */
- if (!(readl(hsotg->regs + epctrl_reg) & S3C_DxEPCTL_EPEna))
- dev_warn(hsotg->dev,
- "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
- index, readl(hsotg->regs + epctrl_reg));
-
- dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
- __func__, readl(hsotg->regs + epctrl_reg));
-}
-
-/**
- * s3c_hsotg_map_dma - map the DMA memory being used for the request
- * @hsotg: The device state.
- * @hs_ep: The endpoint the request is on.
- * @req: The request being processed.
- *
- * We've been asked to queue a request, so ensure that the memory buffer
- * is correctly setup for DMA. If we've been passed an extant DMA address
- * then ensure the buffer has been synced to memory. If our buffer has no
- * DMA memory, then we map the memory and mark our request to allow us to
- * cleanup on completion.
-*/
-static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *hs_ep,
- struct usb_request *req)
-{
- enum dma_data_direction dir;
- struct s3c_hsotg_req *hs_req = our_req(req);
-
- dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- /* if the length is zero, ignore the DMA data */
- if (hs_req->req.length == 0)
- return 0;
-
- if (req->dma == DMA_ADDR_INVALID) {
- dma_addr_t dma;
-
- dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);
-
- if (unlikely(dma_mapping_error(hsotg->dev, dma)))
- goto dma_error;
-
- if (dma & 3) {
- dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
- __func__);
-
- dma_unmap_single(hsotg->dev, dma, req->length, dir);
- return -EINVAL;
- }
-
- hs_req->mapped = 1;
- req->dma = dma;
- } else {
- dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
- hs_req->mapped = 0;
- }
-
- return 0;
-
-dma_error:
- dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
- __func__, req->buf, req->length);
-
- return -EIO;
-}
-
-static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
- gfp_t gfp_flags)
-{
- struct s3c_hsotg_req *hs_req = our_req(req);
- struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hs = hs_ep->parent;
- unsigned long irqflags;
- bool first;
-
- dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
- ep->name, req, req->length, req->buf, req->no_interrupt,
- req->zero, req->short_not_ok);
-
- /* initialise status of the request */
- INIT_LIST_HEAD(&hs_req->queue);
- req->actual = 0;
- req->status = -EINPROGRESS;
-
- /* if we're using DMA, sync the buffers as necessary */
- if (using_dma(hs)) {
- int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
- if (ret)
- return ret;
- }
-
- spin_lock_irqsave(&hs_ep->lock, irqflags);
-
- first = list_empty(&hs_ep->queue);
- list_add_tail(&hs_req->queue, &hs_ep->queue);
-
- if (first)
- s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
-
- spin_unlock_irqrestore(&hs_ep->lock, irqflags);
-
- return 0;
-}
-
-static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
- struct usb_request *req)
-{
- struct s3c_hsotg_req *hs_req = our_req(req);
-
- kfree(hs_req);
-}
-
-/**
- * s3c_hsotg_complete_oursetup - setup completion callback
- * @ep: The endpoint the request was on.
- * @req: The request completed.
- *
- * Called on completion of any requests the driver itself
- * submitted that need cleaning up.
- */
-static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
- struct usb_request *req)
-{
- struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hsotg = hs_ep->parent;
-
- dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
-
- s3c_hsotg_ep_free_request(ep, req);
-}
-
-/**
- * ep_from_windex - convert control wIndex value to endpoint
- * @hsotg: The driver state.
- * @windex: The control request wIndex field (in host order).
- *
- * Convert the given wIndex into a pointer to an driver endpoint
- * structure, or return NULL if it is not a valid endpoint.
-*/
-static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
- u32 windex)
-{
- struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
- int dir = (windex & USB_DIR_IN) ? 1 : 0;
- int idx = windex & 0x7F;
-
- if (windex >= 0x100)
- return NULL;
-
- if (idx > S3C_HSOTG_EPS)
- return NULL;
-
- if (idx && ep->dir_in != dir)
- return NULL;
-
- return ep;
-}
-
-/**
- * s3c_hsotg_send_reply - send reply to control request
- * @hsotg: The device state
- * @ep: Endpoint 0
- * @buff: Buffer for request
- * @length: Length of reply.
- *
- * Create a request and queue it on the given endpoint. This is useful as
- * an internal method of sending replies to certain control requests, etc.
- */
-static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *ep,
- void *buff,
- int length)
-{
- struct usb_request *req;
- int ret;
-
- dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
-
- req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
- hsotg->ep0_reply = req;
- if (!req) {
- dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
- return -ENOMEM;
- }
-
- req->buf = hsotg->ep0_buff;
- req->length = length;
- req->zero = 1; /* always do zero-length final transfer */
- req->complete = s3c_hsotg_complete_oursetup;
-
- if (length)
- memcpy(req->buf, buff, length);
- else
- ep->sent_zlp = 1;
-
- ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
- if (ret) {
- dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * s3c_hsotg_process_req_status - process request GET_STATUS
- * @hsotg: The device state
- * @ctrl: USB control request
- */
-static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
- struct usb_ctrlrequest *ctrl)
-{
- struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
- struct s3c_hsotg_ep *ep;
- __le16 reply;
- int ret;
-
- dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
-
- if (!ep0->dir_in) {
- dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
- return -EINVAL;
- }
-
- switch (ctrl->bRequestType & USB_RECIP_MASK) {
- case USB_RECIP_DEVICE:
- reply = cpu_to_le16(0); /* bit 0 => self powered,
- * bit 1 => remote wakeup */
- break;
-
- case USB_RECIP_INTERFACE:
- /* currently, the data result should be zero */
- reply = cpu_to_le16(0);
- break;
-
- case USB_RECIP_ENDPOINT:
- ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
- if (!ep)
- return -ENOENT;
-
- reply = cpu_to_le16(ep->halted ? 1 : 0);
- break;
-
- default:
- return 0;
- }
-
- if (le16_to_cpu(ctrl->wLength) != 2)
- return -EINVAL;
-
- ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
- return ret;
- }
-
- return 1;
-}
-
-static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
-
-/**
- * get_ep_head - return the first request on the endpoint
- * @hs_ep: The controller endpoint to get
- *
- * Get the first request on the endpoint.
- */
-static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
-{
- if (list_empty(&hs_ep->queue))
- return NULL;
-
- return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
-}
-
-/**
- * s3c_hsotg_process_req_featire - process request {SET,CLEAR}_FEATURE
- * @hsotg: The device state
- * @ctrl: USB control request
- */
-static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
- struct usb_ctrlrequest *ctrl)
-{
- struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
- struct s3c_hsotg_req *hs_req;
- bool restart;
- bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
- struct s3c_hsotg_ep *ep;
- int ret;
-
- dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
- __func__, set ? "SET" : "CLEAR");
-
- if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
- ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
- if (!ep) {
- dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
- __func__, le16_to_cpu(ctrl->wIndex));
- return -ENOENT;
- }
-
- switch (le16_to_cpu(ctrl->wValue)) {
- case USB_ENDPOINT_HALT:
- s3c_hsotg_ep_sethalt(&ep->ep, set);
-
- ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
- if (ret) {
- dev_err(hsotg->dev,
- "%s: failed to send reply\n", __func__);
- return ret;
- }
-
- if (!set) {
- /*
- * If we have request in progress,
- * then complete it
- */
- if (ep->req) {
- hs_req = ep->req;
- ep->req = NULL;
- list_del_init(&hs_req->queue);
- hs_req->req.complete(&ep->ep,
- &hs_req->req);
- }
-
- /* If we have pending request, then start it */
- restart = !list_empty(&ep->queue);
- if (restart) {
- hs_req = get_ep_head(ep);
- s3c_hsotg_start_req(hsotg, ep,
- hs_req, false);
- }
- }
-
- break;
-
- default:
- return -ENOENT;
- }
- } else
- return -ENOENT; /* currently only deal with endpoint */
-
- return 1;
-}
-
-/**
- * s3c_hsotg_process_control - process a control request
- * @hsotg: The device state
- * @ctrl: The control request received
- *
- * The controller has received the SETUP phase of a control request, and
- * needs to work out what to do next (and whether to pass it on to the
- * gadget driver).
- */
-static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
- struct usb_ctrlrequest *ctrl)
-{
- struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
- int ret = 0;
- u32 dcfg;
-
- ep0->sent_zlp = 0;
-
- dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
- ctrl->bRequest, ctrl->bRequestType,
- ctrl->wValue, ctrl->wLength);
-
- /* record the direction of the request, for later use when enquing
- * packets onto EP0. */
-
- ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
- dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
-
- /* if we've no data with this request, then the last part of the
- * transaction is going to implicitly be IN. */
- if (ctrl->wLength == 0)
- ep0->dir_in = 1;
-
- if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- switch (ctrl->bRequest) {
- case USB_REQ_SET_ADDRESS:
- dcfg = readl(hsotg->regs + S3C_DCFG);
- dcfg &= ~S3C_DCFG_DevAddr_MASK;
- dcfg |= ctrl->wValue << S3C_DCFG_DevAddr_SHIFT;
- writel(dcfg, hsotg->regs + S3C_DCFG);
-
- dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
-
- ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
- return;
-
- case USB_REQ_GET_STATUS:
- ret = s3c_hsotg_process_req_status(hsotg, ctrl);
- break;
-
- case USB_REQ_CLEAR_FEATURE:
- case USB_REQ_SET_FEATURE:
- ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
- break;
- }
- }
-
- /* as a fallback, try delivering it to the driver to deal with */
-
- if (ret == 0 && hsotg->driver) {
- ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
- if (ret < 0)
- dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
- }
-
- /* the request is either unhandlable, or is not formatted correctly
- * so respond with a STALL for the status stage to indicate failure.
- */
-
- if (ret < 0) {
- u32 reg;
- u32 ctrl;
-
- dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
- reg = (ep0->dir_in) ? S3C_DIEPCTL0 : S3C_DOEPCTL0;
-
- /* S3C_DxEPCTL_Stall will be cleared by EP once it has
- * taken effect, so no need to clear later. */
-
- ctrl = readl(hsotg->regs + reg);
- ctrl |= S3C_DxEPCTL_Stall;
- ctrl |= S3C_DxEPCTL_CNAK;
- writel(ctrl, hsotg->regs + reg);
-
- dev_dbg(hsotg->dev,
- "written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
- ctrl, reg, readl(hsotg->regs + reg));
-
- /* don't believe we need to anything more to get the EP
- * to reply with a STALL packet */
- }
-}
-
-static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
-
-/**
- * s3c_hsotg_complete_setup - completion of a setup transfer
- * @ep: The endpoint the request was on.
- * @req: The request completed.
- *
- * Called on completion of any requests the driver itself submitted for
- * EP0 setup packets
- */
-static void s3c_hsotg_complete_setup(struct usb_ep *ep,
- struct usb_request *req)
-{
- struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hsotg = hs_ep->parent;
-
- if (req->status < 0) {
- dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
- return;
- }
-
- if (req->actual == 0)
- s3c_hsotg_enqueue_setup(hsotg);
- else
- s3c_hsotg_process_control(hsotg, req->buf);
-}
-
-/**
- * s3c_hsotg_enqueue_setup - start a request for EP0 packets
- * @hsotg: The device state.
- *
- * Enqueue a request on EP0 if necessary to received any SETUP packets
- * received from the host.
- */
-static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
-{
- struct usb_request *req = hsotg->ctrl_req;
- struct s3c_hsotg_req *hs_req = our_req(req);
- int ret;
-
- dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
-
- req->zero = 0;
- req->length = 8;
- req->buf = hsotg->ctrl_buff;
- req->complete = s3c_hsotg_complete_setup;
-
- if (!list_empty(&hs_req->queue)) {
- dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
- return;
- }
-
- hsotg->eps[0].dir_in = 0;
-
- ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
- /* Don't think there's much we can do other than watch the
- * driver fail. */
- }
-}
-
-/**
- * s3c_hsotg_complete_request - complete a request given to us
- * @hsotg: The device state.
- * @hs_ep: The endpoint the request was on.
- * @hs_req: The request to complete.
- * @result: The result code (0 => Ok, otherwise errno)
- *
- * The given request has finished, so call the necessary completion
- * if it has one and then look to see if we can start a new request
- * on the endpoint.
- *
- * Note, expects the ep to already be locked as appropriate.
-*/
-static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *hs_ep,
- struct s3c_hsotg_req *hs_req,
- int result)
-{
- bool restart;
-
- if (!hs_req) {
- dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
- return;
- }
-
- dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
- hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
-
- /* only replace the status if we've not already set an error
- * from a previous transaction */
-
- if (hs_req->req.status == -EINPROGRESS)
- hs_req->req.status = result;
-
- hs_ep->req = NULL;
- list_del_init(&hs_req->queue);
-
- if (using_dma(hsotg))
- s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
-
- /* call the complete request with the locks off, just in case the
- * request tries to queue more work for this endpoint. */
-
- if (hs_req->req.complete) {
- spin_unlock(&hs_ep->lock);
- hs_req->req.complete(&hs_ep->ep, &hs_req->req);
- spin_lock(&hs_ep->lock);
- }
-
- /* Look to see if there is anything else to do. Note, the completion
- * of the previous request may have caused a new request to be started
- * so be careful when doing this. */
-
- if (!hs_ep->req && result >= 0) {
- restart = !list_empty(&hs_ep->queue);
- if (restart) {
- hs_req = get_ep_head(hs_ep);
- s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
- }
- }
-}
-
-/**
- * s3c_hsotg_complete_request_lock - complete a request given to us (locked)
- * @hsotg: The device state.
- * @hs_ep: The endpoint the request was on.
- * @hs_req: The request to complete.
- * @result: The result code (0 => Ok, otherwise errno)
- *
- * See s3c_hsotg_complete_request(), but called with the endpoint's
- * lock held.
-*/
-static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *hs_ep,
- struct s3c_hsotg_req *hs_req,
- int result)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hs_ep->lock, flags);
- s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
- spin_unlock_irqrestore(&hs_ep->lock, flags);
-}
-
-/**
- * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
- * @hsotg: The device state.
- * @ep_idx: The endpoint index for the data
- * @size: The size of data in the fifo, in bytes
- *
- * The FIFO status shows there is data to read from the FIFO for a given
- * endpoint, so sort out whether we need to read the data into a request
- * that has been made for that endpoint.
- */
-static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
-{
- struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
- struct s3c_hsotg_req *hs_req = hs_ep->req;
- void __iomem *fifo = hsotg->regs + S3C_EPFIFO(ep_idx);
- int to_read;
- int max_req;
- int read_ptr;
-
- if (!hs_req) {
- u32 epctl = readl(hsotg->regs + S3C_DOEPCTL(ep_idx));
- int ptr;
-
- dev_warn(hsotg->dev,
- "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
- __func__, size, ep_idx, epctl);
-
- /* dump the data from the FIFO, we've nothing we can do */
- for (ptr = 0; ptr < size; ptr += 4)
- (void)readl(fifo);
-
- return;
- }
-
- spin_lock(&hs_ep->lock);
-
- to_read = size;
- read_ptr = hs_req->req.actual;
- max_req = hs_req->req.length - read_ptr;
-
- dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
- __func__, to_read, max_req, read_ptr, hs_req->req.length);
-
- if (to_read > max_req) {
- /* more data appeared than we where willing
- * to deal with in this request.
- */
-
- /* currently we don't deal this */
- WARN_ON_ONCE(1);
- }
-
- hs_ep->total_data += to_read;
- hs_req->req.actual += to_read;
- to_read = DIV_ROUND_UP(to_read, 4);
-
- /* note, we might over-write the buffer end by 3 bytes depending on
- * alignment of the data. */
- readsl(fifo, hs_req->req.buf + read_ptr, to_read);
-
- spin_unlock(&hs_ep->lock);
-}
-
-/**
- * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
- * @hsotg: The device instance
- * @req: The request currently on this endpoint
- *
- * Generate a zero-length IN packet request for terminating a SETUP
- * transaction.
- *
- * Note, since we don't write any data to the TxFIFO, then it is
- * currently believed that we do not need to wait for any space in
- * the TxFIFO.
- */
-static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_req *req)
-{
- u32 ctrl;
-
- if (!req) {
- dev_warn(hsotg->dev, "%s: no request?\n", __func__);
- return;
- }
-
- if (req->req.length == 0) {
- hsotg->eps[0].sent_zlp = 1;
- s3c_hsotg_enqueue_setup(hsotg);
- return;
- }
-
- hsotg->eps[0].dir_in = 1;
- hsotg->eps[0].sent_zlp = 1;
-
- dev_dbg(hsotg->dev, "sending zero-length packet\n");
-
- /* issue a zero-sized packet to terminate this */
- writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
- S3C_DxEPTSIZ_XferSize(0), hsotg->regs + S3C_DIEPTSIZ(0));
-
- ctrl = readl(hsotg->regs + S3C_DIEPCTL0);
- ctrl |= S3C_DxEPCTL_CNAK; /* clear NAK set by core */
- ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
- ctrl |= S3C_DxEPCTL_USBActEp;
- writel(ctrl, hsotg->regs + S3C_DIEPCTL0);
-}
-
-/**
- * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
- * @hsotg: The device instance
- * @epnum: The endpoint received from
- * @was_setup: Set if processing a SetupDone event.
- *
- * The RXFIFO has delivered an OutDone event, which means that the data
- * transfer for an OUT endpoint has been completed, either by a short
- * packet or by the finish of a transfer.
-*/
-static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
- int epnum, bool was_setup)
-{
- u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum));
- struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
- struct s3c_hsotg_req *hs_req = hs_ep->req;
- struct usb_request *req = &hs_req->req;
- unsigned size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
- int result = 0;
-
- if (!hs_req) {
- dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
- return;
- }
-
- if (using_dma(hsotg)) {
- unsigned size_done;
-
- /* Calculate the size of the transfer by checking how much
- * is left in the endpoint size register and then working it
- * out from the amount we loaded for the transfer.
- *
- * We need to do this as DMA pointers are always 32bit aligned
- * so may overshoot/undershoot the transfer.
- */
-
- size_done = hs_ep->size_loaded - size_left;
- size_done += hs_ep->last_load;
-
- req->actual = size_done;
- }
-
- /* if there is more request to do, schedule new transfer */
- if (req->actual < req->length && size_left == 0) {
- s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
- return;
- }
-
- if (req->actual < req->length && req->short_not_ok) {
- dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
- __func__, req->actual, req->length);
-
- /* todo - what should we return here? there's no one else
- * even bothering to check the status. */
- }
-
- if (epnum == 0) {
- if (!was_setup && req->complete != s3c_hsotg_complete_setup)
- s3c_hsotg_send_zlp(hsotg, hs_req);
- }
-
- s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result);
-}
-
-/**
- * s3c_hsotg_read_frameno - read current frame number
- * @hsotg: The device instance
- *
- * Return the current frame number
-*/
-static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
-{
- u32 dsts;
-
- dsts = readl(hsotg->regs + S3C_DSTS);
- dsts &= S3C_DSTS_SOFFN_MASK;
- dsts >>= S3C_DSTS_SOFFN_SHIFT;
-
- return dsts;
-}
-
-/**
- * s3c_hsotg_handle_rx - RX FIFO has data
- * @hsotg: The device instance
- *
- * The IRQ handler has detected that the RX FIFO has some data in it
- * that requires processing, so find out what is in there and do the
- * appropriate read.
- *
- * The RXFIFO is a true FIFO, the packets coming out are still in packet
- * chunks, so if you have x packets received on an endpoint you'll get x
- * FIFO events delivered, each with a packet's worth of data in it.
- *
- * When using DMA, we should not be processing events from the RXFIFO
- * as the actual data should be sent to the memory directly and we turn
- * on the completion interrupts to get notifications of transfer completion.
- */
-static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
-{
- u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
- u32 epnum, status, size;
-
- WARN_ON(using_dma(hsotg));
-
- epnum = grxstsr & S3C_GRXSTS_EPNum_MASK;
- status = grxstsr & S3C_GRXSTS_PktSts_MASK;
-
- size = grxstsr & S3C_GRXSTS_ByteCnt_MASK;
- size >>= S3C_GRXSTS_ByteCnt_SHIFT;
-
- if (1)
- dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
- __func__, grxstsr, size, epnum);
-
-#define __status(x) ((x) >> S3C_GRXSTS_PktSts_SHIFT)
-
- switch (status >> S3C_GRXSTS_PktSts_SHIFT) {
- case __status(S3C_GRXSTS_PktSts_GlobalOutNAK):
- dev_dbg(hsotg->dev, "GlobalOutNAK\n");
- break;
-
- case __status(S3C_GRXSTS_PktSts_OutDone):
- dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
- s3c_hsotg_read_frameno(hsotg));
-
- if (!using_dma(hsotg))
- s3c_hsotg_handle_outdone(hsotg, epnum, false);
- break;
-
- case __status(S3C_GRXSTS_PktSts_SetupDone):
- dev_dbg(hsotg->dev,
- "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
- s3c_hsotg_read_frameno(hsotg),
- readl(hsotg->regs + S3C_DOEPCTL(0)));
-
- s3c_hsotg_handle_outdone(hsotg, epnum, true);
- break;
-
- case __status(S3C_GRXSTS_PktSts_OutRX):
- s3c_hsotg_rx_data(hsotg, epnum, size);
- break;
-
- case __status(S3C_GRXSTS_PktSts_SetupRX):
- dev_dbg(hsotg->dev,
- "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
- s3c_hsotg_read_frameno(hsotg),
- readl(hsotg->regs + S3C_DOEPCTL(0)));
-
- s3c_hsotg_rx_data(hsotg, epnum, size);
- break;
-
- default:
- dev_warn(hsotg->dev, "%s: unknown status %08x\n",
- __func__, grxstsr);
-
- s3c_hsotg_dump(hsotg);
- break;
- }
-}
-
-/**
- * s3c_hsotg_ep0_mps - turn max packet size into register setting
- * @mps: The maximum packet size in bytes.
-*/
-static u32 s3c_hsotg_ep0_mps(unsigned int mps)
-{
- switch (mps) {
- case 64:
- return S3C_D0EPCTL_MPS_64;
- case 32:
- return S3C_D0EPCTL_MPS_32;
- case 16:
- return S3C_D0EPCTL_MPS_16;
- case 8:
- return S3C_D0EPCTL_MPS_8;
- }
-
- /* bad max packet size, warn and return invalid result */
- WARN_ON(1);
- return (u32)-1;
-}
-
-/**
- * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
- * @hsotg: The driver state.
- * @ep: The index number of the endpoint
- * @mps: The maximum packet size in bytes
- *
- * Configure the maximum packet size for the given endpoint, updating
- * the hardware control registers to reflect this.
- */
-static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
- unsigned int ep, unsigned int mps)
-{
- struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
- void __iomem *regs = hsotg->regs;
- u32 mpsval;
- u32 reg;
-
- if (ep == 0) {
- /* EP0 is a special case */
- mpsval = s3c_hsotg_ep0_mps(mps);
- if (mpsval > 3)
- goto bad_mps;
- } else {
- if (mps >= S3C_DxEPCTL_MPS_LIMIT+1)
- goto bad_mps;
-
- mpsval = mps;
- }
-
- hs_ep->ep.maxpacket = mps;
-
- /* update both the in and out endpoint controldir_ registers, even
- * if one of the directions may not be in use. */
-
- reg = readl(regs + S3C_DIEPCTL(ep));
- reg &= ~S3C_DxEPCTL_MPS_MASK;
- reg |= mpsval;
- writel(reg, regs + S3C_DIEPCTL(ep));
-
- reg = readl(regs + S3C_DOEPCTL(ep));
- reg &= ~S3C_DxEPCTL_MPS_MASK;
- reg |= mpsval;
- writel(reg, regs + S3C_DOEPCTL(ep));
-
- return;
-
-bad_mps:
- dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
-}
-
-/**
- * s3c_hsotg_txfifo_flush - flush Tx FIFO
- * @hsotg: The driver state
- * @idx: The index for the endpoint (0..15)
- */
-static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
-{
- int timeout;
- int val;
-
- writel(S3C_GRSTCTL_TxFNum(idx) | S3C_GRSTCTL_TxFFlsh,
- hsotg->regs + S3C_GRSTCTL);
-
- /* wait until the fifo is flushed */
- timeout = 100;
-
- while (1) {
- val = readl(hsotg->regs + S3C_GRSTCTL);
-
- if ((val & (S3C_GRSTCTL_TxFFlsh)) == 0)
- break;
-
- if (--timeout == 0) {
- dev_err(hsotg->dev,
- "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
- __func__, val);
- }
-
- udelay(1);
- }
-}
-
-/**
- * s3c_hsotg_trytx - check to see if anything needs transmitting
- * @hsotg: The driver state
- * @hs_ep: The driver endpoint to check.
- *
- * Check to see if there is a request that has data to send, and if so
- * make an attempt to write data into the FIFO.
- */
-static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *hs_ep)
-{
- struct s3c_hsotg_req *hs_req = hs_ep->req;
-
- if (!hs_ep->dir_in || !hs_req)
- return 0;
-
- if (hs_req->req.actual < hs_req->req.length) {
- dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
- hs_ep->index);
- return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
- }
-
- return 0;
-}
-
-/**
- * s3c_hsotg_complete_in - complete IN transfer
- * @hsotg: The device state.
- * @hs_ep: The endpoint that has just completed.
- *
- * An IN transfer has been completed, update the transfer's state and then
- * call the relevant completion routines.
- */
-static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *hs_ep)
-{
- struct s3c_hsotg_req *hs_req = hs_ep->req;
- u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
- int size_left, size_done;
-
- if (!hs_req) {
- dev_dbg(hsotg->dev, "XferCompl but no req\n");
- return;
- }
-
- /* Calculate the size of the transfer by checking how much is left
- * in the endpoint size register and then working it out from
- * the amount we loaded for the transfer.
- *
- * We do this even for DMA, as the transfer may have incremented
- * past the end of the buffer (DMA transfers are always 32bit
- * aligned).
- */
-
- size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
-
- size_done = hs_ep->size_loaded - size_left;
- size_done += hs_ep->last_load;
-
- if (hs_req->req.actual != size_done)
- dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
- __func__, hs_req->req.actual, size_done);
-
- hs_req->req.actual = size_done;
-
- /* if we did all of the transfer, and there is more data left
- * around, then try restarting the rest of the request */
-
- if (!size_left && hs_req->req.actual < hs_req->req.length) {
- dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
- s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
- } else
- s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
-}
-
-/**
- * s3c_hsotg_epint - handle an in/out endpoint interrupt
- * @hsotg: The driver state
- * @idx: The index for the endpoint (0..15)
- * @dir_in: Set if this is an IN endpoint
- *
- * Process and clear any interrupt pending for an individual endpoint
-*/
-static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
- int dir_in)
-{
- struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
- u32 epint_reg = dir_in ? S3C_DIEPINT(idx) : S3C_DOEPINT(idx);
- u32 epctl_reg = dir_in ? S3C_DIEPCTL(idx) : S3C_DOEPCTL(idx);
- u32 epsiz_reg = dir_in ? S3C_DIEPTSIZ(idx) : S3C_DOEPTSIZ(idx);
- u32 ints;
-
- ints = readl(hsotg->regs + epint_reg);
-
- /* Clear endpoint interrupts */
- writel(ints, hsotg->regs + epint_reg);
-
- dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
- __func__, idx, dir_in ? "in" : "out", ints);
-
- if (ints & S3C_DxEPINT_XferCompl) {
- dev_dbg(hsotg->dev,
- "%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
- __func__, readl(hsotg->regs + epctl_reg),
- readl(hsotg->regs + epsiz_reg));
-
- /* we get OutDone from the FIFO, so we only need to look
- * at completing IN requests here */
- if (dir_in) {
- s3c_hsotg_complete_in(hsotg, hs_ep);
-
- if (idx == 0 && !hs_ep->req)
- s3c_hsotg_enqueue_setup(hsotg);
- } else if (using_dma(hsotg)) {
- /* We're using DMA, we need to fire an OutDone here
- * as we ignore the RXFIFO. */
-
- s3c_hsotg_handle_outdone(hsotg, idx, false);
- }
- }
-
- if (ints & S3C_DxEPINT_EPDisbld) {
- dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
-
- if (dir_in) {
- int epctl = readl(hsotg->regs + epctl_reg);
-
- s3c_hsotg_txfifo_flush(hsotg, idx);
-
- if ((epctl & S3C_DxEPCTL_Stall) &&
- (epctl & S3C_DxEPCTL_EPType_Bulk)) {
- int dctl = readl(hsotg->regs + S3C_DCTL);
-
- dctl |= S3C_DCTL_CGNPInNAK;
- writel(dctl, hsotg->regs + S3C_DCTL);
- }
- }
- }
-
- if (ints & S3C_DxEPINT_AHBErr)
- dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
-
- if (ints & S3C_DxEPINT_Setup) { /* Setup or Timeout */
- dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);
-
- if (using_dma(hsotg) && idx == 0) {
- /* this is the notification we've received a
- * setup packet. In non-DMA mode we'd get this
- * from the RXFIFO, instead we need to process
- * the setup here. */
-
- if (dir_in)
- WARN_ON_ONCE(1);
- else
- s3c_hsotg_handle_outdone(hsotg, 0, true);
- }
- }
-
- if (ints & S3C_DxEPINT_Back2BackSetup)
- dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
-
- if (dir_in) {
- /* not sure if this is important, but we'll clear it anyway
- */
- if (ints & S3C_DIEPMSK_INTknTXFEmpMsk) {
- dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
- __func__, idx);
- }
-
- /* this probably means something bad is happening */
- if (ints & S3C_DIEPMSK_INTknEPMisMsk) {
- dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
- __func__, idx);
- }
-
- /* FIFO has space or is empty (see GAHBCFG) */
- if (hsotg->dedicated_fifos &&
- ints & S3C_DIEPMSK_TxFIFOEmpty) {
- dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
- __func__, idx);
- s3c_hsotg_trytx(hsotg, hs_ep);
- }
- }
-}
-
-/**
- * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
- * @hsotg: The device state.
- *
- * Handle updating the device settings after the enumeration phase has
- * been completed.
-*/
-static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
-{
- u32 dsts = readl(hsotg->regs + S3C_DSTS);
- int ep0_mps = 0, ep_mps;
-
- /* This should signal the finish of the enumeration phase
- * of the USB handshaking, so we should now know what rate
- * we connected at. */
-
- dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
-
- /* note, since we're limited by the size of transfer on EP0, and
- * it seems IN transfers must be a even number of packets we do
- * not advertise a 64byte MPS on EP0. */
-
- /* catch both EnumSpd_FS and EnumSpd_FS48 */
- switch (dsts & S3C_DSTS_EnumSpd_MASK) {
- case S3C_DSTS_EnumSpd_FS:
- case S3C_DSTS_EnumSpd_FS48:
- hsotg->gadget.speed = USB_SPEED_FULL;
- dev_info(hsotg->dev, "new device is full-speed\n");
-
- ep0_mps = EP0_MPS_LIMIT;
- ep_mps = 64;
- break;
-
- case S3C_DSTS_EnumSpd_HS:
- dev_info(hsotg->dev, "new device is high-speed\n");
- hsotg->gadget.speed = USB_SPEED_HIGH;
-
- ep0_mps = EP0_MPS_LIMIT;
- ep_mps = 512;
- break;
-
- case S3C_DSTS_EnumSpd_LS:
- hsotg->gadget.speed = USB_SPEED_LOW;
- dev_info(hsotg->dev, "new device is low-speed\n");
-
- /* note, we don't actually support LS in this driver at the
- * moment, and the documentation seems to imply that it isn't
- * supported by the PHYs on some of the devices.
- */
- break;
- }
-
- /* we should now know the maximum packet size for an
- * endpoint, so set the endpoints to a default value. */
-
- if (ep0_mps) {
- int i;
- s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
- for (i = 1; i < S3C_HSOTG_EPS; i++)
- s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
- }
-
- /* ensure after enumeration our EP0 is active */
-
- s3c_hsotg_enqueue_setup(hsotg);
-
- dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
- readl(hsotg->regs + S3C_DIEPCTL0),
- readl(hsotg->regs + S3C_DOEPCTL0));
-}
-
-/**
- * kill_all_requests - remove all requests from the endpoint's queue
- * @hsotg: The device state.
- * @ep: The endpoint the requests may be on.
- * @result: The result code to use.
- * @force: Force removal of any current requests
- *
- * Go through the requests on the given endpoint and mark them
- * completed with the given result code.
- */
-static void kill_all_requests(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *ep,
- int result, bool force)
-{
- struct s3c_hsotg_req *req, *treq;
- unsigned long flags;
-
- spin_lock_irqsave(&ep->lock, flags);
-
- list_for_each_entry_safe(req, treq, &ep->queue, queue) {
- /* currently, we can't do much about an already
- * running request on an in endpoint */
-
- if (ep->req == req && ep->dir_in && !force)
- continue;
-
- s3c_hsotg_complete_request(hsotg, ep, req,
- result);
- }
-
- spin_unlock_irqrestore(&ep->lock, flags);
-}
-
-#define call_gadget(_hs, _entry) \
- if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
- (_hs)->driver && (_hs)->driver->_entry) \
- (_hs)->driver->_entry(&(_hs)->gadget);
-
-/**
- * s3c_hsotg_disconnect_irq - disconnect irq service
- * @hsotg: The device state.
- *
- * A disconnect IRQ has been received, meaning that the host has
- * lost contact with the bus. Remove all current transactions
- * and signal the gadget driver that this has happened.
-*/
-static void s3c_hsotg_disconnect_irq(struct s3c_hsotg *hsotg)
-{
- unsigned ep;
-
- for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
- kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
-
- call_gadget(hsotg, disconnect);
-}
-
-/**
- * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
- * @hsotg: The device state:
- * @periodic: True if this is a periodic FIFO interrupt
- */
-static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
-{
- struct s3c_hsotg_ep *ep;
- int epno, ret;
-
- /* look through for any more data to transmit */
-
- for (epno = 0; epno < S3C_HSOTG_EPS; epno++) {
- ep = &hsotg->eps[epno];
-
- if (!ep->dir_in)
- continue;
-
- if ((periodic && !ep->periodic) ||
- (!periodic && ep->periodic))
- continue;
-
- ret = s3c_hsotg_trytx(hsotg, ep);
- if (ret < 0)
- break;
- }
-}
-
-static struct s3c_hsotg *our_hsotg;
-
-/* IRQ flags which will trigger a retry around the IRQ loop */
-#define IRQ_RETRY_MASK (S3C_GINTSTS_NPTxFEmp | \
- S3C_GINTSTS_PTxFEmp | \
- S3C_GINTSTS_RxFLvl)
-
-/**
- * s3c_hsotg_irq - handle device interrupt
- * @irq: The IRQ number triggered
- * @pw: The pw value when registered the handler.
- */
-static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
-{
- struct s3c_hsotg *hsotg = pw;
- int retry_count = 8;
- u32 gintsts;
- u32 gintmsk;
-
-irq_retry:
- gintsts = readl(hsotg->regs + S3C_GINTSTS);
- gintmsk = readl(hsotg->regs + S3C_GINTMSK);
-
- dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
- __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
-
- gintsts &= gintmsk;
-
- if (gintsts & S3C_GINTSTS_OTGInt) {
- u32 otgint = readl(hsotg->regs + S3C_GOTGINT);
-
- dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);
-
- writel(otgint, hsotg->regs + S3C_GOTGINT);
- }
-
- if (gintsts & S3C_GINTSTS_DisconnInt) {
- dev_dbg(hsotg->dev, "%s: DisconnInt\n", __func__);
- writel(S3C_GINTSTS_DisconnInt, hsotg->regs + S3C_GINTSTS);
-
- s3c_hsotg_disconnect_irq(hsotg);
- }
-
- if (gintsts & S3C_GINTSTS_SessReqInt) {
- dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
- writel(S3C_GINTSTS_SessReqInt, hsotg->regs + S3C_GINTSTS);
- }
-
- if (gintsts & S3C_GINTSTS_EnumDone) {
- writel(S3C_GINTSTS_EnumDone, hsotg->regs + S3C_GINTSTS);
-
- s3c_hsotg_irq_enumdone(hsotg);
- }
-
- if (gintsts & S3C_GINTSTS_ConIDStsChng) {
- dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
- readl(hsotg->regs + S3C_DSTS),
- readl(hsotg->regs + S3C_GOTGCTL));
-
- writel(S3C_GINTSTS_ConIDStsChng, hsotg->regs + S3C_GINTSTS);
- }
-
- if (gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt)) {
- u32 daint = readl(hsotg->regs + S3C_DAINT);
- u32 daint_out = daint >> S3C_DAINT_OutEP_SHIFT;
- u32 daint_in = daint & ~(daint_out << S3C_DAINT_OutEP_SHIFT);
- int ep;
-
- dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
-
- for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
- if (daint_out & 1)
- s3c_hsotg_epint(hsotg, ep, 0);
- }
-
- for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
- if (daint_in & 1)
- s3c_hsotg_epint(hsotg, ep, 1);
- }
- }
-
- if (gintsts & S3C_GINTSTS_USBRst) {
- dev_info(hsotg->dev, "%s: USBRst\n", __func__);
- dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
- readl(hsotg->regs + S3C_GNPTXSTS));
-
- writel(S3C_GINTSTS_USBRst, hsotg->regs + S3C_GINTSTS);
-
- kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);
-
- /* it seems after a reset we can end up with a situation
- * where the TXFIFO still has data in it... the docs
- * suggest resetting all the fifos, so use the init_fifo
- * code to relayout and flush the fifos.
- */
-
- s3c_hsotg_init_fifo(hsotg);
-
- s3c_hsotg_enqueue_setup(hsotg);
- }
-
- /* check both FIFOs */
-
- if (gintsts & S3C_GINTSTS_NPTxFEmp) {
- dev_dbg(hsotg->dev, "NPTxFEmp\n");
-
- /* Disable the interrupt to stop it happening again
- * unless one of these endpoint routines decides that
- * it needs re-enabling */
-
- s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
- s3c_hsotg_irq_fifoempty(hsotg, false);
- }
-
- if (gintsts & S3C_GINTSTS_PTxFEmp) {
- dev_dbg(hsotg->dev, "PTxFEmp\n");
-
- /* See note in S3C_GINTSTS_NPTxFEmp */
-
- s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
- s3c_hsotg_irq_fifoempty(hsotg, true);
- }
-
- if (gintsts & S3C_GINTSTS_RxFLvl) {
- /* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
- * we need to retry s3c_hsotg_handle_rx if this is still
- * set. */
-
- s3c_hsotg_handle_rx(hsotg);
- }
-
- if (gintsts & S3C_GINTSTS_ModeMis) {
- dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
- writel(S3C_GINTSTS_ModeMis, hsotg->regs + S3C_GINTSTS);
- }
-
- if (gintsts & S3C_GINTSTS_USBSusp) {
- dev_info(hsotg->dev, "S3C_GINTSTS_USBSusp\n");
- writel(S3C_GINTSTS_USBSusp, hsotg->regs + S3C_GINTSTS);
-
- call_gadget(hsotg, suspend);
- }
-
- if (gintsts & S3C_GINTSTS_WkUpInt) {
- dev_info(hsotg->dev, "S3C_GINTSTS_WkUpIn\n");
- writel(S3C_GINTSTS_WkUpInt, hsotg->regs + S3C_GINTSTS);
-
- call_gadget(hsotg, resume);
- }
-
- if (gintsts & S3C_GINTSTS_ErlySusp) {
- dev_dbg(hsotg->dev, "S3C_GINTSTS_ErlySusp\n");
- writel(S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTSTS);
- }
-
- /* these next two seem to crop-up occasionally causing the core
- * to shutdown the USB transfer, so try clearing them and logging
- * the occurrence. */
-
- if (gintsts & S3C_GINTSTS_GOUTNakEff) {
- dev_info(hsotg->dev, "GOUTNakEff triggered\n");
-
- writel(S3C_DCTL_CGOUTNak, hsotg->regs + S3C_DCTL);
-
- s3c_hsotg_dump(hsotg);
- }
-
- if (gintsts & S3C_GINTSTS_GINNakEff) {
- dev_info(hsotg->dev, "GINNakEff triggered\n");
-
- writel(S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL);
-
- s3c_hsotg_dump(hsotg);
- }
-
- /* if we've had fifo events, we should try and go around the
- * loop again to see if there's any point in returning yet. */
-
- if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
- goto irq_retry;
-
- return IRQ_HANDLED;
-}
-
-/**
- * s3c_hsotg_ep_enable - enable the given endpoint
- * @ep: The USB endpint to configure
- * @desc: The USB endpoint descriptor to configure with.
- *
- * This is called from the USB gadget code's usb_ep_enable().
-*/
-static int s3c_hsotg_ep_enable(struct usb_ep *ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hsotg = hs_ep->parent;
- unsigned long flags;
- int index = hs_ep->index;
- u32 epctrl_reg;
- u32 epctrl;
- u32 mps;
- int dir_in;
- int ret = 0;
-
- dev_dbg(hsotg->dev,
- "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
- __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
- desc->wMaxPacketSize, desc->bInterval);
-
- /* not to be called for EP0 */
- WARN_ON(index == 0);
-
- dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
- if (dir_in != hs_ep->dir_in) {
- dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
- return -EINVAL;
- }
-
- mps = le16_to_cpu(desc->wMaxPacketSize);
-
- /* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
-
- epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
- epctrl = readl(hsotg->regs + epctrl_reg);
-
- dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
- __func__, epctrl, epctrl_reg);
-
- spin_lock_irqsave(&hs_ep->lock, flags);
-
- epctrl &= ~(S3C_DxEPCTL_EPType_MASK | S3C_DxEPCTL_MPS_MASK);
- epctrl |= S3C_DxEPCTL_MPS(mps);
-
- /* mark the endpoint as active, otherwise the core may ignore
- * transactions entirely for this endpoint */
- epctrl |= S3C_DxEPCTL_USBActEp;
-
- /* set the NAK status on the endpoint, otherwise we might try and
- * do something with data that we've yet got a request to process
- * since the RXFIFO will take data for an endpoint even if the
- * size register hasn't been set.
- */
-
- epctrl |= S3C_DxEPCTL_SNAK;
-
- /* update the endpoint state */
- hs_ep->ep.maxpacket = mps;
-
- /* default, set to non-periodic */
- hs_ep->periodic = 0;
-
- switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
- case USB_ENDPOINT_XFER_ISOC:
- dev_err(hsotg->dev, "no current ISOC support\n");
- ret = -EINVAL;
- goto out;
-
- case USB_ENDPOINT_XFER_BULK:
- epctrl |= S3C_DxEPCTL_EPType_Bulk;
- break;
-
- case USB_ENDPOINT_XFER_INT:
- if (dir_in) {
- /* Allocate our TxFNum by simply using the index
- * of the endpoint for the moment. We could do
- * something better if the host indicates how
- * many FIFOs we are expecting to use. */
-
- hs_ep->periodic = 1;
- epctrl |= S3C_DxEPCTL_TxFNum(index);
- }
-
- epctrl |= S3C_DxEPCTL_EPType_Intterupt;
- break;
-
- case USB_ENDPOINT_XFER_CONTROL:
- epctrl |= S3C_DxEPCTL_EPType_Control;
- break;
- }
-
- /* if the hardware has dedicated fifos, we must give each IN EP
- * a unique tx-fifo even if it is non-periodic.
- */
- if (dir_in && hsotg->dedicated_fifos)
- epctrl |= S3C_DxEPCTL_TxFNum(index);
-
- /* for non control endpoints, set PID to D0 */
- if (index)
- epctrl |= S3C_DxEPCTL_SetD0PID;
-
- dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
- __func__, epctrl);
-
- writel(epctrl, hsotg->regs + epctrl_reg);
- dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
- __func__, readl(hsotg->regs + epctrl_reg));
-
- /* enable the endpoint interrupt */
- s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
-
-out:
- spin_unlock_irqrestore(&hs_ep->lock, flags);
- return ret;
-}
-
-static int s3c_hsotg_ep_disable(struct usb_ep *ep)
-{
- struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hsotg = hs_ep->parent;
- int dir_in = hs_ep->dir_in;
- int index = hs_ep->index;
- unsigned long flags;
- u32 epctrl_reg;
- u32 ctrl;
-
- dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
-
- if (ep == &hsotg->eps[0].ep) {
- dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
- return -EINVAL;
- }
-
- epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
-
- /* terminate all requests with shutdown */
- kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
-
- spin_lock_irqsave(&hs_ep->lock, flags);
-
- ctrl = readl(hsotg->regs + epctrl_reg);
- ctrl &= ~S3C_DxEPCTL_EPEna;
- ctrl &= ~S3C_DxEPCTL_USBActEp;
- ctrl |= S3C_DxEPCTL_SNAK;
-
- dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
- writel(ctrl, hsotg->regs + epctrl_reg);
-
- /* disable endpoint interrupts */
- s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
-
- spin_unlock_irqrestore(&hs_ep->lock, flags);
- return 0;
-}
-
-/**
- * on_list - check request is on the given endpoint
- * @ep: The endpoint to check.
- * @test: The request to test if it is on the endpoint.
-*/
-static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
-{
- struct s3c_hsotg_req *req, *treq;
-
- list_for_each_entry_safe(req, treq, &ep->queue, queue) {
- if (req == test)
- return true;
- }
-
- return false;
-}
-
-static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
-{
- struct s3c_hsotg_req *hs_req = our_req(req);
- struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hs = hs_ep->parent;
- unsigned long flags;
-
- dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
-
- spin_lock_irqsave(&hs_ep->lock, flags);
-
- if (!on_list(hs_ep, hs_req)) {
- spin_unlock_irqrestore(&hs_ep->lock, flags);
- return -EINVAL;
- }
-
- s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
- spin_unlock_irqrestore(&hs_ep->lock, flags);
-
- return 0;
-}
-
-static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
-{
- struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hs = hs_ep->parent;
- int index = hs_ep->index;
- unsigned long irqflags;
- u32 epreg;
- u32 epctl;
- u32 xfertype;
-
- dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
-
- spin_lock_irqsave(&hs_ep->lock, irqflags);
-
- /* write both IN and OUT control registers */
-
- epreg = S3C_DIEPCTL(index);
- epctl = readl(hs->regs + epreg);
-
- if (value) {
- epctl |= S3C_DxEPCTL_Stall + S3C_DxEPCTL_SNAK;
- if (epctl & S3C_DxEPCTL_EPEna)
- epctl |= S3C_DxEPCTL_EPDis;
- } else {
- epctl &= ~S3C_DxEPCTL_Stall;
- xfertype = epctl & S3C_DxEPCTL_EPType_MASK;
- if (xfertype == S3C_DxEPCTL_EPType_Bulk ||
- xfertype == S3C_DxEPCTL_EPType_Intterupt)
- epctl |= S3C_DxEPCTL_SetD0PID;
- }
-
- writel(epctl, hs->regs + epreg);
-
- epreg = S3C_DOEPCTL(index);
- epctl = readl(hs->regs + epreg);
-
- if (value)
- epctl |= S3C_DxEPCTL_Stall;
- else {
- epctl &= ~S3C_DxEPCTL_Stall;
- xfertype = epctl & S3C_DxEPCTL_EPType_MASK;
- if (xfertype == S3C_DxEPCTL_EPType_Bulk ||
- xfertype == S3C_DxEPCTL_EPType_Intterupt)
- epctl |= S3C_DxEPCTL_SetD0PID;
- }
-
- writel(epctl, hs->regs + epreg);
-
- spin_unlock_irqrestore(&hs_ep->lock, irqflags);
-
- return 0;
-}
-
-static struct usb_ep_ops s3c_hsotg_ep_ops = {
- .enable = s3c_hsotg_ep_enable,
- .disable = s3c_hsotg_ep_disable,
- .alloc_request = s3c_hsotg_ep_alloc_request,
- .free_request = s3c_hsotg_ep_free_request,
- .queue = s3c_hsotg_ep_queue,
- .dequeue = s3c_hsotg_ep_dequeue,
- .set_halt = s3c_hsotg_ep_sethalt,
- /* note, don't believe we have any call for the fifo routines */
-};
-
-/**
- * s3c_hsotg_corereset - issue softreset to the core
- * @hsotg: The device state
- *
- * Issue a soft reset to the core, and await the core finishing it.
-*/
-static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
-{
- int timeout;
- u32 grstctl;
-
- dev_dbg(hsotg->dev, "resetting core\n");
-
- /* issue soft reset */
- writel(S3C_GRSTCTL_CSftRst, hsotg->regs + S3C_GRSTCTL);
-
- timeout = 1000;
- do {
- grstctl = readl(hsotg->regs + S3C_GRSTCTL);
- } while ((grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);
-
- if (grstctl & S3C_GRSTCTL_CSftRst) {
- dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
- return -EINVAL;
- }
-
- timeout = 1000;
-
- while (1) {
- u32 grstctl = readl(hsotg->regs + S3C_GRSTCTL);
-
- if (timeout-- < 0) {
- dev_info(hsotg->dev,
- "%s: reset failed, GRSTCTL=%08x\n",
- __func__, grstctl);
- return -ETIMEDOUT;
- }
-
- if (!(grstctl & S3C_GRSTCTL_AHBIdle))
- continue;
-
- break; /* reset done */
- }
-
- dev_dbg(hsotg->dev, "reset successful\n");
- return 0;
-}
-
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
-{
- struct s3c_hsotg *hsotg = our_hsotg;
- int ret;
-
- if (!hsotg) {
- printk(KERN_ERR "%s: called with no device\n", __func__);
- return -ENODEV;
- }
-
- if (!driver) {
- dev_err(hsotg->dev, "%s: no driver\n", __func__);
- return -EINVAL;
- }
-
- if (driver->speed != USB_SPEED_HIGH &&
- driver->speed != USB_SPEED_FULL) {
- dev_err(hsotg->dev, "%s: bad speed\n", __func__);
- }
-
- if (!bind || !driver->setup) {
- dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
- return -EINVAL;
- }
-
- WARN_ON(hsotg->driver);
-
- driver->driver.bus = NULL;
- hsotg->driver = driver;
- hsotg->gadget.dev.driver = &driver->driver;
- hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;
- hsotg->gadget.speed = USB_SPEED_UNKNOWN;
-
- ret = device_add(&hsotg->gadget.dev);
- if (ret) {
- dev_err(hsotg->dev, "failed to register gadget device\n");
- goto err;
- }
-
- ret = bind(&hsotg->gadget);
- if (ret) {
- dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name);
-
- hsotg->gadget.dev.driver = NULL;
- hsotg->driver = NULL;
- goto err;
- }
-
- /* we must now enable ep0 ready for host detection and then
- * set configuration. */
-
- s3c_hsotg_corereset(hsotg);
-
- /* set the PLL on, remove the HNP/SRP and set the PHY */
- writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) |
- (0x5 << 10), hsotg->regs + S3C_GUSBCFG);
-
- /* looks like soft-reset changes state of FIFOs */
- s3c_hsotg_init_fifo(hsotg);
-
- __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
-
- writel(1 << 18 | S3C_DCFG_DevSpd_HS, hsotg->regs + S3C_DCFG);
-
- /* Clear any pending OTG interrupts */
- writel(0xffffffff, hsotg->regs + S3C_GOTGINT);
-
- /* Clear any pending interrupts */
- writel(0xffffffff, hsotg->regs + S3C_GINTSTS);
-
- writel(S3C_GINTSTS_DisconnInt | S3C_GINTSTS_SessReqInt |
- S3C_GINTSTS_ConIDStsChng | S3C_GINTSTS_USBRst |
- S3C_GINTSTS_EnumDone | S3C_GINTSTS_OTGInt |
- S3C_GINTSTS_USBSusp | S3C_GINTSTS_WkUpInt |
- S3C_GINTSTS_GOUTNakEff | S3C_GINTSTS_GINNakEff |
- S3C_GINTSTS_ErlySusp,
- hsotg->regs + S3C_GINTMSK);
-
- if (using_dma(hsotg))
- writel(S3C_GAHBCFG_GlblIntrEn | S3C_GAHBCFG_DMAEn |
- S3C_GAHBCFG_HBstLen_Incr4,
- hsotg->regs + S3C_GAHBCFG);
- else
- writel(S3C_GAHBCFG_GlblIntrEn, hsotg->regs + S3C_GAHBCFG);
-
- /* Enabling INTknTXFEmpMsk here seems to be a big mistake, we end
- * up being flooded with interrupts if the host is polling the
- * endpoint to try and read data. */
-
- writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
- S3C_DIEPMSK_INTknEPMisMsk |
- S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk |
- ((hsotg->dedicated_fifos) ? S3C_DIEPMSK_TxFIFOEmpty : 0),
- hsotg->regs + S3C_DIEPMSK);
-
- /* don't need XferCompl, we get that from RXFIFO in slave mode. In
- * DMA mode we may need this. */
- writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
- S3C_DOEPMSK_EPDisbldMsk |
- (using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
- S3C_DIEPMSK_TimeOUTMsk) : 0),
- hsotg->regs + S3C_DOEPMSK);
-
- writel(0, hsotg->regs + S3C_DAINTMSK);
-
- dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
- readl(hsotg->regs + S3C_DIEPCTL0),
- readl(hsotg->regs + S3C_DOEPCTL0));
-
- /* enable in and out endpoint interrupts */
- s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);
-
- /* Enable the RXFIFO when in slave mode, as this is how we collect
- * the data. In DMA mode, we get events from the FIFO but also
- * things we cannot process, so do not use it. */
- if (!using_dma(hsotg))
- s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_RxFLvl);
-
- /* Enable interrupts for EP0 in and out */
- s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
- s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
-
- __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
- udelay(10); /* see openiboot */
- __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
-
- dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
-
- /* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by
- writing to the EPCTL register.. */
-
- /* set to read 1 8byte packet */
- writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
- S3C_DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);
-
- writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
- S3C_DxEPCTL_CNAK | S3C_DxEPCTL_EPEna |
- S3C_DxEPCTL_USBActEp,
- hsotg->regs + S3C_DOEPCTL0);
-
- /* enable, but don't activate EP0in */
- writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
- S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DIEPCTL0);
-
- s3c_hsotg_enqueue_setup(hsotg);
-
- dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
- readl(hsotg->regs + S3C_DIEPCTL0),
- readl(hsotg->regs + S3C_DOEPCTL0));
-
- /* clear global NAKs */
- writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,
- hsotg->regs + S3C_DCTL);
-
- /* must be at-least 3ms to allow bus to see disconnect */
- msleep(3);
-
- /* remove the soft-disconnect and let's go */
- __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
-
- /* report to the user, and return */
-
- dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
- return 0;
-
-err:
- hsotg->driver = NULL;
- hsotg->gadget.dev.driver = NULL;
- return ret;
-}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
-{
- struct s3c_hsotg *hsotg = our_hsotg;
- int ep;
-
- if (!hsotg)
- return -ENODEV;
-
- if (!driver || driver != hsotg->driver || !driver->unbind)
- return -EINVAL;
-
- /* all endpoints should be shutdown */
- for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
- s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
-
- call_gadget(hsotg, disconnect);
-
- driver->unbind(&hsotg->gadget);
- hsotg->driver = NULL;
- hsotg->gadget.speed = USB_SPEED_UNKNOWN;
-
- device_del(&hsotg->gadget.dev);
-
- dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
- driver->driver.name);
-
- return 0;
-}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
-static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
-{
- return s3c_hsotg_read_frameno(to_hsotg(gadget));
-}
-
-static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
- .get_frame = s3c_hsotg_gadget_getframe,
-};
-
-/**
- * s3c_hsotg_initep - initialise a single endpoint
- * @hsotg: The device state.
- * @hs_ep: The endpoint to be initialised.
- * @epnum: The endpoint number
- *
- * Initialise the given endpoint (as part of the probe and device state
- * creation) to give to the gadget driver. Setup the endpoint name, any
- * direction information and other state that may be required.
- */
-static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
- struct s3c_hsotg_ep *hs_ep,
- int epnum)
-{
- u32 ptxfifo;
- char *dir;
-
- if (epnum == 0)
- dir = "";
- else if ((epnum % 2) == 0) {
- dir = "out";
- } else {
- dir = "in";
- hs_ep->dir_in = 1;
- }
-
- hs_ep->index = epnum;
-
- snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
-
- INIT_LIST_HEAD(&hs_ep->queue);
- INIT_LIST_HEAD(&hs_ep->ep.ep_list);
-
- spin_lock_init(&hs_ep->lock);
-
- /* add to the list of endpoints known by the gadget driver */
- if (epnum)
- list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
-
- hs_ep->parent = hsotg;
- hs_ep->ep.name = hs_ep->name;
- hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
- hs_ep->ep.ops = &s3c_hsotg_ep_ops;
-
- /* Read the FIFO size for the Periodic TX FIFO, even if we're
- * an OUT endpoint, we may as well do this if in future the
- * code is changed to make each endpoint's direction changeable.
- */
-
- ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum));
- hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo) * 4;
-
- /* if we're using dma, we need to set the next-endpoint pointer
- * to be something valid.
- */
-
- if (using_dma(hsotg)) {
- u32 next = S3C_DxEPCTL_NextEp((epnum + 1) % 15);
- writel(next, hsotg->regs + S3C_DIEPCTL(epnum));
- writel(next, hsotg->regs + S3C_DOEPCTL(epnum));
- }
-}
-
-/**
- * s3c_hsotg_otgreset - reset the OtG phy block
- * @hsotg: The host state.
- *
- * Power up the phy, set the basic configuration and start the PHY.
- */
-static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)
-{
- struct clk *xusbxti;
- u32 pwr, osc;
-
- pwr = readl(S3C_PHYPWR);
- pwr &= ~0x19;
- writel(pwr, S3C_PHYPWR);
- mdelay(1);
-
- osc = hsotg->plat->is_osc ? S3C_PHYCLK_EXT_OSC : 0;
-
- xusbxti = clk_get(hsotg->dev, "xusbxti");
- if (xusbxti && !IS_ERR(xusbxti)) {
- switch (clk_get_rate(xusbxti)) {
- case 12*MHZ:
- osc |= S3C_PHYCLK_CLKSEL_12M;
- break;
- case 24*MHZ:
- osc |= S3C_PHYCLK_CLKSEL_24M;
- break;
- default:
- case 48*MHZ:
- /* default reference clock */
- break;
- }
- clk_put(xusbxti);
- }
-
- writel(osc | 0x10, S3C_PHYCLK);
-
- /* issue a full set of resets to the otg and core */
-
- writel(S3C_RSTCON_PHY, S3C_RSTCON);
- udelay(20); /* at-least 10uS */
- writel(0, S3C_RSTCON);
-}
-
-
-static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
-{
- u32 cfg4;
-
- /* unmask subset of endpoint interrupts */
-
- writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
- S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
- hsotg->regs + S3C_DIEPMSK);
-
- writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
- S3C_DOEPMSK_EPDisbldMsk | S3C_DOEPMSK_XferComplMsk,
- hsotg->regs + S3C_DOEPMSK);
-
- writel(0, hsotg->regs + S3C_DAINTMSK);
-
- /* Be in disconnected state until gadget is registered */
- __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
-
- if (0) {
- /* post global nak until we're ready */
- writel(S3C_DCTL_SGNPInNAK | S3C_DCTL_SGOUTNak,
- hsotg->regs + S3C_DCTL);
- }
-
- /* setup fifos */
-
- dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
- readl(hsotg->regs + S3C_GRXFSIZ),
- readl(hsotg->regs + S3C_GNPTXFSIZ));
-
- s3c_hsotg_init_fifo(hsotg);
-
- /* set the PLL on, remove the HNP/SRP and set the PHY */
- writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10),
- hsotg->regs + S3C_GUSBCFG);
-
- writel(using_dma(hsotg) ? S3C_GAHBCFG_DMAEn : 0x0,
- hsotg->regs + S3C_GAHBCFG);
-
- /* check hardware configuration */
-
- cfg4 = readl(hsotg->regs + 0x50);
- hsotg->dedicated_fifos = (cfg4 >> 25) & 1;
-
- dev_info(hsotg->dev, "%s fifos\n",
- hsotg->dedicated_fifos ? "dedicated" : "shared");
-}
-
-static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
-{
-#ifdef DEBUG
- struct device *dev = hsotg->dev;
- void __iomem *regs = hsotg->regs;
- u32 val;
- int idx;
-
- dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
- readl(regs + S3C_DCFG), readl(regs + S3C_DCTL),
- readl(regs + S3C_DIEPMSK));
-
- dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
- readl(regs + S3C_GAHBCFG), readl(regs + 0x44));
-
- dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
- readl(regs + S3C_GRXFSIZ), readl(regs + S3C_GNPTXFSIZ));
-
- /* show periodic fifo settings */
-
- for (idx = 1; idx <= 15; idx++) {
- val = readl(regs + S3C_DPTXFSIZn(idx));
- dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
- val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
- val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
- }
-
- for (idx = 0; idx < 15; idx++) {
- dev_info(dev,
- "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
- readl(regs + S3C_DIEPCTL(idx)),
- readl(regs + S3C_DIEPTSIZ(idx)),
- readl(regs + S3C_DIEPDMA(idx)));
-
- val = readl(regs + S3C_DOEPCTL(idx));
- dev_info(dev,
- "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
- idx, readl(regs + S3C_DOEPCTL(idx)),
- readl(regs + S3C_DOEPTSIZ(idx)),
- readl(regs + S3C_DOEPDMA(idx)));
-
- }
-
- dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
- readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
-#endif
-}
-
-
-/**
- * state_show - debugfs: show overall driver and device state.
- * @seq: The seq file to write to.
- * @v: Unused parameter.
- *
- * This debugfs entry shows the overall state of the hardware and
- * some general information about each of the endpoints available
- * to the system.
- */
-static int state_show(struct seq_file *seq, void *v)
-{
- struct s3c_hsotg *hsotg = seq->private;
- void __iomem *regs = hsotg->regs;
- int idx;
-
- seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
- readl(regs + S3C_DCFG),
- readl(regs + S3C_DCTL),
- readl(regs + S3C_DSTS));
-
- seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
- readl(regs + S3C_DIEPMSK), readl(regs + S3C_DOEPMSK));
-
- seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
- readl(regs + S3C_GINTMSK),
- readl(regs + S3C_GINTSTS));
-
- seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
- readl(regs + S3C_DAINTMSK),
- readl(regs + S3C_DAINT));
-
- seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
- readl(regs + S3C_GNPTXSTS),
- readl(regs + S3C_GRXSTSR));
-
- seq_printf(seq, "\nEndpoint status:\n");
-
- for (idx = 0; idx < 15; idx++) {
- u32 in, out;
-
- in = readl(regs + S3C_DIEPCTL(idx));
- out = readl(regs + S3C_DOEPCTL(idx));
-
- seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
- idx, in, out);
-
- in = readl(regs + S3C_DIEPTSIZ(idx));
- out = readl(regs + S3C_DOEPTSIZ(idx));
-
- seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
- in, out);
-
- seq_printf(seq, "\n");
- }
-
- return 0;
-}
-
-static int state_open(struct inode *inode, struct file *file)
-{
- return single_open(file, state_show, inode->i_private);
-}
-
-static const struct file_operations state_fops = {
- .owner = THIS_MODULE,
- .open = state_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/**
- * fifo_show - debugfs: show the fifo information
- * @seq: The seq_file to write data to.
- * @v: Unused parameter.
- *
- * Show the FIFO information for the overall fifo and all the
- * periodic transmission FIFOs.
-*/
-static int fifo_show(struct seq_file *seq, void *v)
-{
- struct s3c_hsotg *hsotg = seq->private;
- void __iomem *regs = hsotg->regs;
- u32 val;
- int idx;
-
- seq_printf(seq, "Non-periodic FIFOs:\n");
- seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + S3C_GRXFSIZ));
-
- val = readl(regs + S3C_GNPTXFSIZ);
- seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
- val >> S3C_GNPTXFSIZ_NPTxFDep_SHIFT,
- val & S3C_GNPTXFSIZ_NPTxFStAddr_MASK);
-
- seq_printf(seq, "\nPeriodic TXFIFOs:\n");
-
- for (idx = 1; idx <= 15; idx++) {
- val = readl(regs + S3C_DPTXFSIZn(idx));
-
- seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
- val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
- val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
- }
-
- return 0;
-}
-
-static int fifo_open(struct inode *inode, struct file *file)
-{
- return single_open(file, fifo_show, inode->i_private);
-}
-
-static const struct file_operations fifo_fops = {
- .owner = THIS_MODULE,
- .open = fifo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-static const char *decode_direction(int is_in)
-{
- return is_in ? "in" : "out";
-}
-
-/**
- * ep_show - debugfs: show the state of an endpoint.
- * @seq: The seq_file to write data to.
- * @v: Unused parameter.
- *
- * This debugfs entry shows the state of the given endpoint (one is
- * registered for each available).
-*/
-static int ep_show(struct seq_file *seq, void *v)
-{
- struct s3c_hsotg_ep *ep = seq->private;
- struct s3c_hsotg *hsotg = ep->parent;
- struct s3c_hsotg_req *req;
- void __iomem *regs = hsotg->regs;
- int index = ep->index;
- int show_limit = 15;
- unsigned long flags;
-
- seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
- ep->index, ep->ep.name, decode_direction(ep->dir_in));
-
- /* first show the register state */
-
- seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
- readl(regs + S3C_DIEPCTL(index)),
- readl(regs + S3C_DOEPCTL(index)));
-
- seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
- readl(regs + S3C_DIEPDMA(index)),
- readl(regs + S3C_DOEPDMA(index)));
-
- seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
- readl(regs + S3C_DIEPINT(index)),
- readl(regs + S3C_DOEPINT(index)));
-
- seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
- readl(regs + S3C_DIEPTSIZ(index)),
- readl(regs + S3C_DOEPTSIZ(index)));
-
- seq_printf(seq, "\n");
- seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
- seq_printf(seq, "total_data=%ld\n", ep->total_data);
-
- seq_printf(seq, "request list (%p,%p):\n",
- ep->queue.next, ep->queue.prev);
-
- spin_lock_irqsave(&ep->lock, flags);
-
- list_for_each_entry(req, &ep->queue, queue) {
- if (--show_limit < 0) {
- seq_printf(seq, "not showing more requests...\n");
- break;
- }
-
- seq_printf(seq, "%c req %p: %d bytes @%p, ",
- req == ep->req ? '*' : ' ',
- req, req->req.length, req->req.buf);
- seq_printf(seq, "%d done, res %d\n",
- req->req.actual, req->req.status);
- }
-
- spin_unlock_irqrestore(&ep->lock, flags);
-
- return 0;
-}
-
-static int ep_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ep_show, inode->i_private);
-}
-
-static const struct file_operations ep_fops = {
- .owner = THIS_MODULE,
- .open = ep_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/**
- * s3c_hsotg_create_debug - create debugfs directory and files
- * @hsotg: The driver state
- *
- * Create the debugfs files to allow the user to get information
- * about the state of the system. The directory name is created
- * with the same name as the device itself, in case we end up
- * with multiple blocks in future systems.
-*/
-static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
-{
- struct dentry *root;
- unsigned epidx;
-
- root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
- hsotg->debug_root = root;
- if (IS_ERR(root)) {
- dev_err(hsotg->dev, "cannot create debug root\n");
- return;
- }
-
- /* create general state file */
-
- hsotg->debug_file = debugfs_create_file("state", 0444, root,
- hsotg, &state_fops);
-
- if (IS_ERR(hsotg->debug_file))
- dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
-
- hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
- hsotg, &fifo_fops);
-
- if (IS_ERR(hsotg->debug_fifo))
- dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
-
- /* create one file for each endpoint */
-
- for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
- struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
-
- ep->debugfs = debugfs_create_file(ep->name, 0444,
- root, ep, &ep_fops);
-
- if (IS_ERR(ep->debugfs))
- dev_err(hsotg->dev, "failed to create %s debug file\n",
- ep->name);
- }
-}
-
-/**
- * s3c_hsotg_delete_debug - cleanup debugfs entries
- * @hsotg: The driver state
- *
- * Cleanup (remove) the debugfs files for use on module exit.
-*/
-static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
-{
- unsigned epidx;
-
- for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
- struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
- debugfs_remove(ep->debugfs);
- }
-
- debugfs_remove(hsotg->debug_file);
- debugfs_remove(hsotg->debug_fifo);
- debugfs_remove(hsotg->debug_root);
-}
-
-/**
- * s3c_hsotg_gate - set the hardware gate for the block
- * @pdev: The device we bound to
- * @on: On or off.
- *
- * Set the hardware gate setting into the block. If we end up on
- * something other than an S3C64XX, then we might need to change this
- * to using a platform data callback, or some other mechanism.
- */
-static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
-{
- unsigned long flags;
- u32 others;
-
- local_irq_save(flags);
-
- others = __raw_readl(S3C64XX_OTHERS);
- if (on)
- others |= S3C64XX_OTHERS_USBMASK;
- else
- others &= ~S3C64XX_OTHERS_USBMASK;
- __raw_writel(others, S3C64XX_OTHERS);
-
- local_irq_restore(flags);
-}
-
-static struct s3c_hsotg_plat s3c_hsotg_default_pdata;
-
-static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
-{
- struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
- struct device *dev = &pdev->dev;
- struct s3c_hsotg *hsotg;
- struct resource *res;
- int epnum;
- int ret;
-
- if (!plat)
- plat = &s3c_hsotg_default_pdata;
-
- hsotg = kzalloc(sizeof(struct s3c_hsotg) +
- sizeof(struct s3c_hsotg_ep) * S3C_HSOTG_EPS,
- GFP_KERNEL);
- if (!hsotg) {
- dev_err(dev, "cannot get memory\n");
- return -ENOMEM;
- }
-
- hsotg->dev = dev;
- hsotg->plat = plat;
-
- hsotg->clk = clk_get(&pdev->dev, "otg");
- if (IS_ERR(hsotg->clk)) {
- dev_err(dev, "cannot get otg clock\n");
- ret = PTR_ERR(hsotg->clk);
- goto err_mem;
- }
-
- platform_set_drvdata(pdev, hsotg);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "cannot find register resource 0\n");
- ret = -EINVAL;
- goto err_clk;
- }
-
- hsotg->regs_res = request_mem_region(res->start, resource_size(res),
- dev_name(dev));
- if (!hsotg->regs_res) {
- dev_err(dev, "cannot reserve registers\n");
- ret = -ENOENT;
- goto err_clk;
- }
-
- hsotg->regs = ioremap(res->start, resource_size(res));
- if (!hsotg->regs) {
- dev_err(dev, "cannot map registers\n");
- ret = -ENXIO;
- goto err_regs_res;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "cannot find IRQ\n");
- goto err_regs;
- }
-
- hsotg->irq = ret;
-
- ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg);
- if (ret < 0) {
- dev_err(dev, "cannot claim IRQ\n");
- goto err_regs;
- }
-
- dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
-
- device_initialize(&hsotg->gadget.dev);
-
- dev_set_name(&hsotg->gadget.dev, "gadget");
-
- hsotg->gadget.is_dualspeed = 1;
- hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
- hsotg->gadget.name = dev_name(dev);
-
- hsotg->gadget.dev.parent = dev;
- hsotg->gadget.dev.dma_mask = dev->dma_mask;
-
- /* setup endpoint information */
-
- INIT_LIST_HEAD(&hsotg->gadget.ep_list);
- hsotg->gadget.ep0 = &hsotg->eps[0].ep;
-
- /* allocate EP0 request */
-
- hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
- GFP_KERNEL);
- if (!hsotg->ctrl_req) {
- dev_err(dev, "failed to allocate ctrl req\n");
- goto err_regs;
- }
-
- /* reset the system */
-
- clk_enable(hsotg->clk);
-
- s3c_hsotg_gate(pdev, true);
-
- s3c_hsotg_otgreset(hsotg);
- s3c_hsotg_corereset(hsotg);
- s3c_hsotg_init(hsotg);
-
- /* initialise the endpoints now the core has been initialised */
- for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++)
- s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
-
- s3c_hsotg_create_debug(hsotg);
-
- s3c_hsotg_dump(hsotg);
-
- our_hsotg = hsotg;
- return 0;
-
-err_regs:
- iounmap(hsotg->regs);
-
-err_regs_res:
- release_resource(hsotg->regs_res);
- kfree(hsotg->regs_res);
-err_clk:
- clk_put(hsotg->clk);
-err_mem:
- kfree(hsotg);
- return ret;
-}
-
-static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
-{
- struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
-
- s3c_hsotg_delete_debug(hsotg);
-
- usb_gadget_unregister_driver(hsotg->driver);
-
- free_irq(hsotg->irq, hsotg);
- iounmap(hsotg->regs);
-
- release_resource(hsotg->regs_res);
- kfree(hsotg->regs_res);
-
- s3c_hsotg_gate(pdev, false);
-
- clk_disable(hsotg->clk);
- clk_put(hsotg->clk);
-
- kfree(hsotg);
- return 0;
-}
-
-#if 1
-#define s3c_hsotg_suspend NULL
-#define s3c_hsotg_resume NULL
-#endif
-
-static struct platform_driver s3c_hsotg_driver = {
- .driver = {
- .name = "s3c-hsotg",
- .owner = THIS_MODULE,
- },
- .probe = s3c_hsotg_probe,
- .remove = __devexit_p(s3c_hsotg_remove),
- .suspend = s3c_hsotg_suspend,
- .resume = s3c_hsotg_resume,
-};
-
-static int __init s3c_hsotg_modinit(void)
-{
- return platform_driver_register(&s3c_hsotg_driver);
-}
-
-static void __exit s3c_hsotg_modexit(void)
-{
- platform_driver_unregister(&s3c_hsotg_driver);
-}
-
-module_init(s3c_hsotg_modinit);
-module_exit(s3c_hsotg_modexit);
-
-MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c-hsotg");
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
deleted file mode 100644
index d5e3e1e5..00000000
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ /dev/null
@@ -1,1352 +0,0 @@
-/* linux/drivers/usb/gadget/s3c-hsudc.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S3C24XX USB 2.0 High-speed USB controller gadget driver
- *
- * The S3C24XX USB 2.0 high-speed USB controller supports upto 9 endpoints.
- * Each endpoint can be configured as either in or out endpoint. Endpoints
- * can be configured for Bulk or Interrupt transfer mode.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/prefetch.h>
-
-#include <mach/regs-s3c2443-clock.h>
-#include <plat/udc.h>
-
-#define S3C_HSUDC_REG(x) (x)
-
-/* Non-Indexed Registers */
-#define S3C_IR S3C_HSUDC_REG(0x00) /* Index Register */
-#define S3C_EIR S3C_HSUDC_REG(0x04) /* EP Intr Status */
-#define S3C_EIR_EP0 (1<<0)
-#define S3C_EIER S3C_HSUDC_REG(0x08) /* EP Intr Enable */
-#define S3C_FAR S3C_HSUDC_REG(0x0c) /* Gadget Address */
-#define S3C_FNR S3C_HSUDC_REG(0x10) /* Frame Number */
-#define S3C_EDR S3C_HSUDC_REG(0x14) /* EP Direction */
-#define S3C_TR S3C_HSUDC_REG(0x18) /* Test Register */
-#define S3C_SSR S3C_HSUDC_REG(0x1c) /* System Status */
-#define S3C_SSR_DTZIEN_EN (0xff8f)
-#define S3C_SSR_ERR (0xff80)
-#define S3C_SSR_VBUSON (1 << 8)
-#define S3C_SSR_HSP (1 << 4)
-#define S3C_SSR_SDE (1 << 3)
-#define S3C_SSR_RESUME (1 << 2)
-#define S3C_SSR_SUSPEND (1 << 1)
-#define S3C_SSR_RESET (1 << 0)
-#define S3C_SCR S3C_HSUDC_REG(0x20) /* System Control */
-#define S3C_SCR_DTZIEN_EN (1 << 14)
-#define S3C_SCR_RRD_EN (1 << 5)
-#define S3C_SCR_SUS_EN (1 << 1)
-#define S3C_SCR_RST_EN (1 << 0)
-#define S3C_EP0SR S3C_HSUDC_REG(0x24) /* EP0 Status */
-#define S3C_EP0SR_EP0_LWO (1 << 6)
-#define S3C_EP0SR_STALL (1 << 4)
-#define S3C_EP0SR_TX_SUCCESS (1 << 1)
-#define S3C_EP0SR_RX_SUCCESS (1 << 0)
-#define S3C_EP0CR S3C_HSUDC_REG(0x28) /* EP0 Control */
-#define S3C_BR(_x) S3C_HSUDC_REG(0x60 + (_x * 4))
-
-/* Indexed Registers */
-#define S3C_ESR S3C_HSUDC_REG(0x2c) /* EPn Status */
-#define S3C_ESR_FLUSH (1 << 6)
-#define S3C_ESR_STALL (1 << 5)
-#define S3C_ESR_LWO (1 << 4)
-#define S3C_ESR_PSIF_ONE (1 << 2)
-#define S3C_ESR_PSIF_TWO (2 << 2)
-#define S3C_ESR_TX_SUCCESS (1 << 1)
-#define S3C_ESR_RX_SUCCESS (1 << 0)
-#define S3C_ECR S3C_HSUDC_REG(0x30) /* EPn Control */
-#define S3C_ECR_DUEN (1 << 7)
-#define S3C_ECR_FLUSH (1 << 6)
-#define S3C_ECR_STALL (1 << 1)
-#define S3C_ECR_IEMS (1 << 0)
-#define S3C_BRCR S3C_HSUDC_REG(0x34) /* Read Count */
-#define S3C_BWCR S3C_HSUDC_REG(0x38) /* Write Count */
-#define S3C_MPR S3C_HSUDC_REG(0x3c) /* Max Pkt Size */
-
-#define WAIT_FOR_SETUP (0)
-#define DATA_STATE_XMIT (1)
-#define DATA_STATE_RECV (2)
-
-/**
- * struct s3c_hsudc_ep - Endpoint representation used by driver.
- * @ep: USB gadget layer representation of device endpoint.
- * @name: Endpoint name (as required by ep autoconfiguration).
- * @dev: Reference to the device controller to which this EP belongs.
- * @desc: Endpoint descriptor obtained from the gadget driver.
- * @queue: Transfer request queue for the endpoint.
- * @stopped: Maintains state of endpoint, set if EP is halted.
- * @bEndpointAddress: EP address (including direction bit).
- * @fifo: Base address of EP FIFO.
- */
-struct s3c_hsudc_ep {
- struct usb_ep ep;
- char name[20];
- struct s3c_hsudc *dev;
- const struct usb_endpoint_descriptor *desc;
- struct list_head queue;
- u8 stopped;
- u8 wedge;
- u8 bEndpointAddress;
- void __iomem *fifo;
-};
-
-/**
- * struct s3c_hsudc_req - Driver encapsulation of USB gadget transfer request.
- * @req: Reference to USB gadget transfer request.
- * @queue: Used for inserting this request to the endpoint request queue.
- */
-struct s3c_hsudc_req {
- struct usb_request req;
- struct list_head queue;
-};
-
-/**
- * struct s3c_hsudc - Driver's abstraction of the device controller.
- * @gadget: Instance of usb_gadget which is referenced by gadget driver.
- * @driver: Reference to currenty active gadget driver.
- * @dev: The device reference used by probe function.
- * @lock: Lock to synchronize the usage of Endpoints (EP's are indexed).
- * @regs: Remapped base address of controller's register space.
- * @mem_rsrc: Device memory resource used for remapping device register space.
- * irq: IRQ number used by the controller.
- * uclk: Reference to the controller clock.
- * ep0state: Current state of EP0.
- * ep: List of endpoints supported by the controller.
- */
-struct s3c_hsudc {
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- struct device *dev;
- struct s3c24xx_hsudc_platdata *pd;
- spinlock_t lock;
- void __iomem *regs;
- struct resource *mem_rsrc;
- int irq;
- struct clk *uclk;
- int ep0state;
- struct s3c_hsudc_ep ep[];
-};
-
-#define ep_maxpacket(_ep) ((_ep)->ep.maxpacket)
-#define ep_is_in(_ep) ((_ep)->bEndpointAddress & USB_DIR_IN)
-#define ep_index(_ep) ((_ep)->bEndpointAddress & \
- USB_ENDPOINT_NUMBER_MASK)
-
-static struct s3c_hsudc *the_controller;
-static const char driver_name[] = "s3c-udc";
-static const char ep0name[] = "ep0-control";
-
-static inline struct s3c_hsudc_req *our_req(struct usb_request *req)
-{
- return container_of(req, struct s3c_hsudc_req, req);
-}
-
-static inline struct s3c_hsudc_ep *our_ep(struct usb_ep *ep)
-{
- return container_of(ep, struct s3c_hsudc_ep, ep);
-}
-
-static inline struct s3c_hsudc *to_hsudc(struct usb_gadget *gadget)
-{
- return container_of(gadget, struct s3c_hsudc, gadget);
-}
-
-static inline void set_index(struct s3c_hsudc *hsudc, int ep_addr)
-{
- ep_addr &= USB_ENDPOINT_NUMBER_MASK;
- writel(ep_addr, hsudc->regs + S3C_IR);
-}
-
-static inline void __orr32(void __iomem *ptr, u32 val)
-{
- writel(readl(ptr) | val, ptr);
-}
-
-static void s3c_hsudc_init_phy(void)
-{
- u32 cfg;
-
- cfg = readl(S3C2443_PWRCFG) | S3C2443_PWRCFG_USBPHY;
- writel(cfg, S3C2443_PWRCFG);
-
- cfg = readl(S3C2443_URSTCON);
- cfg |= (S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST);
- writel(cfg, S3C2443_URSTCON);
- mdelay(1);
-
- cfg = readl(S3C2443_URSTCON);
- cfg &= ~(S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST);
- writel(cfg, S3C2443_URSTCON);
-
- cfg = readl(S3C2443_PHYCTRL);
- cfg &= ~(S3C2443_PHYCTRL_CLKSEL | S3C2443_PHYCTRL_DSPORT);
- cfg |= (S3C2443_PHYCTRL_EXTCLK | S3C2443_PHYCTRL_PLLSEL);
- writel(cfg, S3C2443_PHYCTRL);
-
- cfg = readl(S3C2443_PHYPWR);
- cfg &= ~(S3C2443_PHYPWR_FSUSPEND | S3C2443_PHYPWR_PLL_PWRDN |
- S3C2443_PHYPWR_XO_ON | S3C2443_PHYPWR_PLL_REFCLK |
- S3C2443_PHYPWR_ANALOG_PD);
- cfg |= S3C2443_PHYPWR_COMMON_ON;
- writel(cfg, S3C2443_PHYPWR);
-
- cfg = readl(S3C2443_UCLKCON);
- cfg |= (S3C2443_UCLKCON_DETECT_VBUS | S3C2443_UCLKCON_FUNC_CLKEN |
- S3C2443_UCLKCON_TCLKEN);
- writel(cfg, S3C2443_UCLKCON);
-}
-
-static void s3c_hsudc_uninit_phy(void)
-{
- u32 cfg;
-
- cfg = readl(S3C2443_PWRCFG) & ~S3C2443_PWRCFG_USBPHY;
- writel(cfg, S3C2443_PWRCFG);
-
- writel(S3C2443_PHYPWR_FSUSPEND, S3C2443_PHYPWR);
-
- cfg = readl(S3C2443_UCLKCON) & ~S3C2443_UCLKCON_FUNC_CLKEN;
- writel(cfg, S3C2443_UCLKCON);
-}
-
-/**
- * s3c_hsudc_complete_request - Complete a transfer request.
- * @hsep: Endpoint to which the request belongs.
- * @hsreq: Transfer request to be completed.
- * @status: Transfer completion status for the transfer request.
- */
-static void s3c_hsudc_complete_request(struct s3c_hsudc_ep *hsep,
- struct s3c_hsudc_req *hsreq, int status)
-{
- unsigned int stopped = hsep->stopped;
- struct s3c_hsudc *hsudc = hsep->dev;
-
- list_del_init(&hsreq->queue);
- hsreq->req.status = status;
-
- if (!ep_index(hsep)) {
- hsudc->ep0state = WAIT_FOR_SETUP;
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- }
-
- hsep->stopped = 1;
- spin_unlock(&hsudc->lock);
- if (hsreq->req.complete != NULL)
- hsreq->req.complete(&hsep->ep, &hsreq->req);
- spin_lock(&hsudc->lock);
- hsep->stopped = stopped;
-}
-
-/**
- * s3c_hsudc_nuke_ep - Terminate all requests queued for a endpoint.
- * @hsep: Endpoint for which queued requests have to be terminated.
- * @status: Transfer completion status for the transfer request.
- */
-static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status)
-{
- struct s3c_hsudc_req *hsreq;
-
- while (!list_empty(&hsep->queue)) {
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- s3c_hsudc_complete_request(hsep, hsreq, status);
- }
-}
-
-/**
- * s3c_hsudc_stop_activity - Stop activity on all endpoints.
- * @hsudc: Device controller for which EP activity is to be stopped.
- * @driver: Reference to the gadget driver which is currently active.
- *
- * All the endpoints are stopped and any pending transfer requests if any on
- * the endpoint are terminated.
- */
-static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc,
- struct usb_gadget_driver *driver)
-{
- struct s3c_hsudc_ep *hsep;
- int epnum;
-
- hsudc->gadget.speed = USB_SPEED_UNKNOWN;
-
- for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) {
- hsep = &hsudc->ep[epnum];
- hsep->stopped = 1;
- s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
- }
-
- spin_unlock(&hsudc->lock);
- driver->disconnect(&hsudc->gadget);
- spin_lock(&hsudc->lock);
-}
-
-/**
- * s3c_hsudc_read_setup_pkt - Read the received setup packet from EP0 fifo.
- * @hsudc: Device controller from which setup packet is to be read.
- * @buf: The buffer into which the setup packet is read.
- *
- * The setup packet received in the EP0 fifo is read and stored into a
- * given buffer address.
- */
-
-static void s3c_hsudc_read_setup_pkt(struct s3c_hsudc *hsudc, u16 *buf)
-{
- int count;
-
- count = readl(hsudc->regs + S3C_BRCR);
- while (count--)
- *buf++ = (u16)readl(hsudc->regs + S3C_BR(0));
-
- writel(S3C_EP0SR_RX_SUCCESS, hsudc->regs + S3C_EP0SR);
-}
-
-/**
- * s3c_hsudc_write_fifo - Write next chunk of transfer data to EP fifo.
- * @hsep: Endpoint to which the data is to be written.
- * @hsreq: Transfer request from which the next chunk of data is written.
- *
- * Write the next chunk of data from a transfer request to the endpoint FIFO.
- * If the transfer request completes, 1 is returned, otherwise 0 is returned.
- */
-static int s3c_hsudc_write_fifo(struct s3c_hsudc_ep *hsep,
- struct s3c_hsudc_req *hsreq)
-{
- u16 *buf;
- u32 max = ep_maxpacket(hsep);
- u32 count, length;
- bool is_last;
- void __iomem *fifo = hsep->fifo;
-
- buf = hsreq->req.buf + hsreq->req.actual;
- prefetch(buf);
-
- length = hsreq->req.length - hsreq->req.actual;
- length = min(length, max);
- hsreq->req.actual += length;
-
- writel(length, hsep->dev->regs + S3C_BWCR);
- for (count = 0; count < length; count += 2)
- writel(*buf++, fifo);
-
- if (count != max) {
- is_last = true;
- } else {
- if (hsreq->req.length != hsreq->req.actual || hsreq->req.zero)
- is_last = false;
- else
- is_last = true;
- }
-
- if (is_last) {
- s3c_hsudc_complete_request(hsep, hsreq, 0);
- return 1;
- }
-
- return 0;
-}
-
-/**
- * s3c_hsudc_read_fifo - Read the next chunk of data from EP fifo.
- * @hsep: Endpoint from which the data is to be read.
- * @hsreq: Transfer request to which the next chunk of data read is written.
- *
- * Read the next chunk of data from the endpoint FIFO and a write it to the
- * transfer request buffer. If the transfer request completes, 1 is returned,
- * otherwise 0 is returned.
- */
-static int s3c_hsudc_read_fifo(struct s3c_hsudc_ep *hsep,
- struct s3c_hsudc_req *hsreq)
-{
- struct s3c_hsudc *hsudc = hsep->dev;
- u32 csr, offset;
- u16 *buf, word;
- u32 buflen, rcnt, rlen;
- void __iomem *fifo = hsep->fifo;
- u32 is_short = 0;
-
- offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
- csr = readl(hsudc->regs + offset);
- if (!(csr & S3C_ESR_RX_SUCCESS))
- return -EINVAL;
-
- buf = hsreq->req.buf + hsreq->req.actual;
- prefetchw(buf);
- buflen = hsreq->req.length - hsreq->req.actual;
-
- rcnt = readl(hsudc->regs + S3C_BRCR);
- rlen = (csr & S3C_ESR_LWO) ? (rcnt * 2 - 1) : (rcnt * 2);
-
- hsreq->req.actual += min(rlen, buflen);
- is_short = (rlen < hsep->ep.maxpacket);
-
- while (rcnt-- != 0) {
- word = (u16)readl(fifo);
- if (buflen) {
- *buf++ = word;
- buflen--;
- } else {
- hsreq->req.status = -EOVERFLOW;
- }
- }
-
- writel(S3C_ESR_RX_SUCCESS, hsudc->regs + offset);
-
- if (is_short || hsreq->req.actual == hsreq->req.length) {
- s3c_hsudc_complete_request(hsep, hsreq, 0);
- return 1;
- }
-
- return 0;
-}
-
-/**
- * s3c_hsudc_epin_intr - Handle in-endpoint interrupt.
- * @hsudc - Device controller for which the interrupt is to be handled.
- * @ep_idx - Endpoint number on which an interrupt is pending.
- *
- * Handles interrupt for a in-endpoint. The interrupts that are handled are
- * stall and data transmit complete interrupt.
- */
-static void s3c_hsudc_epin_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
-{
- struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
- struct s3c_hsudc_req *hsreq;
- u32 csr;
-
- csr = readl((u32)hsudc->regs + S3C_ESR);
- if (csr & S3C_ESR_STALL) {
- writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
- return;
- }
-
- if (csr & S3C_ESR_TX_SUCCESS) {
- writel(S3C_ESR_TX_SUCCESS, hsudc->regs + S3C_ESR);
- if (list_empty(&hsep->queue))
- return;
-
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- if ((s3c_hsudc_write_fifo(hsep, hsreq) == 0) &&
- (csr & S3C_ESR_PSIF_TWO))
- s3c_hsudc_write_fifo(hsep, hsreq);
- }
-}
-
-/**
- * s3c_hsudc_epout_intr - Handle out-endpoint interrupt.
- * @hsudc - Device controller for which the interrupt is to be handled.
- * @ep_idx - Endpoint number on which an interrupt is pending.
- *
- * Handles interrupt for a out-endpoint. The interrupts that are handled are
- * stall, flush and data ready interrupt.
- */
-static void s3c_hsudc_epout_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
-{
- struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
- struct s3c_hsudc_req *hsreq;
- u32 csr;
-
- csr = readl((u32)hsudc->regs + S3C_ESR);
- if (csr & S3C_ESR_STALL) {
- writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
- return;
- }
-
- if (csr & S3C_ESR_FLUSH) {
- __orr32(hsudc->regs + S3C_ECR, S3C_ECR_FLUSH);
- return;
- }
-
- if (csr & S3C_ESR_RX_SUCCESS) {
- if (list_empty(&hsep->queue))
- return;
-
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- if (((s3c_hsudc_read_fifo(hsep, hsreq)) == 0) &&
- (csr & S3C_ESR_PSIF_TWO))
- s3c_hsudc_read_fifo(hsep, hsreq);
- }
-}
-
-/** s3c_hsudc_set_halt - Set or clear a endpoint halt.
- * @_ep: Endpoint on which halt has to be set or cleared.
- * @value: 1 for setting halt on endpoint, 0 to clear halt.
- *
- * Set or clear endpoint halt. If halt is set, the endpoint is stopped.
- * If halt is cleared, for in-endpoints, if there are any pending
- * transfer requests, transfers are started.
- */
-static int s3c_hsudc_set_halt(struct usb_ep *_ep, int value)
-{
- struct s3c_hsudc_ep *hsep = our_ep(_ep);
- struct s3c_hsudc *hsudc = hsep->dev;
- struct s3c_hsudc_req *hsreq;
- unsigned long irqflags;
- u32 ecr;
- u32 offset;
-
- if (value && ep_is_in(hsep) && !list_empty(&hsep->queue))
- return -EAGAIN;
-
- spin_lock_irqsave(&hsudc->lock, irqflags);
- set_index(hsudc, ep_index(hsep));
- offset = (ep_index(hsep)) ? S3C_ECR : S3C_EP0CR;
- ecr = readl(hsudc->regs + offset);
-
- if (value) {
- ecr |= S3C_ECR_STALL;
- if (ep_index(hsep))
- ecr |= S3C_ECR_FLUSH;
- hsep->stopped = 1;
- } else {
- ecr &= ~S3C_ECR_STALL;
- hsep->stopped = hsep->wedge = 0;
- }
- writel(ecr, hsudc->regs + offset);
-
- if (ep_is_in(hsep) && !list_empty(&hsep->queue) && !value) {
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- if (hsreq)
- s3c_hsudc_write_fifo(hsep, hsreq);
- }
-
- spin_unlock_irqrestore(&hsudc->lock, irqflags);
- return 0;
-}
-
-/** s3c_hsudc_set_wedge - Sets the halt feature with the clear requests ignored
- * @_ep: Endpoint on which wedge has to be set.
- *
- * Sets the halt feature with the clear requests ignored.
- */
-static int s3c_hsudc_set_wedge(struct usb_ep *_ep)
-{
- struct s3c_hsudc_ep *hsep = our_ep(_ep);
-
- if (!hsep)
- return -EINVAL;
-
- hsep->wedge = 1;
- return usb_ep_set_halt(_ep);
-}
-
-/** s3c_hsudc_handle_reqfeat - Handle set feature or clear feature requests.
- * @_ep: Device controller on which the set/clear feature needs to be handled.
- * @ctrl: Control request as received on the endpoint 0.
- *
- * Handle set feature or clear feature control requests on the control endpoint.
- */
-static int s3c_hsudc_handle_reqfeat(struct s3c_hsudc *hsudc,
- struct usb_ctrlrequest *ctrl)
-{
- struct s3c_hsudc_ep *hsep;
- bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
- u8 ep_num = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
-
- if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
- hsep = &hsudc->ep[ep_num];
- switch (le16_to_cpu(ctrl->wValue)) {
- case USB_ENDPOINT_HALT:
- if (set || (!set && !hsep->wedge))
- s3c_hsudc_set_halt(&hsep->ep, set);
- return 0;
- }
- }
-
- return -ENOENT;
-}
-
-/**
- * s3c_hsudc_process_req_status - Handle get status control request.
- * @hsudc: Device controller on which get status request has be handled.
- * @ctrl: Control request as received on the endpoint 0.
- *
- * Handle get status control request received on control endpoint.
- */
-static void s3c_hsudc_process_req_status(struct s3c_hsudc *hsudc,
- struct usb_ctrlrequest *ctrl)
-{
- struct s3c_hsudc_ep *hsep0 = &hsudc->ep[0];
- struct s3c_hsudc_req hsreq;
- struct s3c_hsudc_ep *hsep;
- __le16 reply;
- u8 epnum;
-
- switch (ctrl->bRequestType & USB_RECIP_MASK) {
- case USB_RECIP_DEVICE:
- reply = cpu_to_le16(0);
- break;
-
- case USB_RECIP_INTERFACE:
- reply = cpu_to_le16(0);
- break;
-
- case USB_RECIP_ENDPOINT:
- epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
- hsep = &hsudc->ep[epnum];
- reply = cpu_to_le16(hsep->stopped ? 1 : 0);
- break;
- }
-
- INIT_LIST_HEAD(&hsreq.queue);
- hsreq.req.length = 2;
- hsreq.req.buf = &reply;
- hsreq.req.actual = 0;
- hsreq.req.complete = NULL;
- s3c_hsudc_write_fifo(hsep0, &hsreq);
-}
-
-/**
- * s3c_hsudc_process_setup - Process control request received on endpoint 0.
- * @hsudc: Device controller on which control request has been received.
- *
- * Read the control request received on endpoint 0, decode it and handle
- * the request.
- */
-static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc)
-{
- struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
- struct usb_ctrlrequest ctrl = {0};
- int ret;
-
- s3c_hsudc_nuke_ep(hsep, -EPROTO);
- s3c_hsudc_read_setup_pkt(hsudc, (u16 *)&ctrl);
-
- if (ctrl.bRequestType & USB_DIR_IN) {
- hsep->bEndpointAddress |= USB_DIR_IN;
- hsudc->ep0state = DATA_STATE_XMIT;
- } else {
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- hsudc->ep0state = DATA_STATE_RECV;
- }
-
- switch (ctrl.bRequest) {
- case USB_REQ_SET_ADDRESS:
- if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
- break;
- hsudc->ep0state = WAIT_FOR_SETUP;
- return;
-
- case USB_REQ_GET_STATUS:
- if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
- break;
- s3c_hsudc_process_req_status(hsudc, &ctrl);
- return;
-
- case USB_REQ_SET_FEATURE:
- case USB_REQ_CLEAR_FEATURE:
- if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
- break;
- s3c_hsudc_handle_reqfeat(hsudc, &ctrl);
- hsudc->ep0state = WAIT_FOR_SETUP;
- return;
- }
-
- if (hsudc->driver) {
- spin_unlock(&hsudc->lock);
- ret = hsudc->driver->setup(&hsudc->gadget, &ctrl);
- spin_lock(&hsudc->lock);
-
- if (ctrl.bRequest == USB_REQ_SET_CONFIGURATION) {
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- hsudc->ep0state = WAIT_FOR_SETUP;
- }
-
- if (ret < 0) {
- dev_err(hsudc->dev, "setup failed, returned %d\n",
- ret);
- s3c_hsudc_set_halt(&hsep->ep, 1);
- hsudc->ep0state = WAIT_FOR_SETUP;
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- }
- }
-}
-
-/** s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt.
- * @hsudc: Device controller on which endpoint 0 interrupt has occured.
- *
- * Handle endpoint 0 interrupt when it occurs. EP0 interrupt could occur
- * when a stall handshake is sent to host or data is sent/received on
- * endpoint 0.
- */
-static void s3c_hsudc_handle_ep0_intr(struct s3c_hsudc *hsudc)
-{
- struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
- struct s3c_hsudc_req *hsreq;
- u32 csr = readl(hsudc->regs + S3C_EP0SR);
- u32 ecr;
-
- if (csr & S3C_EP0SR_STALL) {
- ecr = readl(hsudc->regs + S3C_EP0CR);
- ecr &= ~(S3C_ECR_STALL | S3C_ECR_FLUSH);
- writel(ecr, hsudc->regs + S3C_EP0CR);
-
- writel(S3C_EP0SR_STALL, hsudc->regs + S3C_EP0SR);
- hsep->stopped = 0;
-
- s3c_hsudc_nuke_ep(hsep, -ECONNABORTED);
- hsudc->ep0state = WAIT_FOR_SETUP;
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- return;
- }
-
- if (csr & S3C_EP0SR_TX_SUCCESS) {
- writel(S3C_EP0SR_TX_SUCCESS, hsudc->regs + S3C_EP0SR);
- if (ep_is_in(hsep)) {
- if (list_empty(&hsep->queue))
- return;
-
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- s3c_hsudc_write_fifo(hsep, hsreq);
- }
- }
-
- if (csr & S3C_EP0SR_RX_SUCCESS) {
- if (hsudc->ep0state == WAIT_FOR_SETUP)
- s3c_hsudc_process_setup(hsudc);
- else {
- if (!ep_is_in(hsep)) {
- if (list_empty(&hsep->queue))
- return;
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- s3c_hsudc_read_fifo(hsep, hsreq);
- }
- }
- }
-}
-
-/**
- * s3c_hsudc_ep_enable - Enable a endpoint.
- * @_ep: The endpoint to be enabled.
- * @desc: Endpoint descriptor.
- *
- * Enables a endpoint when called from the gadget driver. Endpoint stall if
- * any is cleared, transfer type is configured and endpoint interrupt is
- * enabled.
- */
-static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct s3c_hsudc_ep *hsep;
- struct s3c_hsudc *hsudc;
- unsigned long flags;
- u32 ecr = 0;
-
- hsep = container_of(_ep, struct s3c_hsudc_ep, ep);
- if (!_ep || !desc || hsep->desc || _ep->name == ep0name
- || desc->bDescriptorType != USB_DT_ENDPOINT
- || hsep->bEndpointAddress != desc->bEndpointAddress
- || ep_maxpacket(hsep) < le16_to_cpu(desc->wMaxPacketSize))
- return -EINVAL;
-
- if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
- && le16_to_cpu(desc->wMaxPacketSize) != ep_maxpacket(hsep))
- || !desc->wMaxPacketSize)
- return -ERANGE;
-
- hsudc = hsep->dev;
- if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&hsudc->lock, flags);
-
- set_index(hsudc, hsep->bEndpointAddress);
- ecr |= ((usb_endpoint_xfer_int(desc)) ? S3C_ECR_IEMS : S3C_ECR_DUEN);
- writel(ecr, hsudc->regs + S3C_ECR);
-
- hsep->stopped = hsep->wedge = 0;
- hsep->desc = desc;
- hsep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
-
- s3c_hsudc_set_halt(_ep, 0);
- __set_bit(ep_index(hsep), hsudc->regs + S3C_EIER);
-
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
-}
-
-/**
- * s3c_hsudc_ep_disable - Disable a endpoint.
- * @_ep: The endpoint to be disabled.
- * @desc: Endpoint descriptor.
- *
- * Disables a endpoint when called from the gadget driver.
- */
-static int s3c_hsudc_ep_disable(struct usb_ep *_ep)
-{
- struct s3c_hsudc_ep *hsep = our_ep(_ep);
- struct s3c_hsudc *hsudc = hsep->dev;
- unsigned long flags;
-
- if (!_ep || !hsep->desc)
- return -EINVAL;
-
- spin_lock_irqsave(&hsudc->lock, flags);
-
- set_index(hsudc, hsep->bEndpointAddress);
- __clear_bit(ep_index(hsep), hsudc->regs + S3C_EIER);
-
- s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
-
- hsep->desc = 0;
- hsep->stopped = 1;
-
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
-}
-
-/**
- * s3c_hsudc_alloc_request - Allocate a new request.
- * @_ep: Endpoint for which request is allocated (not used).
- * @gfp_flags: Flags used for the allocation.
- *
- * Allocates a single transfer request structure when called from gadget driver.
- */
-static struct usb_request *s3c_hsudc_alloc_request(struct usb_ep *_ep,
- gfp_t gfp_flags)
-{
- struct s3c_hsudc_req *hsreq;
-
- hsreq = kzalloc(sizeof *hsreq, gfp_flags);
- if (!hsreq)
- return 0;
-
- INIT_LIST_HEAD(&hsreq->queue);
- return &hsreq->req;
-}
-
-/**
- * s3c_hsudc_free_request - Deallocate a request.
- * @ep: Endpoint for which request is deallocated (not used).
- * @_req: Request to be deallocated.
- *
- * Allocates a single transfer request structure when called from gadget driver.
- */
-static void s3c_hsudc_free_request(struct usb_ep *ep, struct usb_request *_req)
-{
- struct s3c_hsudc_req *hsreq;
-
- hsreq = container_of(_req, struct s3c_hsudc_req, req);
- WARN_ON(!list_empty(&hsreq->queue));
- kfree(hsreq);
-}
-
-/**
- * s3c_hsudc_queue - Queue a transfer request for the endpoint.
- * @_ep: Endpoint for which the request is queued.
- * @_req: Request to be queued.
- * @gfp_flags: Not used.
- *
- * Start or enqueue a request for a endpoint when called from gadget driver.
- */
-static int s3c_hsudc_queue(struct usb_ep *_ep, struct usb_request *_req,
- gfp_t gfp_flags)
-{
- struct s3c_hsudc_req *hsreq;
- struct s3c_hsudc_ep *hsep;
- struct s3c_hsudc *hsudc;
- unsigned long flags;
- u32 offset;
- u32 csr;
-
- hsreq = container_of(_req, struct s3c_hsudc_req, req);
- if ((!_req || !_req->complete || !_req->buf ||
- !list_empty(&hsreq->queue)))
- return -EINVAL;
-
- hsep = container_of(_ep, struct s3c_hsudc_ep, ep);
- hsudc = hsep->dev;
- if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&hsudc->lock, flags);
- set_index(hsudc, hsep->bEndpointAddress);
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- if (!ep_index(hsep) && _req->length == 0) {
- hsudc->ep0state = WAIT_FOR_SETUP;
- s3c_hsudc_complete_request(hsep, hsreq, 0);
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
- }
-
- if (list_empty(&hsep->queue) && !hsep->stopped) {
- offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
- if (ep_is_in(hsep)) {
- csr = readl((u32)hsudc->regs + offset);
- if (!(csr & S3C_ESR_TX_SUCCESS) &&
- (s3c_hsudc_write_fifo(hsep, hsreq) == 1))
- hsreq = 0;
- } else {
- csr = readl((u32)hsudc->regs + offset);
- if ((csr & S3C_ESR_RX_SUCCESS)
- && (s3c_hsudc_read_fifo(hsep, hsreq) == 1))
- hsreq = 0;
- }
- }
-
- if (hsreq != 0)
- list_add_tail(&hsreq->queue, &hsep->queue);
-
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
-}
-
-/**
- * s3c_hsudc_dequeue - Dequeue a transfer request from an endpoint.
- * @_ep: Endpoint from which the request is dequeued.
- * @_req: Request to be dequeued.
- *
- * Dequeue a request from a endpoint when called from gadget driver.
- */
-static int s3c_hsudc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct s3c_hsudc_ep *hsep = our_ep(_ep);
- struct s3c_hsudc *hsudc = hsep->dev;
- struct s3c_hsudc_req *hsreq;
- unsigned long flags;
-
- hsep = container_of(_ep, struct s3c_hsudc_ep, ep);
- if (!_ep || hsep->ep.name == ep0name)
- return -EINVAL;
-
- spin_lock_irqsave(&hsudc->lock, flags);
-
- list_for_each_entry(hsreq, &hsep->queue, queue) {
- if (&hsreq->req == _req)
- break;
- }
- if (&hsreq->req != _req) {
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return -EINVAL;
- }
-
- set_index(hsudc, hsep->bEndpointAddress);
- s3c_hsudc_complete_request(hsep, hsreq, -ECONNRESET);
-
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
-}
-
-static struct usb_ep_ops s3c_hsudc_ep_ops = {
- .enable = s3c_hsudc_ep_enable,
- .disable = s3c_hsudc_ep_disable,
- .alloc_request = s3c_hsudc_alloc_request,
- .free_request = s3c_hsudc_free_request,
- .queue = s3c_hsudc_queue,
- .dequeue = s3c_hsudc_dequeue,
- .set_halt = s3c_hsudc_set_halt,
- .set_wedge = s3c_hsudc_set_wedge,
-};
-
-/**
- * s3c_hsudc_initep - Initialize a endpoint to default state.
- * @hsudc - Reference to the device controller.
- * @hsep - Endpoint to be initialized.
- * @epnum - Address to be assigned to the endpoint.
- *
- * Initialize a endpoint with default configuration.
- */
-static void s3c_hsudc_initep(struct s3c_hsudc *hsudc,
- struct s3c_hsudc_ep *hsep, int epnum)
-{
- char *dir;
-
- if ((epnum % 2) == 0) {
- dir = "out";
- } else {
- dir = "in";
- hsep->bEndpointAddress = USB_DIR_IN;
- }
-
- hsep->bEndpointAddress |= epnum;
- if (epnum)
- snprintf(hsep->name, sizeof(hsep->name), "ep%d%s", epnum, dir);
- else
- snprintf(hsep->name, sizeof(hsep->name), "%s", ep0name);
-
- INIT_LIST_HEAD(&hsep->queue);
- INIT_LIST_HEAD(&hsep->ep.ep_list);
- if (epnum)
- list_add_tail(&hsep->ep.ep_list, &hsudc->gadget.ep_list);
-
- hsep->dev = hsudc;
- hsep->ep.name = hsep->name;
- hsep->ep.maxpacket = epnum ? 512 : 64;
- hsep->ep.ops = &s3c_hsudc_ep_ops;
- hsep->fifo = hsudc->regs + S3C_BR(epnum);
- hsep->desc = 0;
- hsep->stopped = 0;
- hsep->wedge = 0;
-
- set_index(hsudc, epnum);
- writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR);
-}
-
-/**
- * s3c_hsudc_setup_ep - Configure all endpoints to default state.
- * @hsudc: Reference to device controller.
- *
- * Configures all endpoints to default state.
- */
-static void s3c_hsudc_setup_ep(struct s3c_hsudc *hsudc)
-{
- int epnum;
-
- hsudc->ep0state = WAIT_FOR_SETUP;
- INIT_LIST_HEAD(&hsudc->gadget.ep_list);
- for (epnum = 0; epnum < hsudc->pd->epnum; epnum++)
- s3c_hsudc_initep(hsudc, &hsudc->ep[epnum], epnum);
-}
-
-/**
- * s3c_hsudc_reconfig - Reconfigure the device controller to default state.
- * @hsudc: Reference to device controller.
- *
- * Reconfigures the device controller registers to a default state.
- */
-static void s3c_hsudc_reconfig(struct s3c_hsudc *hsudc)
-{
- writel(0xAA, hsudc->regs + S3C_EDR);
- writel(1, hsudc->regs + S3C_EIER);
- writel(0, hsudc->regs + S3C_TR);
- writel(S3C_SCR_DTZIEN_EN | S3C_SCR_RRD_EN | S3C_SCR_SUS_EN |
- S3C_SCR_RST_EN, hsudc->regs + S3C_SCR);
- writel(0, hsudc->regs + S3C_EP0CR);
-
- s3c_hsudc_setup_ep(hsudc);
-}
-
-/**
- * s3c_hsudc_irq - Interrupt handler for device controller.
- * @irq: Not used.
- * @_dev: Reference to the device controller.
- *
- * Interrupt handler for the device controller. This handler handles controller
- * interrupts and endpoint interrupts.
- */
-static irqreturn_t s3c_hsudc_irq(int irq, void *_dev)
-{
- struct s3c_hsudc *hsudc = _dev;
- struct s3c_hsudc_ep *hsep;
- u32 ep_intr;
- u32 sys_status;
- u32 ep_idx;
-
- spin_lock(&hsudc->lock);
-
- sys_status = readl(hsudc->regs + S3C_SSR);
- ep_intr = readl(hsudc->regs + S3C_EIR) & 0x3FF;
-
- if (!ep_intr && !(sys_status & S3C_SSR_DTZIEN_EN)) {
- spin_unlock(&hsudc->lock);
- return IRQ_HANDLED;
- }
-
- if (sys_status) {
- if (sys_status & S3C_SSR_VBUSON)
- writel(S3C_SSR_VBUSON, hsudc->regs + S3C_SSR);
-
- if (sys_status & S3C_SSR_ERR)
- writel(S3C_SSR_ERR, hsudc->regs + S3C_SSR);
-
- if (sys_status & S3C_SSR_SDE) {
- writel(S3C_SSR_SDE, hsudc->regs + S3C_SSR);
- hsudc->gadget.speed = (sys_status & S3C_SSR_HSP) ?
- USB_SPEED_HIGH : USB_SPEED_FULL;
- }
-
- if (sys_status & S3C_SSR_SUSPEND) {
- writel(S3C_SSR_SUSPEND, hsudc->regs + S3C_SSR);
- if (hsudc->gadget.speed != USB_SPEED_UNKNOWN
- && hsudc->driver && hsudc->driver->suspend)
- hsudc->driver->suspend(&hsudc->gadget);
- }
-
- if (sys_status & S3C_SSR_RESUME) {
- writel(S3C_SSR_RESUME, hsudc->regs + S3C_SSR);
- if (hsudc->gadget.speed != USB_SPEED_UNKNOWN
- && hsudc->driver && hsudc->driver->resume)
- hsudc->driver->resume(&hsudc->gadget);
- }
-
- if (sys_status & S3C_SSR_RESET) {
- writel(S3C_SSR_RESET, hsudc->regs + S3C_SSR);
- for (ep_idx = 0; ep_idx < hsudc->pd->epnum; ep_idx++) {
- hsep = &hsudc->ep[ep_idx];
- hsep->stopped = 1;
- s3c_hsudc_nuke_ep(hsep, -ECONNRESET);
- }
- s3c_hsudc_reconfig(hsudc);
- hsudc->ep0state = WAIT_FOR_SETUP;
- }
- }
-
- if (ep_intr & S3C_EIR_EP0) {
- writel(S3C_EIR_EP0, hsudc->regs + S3C_EIR);
- set_index(hsudc, 0);
- s3c_hsudc_handle_ep0_intr(hsudc);
- }
-
- ep_intr >>= 1;
- ep_idx = 1;
- while (ep_intr) {
- if (ep_intr & 1) {
- hsep = &hsudc->ep[ep_idx];
- set_index(hsudc, ep_idx);
- writel(1 << ep_idx, hsudc->regs + S3C_EIR);
- if (ep_is_in(hsep))
- s3c_hsudc_epin_intr(hsudc, ep_idx);
- else
- s3c_hsudc_epout_intr(hsudc, ep_idx);
- }
- ep_intr >>= 1;
- ep_idx++;
- }
-
- spin_unlock(&hsudc->lock);
- return IRQ_HANDLED;
-}
-
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
-{
- struct s3c_hsudc *hsudc = the_controller;
- int ret;
-
- if (!driver
- || (driver->speed != USB_SPEED_FULL &&
- driver->speed != USB_SPEED_HIGH)
- || !bind
- || !driver->unbind || !driver->disconnect || !driver->setup)
- return -EINVAL;
-
- if (!hsudc)
- return -ENODEV;
-
- if (hsudc->driver)
- return -EBUSY;
-
- hsudc->driver = driver;
- hsudc->gadget.dev.driver = &driver->driver;
- hsudc->gadget.speed = USB_SPEED_UNKNOWN;
- ret = device_add(&hsudc->gadget.dev);
- if (ret) {
- dev_err(hsudc->dev, "failed to probe gadget device");
- return ret;
- }
-
- ret = bind(&hsudc->gadget);
- if (ret) {
- dev_err(hsudc->dev, "%s: bind failed\n", hsudc->gadget.name);
- device_del(&hsudc->gadget.dev);
-
- hsudc->driver = NULL;
- hsudc->gadget.dev.driver = NULL;
- return ret;
- }
-
- enable_irq(hsudc->irq);
- dev_info(hsudc->dev, "bound driver %s\n", driver->driver.name);
-
- s3c_hsudc_reconfig(hsudc);
- s3c_hsudc_init_phy();
- if (hsudc->pd->gpio_init)
- hsudc->pd->gpio_init();
-
- return 0;
-}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
-{
- struct s3c_hsudc *hsudc = the_controller;
- unsigned long flags;
-
- if (!hsudc)
- return -ENODEV;
-
- if (!driver || driver != hsudc->driver || !driver->unbind)
- return -EINVAL;
-
- spin_lock_irqsave(&hsudc->lock, flags);
- hsudc->driver = 0;
- s3c_hsudc_uninit_phy();
- if (hsudc->pd->gpio_uninit)
- hsudc->pd->gpio_uninit();
- s3c_hsudc_stop_activity(hsudc, driver);
- spin_unlock_irqrestore(&hsudc->lock, flags);
-
- driver->unbind(&hsudc->gadget);
- device_del(&hsudc->gadget.dev);
- disable_irq(hsudc->irq);
-
- dev_info(hsudc->dev, "unregistered gadget driver '%s'\n",
- driver->driver.name);
- return 0;
-}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
-static inline u32 s3c_hsudc_read_frameno(struct s3c_hsudc *hsudc)
-{
- return readl(hsudc->regs + S3C_FNR) & 0x3FF;
-}
-
-static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget)
-{
- return s3c_hsudc_read_frameno(to_hsudc(gadget));
-}
-
-static struct usb_gadget_ops s3c_hsudc_gadget_ops = {
- .get_frame = s3c_hsudc_gadget_getframe,
-};
-
-static int s3c_hsudc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct s3c_hsudc *hsudc;
- struct s3c24xx_hsudc_platdata *pd = pdev->dev.platform_data;
- int ret;
-
- hsudc = kzalloc(sizeof(struct s3c_hsudc) +
- sizeof(struct s3c_hsudc_ep) * pd->epnum,
- GFP_KERNEL);
- if (!hsudc) {
- dev_err(dev, "cannot allocate memory\n");
- return -ENOMEM;
- }
-
- the_controller = hsudc;
- platform_set_drvdata(pdev, dev);
- hsudc->dev = dev;
- hsudc->pd = pdev->dev.platform_data;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "unable to obtain driver resource data\n");
- ret = -ENODEV;
- goto err_res;
- }
-
- hsudc->mem_rsrc = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
- if (!hsudc->mem_rsrc) {
- dev_err(dev, "failed to reserve register area\n");
- ret = -ENODEV;
- goto err_res;
- }
-
- hsudc->regs = ioremap(res->start, resource_size(res));
- if (!hsudc->regs) {
- dev_err(dev, "error mapping device register area\n");
- ret = -EBUSY;
- goto err_remap;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "unable to obtain IRQ number\n");
- goto err_irq;
- }
- hsudc->irq = ret;
-
- ret = request_irq(hsudc->irq, s3c_hsudc_irq, 0, driver_name, hsudc);
- if (ret < 0) {
- dev_err(dev, "irq request failed\n");
- goto err_irq;
- }
-
- spin_lock_init(&hsudc->lock);
-
- device_initialize(&hsudc->gadget.dev);
- dev_set_name(&hsudc->gadget.dev, "gadget");
-
- hsudc->gadget.is_dualspeed = 1;
- hsudc->gadget.ops = &s3c_hsudc_gadget_ops;
- hsudc->gadget.name = dev_name(dev);
- hsudc->gadget.dev.parent = dev;
- hsudc->gadget.dev.dma_mask = dev->dma_mask;
- hsudc->gadget.ep0 = &hsudc->ep[0].ep;
-
- hsudc->gadget.is_otg = 0;
- hsudc->gadget.is_a_peripheral = 0;
-
- s3c_hsudc_setup_ep(hsudc);
-
- hsudc->uclk = clk_get(&pdev->dev, "usb-device");
- if (IS_ERR(hsudc->uclk)) {
- dev_err(dev, "failed to find usb-device clock source\n");
- ret = PTR_ERR(hsudc->uclk);
- goto err_clk;
- }
- clk_enable(hsudc->uclk);
-
- local_irq_disable();
-
- disable_irq(hsudc->irq);
- local_irq_enable();
- return 0;
-err_clk:
- free_irq(hsudc->irq, hsudc);
-err_irq:
- iounmap(hsudc->regs);
-
-err_remap:
- release_resource(hsudc->mem_rsrc);
- kfree(hsudc->mem_rsrc);
-
-err_res:
- kfree(hsudc);
- return ret;
-}
-
-static struct platform_driver s3c_hsudc_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "s3c-hsudc",
- },
- .probe = s3c_hsudc_probe,
-};
-
-static int __init s3c_hsudc_modinit(void)
-{
- return platform_driver_register(&s3c_hsudc_driver);
-}
-
-static void __exit s3c_hsudc_modexit(void)
-{
- platform_driver_unregister(&s3c_hsudc_driver);
-}
-
-module_init(s3c_hsudc_modinit);
-module_exit(s3c_hsudc_modexit);
-
-MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver");
-MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
deleted file mode 100644
index 100f2635..00000000
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ /dev/null
@@ -1,2118 +0,0 @@
-/*
- * linux/drivers/usb/gadget/s3c2410_udc.c
- *
- * Samsung S3C24xx series on-chip full speed USB device controllers
- *
- * Copyright (C) 2004-2007 Herbert P<>tzl - Arnaud Patard
- * Additional cleanups by Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/prefetch.h>
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include <linux/usb.h>
-#include <linux/usb/gadget.h>
-
-#include <asm/byteorder.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/unaligned.h>
-#include <mach/irqs.h>
-
-#include <mach/hardware.h>
-
-#include <plat/regs-udc.h>
-#include <plat/udc.h>
-
-
-#include "s3c2410_udc.h"
-
-#define DRIVER_DESC "S3C2410 USB Device Controller Gadget"
-#define DRIVER_VERSION "29 Apr 2007"
-#define DRIVER_AUTHOR "Herbert P<>tzl <herbert@13thfloor.at>, " \
- "Arnaud Patard <arnaud.patard@rtp-net.org>"
-
-static const char gadget_name[] = "s3c2410_udc";
-static const char driver_desc[] = DRIVER_DESC;
-
-static struct s3c2410_udc *the_controller;
-static struct clk *udc_clock;
-static struct clk *usb_bus_clock;
-static void __iomem *base_addr;
-static u64 rsrc_start;
-static u64 rsrc_len;
-static struct dentry *s3c2410_udc_debugfs_root;
-
-static inline u32 udc_read(u32 reg)
-{
- return readb(base_addr + reg);
-}
-
-static inline void udc_write(u32 value, u32 reg)
-{
- writeb(value, base_addr + reg);
-}
-
-static inline void udc_writeb(void __iomem *base, u32 value, u32 reg)
-{
- writeb(value, base + reg);
-}
-
-static struct s3c2410_udc_mach_info *udc_info;
-
-/*************************** DEBUG FUNCTION ***************************/
-#define DEBUG_NORMAL 1
-#define DEBUG_VERBOSE 2
-
-#ifdef CONFIG_USB_S3C2410_DEBUG
-#define USB_S3C2410_DEBUG_LEVEL 0
-
-static uint32_t s3c2410_ticks = 0;
-
-static int dprintk(int level, const char *fmt, ...)
-{
- static char printk_buf[1024];
- static long prevticks;
- static int invocation;
- va_list args;
- int len;
-
- if (level > USB_S3C2410_DEBUG_LEVEL)
- return 0;
-
- if (s3c2410_ticks != prevticks) {
- prevticks = s3c2410_ticks;
- invocation = 0;
- }
-
- len = scnprintf(printk_buf,
- sizeof(printk_buf), "%1lu.%02d USB: ",
- prevticks, invocation++);
-
- va_start(args, fmt);
- len = vscnprintf(printk_buf+len,
- sizeof(printk_buf)-len, fmt, args);
- va_end(args);
-
- return printk(KERN_DEBUG "%s", printk_buf);
-}
-#else
-static int dprintk(int level, const char *fmt, ...)
-{
- return 0;
-}
-#endif
-static int s3c2410_udc_debugfs_seq_show(struct seq_file *m, void *p)
-{
- u32 addr_reg,pwr_reg,ep_int_reg,usb_int_reg;
- u32 ep_int_en_reg, usb_int_en_reg, ep0_csr;
- u32 ep1_i_csr1,ep1_i_csr2,ep1_o_csr1,ep1_o_csr2;
- u32 ep2_i_csr1,ep2_i_csr2,ep2_o_csr1,ep2_o_csr2;
-
- addr_reg = udc_read(S3C2410_UDC_FUNC_ADDR_REG);
- pwr_reg = udc_read(S3C2410_UDC_PWR_REG);
- ep_int_reg = udc_read(S3C2410_UDC_EP_INT_REG);
- usb_int_reg = udc_read(S3C2410_UDC_USB_INT_REG);
- ep_int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
- usb_int_en_reg = udc_read(S3C2410_UDC_USB_INT_EN_REG);
- udc_write(0, S3C2410_UDC_INDEX_REG);
- ep0_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
- udc_write(1, S3C2410_UDC_INDEX_REG);
- ep1_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- ep1_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
- ep1_o_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- ep1_o_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
- udc_write(2, S3C2410_UDC_INDEX_REG);
- ep2_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- ep2_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
- ep2_o_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- ep2_o_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
-
- seq_printf(m, "FUNC_ADDR_REG : 0x%04X\n"
- "PWR_REG : 0x%04X\n"
- "EP_INT_REG : 0x%04X\n"
- "USB_INT_REG : 0x%04X\n"
- "EP_INT_EN_REG : 0x%04X\n"
- "USB_INT_EN_REG : 0x%04X\n"
- "EP0_CSR : 0x%04X\n"
- "EP1_I_CSR1 : 0x%04X\n"
- "EP1_I_CSR2 : 0x%04X\n"
- "EP1_O_CSR1 : 0x%04X\n"
- "EP1_O_CSR2 : 0x%04X\n"
- "EP2_I_CSR1 : 0x%04X\n"
- "EP2_I_CSR2 : 0x%04X\n"
- "EP2_O_CSR1 : 0x%04X\n"
- "EP2_O_CSR2 : 0x%04X\n",
- addr_reg,pwr_reg,ep_int_reg,usb_int_reg,
- ep_int_en_reg, usb_int_en_reg, ep0_csr,
- ep1_i_csr1,ep1_i_csr2,ep1_o_csr1,ep1_o_csr2,
- ep2_i_csr1,ep2_i_csr2,ep2_o_csr1,ep2_o_csr2
- );
-
- return 0;
-}
-
-static int s3c2410_udc_debugfs_fops_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, s3c2410_udc_debugfs_seq_show, NULL);
-}
-
-static const struct file_operations s3c2410_udc_debugfs_fops = {
- .open = s3c2410_udc_debugfs_fops_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-/* io macros */
-
-static inline void s3c2410_udc_clear_ep0_opr(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, S3C2410_UDC_EP0_CSR_SOPKTRDY,
- S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_clear_ep0_sst(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- writeb(0x00, base + S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_clear_ep0_se(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, S3C2410_UDC_EP0_CSR_SSE, S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_ipr(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, S3C2410_UDC_EP0_CSR_IPKRDY, S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_de(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, S3C2410_UDC_EP0_CSR_DE, S3C2410_UDC_EP0_CSR_REG);
-}
-
-inline void s3c2410_udc_set_ep0_ss(void __iomem *b)
-{
- udc_writeb(b, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(b, S3C2410_UDC_EP0_CSR_SENDSTL, S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_de_out(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
-
- udc_writeb(base,(S3C2410_UDC_EP0_CSR_SOPKTRDY
- | S3C2410_UDC_EP0_CSR_DE),
- S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_sse_out(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, (S3C2410_UDC_EP0_CSR_SOPKTRDY
- | S3C2410_UDC_EP0_CSR_SSE),
- S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_de_in(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, (S3C2410_UDC_EP0_CSR_IPKRDY
- | S3C2410_UDC_EP0_CSR_DE),
- S3C2410_UDC_EP0_CSR_REG);
-}
-
-/*------------------------- I/O ----------------------------------*/
-
-/*
- * s3c2410_udc_done
- */
-static void s3c2410_udc_done(struct s3c2410_ep *ep,
- struct s3c2410_request *req, int status)
-{
- unsigned halted = ep->halted;
-
- list_del_init(&req->queue);
-
- if (likely (req->req.status == -EINPROGRESS))
- req->req.status = status;
- else
- status = req->req.status;
-
- ep->halted = 1;
- req->req.complete(&ep->ep, &req->req);
- ep->halted = halted;
-}
-
-static void s3c2410_udc_nuke(struct s3c2410_udc *udc,
- struct s3c2410_ep *ep, int status)
-{
- /* Sanity check */
- if (&ep->queue == NULL)
- return;
-
- while (!list_empty (&ep->queue)) {
- struct s3c2410_request *req;
- req = list_entry (ep->queue.next, struct s3c2410_request,
- queue);
- s3c2410_udc_done(ep, req, status);
- }
-}
-
-static inline void s3c2410_udc_clear_ep_state(struct s3c2410_udc *dev)
-{
- unsigned i;
-
- /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
- * fifos, and pending transactions mustn't be continued in any case.
- */
-
- for (i = 1; i < S3C2410_ENDPOINTS; i++)
- s3c2410_udc_nuke(dev, &dev->ep[i], -ECONNABORTED);
-}
-
-static inline int s3c2410_udc_fifo_count_out(void)
-{
- int tmp;
-
- tmp = udc_read(S3C2410_UDC_OUT_FIFO_CNT2_REG) << 8;
- tmp |= udc_read(S3C2410_UDC_OUT_FIFO_CNT1_REG);
- return tmp;
-}
-
-/*
- * s3c2410_udc_write_packet
- */
-static inline int s3c2410_udc_write_packet(int fifo,
- struct s3c2410_request *req,
- unsigned max)
-{
- unsigned len = min(req->req.length - req->req.actual, max);
- u8 *buf = req->req.buf + req->req.actual;
-
- prefetch(buf);
-
- dprintk(DEBUG_VERBOSE, "%s %d %d %d %d\n", __func__,
- req->req.actual, req->req.length, len, req->req.actual + len);
-
- req->req.actual += len;
-
- udelay(5);
- writesb(base_addr + fifo, buf, len);
- return len;
-}
-
-/*
- * s3c2410_udc_write_fifo
- *
- * return: 0 = still running, 1 = completed, negative = errno
- */
-static int s3c2410_udc_write_fifo(struct s3c2410_ep *ep,
- struct s3c2410_request *req)
-{
- unsigned count;
- int is_last;
- u32 idx;
- int fifo_reg;
- u32 ep_csr;
-
- idx = ep->bEndpointAddress & 0x7F;
- switch (idx) {
- default:
- idx = 0;
- case 0:
- fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
- break;
- case 1:
- fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
- break;
- case 2:
- fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
- break;
- case 3:
- fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
- break;
- case 4:
- fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
- break;
- }
-
- count = s3c2410_udc_write_packet(fifo_reg, req, ep->ep.maxpacket);
-
- /* last packet is often short (sometimes a zlp) */
- if (count != ep->ep.maxpacket)
- is_last = 1;
- else if (req->req.length != req->req.actual || req->req.zero)
- is_last = 0;
- else
- is_last = 2;
-
- /* Only ep0 debug messages are interesting */
- if (idx == 0)
- dprintk(DEBUG_NORMAL,
- "Written ep%d %d.%d of %d b [last %d,z %d]\n",
- idx, count, req->req.actual, req->req.length,
- is_last, req->req.zero);
-
- if (is_last) {
- /* The order is important. It prevents sending 2 packets
- * at the same time */
-
- if (idx == 0) {
- /* Reset signal => no need to say 'data sent' */
- if (! (udc_read(S3C2410_UDC_USB_INT_REG)
- & S3C2410_UDC_USBINT_RESET))
- s3c2410_udc_set_ep0_de_in(base_addr);
- ep->dev->ep0state=EP0_IDLE;
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
- S3C2410_UDC_IN_CSR1_REG);
- }
-
- s3c2410_udc_done(ep, req, 0);
- is_last = 1;
- } else {
- if (idx == 0) {
- /* Reset signal => no need to say 'data sent' */
- if (! (udc_read(S3C2410_UDC_USB_INT_REG)
- & S3C2410_UDC_USBINT_RESET))
- s3c2410_udc_set_ep0_ipr(base_addr);
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
- S3C2410_UDC_IN_CSR1_REG);
- }
- }
-
- return is_last;
-}
-
-static inline int s3c2410_udc_read_packet(int fifo, u8 *buf,
- struct s3c2410_request *req, unsigned avail)
-{
- unsigned len;
-
- len = min(req->req.length - req->req.actual, avail);
- req->req.actual += len;
-
- readsb(fifo + base_addr, buf, len);
- return len;
-}
-
-/*
- * return: 0 = still running, 1 = queue empty, negative = errno
- */
-static int s3c2410_udc_read_fifo(struct s3c2410_ep *ep,
- struct s3c2410_request *req)
-{
- u8 *buf;
- u32 ep_csr;
- unsigned bufferspace;
- int is_last=1;
- unsigned avail;
- int fifo_count = 0;
- u32 idx;
- int fifo_reg;
-
- idx = ep->bEndpointAddress & 0x7F;
-
- switch (idx) {
- default:
- idx = 0;
- case 0:
- fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
- break;
- case 1:
- fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
- break;
- case 2:
- fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
- break;
- case 3:
- fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
- break;
- case 4:
- fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
- break;
- }
-
- if (!req->req.length)
- return 1;
-
- buf = req->req.buf + req->req.actual;
- bufferspace = req->req.length - req->req.actual;
- if (!bufferspace) {
- dprintk(DEBUG_NORMAL, "%s: buffer full!\n", __func__);
- return -1;
- }
-
- udc_write(idx, S3C2410_UDC_INDEX_REG);
-
- fifo_count = s3c2410_udc_fifo_count_out();
- dprintk(DEBUG_NORMAL, "%s fifo count : %d\n", __func__, fifo_count);
-
- if (fifo_count > ep->ep.maxpacket)
- avail = ep->ep.maxpacket;
- else
- avail = fifo_count;
-
- fifo_count = s3c2410_udc_read_packet(fifo_reg, buf, req, avail);
-
- /* checking this with ep0 is not accurate as we already
- * read a control request
- **/
- if (idx != 0 && fifo_count < ep->ep.maxpacket) {
- is_last = 1;
- /* overflowed this request? flush extra data */
- if (fifo_count != avail)
- req->req.status = -EOVERFLOW;
- } else {
- is_last = (req->req.length <= req->req.actual) ? 1 : 0;
- }
-
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- fifo_count = s3c2410_udc_fifo_count_out();
-
- /* Only ep0 debug messages are interesting */
- if (idx == 0)
- dprintk(DEBUG_VERBOSE, "%s fifo count : %d [last %d]\n",
- __func__, fifo_count,is_last);
-
- if (is_last) {
- if (idx == 0) {
- s3c2410_udc_set_ep0_de_out(base_addr);
- ep->dev->ep0state = EP0_IDLE;
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
- S3C2410_UDC_OUT_CSR1_REG);
- }
-
- s3c2410_udc_done(ep, req, 0);
- } else {
- if (idx == 0) {
- s3c2410_udc_clear_ep0_opr(base_addr);
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
- S3C2410_UDC_OUT_CSR1_REG);
- }
- }
-
- return is_last;
-}
-
-static int s3c2410_udc_read_fifo_crq(struct usb_ctrlrequest *crq)
-{
- unsigned char *outbuf = (unsigned char*)crq;
- int bytes_read = 0;
-
- udc_write(0, S3C2410_UDC_INDEX_REG);
-
- bytes_read = s3c2410_udc_fifo_count_out();
-
- dprintk(DEBUG_NORMAL, "%s: fifo_count=%d\n", __func__, bytes_read);
-
- if (bytes_read > sizeof(struct usb_ctrlrequest))
- bytes_read = sizeof(struct usb_ctrlrequest);
-
- readsb(S3C2410_UDC_EP0_FIFO_REG + base_addr, outbuf, bytes_read);
-
- dprintk(DEBUG_VERBOSE, "%s: len=%d %02x:%02x {%x,%x,%x}\n", __func__,
- bytes_read, crq->bRequest, crq->bRequestType,
- crq->wValue, crq->wIndex, crq->wLength);
-
- return bytes_read;
-}
-
-static int s3c2410_udc_get_status(struct s3c2410_udc *dev,
- struct usb_ctrlrequest *crq)
-{
- u16 status = 0;
- u8 ep_num = crq->wIndex & 0x7F;
- u8 is_in = crq->wIndex & USB_DIR_IN;
-
- switch (crq->bRequestType & USB_RECIP_MASK) {
- case USB_RECIP_INTERFACE:
- break;
-
- case USB_RECIP_DEVICE:
- status = dev->devstatus;
- break;
-
- case USB_RECIP_ENDPOINT:
- if (ep_num > 4 || crq->wLength > 2)
- return 1;
-
- if (ep_num == 0) {
- udc_write(0, S3C2410_UDC_INDEX_REG);
- status = udc_read(S3C2410_UDC_IN_CSR1_REG);
- status = status & S3C2410_UDC_EP0_CSR_SENDSTL;
- } else {
- udc_write(ep_num, S3C2410_UDC_INDEX_REG);
- if (is_in) {
- status = udc_read(S3C2410_UDC_IN_CSR1_REG);
- status = status & S3C2410_UDC_ICSR1_SENDSTL;
- } else {
- status = udc_read(S3C2410_UDC_OUT_CSR1_REG);
- status = status & S3C2410_UDC_OCSR1_SENDSTL;
- }
- }
-
- status = status ? 1 : 0;
- break;
-
- default:
- return 1;
- }
-
- /* Seems to be needed to get it working. ouch :( */
- udelay(5);
- udc_write(status & 0xFF, S3C2410_UDC_EP0_FIFO_REG);
- udc_write(status >> 8, S3C2410_UDC_EP0_FIFO_REG);
- s3c2410_udc_set_ep0_de_in(base_addr);
-
- return 0;
-}
-/*------------------------- usb state machine -------------------------------*/
-static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value);
-
-static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev,
- struct s3c2410_ep *ep,
- struct usb_ctrlrequest *crq,
- u32 ep0csr)
-{
- int len, ret, tmp;
-
- /* start control request? */
- if (!(ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY))
- return;
-
- s3c2410_udc_nuke(dev, ep, -EPROTO);
-
- len = s3c2410_udc_read_fifo_crq(crq);
- if (len != sizeof(*crq)) {
- dprintk(DEBUG_NORMAL, "setup begin: fifo READ ERROR"
- " wanted %d bytes got %d. Stalling out...\n",
- sizeof(*crq), len);
- s3c2410_udc_set_ep0_ss(base_addr);
- return;
- }
-
- dprintk(DEBUG_NORMAL, "bRequest = %d bRequestType %d wLength = %d\n",
- crq->bRequest, crq->bRequestType, crq->wLength);
-
- /* cope with automagic for some standard requests. */
- dev->req_std = (crq->bRequestType & USB_TYPE_MASK)
- == USB_TYPE_STANDARD;
- dev->req_config = 0;
- dev->req_pending = 1;
-
- switch (crq->bRequest) {
- case USB_REQ_SET_CONFIGURATION:
- dprintk(DEBUG_NORMAL, "USB_REQ_SET_CONFIGURATION ... \n");
-
- if (crq->bRequestType == USB_RECIP_DEVICE) {
- dev->req_config = 1;
- s3c2410_udc_set_ep0_de_out(base_addr);
- }
- break;
-
- case USB_REQ_SET_INTERFACE:
- dprintk(DEBUG_NORMAL, "USB_REQ_SET_INTERFACE ... \n");
-
- if (crq->bRequestType == USB_RECIP_INTERFACE) {
- dev->req_config = 1;
- s3c2410_udc_set_ep0_de_out(base_addr);
- }
- break;
-
- case USB_REQ_SET_ADDRESS:
- dprintk(DEBUG_NORMAL, "USB_REQ_SET_ADDRESS ... \n");
-
- if (crq->bRequestType == USB_RECIP_DEVICE) {
- tmp = crq->wValue & 0x7F;
- dev->address = tmp;
- udc_write((tmp | S3C2410_UDC_FUNCADDR_UPDATE),
- S3C2410_UDC_FUNC_ADDR_REG);
- s3c2410_udc_set_ep0_de_out(base_addr);
- return;
- }
- break;
-
- case USB_REQ_GET_STATUS:
- dprintk(DEBUG_NORMAL, "USB_REQ_GET_STATUS ... \n");
- s3c2410_udc_clear_ep0_opr(base_addr);
-
- if (dev->req_std) {
- if (!s3c2410_udc_get_status(dev, crq)) {
- return;
- }
- }
- break;
-
- case USB_REQ_CLEAR_FEATURE:
- s3c2410_udc_clear_ep0_opr(base_addr);
-
- if (crq->bRequestType != USB_RECIP_ENDPOINT)
- break;
-
- if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
- break;
-
- s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 0);
- s3c2410_udc_set_ep0_de_out(base_addr);
- return;
-
- case USB_REQ_SET_FEATURE:
- s3c2410_udc_clear_ep0_opr(base_addr);
-
- if (crq->bRequestType != USB_RECIP_ENDPOINT)
- break;
-
- if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
- break;
-
- s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 1);
- s3c2410_udc_set_ep0_de_out(base_addr);
- return;
-
- default:
- s3c2410_udc_clear_ep0_opr(base_addr);
- break;
- }
-
- if (crq->bRequestType & USB_DIR_IN)
- dev->ep0state = EP0_IN_DATA_PHASE;
- else
- dev->ep0state = EP0_OUT_DATA_PHASE;
-
- if (!dev->driver)
- return;
-
- /* deliver the request to the gadget driver */
- ret = dev->driver->setup(&dev->gadget, crq);
- if (ret < 0) {
- if (dev->req_config) {
- dprintk(DEBUG_NORMAL, "config change %02x fail %d?\n",
- crq->bRequest, ret);
- return;
- }
-
- if (ret == -EOPNOTSUPP)
- dprintk(DEBUG_NORMAL, "Operation not supported\n");
- else
- dprintk(DEBUG_NORMAL,
- "dev->driver->setup failed. (%d)\n", ret);
-
- udelay(5);
- s3c2410_udc_set_ep0_ss(base_addr);
- s3c2410_udc_set_ep0_de_out(base_addr);
- dev->ep0state = EP0_IDLE;
- /* deferred i/o == no response yet */
- } else if (dev->req_pending) {
- dprintk(DEBUG_VERBOSE, "dev->req_pending... what now?\n");
- dev->req_pending=0;
- }
-
- dprintk(DEBUG_VERBOSE, "ep0state %s\n", ep0states[dev->ep0state]);
-}
-
-static void s3c2410_udc_handle_ep0(struct s3c2410_udc *dev)
-{
- u32 ep0csr;
- struct s3c2410_ep *ep = &dev->ep[0];
- struct s3c2410_request *req;
- struct usb_ctrlrequest crq;
-
- if (list_empty(&ep->queue))
- req = NULL;
- else
- req = list_entry(ep->queue.next, struct s3c2410_request, queue);
-
- /* We make the assumption that S3C2410_UDC_IN_CSR1_REG equal to
- * S3C2410_UDC_EP0_CSR_REG when index is zero */
-
- udc_write(0, S3C2410_UDC_INDEX_REG);
- ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
-
- dprintk(DEBUG_NORMAL, "ep0csr %x ep0state %s\n",
- ep0csr, ep0states[dev->ep0state]);
-
- /* clear stall status */
- if (ep0csr & S3C2410_UDC_EP0_CSR_SENTSTL) {
- s3c2410_udc_nuke(dev, ep, -EPIPE);
- dprintk(DEBUG_NORMAL, "... clear SENT_STALL ...\n");
- s3c2410_udc_clear_ep0_sst(base_addr);
- dev->ep0state = EP0_IDLE;
- return;
- }
-
- /* clear setup end */
- if (ep0csr & S3C2410_UDC_EP0_CSR_SE) {
- dprintk(DEBUG_NORMAL, "... serviced SETUP_END ...\n");
- s3c2410_udc_nuke(dev, ep, 0);
- s3c2410_udc_clear_ep0_se(base_addr);
- dev->ep0state = EP0_IDLE;
- }
-
- switch (dev->ep0state) {
- case EP0_IDLE:
- s3c2410_udc_handle_ep0_idle(dev, ep, &crq, ep0csr);
- break;
-
- case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
- dprintk(DEBUG_NORMAL, "EP0_IN_DATA_PHASE ... what now?\n");
- if (!(ep0csr & S3C2410_UDC_EP0_CSR_IPKRDY) && req) {
- s3c2410_udc_write_fifo(ep, req);
- }
- break;
-
- case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
- dprintk(DEBUG_NORMAL, "EP0_OUT_DATA_PHASE ... what now?\n");
- if ((ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY) && req ) {
- s3c2410_udc_read_fifo(ep,req);
- }
- break;
-
- case EP0_END_XFER:
- dprintk(DEBUG_NORMAL, "EP0_END_XFER ... what now?\n");
- dev->ep0state = EP0_IDLE;
- break;
-
- case EP0_STALL:
- dprintk(DEBUG_NORMAL, "EP0_STALL ... what now?\n");
- dev->ep0state = EP0_IDLE;
- break;
- }
-}
-
-/*
- * handle_ep - Manage I/O endpoints
- */
-
-static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep)
-{
- struct s3c2410_request *req;
- int is_in = ep->bEndpointAddress & USB_DIR_IN;
- u32 ep_csr1;
- u32 idx;
-
- if (likely (!list_empty(&ep->queue)))
- req = list_entry(ep->queue.next,
- struct s3c2410_request, queue);
- else
- req = NULL;
-
- idx = ep->bEndpointAddress & 0x7F;
-
- if (is_in) {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- dprintk(DEBUG_VERBOSE, "ep%01d write csr:%02x %d\n",
- idx, ep_csr1, req ? 1 : 0);
-
- if (ep_csr1 & S3C2410_UDC_ICSR1_SENTSTL) {
- dprintk(DEBUG_VERBOSE, "st\n");
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr1 & ~S3C2410_UDC_ICSR1_SENTSTL,
- S3C2410_UDC_IN_CSR1_REG);
- return;
- }
-
- if (!(ep_csr1 & S3C2410_UDC_ICSR1_PKTRDY) && req) {
- s3c2410_udc_write_fifo(ep,req);
- }
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr1 = udc_read(S3C2410_UDC_OUT_CSR1_REG);
- dprintk(DEBUG_VERBOSE, "ep%01d rd csr:%02x\n", idx, ep_csr1);
-
- if (ep_csr1 & S3C2410_UDC_OCSR1_SENTSTL) {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr1 & ~S3C2410_UDC_OCSR1_SENTSTL,
- S3C2410_UDC_OUT_CSR1_REG);
- return;
- }
-
- if ((ep_csr1 & S3C2410_UDC_OCSR1_PKTRDY) && req) {
- s3c2410_udc_read_fifo(ep,req);
- }
- }
-}
-
-#include <mach/regs-irq.h>
-
-/*
- * s3c2410_udc_irq - interrupt handler
- */
-static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev)
-{
- struct s3c2410_udc *dev = _dev;
- int usb_status;
- int usbd_status;
- int pwr_reg;
- int ep0csr;
- int i;
- u32 idx, idx2;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->lock, flags);
-
- /* Driver connected ? */
- if (!dev->driver) {
- /* Clear interrupts */
- udc_write(udc_read(S3C2410_UDC_USB_INT_REG),
- S3C2410_UDC_USB_INT_REG);
- udc_write(udc_read(S3C2410_UDC_EP_INT_REG),
- S3C2410_UDC_EP_INT_REG);
- }
-
- /* Save index */
- idx = udc_read(S3C2410_UDC_INDEX_REG);
-
- /* Read status registers */
- usb_status = udc_read(S3C2410_UDC_USB_INT_REG);
- usbd_status = udc_read(S3C2410_UDC_EP_INT_REG);
- pwr_reg = udc_read(S3C2410_UDC_PWR_REG);
-
- udc_writeb(base_addr, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
-
- dprintk(DEBUG_NORMAL, "usbs=%02x, usbds=%02x, pwr=%02x ep0csr=%02x\n",
- usb_status, usbd_status, pwr_reg, ep0csr);
-
- /*
- * Now, handle interrupts. There's two types :
- * - Reset, Resume, Suspend coming -> usb_int_reg
- * - EP -> ep_int_reg
- */
-
- /* RESET */
- if (usb_status & S3C2410_UDC_USBINT_RESET) {
- /* two kind of reset :
- * - reset start -> pwr reg = 8
- * - reset end -> pwr reg = 0
- **/
- dprintk(DEBUG_NORMAL, "USB reset csr %x pwr %x\n",
- ep0csr, pwr_reg);
-
- dev->gadget.speed = USB_SPEED_UNKNOWN;
- udc_write(0x00, S3C2410_UDC_INDEX_REG);
- udc_write((dev->ep[0].ep.maxpacket & 0x7ff) >> 3,
- S3C2410_UDC_MAXP_REG);
- dev->address = 0;
-
- dev->ep0state = EP0_IDLE;
- dev->gadget.speed = USB_SPEED_FULL;
-
- /* clear interrupt */
- udc_write(S3C2410_UDC_USBINT_RESET,
- S3C2410_UDC_USB_INT_REG);
-
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- spin_unlock_irqrestore(&dev->lock, flags);
- return IRQ_HANDLED;
- }
-
- /* RESUME */
- if (usb_status & S3C2410_UDC_USBINT_RESUME) {
- dprintk(DEBUG_NORMAL, "USB resume\n");
-
- /* clear interrupt */
- udc_write(S3C2410_UDC_USBINT_RESUME,
- S3C2410_UDC_USB_INT_REG);
-
- if (dev->gadget.speed != USB_SPEED_UNKNOWN
- && dev->driver
- && dev->driver->resume)
- dev->driver->resume(&dev->gadget);
- }
-
- /* SUSPEND */
- if (usb_status & S3C2410_UDC_USBINT_SUSPEND) {
- dprintk(DEBUG_NORMAL, "USB suspend\n");
-
- /* clear interrupt */
- udc_write(S3C2410_UDC_USBINT_SUSPEND,
- S3C2410_UDC_USB_INT_REG);
-
- if (dev->gadget.speed != USB_SPEED_UNKNOWN
- && dev->driver
- && dev->driver->suspend)
- dev->driver->suspend(&dev->gadget);
-
- dev->ep0state = EP0_IDLE;
- }
-
- /* EP */
- /* control traffic */
- /* check on ep0csr != 0 is not a good idea as clearing in_pkt_ready
- * generate an interrupt
- */
- if (usbd_status & S3C2410_UDC_INT_EP0) {
- dprintk(DEBUG_VERBOSE, "USB ep0 irq\n");
- /* Clear the interrupt bit by setting it to 1 */
- udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_REG);
- s3c2410_udc_handle_ep0(dev);
- }
-
- /* endpoint data transfers */
- for (i = 1; i < S3C2410_ENDPOINTS; i++) {
- u32 tmp = 1 << i;
- if (usbd_status & tmp) {
- dprintk(DEBUG_VERBOSE, "USB ep%d irq\n", i);
-
- /* Clear the interrupt bit by setting it to 1 */
- udc_write(tmp, S3C2410_UDC_EP_INT_REG);
- s3c2410_udc_handle_ep(&dev->ep[i]);
- }
- }
-
- /* what else causes this interrupt? a receive! who is it? */
- if (!usb_status && !usbd_status && !pwr_reg && !ep0csr) {
- for (i = 1; i < S3C2410_ENDPOINTS; i++) {
- idx2 = udc_read(S3C2410_UDC_INDEX_REG);
- udc_write(i, S3C2410_UDC_INDEX_REG);
-
- if (udc_read(S3C2410_UDC_OUT_CSR1_REG) & 0x1)
- s3c2410_udc_handle_ep(&dev->ep[i]);
-
- /* restore index */
- udc_write(idx2, S3C2410_UDC_INDEX_REG);
- }
- }
-
- dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", IRQ_USBD);
-
- /* Restore old index */
- udc_write(idx, S3C2410_UDC_INDEX_REG);
-
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return IRQ_HANDLED;
-}
-/*------------------------- s3c2410_ep_ops ----------------------------------*/
-
-static inline struct s3c2410_ep *to_s3c2410_ep(struct usb_ep *ep)
-{
- return container_of(ep, struct s3c2410_ep, ep);
-}
-
-static inline struct s3c2410_udc *to_s3c2410_udc(struct usb_gadget *gadget)
-{
- return container_of(gadget, struct s3c2410_udc, gadget);
-}
-
-static inline struct s3c2410_request *to_s3c2410_req(struct usb_request *req)
-{
- return container_of(req, struct s3c2410_request, req);
-}
-
-/*
- * s3c2410_udc_ep_enable
- */
-static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct s3c2410_udc *dev;
- struct s3c2410_ep *ep;
- u32 max, tmp;
- unsigned long flags;
- u32 csr1,csr2;
- u32 int_en_reg;
-
- ep = to_s3c2410_ep(_ep);
-
- if (!_ep || !desc || ep->desc
- || _ep->name == ep0name
- || desc->bDescriptorType != USB_DT_ENDPOINT)
- return -EINVAL;
-
- dev = ep->dev;
- if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
-
- local_irq_save (flags);
- _ep->maxpacket = max & 0x7ff;
- ep->desc = desc;
- ep->halted = 0;
- ep->bEndpointAddress = desc->bEndpointAddress;
-
- /* set max packet */
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(max >> 3, S3C2410_UDC_MAXP_REG);
-
- /* set type, direction, address; reset fifo counters */
- if (desc->bEndpointAddress & USB_DIR_IN) {
- csr1 = S3C2410_UDC_ICSR1_FFLUSH|S3C2410_UDC_ICSR1_CLRDT;
- csr2 = S3C2410_UDC_ICSR2_MODEIN|S3C2410_UDC_ICSR2_DMAIEN;
-
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
- } else {
- /* don't flush in fifo or it will cause endpoint interrupt */
- csr1 = S3C2410_UDC_ICSR1_CLRDT;
- csr2 = S3C2410_UDC_ICSR2_DMAIEN;
-
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
-
- csr1 = S3C2410_UDC_OCSR1_FFLUSH | S3C2410_UDC_OCSR1_CLRDT;
- csr2 = S3C2410_UDC_OCSR2_DMAIEN;
-
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr1, S3C2410_UDC_OUT_CSR1_REG);
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr2, S3C2410_UDC_OUT_CSR2_REG);
- }
-
- /* enable irqs */
- int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
- udc_write(int_en_reg | (1 << ep->num), S3C2410_UDC_EP_INT_EN_REG);
-
- /* print some debug message */
- tmp = desc->bEndpointAddress;
- dprintk (DEBUG_NORMAL, "enable %s(%d) ep%x%s-blk max %02x\n",
- _ep->name,ep->num, tmp,
- desc->bEndpointAddress & USB_DIR_IN ? "in" : "out", max);
-
- local_irq_restore (flags);
- s3c2410_udc_set_halt(_ep, 0);
-
- return 0;
-}
-
-/*
- * s3c2410_udc_ep_disable
- */
-static int s3c2410_udc_ep_disable(struct usb_ep *_ep)
-{
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- unsigned long flags;
- u32 int_en_reg;
-
- if (!_ep || !ep->desc) {
- dprintk(DEBUG_NORMAL, "%s not enabled\n",
- _ep ? ep->ep.name : NULL);
- return -EINVAL;
- }
-
- local_irq_save(flags);
-
- dprintk(DEBUG_NORMAL, "ep_disable: %s\n", _ep->name);
-
- ep->desc = NULL;
- ep->halted = 1;
-
- s3c2410_udc_nuke (ep->dev, ep, -ESHUTDOWN);
-
- /* disable irqs */
- int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
- udc_write(int_en_reg & ~(1<<ep->num), S3C2410_UDC_EP_INT_EN_REG);
-
- local_irq_restore(flags);
-
- dprintk(DEBUG_NORMAL, "%s disabled\n", _ep->name);
-
- return 0;
-}
-
-/*
- * s3c2410_udc_alloc_request
- */
-static struct usb_request *
-s3c2410_udc_alloc_request(struct usb_ep *_ep, gfp_t mem_flags)
-{
- struct s3c2410_request *req;
-
- dprintk(DEBUG_VERBOSE,"%s(%p,%d)\n", __func__, _ep, mem_flags);
-
- if (!_ep)
- return NULL;
-
- req = kzalloc (sizeof(struct s3c2410_request), mem_flags);
- if (!req)
- return NULL;
-
- INIT_LIST_HEAD (&req->queue);
- return &req->req;
-}
-
-/*
- * s3c2410_udc_free_request
- */
-static void
-s3c2410_udc_free_request(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- struct s3c2410_request *req = to_s3c2410_req(_req);
-
- dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
-
- if (!ep || !_req || (!ep->desc && _ep->name != ep0name))
- return;
-
- WARN_ON (!list_empty (&req->queue));
- kfree(req);
-}
-
-/*
- * s3c2410_udc_queue
- */
-static int s3c2410_udc_queue(struct usb_ep *_ep, struct usb_request *_req,
- gfp_t gfp_flags)
-{
- struct s3c2410_request *req = to_s3c2410_req(_req);
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- struct s3c2410_udc *dev;
- u32 ep_csr = 0;
- int fifo_count = 0;
- unsigned long flags;
-
- if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
- dprintk(DEBUG_NORMAL, "%s: invalid args\n", __func__);
- return -EINVAL;
- }
-
- dev = ep->dev;
- if (unlikely (!dev->driver
- || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
- return -ESHUTDOWN;
- }
-
- local_irq_save (flags);
-
- if (unlikely(!_req || !_req->complete
- || !_req->buf || !list_empty(&req->queue))) {
- if (!_req)
- dprintk(DEBUG_NORMAL, "%s: 1 X X X\n", __func__);
- else {
- dprintk(DEBUG_NORMAL, "%s: 0 %01d %01d %01d\n",
- __func__, !_req->complete,!_req->buf,
- !list_empty(&req->queue));
- }
-
- local_irq_restore(flags);
- return -EINVAL;
- }
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- dprintk(DEBUG_VERBOSE, "%s: ep%x len %d\n",
- __func__, ep->bEndpointAddress, _req->length);
-
- if (ep->bEndpointAddress) {
- udc_write(ep->bEndpointAddress & 0x7F, S3C2410_UDC_INDEX_REG);
-
- ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN)
- ? S3C2410_UDC_IN_CSR1_REG
- : S3C2410_UDC_OUT_CSR1_REG);
- fifo_count = s3c2410_udc_fifo_count_out();
- } else {
- udc_write(0, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
- fifo_count = s3c2410_udc_fifo_count_out();
- }
-
- /* kickstart this i/o queue? */
- if (list_empty(&ep->queue) && !ep->halted) {
- if (ep->bEndpointAddress == 0 /* ep0 */) {
- switch (dev->ep0state) {
- case EP0_IN_DATA_PHASE:
- if (!(ep_csr&S3C2410_UDC_EP0_CSR_IPKRDY)
- && s3c2410_udc_write_fifo(ep,
- req)) {
- dev->ep0state = EP0_IDLE;
- req = NULL;
- }
- break;
-
- case EP0_OUT_DATA_PHASE:
- if ((!_req->length)
- || ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
- && s3c2410_udc_read_fifo(ep,
- req))) {
- dev->ep0state = EP0_IDLE;
- req = NULL;
- }
- break;
-
- default:
- local_irq_restore(flags);
- return -EL2HLT;
- }
- } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0
- && (!(ep_csr&S3C2410_UDC_OCSR1_PKTRDY))
- && s3c2410_udc_write_fifo(ep, req)) {
- req = NULL;
- } else if ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
- && fifo_count
- && s3c2410_udc_read_fifo(ep, req)) {
- req = NULL;
- }
- }
-
- /* pio or dma irq handler advances the queue. */
- if (likely (req != 0))
- list_add_tail(&req->queue, &ep->queue);
-
- local_irq_restore(flags);
-
- dprintk(DEBUG_VERBOSE, "%s ok\n", __func__);
- return 0;
-}
-
-/*
- * s3c2410_udc_dequeue
- */
-static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- struct s3c2410_udc *udc;
- int retval = -EINVAL;
- unsigned long flags;
- struct s3c2410_request *req = NULL;
-
- dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
-
- if (!the_controller->driver)
- return -ESHUTDOWN;
-
- if (!_ep || !_req)
- return retval;
-
- udc = to_s3c2410_udc(ep->gadget);
-
- local_irq_save (flags);
-
- list_for_each_entry (req, &ep->queue, queue) {
- if (&req->req == _req) {
- list_del_init (&req->queue);
- _req->status = -ECONNRESET;
- retval = 0;
- break;
- }
- }
-
- if (retval == 0) {
- dprintk(DEBUG_VERBOSE,
- "dequeued req %p from %s, len %d buf %p\n",
- req, _ep->name, _req->length, _req->buf);
-
- s3c2410_udc_done(ep, req, -ECONNRESET);
- }
-
- local_irq_restore (flags);
- return retval;
-}
-
-/*
- * s3c2410_udc_set_halt
- */
-static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value)
-{
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- u32 ep_csr = 0;
- unsigned long flags;
- u32 idx;
-
- if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
- dprintk(DEBUG_NORMAL, "%s: inval 2\n", __func__);
- return -EINVAL;
- }
-
- local_irq_save (flags);
-
- idx = ep->bEndpointAddress & 0x7F;
-
- if (idx == 0) {
- s3c2410_udc_set_ep0_ss(base_addr);
- s3c2410_udc_set_ep0_de_out(base_addr);
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read((ep->bEndpointAddress &USB_DIR_IN)
- ? S3C2410_UDC_IN_CSR1_REG
- : S3C2410_UDC_OUT_CSR1_REG);
-
- if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
- if (value)
- udc_write(ep_csr | S3C2410_UDC_ICSR1_SENDSTL,
- S3C2410_UDC_IN_CSR1_REG);
- else {
- ep_csr &= ~S3C2410_UDC_ICSR1_SENDSTL;
- udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
- ep_csr |= S3C2410_UDC_ICSR1_CLRDT;
- udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
- }
- } else {
- if (value)
- udc_write(ep_csr | S3C2410_UDC_OCSR1_SENDSTL,
- S3C2410_UDC_OUT_CSR1_REG);
- else {
- ep_csr &= ~S3C2410_UDC_OCSR1_SENDSTL;
- udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
- ep_csr |= S3C2410_UDC_OCSR1_CLRDT;
- udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
- }
- }
- }
-
- ep->halted = value ? 1 : 0;
- local_irq_restore (flags);
-
- return 0;
-}
-
-static const struct usb_ep_ops s3c2410_ep_ops = {
- .enable = s3c2410_udc_ep_enable,
- .disable = s3c2410_udc_ep_disable,
-
- .alloc_request = s3c2410_udc_alloc_request,
- .free_request = s3c2410_udc_free_request,
-
- .queue = s3c2410_udc_queue,
- .dequeue = s3c2410_udc_dequeue,
-
- .set_halt = s3c2410_udc_set_halt,
-};
-
-/*------------------------- usb_gadget_ops ----------------------------------*/
-
-/*
- * s3c2410_udc_get_frame
- */
-static int s3c2410_udc_get_frame(struct usb_gadget *_gadget)
-{
- int tmp;
-
- dprintk(DEBUG_VERBOSE, "%s()\n", __func__);
-
- tmp = udc_read(S3C2410_UDC_FRAME_NUM2_REG) << 8;
- tmp |= udc_read(S3C2410_UDC_FRAME_NUM1_REG);
- return tmp;
-}
-
-/*
- * s3c2410_udc_wakeup
- */
-static int s3c2410_udc_wakeup(struct usb_gadget *_gadget)
-{
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
- return 0;
-}
-
-/*
- * s3c2410_udc_set_selfpowered
- */
-static int s3c2410_udc_set_selfpowered(struct usb_gadget *gadget, int value)
-{
- struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
-
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- if (value)
- udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
- else
- udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
-
- return 0;
-}
-
-static void s3c2410_udc_disable(struct s3c2410_udc *dev);
-static void s3c2410_udc_enable(struct s3c2410_udc *dev);
-
-static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
-{
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- if (udc_info && (udc_info->udc_command ||
- gpio_is_valid(udc_info->pullup_pin))) {
-
- if (is_on)
- s3c2410_udc_enable(udc);
- else {
- if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
- if (udc->driver && udc->driver->disconnect)
- udc->driver->disconnect(&udc->gadget);
-
- }
- s3c2410_udc_disable(udc);
- }
- }
- else
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static int s3c2410_udc_vbus_session(struct usb_gadget *gadget, int is_active)
-{
- struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
-
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- udc->vbus = (is_active != 0);
- s3c2410_udc_set_pullup(udc, is_active);
- return 0;
-}
-
-static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
-{
- struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
-
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- s3c2410_udc_set_pullup(udc, is_on ? 0 : 1);
- return 0;
-}
-
-static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev)
-{
- struct s3c2410_udc *dev = _dev;
- unsigned int value;
-
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- value = gpio_get_value(udc_info->vbus_pin) ? 1 : 0;
- if (udc_info->vbus_pin_inverted)
- value = !value;
-
- if (value != dev->vbus)
- s3c2410_udc_vbus_session(&dev->gadget, value);
-
- return IRQ_HANDLED;
-}
-
-static int s3c2410_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
-{
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- if (udc_info && udc_info->vbus_draw) {
- udc_info->vbus_draw(ma);
- return 0;
- }
-
- return -ENOTSUPP;
-}
-
-static const struct usb_gadget_ops s3c2410_ops = {
- .get_frame = s3c2410_udc_get_frame,
- .wakeup = s3c2410_udc_wakeup,
- .set_selfpowered = s3c2410_udc_set_selfpowered,
- .pullup = s3c2410_udc_pullup,
- .vbus_session = s3c2410_udc_vbus_session,
- .vbus_draw = s3c2410_vbus_draw,
-};
-
-static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
-{
- if (!udc_info)
- return;
-
- if (udc_info->udc_command) {
- udc_info->udc_command(S3C2410_UDC_P_DISABLE);
- } else if (gpio_is_valid(udc_info->pullup_pin)) {
- int value;
-
- switch (cmd) {
- case S3C2410_UDC_P_ENABLE:
- value = 1;
- break;
- case S3C2410_UDC_P_DISABLE:
- value = 0;
- break;
- default:
- return;
- }
- value ^= udc_info->pullup_pin_inverted;
-
- gpio_set_value(udc_info->pullup_pin, value);
- }
-}
-
-/*------------------------- gadget driver handling---------------------------*/
-/*
- * s3c2410_udc_disable
- */
-static void s3c2410_udc_disable(struct s3c2410_udc *dev)
-{
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- /* Disable all interrupts */
- udc_write(0x00, S3C2410_UDC_USB_INT_EN_REG);
- udc_write(0x00, S3C2410_UDC_EP_INT_EN_REG);
-
- /* Clear the interrupt registers */
- udc_write(S3C2410_UDC_USBINT_RESET
- | S3C2410_UDC_USBINT_RESUME
- | S3C2410_UDC_USBINT_SUSPEND,
- S3C2410_UDC_USB_INT_REG);
-
- udc_write(0x1F, S3C2410_UDC_EP_INT_REG);
-
- /* Good bye, cruel world */
- s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
-
- /* Set speed to unknown */
- dev->gadget.speed = USB_SPEED_UNKNOWN;
-}
-
-/*
- * s3c2410_udc_reinit
- */
-static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
-{
- u32 i;
-
- /* device/ep0 records init */
- INIT_LIST_HEAD (&dev->gadget.ep_list);
- INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
- dev->ep0state = EP0_IDLE;
-
- for (i = 0; i < S3C2410_ENDPOINTS; i++) {
- struct s3c2410_ep *ep = &dev->ep[i];
-
- if (i != 0)
- list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
-
- ep->dev = dev;
- ep->desc = NULL;
- ep->halted = 0;
- INIT_LIST_HEAD (&ep->queue);
- }
-}
-
-/*
- * s3c2410_udc_enable
- */
-static void s3c2410_udc_enable(struct s3c2410_udc *dev)
-{
- int i;
-
- dprintk(DEBUG_NORMAL, "s3c2410_udc_enable called\n");
-
- /* dev->gadget.speed = USB_SPEED_UNKNOWN; */
- dev->gadget.speed = USB_SPEED_FULL;
-
- /* Set MAXP for all endpoints */
- for (i = 0; i < S3C2410_ENDPOINTS; i++) {
- udc_write(i, S3C2410_UDC_INDEX_REG);
- udc_write((dev->ep[i].ep.maxpacket & 0x7ff) >> 3,
- S3C2410_UDC_MAXP_REG);
- }
-
- /* Set default power state */
- udc_write(DEFAULT_POWER_STATE, S3C2410_UDC_PWR_REG);
-
- /* Enable reset and suspend interrupt interrupts */
- udc_write(S3C2410_UDC_USBINT_RESET | S3C2410_UDC_USBINT_SUSPEND,
- S3C2410_UDC_USB_INT_EN_REG);
-
- /* Enable ep0 interrupt */
- udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG);
-
- /* time to say "hello, world" */
- s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
-}
-
-/*
- * usb_gadget_probe_driver
- */
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
-{
- struct s3c2410_udc *udc = the_controller;
- int retval;
-
- dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name);
-
- /* Sanity checks */
- if (!udc)
- return -ENODEV;
-
- if (udc->driver)
- return -EBUSY;
-
- if (!bind || !driver->setup || driver->speed < USB_SPEED_FULL) {
- printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
- bind, driver->setup, driver->speed);
- return -EINVAL;
- }
-#if defined(MODULE)
- if (!driver->unbind) {
- printk(KERN_ERR "Invalid driver: no unbind method\n");
- return -EINVAL;
- }
-#endif
-
- /* Hook the driver */
- udc->driver = driver;
- udc->gadget.dev.driver = &driver->driver;
-
- /* Bind the driver */
- if ((retval = device_add(&udc->gadget.dev)) != 0) {
- printk(KERN_ERR "Error in device_add() : %d\n",retval);
- goto register_error;
- }
-
- dprintk(DEBUG_NORMAL, "binding gadget driver '%s'\n",
- driver->driver.name);
-
- if ((retval = bind(&udc->gadget)) != 0) {
- device_del(&udc->gadget.dev);
- goto register_error;
- }
-
- /* Enable udc */
- s3c2410_udc_enable(udc);
-
- return 0;
-
-register_error:
- udc->driver = NULL;
- udc->gadget.dev.driver = NULL;
- return retval;
-}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-
-/*
- * usb_gadget_unregister_driver
- */
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
-{
- struct s3c2410_udc *udc = the_controller;
-
- if (!udc)
- return -ENODEV;
-
- if (!driver || driver != udc->driver || !driver->unbind)
- return -EINVAL;
-
- dprintk(DEBUG_NORMAL, "usb_gadget_unregister_driver() '%s'\n",
- driver->driver.name);
-
- /* report disconnect */
- if (driver->disconnect)
- driver->disconnect(&udc->gadget);
-
- driver->unbind(&udc->gadget);
-
- device_del(&udc->gadget.dev);
- udc->driver = NULL;
-
- /* Disable udc */
- s3c2410_udc_disable(udc);
-
- return 0;
-}
-
-/*---------------------------------------------------------------------------*/
-static struct s3c2410_udc memory = {
- .gadget = {
- .ops = &s3c2410_ops,
- .ep0 = &memory.ep[0].ep,
- .name = gadget_name,
- .dev = {
- .init_name = "gadget",
- },
- },
-
- /* control endpoint */
- .ep[0] = {
- .num = 0,
- .ep = {
- .name = ep0name,
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP0_FIFO_SIZE,
- },
- .dev = &memory,
- },
-
- /* first group of endpoints */
- .ep[1] = {
- .num = 1,
- .ep = {
- .name = "ep1-bulk",
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP_FIFO_SIZE,
- },
- .dev = &memory,
- .fifo_size = EP_FIFO_SIZE,
- .bEndpointAddress = 1,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- },
- .ep[2] = {
- .num = 2,
- .ep = {
- .name = "ep2-bulk",
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP_FIFO_SIZE,
- },
- .dev = &memory,
- .fifo_size = EP_FIFO_SIZE,
- .bEndpointAddress = 2,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- },
- .ep[3] = {
- .num = 3,
- .ep = {
- .name = "ep3-bulk",
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP_FIFO_SIZE,
- },
- .dev = &memory,
- .fifo_size = EP_FIFO_SIZE,
- .bEndpointAddress = 3,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- },
- .ep[4] = {
- .num = 4,
- .ep = {
- .name = "ep4-bulk",
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP_FIFO_SIZE,
- },
- .dev = &memory,
- .fifo_size = EP_FIFO_SIZE,
- .bEndpointAddress = 4,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- }
-
-};
-
-/*
- * probe - binds to the platform device
- */
-static int s3c2410_udc_probe(struct platform_device *pdev)
-{
- struct s3c2410_udc *udc = &memory;
- struct device *dev = &pdev->dev;
- int retval;
- int irq;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- usb_bus_clock = clk_get(NULL, "usb-bus-gadget");
- if (IS_ERR(usb_bus_clock)) {
- dev_err(dev, "failed to get usb bus clock source\n");
- return PTR_ERR(usb_bus_clock);
- }
-
- clk_enable(usb_bus_clock);
-
- udc_clock = clk_get(NULL, "usb-device");
- if (IS_ERR(udc_clock)) {
- dev_err(dev, "failed to get udc clock source\n");
- return PTR_ERR(udc_clock);
- }
-
- clk_enable(udc_clock);
-
- mdelay(10);
-
- dev_dbg(dev, "got and enabled clocks\n");
-
- if (strncmp(pdev->name, "s3c2440", 7) == 0) {
- dev_info(dev, "S3C2440: increasing FIFO to 128 bytes\n");
- memory.ep[1].fifo_size = S3C2440_EP_FIFO_SIZE;
- memory.ep[2].fifo_size = S3C2440_EP_FIFO_SIZE;
- memory.ep[3].fifo_size = S3C2440_EP_FIFO_SIZE;
- memory.ep[4].fifo_size = S3C2440_EP_FIFO_SIZE;
- }
-
- spin_lock_init (&udc->lock);
- udc_info = pdev->dev.platform_data;
-
- rsrc_start = S3C2410_PA_USBDEV;
- rsrc_len = S3C24XX_SZ_USBDEV;
-
- if (!request_mem_region(rsrc_start, rsrc_len, gadget_name))
- return -EBUSY;
-
- base_addr = ioremap(rsrc_start, rsrc_len);
- if (!base_addr) {
- retval = -ENOMEM;
- goto err_mem;
- }
-
- device_initialize(&udc->gadget.dev);
- udc->gadget.dev.parent = &pdev->dev;
- udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
-
- the_controller = udc;
- platform_set_drvdata(pdev, udc);
-
- s3c2410_udc_disable(udc);
- s3c2410_udc_reinit(udc);
-
- /* irq setup after old hardware state is cleaned up */
- retval = request_irq(IRQ_USBD, s3c2410_udc_irq,
- IRQF_DISABLED, gadget_name, udc);
-
- if (retval != 0) {
- dev_err(dev, "cannot get irq %i, err %d\n", IRQ_USBD, retval);
- retval = -EBUSY;
- goto err_map;
- }
-
- dev_dbg(dev, "got irq %i\n", IRQ_USBD);
-
- if (udc_info && udc_info->vbus_pin > 0) {
- retval = gpio_request(udc_info->vbus_pin, "udc vbus");
- if (retval < 0) {
- dev_err(dev, "cannot claim vbus pin\n");
- goto err_int;
- }
-
- irq = gpio_to_irq(udc_info->vbus_pin);
- if (irq < 0) {
- dev_err(dev, "no irq for gpio vbus pin\n");
- goto err_gpio_claim;
- }
-
- retval = request_irq(irq, s3c2410_udc_vbus_irq,
- IRQF_DISABLED | IRQF_TRIGGER_RISING
- | IRQF_TRIGGER_FALLING | IRQF_SHARED,
- gadget_name, udc);
-
- if (retval != 0) {
- dev_err(dev, "can't get vbus irq %d, err %d\n",
- irq, retval);
- retval = -EBUSY;
- goto err_gpio_claim;
- }
-
- dev_dbg(dev, "got irq %i\n", irq);
- } else {
- udc->vbus = 1;
- }
-
- if (udc_info && !udc_info->udc_command &&
- gpio_is_valid(udc_info->pullup_pin)) {
-
- retval = gpio_request_one(udc_info->pullup_pin,
- udc_info->vbus_pin_inverted ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "udc pullup");
- if (retval)
- goto err_vbus_irq;
- }
-
- if (s3c2410_udc_debugfs_root) {
- udc->regs_info = debugfs_create_file("registers", S_IRUGO,
- s3c2410_udc_debugfs_root,
- udc, &s3c2410_udc_debugfs_fops);
- if (!udc->regs_info)
- dev_warn(dev, "debugfs file creation failed\n");
- }
-
- dev_dbg(dev, "probe ok\n");
-
- return 0;
-
-err_vbus_irq:
- if (udc_info && udc_info->vbus_pin > 0)
- free_irq(gpio_to_irq(udc_info->vbus_pin), udc);
-err_gpio_claim:
- if (udc_info && udc_info->vbus_pin > 0)
- gpio_free(udc_info->vbus_pin);
-err_int:
- free_irq(IRQ_USBD, udc);
-err_map:
- iounmap(base_addr);
-err_mem:
- release_mem_region(rsrc_start, rsrc_len);
-
- return retval;
-}
-
-/*
- * s3c2410_udc_remove
- */
-static int s3c2410_udc_remove(struct platform_device *pdev)
-{
- struct s3c2410_udc *udc = platform_get_drvdata(pdev);
- unsigned int irq;
-
- dev_dbg(&pdev->dev, "%s()\n", __func__);
- if (udc->driver)
- return -EBUSY;
-
- debugfs_remove(udc->regs_info);
-
- if (udc_info && !udc_info->udc_command &&
- gpio_is_valid(udc_info->pullup_pin))
- gpio_free(udc_info->pullup_pin);
-
- if (udc_info && udc_info->vbus_pin > 0) {
- irq = gpio_to_irq(udc_info->vbus_pin);
- free_irq(irq, udc);
- }
-
- free_irq(IRQ_USBD, udc);
-
- iounmap(base_addr);
- release_mem_region(rsrc_start, rsrc_len);
-
- platform_set_drvdata(pdev, NULL);
-
- if (!IS_ERR(udc_clock) && udc_clock != NULL) {
- clk_disable(udc_clock);
- clk_put(udc_clock);
- udc_clock = NULL;
- }
-
- if (!IS_ERR(usb_bus_clock) && usb_bus_clock != NULL) {
- clk_disable(usb_bus_clock);
- clk_put(usb_bus_clock);
- usb_bus_clock = NULL;
- }
-
- dev_dbg(&pdev->dev, "%s: remove ok\n", __func__);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message)
-{
- s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
-
- return 0;
-}
-
-static int s3c2410_udc_resume(struct platform_device *pdev)
-{
- s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
-
- return 0;
-}
-#else
-#define s3c2410_udc_suspend NULL
-#define s3c2410_udc_resume NULL
-#endif
-
-static struct platform_driver udc_driver_2410 = {
- .driver = {
- .name = "s3c2410-usbgadget",
- .owner = THIS_MODULE,
- },
- .probe = s3c2410_udc_probe,
- .remove = s3c2410_udc_remove,
- .suspend = s3c2410_udc_suspend,
- .resume = s3c2410_udc_resume,
-};
-
-static struct platform_driver udc_driver_2440 = {
- .driver = {
- .name = "s3c2440-usbgadget",
- .owner = THIS_MODULE,
- },
- .probe = s3c2410_udc_probe,
- .remove = s3c2410_udc_remove,
- .suspend = s3c2410_udc_suspend,
- .resume = s3c2410_udc_resume,
-};
-
-static int __init udc_init(void)
-{
- int retval;
-
- dprintk(DEBUG_NORMAL, "%s: version %s\n", gadget_name, DRIVER_VERSION);
-
- s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name, NULL);
- if (IS_ERR(s3c2410_udc_debugfs_root)) {
- printk(KERN_ERR "%s: debugfs dir creation failed %ld\n",
- gadget_name, PTR_ERR(s3c2410_udc_debugfs_root));
- s3c2410_udc_debugfs_root = NULL;
- }
-
- retval = platform_driver_register(&udc_driver_2410);
- if (retval)
- goto err;
-
- retval = platform_driver_register(&udc_driver_2440);
- if (retval)
- goto err;
-
- return 0;
-
-err:
- debugfs_remove(s3c2410_udc_debugfs_root);
- return retval;
-}
-
-static void __exit udc_exit(void)
-{
- platform_driver_unregister(&udc_driver_2410);
- platform_driver_unregister(&udc_driver_2440);
- debugfs_remove(s3c2410_udc_debugfs_root);
-}
-
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
-module_init(udc_init);
-module_exit(udc_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-usbgadget");
-MODULE_ALIAS("platform:s3c2440-usbgadget");
diff --git a/drivers/usb/gadget/s3c2410_udc.h b/drivers/usb/gadget/s3c2410_udc.h
deleted file mode 100644
index 9e0bece4..00000000
--- a/drivers/usb/gadget/s3c2410_udc.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * linux/drivers/usb/gadget/s3c2410_udc.h
- * Samsung on-chip full speed USB device controllers
- *
- * Copyright (C) 2004-2007 Herbert P<>tzl - Arnaud Patard
- * Additional cleanups by Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef _S3C2410_UDC_H
-#define _S3C2410_UDC_H
-
-struct s3c2410_ep {
- struct list_head queue;
- unsigned long last_io; /* jiffies timestamp */
- struct usb_gadget *gadget;
- struct s3c2410_udc *dev;
- const struct usb_endpoint_descriptor *desc;
- struct usb_ep ep;
- u8 num;
-
- unsigned short fifo_size;
- u8 bEndpointAddress;
- u8 bmAttributes;
-
- unsigned halted : 1;
- unsigned already_seen : 1;
- unsigned setup_stage : 1;
-};
-
-
-/* Warning : ep0 has a fifo of 16 bytes */
-/* Don't try to set 32 or 64 */
-/* also testusb 14 fails wit 16 but is */
-/* fine with 8 */
-#define EP0_FIFO_SIZE 8
-#define EP_FIFO_SIZE 64
-#define DEFAULT_POWER_STATE 0x00
-
-#define S3C2440_EP_FIFO_SIZE 128
-
-static const char ep0name [] = "ep0";
-
-static const char *const ep_name[] = {
- ep0name, /* everyone has ep0 */
- /* s3c2410 four bidirectional bulk endpoints */
- "ep1-bulk", "ep2-bulk", "ep3-bulk", "ep4-bulk",
-};
-
-#define S3C2410_ENDPOINTS ARRAY_SIZE(ep_name)
-
-struct s3c2410_request {
- struct list_head queue; /* ep's requests */
- struct usb_request req;
-};
-
-enum ep0_state {
- EP0_IDLE,
- EP0_IN_DATA_PHASE,
- EP0_OUT_DATA_PHASE,
- EP0_END_XFER,
- EP0_STALL,
-};
-
-static const char *ep0states[]= {
- "EP0_IDLE",
- "EP0_IN_DATA_PHASE",
- "EP0_OUT_DATA_PHASE",
- "EP0_END_XFER",
- "EP0_STALL",
-};
-
-struct s3c2410_udc {
- spinlock_t lock;
-
- struct s3c2410_ep ep[S3C2410_ENDPOINTS];
- int address;
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- struct s3c2410_request fifo_req;
- u8 fifo_buf[EP_FIFO_SIZE];
- u16 devstatus;
-
- u32 port_status;
- int ep0state;
-
- unsigned got_irq : 1;
-
- unsigned req_std : 1;
- unsigned req_config : 1;
- unsigned req_pending : 1;
- u8 vbus;
- struct dentry *regs_info;
-};
-
-#endif
diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
index 5b791946..d5be8376 100644
--- a/drivers/usb/gadget/uvc.h
+++ b/drivers/usb/gadget/uvc.h
@@ -27,14 +27,12 @@
#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5)
#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5)
-struct uvc_request_data
-{
+struct uvc_request_data {
unsigned int length;
__u8 data[60];
};
-struct uvc_event
-{
+struct uvc_event {
union {
enum usb_device_speed speed;
struct usb_ctrlrequest req;
@@ -108,8 +106,7 @@ extern unsigned int uvc_gadget_trace_param;
* Structures
*/
-struct uvc_video
-{
+struct uvc_video {
struct usb_ep *ep;
/* Frame parameters */
@@ -137,25 +134,23 @@ struct uvc_video
unsigned int fid;
};
-enum uvc_state
-{
+enum uvc_state {
UVC_STATE_DISCONNECTED,
UVC_STATE_CONNECTED,
UVC_STATE_STREAMING,
};
-struct uvc_device
-{
- struct video_device *vdev;
- enum uvc_state state;
- struct usb_function func;
- struct uvc_video video;
+/*#define UVC_DOUBLE_STREAM*/
- /* Descriptors */
+struct uvc_common {
+ struct usb_function func;
struct {
- const struct uvc_descriptor_header * const *control;
- const struct uvc_descriptor_header * const *fs_streaming;
- const struct uvc_descriptor_header * const *hs_streaming;
+ struct uvc_descriptor_header **control;
+ struct uvc_descriptor_header **fs_streaming;
+ struct uvc_descriptor_header **hs_streaming;
+
+ struct uvc_descriptor_header **fs_streaming2;
+ struct uvc_descriptor_header **hs_streaming2;
} desc;
unsigned int control_intf;
@@ -163,20 +158,33 @@ struct uvc_device
struct usb_request *control_req;
void *control_buf;
- unsigned int streaming_intf;
+ struct uvc_device *uvc1;
+ struct uvc_device *uvc2;
+};
+struct uvc_device {
+ struct uvc_common *comm;
+ struct video_device *vdev;
+ enum uvc_state state;
+ struct uvc_video video;
+ unsigned int streaming_intf;
/* Events */
unsigned int event_length;
- unsigned int event_setup_out : 1;
+ unsigned int event_setup_out:1;
};
-static inline struct uvc_device *to_uvc(struct usb_function *f)
-{
- return container_of(f, struct uvc_device, func);
+static inline struct uvc_device *to_uvc(struct usb_function *f) {
+ struct uvc_common *comm =
+ (struct uvc_common *)container_of(f, struct uvc_common, func);
+ return comm->uvc1;
+}
+
+
+static inline struct uvc_common *to_common(struct usb_function *f) {
+ return (struct uvc_common *)container_of(f, struct uvc_common, func);
}
-struct uvc_file_handle
-{
+struct uvc_file_handle {
struct v4l2_fh vfh;
struct uvc_video *device;
};
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index f7395ac5..a0b472e6 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -132,7 +132,8 @@ uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
mutex_lock(&queue->mutex);
- if ((ret = uvc_free_buffers(queue)) < 0)
+ ret = uvc_free_buffers(queue);
+ if (ret < 0)
goto done;
/* Bail out if no buffers should be allocated. */
@@ -265,6 +266,7 @@ uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
spin_lock_irqsave(&queue->irqlock, flags);
if (queue->flags & UVC_QUEUE_DISCONNECTED) {
+ printk(KERN_EMERG "uvc_queue_buffer UVC_QUEUE_DISCONNECTED.\n");
spin_unlock_irqrestore(&queue->irqlock, flags);
ret = -ENODEV;
goto done;
@@ -323,7 +325,8 @@ uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf,
}
buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
- if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
+ ret = uvc_queue_waiton(buf, nonblocking);
+ if (ret < 0)
goto done;
uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
@@ -445,7 +448,8 @@ uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
addr = (unsigned long)queue->mem + buffer->buf.m.offset;
while (size > 0) {
page = vmalloc_to_page((void *)addr);
- if ((ret = vm_insert_page(vma, start, page)) < 0)
+ ret = vm_insert_page(vma, start, page);
+ if (ret < 0)
goto done;
start += PAGE_SIZE;
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 5e807f08..02c65b6e 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -33,10 +33,9 @@
*/
static int
-uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
-{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
- struct usb_request *req = uvc->control_req;
+uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data) {
+ struct usb_composite_dev *cdev = uvc->comm->func.config->cdev;
+ struct usb_request *req = uvc->comm->control_req;
if (data->length < 0)
return usb_ep_set_halt(cdev->gadget->ep0);
@@ -44,6 +43,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
req->length = min(uvc->event_length, data->length);
req->zero = data->length < uvc->event_length;
req->dma = DMA_ADDR_INVALID;
+ req->context = uvc;
memcpy(req->buf, data->data, data->length);
@@ -54,20 +54,24 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
* V4L2
*/
-struct uvc_format
-{
+struct uvc_format {
u8 bpp;
u32 fcc;
};
static struct uvc_format uvc_formats[] = {
- { 16, V4L2_PIX_FMT_YUYV },
- { 0, V4L2_PIX_FMT_MJPEG },
+ { 12, V4L2_PIX_FMT_NV12 },
+ { 0, V4L2_PIX_FMT_MJPEG },
+ { 0, V4L2_PIX_FMT_H264 },
};
-static int
-uvc_v4l2_get_format(struct uvc_video *video, struct v4l2_format *fmt)
+void set_yuv_bpp(int bpp)
{
+ uvc_formats[0].bpp = bpp;
+}
+
+static int
+uvc_v4l2_get_format(struct uvc_video *video, struct v4l2_format *fmt) {
fmt->fmt.pix.pixelformat = video->fcc;
fmt->fmt.pix.width = video->width;
fmt->fmt.pix.height = video->height;
@@ -81,8 +85,7 @@ uvc_v4l2_get_format(struct uvc_video *video, struct v4l2_format *fmt)
}
static int
-uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
-{
+uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt) {
struct uvc_format *format;
unsigned int imagesize;
unsigned int bpl;
@@ -119,8 +122,7 @@ uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
}
static int
-uvc_v4l2_open(struct file *file)
-{
+uvc_v4l2_open(struct file *file) {
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle;
@@ -147,6 +149,8 @@ uvc_v4l2_open(struct file *file)
handle->device = &uvc->video;
file->private_data = &handle->vfh;
+ return 0;
+
uvc_function_connect(uvc);
return 0;
@@ -156,8 +160,7 @@ error:
}
static int
-uvc_v4l2_release(struct file *file)
-{
+uvc_v4l2_release(struct file *file) {
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
@@ -179,17 +182,41 @@ uvc_v4l2_release(struct file *file)
return 0;
}
+
+
+#define VIDIOC_TEST_FMTS 0x12345678
+#define VIDIOC_RESET_DESC 0x445566
+#define VIDIOC_CONNECT_NOW 0x238956
+
+void change_usb_support_fmt(struct uvc_device *uvc, void *arg);
static long
-uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
-{
+uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) {
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct usb_composite_dev *cdev = uvc->comm->func.config->cdev;
struct uvc_video *video = &uvc->video;
int ret = 0;
switch (cmd) {
+ case VIDIOC_TEST_FMTS:
+ /*enumFmts(arg);*/
+ return 0;
+
+
+ case VIDIOC_RESET_DESC:
+
+ change_usb_support_fmt(uvc, arg);
+
+ break;
+
+ case VIDIOC_CONNECT_NOW:
+
+ uvc_function_connect(uvc);
+
+ return 0;
+ break;
+
/* Query capabilities */
case VIDIOC_QUERYCAP:
{
@@ -204,7 +231,6 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
break;
}
-
/* Get & Set format */
case VIDIOC_G_FMT:
{
@@ -215,7 +241,6 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_v4l2_get_format(video, fmt);
}
-
case VIDIOC_S_FMT:
{
struct v4l2_format *fmt = arg;
@@ -225,14 +250,13 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_v4l2_set_format(video, fmt);
}
-
/* Buffers & streaming */
case VIDIOC_REQBUFS:
{
struct v4l2_requestbuffers *rb = arg;
if (rb->type != video->queue.type ||
- rb->memory != V4L2_MEMORY_MMAP)
+ rb->memory != V4L2_MEMORY_MMAP)
return -EINVAL;
ret = uvc_alloc_buffers(&video->queue, rb->count,
@@ -244,7 +268,6 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ret = 0;
break;
}
-
case VIDIOC_QUERYBUF:
{
struct v4l2_buffer *buf = arg;
@@ -254,7 +277,6 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_query_buffer(&video->queue, buf);
}
-
case VIDIOC_QBUF:
if ((ret = uvc_queue_buffer(&video->queue, arg)) < 0)
return ret;
@@ -274,7 +296,6 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_video_enable(video, 1);
}
-
case VIDIOC_STREAMOFF:
{
int *type = arg;
@@ -286,27 +307,26 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
/* Events */
- case VIDIOC_DQEVENT:
+ case VIDIOC_DQEVENT:
{
- struct v4l2_event *event = arg;
-
- ret = v4l2_event_dequeue(&handle->vfh, event,
- file->f_flags & O_NONBLOCK);
- if (ret == 0 && event->type == UVC_EVENT_SETUP) {
- struct uvc_event *uvc_event = (void *)&event->u.data;
-
- /* Tell the complete callback to generate an event for
- * the next request that will be enqueued by
- * uvc_event_write.
- */
- uvc->event_setup_out =
- !(uvc_event->req.bRequestType & USB_DIR_IN);
- uvc->event_length = uvc_event->req.wLength;
- }
-
- return ret;
+ struct v4l2_event *event = arg;
+
+ ret = v4l2_event_dequeue(&handle->vfh, event,
+ file->f_flags & O_NONBLOCK);
+ if (ret == 0 && event->type == UVC_EVENT_SETUP) {
+ struct uvc_event *uvc_event = (void *)&event->u.data;
+
+ /* Tell the complete callback to generate an event for
+ * the next request that will be enqueued by
+ * uvc_event_write.
+ */
+ uvc->event_setup_out =
+ !(uvc_event->req.bRequestType & USB_DIR_IN);
+ uvc->event_length = uvc_event->req.wLength;
}
+ return ret;
+ }
case VIDIOC_SUBSCRIBE_EVENT:
{
struct v4l2_event_subscription *sub = arg;
@@ -316,7 +336,6 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return v4l2_event_subscribe(&handle->vfh, arg);
}
-
case VIDIOC_UNSUBSCRIBE_EVENT:
return v4l2_event_unsubscribe(&handle->vfh, arg);
@@ -332,14 +351,12 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
static long
-uvc_v4l2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
+uvc_v4l2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) {
return video_usercopy(file, cmd, arg, uvc_v4l2_do_ioctl);
}
static int
-uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
-{
+uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma) {
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
@@ -347,8 +364,7 @@ uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
}
static unsigned int
-uvc_v4l2_poll(struct file *file, poll_table *wait)
-{
+uvc_v4l2_poll(struct file *file, poll_table *wait) {
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c
index b08f3543..10c02ffb 100644
--- a/drivers/usb/gadget/uvc_video.c
+++ b/drivers/usb/gadget/uvc_video.c
@@ -26,6 +26,19 @@
* Video codecs
*/
+unsigned char header[] = {
+ 0x0c, 0x8d, 0x51, 0x0e, 0xac, 0x1d, 0x37, 0xd1, 0xaf, 0x1d, 0x78, 0x02,
+ 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73,
+ 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86,
+ 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99,
+ 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3,
+ 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6,
+ 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9,
+ 0xda, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xff, 0xdd, 0x00, 0x04, 0x00,
+ 0x28, 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11,
+};
+
static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
u8 *data, int len)
@@ -93,7 +106,7 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
}
if (video->payload_size == video->max_payload_size ||
- buf->buf.bytesused == video->queue.buf_used)
+ buf->buf.bytesused == video->queue.buf_used)
video->payload_size = 0;
}
@@ -171,12 +184,13 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
break;
case -ESHUTDOWN:
- printk(KERN_INFO "VS request cancelled.\n");
+ uvc_queue_cancel(&video->queue, 1);
goto requeue;
default:
printk(KERN_INFO "VS request completed with status %d.\n",
req->status);
+ uvc_queue_cancel(&video->queue, 0);
goto requeue;
}
@@ -188,8 +202,8 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
}
video->encode(req, video, buf);
-
- if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) {
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (ret < 0) {
printk(KERN_INFO "Failed to queue request (%d).\n", ret);
usb_ep_set_halt(ep);
spin_unlock_irqrestore(&video->queue.irqlock, flags);
@@ -236,7 +250,8 @@ uvc_video_alloc_requests(struct uvc_video *video)
BUG_ON(video->req_size);
for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
- video->req_buffer[i] = kmalloc(video->ep->maxpacket, GFP_KERNEL);
+ video->req_buffer[i] = \
+ kmalloc(video->ep->maxpacket, GFP_KERNEL);
if (video->req_buffer[i] == NULL)
goto error;
@@ -310,7 +325,8 @@ uvc_video_pump(struct uvc_video *video)
video->encode(req, video, buf);
/* Queue the USB request */
- if ((ret = usb_ep_queue(video->ep, req, GFP_KERNEL)) < 0) {
+ ret = usb_ep_queue(video->ep, req, GFP_KERNEL);
+ if (ret < 0) {
printk(KERN_INFO "Failed to queue request (%d)\n", ret);
usb_ep_set_halt(video->ep);
spin_unlock_irqrestore(&video->queue.irqlock, flags);
@@ -346,13 +362,15 @@ uvc_video_enable(struct uvc_video *video, int enable)
uvc_video_free_requests(video);
uvc_queue_enable(&video->queue, 0);
+
+ usb_ep_disable(video->ep);
return 0;
}
-
- if ((ret = uvc_queue_enable(&video->queue, 1)) < 0)
+ ret = uvc_queue_enable(&video->queue, 1);
+ if (ret < 0)
return ret;
-
- if ((ret = uvc_video_alloc_requests(video)) < 0)
+ ret = uvc_video_alloc_requests(video);
+ if (ret < 0)
return ret;
if (video->max_payload_size) {
diff --git a/drivers/usb/gadget/webcam.c b/drivers/usb/gadget/webcam.c
index a5a0fdb8..cc319678 100644
--- a/drivers/usb/gadget/webcam.c
+++ b/drivers/usb/gadget/webcam.c
@@ -10,6 +10,17 @@
* (at your option) any later version.
*
*/
+
+
+
+
+#ifdef TEST_AUDIO
+#include "audio.c"
+#else
+
+#define ENABLE_UVC
+#define ENABLE_UAC
+
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/usb/video.h>
@@ -32,18 +43,34 @@
#include "uvc_video.c"
#include "uvc_v4l2.c"
#include "f_uvc.c"
-
+#include "f_uac.c"
+#include "audio_poll.c"
+#include "uvc.h"
/* --------------------------------------------------------------------------
* Device descriptor
*/
+#define YUV_NV12 0
+#define YUV_YUY2 1
+
+
+#ifdef ENABLE_UVC
+
#define WEBCAM_VENDOR_ID 0x1d6b /* Linux Foundation */
#define WEBCAM_PRODUCT_ID 0x0102 /* Webcam A/V gadget */
+
+#else
+
+#define WEBCAM_VENDOR_ID 0x3d9c /* Linux Foundation */
+#define WEBCAM_PRODUCT_ID 0x0806 /* Webcam A/V gadget */
+
+#endif
+
#define WEBCAM_DEVICE_BCD 0x0010 /* 0.10 */
-static char webcam_vendor_label[] = "Linux Foundation";
-static char webcam_product_label[] = "Webcam gadget";
-static char webcam_config_label[] = "Video";
+static const char webcam_vendor_label[] = "FULLHAN webcam";
+static const char webcam_product_label[] = "Webcam gadget";
+static const char webcam_config_label[] = "Video";
/* string IDs are assigned dynamically */
@@ -51,6 +78,16 @@ static char webcam_config_label[] = "Video";
#define STRING_PRODUCT_IDX 1
#define STRING_DESCRIPTION_IDX 2
+/* identification number of Unit or Terminal */
+#define UVC_INTERFACE_ID 0
+#define UVC_CAMERAL_TERMINAL_ID 1
+#define UVC_PROCESSING_UNIT_ID 2
+#define UVC_H264_EXTENSION_UNIT_ID 3
+#define UVC_OUTPUT_TERMINAL_ID 4
+#define UVC_OUTPUT_TERMINAL2_ID 5
+
+
+
static struct usb_string webcam_strings[] = {
[STRING_MANUFACTURER_IDX].s = webcam_vendor_label,
[STRING_PRODUCT_IDX].s = webcam_product_label,
@@ -85,9 +122,9 @@ static struct usb_device_descriptor webcam_device_descriptor = {
.bNumConfigurations = 0, /* dynamic */
};
-DECLARE_UVC_HEADER_DESCRIPTOR(1);
+DECLARE_UVC_HEADER_DESCRIPTOR(2);
-static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
+static struct UVC_HEADER_DESCRIPTOR(2) uvc_control_header = {
.bLength = UVC_DT_HEADER_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_HEADER,
@@ -102,7 +139,7 @@ static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = {
.bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_INPUT_TERMINAL,
- .bTerminalID = 1,
+ .bTerminalID = UVC_CAMERAL_TERMINAL_ID,
.wTerminalType = cpu_to_le16(0x0201),
.bAssocTerminal = 0,
.iTerminal = 0,
@@ -110,64 +147,117 @@ static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = {
.wObjectiveFocalLengthMax = cpu_to_le16(0),
.wOcularFocalLength = cpu_to_le16(0),
.bControlSize = 3,
- .bmControls[0] = 2,
- .bmControls[1] = 0,
- .bmControls[2] = 0,
+ .bmControls[0] = 0x0e, /* 0xff*/
+ .bmControls[1] = 0x00, /* 0xff*/
+ .bmControls[2] = 0x07,
};
static const struct uvc_processing_unit_descriptor uvc_processing = {
.bLength = UVC_DT_PROCESSING_UNIT_SIZE(2),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_PROCESSING_UNIT,
- .bUnitID = 2,
- .bSourceID = 1,
+ .bUnitID = UVC_PROCESSING_UNIT_ID,
+ .bSourceID = UVC_CAMERAL_TERMINAL_ID,
.wMaxMultiplier = cpu_to_le16(16*1024),
.bControlSize = 2,
- .bmControls[0] = 1,
- .bmControls[1] = 0,
+ .bmControls[0] = 0x0b, /*0xff*/
+ .bmControls[1] = 0x00, /*0xff*/
.iProcessing = 0,
};
+
+DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(1, 18);
+
+static const struct UVC_EXTENSION_UNIT_DESCRIPTOR(1, 18) h264_extension_unit = {
+ .bLength = UVC_DT_EXTENSION_UNIT_SIZE(1, 18),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VC_EXTENSION_UNIT,
+ .bUnitID = UVC_H264_EXTENSION_UNIT_ID,
+ .guidExtensionCode = { 0x41, 0x76, 0x9e, 0xa2, 0x04,\
+ 0xde, 0xe3, 0x47, 0x8b, 0x2b, 0xf4, 0x34, 0x1a,\
+ 0xff, 0x00, 0x3b},
+ .bNumControls = 17,
+ .bNrInPins = 1,
+ .baSourceID[0] = UVC_PROCESSING_UNIT_ID,
+ .bControlSize = 18,
+ .bmControls[0] = 0xff,
+ .bmControls[1] = 0xff,
+ .bmControls[2] = 0xff,
+ .bmControls[3] = 0xff,
+ .bmControls[4] = 0xff,
+ .bmControls[5] = 0xff,
+ .bmControls[6] = 0xff,
+ .bmControls[7] = 0xff,
+ .bmControls[8] = 0xff,
+ .bmControls[9] = 0xff,
+ .bmControls[10] = 0xff,
+ .bmControls[11] = 0xff,
+ .bmControls[12] = 0xff,
+ .bmControls[13] = 0xff,
+ .bmControls[14] = 0xff,
+ .bmControls[15] = 0xff,
+ .bmControls[16] = 0xff,
+ .iExtension = 0,
+};
+
+
static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
.bLength = UVC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_OUTPUT_TERMINAL,
- .bTerminalID = 3,
+ .bTerminalID = UVC_OUTPUT_TERMINAL_ID,
.wTerminalType = cpu_to_le16(0x0101),
.bAssocTerminal = 0,
- .bSourceID = 2,
+ .bSourceID = UVC_H264_EXTENSION_UNIT_ID,
.iTerminal = 0,
};
-DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(1, 2);
-static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = {
- .bLength = UVC_DT_INPUT_HEADER_SIZE(1, 2),
+
+static const struct uvc_output_terminal_descriptor uvc_output_terminal2 = {
+ .bLength = UVC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VC_OUTPUT_TERMINAL,
+ .bTerminalID = UVC_OUTPUT_TERMINAL2_ID,
+ .wTerminalType = cpu_to_le16(0x0101),
+ .bAssocTerminal = 0,
+ .bSourceID = UVC_H264_EXTENSION_UNIT_ID,
+ .iTerminal = 0,
+};
+
+
+
+
+DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(1, 1);
+
+static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 1) uvc_input_header = {
+ .bLength = UVC_DT_INPUT_HEADER_SIZE(1, 1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_INPUT_HEADER,
- .bNumFormats = 2,
+ .bNumFormats = 1,
.wTotalLength = 0, /* dynamic */
.bEndpointAddress = 0, /* dynamic */
.bmInfo = 0,
- .bTerminalLink = 3,
+ .bTerminalLink = UVC_OUTPUT_TERMINAL_ID,
.bStillCaptureMethod = 0,
.bTriggerSupport = 0,
.bTriggerUsage = 0,
.bControlSize = 1,
.bmaControls[0][0] = 0,
- .bmaControls[1][0] = 4,
+ /*.bmaControls[1][0] = 4,*/
+ /*.bmaControls[2][0] = 4,*/
};
-static const struct uvc_format_uncompressed uvc_format_yuv = {
+static /*const*/ struct uvc_format_uncompressed uvc_format_nv12 = {
.bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED,
.bFormatIndex = 1,
- .bNumFrameDescriptors = 2,
- .guidFormat =
- { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71},
- .bBitsPerPixel = 16,
+ .bNumFrameDescriptors = 3,
+ .guidFormat = { 'N', 'V', '1', '2',\
+ 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00,\
+ 0xaa, 0x00, 0x38, 0x9b, 0x71},
+ .bBitsPerPixel = 12,
.bDefaultFrameIndex = 1,
.bAspectRatioX = 0,
.bAspectRatioY = 0,
@@ -175,29 +265,31 @@ static const struct uvc_format_uncompressed uvc_format_yuv = {
.bCopyProtect = 0,
};
-DECLARE_UVC_FRAME_UNCOMPRESSED(1);
DECLARE_UVC_FRAME_UNCOMPRESSED(3);
+DECLARE_UVC_FRAME_UNCOMPRESSED(5);
-static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
- .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
+static const struct UVC_FRAME_UNCOMPRESSED(5) uvc_frame_nv12_360p = {
+ .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(5),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED,
.bFrameIndex = 1,
.bmCapabilities = 0,
.wWidth = cpu_to_le16(640),
- .wHeight = cpu_to_le16(360),
+ .wHeight = cpu_to_le16(368),
.dwMinBitRate = cpu_to_le32(18432000),
.dwMaxBitRate = cpu_to_le32(55296000),
- .dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
- .dwDefaultFrameInterval = cpu_to_le32(666666),
- .bFrameIntervalType = 3,
- .dwFrameInterval[0] = cpu_to_le32(666666),
- .dwFrameInterval[1] = cpu_to_le32(1000000),
- .dwFrameInterval[2] = cpu_to_le32(5000000),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(471040),
+ .dwDefaultFrameInterval = cpu_to_le32(333333),
+ .bFrameIntervalType = 5,
+ .dwFrameInterval[0] = cpu_to_le32(166666), /*60fps*/
+ .dwFrameInterval[1] = cpu_to_le32(333333), /*30fps*/
+ .dwFrameInterval[2] = cpu_to_le32(666666), /*15fps*/
+ .dwFrameInterval[3] = cpu_to_le32(1000000), /*10fps*/
+ .dwFrameInterval[4] = cpu_to_le32(5000000), /*2fps*/
};
-static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
- .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
+static const struct UVC_FRAME_UNCOMPRESSED(5) uvc_frame_nv12_720p = {
+ .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(5),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED,
.bFrameIndex = 2,
@@ -207,17 +299,54 @@ static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
.dwMinBitRate = cpu_to_le32(29491200),
.dwMaxBitRate = cpu_to_le32(29491200),
.dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
- .dwDefaultFrameInterval = cpu_to_le32(5000000),
- .bFrameIntervalType = 1,
- .dwFrameInterval[0] = cpu_to_le32(5000000),
+ .dwDefaultFrameInterval = cpu_to_le32(333333),
+ .bFrameIntervalType = 5,
+ .dwFrameInterval[0] = cpu_to_le32(166666), /*60fps*/
+ .dwFrameInterval[1] = cpu_to_le32(333333), /*30fps*/
+ .dwFrameInterval[2] = cpu_to_le32(666666), /*15fps*/
+ .dwFrameInterval[3] = cpu_to_le32(1000000), /*10fps*/
+ .dwFrameInterval[4] = cpu_to_le32(5000000), /*2fps*/
+};
+
+static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_nv12_1080p = {
+ .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED,
+ .bFrameIndex = 3,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(1920),
+ .wHeight = cpu_to_le16(1088),
+ .dwMinBitRate = cpu_to_le32(29491200),
+ .dwMaxBitRate = cpu_to_le32(29491200),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
+ .dwDefaultFrameInterval = cpu_to_le32(666666),
+ .bFrameIntervalType = 3,
+ .dwFrameInterval[0] = cpu_to_le32(666666), /*15fps*/
+ .dwFrameInterval[1] = cpu_to_le32(1000000), /*10fps*/
+ .dwFrameInterval[2] = cpu_to_le32(5000000), /*2fps*/
+};
+
+static /*const*/ struct uvc_format_mjpeg uvc_format_mjpg = {
+ .bLength = UVC_DT_FORMAT_MJPEG_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FORMAT_MJPEG,
+ .bFormatIndex = 1,
+ .bNumFrameDescriptors = 3,
+ .bmFlags = 0,
+ .bDefaultFrameIndex = 1,
+ .bAspectRatioX = 0,
+ .bAspectRatioY = 0,
+ .bmInterfaceFlags = 0,
+ .bCopyProtect = 0,
};
-static const struct uvc_format_mjpeg uvc_format_mjpg = {
+
+static const struct uvc_format_mjpeg uvc_format_mjpg_fm1 = {
.bLength = UVC_DT_FORMAT_MJPEG_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FORMAT_MJPEG,
- .bFormatIndex = 2,
- .bNumFrameDescriptors = 2,
+ .bFormatIndex = 1,
+ .bNumFrameDescriptors = 1,
.bmFlags = 0,
.bDefaultFrameIndex = 1,
.bAspectRatioX = 0,
@@ -226,29 +355,30 @@ static const struct uvc_format_mjpeg uvc_format_mjpg = {
.bCopyProtect = 0,
};
-DECLARE_UVC_FRAME_MJPEG(1);
-DECLARE_UVC_FRAME_MJPEG(3);
+DECLARE_UVC_FRAME_MJPEG(5);
-static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = {
- .bLength = UVC_DT_FRAME_MJPEG_SIZE(3),
+static const struct UVC_FRAME_MJPEG(5) uvc_frame_mjpg_360p = {
+ .bLength = UVC_DT_FRAME_MJPEG_SIZE(5),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FRAME_MJPEG,
.bFrameIndex = 1,
.bmCapabilities = 0,
.wWidth = cpu_to_le16(640),
- .wHeight = cpu_to_le16(360),
+ .wHeight = cpu_to_le16(368),
.dwMinBitRate = cpu_to_le32(18432000),
.dwMaxBitRate = cpu_to_le32(55296000),
.dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
- .dwDefaultFrameInterval = cpu_to_le32(666666),
- .bFrameIntervalType = 3,
- .dwFrameInterval[0] = cpu_to_le32(666666),
- .dwFrameInterval[1] = cpu_to_le32(1000000),
- .dwFrameInterval[2] = cpu_to_le32(5000000),
+ .dwDefaultFrameInterval = cpu_to_le32(333333),
+ .bFrameIntervalType = 5,
+ .dwFrameInterval[0] = cpu_to_le32(166666), /*60fps*/
+ .dwFrameInterval[1] = cpu_to_le32(333333), /*30fps*/
+ .dwFrameInterval[2] = cpu_to_le32(666666), /*15fps*/
+ .dwFrameInterval[3] = cpu_to_le32(1000000), /*10fps*/
+ .dwFrameInterval[4] = cpu_to_le32(5000000), /*2fps*/
};
-static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
- .bLength = UVC_DT_FRAME_MJPEG_SIZE(1),
+static const struct UVC_FRAME_MJPEG(5) uvc_frame_mjpg_720p = {
+ .bLength = UVC_DT_FRAME_MJPEG_SIZE(5),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FRAME_MJPEG,
.bFrameIndex = 2,
@@ -258,11 +388,138 @@ static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
.dwMinBitRate = cpu_to_le32(29491200),
.dwMaxBitRate = cpu_to_le32(29491200),
.dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
- .dwDefaultFrameInterval = cpu_to_le32(5000000),
- .bFrameIntervalType = 1,
- .dwFrameInterval[0] = cpu_to_le32(5000000),
+ .dwDefaultFrameInterval = cpu_to_le32(333333),
+ .bFrameIntervalType = 5,
+ .dwFrameInterval[0] = cpu_to_le32(166666), /*60fps*/
+ .dwFrameInterval[1] = cpu_to_le32(333333), /*30fps*/
+ .dwFrameInterval[2] = cpu_to_le32(666666), /*15fps*/
+ .dwFrameInterval[3] = cpu_to_le32(1000000), /*10fps*/
+ .dwFrameInterval[4] = cpu_to_le32(5000000), /*2fps*/
};
+static const struct UVC_FRAME_MJPEG(5) uvc_frame_mjpg_1080p = {
+ .bLength = UVC_DT_FRAME_MJPEG_SIZE(5),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FRAME_MJPEG,
+ .bFrameIndex = 3,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(1920),
+ .wHeight = cpu_to_le16(1088),
+ .dwMinBitRate = cpu_to_le32(29491200),
+ .dwMaxBitRate = cpu_to_le32(29491200),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(4147200),
+ .dwDefaultFrameInterval = cpu_to_le32(333333),
+ .bFrameIntervalType = 5,
+ .dwFrameInterval[0] = cpu_to_le32(166666), /*60fps*/
+ .dwFrameInterval[1] = cpu_to_le32(333333), /*30fps*/
+ .dwFrameInterval[2] = cpu_to_le32(666666), /*15fps*/
+ .dwFrameInterval[3] = cpu_to_le32(1000000), /*10fps*/
+ .dwFrameInterval[4] = cpu_to_le32(5000000), /*2fps*/
+};
+
+static /*const*/ struct uvc_format_frameBased uvc_format_h264 = {
+ .bLength = UVC_DT_FORMAT_FRAMEBASED_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FORMAT_FRAME_BASED,
+ .bFormatIndex = 1,
+ .bNumFrameDescriptors = 1,
+ .guidFormat = { 'H', '2', '6', '4',\
+ 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa,\
+ 0x00, 0x38, 0x9b, 0x71},
+ .bBitsPerPixel = 0,
+ .bDefaultFrameIndex = 1,
+ .bAspectRatioX = 0,
+ .bAspectRatioY = 0,
+ .bmInterfaceFlags = 0,
+ .bCopyProtect = 0,
+ .bVariableSize = 1,
+};
+
+DECLARE_UVC_FRAME_FRAMEBASED(5);
+
+static const struct UVC_FRAME_FRAMEBASED(5) uvc_frame_h264_360p = {
+ .bLength = UVC_DT_FRAME_FRAMEBASED_SIZE(5),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FRAME_FRAME_BASED,
+ .bFrameIndex = 1,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(640),
+ .wHeight = cpu_to_le16(368),
+ .dwMinBitRate = cpu_to_le32(18432000),
+ .dwMaxBitRate = cpu_to_le32(55296000),
+ .dwDefaultFrameInterval = cpu_to_le32(333333),
+ .bFrameIntervalType = 5,
+ .dwBytesPerLine = 0,
+ /*when bFrameIntervalType = 0 which means continuous frame interval
+ //.dwMinFrameInterval = 0,
+ //.dwMaxFrameInterval = 0,
+ //.dwFrameIntervalStep= 0,*/
+ .dwFrameInterval[0] = cpu_to_le32(166666), /*60fps*/
+ .dwFrameInterval[1] = cpu_to_le32(333333), /*30fps*/
+ .dwFrameInterval[2] = cpu_to_le32(666666), /*15fps*/
+ .dwFrameInterval[3] = cpu_to_le32(1000000), /*10fps*/
+ .dwFrameInterval[4] = cpu_to_le32(5000000), /*2fps*/
+};
+
+static const struct UVC_FRAME_FRAMEBASED(5) uvc_frame_h264_720p = {
+ .bLength = UVC_DT_FRAME_FRAMEBASED_SIZE(5),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FRAME_FRAME_BASED,
+ .bFrameIndex = 2,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(1280),
+ .wHeight = cpu_to_le16(720),
+ .dwMinBitRate = cpu_to_le32(18432000),
+ .dwMaxBitRate = cpu_to_le32(55296000),
+ .dwDefaultFrameInterval = cpu_to_le32(333333),
+ .bFrameIntervalType = 5,
+ .dwBytesPerLine = 0,
+ /*when bFrameIntervalType = 0 which means continuous frame interval
+ //.dwMinFrameInterval = 0,
+ //.dwMaxFrameInterval = 0,
+ //.dwFrameIntervalStep= 0,*/
+ .dwFrameInterval[0] = cpu_to_le32(166666), /*60fps*/
+ .dwFrameInterval[1] = cpu_to_le32(333333), /*30fps*/
+ .dwFrameInterval[2] = cpu_to_le32(666666), /*15fps*/
+ .dwFrameInterval[3] = cpu_to_le32(1000000), /*10fps*/
+ .dwFrameInterval[4] = cpu_to_le32(5000000), /*2fps*/
+};
+
+static const struct UVC_FRAME_FRAMEBASED(5) uvc_frame_h264_1080p = {
+ .bLength = UVC_DT_FRAME_FRAMEBASED_SIZE(5),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FRAME_FRAME_BASED,
+ .bFrameIndex = 3,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(1920),
+ .wHeight = cpu_to_le16(1088),
+ .dwMinBitRate = cpu_to_le32(18432000),
+ .dwMaxBitRate = cpu_to_le32(55296000),
+ .dwDefaultFrameInterval = cpu_to_le32(333333),
+ .bFrameIntervalType = 5,
+ .dwBytesPerLine = 0,
+ /*when bFrameIntervalType = 0 which means continuous frame interval
+ //.dwMinFrameInterval = 0,
+ //.dwMaxFrameInterval = 0,
+ //.dwFrameIntervalStep= 0,*/
+ .dwFrameInterval[0] = cpu_to_le32(166666), /*60fps*/
+ .dwFrameInterval[1] = cpu_to_le32(333333), /*30fps*/
+ .dwFrameInterval[2] = cpu_to_le32(666666), /*15fps*/
+ .dwFrameInterval[3] = cpu_to_le32(1000000), /*10fps*/
+ .dwFrameInterval[4] = cpu_to_le32(5000000), /*2fps*/
+};
+#if 0
+static const struct uvc_still_image_descriptor uvc_still_image = {
+ .bLength = UVC_DT_STILL_IMAGE_SIZE,
+ .bDescriptorType = 0x24,
+ .bDescriptorSubType = 0x03,
+ .bEndpointAddress = 0,
+ .bNumImageSizePatterns = 1,
+ .wSizePatterns[0] = 640,
+ .wSizePatterns[1] = 480,
+ .bNumCompressionPattern = 0,
+};
+#endif
static const struct uvc_color_matching_descriptor uvc_color_matching = {
.bLength = UVC_DT_COLOR_MATCHING_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
@@ -276,43 +533,474 @@ static const struct uvc_descriptor_header * const uvc_control_cls[] = {
(const struct uvc_descriptor_header *) &uvc_control_header,
(const struct uvc_descriptor_header *) &uvc_camera_terminal,
(const struct uvc_descriptor_header *) &uvc_processing,
+ (const struct uvc_descriptor_header *) &h264_extension_unit,
(const struct uvc_descriptor_header *) &uvc_output_terminal,
+#ifdef UVC_DOUBLE_STREAM
+ (const struct uvc_descriptor_header *) &uvc_output_terminal2,
+#endif
NULL,
};
static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_input_header,
- (const struct uvc_descriptor_header *) &uvc_format_yuv,
- (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
- (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ /*(const struct uvc_descriptor_header *) &uvc_format_nv12,
+ //(const struct uvc_descriptor_header *) &uvc_frame_nv12_360p,
+ //(const struct uvc_descriptor_header *) &uvc_frame_nv12_720p,
+ //(const struct uvc_descriptor_header *) &uvc_frame_nv12_1080p,*/
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_1080p,
+ /*(const struct uvc_descriptor_header *) &uvc_format_h264,
+ //(const struct uvc_descriptor_header *) &uvc_frame_h264_360p,
+ //(const struct uvc_descriptor_header *) &uvc_frame_h264_720p,
+ //(const struct uvc_descriptor_header *) &uvc_frame_h264_1080p,*/
(const struct uvc_descriptor_header *) &uvc_color_matching,
NULL,
};
static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_input_header,
- (const struct uvc_descriptor_header *) &uvc_format_yuv,
- (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
- (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
- (const struct uvc_descriptor_header *) &uvc_format_mjpg,
- (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
- (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+ /*(const struct uvc_descriptor_header *) &uvc_format_nv12,
+ //(const struct uvc_descriptor_header *) &uvc_frame_nv12_360p,
+ //(const struct uvc_descriptor_header *) &uvc_frame_nv12_720p,
+ //(const struct uvc_descriptor_header *) &uvc_frame_nv12_1080p,
+ //(const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ //(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ //(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+ //(const struct uvc_descriptor_header *) &uvc_frame_mjpg_1080p,*/
+ (const struct uvc_descriptor_header *) &uvc_format_h264,
+ (const struct uvc_descriptor_header *) &uvc_frame_h264_360p,
+ /*(const struct uvc_descriptor_header *) &uvc_frame_h264_720p,
+ //(const struct uvc_descriptor_header *) &uvc_frame_h264_1080p,*/
(const struct uvc_descriptor_header *) &uvc_color_matching,
NULL,
};
+#define MAX_FRAME_DESCRIPTORS_NUM 10
+#define MAX_FRAME_INTERVAL_NUM 5
+struct uvc_frm_mjpg_info {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __u16 wWidth;
+ __u16 wHeight;
+ __u32 dwMinBitRate;
+ __u32 dwMaxBitRate;
+ __u32 dwMaxVideoFrameBufferSize;
+ __u32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __u32 dwFrameInterval[MAX_FRAME_INTERVAL_NUM];
+} __packed;
+
+
+struct uvc_frm_yuv_info {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __u16 wWidth;
+ __u16 wHeight;
+ __u32 dwMinBitRate;
+ __u32 dwMaxBitRate;
+ __u32 dwMaxVideoFrameBufferSize;
+ __u32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __u32 dwFrameInterval[MAX_FRAME_INTERVAL_NUM];
+} __packed;
+
+
+
+struct uvc_frm_h264_info {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __u16 wWidth;
+ __u16 wHeight;
+ __u32 dwMinBitRate;
+ __u32 dwMaxBitRate;
+ __u32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __u32 dwBytesPerLine;
+ __u32 dwFrameInterval[MAX_FRAME_INTERVAL_NUM];
+} __packed;
+
+
+struct uvc_frame_info {
+ unsigned int width;
+ unsigned int height;
+ unsigned int intervals[8];
+};
+
+struct uvc_format_info {
+ unsigned int fcc;
+ const struct uvc_frame_info *frames;
+};
+
+
+struct uvc_fmt_array_info {
+ unsigned int yuv_type;
+
+ unsigned int fmt_num;
+ struct uvc_format_info *pFmts;
+
+ unsigned int fmt2_num;
+ struct uvc_format_info *pFmts2;
+
+};
+
+
+
+
+struct uvc_fmt_array_data {
+ struct UVC_INPUT_HEADER_DESCRIPTOR(1, 1) uvc_input_header;
+ struct uvc_format_uncompressed uvc_format_nv12;
+ struct uvc_format_mjpeg uvc_format_mjpg;
+ struct uvc_format_frameBased uvc_format_h264;
+
+ struct uvc_frm_yuv_info yuv_frames[MAX_FRAME_DESCRIPTORS_NUM];
+ struct uvc_frm_mjpg_info mjpg_frames[MAX_FRAME_DESCRIPTORS_NUM];
+ struct uvc_frm_h264_info h264_frames[MAX_FRAME_DESCRIPTORS_NUM];
+ struct uvc_descriptor_header *uvc_streaming_data[MAX_FRAME_DESCRIPTORS_NUM*4];
+
+ int fmt_num;
+};
+
+static struct uvc_fmt_array_data uvc_fmt_array[2];
+static struct uvc_fmt_array_data *pCurFmtArray;
+
+
+static int uvc_stream_idx;
+static int uvc_fmt_idx = 1;
+
+
+int get_frame_array_num(const struct uvc_frame_info *pFrms)
+{
+ const struct uvc_frame_info *pFrmData = pFrms;
+ int num = 0;
+ while (1) {
+ if (pFrmData->width == 0)
+ break;
+ num++;
+ pFrmData++;
+ }
+
+ if (num > MAX_FRAME_DESCRIPTORS_NUM)
+ num = MAX_FRAME_DESCRIPTORS_NUM;
+
+ return num;
+}
+
+
+int get_frame_intervals_num(const struct uvc_frame_info *pFrms)
+{
+ const struct uvc_frame_info *pFrmData = pFrms;
+ int num = 0;
+
+ while (1) {
+ if (pFrmData->intervals[num] == 0)
+ break;
+ num++;
+ }
+
+ if (num > MAX_FRAME_INTERVAL_NUM)
+ num = MAX_FRAME_INTERVAL_NUM;
+
+ return num;
+}
+
+
+static void
+gen_yuv_frame_data(struct uvc_frm_yuv_info *pYuv,
+ int idx,
+ const struct uvc_frame_info *pFrm)
+{
+ int i;
+ int num = get_frame_intervals_num(pFrm);
+
+ pYuv->bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(num);
+ pYuv->bDescriptorType = USB_DT_CS_INTERFACE;
+ pYuv->bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED;
+ pYuv->bFrameIndex = idx;
+ pYuv->bmCapabilities = 0;
+ pYuv->wWidth = cpu_to_le16(pFrm->width);
+ pYuv->wHeight = cpu_to_le16(pFrm->height);
+ pYuv->dwMinBitRate = cpu_to_le32(18432000);
+ pYuv->dwMaxBitRate = cpu_to_le32(55296000);
+ pYuv->dwMaxVideoFrameBufferSize = cpu_to_le32(471040);
+ pYuv->dwDefaultFrameInterval = cpu_to_le32(pFrm->intervals[0]);
+
+ pYuv->bFrameIntervalType = num;
+ for (i = 0; i < num; i++)
+ pYuv->dwFrameInterval[i] =
+ cpu_to_le32(pFrm->intervals[i]);
+
+}
+
+
+static void
+gen_mjpg_frame_data(struct uvc_frm_mjpg_info *pMjpg,
+ int idx,
+ const struct uvc_frame_info *pFrm)
+{
+ int i;
+ int num = get_frame_intervals_num(pFrm);
+
+ pMjpg->bLength = UVC_DT_FRAME_MJPEG_SIZE(num);
+ pMjpg->bDescriptorType = USB_DT_CS_INTERFACE;
+ pMjpg->bDescriptorSubType = UVC_VS_FRAME_MJPEG;
+ pMjpg->bFrameIndex = idx;
+ pMjpg->bmCapabilities = 0;
+ pMjpg->wWidth = cpu_to_le16(pFrm->width);
+ pMjpg->wHeight = cpu_to_le16(pFrm->height);
+ pMjpg->dwMinBitRate = cpu_to_le32(18432000);
+ pMjpg->dwMaxBitRate = cpu_to_le32(55296000);
+ pMjpg->dwMaxVideoFrameBufferSize = cpu_to_le32(460800);
+ pMjpg->dwDefaultFrameInterval = cpu_to_le32(pFrm->intervals[0]);
+
+
+ pMjpg->bFrameIntervalType = num;
+ for (i = 0; i < num; i++)
+ pMjpg->dwFrameInterval[i] =
+ cpu_to_le32(pFrm->intervals[i]);
+
+}
+
+static void gen_h264_frame_data(struct uvc_frm_h264_info *pH264,
+ int idx,
+ const struct uvc_frame_info *pFrm)
+{
+ int i;
+ int num = get_frame_intervals_num(pFrm);
+
+ pH264->bLength = UVC_DT_FRAME_FRAMEBASED_SIZE(num);
+ pH264->bDescriptorType = USB_DT_CS_INTERFACE;
+ pH264->bDescriptorSubType = UVC_VS_FRAME_FRAME_BASED;
+ pH264->bFrameIndex = idx;
+ pH264->bmCapabilities = 0;
+ pH264->wWidth = cpu_to_le16(pFrm->width);
+ pH264->wHeight = cpu_to_le16(pFrm->height);
+ pH264->dwMinBitRate = cpu_to_le32(18432000);
+ pH264->dwMaxBitRate = cpu_to_le32(55296000);
+ pH264->dwDefaultFrameInterval = cpu_to_le32(pFrm->intervals[0]);
+ pH264->dwBytesPerLine = 0;
+
+ pH264->bFrameIntervalType = num;
+ for (i = 0; i < num; i++)
+ pH264->dwFrameInterval[i] = cpu_to_le32(pFrm->intervals[i]);
+
+}
+
+
+void uvc_stream_append_data(void *data)
+{
+ pCurFmtArray->uvc_streaming_data[uvc_stream_idx++] =
+ (struct uvc_descriptor_header *)data;
+}
+
+void deal_frms_array(unsigned int fcc, const struct uvc_frame_info *pFrms)
+{
+
+ int frm_num = get_frame_array_num(pFrms);
+ const struct uvc_frame_info *pFrmData = pFrms;
+ int i;
+
+ if (frm_num <= 0)
+ return;
+
+
+ switch (fcc) {
+ case V4L2_PIX_FMT_NV12:
+ uvc_stream_append_data(&pCurFmtArray->uvc_format_nv12);
+ pCurFmtArray->uvc_format_nv12.bFormatIndex = uvc_fmt_idx++;
+ pCurFmtArray->uvc_format_nv12.bNumFrameDescriptors = frm_num;
+ for (i = 0; i < frm_num; i++) {
+ gen_yuv_frame_data(&pCurFmtArray->yuv_frames[i],
+ i+1, pFrmData);
+ uvc_stream_append_data(&pCurFmtArray->yuv_frames[i]);
+ pFrmData++;
+ }
+ break;
+
+ case V4L2_PIX_FMT_MJPEG:
+ uvc_stream_append_data(&pCurFmtArray->uvc_format_mjpg);
+ pCurFmtArray->uvc_format_mjpg.bFormatIndex = uvc_fmt_idx++;
+ pCurFmtArray->uvc_format_mjpg.bNumFrameDescriptors = frm_num;
+ for (i = 0; i < frm_num; i++) {
+ gen_mjpg_frame_data(&pCurFmtArray->mjpg_frames[i],
+ i+1, pFrmData);
+ uvc_stream_append_data(&pCurFmtArray->mjpg_frames[i]);
+ pFrmData++;
+ }
+ break;
+
+ case V4L2_PIX_FMT_H264:
+ uvc_stream_append_data(&pCurFmtArray->uvc_format_h264);
+ pCurFmtArray->uvc_format_h264.bFormatIndex = uvc_fmt_idx++;
+ pCurFmtArray->uvc_format_h264.bNumFrameDescriptors = frm_num;
+ for (i = 0; i < frm_num; i++) {
+ gen_h264_frame_data(&pCurFmtArray->h264_frames[i],
+ i+1, pFrmData);
+ uvc_stream_append_data(&pCurFmtArray->h264_frames[i]);
+ pFrmData++;
+ }
+ break;
+ }
+}
+
+
+/*void set_yuv_bpp(int bpp);*/
+static __u8 nv12Format[16] = { 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00,
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71};
+
+static __u8 yuy2Format[16] = { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71};
+
+
+
+void gen_uvc_fmt_array(int id, struct uvc_format_info *fmt, int fmt_num)
+{
+ struct uvc_format_info *pFmtData = fmt;
+ unsigned int i;
+
+ uvc_stream_idx = 0;
+ uvc_fmt_idx = 1;
+
+ pCurFmtArray = &uvc_fmt_array[id&0x01];
+ pCurFmtArray->fmt_num = fmt_num;
+ memcpy(&pCurFmtArray->uvc_input_header, &uvc_input_header,
+ sizeof(uvc_input_header));
+ memcpy(&pCurFmtArray->uvc_format_nv12, &uvc_format_nv12,
+ sizeof(uvc_format_nv12));
+ memcpy(&pCurFmtArray->uvc_format_mjpg, &uvc_format_mjpg,
+ sizeof(uvc_format_mjpg));
+ memcpy(&pCurFmtArray->uvc_format_h264, &uvc_format_h264,
+ sizeof(uvc_format_h264));
+
+ uvc_stream_append_data((void *)&pCurFmtArray->uvc_input_header);
+
+
+ for (i = 0; i < fmt_num; i++) {
+ deal_frms_array(pFmtData->fcc, pFmtData->frames);
+ pFmtData++;
+ }
+ /*uvc_stream_append_data((void*)&uvc_still_image);*/
+ uvc_stream_append_data((void *)&uvc_color_matching);
+ uvc_stream_append_data(NULL);
+}
+
+void gen_stream_descriptor_array(struct uvc_fmt_array_info *pFai)
+{
+ switch (pFai->yuv_type) {
+ case YUV_YUY2:
+ set_yuv_bpp(16);
+ memcpy(uvc_format_nv12.guidFormat, yuy2Format,
+ sizeof(yuy2Format));
+
+ uvc_format_nv12.bBitsPerPixel = 16;
+ break;
+
+ case YUV_NV12:
+ default:
+ set_yuv_bpp(12);
+ memcpy(uvc_format_nv12.guidFormat,
+ nv12Format,
+ sizeof(nv12Format));
+ uvc_format_nv12.bBitsPerPixel = 12;
+ break;
+ }
+
+ gen_uvc_fmt_array(0, pFai->pFmts, pFai->fmt_num);
+ gen_uvc_fmt_array(1, pFai->pFmts2, pFai->fmt2_num);
+}
+
+static struct uvc_descriptor_header *uvc_streaming_test[] = {
+ (struct uvc_descriptor_header *) &uvc_input_header,
+ /*(const struct uvc_descriptor_header *) &uvc_format_mjpg_fm1,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,*/
+ (struct uvc_descriptor_header *) &uvc_format_h264,
+ (struct uvc_descriptor_header *) &uvc_frame_h264_360p,
+ (struct uvc_descriptor_header *) &uvc_color_matching,
+ NULL,
+};
+
+
+void change_usb_support_fmt(struct uvc_device *uvc, void *arg)
+{
+ struct usb_function *f;
+ if (NULL != arg) {
+ gen_stream_descriptor_array(arg);
+ uvc->comm->desc.fs_streaming =
+ uvc_fmt_array[0].uvc_streaming_data;
+ uvc->comm->desc.hs_streaming =
+ uvc_fmt_array[0].uvc_streaming_data;
+
+#ifdef UVC_DOUBLE_STREAM
+
+ if (uvc_fmt_array[1].fmt_num > 0) {
+ uvc->comm->desc.fs_streaming2 =
+ uvc_fmt_array[1].uvc_streaming_data;
+ uvc->comm->desc.hs_streaming2 =
+ uvc_fmt_array[1].uvc_streaming_data;
+ uvc_control_header.bLength = UVC_DT_HEADER_SIZE(2);
+ } else {
+ uvc->comm->desc.fs_streaming2 = NULL;
+ uvc->comm->desc.hs_streaming2 = NULL;
+ }
+#else
+
+ uvc->comm->desc.fs_streaming2 = NULL;
+ uvc->comm->desc.hs_streaming2 = NULL;
+
+
+#endif
+ } else {
+ return;
+ uvc->comm->desc.fs_streaming = uvc_streaming_test;
+ uvc->comm->desc.hs_streaming = uvc_streaming_test;
+
+ uvc->comm->desc.fs_streaming2 = uvc_streaming_test;
+ uvc->comm->desc.hs_streaming2 = uvc_streaming_test;
+ }
+
+
+ f = &uvc->comm->func;
+
+
+ kfree(f->descriptors);
+ kfree(f->hs_descriptors);
+
+ /* Copy descriptors. */
+ f->descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
+
+ f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
+}
+
/* --------------------------------------------------------------------------
* USB configuration
*/
+
+/*extern int uac_bind_config(struct usb_configuration *c);*/
+
static int __init
webcam_config_bind(struct usb_configuration *c)
{
- return uvc_bind_config(c, uvc_control_cls, uvc_fs_streaming_cls,
- uvc_hs_streaming_cls);
+ int ret;
+#ifdef ENABLE_UVC
+ ret = uvc_bind_config(c, uvc_control_cls, uvc_fs_streaming_cls,
+ uvc_hs_streaming_cls);
+#endif
+
+#ifdef ENABLE_UAC
+ ret = uac_bind_config(c);
+#endif
+ return ret;
}
static struct usb_configuration webcam_config_driver = {
@@ -337,24 +1025,26 @@ webcam_bind(struct usb_composite_dev *cdev)
/* Allocate string descriptor numbers ... note that string contents
* can be overridden by the composite_dev glue.
*/
- if ((ret = usb_string_id(cdev)) < 0)
+ ret = usb_string_id(cdev);
+ if (ret < 0)
goto error;
webcam_strings[STRING_MANUFACTURER_IDX].id = ret;
webcam_device_descriptor.iManufacturer = ret;
-
- if ((ret = usb_string_id(cdev)) < 0)
+ ret = usb_string_id(cdev);
+ if (ret < 0)
goto error;
webcam_strings[STRING_PRODUCT_IDX].id = ret;
webcam_device_descriptor.iProduct = ret;
-
- if ((ret = usb_string_id(cdev)) < 0)
+ ret = usb_string_id(cdev);
+ if (ret < 0)
goto error;
webcam_strings[STRING_DESCRIPTION_IDX].id = ret;
webcam_config_driver.iConfiguration = ret;
/* Register our configuration. */
- if ((ret = usb_add_config(cdev, &webcam_config_driver,
- webcam_config_bind)) < 0)
+ ret = usb_add_config(cdev, &webcam_config_driver, \
+ webcam_config_bind);
+ if (ret < 0)
goto error;
INFO(cdev, "Webcam Video Gadget\n");
@@ -396,3 +1086,5 @@ MODULE_DESCRIPTION("Webcam Video Gadget");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1.0");
+#endif
+
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index ab085f12..bc58e0db 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -574,7 +574,48 @@ config USB_OCTEON_OHCI
Enable support for the Octeon II SOC's on-chip OHCI
controller. It is needed for low-speed USB 1.0 device
support. All CN6XXX based chips with USB are supported.
+
+config USB_FH_OTG
+ tristate "FH OTG support"
+ depends on USB
+ default y
+ help
+ FullHan USB controller support.
+ The controller can operate in OTG, host-only or device-only mode.
+choice
+ prompt "Choose mode"
+ depends on USB_FH_OTG
+ default FH_HOST_ONLY
+ help
+ Select one
+config FH_OTG
+ depends on USB_FH_OTG && MACH_FH8856
+ bool "OTG mode"
+
+config FH_HOST_ONLY
+ bool "Host only mode"
+
+config FH_DEVICE_ONLY
+ bool "Device only mode"
+endchoice
+
config USB_OCTEON2_COMMON
bool
default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
+
+config USB_S3C_OTG_HOST
+ tristate "S3C USB OTG Host support"
+ depends on USB
+ help
+ Samsung's S3C64XX processors include high speed USB OTG2.0
+ controller. It has 15 configurable endpoints, as well as
+ endpoint zero (for control transfers).
+
+ This driver support only OTG Host role. If you want to use
+ OTG Device role, select USB Gadget support and S3C OTG Device.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "s3c_otg_hcd" and force all
+ drivers to also be dynamically linked.
+
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 624a362f..afc1f79e 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -17,7 +17,7 @@ xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
obj-$(CONFIG_USB_WHCI_HCD) += whci/
obj-$(CONFIG_PCI) += pci-quirks.o
-
+obj-$(CONFIG_USB_FH_OTG) += fh_otg/
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
diff --git a/drivers/usb/host/fh_otg/Kconfig b/drivers/usb/host/fh_otg/Kconfig
new file mode 100644
index 00000000..e9285782
--- /dev/null
+++ b/drivers/usb/host/fh_otg/Kconfig
@@ -0,0 +1 @@
+source "drivers/usb/host/fh_otg/fh_otg/Kconfig"
diff --git a/drivers/usb/host/fh_otg/Makefile b/drivers/usb/host/fh_otg/Makefile
new file mode 100644
index 00000000..11ecd15e
--- /dev/null
+++ b/drivers/usb/host/fh_otg/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the kernel USB host drivers.
+#
+
+# Object files in subdirectories
+
+obj-$(CONFIG_USB_FH_OTG) += fh_common_port/ fh_otg/
\ No newline at end of file
diff --git a/drivers/usb/host/fh_otg/fh_common_port/Makefile b/drivers/usb/host/fh_otg/fh_common_port/Makefile
new file mode 100644
index 00000000..a4b0accd
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/Makefile
@@ -0,0 +1,49 @@
+#
+# Makefile for FH_common library
+#
+ifneq ($(KERNELRELEASE),)
+
+EXTRA_CFLAGS += -DFH_LINUX
+#EXTRA_CFLAGS += -DDEBUG
+#EXTRA_CFLAGS += -DFH_DEBUG_REGS
+#EXTRA_CFLAGS += -DFH_DEBUG_MEMORY
+
+EXTRA_CFLAGS += -DFH_LIBMODULE
+EXTRA_CFLAGS += -DFH_CCLIB
+EXTRA_CFLAGS += -DFH_CRYPTOLIB
+EXTRA_CFLAGS += -DFH_NOTIFYLIB
+EXTRA_CFLAGS += -DFH_UTFLIB
+
+obj-$(CONFIG_USB_FH_OTG) := fh_common_port_lib.o
+fh_common_port_lib-objs := fh_cc.o fh_modpow.o fh_dh.o \
+ fh_crypto.o fh_notifier.o \
+ fh_common_linux.o fh_mem.o
+
+else
+
+ifeq ($(KDIR),)
+$(error Must give "KDIR=/path/to/kernel/source" on command line or in environment)
+endif
+
+ifeq ($(ARCH),)
+$(error Must give "ARCH=<arch>" on command line or in environment. Also, if \
+ cross-compiling, must give "CROSS_COMPILE=/path/to/compiler/plus/tool-prefix-")
+endif
+
+ifeq ($(DOXYGEN),)
+DOXYGEN := doxygen
+endif
+
+default:
+ $(MAKE) -C$(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
+
+docs: $(wildcard *.[hc]) doc/doxygen.cfg
+ $(DOXYGEN) doc/doxygen.cfg
+
+tags: $(wildcard *.[hc])
+ $(CTAGS) -e $(wildcard *.[hc]) $(wildcard linux/*.[hc]) $(wildcard $(KDIR)/include/linux/usb*.h)
+
+endif
+
+clean:
+ rm -rf *.o *.ko .*.cmd *.mod.c .*.o.d .*.o.tmp modules.order Module.markers Module.symvers .tmp_versions/
diff --git a/drivers/usb/host/fh_otg/fh_common_port/Makefile.fbsd b/drivers/usb/host/fh_otg/fh_common_port/Makefile.fbsd
new file mode 100644
index 00000000..c78002c4
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/Makefile.fbsd
@@ -0,0 +1,17 @@
+CFLAGS += -I/sys/i386/compile/GENERIC -I/sys/i386/include -I/usr/include
+CFLAGS += -DFH_FREEBSD
+CFLAGS += -DDEBUG
+#CFLAGS += -DFH_DEBUG_REGS
+#CFLAGS += -DFH_DEBUG_MEMORY
+
+#CFLAGS += -DFH_LIBMODULE
+#CFLAGS += -DFH_CCLIB
+#CFLAGS += -DFH_CRYPTOLIB
+#CFLAGS += -DFH_NOTIFYLIB
+#CFLAGS += -DFH_UTFLIB
+
+KMOD = fh_common_port_lib
+SRCS = fh_cc.c fh_modpow.c fh_dh.c fh_crypto.c fh_notifier.c \
+ fh_common_fbsd.c fh_mem.c
+
+.include <bsd.kmod.mk>
diff --git a/drivers/usb/host/fh_otg/fh_common_port/Makefile.linux b/drivers/usb/host/fh_otg/fh_common_port/Makefile.linux
new file mode 100644
index 00000000..4dc490cd
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/Makefile.linux
@@ -0,0 +1,49 @@
+#
+# Makefile for FH_common library
+#
+ifneq ($(KERNELRELEASE),)
+
+EXTRA_CFLAGS += -DFH_LINUX
+#EXTRA_CFLAGS += -DDEBUG
+#EXTRA_CFLAGS += -DFH_DEBUG_REGS
+#EXTRA_CFLAGS += -DFH_DEBUG_MEMORY
+
+EXTRA_CFLAGS += -DFH_LIBMODULE
+EXTRA_CFLAGS += -DFH_CCLIB
+EXTRA_CFLAGS += -DFH_CRYPTOLIB
+EXTRA_CFLAGS += -DFH_NOTIFYLIB
+EXTRA_CFLAGS += -DFH_UTFLIB
+
+obj-m := fh_common_port_lib.o
+fh_common_port_lib-objs := fh_cc.o fh_modpow.o fh_dh.o \
+ fh_crypto.o fh_notifier.o \
+ fh_common_linux.o fh_mem.o
+
+else
+
+ifeq ($(KDIR),)
+$(error Must give "KDIR=/path/to/kernel/source" on command line or in environment)
+endif
+
+ifeq ($(ARCH),)
+$(error Must give "ARCH=<arch>" on command line or in environment. Also, if \
+ cross-compiling, must give "CROSS_COMPILE=/path/to/compiler/plus/tool-prefix-")
+endif
+
+ifeq ($(DOXYGEN),)
+DOXYGEN := doxygen
+endif
+
+default:
+ $(MAKE) -C$(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
+
+docs: $(wildcard *.[hc]) doc/doxygen.cfg
+ $(DOXYGEN) doc/doxygen.cfg
+
+tags: $(wildcard *.[hc])
+ $(CTAGS) -e $(wildcard *.[hc]) $(wildcard linux/*.[hc]) $(wildcard $(KDIR)/include/linux/usb*.h)
+
+endif
+
+clean:
+ rm -rf *.o *.ko .*.cmd *.mod.c .*.o.d .*.o.tmp modules.order Module.markers Module.symvers .tmp_versions/
diff --git a/drivers/usb/host/fh_otg/fh_common_port/changes.txt b/drivers/usb/host/fh_otg/fh_common_port/changes.txt
new file mode 100644
index 00000000..f6839f92
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/changes.txt
@@ -0,0 +1,174 @@
+
+dwc_read_reg32() and friends now take an additional parameter, a pointer to an
+IO context struct. The IO context struct should live in an os-dependent struct
+in your driver. As an example, the dwc_usb3 driver has an os-dependent struct
+named 'os_dep' embedded in the main device struct. So there these calls look
+like this:
+
+ dwc_read_reg32(&usb3_dev->os_dep.ioctx, &pcd->dev_global_regs->dcfg);
+
+ dwc_write_reg32(&usb3_dev->os_dep.ioctx,
+ &pcd->dev_global_regs->dcfg, 0);
+
+Note that for the existing Linux driver ports, it is not necessary to actually
+define the 'ioctx' member in the os-dependent struct. Since Linux does not
+require an IO context, its macros for dwc_read_reg32() and friends do not
+use the context pointer, so it is optimized away by the compiler. But it is
+necessary to add the pointer parameter to all of the call sites, to be ready
+for any future ports (such as FreeBSD) which do require an IO context.
+
+
+Similarly, dwc_alloc(), dwc_alloc_atomic(), dwc_strdup(), and dwc_free() now
+take an additional parameter, a pointer to a memory context. Examples:
+
+ addr = dwc_alloc(&usb3_dev->os_dep.memctx, size);
+
+ dwc_free(&usb3_dev->os_dep.memctx, addr);
+
+Again, for the Linux ports, it is not necessary to actually define the memctx
+member, but it is necessary to add the pointer parameter to all of the call
+sites.
+
+
+Same for dwc_dma_alloc() and dwc_dma_free(). Examples:
+
+ virt_addr = dwc_dma_alloc(&usb3_dev->os_dep.dmactx, size, &phys_addr);
+
+ dwc_dma_free(&usb3_dev->os_dep.dmactx, size, virt_addr, phys_addr);
+
+
+Same for dwc_mutex_alloc() and dwc_mutex_free(). Examples:
+
+ mutex = dwc_mutex_alloc(&usb3_dev->os_dep.mtxctx);
+
+ dwc_mutex_free(&usb3_dev->os_dep.mtxctx, mutex);
+
+
+Same for dwc_spinlock_alloc() and dwc_spinlock_free(). Examples:
+
+ lock = dwc_spinlock_alloc(&usb3_dev->osdep.splctx);
+
+ dwc_spinlock_free(&usb3_dev->osdep.splctx, lock);
+
+
+Same for dwc_timer_alloc(). Example:
+
+ timer = dwc_timer_alloc(&usb3_dev->os_dep.tmrctx, "dwc_usb3_tmr1",
+ cb_func, cb_data);
+
+
+Same for dwc_waitq_alloc(). Example:
+
+ waitq = dwc_waitq_alloc(&usb3_dev->os_dep.wtqctx);
+
+
+Same for dwc_thread_run(). Example:
+
+ thread = dwc_thread_run(&usb3_dev->os_dep.thdctx, func,
+ "dwc_usb3_thd1", data);
+
+
+Same for dwc_workq_alloc(). Example:
+
+ workq = dwc_workq_alloc(&usb3_dev->osdep.wkqctx, "dwc_usb3_wkq1");
+
+
+Same for dwc_task_alloc(). Example:
+
+ task = dwc_task_alloc(&usb3_dev->os_dep.tskctx, "dwc_usb3_tsk1",
+ cb_func, cb_data);
+
+
+In addition to the context pointer additions, a few core functions have had
+other changes made to their parameters:
+
+The 'flags' parameter to dwc_spinlock_irqsave() and dwc_spinunlock_irqrestore()
+has been changed from a uint64_t to a dwc_irqflags_t.
+
+dwc_thread_should_stop() now takes a 'dwc_thread_t *' parameter, because the
+FreeBSD equivalent of that function requires it.
+
+And, in addition to the context pointer, dwc_task_alloc() also adds a
+'char *name' parameter, to be consistent with dwc_thread_run() and
+dwc_workq_alloc(), and because the FreeBSD equivalent of that function
+requires a unique name.
+
+
+Here is a complete list of the core functions that now take a pointer to a
+context as their first parameter:
+
+ dwc_read_reg32
+ dwc_read_reg64
+ dwc_write_reg32
+ dwc_write_reg64
+ dwc_modify_reg32
+ dwc_modify_reg64
+ dwc_alloc
+ dwc_alloc_atomic
+ dwc_strdup
+ dwc_free
+ dwc_dma_alloc
+ dwc_dma_free
+ dwc_mutex_alloc
+ dwc_mutex_free
+ dwc_spinlock_alloc
+ dwc_spinlock_free
+ dwc_timer_alloc
+ dwc_waitq_alloc
+ dwc_thread_run
+ dwc_workq_alloc
+ dwc_task_alloc Also adds a 'char *name' as its 2nd parameter
+
+And here are the core functions that have other changes to their parameters:
+
+ dwc_spinlock_irqsave 'flags' param is now a 'dwc_irqflags_t *'
+ dwc_spinunlock_irqrestore 'flags' param is now a 'dwc_irqflags_t'
+ dwc_thread_should_stop Adds a 'dwc_thread_t *' parameter
+
+
+
+The changes to the core functions also require some of the other library
+functions to change:
+
+ dwc_cc_if_alloc() and dwc_cc_if_free() now take a 'void *memctx'
+ (for memory allocation) as the 1st param and a 'void *mtxctx'
+ (for mutex allocation) as the 2nd param.
+
+ dwc_cc_clear(), dwc_cc_add(), dwc_cc_change(), dwc_cc_remove(),
+ dwc_cc_data_for_save(), and dwc_cc_restore_from_data() now take a
+ 'void *memctx' as the 1st param.
+
+ dwc_dh_modpow(), dwc_dh_pk(), and dwc_dh_derive_keys() now take a
+ 'void *memctx' as the 1st param.
+
+ dwc_modpow() now takes a 'void *memctx' as the 1st param.
+
+ dwc_alloc_notification_manager() now takes a 'void *memctx' as the
+ 1st param and a 'void *wkqctx' (for work queue allocation) as the 2nd
+ param, and also now returns an integer value that is non-zero if
+ allocation of its data structures or work queue fails.
+
+ dwc_register_notifier() now takes a 'void *memctx' as the 1st param.
+
+ dwc_memory_debug_start() now takes a 'void *mem_ctx' as the first
+ param, and also now returns an integer value that is non-zero if
+ allocation of its data structures fails.
+
+
+
+Other miscellaneous changes:
+
+The DEBUG_MEMORY and DEBUG_REGS #define's have been renamed to
+DWC_DEBUG_MEMORY and DWC_DEBUG_REGS.
+
+The following #define's have been added to allow selectively compiling library
+features:
+
+ DWC_CCLIB
+ DWC_CRYPTOLIB
+ DWC_NOTIFYLIB
+ DWC_UTFLIB
+
+A DWC_LIBMODULE #define has also been added. If this is not defined, then the
+module code in dwc_common_linux.c is not compiled in. This allows linking the
+library code directly into a driver module, instead of as a standalone module.
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_cc.c b/drivers/usb/host/fh_otg/fh_common_port/fh_cc.c
new file mode 100644
index 00000000..ca1dc878
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_cc.c
@@ -0,0 +1,532 @@
+/* =========================================================================
+ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_cc.c $
+ * $Revision: #4 $
+ * $Date: 2010/11/04 $
+ * $Change: 1621692 $
+ *
+ * Synopsys Portability Library Software and documentation
+ * (hereinafter, "Software") is an Unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing
+ * between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for
+ * Licensed Product with Synopsys or any supplement thereto. You are
+ * permitted to use and redistribute this Software in source and binary
+ * forms, with or without modification, provided that redistributions
+ * of source code must retain this notice. You may not view, use,
+ * disclose, copy or distribute this file or any information contained
+ * herein except pursuant to this license grant from Synopsys. If you
+ * do not agree with this notice, including the disclaimer below, then
+ * you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
+ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================= */
+#ifdef FH_CCLIB
+
+#include "fh_cc.h"
+
+typedef struct fh_cc
+{
+ uint32_t uid;
+ uint8_t chid[16];
+ uint8_t cdid[16];
+ uint8_t ck[16];
+ uint8_t *name;
+ uint8_t length;
+ FH_CIRCLEQ_ENTRY(fh_cc) list_entry;
+} fh_cc_t;
+
+FH_CIRCLEQ_HEAD(context_list, fh_cc);
+
+/** The main structure for CC management. */
+struct fh_cc_if
+{
+ fh_mutex_t *mutex;
+ char *filename;
+
+ unsigned is_host:1;
+
+ fh_notifier_t *notifier;
+
+ struct context_list list;
+};
+
+#ifdef DEBUG
+static inline void dump_bytes(char *name, uint8_t *bytes, int len)
+{
+ int i;
+ FH_PRINTF("%s: ", name);
+ for (i=0; i<len; i++) {
+ FH_PRINTF("%02x ", bytes[i]);
+ }
+ FH_PRINTF("\n");
+}
+#else
+#define dump_bytes(x...)
+#endif
+
+static fh_cc_t *alloc_cc(void *mem_ctx, uint8_t *name, uint32_t length)
+{
+ fh_cc_t *cc = fh_alloc(mem_ctx, sizeof(fh_cc_t));
+ if (!cc) {
+ return NULL;
+ }
+ FH_MEMSET(cc, 0, sizeof(fh_cc_t));
+
+ if (name) {
+ cc->length = length;
+ cc->name = fh_alloc(mem_ctx, length);
+ if (!cc->name) {
+ fh_free(mem_ctx, cc);
+ return NULL;
+ }
+
+ FH_MEMCPY(cc->name, name, length);
+ }
+
+ return cc;
+}
+
+static void free_cc(void *mem_ctx, fh_cc_t *cc)
+{
+ if (cc->name) {
+ fh_free(mem_ctx, cc->name);
+ }
+ fh_free(mem_ctx, cc);
+}
+
+static uint32_t next_uid(fh_cc_if_t *cc_if)
+{
+ uint32_t uid = 0;
+ fh_cc_t *cc;
+ FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
+ if (cc->uid > uid) {
+ uid = cc->uid;
+ }
+ }
+
+ if (uid == 0) {
+ uid = 255;
+ }
+
+ return uid + 1;
+}
+
+static fh_cc_t *cc_find(fh_cc_if_t *cc_if, uint32_t uid)
+{
+ fh_cc_t *cc;
+ FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
+ if (cc->uid == uid) {
+ return cc;
+ }
+ }
+ return NULL;
+}
+
+static unsigned int cc_data_size(fh_cc_if_t *cc_if)
+{
+ unsigned int size = 0;
+ fh_cc_t *cc;
+ FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
+ size += (48 + 1);
+ if (cc->name) {
+ size += cc->length;
+ }
+ }
+ return size;
+}
+
+static uint32_t cc_match_chid(fh_cc_if_t *cc_if, uint8_t *chid)
+{
+ uint32_t uid = 0;
+ fh_cc_t *cc;
+
+ FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
+ if (FH_MEMCMP(cc->chid, chid, 16) == 0) {
+ uid = cc->uid;
+ break;
+ }
+ }
+ return uid;
+}
+static uint32_t cc_match_cdid(fh_cc_if_t *cc_if, uint8_t *cdid)
+{
+ uint32_t uid = 0;
+ fh_cc_t *cc;
+
+ FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
+ if (FH_MEMCMP(cc->cdid, cdid, 16) == 0) {
+ uid = cc->uid;
+ break;
+ }
+ }
+ return uid;
+}
+
+/* Internal cc_add */
+static int32_t cc_add(void *mem_ctx, fh_cc_if_t *cc_if, uint8_t *chid,
+ uint8_t *cdid, uint8_t *ck, uint8_t *name, uint8_t length)
+{
+ fh_cc_t *cc;
+ uint32_t uid;
+
+ if (cc_if->is_host) {
+ uid = cc_match_cdid(cc_if, cdid);
+ }
+ else {
+ uid = cc_match_chid(cc_if, chid);
+ }
+
+ if (uid) {
+ FH_DEBUG("Replacing previous connection context id=%d name=%p name_len=%d", uid, name, length);
+ cc = cc_find(cc_if, uid);
+ }
+ else {
+ cc = alloc_cc(mem_ctx, name, length);
+ cc->uid = next_uid(cc_if);
+ FH_CIRCLEQ_INSERT_TAIL(&cc_if->list, cc, list_entry);
+ }
+
+ FH_MEMCPY(&(cc->chid[0]), chid, 16);
+ FH_MEMCPY(&(cc->cdid[0]), cdid, 16);
+ FH_MEMCPY(&(cc->ck[0]), ck, 16);
+
+ FH_DEBUG("Added connection context id=%d name=%p name_len=%d", cc->uid, name, length);
+ dump_bytes("CHID", cc->chid, 16);
+ dump_bytes("CDID", cc->cdid, 16);
+ dump_bytes("CK", cc->ck, 16);
+ return cc->uid;
+}
+
+/* Internal cc_clear */
+static void cc_clear(void *mem_ctx, fh_cc_if_t *cc_if)
+{
+ while (!FH_CIRCLEQ_EMPTY(&cc_if->list)) {
+ fh_cc_t *cc = FH_CIRCLEQ_FIRST(&cc_if->list);
+ FH_CIRCLEQ_REMOVE_INIT(&cc_if->list, cc, list_entry);
+ free_cc(mem_ctx, cc);
+ }
+}
+
+fh_cc_if_t *fh_cc_if_alloc(void *mem_ctx, void *mtx_ctx,
+ fh_notifier_t *notifier, unsigned is_host)
+{
+ fh_cc_if_t *cc_if = NULL;
+
+ /* Allocate a common_cc_if structure */
+ cc_if = fh_alloc(mem_ctx, sizeof(fh_cc_if_t));
+
+ if (!cc_if)
+ return NULL;
+
+#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
+ FH_MUTEX_ALLOC_LINUX_DEBUG(cc_if->mutex);
+#else
+ cc_if->mutex = fh_mutex_alloc(mtx_ctx);
+#endif
+ if (!cc_if->mutex) {
+ fh_free(mem_ctx, cc_if);
+ return NULL;
+ }
+
+ FH_CIRCLEQ_INIT(&cc_if->list);
+ cc_if->is_host = is_host;
+ cc_if->notifier = notifier;
+ return cc_if;
+}
+
+void fh_cc_if_free(void *mem_ctx, void *mtx_ctx, fh_cc_if_t *cc_if)
+{
+#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
+ FH_MUTEX_FREE(cc_if->mutex);
+#else
+ fh_mutex_free(mtx_ctx, cc_if->mutex);
+#endif
+ cc_clear(mem_ctx, cc_if);
+ fh_free(mem_ctx, cc_if);
+}
+
+static void cc_changed(fh_cc_if_t *cc_if)
+{
+ if (cc_if->notifier) {
+ fh_notify(cc_if->notifier, FH_CC_LIST_CHANGED_NOTIFICATION, cc_if);
+ }
+}
+
+void fh_cc_clear(void *mem_ctx, fh_cc_if_t *cc_if)
+{
+ FH_MUTEX_LOCK(cc_if->mutex);
+ cc_clear(mem_ctx, cc_if);
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ cc_changed(cc_if);
+}
+
+int32_t fh_cc_add(void *mem_ctx, fh_cc_if_t *cc_if, uint8_t *chid,
+ uint8_t *cdid, uint8_t *ck, uint8_t *name, uint8_t length)
+{
+ uint32_t uid;
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ uid = cc_add(mem_ctx, cc_if, chid, cdid, ck, name, length);
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ cc_changed(cc_if);
+
+ return uid;
+}
+
+void fh_cc_change(void *mem_ctx, fh_cc_if_t *cc_if, int32_t id, uint8_t *chid,
+ uint8_t *cdid, uint8_t *ck, uint8_t *name, uint8_t length)
+{
+ fh_cc_t* cc;
+
+ FH_DEBUG("Change connection context %d", id);
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ cc = cc_find(cc_if, id);
+ if (!cc) {
+ FH_ERROR("Uid %d not found in cc list\n", id);
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ return;
+ }
+
+ if (chid) {
+ FH_MEMCPY(&(cc->chid[0]), chid, 16);
+ }
+ if (cdid) {
+ FH_MEMCPY(&(cc->cdid[0]), cdid, 16);
+ }
+ if (ck) {
+ FH_MEMCPY(&(cc->ck[0]), ck, 16);
+ }
+
+ if (name) {
+ if (cc->name) {
+ fh_free(mem_ctx, cc->name);
+ }
+ cc->name = fh_alloc(mem_ctx, length);
+ if (!cc->name) {
+ FH_ERROR("Out of memory in fh_cc_change()\n");
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ return;
+ }
+ cc->length = length;
+ FH_MEMCPY(cc->name, name, length);
+ }
+
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+
+ cc_changed(cc_if);
+
+ FH_DEBUG("Changed connection context id=%d\n", id);
+ dump_bytes("New CHID", cc->chid, 16);
+ dump_bytes("New CDID", cc->cdid, 16);
+ dump_bytes("New CK", cc->ck, 16);
+}
+
+void fh_cc_remove(void *mem_ctx, fh_cc_if_t *cc_if, int32_t id)
+{
+ fh_cc_t *cc;
+
+ FH_DEBUG("Removing connection context %d", id);
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ cc = cc_find(cc_if, id);
+ if (!cc) {
+ FH_ERROR("Uid %d not found in cc list\n", id);
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ return;
+ }
+
+ FH_CIRCLEQ_REMOVE_INIT(&cc_if->list, cc, list_entry);
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ free_cc(mem_ctx, cc);
+
+ cc_changed(cc_if);
+}
+
+uint8_t *fh_cc_data_for_save(void *mem_ctx, fh_cc_if_t *cc_if, unsigned int *length)
+{
+ uint8_t *buf, *x;
+ uint8_t zero = 0;
+ fh_cc_t *cc;
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ *length = cc_data_size(cc_if);
+ if (!(*length)) {
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ return NULL;
+ }
+
+ FH_DEBUG("Creating data for saving (length=%d)", *length);
+
+ buf = fh_alloc(mem_ctx, *length);
+ if (!buf) {
+ *length = 0;
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ return NULL;
+ }
+
+ x = buf;
+ FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
+ FH_MEMCPY(x, cc->chid, 16);
+ x += 16;
+ FH_MEMCPY(x, cc->cdid, 16);
+ x += 16;
+ FH_MEMCPY(x, cc->ck, 16);
+ x += 16;
+ if (cc->name) {
+ FH_MEMCPY(x, &cc->length, 1);
+ x += 1;
+ FH_MEMCPY(x, cc->name, cc->length);
+ x += cc->length;
+ }
+ else {
+ FH_MEMCPY(x, &zero, 1);
+ x += 1;
+ }
+ }
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+
+ return buf;
+}
+
+void fh_cc_restore_from_data(void *mem_ctx, fh_cc_if_t *cc_if, uint8_t *data, uint32_t length)
+{
+ uint8_t name_length;
+ uint8_t *name;
+ uint8_t *chid;
+ uint8_t *cdid;
+ uint8_t *ck;
+ uint32_t i = 0;
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ cc_clear(mem_ctx, cc_if);
+
+ while (i < length) {
+ chid = &data[i];
+ i += 16;
+ cdid = &data[i];
+ i += 16;
+ ck = &data[i];
+ i += 16;
+
+ name_length = data[i];
+ i ++;
+
+ if (name_length) {
+ name = &data[i];
+ i += name_length;
+ }
+ else {
+ name = NULL;
+ }
+
+ /* check to see if we haven't overflown the buffer */
+ if (i > length) {
+ FH_ERROR("Data format error while attempting to load CCs "
+ "(nlen=%d, iter=%d, buflen=%d).\n", name_length, i, length);
+ break;
+ }
+
+ cc_add(mem_ctx, cc_if, chid, cdid, ck, name, name_length);
+ }
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+
+ cc_changed(cc_if);
+}
+
+uint32_t fh_cc_match_chid(fh_cc_if_t *cc_if, uint8_t *chid)
+{
+ uint32_t uid = 0;
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ uid = cc_match_chid(cc_if, chid);
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ return uid;
+}
+uint32_t fh_cc_match_cdid(fh_cc_if_t *cc_if, uint8_t *cdid)
+{
+ uint32_t uid = 0;
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ uid = cc_match_cdid(cc_if, cdid);
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+ return uid;
+}
+
+uint8_t *fh_cc_ck(fh_cc_if_t *cc_if, int32_t id)
+{
+ uint8_t *ck = NULL;
+ fh_cc_t *cc;
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ cc = cc_find(cc_if, id);
+ if (cc) {
+ ck = cc->ck;
+ }
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+
+ return ck;
+
+}
+
+uint8_t *fh_cc_chid(fh_cc_if_t *cc_if, int32_t id)
+{
+ uint8_t *retval = NULL;
+ fh_cc_t *cc;
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ cc = cc_find(cc_if, id);
+ if (cc) {
+ retval = cc->chid;
+ }
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+
+ return retval;
+}
+
+uint8_t *fh_cc_cdid(fh_cc_if_t *cc_if, int32_t id)
+{
+ uint8_t *retval = NULL;
+ fh_cc_t *cc;
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ cc = cc_find(cc_if, id);
+ if (cc) {
+ retval = cc->cdid;
+ }
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+
+ return retval;
+}
+
+uint8_t *fh_cc_name(fh_cc_if_t *cc_if, int32_t id, uint8_t *length)
+{
+ uint8_t *retval = NULL;
+ fh_cc_t *cc;
+
+ FH_MUTEX_LOCK(cc_if->mutex);
+ *length = 0;
+ cc = cc_find(cc_if, id);
+ if (cc) {
+ *length = cc->length;
+ retval = cc->name;
+ }
+ FH_MUTEX_UNLOCK(cc_if->mutex);
+
+ return retval;
+}
+
+#endif /* FH_CCLIB */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_cc.h b/drivers/usb/host/fh_otg/fh_common_port/fh_cc.h
new file mode 100644
index 00000000..926229e9
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_cc.h
@@ -0,0 +1,225 @@
+/* =========================================================================
+ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_cc.h $
+ * $Revision: #4 $
+ * $Date: 2010/09/28 $
+ * $Change: 1596182 $
+ *
+ * Synopsys Portability Library Software and documentation
+ * (hereinafter, "Software") is an Unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing
+ * between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for
+ * Licensed Product with Synopsys or any supplement thereto. You are
+ * permitted to use and redistribute this Software in source and binary
+ * forms, with or without modification, provided that redistributions
+ * of source code must retain this notice. You may not view, use,
+ * disclose, copy or distribute this file or any information contained
+ * herein except pursuant to this license grant from Synopsys. If you
+ * do not agree with this notice, including the disclaimer below, then
+ * you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
+ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================= */
+#ifndef _FH_CC_H_
+#define _FH_CC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file
+ *
+ * This file defines the Connection Context library.
+ *
+ * The main data structure is fh_cc_if_t which is returned by either the
+ * fh_cc_if_alloc function or returned by the module to the user via a provided
+ * function. The data structure is opaque and should only be manipulated via the
+ * functions provided in this API.
+ *
+ * It manages a list of connection contexts and operations can be performed to
+ * add, remove, query, search, and change, those contexts. Additionally,
+ * a fh_notifier_t object can be requested from the manager so that
+ * the user can be notified whenever the context list has changed.
+ */
+
+#include "fh_os.h"
+#include "fh_list.h"
+#include "fh_notifier.h"
+
+
+/* Notifications */
+#define FH_CC_LIST_CHANGED_NOTIFICATION "FH_CC_LIST_CHANGED_NOTIFICATION"
+
+struct fh_cc_if;
+typedef struct fh_cc_if fh_cc_if_t;
+
+
+/** @name Connection Context Operations */
+/** @{ */
+
+/** This function allocates memory for a fh_cc_if_t structure, initializes
+ * fields to default values, and returns a pointer to the structure or NULL on
+ * error. */
+extern fh_cc_if_t *fh_cc_if_alloc(void *mem_ctx, void *mtx_ctx,
+ fh_notifier_t *notifier, unsigned is_host);
+
+/** Frees the memory for the specified CC structure allocated from
+ * fh_cc_if_alloc(). */
+extern void fh_cc_if_free(void *mem_ctx, void *mtx_ctx, fh_cc_if_t *cc_if);
+
+/** Removes all contexts from the connection context list */
+extern void fh_cc_clear(void *mem_ctx, fh_cc_if_t *cc_if);
+
+/** Adds a connection context (CHID, CK, CDID, Name) to the connection context list.
+ * If a CHID already exists, the CK and name are overwritten. Statistics are
+ * not overwritten.
+ *
+ * @param cc_if The cc_if structure.
+ * @param chid A pointer to the 16-byte CHID. This value will be copied.
+ * @param cdid A pointer to the 16-byte CDID. This value will be copied.
+ * @param ck A pointer to the 16-byte CK. This value will be copied.
+ * @param name An optional host friendly name as defined in the association model
+ * spec. Must be a UTF16-LE unicode string. Can be NULL to indicated no name.
+ * @param length The length of the unicode string.
+ * @return A unique identifier used to refer to this context that is valid for
+ * as long as this context is still in the list. */
+extern int32_t fh_cc_add(void *mem_ctx, fh_cc_if_t *cc_if, uint8_t *chid,
+ uint8_t *cdid, uint8_t *ck, uint8_t *name,
+ uint8_t length);
+
+/** Changes the CHID, CK, CDID, or Name values of a connection context in the
+ * list, preserving any accumulated statistics. This would typically be called
+ * if the host decides to change the context with a SET_CONNECTION request.
+ *
+ * @param cc_if The cc_if structure.
+ * @param id The identifier of the connection context.
+ * @param chid A pointer to the 16-byte CHID. This value will be copied. NULL
+ * indicates no change.
+ * @param cdid A pointer to the 16-byte CDID. This value will be copied. NULL
+ * indicates no change.
+ * @param ck A pointer to the 16-byte CK. This value will be copied. NULL
+ * indicates no change.
+ * @param name Host friendly name UTF16-LE. NULL indicates no change.
+ * @param length Length of name. */
+extern void fh_cc_change(void *mem_ctx, fh_cc_if_t *cc_if, int32_t id,
+ uint8_t *chid, uint8_t *cdid, uint8_t *ck,
+ uint8_t *name, uint8_t length);
+
+/** Remove the specified connection context.
+ * @param cc_if The cc_if structure.
+ * @param id The identifier of the connection context to remove. */
+extern void fh_cc_remove(void *mem_ctx, fh_cc_if_t *cc_if, int32_t id);
+
+/** Get a binary block of data for the connection context list and attributes.
+ * This data can be used by the OS specific driver to save the connection
+ * context list into non-volatile memory.
+ *
+ * @param cc_if The cc_if structure.
+ * @param length Return the length of the data buffer.
+ * @return A pointer to the data buffer. The memory for this buffer should be
+ * freed with FH_FREE() after use. */
+extern uint8_t *fh_cc_data_for_save(void *mem_ctx, fh_cc_if_t *cc_if,
+ unsigned int *length);
+
+/** Restore the connection context list from the binary data that was previously
+ * returned from a call to fh_cc_data_for_save. This can be used by the OS specific
+ * driver to load a connection context list from non-volatile memory.
+ *
+ * @param cc_if The cc_if structure.
+ * @param data The data bytes as returned from fh_cc_data_for_save.
+ * @param length The length of the data. */
+extern void fh_cc_restore_from_data(void *mem_ctx, fh_cc_if_t *cc_if,
+ uint8_t *data, unsigned int length);
+
+/** Find the connection context from the specified CHID.
+ *
+ * @param cc_if The cc_if structure.
+ * @param chid A pointer to the CHID data.
+ * @return A non-zero identifier of the connection context if the CHID matches.
+ * Otherwise returns 0. */
+extern uint32_t fh_cc_match_chid(fh_cc_if_t *cc_if, uint8_t *chid);
+
+/** Find the connection context from the specified CDID.
+ *
+ * @param cc_if The cc_if structure.
+ * @param cdid A pointer to the CDID data.
+ * @return A non-zero identifier of the connection context if the CDID matches.
+ * Otherwise returns 0. */
+extern uint32_t fh_cc_match_cdid(fh_cc_if_t *cc_if, uint8_t *cdid);
+
+/** Retrieve the CK from the specified connection context.
+ *
+ * @param cc_if The cc_if structure.
+ * @param id The identifier of the connection context.
+ * @return A pointer to the CK data. The memory does not need to be freed. */
+extern uint8_t *fh_cc_ck(fh_cc_if_t *cc_if, int32_t id);
+
+/** Retrieve the CHID from the specified connection context.
+ *
+ * @param cc_if The cc_if structure.
+ * @param id The identifier of the connection context.
+ * @return A pointer to the CHID data. The memory does not need to be freed. */
+extern uint8_t *fh_cc_chid(fh_cc_if_t *cc_if, int32_t id);
+
+/** Retrieve the CDID from the specified connection context.
+ *
+ * @param cc_if The cc_if structure.
+ * @param id The identifier of the connection context.
+ * @return A pointer to the CDID data. The memory does not need to be freed. */
+extern uint8_t *fh_cc_cdid(fh_cc_if_t *cc_if, int32_t id);
+
+extern uint8_t *fh_cc_name(fh_cc_if_t *cc_if, int32_t id, uint8_t *length);
+
+/** Checks a buffer for non-zero.
+ * @param id A pointer to a 16 byte buffer.
+ * @return true if the 16 byte value is non-zero. */
+static inline unsigned fh_assoc_is_not_zero_id(uint8_t *id) {
+ int i;
+ for (i=0; i<16; i++) {
+ if (id[i]) return 1;
+ }
+ return 0;
+}
+
+/** Checks a buffer for zero.
+ * @param id A pointer to a 16 byte buffer.
+ * @return true if the 16 byte value is zero. */
+static inline unsigned fh_assoc_is_zero_id(uint8_t *id) {
+ return !fh_assoc_is_not_zero_id(id);
+}
+
+/** Prints an ASCII representation for the 16-byte chid, cdid, or ck, into
+ * buffer. */
+static inline int fh_print_id_string(char *buffer, uint8_t *id) {
+ char *ptr = buffer;
+ int i;
+ for (i=0; i<16; i++) {
+ ptr += FH_SPRINTF(ptr, "%02x", id[i]);
+ if (i < 15) {
+ ptr += FH_SPRINTF(ptr, " ");
+ }
+ }
+ return ptr - buffer;
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FH_CC_H_ */
+
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_common_fbsd.c b/drivers/usb/host/fh_otg/fh_common_port/fh_common_fbsd.c
new file mode 100644
index 00000000..ef4b67bb
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_common_fbsd.c
@@ -0,0 +1,1308 @@
+#include "fh_os.h"
+#include "fh_list.h"
+
+#ifdef FH_CCLIB
+# include "fh_cc.h"
+#endif
+
+#ifdef FH_CRYPTOLIB
+# include "fh_modpow.h"
+# include "fh_dh.h"
+# include "fh_crypto.h"
+#endif
+
+#ifdef FH_NOTIFYLIB
+# include "fh_notifier.h"
+#endif
+
+/* OS-Level Implementations */
+
+/* This is the FreeBSD 7.0 kernel implementation of the FH platform library. */
+
+
+/* MISC */
+
+void *FH_MEMSET(void *dest, uint8_t byte, uint32_t size)
+{
+ return memset(dest, byte, size);
+}
+
+void *FH_MEMCPY(void *dest, void const *src, uint32_t size)
+{
+ return memcpy(dest, src, size);
+}
+
+void *FH_MEMMOVE(void *dest, void *src, uint32_t size)
+{
+ bcopy(src, dest, size);
+ return dest;
+}
+
+int FH_MEMCMP(void *m1, void *m2, uint32_t size)
+{
+ return memcmp(m1, m2, size);
+}
+
+int FH_STRNCMP(void *s1, void *s2, uint32_t size)
+{
+ return strncmp(s1, s2, size);
+}
+
+int FH_STRCMP(void *s1, void *s2)
+{
+ return strcmp(s1, s2);
+}
+
+int FH_STRLEN(char const *str)
+{
+ return strlen(str);
+}
+
+char *FH_STRCPY(char *to, char const *from)
+{
+ return strcpy(to, from);
+}
+
+char *FH_STRDUP(char const *str)
+{
+ int len = FH_STRLEN(str) + 1;
+ char *new = FH_ALLOC_ATOMIC(len);
+
+ if (!new) {
+ return NULL;
+ }
+
+ FH_MEMCPY(new, str, len);
+ return new;
+}
+
+int FH_ATOI(char *str, int32_t *value)
+{
+ char *end = NULL;
+
+ *value = strtol(str, &end, 0);
+ if (*end == '\0') {
+ return 0;
+ }
+
+ return -1;
+}
+
+int FH_ATOUI(char *str, uint32_t *value)
+{
+ char *end = NULL;
+
+ *value = strtoul(str, &end, 0);
+ if (*end == '\0') {
+ return 0;
+ }
+
+ return -1;
+}
+
+
+#ifdef FH_UTFLIB
+/* From usbstring.c */
+
+int FH_UTF8_TO_UTF16LE(uint8_t const *s, uint16_t *cp, unsigned len)
+{
+ int count = 0;
+ u8 c;
+ u16 uchar;
+
+ /* this insists on correct encodings, though not minimal ones.
+ * BUT it currently rejects legit 4-byte UTF-8 code points,
+ * which need surrogate pairs. (Unicode 3.1 can use them.)
+ */
+ while (len != 0 && (c = (u8) *s++) != 0) {
+ if (unlikely(c & 0x80)) {
+ // 2-byte sequence:
+ // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
+ if ((c & 0xe0) == 0xc0) {
+ uchar = (c & 0x1f) << 6;
+
+ c = (u8) *s++;
+ if ((c & 0xc0) != 0xc0)
+ goto fail;
+ c &= 0x3f;
+ uchar |= c;
+
+ // 3-byte sequence (most CJKV characters):
+ // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
+ } else if ((c & 0xf0) == 0xe0) {
+ uchar = (c & 0x0f) << 12;
+
+ c = (u8) *s++;
+ if ((c & 0xc0) != 0xc0)
+ goto fail;
+ c &= 0x3f;
+ uchar |= c << 6;
+
+ c = (u8) *s++;
+ if ((c & 0xc0) != 0xc0)
+ goto fail;
+ c &= 0x3f;
+ uchar |= c;
+
+ /* no bogus surrogates */
+ if (0xd800 <= uchar && uchar <= 0xdfff)
+ goto fail;
+
+ // 4-byte sequence (surrogate pairs, currently rare):
+ // 11101110wwwwzzzzyy + 110111yyyyxxxxxx
+ // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
+ // (uuuuu = wwww + 1)
+ // FIXME accept the surrogate code points (only)
+ } else
+ goto fail;
+ } else
+ uchar = c;
+ put_unaligned (cpu_to_le16 (uchar), cp++);
+ count++;
+ len--;
+ }
+ return count;
+fail:
+ return -1;
+}
+
+#endif /* FH_UTFLIB */
+
+
+/* fh_debug.h */
+
+fh_bool_t FH_IN_IRQ(void)
+{
+// return in_irq();
+ return 0;
+}
+
+fh_bool_t FH_IN_BH(void)
+{
+// return in_softirq();
+ return 0;
+}
+
+void FH_VPRINTF(char *format, va_list args)
+{
+ vprintf(format, args);
+}
+
+int FH_VSNPRINTF(char *str, int size, char *format, va_list args)
+{
+ return vsnprintf(str, size, format, args);
+}
+
+void FH_PRINTF(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+
+int FH_SPRINTF(char *buffer, char *format, ...)
+{
+ int retval;
+ va_list args;
+
+ va_start(args, format);
+ retval = vsprintf(buffer, format, args);
+ va_end(args);
+ return retval;
+}
+
+int FH_SNPRINTF(char *buffer, int size, char *format, ...)
+{
+ int retval;
+ va_list args;
+
+ va_start(args, format);
+ retval = vsnprintf(buffer, size, format, args);
+ va_end(args);
+ return retval;
+}
+
+void __FH_WARN(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+
+void __FH_ERROR(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+
+void FH_EXCEPTION(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+// BUG_ON(1); ???
+}
+
+#ifdef DEBUG
+void __FH_DEBUG(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+#endif
+
+
+/* fh_mem.h */
+
+#if 0
+fh_pool_t *FH_DMA_POOL_CREATE(uint32_t size,
+ uint32_t align,
+ uint32_t alloc)
+{
+ struct dma_pool *pool = dma_pool_create("Pool", NULL,
+ size, align, alloc);
+ return (fh_pool_t *)pool;
+}
+
+void FH_DMA_POOL_DESTROY(fh_pool_t *pool)
+{
+ dma_pool_destroy((struct dma_pool *)pool);
+}
+
+void *FH_DMA_POOL_ALLOC(fh_pool_t *pool, uint64_t *dma_addr)
+{
+// return dma_pool_alloc((struct dma_pool *)pool, GFP_KERNEL, dma_addr);
+ return dma_pool_alloc((struct dma_pool *)pool, M_WAITOK, dma_addr);
+}
+
+void *FH_DMA_POOL_ZALLOC(fh_pool_t *pool, uint64_t *dma_addr)
+{
+ void *vaddr = FH_DMA_POOL_ALLOC(pool, dma_addr);
+ memset(..);
+}
+
+void FH_DMA_POOL_FREE(fh_pool_t *pool, void *vaddr, void *daddr)
+{
+ dma_pool_free(pool, vaddr, daddr);
+}
+#endif
+
+static void dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
+void *__FH_DMA_ALLOC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr)
+{
+ fh_dmactx_t *dma = (fh_dmactx_t *)dma_ctx;
+ int error;
+
+ error = bus_dma_tag_create(
+#if __FreeBSD_version >= 700000
+ bus_get_dma_tag(dma->dev), /* parent */
+#else
+ NULL, /* parent */
+#endif
+ 4, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &dma->dma_tag);
+ if (error) {
+ device_printf(dma->dev, "%s: bus_dma_tag_create failed: %d\n",
+ __func__, error);
+ goto fail_0;
+ }
+
+ error = bus_dmamem_alloc(dma->dma_tag, &dma->dma_vaddr,
+ BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
+ if (error) {
+ device_printf(dma->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n",
+ __func__, (uintmax_t)size, error);
+ goto fail_1;
+ }
+
+ dma->dma_paddr = 0;
+ error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
+ dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
+ if (error || dma->dma_paddr == 0) {
+ device_printf(dma->dev, "%s: bus_dmamap_load failed: %d\n",
+ __func__, error);
+ goto fail_2;
+ }
+
+ *dma_addr = dma->dma_paddr;
+ return dma->dma_vaddr;
+
+fail_2:
+ bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+fail_1:
+ bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+ bus_dma_tag_destroy(dma->dma_tag);
+fail_0:
+ dma->dma_map = NULL;
+ dma->dma_tag = NULL;
+
+ return NULL;
+}
+
+void __FH_DMA_FREE(void *dma_ctx, uint32_t size, void *virt_addr, fh_dma_t dma_addr)
+{
+ fh_dmactx_t *dma = (fh_dmactx_t *)dma_ctx;
+
+ if (dma->dma_tag == NULL)
+ return;
+ if (dma->dma_map != NULL) {
+ bus_dmamap_sync(dma->dma_tag, dma->dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+ bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+ dma->dma_map = NULL;
+ }
+
+ bus_dma_tag_destroy(dma->dma_tag);
+ dma->dma_tag = NULL;
+}
+
+void *__FH_ALLOC(void *mem_ctx, uint32_t size)
+{
+ return malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
+}
+
+void *__FH_ALLOC_ATOMIC(void *mem_ctx, uint32_t size)
+{
+ return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+}
+
+void __FH_FREE(void *mem_ctx, void *addr)
+{
+ free(addr, M_DEVBUF);
+}
+
+
+#ifdef FH_CRYPTOLIB
+/* fh_crypto.h */
+
+void FH_RANDOM_BYTES(uint8_t *buffer, uint32_t length)
+{
+ get_random_bytes(buffer, length);
+}
+
+int FH_AES_CBC(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t iv[16], uint8_t *out)
+{
+ struct crypto_blkcipher *tfm;
+ struct blkcipher_desc desc;
+ struct scatterlist sgd;
+ struct scatterlist sgs;
+
+ tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (tfm == NULL) {
+ printk("failed to load transform for aes CBC\n");
+ return -1;
+ }
+
+ crypto_blkcipher_setkey(tfm, key, keylen);
+ crypto_blkcipher_set_iv(tfm, iv, 16);
+
+ sg_init_one(&sgd, out, messagelen);
+ sg_init_one(&sgs, message, messagelen);
+
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ if (crypto_blkcipher_encrypt(&desc, &sgd, &sgs, messagelen)) {
+ crypto_free_blkcipher(tfm);
+ FH_ERROR("AES CBC encryption failed");
+ return -1;
+ }
+
+ crypto_free_blkcipher(tfm);
+ return 0;
+}
+
+int FH_SHA256(uint8_t *message, uint32_t len, uint8_t *out)
+{
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+
+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ FH_ERROR("Failed to load transform for sha256: %ld", PTR_ERR(tfm));
+ return 0;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ sg_init_one(&sg, message, len);
+ crypto_hash_digest(&desc, &sg, len, out);
+ crypto_free_hash(tfm);
+
+ return 1;
+}
+
+int FH_HMAC_SHA256(uint8_t *message, uint32_t messagelen,
+ uint8_t *key, uint32_t keylen, uint8_t *out)
+{
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+
+ tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ FH_ERROR("Failed to load transform for hmac(sha256): %ld", PTR_ERR(tfm));
+ return 0;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ sg_init_one(&sg, message, messagelen);
+ crypto_hash_setkey(tfm, key, keylen);
+ crypto_hash_digest(&desc, &sg, messagelen, out);
+ crypto_free_hash(tfm);
+
+ return 1;
+}
+
+#endif /* FH_CRYPTOLIB */
+
+
+/* Byte Ordering Conversions */
+
+uint32_t FH_CPU_TO_LE32(uint32_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint32_t FH_CPU_TO_BE32(uint32_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint32_t FH_LE32_TO_CPU(uint32_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint32_t FH_BE32_TO_CPU(uint32_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint16_t FH_CPU_TO_LE16(uint16_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+uint16_t FH_CPU_TO_BE16(uint16_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+uint16_t FH_LE16_TO_CPU(uint16_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+uint16_t FH_BE16_TO_CPU(uint16_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+
+/* Registers */
+
+uint32_t FH_READ_REG32(void *io_ctx, uint32_t volatile *reg)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ return bus_space_read_4(io->iot, io->ioh, ior);
+}
+
+#if 0
+uint64_t FH_READ_REG64(void *io_ctx, uint64_t volatile *reg)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ return bus_space_read_8(io->iot, io->ioh, ior);
+}
+#endif
+
+void FH_WRITE_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t value)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ bus_space_write_4(io->iot, io->ioh, ior, value);
+}
+
+#if 0
+void FH_WRITE_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t value)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ bus_space_write_8(io->iot, io->ioh, ior, value);
+}
+#endif
+
+void FH_MODIFY_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t clear_mask,
+ uint32_t set_mask)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ bus_space_write_4(io->iot, io->ioh, ior,
+ (bus_space_read_4(io->iot, io->ioh, ior) &
+ ~clear_mask) | set_mask);
+}
+
+#if 0
+void FH_MODIFY_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t clear_mask,
+ uint64_t set_mask)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ bus_space_write_8(io->iot, io->ioh, ior,
+ (bus_space_read_8(io->iot, io->ioh, ior) &
+ ~clear_mask) | set_mask);
+}
+#endif
+
+
+/* Locking */
+
+fh_spinlock_t *FH_SPINLOCK_ALLOC(void)
+{
+ struct mtx *sl = FH_ALLOC(sizeof(*sl));
+
+ if (!sl) {
+ FH_ERROR("Cannot allocate memory for spinlock");
+ return NULL;
+ }
+
+ mtx_init(sl, "dw3spn", NULL, MTX_SPIN);
+ return (fh_spinlock_t *)sl;
+}
+
+void FH_SPINLOCK_FREE(fh_spinlock_t *lock)
+{
+ struct mtx *sl = (struct mtx *)lock;
+
+ mtx_destroy(sl);
+ FH_FREE(sl);
+}
+
+void FH_SPINLOCK(fh_spinlock_t *lock)
+{
+ mtx_lock_spin((struct mtx *)lock); // ???
+}
+
+void FH_SPINUNLOCK(fh_spinlock_t *lock)
+{
+ mtx_unlock_spin((struct mtx *)lock); // ???
+}
+
+void FH_SPINLOCK_IRQSAVE(fh_spinlock_t *lock, fh_irqflags_t *flags)
+{
+ mtx_lock_spin((struct mtx *)lock);
+}
+
+void FH_SPINUNLOCK_IRQRESTORE(fh_spinlock_t *lock, fh_irqflags_t flags)
+{
+ mtx_unlock_spin((struct mtx *)lock);
+}
+
+fh_mutex_t *FH_MUTEX_ALLOC(void)
+{
+ struct mtx *m;
+ fh_mutex_t *mutex = (fh_mutex_t *)FH_ALLOC(sizeof(struct mtx));
+
+ if (!mutex) {
+ FH_ERROR("Cannot allocate memory for mutex");
+ return NULL;
+ }
+
+ m = (struct mtx *)mutex;
+ mtx_init(m, "dw3mtx", NULL, MTX_DEF);
+ return mutex;
+}
+
+#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
+#else
+void FH_MUTEX_FREE(fh_mutex_t *mutex)
+{
+ mtx_destroy((struct mtx *)mutex);
+ FH_FREE(mutex);
+}
+#endif
+
+void FH_MUTEX_LOCK(fh_mutex_t *mutex)
+{
+ struct mtx *m = (struct mtx *)mutex;
+
+ mtx_lock(m);
+}
+
+int FH_MUTEX_TRYLOCK(fh_mutex_t *mutex)
+{
+ struct mtx *m = (struct mtx *)mutex;
+
+ return mtx_trylock(m);
+}
+
+void FH_MUTEX_UNLOCK(fh_mutex_t *mutex)
+{
+ struct mtx *m = (struct mtx *)mutex;
+
+ mtx_unlock(m);
+}
+
+
+/* Timing */
+
+void FH_UDELAY(uint32_t usecs)
+{
+ DELAY(usecs);
+}
+
+void FH_MDELAY(uint32_t msecs)
+{
+ do {
+ DELAY(1000);
+ } while (--msecs);
+}
+
+void FH_MSLEEP(uint32_t msecs)
+{
+ struct timeval tv;
+
+ tv.tv_sec = msecs / 1000;
+ tv.tv_usec = (msecs - tv.tv_sec * 1000) * 1000;
+ pause("dw3slp", tvtohz(&tv));
+}
+
+uint32_t FH_TIME(void)
+{
+ struct timeval tv;
+
+ microuptime(&tv); // or getmicrouptime? (less precise, but faster)
+ return tv.tv_sec * 1000 + tv.tv_usec / 1000;
+}
+
+
+/* Timers */
+
+struct fh_timer {
+ struct callout t;
+ char *name;
+ fh_spinlock_t *lock;
+ fh_timer_callback_t cb;
+ void *data;
+};
+
+fh_timer_t *FH_TIMER_ALLOC(char *name, fh_timer_callback_t cb, void *data)
+{
+ fh_timer_t *t = FH_ALLOC(sizeof(*t));
+
+ if (!t) {
+ FH_ERROR("Cannot allocate memory for timer");
+ return NULL;
+ }
+
+ callout_init(&t->t, 1);
+
+ t->name = FH_STRDUP(name);
+ if (!t->name) {
+ FH_ERROR("Cannot allocate memory for timer->name");
+ goto no_name;
+ }
+
+ t->lock = FH_SPINLOCK_ALLOC();
+ if (!t->lock) {
+ FH_ERROR("Cannot allocate memory for lock");
+ goto no_lock;
+ }
+
+ t->cb = cb;
+ t->data = data;
+
+ return t;
+
+ no_lock:
+ FH_FREE(t->name);
+ no_name:
+ FH_FREE(t);
+
+ return NULL;
+}
+
+void FH_TIMER_FREE(fh_timer_t *timer)
+{
+ callout_stop(&timer->t);
+ FH_SPINLOCK_FREE(timer->lock);
+ FH_FREE(timer->name);
+ FH_FREE(timer);
+}
+
+void FH_TIMER_SCHEDULE(fh_timer_t *timer, uint32_t time)
+{
+ struct timeval tv;
+
+ tv.tv_sec = time / 1000;
+ tv.tv_usec = (time - tv.tv_sec * 1000) * 1000;
+ callout_reset(&timer->t, tvtohz(&tv), timer->cb, timer->data);
+}
+
+void FH_TIMER_CANCEL(fh_timer_t *timer)
+{
+ callout_stop(&timer->t);
+}
+
+
+/* Wait Queues */
+
+struct fh_waitq {
+ struct mtx lock;
+ int abort;
+};
+
+fh_waitq_t *FH_WAITQ_ALLOC(void)
+{
+ fh_waitq_t *wq = FH_ALLOC(sizeof(*wq));
+
+ if (!wq) {
+ FH_ERROR("Cannot allocate memory for waitqueue");
+ return NULL;
+ }
+
+ mtx_init(&wq->lock, "dw3wtq", NULL, MTX_DEF);
+ wq->abort = 0;
+
+ return wq;
+}
+
+void FH_WAITQ_FREE(fh_waitq_t *wq)
+{
+ mtx_destroy(&wq->lock);
+ FH_FREE(wq);
+}
+
+int32_t FH_WAITQ_WAIT(fh_waitq_t *wq, fh_waitq_condition_t cond, void *data)
+{
+// intrmask_t ipl;
+ int result = 0;
+
+ mtx_lock(&wq->lock);
+// ipl = splbio();
+
+ /* Skip the sleep if already aborted or triggered */
+ if (!wq->abort && !cond(data)) {
+// splx(ipl);
+ result = msleep(wq, &wq->lock, PCATCH, "dw3wat", 0); // infinite timeout
+// ipl = splbio();
+ }
+
+ if (result == ERESTART) { // signaled - restart
+ result = -FH_E_RESTART;
+
+ } else if (result == EINTR) { // signaled - interrupt
+ result = -FH_E_ABORT;
+
+ } else if (wq->abort) {
+ result = -FH_E_ABORT;
+
+ } else {
+ result = 0;
+ }
+
+ wq->abort = 0;
+// splx(ipl);
+ mtx_unlock(&wq->lock);
+ return result;
+}
+
+int32_t FH_WAITQ_WAIT_TIMEOUT(fh_waitq_t *wq, fh_waitq_condition_t cond,
+ void *data, int32_t msecs)
+{
+ struct timeval tv, tv1, tv2;
+// intrmask_t ipl;
+ int result = 0;
+
+ tv.tv_sec = msecs / 1000;
+ tv.tv_usec = (msecs - tv.tv_sec * 1000) * 1000;
+
+ mtx_lock(&wq->lock);
+// ipl = splbio();
+
+ /* Skip the sleep if already aborted or triggered */
+ if (!wq->abort && !cond(data)) {
+// splx(ipl);
+ getmicrouptime(&tv1);
+ result = msleep(wq, &wq->lock, PCATCH, "dw3wto", tvtohz(&tv));
+ getmicrouptime(&tv2);
+// ipl = splbio();
+ }
+
+ if (result == 0) { // awoken
+ if (wq->abort) {
+ result = -FH_E_ABORT;
+ } else {
+ tv2.tv_usec -= tv1.tv_usec;
+ if (tv2.tv_usec < 0) {
+ tv2.tv_usec += 1000000;
+ tv2.tv_sec--;
+ }
+
+ tv2.tv_sec -= tv1.tv_sec;
+ result = tv2.tv_sec * 1000 + tv2.tv_usec / 1000;
+ result = msecs - result;
+ if (result <= 0)
+ result = 1;
+ }
+ } else if (result == ERESTART) { // signaled - restart
+ result = -FH_E_RESTART;
+
+ } else if (result == EINTR) { // signaled - interrupt
+ result = -FH_E_ABORT;
+
+ } else { // timed out
+ result = -FH_E_TIMEOUT;
+ }
+
+ wq->abort = 0;
+// splx(ipl);
+ mtx_unlock(&wq->lock);
+ return result;
+}
+
+void FH_WAITQ_TRIGGER(fh_waitq_t *wq)
+{
+ wakeup(wq);
+}
+
+void FH_WAITQ_ABORT(fh_waitq_t *wq)
+{
+// intrmask_t ipl;
+
+ mtx_lock(&wq->lock);
+// ipl = splbio();
+ wq->abort = 1;
+ wakeup(wq);
+// splx(ipl);
+ mtx_unlock(&wq->lock);
+}
+
+
+/* Threading */
+
+struct fh_thread {
+ struct proc *proc;
+ int abort;
+};
+
+fh_thread_t *FH_THREAD_RUN(fh_thread_function_t func, char *name, void *data)
+{
+ int retval;
+ fh_thread_t *thread = FH_ALLOC(sizeof(*thread));
+
+ if (!thread) {
+ return NULL;
+ }
+
+ thread->abort = 0;
+ retval = kthread_create((void (*)(void *))func, data, &thread->proc,
+ RFPROC | RFNOWAIT, 0, "%s", name);
+ if (retval) {
+ FH_FREE(thread);
+ return NULL;
+ }
+
+ return thread;
+}
+
+int FH_THREAD_STOP(fh_thread_t *thread)
+{
+ int retval;
+
+ thread->abort = 1;
+ retval = tsleep(&thread->abort, 0, "dw3stp", 60 * hz);
+
+ if (retval == 0) {
+ /* FH_THREAD_EXIT() will free the thread struct */
+ return 0;
+ }
+
+ /* NOTE: We leak the thread struct if thread doesn't die */
+
+ if (retval == EWOULDBLOCK) {
+ return -FH_E_TIMEOUT;
+ }
+
+ return -FH_E_UNKNOWN;
+}
+
+fh_bool_t FH_THREAD_SHOULD_STOP(fh_thread_t *thread)
+{
+ return thread->abort;
+}
+
+void FH_THREAD_EXIT(fh_thread_t *thread)
+{
+ wakeup(&thread->abort);
+ FH_FREE(thread);
+ kthread_exit(0);
+}
+
+
+/* tasklets
+ - Runs in interrupt context (cannot sleep)
+ - Each tasklet runs on a single CPU [ How can we ensure this on FreeBSD? Does it matter? ]
+ - Different tasklets can be running simultaneously on different CPUs [ shouldn't matter ]
+ */
+struct fh_tasklet {
+ struct task t;
+ fh_tasklet_callback_t cb;
+ void *data;
+};
+
+static void tasklet_callback(void *data, int pending) // what to do with pending ???
+{
+ fh_tasklet_t *task = (fh_tasklet_t *)data;
+
+ task->cb(task->data);
+}
+
+fh_tasklet_t *FH_TASK_ALLOC(char *name, fh_tasklet_callback_t cb, void *data)
+{
+ fh_tasklet_t *task = FH_ALLOC(sizeof(*task));
+
+ if (task) {
+ task->cb = cb;
+ task->data = data;
+ TASK_INIT(&task->t, 0, tasklet_callback, task);
+ } else {
+ FH_ERROR("Cannot allocate memory for tasklet");
+ }
+
+ return task;
+}
+
+void FH_TASK_FREE(fh_tasklet_t *task)
+{
+ taskqueue_drain(taskqueue_fast, &task->t); // ???
+ FH_FREE(task);
+}
+
+void FH_TASK_SCHEDULE(fh_tasklet_t *task)
+{
+ /* Uses predefined system queue */
+ taskqueue_enqueue_fast(taskqueue_fast, &task->t);
+}
+
+
+/* workqueues
+ - Runs in process context (can sleep)
+ */
+typedef struct work_container {
+ fh_work_callback_t cb;
+ void *data;
+ fh_workq_t *wq;
+ char *name;
+ int hz;
+
+#ifdef DEBUG
+ FH_CIRCLEQ_ENTRY(work_container) entry;
+#endif
+ struct task task;
+} work_container_t;
+
+#ifdef DEBUG
+FH_CIRCLEQ_HEAD(work_container_queue, work_container);
+#endif
+
+struct fh_workq {
+ struct taskqueue *taskq;
+ fh_spinlock_t *lock;
+ fh_waitq_t *waitq;
+ int pending;
+
+#ifdef DEBUG
+ struct work_container_queue entries;
+#endif
+};
+
+static void do_work(void *data, int pending) // what to do with pending ???
+{
+ work_container_t *container = (work_container_t *)data;
+ fh_workq_t *wq = container->wq;
+ fh_irqflags_t flags;
+
+ if (container->hz) {
+ pause("dw3wrk", container->hz);
+ }
+
+ container->cb(container->data);
+ FH_DEBUG("Work done: %s, container=%p", container->name, container);
+
+ FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+
+#ifdef DEBUG
+ FH_CIRCLEQ_REMOVE(&wq->entries, container, entry);
+#endif
+ if (container->name)
+ FH_FREE(container->name);
+ FH_FREE(container);
+ wq->pending--;
+ FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+ FH_WAITQ_TRIGGER(wq->waitq);
+}
+
+static int work_done(void *data)
+{
+ fh_workq_t *workq = (fh_workq_t *)data;
+
+ return workq->pending == 0;
+}
+
+int FH_WORKQ_WAIT_WORK_DONE(fh_workq_t *workq, int timeout)
+{
+ return FH_WAITQ_WAIT_TIMEOUT(workq->waitq, work_done, workq, timeout);
+}
+
+fh_workq_t *FH_WORKQ_ALLOC(char *name)
+{
+ fh_workq_t *wq = FH_ALLOC(sizeof(*wq));
+
+ if (!wq) {
+ FH_ERROR("Cannot allocate memory for workqueue");
+ return NULL;
+ }
+
+ wq->taskq = taskqueue_create(name, M_NOWAIT, taskqueue_thread_enqueue, &wq->taskq);
+ if (!wq->taskq) {
+ FH_ERROR("Cannot allocate memory for taskqueue");
+ goto no_taskq;
+ }
+
+ wq->pending = 0;
+
+ wq->lock = FH_SPINLOCK_ALLOC();
+ if (!wq->lock) {
+ FH_ERROR("Cannot allocate memory for spinlock");
+ goto no_lock;
+ }
+
+ wq->waitq = FH_WAITQ_ALLOC();
+ if (!wq->waitq) {
+ FH_ERROR("Cannot allocate memory for waitqueue");
+ goto no_waitq;
+ }
+
+ taskqueue_start_threads(&wq->taskq, 1, PWAIT, "%s taskq", "dw3tsk");
+
+#ifdef DEBUG
+ FH_CIRCLEQ_INIT(&wq->entries);
+#endif
+ return wq;
+
+ no_waitq:
+ FH_SPINLOCK_FREE(wq->lock);
+ no_lock:
+ taskqueue_free(wq->taskq);
+ no_taskq:
+ FH_FREE(wq);
+
+ return NULL;
+}
+
+void FH_WORKQ_FREE(fh_workq_t *wq)
+{
+#ifdef DEBUG
+ fh_irqflags_t flags;
+
+ FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+
+ if (wq->pending != 0) {
+ struct work_container *container;
+
+ FH_ERROR("Destroying work queue with pending work");
+
+ FH_CIRCLEQ_FOREACH(container, &wq->entries, entry) {
+ FH_ERROR("Work %s still pending", container->name);
+ }
+ }
+
+ FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+#endif
+ FH_WAITQ_FREE(wq->waitq);
+ FH_SPINLOCK_FREE(wq->lock);
+ taskqueue_free(wq->taskq);
+ FH_FREE(wq);
+}
+
+void FH_WORKQ_SCHEDULE(fh_workq_t *wq, fh_work_callback_t cb, void *data,
+ char *format, ...)
+{
+ fh_irqflags_t flags;
+ work_container_t *container;
+ static char name[128];
+ va_list args;
+
+ va_start(args, format);
+ FH_VSNPRINTF(name, 128, format, args);
+ va_end(args);
+
+ FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+ wq->pending++;
+ FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+ FH_WAITQ_TRIGGER(wq->waitq);
+
+ container = FH_ALLOC_ATOMIC(sizeof(*container));
+ if (!container) {
+ FH_ERROR("Cannot allocate memory for container");
+ return;
+ }
+
+ container->name = FH_STRDUP(name);
+ if (!container->name) {
+ FH_ERROR("Cannot allocate memory for container->name");
+ FH_FREE(container);
+ return;
+ }
+
+ container->cb = cb;
+ container->data = data;
+ container->wq = wq;
+ container->hz = 0;
+
+ FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
+
+ TASK_INIT(&container->task, 0, do_work, container);
+
+#ifdef DEBUG
+ FH_CIRCLEQ_INSERT_TAIL(&wq->entries, container, entry);
+#endif
+ taskqueue_enqueue_fast(wq->taskq, &container->task);
+}
+
+void FH_WORKQ_SCHEDULE_DELAYED(fh_workq_t *wq, fh_work_callback_t cb,
+ void *data, uint32_t time, char *format, ...)
+{
+ fh_irqflags_t flags;
+ work_container_t *container;
+ static char name[128];
+ struct timeval tv;
+ va_list args;
+
+ va_start(args, format);
+ FH_VSNPRINTF(name, 128, format, args);
+ va_end(args);
+
+ FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+ wq->pending++;
+ FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+ FH_WAITQ_TRIGGER(wq->waitq);
+
+ container = FH_ALLOC_ATOMIC(sizeof(*container));
+ if (!container) {
+ FH_ERROR("Cannot allocate memory for container");
+ return;
+ }
+
+ container->name = FH_STRDUP(name);
+ if (!container->name) {
+ FH_ERROR("Cannot allocate memory for container->name");
+ FH_FREE(container);
+ return;
+ }
+
+ container->cb = cb;
+ container->data = data;
+ container->wq = wq;
+
+ tv.tv_sec = time / 1000;
+ tv.tv_usec = (time - tv.tv_sec * 1000) * 1000;
+ container->hz = tvtohz(&tv);
+
+ FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
+
+ TASK_INIT(&container->task, 0, do_work, container);
+
+#ifdef DEBUG
+ FH_CIRCLEQ_INSERT_TAIL(&wq->entries, container, entry);
+#endif
+ taskqueue_enqueue_fast(wq->taskq, &container->task);
+}
+
+int FH_WORKQ_PENDING(fh_workq_t *wq)
+{
+ return wq->pending;
+}
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_common_linux.c b/drivers/usb/host/fh_otg/fh_common_port/fh_common_linux.c
new file mode 100644
index 00000000..da70b191
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_common_linux.c
@@ -0,0 +1,1426 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+
+#ifdef FH_CCLIB
+# include "fh_cc.h"
+#endif
+
+#ifdef FH_CRYPTOLIB
+# include "fh_modpow.h"
+# include "fh_dh.h"
+# include "fh_crypto.h"
+#endif
+
+#ifdef FH_NOTIFYLIB
+# include "fh_notifier.h"
+#endif
+
+/* OS-Level Implementations */
+
+/* This is the Linux kernel implementation of the FH platform library. */
+#include <linux/moduleparam.h>
+#include <linux/ctype.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/cdev.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+# include <linux/usb/gadget.h>
+#else
+# include <linux/usb_gadget.h>
+#endif
+
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include "fh_os.h"
+#include "fh_list.h"
+
+
+/* MISC */
+
+void *FH_MEMSET(void *dest, uint8_t byte, uint32_t size)
+{
+ return memset(dest, byte, size);
+}
+
+void *FH_MEMCPY(void *dest, void const *src, uint32_t size)
+{
+ return memcpy(dest, src, size);
+}
+
+void *FH_MEMMOVE(void *dest, void *src, uint32_t size)
+{
+ return memmove(dest, src, size);
+}
+
+int FH_MEMCMP(void *m1, void *m2, uint32_t size)
+{
+ return memcmp(m1, m2, size);
+}
+
+int FH_STRNCMP(void *s1, void *s2, uint32_t size)
+{
+ return strncmp(s1, s2, size);
+}
+
+int FH_STRCMP(void *s1, void *s2)
+{
+ return strcmp(s1, s2);
+}
+
+int FH_STRLEN(char const *str)
+{
+ return strlen(str);
+}
+
+char *FH_STRCPY(char *to, char const *from)
+{
+ return strcpy(to, from);
+}
+
+char *FH_STRDUP(char const *str)
+{
+ int len = FH_STRLEN(str) + 1;
+ char *new = FH_ALLOC_ATOMIC(len);
+
+ if (!new) {
+ return NULL;
+ }
+
+ FH_MEMCPY(new, str, len);
+ return new;
+}
+
+int FH_ATOI(const char *str, int32_t *value)
+{
+ char *end = NULL;
+
+ *value = simple_strtol(str, &end, 0);
+ if (*end == '\0') {
+ return 0;
+ }
+
+ return -1;
+}
+
+int FH_ATOUI(const char *str, uint32_t *value)
+{
+ char *end = NULL;
+
+ *value = simple_strtoul(str, &end, 0);
+ if (*end == '\0') {
+ return 0;
+ }
+
+ return -1;
+}
+
+
+#ifdef FH_UTFLIB
+/* From usbstring.c */
+
+int FH_UTF8_TO_UTF16LE(uint8_t const *s, uint16_t *cp, unsigned len)
+{
+ int count = 0;
+ u8 c;
+ u16 uchar;
+
+ /* this insists on correct encodings, though not minimal ones.
+ * BUT it currently rejects legit 4-byte UTF-8 code points,
+ * which need surrogate pairs. (Unicode 3.1 can use them.)
+ */
+ while (len != 0 && (c = (u8) *s++) != 0) {
+ if (unlikely(c & 0x80)) {
+ // 2-byte sequence:
+ // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
+ if ((c & 0xe0) == 0xc0) {
+ uchar = (c & 0x1f) << 6;
+
+ c = (u8) *s++;
+ if ((c & 0xc0) != 0xc0)
+ goto fail;
+ c &= 0x3f;
+ uchar |= c;
+
+ // 3-byte sequence (most CJKV characters):
+ // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
+ } else if ((c & 0xf0) == 0xe0) {
+ uchar = (c & 0x0f) << 12;
+
+ c = (u8) *s++;
+ if ((c & 0xc0) != 0xc0)
+ goto fail;
+ c &= 0x3f;
+ uchar |= c << 6;
+
+ c = (u8) *s++;
+ if ((c & 0xc0) != 0xc0)
+ goto fail;
+ c &= 0x3f;
+ uchar |= c;
+
+ /* no bogus surrogates */
+ if (0xd800 <= uchar && uchar <= 0xdfff)
+ goto fail;
+
+ // 4-byte sequence (surrogate pairs, currently rare):
+ // 11101110wwwwzzzzyy + 110111yyyyxxxxxx
+ // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
+ // (uuuuu = wwww + 1)
+ // FIXME accept the surrogate code points (only)
+ } else
+ goto fail;
+ } else
+ uchar = c;
+ put_unaligned (cpu_to_le16 (uchar), cp++);
+ count++;
+ len--;
+ }
+ return count;
+fail:
+ return -1;
+}
+#endif /* FH_UTFLIB */
+
+
+/* fh_debug.h */
+
+fh_bool_t FH_IN_IRQ(void)
+{
+ return in_irq();
+}
+
+fh_bool_t FH_IN_BH(void)
+{
+ return in_softirq();
+}
+
+void FH_VPRINTF(char *format, va_list args)
+{
+ vprintk(format, args);
+}
+
+int FH_VSNPRINTF(char *str, int size, char *format, va_list args)
+{
+ return vsnprintf(str, size, format, args);
+}
+
+void FH_PRINTF(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+
+int FH_SPRINTF(char *buffer, char *format, ...)
+{
+ int retval;
+ va_list args;
+
+ va_start(args, format);
+ retval = vsprintf(buffer, format, args);
+ va_end(args);
+ return retval;
+}
+
+int FH_SNPRINTF(char *buffer, int size, char *format, ...)
+{
+ int retval;
+ va_list args;
+
+ va_start(args, format);
+ retval = vsnprintf(buffer, size, format, args);
+ va_end(args);
+ return retval;
+}
+
+void __FH_WARN(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_PRINTF(KERN_WARNING);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+
+void __FH_ERROR(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_PRINTF(KERN_ERR);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+
+void FH_EXCEPTION(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_PRINTF(KERN_ERR);
+ FH_VPRINTF(format, args);
+ va_end(args);
+ BUG_ON(1);
+}
+
+#ifdef DEBUG
+void __FH_DEBUG(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_PRINTF(KERN_DEBUG);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+#endif
+
+
+/* fh_mem.h */
+
+#if 0
+fh_pool_t *FH_DMA_POOL_CREATE(uint32_t size,
+ uint32_t align,
+ uint32_t alloc)
+{
+ struct dma_pool *pool = dma_pool_create("Pool", NULL,
+ size, align, alloc);
+ return (fh_pool_t *)pool;
+}
+
+void FH_DMA_POOL_DESTROY(fh_pool_t *pool)
+{
+ dma_pool_destroy((struct dma_pool *)pool);
+}
+
+void *FH_DMA_POOL_ALLOC(fh_pool_t *pool, uint64_t *dma_addr)
+{
+ return dma_pool_alloc((struct dma_pool *)pool, GFP_KERNEL, dma_addr);
+}
+
+void *FH_DMA_POOL_ZALLOC(fh_pool_t *pool, uint64_t *dma_addr)
+{
+ void *vaddr = FH_DMA_POOL_ALLOC(pool, dma_addr);
+ memset(..);
+}
+
+void FH_DMA_POOL_FREE(fh_pool_t *pool, void *vaddr, void *daddr)
+{
+ dma_pool_free(pool, vaddr, daddr);
+}
+#endif
+
+void *__FH_DMA_ALLOC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr)
+{
+#ifdef xxCOSIM /* Only works for 32-bit cosim */
+ void *buf = dma_alloc_coherent(dma_ctx, (size_t)size, dma_addr, GFP_KERNEL);
+#else
+ void *buf = dma_alloc_coherent(dma_ctx, (size_t)size, dma_addr, GFP_ATOMIC);
+#endif
+ if (!buf) {
+ return NULL;
+ }
+
+ memset(buf, 0, (size_t)size);
+ return buf;
+}
+
+void *__FH_DMA_ALLOC_ATOMIC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr)
+{
+	void *buf = dma_alloc_coherent(dma_ctx, (size_t)size, dma_addr, GFP_ATOMIC);
+ if (!buf) {
+ return NULL;
+ }
+ memset(buf, 0, (size_t)size);
+ return buf;
+}
+
+void __FH_DMA_FREE(void *dma_ctx, uint32_t size, void *virt_addr, fh_dma_t dma_addr)
+{
+ dma_free_coherent(dma_ctx, size, virt_addr, dma_addr);
+}
+
+void *__FH_ALLOC(void *mem_ctx, uint32_t size)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+void *__FH_ALLOC_ATOMIC(void *mem_ctx, uint32_t size)
+{
+ return kzalloc(size, GFP_ATOMIC);
+}
+
+void __FH_FREE(void *mem_ctx, void *addr)
+{
+ kfree(addr);
+}
+
+
+#ifdef FH_CRYPTOLIB
+/* fh_crypto.h */
+
+void FH_RANDOM_BYTES(uint8_t *buffer, uint32_t length)
+{
+ get_random_bytes(buffer, length);
+}
+
+int FH_AES_CBC(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t iv[16], uint8_t *out)
+{
+ struct crypto_blkcipher *tfm;
+ struct blkcipher_desc desc;
+ struct scatterlist sgd;
+ struct scatterlist sgs;
+
+ tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+ printk("failed to load transform for aes CBC\n");
+ return -1;
+ }
+
+ crypto_blkcipher_setkey(tfm, key, keylen);
+ crypto_blkcipher_set_iv(tfm, iv, 16);
+
+ sg_init_one(&sgd, out, messagelen);
+ sg_init_one(&sgs, message, messagelen);
+
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ if (crypto_blkcipher_encrypt(&desc, &sgd, &sgs, messagelen)) {
+ crypto_free_blkcipher(tfm);
+ FH_ERROR("AES CBC encryption failed");
+ return -1;
+ }
+
+ crypto_free_blkcipher(tfm);
+ return 0;
+}
+
+int FH_SHA256(uint8_t *message, uint32_t len, uint8_t *out)
+{
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+
+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ FH_ERROR("Failed to load transform for sha256: %ld\n", PTR_ERR(tfm));
+ return 0;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ sg_init_one(&sg, message, len);
+ crypto_hash_digest(&desc, &sg, len, out);
+ crypto_free_hash(tfm);
+
+ return 1;
+}
+
+int FH_HMAC_SHA256(uint8_t *message, uint32_t messagelen,
+ uint8_t *key, uint32_t keylen, uint8_t *out)
+{
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+
+ tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ FH_ERROR("Failed to load transform for hmac(sha256): %ld\n", PTR_ERR(tfm));
+ return 0;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ sg_init_one(&sg, message, messagelen);
+ crypto_hash_setkey(tfm, key, keylen);
+ crypto_hash_digest(&desc, &sg, messagelen, out);
+ crypto_free_hash(tfm);
+
+ return 1;
+}
+#endif /* FH_CRYPTOLIB */
+
+
+/* Byte Ordering Conversions */
+
+uint32_t FH_CPU_TO_LE32(uint32_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint32_t FH_CPU_TO_BE32(uint32_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint32_t FH_LE32_TO_CPU(uint32_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint32_t FH_BE32_TO_CPU(uint32_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint16_t FH_CPU_TO_LE16(uint16_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+uint16_t FH_CPU_TO_BE16(uint16_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+uint16_t FH_LE16_TO_CPU(uint16_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+uint16_t FH_BE16_TO_CPU(uint16_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+
+/* Registers */
+
+uint32_t FH_READ_REG32(uint32_t volatile *reg)
+{
+ return readl(reg);
+}
+
+#if 0
+uint64_t FH_READ_REG64(uint64_t volatile *reg)
+{
+}
+#endif
+
+void FH_WRITE_REG32(uint32_t volatile *reg, uint32_t value)
+{
+ writel(value, reg);
+}
+
+#if 0
+void FH_WRITE_REG64(uint64_t volatile *reg, uint64_t value)
+{
+}
+#endif
+
+void FH_MODIFY_REG32(uint32_t volatile *reg, uint32_t clear_mask, uint32_t set_mask)
+{
+ writel((readl(reg) & ~clear_mask) | set_mask, reg);
+}
+
+#if 0
+void FH_MODIFY_REG64(uint64_t volatile *reg, uint64_t clear_mask, uint64_t set_mask)
+{
+}
+#endif
+
+
+/* Locking */
+
+fh_spinlock_t *FH_SPINLOCK_ALLOC(void)
+{
+ spinlock_t *sl = (spinlock_t *)1;
+
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
+ sl = FH_ALLOC(sizeof(*sl));
+ if (!sl) {
+ FH_ERROR("Cannot allocate memory for spinlock\n");
+ return NULL;
+ }
+
+ spin_lock_init(sl);
+#endif
+ return (fh_spinlock_t *)sl;
+}
+
+void FH_SPINLOCK_FREE(fh_spinlock_t *lock)
+{
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
+ FH_FREE(lock);
+#endif
+}
+
+void FH_SPINLOCK(fh_spinlock_t *lock)
+{
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
+ spin_lock((spinlock_t *)lock);
+#endif
+}
+
+void FH_SPINUNLOCK(fh_spinlock_t *lock)
+{
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
+ spin_unlock((spinlock_t *)lock);
+#endif
+}
+
+void FH_SPINLOCK_IRQSAVE(fh_spinlock_t *lock, fh_irqflags_t *flags)
+{
+ fh_irqflags_t f;
+
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
+ spin_lock_irqsave((spinlock_t *)lock, f);
+#else
+ local_irq_save(f);
+#endif
+ *flags = f;
+}
+
+void FH_SPINUNLOCK_IRQRESTORE(fh_spinlock_t *lock, fh_irqflags_t flags)
+{
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
+ spin_unlock_irqrestore((spinlock_t *)lock, flags);
+#else
+ local_irq_restore(flags);
+#endif
+}
+
+fh_mutex_t *FH_MUTEX_ALLOC(void)
+{
+ struct mutex *m;
+ fh_mutex_t *mutex = (fh_mutex_t *)FH_ALLOC(sizeof(struct mutex));
+
+ if (!mutex) {
+ FH_ERROR("Cannot allocate memory for mutex\n");
+ return NULL;
+ }
+
+ m = (struct mutex *)mutex;
+ mutex_init(m);
+ return mutex;
+}
+
+#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
+#else
+void FH_MUTEX_FREE(fh_mutex_t *mutex)
+{
+ mutex_destroy((struct mutex *)mutex);
+ FH_FREE(mutex);
+}
+#endif
+
+void FH_MUTEX_LOCK(fh_mutex_t *mutex)
+{
+ struct mutex *m = (struct mutex *)mutex;
+ mutex_lock(m);
+}
+
+int FH_MUTEX_TRYLOCK(fh_mutex_t *mutex)
+{
+ struct mutex *m = (struct mutex *)mutex;
+ return mutex_trylock(m);
+}
+
+void FH_MUTEX_UNLOCK(fh_mutex_t *mutex)
+{
+ struct mutex *m = (struct mutex *)mutex;
+ mutex_unlock(m);
+}
+
+
+/* Timing */
+
+void FH_UDELAY(uint32_t usecs)
+{
+ udelay(usecs);
+}
+
+void FH_MDELAY(uint32_t msecs)
+{
+ mdelay(msecs);
+}
+
+void FH_MSLEEP(uint32_t msecs)
+{
+ msleep(msecs);
+}
+
+uint32_t FH_TIME(void)
+{
+ return jiffies_to_msecs(jiffies);
+}
+
+
+/* Timers */
+
+struct fh_timer {
+ struct timer_list *t;
+ char *name;
+ fh_timer_callback_t cb;
+ void *data;
+ uint8_t scheduled;
+ fh_spinlock_t *lock;
+};
+
+static void timer_callback(unsigned long data)
+{
+ fh_timer_t *timer = (fh_timer_t *)data;
+ fh_irqflags_t flags;
+
+ FH_SPINLOCK_IRQSAVE(timer->lock, &flags);
+ timer->scheduled = 0;
+ FH_SPINUNLOCK_IRQRESTORE(timer->lock, flags);
+ FH_DEBUG("Timer %s callback", timer->name);
+ timer->cb(timer->data);
+}
+
+fh_timer_t *FH_TIMER_ALLOC(char *name, fh_timer_callback_t cb, void *data)
+{
+ fh_timer_t *t = FH_ALLOC(sizeof(*t));
+
+ if (!t) {
+ FH_ERROR("Cannot allocate memory for timer");
+ return NULL;
+ }
+
+ t->t = FH_ALLOC(sizeof(*t->t));
+ if (!t->t) {
+ FH_ERROR("Cannot allocate memory for timer->t");
+ goto no_timer;
+ }
+
+ t->name = FH_STRDUP(name);
+ if (!t->name) {
+ FH_ERROR("Cannot allocate memory for timer->name");
+ goto no_name;
+ }
+
+ t->lock = FH_SPINLOCK_ALLOC();
+ if (!t->lock) {
+ FH_ERROR("Cannot allocate memory for lock");
+ goto no_lock;
+ }
+
+ t->scheduled = 0;
+ t->t->base = &boot_tvec_bases;
+ t->t->expires = jiffies;
+ setup_timer(t->t, timer_callback, (unsigned long)t);
+
+ t->cb = cb;
+ t->data = data;
+
+ return t;
+
+ no_lock:
+ FH_FREE(t->name);
+ no_name:
+ FH_FREE(t->t);
+ no_timer:
+ FH_FREE(t);
+ return NULL;
+}
+
+void FH_TIMER_FREE(fh_timer_t *timer)
+{
+ fh_irqflags_t flags;
+
+ FH_SPINLOCK_IRQSAVE(timer->lock, &flags);
+
+ if (timer->scheduled) {
+ del_timer(timer->t);
+ timer->scheduled = 0;
+ }
+
+ FH_SPINUNLOCK_IRQRESTORE(timer->lock, flags);
+ FH_SPINLOCK_FREE(timer->lock);
+ FH_FREE(timer->t);
+ FH_FREE(timer->name);
+ FH_FREE(timer);
+}
+
+void FH_TIMER_SCHEDULE(fh_timer_t *timer, uint32_t time)
+{
+ fh_irqflags_t flags;
+
+ FH_SPINLOCK_IRQSAVE(timer->lock, &flags);
+
+ if (!timer->scheduled) {
+ timer->scheduled = 1;
+ FH_DEBUG("Scheduling timer %s to expire in +%d msec", timer->name, time);
+ timer->t->expires = jiffies + msecs_to_jiffies(time);
+ add_timer(timer->t);
+ } else {
+ FH_DEBUG("Modifying timer %s to expire in +%d msec", timer->name, time);
+ mod_timer(timer->t, jiffies + msecs_to_jiffies(time));
+ }
+
+ FH_SPINUNLOCK_IRQRESTORE(timer->lock, flags);
+}
+
+void FH_TIMER_CANCEL(fh_timer_t *timer)
+{
+ del_timer(timer->t);
+}
+
+
+/* Wait Queues */
+
+struct fh_waitq {
+ wait_queue_head_t queue;
+ int abort;
+};
+
+fh_waitq_t *FH_WAITQ_ALLOC(void)
+{
+ fh_waitq_t *wq = FH_ALLOC(sizeof(*wq));
+
+ if (!wq) {
+ FH_ERROR("Cannot allocate memory for waitqueue\n");
+ return NULL;
+ }
+
+ init_waitqueue_head(&wq->queue);
+ wq->abort = 0;
+ return wq;
+}
+
+void FH_WAITQ_FREE(fh_waitq_t *wq)
+{
+ FH_FREE(wq);
+}
+
+int32_t FH_WAITQ_WAIT(fh_waitq_t *wq, fh_waitq_condition_t cond, void *data)
+{
+ int result = wait_event_interruptible(wq->queue,
+ cond(data) || wq->abort);
+ if (result == -ERESTARTSYS) {
+ wq->abort = 0;
+ return -FH_E_RESTART;
+ }
+
+ if (wq->abort == 1) {
+ wq->abort = 0;
+ return -FH_E_ABORT;
+ }
+
+ wq->abort = 0;
+
+ if (result == 0) {
+ return 0;
+ }
+
+ return -FH_E_UNKNOWN;
+}
+
+int32_t FH_WAITQ_WAIT_TIMEOUT(fh_waitq_t *wq, fh_waitq_condition_t cond,
+ void *data, int32_t msecs)
+{
+ int32_t tmsecs;
+ int result = wait_event_interruptible_timeout(wq->queue,
+ cond(data) || wq->abort,
+ msecs_to_jiffies(msecs));
+ if (result == -ERESTARTSYS) {
+ wq->abort = 0;
+ return -FH_E_RESTART;
+ }
+
+ if (wq->abort == 1) {
+ wq->abort = 0;
+ return -FH_E_ABORT;
+ }
+
+ wq->abort = 0;
+
+ if (result > 0) {
+ tmsecs = jiffies_to_msecs(result);
+ if (!tmsecs) {
+ return 1;
+ }
+
+ return tmsecs;
+ }
+
+ if (result == 0) {
+ return -FH_E_TIMEOUT;
+ }
+
+ return -FH_E_UNKNOWN;
+}
+
+void FH_WAITQ_TRIGGER(fh_waitq_t *wq)
+{
+ wq->abort = 0;
+ wake_up_interruptible(&wq->queue);
+}
+
+void FH_WAITQ_ABORT(fh_waitq_t *wq)
+{
+ wq->abort = 1;
+ wake_up_interruptible(&wq->queue);
+}
+
+
+/* Threading */
+
+fh_thread_t *FH_THREAD_RUN(fh_thread_function_t func, char *name, void *data)
+{
+ struct task_struct *thread = kthread_run(func, data, name);
+
+	if (IS_ERR(thread)) {
+ return NULL;
+ }
+
+ return (fh_thread_t *)thread;
+}
+
+int FH_THREAD_STOP(fh_thread_t *thread)
+{
+ return kthread_stop((struct task_struct *)thread);
+}
+
+fh_bool_t FH_THREAD_SHOULD_STOP(void)
+{
+ return kthread_should_stop();
+}
+
+
+/* tasklets
+ - run in interrupt context (cannot sleep)
+ - each tasklet runs on a single CPU
+ - different tasklets can be running simultaneously on different CPUs
+ */
+struct fh_tasklet {
+ struct tasklet_struct t;
+ fh_tasklet_callback_t cb;
+ void *data;
+};
+
+static void tasklet_callback(unsigned long data)
+{
+ fh_tasklet_t *t = (fh_tasklet_t *)data;
+ t->cb(t->data);
+}
+
+fh_tasklet_t *FH_TASK_ALLOC(char *name, fh_tasklet_callback_t cb, void *data)
+{
+ fh_tasklet_t *t = FH_ALLOC(sizeof(*t));
+
+ if (t) {
+ t->cb = cb;
+ t->data = data;
+ tasklet_init(&t->t, tasklet_callback, (unsigned long)t);
+ } else {
+ FH_ERROR("Cannot allocate memory for tasklet\n");
+ }
+
+ return t;
+}
+
+void FH_TASK_FREE(fh_tasklet_t *task)
+{
+ FH_FREE(task);
+}
+
+void FH_TASK_SCHEDULE(fh_tasklet_t *task)
+{
+ tasklet_schedule(&task->t);
+}
+
+
+/* workqueues
+ - run in process context (can sleep)
+ */
+typedef struct work_container {
+ fh_work_callback_t cb;
+ void *data;
+ fh_workq_t *wq;
+ char *name;
+
+#ifdef DEBUG
+ FH_CIRCLEQ_ENTRY(work_container) entry;
+#endif
+ struct delayed_work work;
+} work_container_t;
+
+#ifdef DEBUG
+FH_CIRCLEQ_HEAD(work_container_queue, work_container);
+#endif
+
+struct fh_workq {
+ struct workqueue_struct *wq;
+ fh_spinlock_t *lock;
+ fh_waitq_t *waitq;
+ int pending;
+
+#ifdef DEBUG
+ struct work_container_queue entries;
+#endif
+};
+
+static void do_work(struct work_struct *work)
+{
+ fh_irqflags_t flags;
+ struct delayed_work *dw = container_of(work, struct delayed_work, work);
+ work_container_t *container = container_of(dw, struct work_container, work);
+ fh_workq_t *wq = container->wq;
+
+ container->cb(container->data);
+
+#ifdef DEBUG
+ FH_CIRCLEQ_REMOVE(&wq->entries, container, entry);
+#endif
+ FH_DEBUG("Work done: %s, container=%p", container->name, container);
+ if (container->name) {
+ FH_FREE(container->name);
+ }
+ FH_FREE(container);
+
+ FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+ wq->pending--;
+ FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+ FH_WAITQ_TRIGGER(wq->waitq);
+}
+
+static int work_done(void *data)
+{
+ fh_workq_t *workq = (fh_workq_t *)data;
+ return workq->pending == 0;
+}
+
+int FH_WORKQ_WAIT_WORK_DONE(fh_workq_t *workq, int timeout)
+{
+ return FH_WAITQ_WAIT_TIMEOUT(workq->waitq, work_done, workq, timeout);
+}
+
+fh_workq_t *FH_WORKQ_ALLOC(char *name)
+{
+ fh_workq_t *wq = FH_ALLOC(sizeof(*wq));
+
+ if (!wq) {
+ return NULL;
+ }
+
+ wq->wq = create_singlethread_workqueue(name);
+ if (!wq->wq) {
+ goto no_wq;
+ }
+
+ wq->pending = 0;
+
+ wq->lock = FH_SPINLOCK_ALLOC();
+ if (!wq->lock) {
+ goto no_lock;
+ }
+
+ wq->waitq = FH_WAITQ_ALLOC();
+ if (!wq->waitq) {
+ goto no_waitq;
+ }
+
+#ifdef DEBUG
+ FH_CIRCLEQ_INIT(&wq->entries);
+#endif
+ return wq;
+
+ no_waitq:
+ FH_SPINLOCK_FREE(wq->lock);
+ no_lock:
+ destroy_workqueue(wq->wq);
+ no_wq:
+ FH_FREE(wq);
+
+ return NULL;
+}
+
+void FH_WORKQ_FREE(fh_workq_t *wq)
+{
+#ifdef DEBUG
+ if (wq->pending != 0) {
+ struct work_container *wc;
+ FH_ERROR("Destroying work queue with pending work");
+ FH_CIRCLEQ_FOREACH(wc, &wq->entries, entry) {
+ FH_ERROR("Work %s still pending", wc->name);
+ }
+ }
+#endif
+ destroy_workqueue(wq->wq);
+ FH_SPINLOCK_FREE(wq->lock);
+ FH_WAITQ_FREE(wq->waitq);
+ FH_FREE(wq);
+}
+bool FH_SCHEDULE_SYSTEM_WORK(struct work_struct *work){
+
+ return queue_work(system_wq, work);
+}
+
+void FH_WORKQ_SCHEDULE(fh_workq_t *wq, fh_work_callback_t cb, void *data,
+ char *format, ...)
+{
+ fh_irqflags_t flags;
+ work_container_t *container;
+	char name[128];
+ va_list args;
+
+ va_start(args, format);
+ FH_VSNPRINTF(name, 128, format, args);
+ va_end(args);
+
+ FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+ wq->pending++;
+ FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+ FH_WAITQ_TRIGGER(wq->waitq);
+
+ container = FH_ALLOC_ATOMIC(sizeof(*container));
+ if (!container) {
+ FH_ERROR("Cannot allocate memory for container\n");
+ return;
+ }
+
+ container->name = FH_STRDUP(name);
+ if (!container->name) {
+ FH_ERROR("Cannot allocate memory for container->name\n");
+ FH_FREE(container);
+ return;
+ }
+
+ container->cb = cb;
+ container->data = data;
+ container->wq = wq;
+ FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
+ INIT_WORK(&container->work.work, do_work);
+
+#ifdef DEBUG
+ FH_CIRCLEQ_INSERT_TAIL(&wq->entries, container, entry);
+#endif
+ queue_work(wq->wq, &container->work.work);
+}
+
+void FH_WORKQ_SCHEDULE_DELAYED(fh_workq_t *wq, fh_work_callback_t cb,
+ void *data, uint32_t time, char *format, ...)
+{
+ fh_irqflags_t flags;
+ work_container_t *container;
+	char name[128];
+ va_list args;
+
+ va_start(args, format);
+ FH_VSNPRINTF(name, 128, format, args);
+ va_end(args);
+
+ FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+ wq->pending++;
+ FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+ FH_WAITQ_TRIGGER(wq->waitq);
+
+ container = FH_ALLOC_ATOMIC(sizeof(*container));
+ if (!container) {
+ FH_ERROR("Cannot allocate memory for container\n");
+ return;
+ }
+
+ container->name = FH_STRDUP(name);
+ if (!container->name) {
+ FH_ERROR("Cannot allocate memory for container->name\n");
+ FH_FREE(container);
+ return;
+ }
+
+ container->cb = cb;
+ container->data = data;
+ container->wq = wq;
+ FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
+ INIT_DELAYED_WORK(&container->work, do_work);
+
+#ifdef DEBUG
+ FH_CIRCLEQ_INSERT_TAIL(&wq->entries, container, entry);
+#endif
+ queue_delayed_work(wq->wq, &container->work, msecs_to_jiffies(time));
+}
+
+int FH_WORKQ_PENDING(fh_workq_t *wq)
+{
+ return wq->pending;
+}
+
+
+#ifdef FH_LIBMODULE
+
+#ifdef FH_CCLIB
+/* CC */
+EXPORT_SYMBOL(fh_cc_if_alloc);
+EXPORT_SYMBOL(fh_cc_if_free);
+EXPORT_SYMBOL(fh_cc_clear);
+EXPORT_SYMBOL(fh_cc_add);
+EXPORT_SYMBOL(fh_cc_remove);
+EXPORT_SYMBOL(fh_cc_change);
+EXPORT_SYMBOL(fh_cc_data_for_save);
+EXPORT_SYMBOL(fh_cc_restore_from_data);
+EXPORT_SYMBOL(fh_cc_match_chid);
+EXPORT_SYMBOL(fh_cc_match_cdid);
+EXPORT_SYMBOL(fh_cc_ck);
+EXPORT_SYMBOL(fh_cc_chid);
+EXPORT_SYMBOL(fh_cc_cdid);
+EXPORT_SYMBOL(fh_cc_name);
+#endif /* FH_CCLIB */
+
+#ifdef FH_CRYPTOLIB
+# ifndef CONFIG_MACH_IPMATE
+/* Modpow */
+EXPORT_SYMBOL(fh_modpow);
+
+/* DH */
+EXPORT_SYMBOL(fh_dh_modpow);
+EXPORT_SYMBOL(fh_dh_derive_keys);
+EXPORT_SYMBOL(fh_dh_pk);
+# endif /* CONFIG_MACH_IPMATE */
+
+/* Crypto */
+EXPORT_SYMBOL(fh_wusb_aes_encrypt);
+EXPORT_SYMBOL(fh_wusb_cmf);
+EXPORT_SYMBOL(fh_wusb_prf);
+EXPORT_SYMBOL(fh_wusb_fill_ccm_nonce);
+EXPORT_SYMBOL(fh_wusb_gen_nonce);
+EXPORT_SYMBOL(fh_wusb_gen_key);
+EXPORT_SYMBOL(fh_wusb_gen_mic);
+#endif /* FH_CRYPTOLIB */
+
+/* Notification */
+#ifdef FH_NOTIFYLIB
+EXPORT_SYMBOL(fh_alloc_notification_manager);
+EXPORT_SYMBOL(fh_free_notification_manager);
+EXPORT_SYMBOL(fh_register_notifier);
+EXPORT_SYMBOL(fh_unregister_notifier);
+EXPORT_SYMBOL(fh_add_observer);
+EXPORT_SYMBOL(fh_remove_observer);
+EXPORT_SYMBOL(fh_notify);
+#endif
+
+/* Memory Debugging Routines */
+#ifdef FH_DEBUG_MEMORY
+EXPORT_SYMBOL(fh_alloc_debug);
+EXPORT_SYMBOL(fh_alloc_atomic_debug);
+EXPORT_SYMBOL(fh_free_debug);
+EXPORT_SYMBOL(fh_dma_alloc_debug);
+EXPORT_SYMBOL(fh_dma_free_debug);
+#endif
+
+EXPORT_SYMBOL(FH_MEMSET);
+EXPORT_SYMBOL(FH_MEMCPY);
+EXPORT_SYMBOL(FH_MEMMOVE);
+EXPORT_SYMBOL(FH_MEMCMP);
+EXPORT_SYMBOL(FH_STRNCMP);
+EXPORT_SYMBOL(FH_STRCMP);
+EXPORT_SYMBOL(FH_STRLEN);
+EXPORT_SYMBOL(FH_STRCPY);
+EXPORT_SYMBOL(FH_STRDUP);
+EXPORT_SYMBOL(FH_ATOI);
+EXPORT_SYMBOL(FH_ATOUI);
+
+#ifdef FH_UTFLIB
+EXPORT_SYMBOL(FH_UTF8_TO_UTF16LE);
+#endif /* FH_UTFLIB */
+
+EXPORT_SYMBOL(FH_IN_IRQ);
+EXPORT_SYMBOL(FH_IN_BH);
+EXPORT_SYMBOL(FH_VPRINTF);
+EXPORT_SYMBOL(FH_VSNPRINTF);
+EXPORT_SYMBOL(FH_PRINTF);
+EXPORT_SYMBOL(FH_SPRINTF);
+EXPORT_SYMBOL(FH_SNPRINTF);
+EXPORT_SYMBOL(__FH_WARN);
+EXPORT_SYMBOL(__FH_ERROR);
+EXPORT_SYMBOL(FH_EXCEPTION);
+
+#ifdef DEBUG
+EXPORT_SYMBOL(__FH_DEBUG);
+#endif
+
+EXPORT_SYMBOL(__FH_DMA_ALLOC);
+EXPORT_SYMBOL(__FH_DMA_ALLOC_ATOMIC);
+EXPORT_SYMBOL(__FH_DMA_FREE);
+EXPORT_SYMBOL(__FH_ALLOC);
+EXPORT_SYMBOL(__FH_ALLOC_ATOMIC);
+EXPORT_SYMBOL(__FH_FREE);
+
+#ifdef FH_CRYPTOLIB
+EXPORT_SYMBOL(FH_RANDOM_BYTES);
+EXPORT_SYMBOL(FH_AES_CBC);
+EXPORT_SYMBOL(FH_SHA256);
+EXPORT_SYMBOL(FH_HMAC_SHA256);
+#endif
+
+EXPORT_SYMBOL(FH_CPU_TO_LE32);
+EXPORT_SYMBOL(FH_CPU_TO_BE32);
+EXPORT_SYMBOL(FH_LE32_TO_CPU);
+EXPORT_SYMBOL(FH_BE32_TO_CPU);
+EXPORT_SYMBOL(FH_CPU_TO_LE16);
+EXPORT_SYMBOL(FH_CPU_TO_BE16);
+EXPORT_SYMBOL(FH_LE16_TO_CPU);
+EXPORT_SYMBOL(FH_BE16_TO_CPU);
+EXPORT_SYMBOL(FH_READ_REG32);
+EXPORT_SYMBOL(FH_WRITE_REG32);
+EXPORT_SYMBOL(FH_MODIFY_REG32);
+
+#if 0
+EXPORT_SYMBOL(FH_READ_REG64);
+EXPORT_SYMBOL(FH_WRITE_REG64);
+EXPORT_SYMBOL(FH_MODIFY_REG64);
+#endif
+
+EXPORT_SYMBOL(FH_SPINLOCK_ALLOC);
+EXPORT_SYMBOL(FH_SPINLOCK_FREE);
+EXPORT_SYMBOL(FH_SPINLOCK);
+EXPORT_SYMBOL(FH_SPINUNLOCK);
+EXPORT_SYMBOL(FH_SPINLOCK_IRQSAVE);
+EXPORT_SYMBOL(FH_SPINUNLOCK_IRQRESTORE);
+EXPORT_SYMBOL(FH_MUTEX_ALLOC);
+
+#if (!defined(FH_LINUX) || !defined(CONFIG_DEBUG_MUTEXES))
+EXPORT_SYMBOL(FH_MUTEX_FREE);
+#endif
+
+EXPORT_SYMBOL(FH_MUTEX_LOCK);
+EXPORT_SYMBOL(FH_MUTEX_TRYLOCK);
+EXPORT_SYMBOL(FH_MUTEX_UNLOCK);
+EXPORT_SYMBOL(FH_UDELAY);
+EXPORT_SYMBOL(FH_MDELAY);
+EXPORT_SYMBOL(FH_MSLEEP);
+EXPORT_SYMBOL(FH_TIME);
+EXPORT_SYMBOL(FH_TIMER_ALLOC);
+EXPORT_SYMBOL(FH_TIMER_FREE);
+EXPORT_SYMBOL(FH_TIMER_SCHEDULE);
+EXPORT_SYMBOL(FH_TIMER_CANCEL);
+EXPORT_SYMBOL(FH_WAITQ_ALLOC);
+EXPORT_SYMBOL(FH_WAITQ_FREE);
+EXPORT_SYMBOL(FH_WAITQ_WAIT);
+EXPORT_SYMBOL(FH_WAITQ_WAIT_TIMEOUT);
+EXPORT_SYMBOL(FH_WAITQ_TRIGGER);
+EXPORT_SYMBOL(FH_WAITQ_ABORT);
+EXPORT_SYMBOL(FH_THREAD_RUN);
+EXPORT_SYMBOL(FH_THREAD_STOP);
+EXPORT_SYMBOL(FH_THREAD_SHOULD_STOP);
+EXPORT_SYMBOL(FH_TASK_ALLOC);
+EXPORT_SYMBOL(FH_TASK_FREE);
+EXPORT_SYMBOL(FH_TASK_SCHEDULE);
+EXPORT_SYMBOL(FH_WORKQ_WAIT_WORK_DONE);
+EXPORT_SYMBOL(FH_WORKQ_ALLOC);
+EXPORT_SYMBOL(FH_WORKQ_FREE);
+EXPORT_SYMBOL(FH_SCHEDULE_SYSTEM_WORK);
+EXPORT_SYMBOL(FH_WORKQ_SCHEDULE);
+EXPORT_SYMBOL(FH_WORKQ_SCHEDULE_DELAYED);
+EXPORT_SYMBOL(FH_WORKQ_PENDING);
+
+static int fh_common_port_init_module(void)
+{
+ int result = 0;
+
+ printk(KERN_DEBUG "Module fh_common_port init\n" );
+
+#ifdef FH_DEBUG_MEMORY
+ result = fh_memory_debug_start(NULL);
+ if (result) {
+ printk(KERN_ERR
+ "fh_memory_debug_start() failed with error %d\n",
+ result);
+ return result;
+ }
+#endif
+
+#ifdef FH_NOTIFYLIB
+ result = fh_alloc_notification_manager(NULL, NULL);
+ if (result) {
+ printk(KERN_ERR
+ "fh_alloc_notification_manager() failed with error %d\n",
+ result);
+ return result;
+ }
+#endif
+ return result;
+}
+
+static void fh_common_port_exit_module(void)
+{
+ printk(KERN_DEBUG "Module fh_common_port exit\n" );
+
+#ifdef FH_NOTIFYLIB
+ fh_free_notification_manager();
+#endif
+
+#ifdef FH_DEBUG_MEMORY
+ fh_memory_debug_stop();
+#endif
+}
+
+module_init(fh_common_port_init_module);
+module_exit(fh_common_port_exit_module);
+
+MODULE_DESCRIPTION("FH Common Library - Portable version");
+MODULE_AUTHOR("Synopsys Inc.");
+MODULE_LICENSE ("GPL");
+
+#endif /* FH_LIBMODULE */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_common_nbsd.c b/drivers/usb/host/fh_otg/fh_common_port/fh_common_nbsd.c
new file mode 100644
index 00000000..188eabc5
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_common_nbsd.c
@@ -0,0 +1,1275 @@
+#include "fh_os.h"
+#include "fh_list.h"
+
+#ifdef FH_CCLIB
+# include "fh_cc.h"
+#endif
+
+#ifdef FH_CRYPTOLIB
+# include "fh_modpow.h"
+# include "fh_dh.h"
+# include "fh_crypto.h"
+#endif
+
+#ifdef FH_NOTIFYLIB
+# include "fh_notifier.h"
+#endif
+
+/* OS-Level Implementations */
+
+/* This is the NetBSD 4.0.1 kernel implementation of the FH platform library. */
+
+
+/* MISC */
+
+void *FH_MEMSET(void *dest, uint8_t byte, uint32_t size)
+{
+ return memset(dest, byte, size);
+}
+
+void *FH_MEMCPY(void *dest, void const *src, uint32_t size)
+{
+ return memcpy(dest, src, size);
+}
+
+void *FH_MEMMOVE(void *dest, void *src, uint32_t size)
+{
+ bcopy(src, dest, size);
+ return dest;
+}
+
+int FH_MEMCMP(void *m1, void *m2, uint32_t size)
+{
+ return memcmp(m1, m2, size);
+}
+
+int FH_STRNCMP(void *s1, void *s2, uint32_t size)
+{
+ return strncmp(s1, s2, size);
+}
+
+int FH_STRCMP(void *s1, void *s2)
+{
+ return strcmp(s1, s2);
+}
+
+int FH_STRLEN(char const *str)
+{
+ return strlen(str);
+}
+
+char *FH_STRCPY(char *to, char const *from)
+{
+ return strcpy(to, from);
+}
+
+char *FH_STRDUP(char const *str)
+{
+ int len = FH_STRLEN(str) + 1;
+ char *new = FH_ALLOC_ATOMIC(len);
+
+ if (!new) {
+ return NULL;
+ }
+
+ FH_MEMCPY(new, str, len);
+ return new;
+}
+
+int FH_ATOI(char *str, int32_t *value)
+{
+ char *end = NULL;
+
+ /* NetBSD doesn't have 'strtol' in the kernel, but 'strtoul'
+ * should be equivalent on 2's complement machines
+ */
+ *value = strtoul(str, &end, 0);
+ if (*end == '\0') {
+ return 0;
+ }
+
+ return -1;
+}
+
+int FH_ATOUI(char *str, uint32_t *value)
+{
+ char *end = NULL;
+
+ *value = strtoul(str, &end, 0);
+ if (*end == '\0') {
+ return 0;
+ }
+
+ return -1;
+}
+
+
+#ifdef FH_UTFLIB
+/* From usbstring.c */
+
+int FH_UTF8_TO_UTF16LE(uint8_t const *s, uint16_t *cp, unsigned len)
+{
+ int count = 0;
+ u8 c;
+ u16 uchar;
+
+ /* this insists on correct encodings, though not minimal ones.
+ * BUT it currently rejects legit 4-byte UTF-8 code points,
+ * which need surrogate pairs. (Unicode 3.1 can use them.)
+ */
+ while (len != 0 && (c = (u8) *s++) != 0) {
+ if (unlikely(c & 0x80)) {
+ // 2-byte sequence:
+ // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
+ if ((c & 0xe0) == 0xc0) {
+ uchar = (c & 0x1f) << 6;
+
+ c = (u8) *s++;
+				if ((c & 0xc0) != 0x80)
+ goto fail;
+ c &= 0x3f;
+ uchar |= c;
+
+ // 3-byte sequence (most CJKV characters):
+ // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
+ } else if ((c & 0xf0) == 0xe0) {
+ uchar = (c & 0x0f) << 12;
+
+ c = (u8) *s++;
+				if ((c & 0xc0) != 0x80)
+ goto fail;
+ c &= 0x3f;
+ uchar |= c << 6;
+
+ c = (u8) *s++;
+				if ((c & 0xc0) != 0x80)
+ goto fail;
+ c &= 0x3f;
+ uchar |= c;
+
+ /* no bogus surrogates */
+ if (0xd800 <= uchar && uchar <= 0xdfff)
+ goto fail;
+
+ // 4-byte sequence (surrogate pairs, currently rare):
+ // 11101110wwwwzzzzyy + 110111yyyyxxxxxx
+ // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
+ // (uuuuu = wwww + 1)
+ // FIXME accept the surrogate code points (only)
+ } else
+ goto fail;
+ } else
+ uchar = c;
+ put_unaligned (cpu_to_le16 (uchar), cp++);
+ count++;
+ len--;
+ }
+ return count;
+fail:
+ return -1;
+}
+
+#endif /* FH_UTFLIB */
+
+
+/* fh_debug.h */
+
+fh_bool_t FH_IN_IRQ(void)
+{
+// return in_irq();
+ return 0;
+}
+
+fh_bool_t FH_IN_BH(void)
+{
+// return in_softirq();
+ return 0;
+}
+
+void FH_VPRINTF(char *format, va_list args)
+{
+ vprintf(format, args);
+}
+
+int FH_VSNPRINTF(char *str, int size, char *format, va_list args)
+{
+ return vsnprintf(str, size, format, args);
+}
+
+void FH_PRINTF(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+
+int FH_SPRINTF(char *buffer, char *format, ...)
+{
+ int retval;
+ va_list args;
+
+ va_start(args, format);
+ retval = vsprintf(buffer, format, args);
+ va_end(args);
+ return retval;
+}
+
+int FH_SNPRINTF(char *buffer, int size, char *format, ...)
+{
+ int retval;
+ va_list args;
+
+ va_start(args, format);
+ retval = vsnprintf(buffer, size, format, args);
+ va_end(args);
+ return retval;
+}
+
+void __FH_WARN(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+
+void __FH_ERROR(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+
+void FH_EXCEPTION(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+// BUG_ON(1); ???
+}
+
+#ifdef DEBUG
+void __FH_DEBUG(char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ FH_VPRINTF(format, args);
+ va_end(args);
+}
+#endif
+
+
+/* fh_mem.h */
+
+#if 0
+fh_pool_t *FH_DMA_POOL_CREATE(uint32_t size,
+ uint32_t align,
+ uint32_t alloc)
+{
+ struct dma_pool *pool = dma_pool_create("Pool", NULL,
+ size, align, alloc);
+ return (fh_pool_t *)pool;
+}
+
+void FH_DMA_POOL_DESTROY(fh_pool_t *pool)
+{
+ dma_pool_destroy((struct dma_pool *)pool);
+}
+
+void *FH_DMA_POOL_ALLOC(fh_pool_t *pool, uint64_t *dma_addr)
+{
+// return dma_pool_alloc((struct dma_pool *)pool, GFP_KERNEL, dma_addr);
+ return dma_pool_alloc((struct dma_pool *)pool, M_WAITOK, dma_addr);
+}
+
+void *FH_DMA_POOL_ZALLOC(fh_pool_t *pool, uint64_t *dma_addr)
+{
+ void *vaddr = FH_DMA_POOL_ALLOC(pool, dma_addr);
+ memset(..);
+}
+
+void FH_DMA_POOL_FREE(fh_pool_t *pool, void *vaddr, void *daddr)
+{
+ dma_pool_free(pool, vaddr, daddr);
+}
+#endif
+
+void *__FH_DMA_ALLOC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr)
+{
+ fh_dmactx_t *dma = (fh_dmactx_t *)dma_ctx;
+ int error;
+
+ error = bus_dmamem_alloc(dma->dma_tag, size, 1, size, dma->segs,
+ sizeof(dma->segs) / sizeof(dma->segs[0]),
+ &dma->nsegs, BUS_DMA_NOWAIT);
+ if (error) {
+ printf("%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__,
+ (uintmax_t)size, error);
+ goto fail_0;
+ }
+
+ error = bus_dmamem_map(dma->dma_tag, dma->segs, dma->nsegs, size,
+ (caddr_t *)&dma->dma_vaddr,
+ BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
+ if (error) {
+ printf("%s: bus_dmamem_map failed: %d\n", __func__, error);
+ goto fail_1;
+ }
+
+ error = bus_dmamap_create(dma->dma_tag, size, 1, size, 0,
+ BUS_DMA_NOWAIT, &dma->dma_map);
+ if (error) {
+ printf("%s: bus_dmamap_create failed: %d\n", __func__, error);
+ goto fail_2;
+ }
+
+ error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+ size, NULL, BUS_DMA_NOWAIT);
+ if (error) {
+ printf("%s: bus_dmamap_load failed: %d\n", __func__, error);
+ goto fail_3;
+ }
+
+ dma->dma_paddr = (bus_addr_t)dma->segs[0].ds_addr;
+ *dma_addr = dma->dma_paddr;
+ return dma->dma_vaddr;
+
+fail_3:
+ bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
+fail_2:
+ bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
+fail_1:
+ bus_dmamem_free(dma->dma_tag, dma->segs, dma->nsegs);
+fail_0:
+ dma->dma_map = NULL;
+ dma->dma_vaddr = NULL;
+ dma->nsegs = 0;
+
+ return NULL;
+}
+
+void __FH_DMA_FREE(void *dma_ctx, uint32_t size, void *virt_addr, fh_dma_t dma_addr)
+{
+ fh_dmactx_t *dma = (fh_dmactx_t *)dma_ctx;
+
+ if (dma->dma_map != NULL) {
+ bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, size,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+ bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
+ bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
+ bus_dmamem_free(dma->dma_tag, dma->segs, dma->nsegs);
+ dma->dma_paddr = 0;
+ dma->dma_map = NULL;
+ dma->dma_vaddr = NULL;
+ dma->nsegs = 0;
+ }
+}
+
+void *__FH_ALLOC(void *mem_ctx, uint32_t size)
+{
+ return malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
+}
+
+void *__FH_ALLOC_ATOMIC(void *mem_ctx, uint32_t size)
+{
+ return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+}
+
+void __FH_FREE(void *mem_ctx, void *addr)
+{
+ free(addr, M_DEVBUF);
+}
+
+
+#ifdef FH_CRYPTOLIB
+/* fh_crypto.h */
+
+void FH_RANDOM_BYTES(uint8_t *buffer, uint32_t length)
+{
+ get_random_bytes(buffer, length);
+}
+
+int FH_AES_CBC(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t iv[16], uint8_t *out)
+{
+ struct crypto_blkcipher *tfm;
+ struct blkcipher_desc desc;
+ struct scatterlist sgd;
+ struct scatterlist sgs;
+
+ tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+ printk("failed to load transform for aes CBC\n");
+ return -1;
+ }
+
+ crypto_blkcipher_setkey(tfm, key, keylen);
+ crypto_blkcipher_set_iv(tfm, iv, 16);
+
+ sg_init_one(&sgd, out, messagelen);
+ sg_init_one(&sgs, message, messagelen);
+
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ if (crypto_blkcipher_encrypt(&desc, &sgd, &sgs, messagelen)) {
+ crypto_free_blkcipher(tfm);
+ FH_ERROR("AES CBC encryption failed");
+ return -1;
+ }
+
+ crypto_free_blkcipher(tfm);
+ return 0;
+}
+
+int FH_SHA256(uint8_t *message, uint32_t len, uint8_t *out)
+{
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+
+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ FH_ERROR("Failed to load transform for sha256: %ld", PTR_ERR(tfm));
+ return 0;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ sg_init_one(&sg, message, len);
+ crypto_hash_digest(&desc, &sg, len, out);
+ crypto_free_hash(tfm);
+
+ return 1;
+}
+
+int FH_HMAC_SHA256(uint8_t *message, uint32_t messagelen,
+ uint8_t *key, uint32_t keylen, uint8_t *out)
+{
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+
+ tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ FH_ERROR("Failed to load transform for hmac(sha256): %ld", PTR_ERR(tfm));
+ return 0;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ sg_init_one(&sg, message, messagelen);
+ crypto_hash_setkey(tfm, key, keylen);
+ crypto_hash_digest(&desc, &sg, messagelen, out);
+ crypto_free_hash(tfm);
+
+ return 1;
+}
+
+#endif /* FH_CRYPTOLIB */
+
+
+/* Byte Ordering Conversions */
+
+uint32_t FH_CPU_TO_LE32(uint32_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint32_t FH_CPU_TO_BE32(uint32_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint32_t FH_LE32_TO_CPU(uint32_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint32_t FH_BE32_TO_CPU(uint32_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+
+ return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
+#endif
+}
+
+uint16_t FH_CPU_TO_LE16(uint16_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+uint16_t FH_CPU_TO_BE16(uint16_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+uint16_t FH_LE16_TO_CPU(uint16_t *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+uint16_t FH_BE16_TO_CPU(uint16_t *p)
+{
+#ifdef __BIG_ENDIAN
+ return *p;
+#else
+ uint8_t *u_p = (uint8_t *)p;
+ return (u_p[1] | (u_p[0] << 8));
+#endif
+}
+
+
+/* Registers */
+
+uint32_t FH_READ_REG32(void *io_ctx, uint32_t volatile *reg)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ return bus_space_read_4(io->iot, io->ioh, ior);
+}
+
+#if 0
+uint64_t FH_READ_REG64(void *io_ctx, uint64_t volatile *reg)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ return bus_space_read_8(io->iot, io->ioh, ior);
+}
+#endif
+
+void FH_WRITE_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t value)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ bus_space_write_4(io->iot, io->ioh, ior, value);
+}
+
+#if 0
+void FH_WRITE_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t value)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ bus_space_write_8(io->iot, io->ioh, ior, value);
+}
+#endif
+
+void FH_MODIFY_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t clear_mask,
+ uint32_t set_mask)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ bus_space_write_4(io->iot, io->ioh, ior,
+ (bus_space_read_4(io->iot, io->ioh, ior) &
+ ~clear_mask) | set_mask);
+}
+
+#if 0
+void FH_MODIFY_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t clear_mask,
+ uint64_t set_mask)
+{
+ fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
+ bus_size_t ior = (bus_size_t)reg;
+
+ bus_space_write_8(io->iot, io->ioh, ior,
+ (bus_space_read_8(io->iot, io->ioh, ior) &
+ ~clear_mask) | set_mask);
+}
+#endif
+
+
+/* Locking */
+
+fh_spinlock_t *FH_SPINLOCK_ALLOC(void)
+{
+ struct simplelock *sl = FH_ALLOC(sizeof(*sl));
+
+ if (!sl) {
+ FH_ERROR("Cannot allocate memory for spinlock");
+ return NULL;
+ }
+
+ simple_lock_init(sl);
+ return (fh_spinlock_t *)sl;
+}
+
+void FH_SPINLOCK_FREE(fh_spinlock_t *lock)
+{
+ struct simplelock *sl = (struct simplelock *)lock;
+
+ FH_FREE(sl);
+}
+
+void FH_SPINLOCK(fh_spinlock_t *lock)
+{
+ simple_lock((struct simplelock *)lock);
+}
+
+void FH_SPINUNLOCK(fh_spinlock_t *lock)
+{
+ simple_unlock((struct simplelock *)lock);
+}
+
+void FH_SPINLOCK_IRQSAVE(fh_spinlock_t *lock, fh_irqflags_t *flags)
+{
+ simple_lock((struct simplelock *)lock);
+ *flags = splbio();
+}
+
+void FH_SPINUNLOCK_IRQRESTORE(fh_spinlock_t *lock, fh_irqflags_t flags)
+{
+ splx(flags);
+ simple_unlock((struct simplelock *)lock);
+}
+
+fh_mutex_t *FH_MUTEX_ALLOC(void)
+{
+ fh_mutex_t *mutex = FH_ALLOC(sizeof(struct lock));
+
+ if (!mutex) {
+ FH_ERROR("Cannot allocate memory for mutex");
+ return NULL;
+ }
+
+ lockinit((struct lock *)mutex, 0, "dw3mtx", 0, 0);
+ return mutex;
+}
+
+#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
+#else
+void FH_MUTEX_FREE(fh_mutex_t *mutex)
+{
+ FH_FREE(mutex);
+}
+#endif
+
+void FH_MUTEX_LOCK(fh_mutex_t *mutex)
+{
+ lockmgr((struct lock *)mutex, LK_EXCLUSIVE, NULL);
+}
+
+int FH_MUTEX_TRYLOCK(fh_mutex_t *mutex)
+{
+ int status;
+
+ status = lockmgr((struct lock *)mutex, LK_EXCLUSIVE | LK_NOWAIT, NULL);
+ return status == 0;
+}
+
+void FH_MUTEX_UNLOCK(fh_mutex_t *mutex)
+{
+ lockmgr((struct lock *)mutex, LK_RELEASE, NULL);
+}
+
+
+/* Timing */
+
+void FH_UDELAY(uint32_t usecs)
+{
+ DELAY(usecs);
+}
+
+void FH_MDELAY(uint32_t msecs)
+{
+	while (msecs--) { /* old do-while underflowed and spun ~2^32 times for msecs == 0 */
+		DELAY(1000);
+	}
+}
+
+void FH_MSLEEP(uint32_t msecs)
+{
+ struct timeval tv;
+
+ tv.tv_sec = msecs / 1000;
+ tv.tv_usec = (msecs - tv.tv_sec * 1000) * 1000;
+ tsleep(&tv, 0, "dw3slp", tvtohz(&tv));
+}
+
+uint32_t FH_TIME(void)
+{
+ struct timeval tv;
+
+ microuptime(&tv); // or getmicrouptime? (less precise, but faster)
+ return tv.tv_sec * 1000 + tv.tv_usec / 1000;
+}
+
+
+/* Timers */
+
+struct fh_timer {
+ struct callout t;
+ char *name;
+ fh_spinlock_t *lock;
+ fh_timer_callback_t cb;
+ void *data;
+};
+
+fh_timer_t *FH_TIMER_ALLOC(char *name, fh_timer_callback_t cb, void *data)
+{
+ fh_timer_t *t = FH_ALLOC(sizeof(*t));
+
+ if (!t) {
+ FH_ERROR("Cannot allocate memory for timer");
+ return NULL;
+ }
+
+ callout_init(&t->t);
+
+ t->name = FH_STRDUP(name);
+ if (!t->name) {
+ FH_ERROR("Cannot allocate memory for timer->name");
+ goto no_name;
+ }
+
+ t->lock = FH_SPINLOCK_ALLOC();
+ if (!t->lock) {
+ FH_ERROR("Cannot allocate memory for timer->lock");
+ goto no_lock;
+ }
+
+ t->cb = cb;
+ t->data = data;
+
+ return t;
+
+ no_lock:
+ FH_FREE(t->name);
+ no_name:
+ FH_FREE(t);
+
+ return NULL;
+}
+
+void FH_TIMER_FREE(fh_timer_t *timer)
+{
+ callout_stop(&timer->t);
+ FH_SPINLOCK_FREE(timer->lock);
+ FH_FREE(timer->name);
+ FH_FREE(timer);
+}
+
+void FH_TIMER_SCHEDULE(fh_timer_t *timer, uint32_t time)
+{
+ struct timeval tv;
+
+ tv.tv_sec = time / 1000;
+ tv.tv_usec = (time - tv.tv_sec * 1000) * 1000;
+ callout_reset(&timer->t, tvtohz(&tv), timer->cb, timer->data);
+}
+
+void FH_TIMER_CANCEL(fh_timer_t *timer)
+{
+ callout_stop(&timer->t);
+}
+
+
+/* Wait Queues */
+
+struct fh_waitq {
+ struct simplelock lock;
+ int abort;
+};
+
+fh_waitq_t *FH_WAITQ_ALLOC(void)
+{
+ fh_waitq_t *wq = FH_ALLOC(sizeof(*wq));
+
+ if (!wq) {
+ FH_ERROR("Cannot allocate memory for waitqueue");
+ return NULL;
+ }
+
+ simple_lock_init(&wq->lock);
+ wq->abort = 0;
+
+ return wq;
+}
+
+void FH_WAITQ_FREE(fh_waitq_t *wq)
+{
+ FH_FREE(wq);
+}
+
+int32_t FH_WAITQ_WAIT(fh_waitq_t *wq, fh_waitq_condition_t cond, void *data)
+{
+ int ipl;
+ int result = 0;
+
+ simple_lock(&wq->lock);
+ ipl = splbio();
+
+ /* Skip the sleep if already aborted or triggered */
+ if (!wq->abort && !cond(data)) {
+ splx(ipl);
+ result = ltsleep(wq, PCATCH, "dw3wat", 0, &wq->lock); // infinite timeout
+ ipl = splbio();
+ }
+
+ if (result == 0) { // awoken
+ if (wq->abort) {
+ wq->abort = 0;
+ result = -FH_E_ABORT;
+ } else {
+ result = 0;
+ }
+
+ splx(ipl);
+ simple_unlock(&wq->lock);
+ } else {
+ wq->abort = 0;
+ splx(ipl);
+ simple_unlock(&wq->lock);
+
+ if (result == ERESTART) { // signaled - restart
+ result = -FH_E_RESTART;
+ } else { // signaled - must be EINTR
+ result = -FH_E_ABORT;
+ }
+ }
+
+ return result;
+}
+
+int32_t FH_WAITQ_WAIT_TIMEOUT(fh_waitq_t *wq, fh_waitq_condition_t cond,
+ void *data, int32_t msecs)
+{
+ struct timeval tv, tv1, tv2;
+ int ipl;
+ int result = 0;
+
+ tv.tv_sec = msecs / 1000;
+ tv.tv_usec = (msecs - tv.tv_sec * 1000) * 1000;
+
+ simple_lock(&wq->lock);
+ ipl = splbio();
+
+ /* Skip the sleep if already aborted or triggered */
+ if (!wq->abort && !cond(data)) {
+ splx(ipl);
+ getmicrouptime(&tv1);
+ result = ltsleep(wq, PCATCH, "dw3wto", tvtohz(&tv), &wq->lock);
+ getmicrouptime(&tv2);
+ ipl = splbio();
+ }
+
+ if (result == 0) { // awoken
+ if (wq->abort) {
+ wq->abort = 0;
+ splx(ipl);
+ simple_unlock(&wq->lock);
+ result = -FH_E_ABORT;
+ } else {
+ splx(ipl);
+ simple_unlock(&wq->lock);
+
+ tv2.tv_usec -= tv1.tv_usec;
+ if (tv2.tv_usec < 0) {
+ tv2.tv_usec += 1000000;
+ tv2.tv_sec--;
+ }
+
+ tv2.tv_sec -= tv1.tv_sec;
+ result = tv2.tv_sec * 1000 + tv2.tv_usec / 1000;
+ result = msecs - result;
+ if (result <= 0)
+ result = 1;
+ }
+ } else {
+ wq->abort = 0;
+ splx(ipl);
+ simple_unlock(&wq->lock);
+
+ if (result == ERESTART) { // signaled - restart
+ result = -FH_E_RESTART;
+
+ } else if (result == EINTR) { // signaled - interrupt
+ result = -FH_E_ABORT;
+
+ } else { // timed out
+ result = -FH_E_TIMEOUT;
+ }
+ }
+
+ return result;
+}
+
+void FH_WAITQ_TRIGGER(fh_waitq_t *wq)
+{
+ wakeup(wq);
+}
+
+void FH_WAITQ_ABORT(fh_waitq_t *wq)
+{
+ int ipl;
+
+ simple_lock(&wq->lock);
+ ipl = splbio();
+ wq->abort = 1;
+ wakeup(wq);
+ splx(ipl);
+ simple_unlock(&wq->lock);
+}
+
+
+/* Threading */
+
+struct fh_thread {
+ struct proc *proc;
+ int abort;
+};
+
+fh_thread_t *FH_THREAD_RUN(fh_thread_function_t func, char *name, void *data)
+{
+ int retval;
+ fh_thread_t *thread = FH_ALLOC(sizeof(*thread));
+
+ if (!thread) {
+ return NULL;
+ }
+
+ thread->abort = 0;
+ retval = kthread_create1((void (*)(void *))func, data, &thread->proc,
+ "%s", name);
+ if (retval) {
+ FH_FREE(thread);
+ return NULL;
+ }
+
+ return thread;
+}
+
+int FH_THREAD_STOP(fh_thread_t *thread)
+{
+ int retval;
+
+ thread->abort = 1;
+ retval = tsleep(&thread->abort, 0, "dw3stp", 60 * hz);
+
+ if (retval == 0) {
+ /* FH_THREAD_EXIT() will free the thread struct */
+ return 0;
+ }
+
+ /* NOTE: We leak the thread struct if thread doesn't die */
+
+ if (retval == EWOULDBLOCK) {
+ return -FH_E_TIMEOUT;
+ }
+
+ return -FH_E_UNKNOWN;
+}
+
+fh_bool_t FH_THREAD_SHOULD_STOP(fh_thread_t *thread)
+{
+ return thread->abort;
+}
+
+void FH_THREAD_EXIT(fh_thread_t *thread)
+{
+ wakeup(&thread->abort);
+ FH_FREE(thread);
+ kthread_exit(0);
+}
+
+/* tasklets
+ - Runs in interrupt context (cannot sleep)
+ - Each tasklet runs on a single CPU
+ - Different tasklets can be running simultaneously on different CPUs
+ [ On NetBSD there is no corresponding mechanism, drivers don't have bottom-
+ halves. So we just call the callback directly from FH_TASK_SCHEDULE() ]
+ */
+struct fh_tasklet {
+ fh_tasklet_callback_t cb;
+ void *data;
+};
+
+static void tasklet_callback(void *data)
+{
+ fh_tasklet_t *task = (fh_tasklet_t *)data;
+
+ task->cb(task->data);
+}
+
+fh_tasklet_t *FH_TASK_ALLOC(char *name, fh_tasklet_callback_t cb, void *data)
+{
+ fh_tasklet_t *task = FH_ALLOC(sizeof(*task));
+
+ if (task) {
+ task->cb = cb;
+ task->data = data;
+ } else {
+ FH_ERROR("Cannot allocate memory for tasklet");
+ }
+
+ return task;
+}
+
+void FH_TASK_FREE(fh_tasklet_t *task)
+{
+ FH_FREE(task);
+}
+
+void FH_TASK_SCHEDULE(fh_tasklet_t *task)
+{
+ tasklet_callback(task);
+}
+
+
+/* workqueues
+ - Runs in process context (can sleep)
+ */
+typedef struct work_container {
+ fh_work_callback_t cb;
+ void *data;
+ fh_workq_t *wq;
+ char *name;
+ int hz;
+ struct work task;
+} work_container_t;
+
+struct fh_workq {
+ struct workqueue *taskq;
+ fh_spinlock_t *lock;
+ fh_waitq_t *waitq;
+ int pending;
+ struct work_container *container;
+};
+
+static void do_work(struct work *task, void *data)
+{
+ fh_workq_t *wq = (fh_workq_t *)data;
+ work_container_t *container = wq->container;
+ fh_irqflags_t flags;
+
+ if (container->hz) {
+ tsleep(container, 0, "dw3wrk", container->hz);
+ }
+
+ container->cb(container->data);
+ FH_DEBUG("Work done: %s, container=%p", container->name, container);
+
+ FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+ if (container->name)
+ FH_FREE(container->name);
+ FH_FREE(container);
+ wq->pending--;
+ FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+ FH_WAITQ_TRIGGER(wq->waitq);
+}
+
+static int work_done(void *data)
+{
+ fh_workq_t *workq = (fh_workq_t *)data;
+
+ return workq->pending == 0;
+}
+
+int FH_WORKQ_WAIT_WORK_DONE(fh_workq_t *workq, int timeout)
+{
+ return FH_WAITQ_WAIT_TIMEOUT(workq->waitq, work_done, workq, timeout);
+}
+
+fh_workq_t *FH_WORKQ_ALLOC(char *name)
+{
+ int result;
+ fh_workq_t *wq = FH_ALLOC(sizeof(*wq));
+
+ if (!wq) {
+ FH_ERROR("Cannot allocate memory for workqueue");
+ return NULL;
+ }
+
+ result = workqueue_create(&wq->taskq, name, do_work, wq, 0 /*PWAIT*/,
+ IPL_BIO, 0);
+ if (result) {
+ FH_ERROR("Cannot create workqueue");
+ goto no_taskq;
+ }
+
+ wq->pending = 0;
+
+ wq->lock = FH_SPINLOCK_ALLOC();
+ if (!wq->lock) {
+ FH_ERROR("Cannot allocate memory for spinlock");
+ goto no_lock;
+ }
+
+ wq->waitq = FH_WAITQ_ALLOC();
+ if (!wq->waitq) {
+ FH_ERROR("Cannot allocate memory for waitqueue");
+ goto no_waitq;
+ }
+
+ return wq;
+
+ no_waitq:
+ FH_SPINLOCK_FREE(wq->lock);
+ no_lock:
+ workqueue_destroy(wq->taskq);
+ no_taskq:
+ FH_FREE(wq);
+
+ return NULL;
+}
+
+void FH_WORKQ_FREE(fh_workq_t *wq)
+{
+#ifdef DEBUG
+ fh_irqflags_t flags;
+
+ FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+
+ if (wq->pending != 0) {
+ struct work_container *container = wq->container;
+
+ FH_ERROR("Destroying work queue with pending work");
+
+ if (container && container->name) {
+ FH_ERROR("Work %s still pending", container->name);
+ }
+ }
+
+ FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+#endif
+ FH_WAITQ_FREE(wq->waitq);
+ FH_SPINLOCK_FREE(wq->lock);
+ workqueue_destroy(wq->taskq);
+ FH_FREE(wq);
+}
+
+void FH_WORKQ_SCHEDULE(fh_workq_t *wq, fh_work_callback_t cb, void *data,
+		       char *format, ...)
+{
+	fh_irqflags_t flags;
+	work_container_t *container;
+	char name[128]; /* on stack: static buffer raced between concurrent callers */
+	va_list args;
+
+	va_start(args, format);
+	FH_VSNPRINTF(name, 128, format, args);
+	va_end(args);
+
+	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+	wq->pending++;
+	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+	FH_WAITQ_TRIGGER(wq->waitq);
+
+	container = FH_ALLOC_ATOMIC(sizeof(*container));
+	if (!container) {
+		FH_ERROR("Cannot allocate memory for container");
+		return;
+	}
+
+	container->name = FH_STRDUP(name);
+	if (!container->name) {
+		FH_ERROR("Cannot allocate memory for container->name");
+		FH_FREE(container);
+		return;
+	}
+
+	container->cb = cb;
+	container->data = data;
+	container->wq = wq;
+	container->hz = 0;
+	wq->container = container;
+
+	FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
+	workqueue_enqueue(wq->taskq, &container->task);
+}
+
+void FH_WORKQ_SCHEDULE_DELAYED(fh_workq_t *wq, fh_work_callback_t cb,
+			       void *data, uint32_t time, char *format, ...)
+{
+	fh_irqflags_t flags;
+	work_container_t *container;
+	char name[128]; /* on stack: static buffer raced between concurrent callers */
+	struct timeval tv;
+	va_list args;
+
+	va_start(args, format);
+	FH_VSNPRINTF(name, 128, format, args);
+	va_end(args);
+
+	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
+	wq->pending++;
+	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
+	FH_WAITQ_TRIGGER(wq->waitq);
+
+	container = FH_ALLOC_ATOMIC(sizeof(*container));
+	if (!container) {
+		FH_ERROR("Cannot allocate memory for container");
+		return;
+	}
+
+	container->name = FH_STRDUP(name);
+	if (!container->name) {
+		FH_ERROR("Cannot allocate memory for container->name");
+		FH_FREE(container);
+		return;
+	}
+
+	container->cb = cb;
+	container->data = data;
+	container->wq = wq;
+	tv.tv_sec = time / 1000;
+	tv.tv_usec = (time - tv.tv_sec * 1000) * 1000;
+	container->hz = tvtohz(&tv);
+	wq->container = container;
+
+	FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
+	workqueue_enqueue(wq->taskq, &container->task);
+}
+
+int FH_WORKQ_PENDING(fh_workq_t *wq)
+{
+ return wq->pending;
+}
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.c b/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.c
new file mode 100644
index 00000000..c63fd24e
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.c
@@ -0,0 +1,308 @@
+/* =========================================================================
+ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_crypto.c $
+ * $Revision: #5 $
+ * $Date: 2010/09/28 $
+ * $Change: 1596182 $
+ *
+ * Synopsys Portability Library Software and documentation
+ * (hereinafter, "Software") is an Unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing
+ * between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for
+ * Licensed Product with Synopsys or any supplement thereto. You are
+ * permitted to use and redistribute this Software in source and binary
+ * forms, with or without modification, provided that redistributions
+ * of source code must retain this notice. You may not view, use,
+ * disclose, copy or distribute this file or any information contained
+ * herein except pursuant to this license grant from Synopsys. If you
+ * do not agree with this notice, including the disclaimer below, then
+ * you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
+ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================= */
+
+/** @file
+ * This file contains the WUSB cryptographic routines.
+ */
+
+#ifdef FH_CRYPTOLIB
+
+#include "fh_crypto.h"
+#include "usb.h"
+
+#ifdef DEBUG
+static inline void dump_bytes(char *name, uint8_t *bytes, int len)
+{
+ int i;
+ FH_PRINTF("%s: ", name);
+ for (i=0; i<len; i++) {
+ FH_PRINTF("%02x ", bytes[i]);
+ }
+ FH_PRINTF("\n");
+}
+#else
+#define dump_bytes(x...)
+#endif
+
+/* Display a block */
+void show_block(const u8 *blk, const char *prefix, const char *suffix, int a)
+{
+#ifdef FH_DEBUG_CRYPTO
+ int i, blksize = 16;
+
+ FH_DEBUG("%s", prefix);
+
+ if (suffix == NULL) {
+ suffix = "\n";
+ blksize = a;
+ }
+
+ for (i = 0; i < blksize; i++)
+ FH_PRINT("%02x%s", *blk++, ((i & 3) == 3) ? " " : " ");
+ FH_PRINT(suffix);
+#endif
+}
+
+/**
+ * Encrypts an array of bytes using the AES encryption engine.
+ * If <code>dst</code> == <code>src</code>, then the bytes will be encrypted
+ * in-place.
+ *
+ * @return 0 on success, negative error code on error.
+ */
+int fh_wusb_aes_encrypt(u8 *src, u8 *key, u8 *dst)
+{
+ u8 block_t[16];
+ FH_MEMSET(block_t, 0, 16);
+
+ return FH_AES_CBC(src, 16, key, 16, block_t, dst);
+}
+
+/**
+ * The CCM-MAC-FUNCTION described in section 6.5 of the WUSB spec.
+ * This function takes a data string and returns the encrypted CBC
+ * Counter-mode MIC.
+ *
+ * @param key The 128-bit symmetric key.
+ * @param nonce The CCM nonce.
+ * @param label The unique 14-byte ASCII text label.
+ * @param bytes The byte array to be encrypted.
+ * @param len Length of the byte array.
+ * @param result Byte array to receive the 8-byte encrypted MIC.
+ */
+void fh_wusb_cmf(u8 *key, u8 *nonce,
+ char *label, u8 *bytes, int len, u8 *result)
+{
+ u8 block_m[16];
+ u8 block_x[16];
+ u8 block_t[8];
+ int idx, blkNum;
+ u16 la = (u16)(len + 14);
+
+ /* Set the AES-128 key */
+ //fh_aes_setkey(tfm, key, 16);
+
+ /* Fill block B0 from flags = 0x59, N, and l(m) = 0 */
+ block_m[0] = 0x59;
+ for (idx = 0; idx < 13; idx++)
+ block_m[idx + 1] = nonce[idx];
+ block_m[14] = 0;
+ block_m[15] = 0;
+
+ /* Produce the CBC IV */
+ fh_wusb_aes_encrypt(block_m, key, block_x);
+ show_block(block_m, "CBC IV in: ", "\n", 0);
+ show_block(block_x, "CBC IV out:", "\n", 0);
+
+ /* Fill block B1 from l(a) = Blen + 14, and A */
+ block_x[0] ^= (u8)(la >> 8);
+ block_x[1] ^= (u8)la;
+ for (idx = 0; idx < 14; idx++)
+ block_x[idx + 2] ^= label[idx];
+ show_block(block_x, "After xor: ", "b1\n", 16);
+
+ fh_wusb_aes_encrypt(block_x, key, block_x);
+ show_block(block_x, "After AES: ", "b1\n", 16);
+
+ idx = 0;
+ blkNum = 0;
+
+ /* Fill remaining blocks with B */
+ while (len-- > 0) {
+ block_x[idx] ^= *bytes++;
+ if (++idx >= 16) {
+ idx = 0;
+ show_block(block_x, "After xor: ", "\n", blkNum);
+ fh_wusb_aes_encrypt(block_x, key, block_x);
+ show_block(block_x, "After AES: ", "\n", blkNum);
+ blkNum++;
+ }
+ }
+
+ /* Handle partial last block */
+ if (idx > 0) {
+ show_block(block_x, "After xor: ", "\n", blkNum);
+ fh_wusb_aes_encrypt(block_x, key, block_x);
+ show_block(block_x, "After AES: ", "\n", blkNum);
+ }
+
+ /* Save the MIC tag */
+ FH_MEMCPY(block_t, block_x, 8);
+ show_block(block_t, "MIC tag : ", NULL, 8);
+
+ /* Fill block A0 from flags = 0x01, N, and counter = 0 */
+ block_m[0] = 0x01;
+ block_m[14] = 0;
+ block_m[15] = 0;
+
+ /* Encrypt the counter */
+ fh_wusb_aes_encrypt(block_m, key, block_x);
+ show_block(block_x, "CTR[MIC] : ", NULL, 8);
+
+ /* XOR with MIC tag */
+ for (idx = 0; idx < 8; idx++) {
+ block_t[idx] ^= block_x[idx];
+ }
+
+ /* Return result to caller */
+ FH_MEMCPY(result, block_t, 8);
+ show_block(result, "CCM-MIC : ", NULL, 8);
+
+}
+
+/**
+ * The PRF function described in section 6.5 of the WUSB spec. This function
+ * concatenates MIC values returned from fh_cmf() to create a value of
+ * the requested length.
+ *
+ * @param prf_len Length of the PRF function in bits (64, 128, or 256).
+ * @param key, nonce, label, bytes, len Same as for fh_cmf().
+ * @param result Byte array to receive the result.
+ */
+void fh_wusb_prf(int prf_len, u8 *key,
+ u8 *nonce, char *label, u8 *bytes, int len, u8 *result)
+{
+ int i;
+
+ nonce[0] = 0;
+ for (i = 0; i < prf_len >> 6; i++, nonce[0]++) {
+ fh_wusb_cmf(key, nonce, label, bytes, len, result);
+ result += 8;
+ }
+}
+
+/**
+ * Fills in CCM Nonce per the WUSB spec.
+ *
+ * @param[in] haddr Host address.
+ * @param[in] daddr Device address.
+ * @param[in] tkid Session Key(PTK) identifier.
+ * @param[out] nonce Pointer to where the CCM Nonce output is to be written.
+ */
+void fh_wusb_fill_ccm_nonce(uint16_t haddr, uint16_t daddr, uint8_t *tkid,
+ uint8_t *nonce)
+{
+
+ FH_DEBUG("%s %x %x\n", __func__, daddr, haddr);
+
+ FH_MEMSET(&nonce[0], 0, 16);
+
+ FH_MEMCPY(&nonce[6], tkid, 3);
+ nonce[9] = daddr & 0xFF;
+ nonce[10] = (daddr >> 8) & 0xFF;
+ nonce[11] = haddr & 0xFF;
+ nonce[12] = (haddr >> 8) & 0xFF;
+
+ dump_bytes("CCM nonce", nonce, 16);
+}
+
+/**
+ * Generates a 16-byte cryptographic-grade random number for the Host/Device
+ * Nonce.
+ */
+void fh_wusb_gen_nonce(uint16_t addr, uint8_t *nonce)
+{
+ uint8_t inonce[16];
+ uint32_t temp[4];
+
+ /* Fill in the Nonce */
+ FH_MEMSET(&inonce[0], 0, sizeof(inonce));
+ inonce[9] = addr & 0xFF;
+ inonce[10] = (addr >> 8) & 0xFF;
+ inonce[11] = inonce[9];
+ inonce[12] = inonce[10];
+
+ /* Collect "randomness samples" */
+ FH_RANDOM_BYTES((uint8_t *)temp, 16);
+
+ fh_wusb_prf_128((uint8_t *)temp, nonce,
+ "Random Numbers", (uint8_t *)temp, sizeof(temp),
+ nonce);
+}
+
+/**
+ * Generates the Session Key (PTK) and Key Confirmation Key (KCK) per the
+ * WUSB spec.
+ *
+ * @param[in] ccm_nonce Pointer to CCM Nonce.
+ * @param[in] mk Master Key to derive the session from
+ * @param[in] hnonce Pointer to Host Nonce.
+ * @param[in] dnonce Pointer to Device Nonce.
+ * @param[out] kck Pointer to where the KCK output is to be written.
+ * @param[out] ptk Pointer to where the PTK output is to be written.
+ */
+void fh_wusb_gen_key(uint8_t *ccm_nonce, uint8_t *mk, uint8_t *hnonce,
+ uint8_t *dnonce, uint8_t *kck, uint8_t *ptk)
+{
+ uint8_t idata[32];
+ uint8_t odata[32];
+
+ dump_bytes("ck", mk, 16);
+ dump_bytes("hnonce", hnonce, 16);
+ dump_bytes("dnonce", dnonce, 16);
+
+ /* The data is the HNonce and DNonce concatenated */
+ FH_MEMCPY(&idata[0], hnonce, 16);
+ FH_MEMCPY(&idata[16], dnonce, 16);
+
+ fh_wusb_prf_256(mk, ccm_nonce, "Pair-wise keys", idata, 32, odata);
+
+ /* Low 16 bytes of the result is the KCK, high 16 is the PTK */
+ FH_MEMCPY(kck, &odata[0], 16);
+ FH_MEMCPY(ptk, &odata[16], 16);
+
+ dump_bytes("kck", kck, 16);
+ dump_bytes("ptk", ptk, 16);
+}
+
+/**
+ * Generates the Message Integrity Code over the Handshake data per the
+ * WUSB spec.
+ *
+ * @param ccm_nonce Pointer to CCM Nonce.
+ * @param kck Pointer to Key Confirmation Key.
+ * @param data Pointer to Handshake data to be checked.
+ * @param mic Pointer to where the MIC output is to be written.
+ */
+void fh_wusb_gen_mic(uint8_t *ccm_nonce, uint8_t *kck,
+ uint8_t *data, uint8_t *mic)
+{
+
+ fh_wusb_prf_64(kck, ccm_nonce, "out-of-bandMIC",
+ data, WUSB_HANDSHAKE_LEN_FOR_MIC, mic);
+}
+
+#endif /* FH_CRYPTOLIB */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.h b/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.h
new file mode 100644
index 00000000..3e5cb9fb
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.h
@@ -0,0 +1,111 @@
+/* =========================================================================
+ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_crypto.h $
+ * $Revision: #3 $
+ * $Date: 2010/09/28 $
+ * $Change: 1596182 $
+ *
+ * Synopsys Portability Library Software and documentation
+ * (hereinafter, "Software") is an Unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing
+ * between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for
+ * Licensed Product with Synopsys or any supplement thereto. You are
+ * permitted to use and redistribute this Software in source and binary
+ * forms, with or without modification, provided that redistributions
+ * of source code must retain this notice. You may not view, use,
+ * disclose, copy or distribute this file or any information contained
+ * herein except pursuant to this license grant from Synopsys. If you
+ * do not agree with this notice, including the disclaimer below, then
+ * you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
+ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================= */
+
+#ifndef _FH_CRYPTO_H_
+#define _FH_CRYPTO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file
+ *
+ * This file contains declarations for the WUSB Cryptographic routines as
+ * defined in the WUSB spec. They are only to be used internally by the FH UWB
+ * modules.
+ */
+
+#include "fh_os.h"
+
+int fh_wusb_aes_encrypt(u8 *src, u8 *key, u8 *dst);
+
+void fh_wusb_cmf(u8 *key, u8 *nonce,
+ char *label, u8 *bytes, int len, u8 *result);
+void fh_wusb_prf(int prf_len, u8 *key,
+ u8 *nonce, char *label, u8 *bytes, int len, u8 *result);
+
+/**
+ * The PRF-64 function described in section 6.5 of the WUSB spec.
+ *
+ * @param key, nonce, label, bytes, len, result Same as for fh_prf().
+ */
+static inline void fh_wusb_prf_64(u8 *key, u8 *nonce,
+ char *label, u8 *bytes, int len, u8 *result)
+{
+ fh_wusb_prf(64, key, nonce, label, bytes, len, result);
+}
+
+/**
+ * The PRF-128 function described in section 6.5 of the WUSB spec.
+ *
+ * @param key, nonce, label, bytes, len, result Same as for fh_prf().
+ */
+static inline void fh_wusb_prf_128(u8 *key, u8 *nonce,
+ char *label, u8 *bytes, int len, u8 *result)
+{
+ fh_wusb_prf(128, key, nonce, label, bytes, len, result);
+}
+
+/**
+ * The PRF-256 function described in section 6.5 of the WUSB spec.
+ *
+ * @param key, nonce, label, bytes, len, result Same as for fh_prf().
+ */
+static inline void fh_wusb_prf_256(u8 *key, u8 *nonce,
+ char *label, u8 *bytes, int len, u8 *result)
+{
+ fh_wusb_prf(256, key, nonce, label, bytes, len, result);
+}
+
+
+void fh_wusb_fill_ccm_nonce(uint16_t haddr, uint16_t daddr, uint8_t *tkid,
+ uint8_t *nonce);
+void fh_wusb_gen_nonce(uint16_t addr,
+ uint8_t *nonce);
+
+void fh_wusb_gen_key(uint8_t *ccm_nonce, uint8_t *mk,
+ uint8_t *hnonce, uint8_t *dnonce,
+ uint8_t *kck, uint8_t *ptk);
+
+
+void fh_wusb_gen_mic(uint8_t *ccm_nonce, uint8_t
+ *kck, uint8_t *data, uint8_t *mic);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FH_CRYPTO_H_ */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_dh.c b/drivers/usb/host/fh_otg/fh_common_port/fh_dh.c
new file mode 100644
index 00000000..502e2a95
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_dh.c
@@ -0,0 +1,291 @@
+/* =========================================================================
+ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_dh.c $
+ * $Revision: #3 $
+ * $Date: 2010/09/28 $
+ * $Change: 1596182 $
+ *
+ * Synopsys Portability Library Software and documentation
+ * (hereinafter, "Software") is an Unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing
+ * between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for
+ * Licensed Product with Synopsys or any supplement thereto. You are
+ * permitted to use and redistribute this Software in source and binary
+ * forms, with or without modification, provided that redistributions
+ * of source code must retain this notice. You may not view, use,
+ * disclose, copy or distribute this file or any information contained
+ * herein except pursuant to this license grant from Synopsys. If you
+ * do not agree with this notice, including the disclaimer below, then
+ * you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
+ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================= */
+#ifdef FH_CRYPTOLIB
+
+#ifndef CONFIG_MACH_IPMATE
+
+#include "fh_dh.h"
+#include "fh_modpow.h"
+
+#ifdef DEBUG
+/* This function prints out a buffer in the format described in the Association
+ * Model specification. */
+static void dh_dump(char *str, void *_num, int len)
+{
+ uint8_t *num = _num;
+ int i;
+ FH_PRINTF("%s\n", str);
+ for (i = 0; i < len; i ++) {
+ FH_PRINTF("%02x", num[i]);
+ if (((i + 1) % 2) == 0) FH_PRINTF(" ");
+ if (((i + 1) % 26) == 0) FH_PRINTF("\n");
+ }
+
+ FH_PRINTF("\n");
+}
+#else
+#define dh_dump(_x...) do {; } while(0)
+#endif
+
+/* Constant g value */
+static __u32 dh_g[] = {
+ 0x02000000,
+};
+
+/* Constant p value */
+static __u32 dh_p[] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xA2DA0FC9, 0x34C26821, 0x8B62C6C4, 0xD11CDC80, 0x084E0229, 0x74CC678A,
+ 0xA6BE0B02, 0x229B133B, 0x79084A51, 0xDD04348E, 0xB31995EF, 0x1B433ACD, 0x6D0A2B30, 0x37145FF2,
+ 0x6D35E14F, 0x45C2516D, 0x76B585E4, 0xC67E5E62, 0xE9424CF4, 0x6BED37A6, 0xB65CFF0B, 0xEDB706F4,
+ 0xFB6B38EE, 0xA59F895A, 0x11249FAE, 0xE61F4B7C, 0x51662849, 0x3D5BE4EC, 0xB87C00C2, 0x05BF63A1,
+ 0x3648DA98, 0x9AD3551C, 0xA83F1669, 0x5FCF24FD, 0x235D6583, 0x96ADA3DC, 0x56F3621C, 0xBB528520,
+ 0x0729D59E, 0x6D969670, 0x4E350C67, 0x0498BC4A, 0x086C74F1, 0x7C2118CA, 0x465E9032, 0x3BCE362E,
+ 0x2C779EE3, 0x03860E18, 0xA283279B, 0x8FA207EC, 0xF05DC5B5, 0xC9524C6F, 0xF6CB2BDE, 0x18175895,
+ 0x7C499539, 0xE56A95EA, 0x1826D215, 0x1005FA98, 0x5A8E7215, 0x2DC4AA8A, 0x0D1733AD, 0x337A5004,
+ 0xAB2155A8, 0x64BA1CDF, 0x0485FBEC, 0x0AEFDB58, 0x5771EA8A, 0x7D0C065D, 0x850F97B3, 0xC7E4E1A6,
+ 0x8CAEF5AB, 0xD73309DB, 0xE0948C1E, 0x9D61254A, 0x26D2E3CE, 0x6BEED21A, 0x06FA2FF1, 0x64088AD9,
+ 0x730276D8, 0x646AC83E, 0x182B1F52, 0x0C207B17, 0x5717E1BB, 0x6C5D617A, 0xC0880977, 0xE246D9BA,
+ 0xA04FE208, 0x31ABE574, 0xFC5BDB43, 0x8E10FDE0, 0x20D1824B, 0xCAD23AA9, 0xFFFFFFFF, 0xFFFFFFFF,
+};
+
+static void dh_swap_bytes(void *_in, void *_out, uint32_t len)
+{
+ uint8_t *in = _in;
+ uint8_t *out = _out;
+ int i;
+ for (i=0; i<len; i++) {
+ out[i] = in[len-1-i];
+ }
+}
+
+/* Computes the modular exponentiation (num^exp % mod). num, exp, and mod are
+ * big endian numbers of size len, in bytes. Each len value must be a multiple
+ * of 4. */
+int fh_dh_modpow(void *mem_ctx, void *num, uint32_t num_len,
+ void *exp, uint32_t exp_len,
+ void *mod, uint32_t mod_len,
+ void *out)
+{
+ /* modpow() takes little endian numbers. AM uses big-endian. This
+ * function swaps bytes of numbers before passing onto modpow. */
+
+ int retval = 0;
+ uint32_t *result;
+
+ uint32_t *bignum_num = fh_alloc(mem_ctx, num_len + 4);
+ uint32_t *bignum_exp = fh_alloc(mem_ctx, exp_len + 4);
+	uint32_t *bignum_mod = fh_alloc(mem_ctx, mod_len + 4); /* FIXME: the three fh_alloc() results above are dereferenced without NULL checks */
+
+ dh_swap_bytes(num, &bignum_num[1], num_len);
+ bignum_num[0] = num_len / 4;
+
+ dh_swap_bytes(exp, &bignum_exp[1], exp_len);
+ bignum_exp[0] = exp_len / 4;
+
+ dh_swap_bytes(mod, &bignum_mod[1], mod_len);
+ bignum_mod[0] = mod_len / 4;
+
+ result = fh_modpow(mem_ctx, bignum_num, bignum_exp, bignum_mod);
+ if (!result) {
+ retval = -1;
+ goto dh_modpow_nomem;
+ }
+
+ dh_swap_bytes(&result[1], out, result[0] * 4);
+ fh_free(mem_ctx, result);
+
+ dh_modpow_nomem:
+ fh_free(mem_ctx, bignum_num);
+ fh_free(mem_ctx, bignum_exp);
+ fh_free(mem_ctx, bignum_mod);
+ return retval;
+}
+
+
+int fh_dh_pk(void *mem_ctx, uint8_t nd, uint8_t *exp, uint8_t *pk, uint8_t *hash)
+{
+ int retval;
+ uint8_t m3[385];
+
+#ifndef DH_TEST_VECTORS
+ FH_RANDOM_BYTES(exp, 32);
+#endif
+
+ /* Compute the pkd */
+ if ((retval = fh_dh_modpow(mem_ctx, dh_g, 4,
+ exp, 32,
+ dh_p, 384, pk))) {
+ return retval;
+ }
+
+ m3[384] = nd;
+ FH_MEMCPY(&m3[0], pk, 384);
+ FH_SHA256(m3, 385, hash);
+
+ dh_dump("PK", pk, 384);
+ dh_dump("SHA-256(M3)", hash, 32);
+ return 0;
+}
+
+int fh_dh_derive_keys(void *mem_ctx, uint8_t nd, uint8_t *pkh, uint8_t *pkd,
+ uint8_t *exp, int is_host,
+ char *dd, uint8_t *ck, uint8_t *kdk)
+{
+ int retval;
+ uint8_t mv[784];
+ uint8_t sha_result[32];
+ uint8_t dhkey[384];
+ uint8_t shared_secret[384];
+ char *message;
+ uint32_t vd;
+
+ uint8_t *pk;
+
+ if (is_host) {
+ pk = pkd;
+ }
+ else {
+ pk = pkh;
+ }
+
+ if ((retval = fh_dh_modpow(mem_ctx, pk, 384,
+ exp, 32,
+ dh_p, 384, shared_secret))) {
+ return retval;
+ }
+ dh_dump("Shared Secret", shared_secret, 384);
+
+ FH_SHA256(shared_secret, 384, dhkey);
+	dh_dump("DHKEY", dhkey, 384); /* FIXME: FH_SHA256 fills only 32 bytes of dhkey; the remaining 352 dumped bytes are uninitialized */
+
+ FH_MEMCPY(&mv[0], pkd, 384);
+ FH_MEMCPY(&mv[384], pkh, 384);
+ FH_MEMCPY(&mv[768], "displayed digest", 16);
+ dh_dump("MV", mv, 784);
+
+ FH_SHA256(mv, 784, sha_result);
+ dh_dump("SHA-256(MV)", sha_result, 32);
+ dh_dump("First 32-bits of SHA-256(MV)", sha_result, 4);
+
+ dh_swap_bytes(sha_result, &vd, 4);
+#ifdef DEBUG
+ FH_PRINTF("Vd (decimal) = %d\n", vd);
+#endif
+
+ switch (nd) {
+ case 2:
+ vd = vd % 100;
+ FH_SPRINTF(dd, "%02d", vd);
+ break;
+ case 3:
+ vd = vd % 1000;
+ FH_SPRINTF(dd, "%03d", vd);
+ break;
+ case 4:
+ vd = vd % 10000;
+ FH_SPRINTF(dd, "%04d", vd);
+ break;
+ }
+#ifdef DEBUG
+ FH_PRINTF("Display Digits: %s\n", dd);
+#endif
+
+ message = "connection key";
+ FH_HMAC_SHA256(message, FH_STRLEN(message), dhkey, 32, sha_result);
+ dh_dump("HMAC(SHA-256, DHKey, connection key)", sha_result, 32);
+ FH_MEMCPY(ck, sha_result, 16);
+
+ message = "key derivation key";
+ FH_HMAC_SHA256(message, FH_STRLEN(message), dhkey, 32, sha_result);
+ dh_dump("HMAC(SHA-256, DHKey, key derivation key)", sha_result, 32);
+ FH_MEMCPY(kdk, sha_result, 32);
+
+ return 0;
+}
+
+
+#ifdef DH_TEST_VECTORS
+
+static __u8 dh_a[] = {
+ 0x44, 0x00, 0x51, 0xd6,
+ 0xf0, 0xb5, 0x5e, 0xa9,
+ 0x67, 0xab, 0x31, 0xc6,
+ 0x8a, 0x8b, 0x5e, 0x37,
+ 0xd9, 0x10, 0xda, 0xe0,
+ 0xe2, 0xd4, 0x59, 0xa4,
+ 0x86, 0x45, 0x9c, 0xaa,
+ 0xdf, 0x36, 0x75, 0x16,
+};
+
+static __u8 dh_b[] = {
+ 0x5d, 0xae, 0xc7, 0x86,
+ 0x79, 0x80, 0xa3, 0x24,
+ 0x8c, 0xe3, 0x57, 0x8f,
+ 0xc7, 0x5f, 0x1b, 0x0f,
+ 0x2d, 0xf8, 0x9d, 0x30,
+ 0x6f, 0xa4, 0x52, 0xcd,
+ 0xe0, 0x7a, 0x04, 0x8a,
+ 0xde, 0xd9, 0x26, 0x56,
+};
+
+void fh_run_dh_test_vectors(void *mem_ctx)
+{
+ uint8_t pkd[384];
+ uint8_t pkh[384];
+ uint8_t hashd[32];
+ uint8_t hashh[32];
+ uint8_t ck[16];
+ uint8_t kdk[32];
+ char dd[5];
+
+ FH_PRINTF("\n\n\nDH_TEST_VECTORS\n\n");
+
+ /* compute the PKd and SHA-256(PKd || Nd) */
+ FH_PRINTF("Computing PKd\n");
+ fh_dh_pk(mem_ctx, 2, dh_a, pkd, hashd);
+
+	/* compute the PKh and SHA-256(PKh || Nd) */
+ FH_PRINTF("Computing PKh\n");
+ fh_dh_pk(mem_ctx, 2, dh_b, pkh, hashh);
+
+ /* compute the dhkey */
+ fh_dh_derive_keys(mem_ctx, 2, pkh, pkd, dh_a, 0, dd, ck, kdk);
+}
+#endif /* DH_TEST_VECTORS */
+
+#endif /* !CONFIG_MACH_IPMATE */
+
+#endif /* FH_CRYPTOLIB */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_dh.h b/drivers/usb/host/fh_otg/fh_common_port/fh_dh.h
new file mode 100644
index 00000000..c4c9ccca
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_dh.h
@@ -0,0 +1,106 @@
+/* =========================================================================
+ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_dh.h $
+ * $Revision: #4 $
+ * $Date: 2010/09/28 $
+ * $Change: 1596182 $
+ *
+ * Synopsys Portability Library Software and documentation
+ * (hereinafter, "Software") is an Unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing
+ * between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for
+ * Licensed Product with Synopsys or any supplement thereto. You are
+ * permitted to use and redistribute this Software in source and binary
+ * forms, with or without modification, provided that redistributions
+ * of source code must retain this notice. You may not view, use,
+ * disclose, copy or distribute this file or any information contained
+ * herein except pursuant to this license grant from Synopsys. If you
+ * do not agree with this notice, including the disclaimer below, then
+ * you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
+ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================= */
+#ifndef _FH_DH_H_
+#define _FH_DH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "fh_os.h"
+
+/** @file
+ *
+ * This file defines the common functions on device and host for performing
+ * numeric association as defined in the WUSB spec. They are only to be
+ * used internally by the FH UWB modules. */
+
+extern int fh_dh_sha256(uint8_t *message, uint32_t len, uint8_t *out);
+extern int fh_dh_hmac_sha256(uint8_t *message, uint32_t messagelen,
+ uint8_t *key, uint32_t keylen,
+ uint8_t *out);
+extern int fh_dh_modpow(void *mem_ctx, void *num, uint32_t num_len,
+ void *exp, uint32_t exp_len,
+ void *mod, uint32_t mod_len,
+ void *out);
+
+/** Computes PKD or PKH, and SHA-256(PKd || Nd)
+ *
+ * PK = g^exp mod p.
+ *
+ * Input:
+ * Nd = Number of digits on the device.
+ *
+ * Output:
+ * exp = A 32-byte buffer to be filled with a randomly generated number.
+ * used as either A or B.
+ * pk = A 384-byte buffer to be filled with the PKH or PKD.
+ * hash = A 32-byte buffer to be filled with SHA-256(PK || ND).
+ */
+extern int fh_dh_pk(void *mem_ctx, uint8_t nd, uint8_t *exp, uint8_t *pkd, uint8_t *hash);
+
+/** Computes the DHKEY, and VD.
+ *
+ * If called from host, then it will compute DHKEY=PKD^exp % p.
+ * If called from device, then it will compute DHKEY=PKH^exp % p.
+ *
+ * Input:
+ * pkd = The PKD value.
+ * pkh = The PKH value.
+ * exp = The A value (if device) or B value (if host) generated in fh_wudev_dh_pk.
+ * is_host = Set to non zero if a WUSB host is calling this function.
+ *
+ * Output:
+
+ * dd = A pointer to an buffer to be set to the displayed digits string to be shown
+ * to the user. This buffer should be at least 5 bytes long to hold 4 digits plus a
+ * null termination character. This buffer can be used directly for display.
+ * ck = A 16-byte buffer to be filled with the CK.
+ * kdk = A 32-byte buffer to be filled with the KDK.
+ */
+extern int fh_dh_derive_keys(void *mem_ctx, uint8_t nd, uint8_t *pkh, uint8_t *pkd,
+ uint8_t *exp, int is_host,
+ char *dd, uint8_t *ck, uint8_t *kdk);
+
+#ifdef DH_TEST_VECTORS
+extern void fh_run_dh_test_vectors(void *mem_ctx);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FH_DH_H_ */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_list.h b/drivers/usb/host/fh_otg/fh_common_port/fh_list.h
new file mode 100644
index 00000000..11cbf687
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_list.h
@@ -0,0 +1,594 @@
+/* $OpenBSD: queue.h,v 1.26 2004/05/04 16:59:32 grange Exp $ */
+/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _FH_LIST_H_
+#define _FH_LIST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file
+ *
+ * This file defines linked list operations. It is derived from BSD with
+ * only the MACRO names being prefixed with FH_. This is because a few of
+ * these names conflict with those on Linux. For documentation on use, see the
+ * inline comments in the source code. The original license for this source
+ * code applies and is preserved in the fh_list.h source file.
+ */
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+/*
+ * Double-linked List.
+ */
+
+typedef struct fh_list_link {
+ struct fh_list_link *next;
+ struct fh_list_link *prev;
+} fh_list_link_t;
+
+#define FH_LIST_INIT(link) do { \
+ (link)->next = (link); \
+ (link)->prev = (link); \
+} while (0)
+
+#define FH_LIST_FIRST(link) ((link)->next)
+#define FH_LIST_LAST(link) ((link)->prev)
+#define FH_LIST_END(link) (link)
+#define FH_LIST_NEXT(link) ((link)->next)
+#define FH_LIST_PREV(link) ((link)->prev)
+#define FH_LIST_EMPTY(link) \
+ (FH_LIST_FIRST(link) == FH_LIST_END(link))
+#define FH_LIST_ENTRY(link, type, field) \
+ (type *)((uint8_t *)(link) - (size_t)(&((type *)0)->field))
+
+#if 0
+#define FH_LIST_INSERT_HEAD(list, link) do { \
+ (link)->next = (list)->next; \
+ (link)->prev = (list); \
+ (list)->next->prev = (link); \
+ (list)->next = (link); \
+} while (0)
+
+#define FH_LIST_INSERT_TAIL(list, link) do { \
+ (link)->next = (list); \
+ (link)->prev = (list)->prev; \
+ (list)->prev->next = (link); \
+ (list)->prev = (link); \
+} while (0)
+#else
+#define FH_LIST_INSERT_HEAD(list, link) do { \
+ fh_list_link_t *__next__ = (list)->next; \
+ __next__->prev = (link); \
+ (link)->next = __next__; \
+ (link)->prev = (list); \
+ (list)->next = (link); \
+} while (0)
+
+#define FH_LIST_INSERT_TAIL(list, link) do { \
+ fh_list_link_t *__prev__ = (list)->prev; \
+ (list)->prev = (link); \
+ (link)->next = (list); \
+ (link)->prev = __prev__; \
+ __prev__->next = (link); \
+} while (0)
+#endif
+
+#if 0
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = LIST_POISON1;
+ entry->prev = LIST_POISON2;
+}
+#endif
+
+#define FH_LIST_REMOVE(link) do { \
+ (link)->next->prev = (link)->prev; \
+ (link)->prev->next = (link)->next; \
+} while (0)
+
+#define FH_LIST_REMOVE_INIT(link) do { \
+ FH_LIST_REMOVE(link); \
+ FH_LIST_INIT(link); \
+} while (0)
+
+#define FH_LIST_MOVE_HEAD(list, link) do { \
+ FH_LIST_REMOVE(link); \
+ FH_LIST_INSERT_HEAD(list, link); \
+} while (0)
+
+#define FH_LIST_MOVE_TAIL(list, link) do { \
+ FH_LIST_REMOVE(link); \
+ FH_LIST_INSERT_TAIL(list, link); \
+} while (0)
+
+#define FH_LIST_FOREACH(var, list) \
+ for((var) = FH_LIST_FIRST(list); \
+ (var) != FH_LIST_END(list); \
+ (var) = FH_LIST_NEXT(var))
+
+#define FH_LIST_FOREACH_SAFE(var, var2, list) \
+ for((var) = FH_LIST_FIRST(list), (var2) = FH_LIST_NEXT(var); \
+ (var) != FH_LIST_END(list); \
+ (var) = (var2), (var2) = FH_LIST_NEXT(var2))
+
+#define FH_LIST_FOREACH_REVERSE(var, list) \
+ for((var) = FH_LIST_LAST(list); \
+ (var) != FH_LIST_END(list); \
+ (var) = FH_LIST_PREV(var))
+
+/*
+ * Singly-linked List definitions.
+ */
+#define FH_SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define FH_SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define FH_SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List access methods.
+ */
+#define FH_SLIST_FIRST(head) ((head)->slh_first)
+#define FH_SLIST_END(head) NULL
+#define FH_SLIST_EMPTY(head)		(FH_SLIST_FIRST(head) == FH_SLIST_END(head))
+#define FH_SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define FH_SLIST_FOREACH(var, head, field) \
+	for((var) = FH_SLIST_FIRST(head); \
+	    (var) != FH_SLIST_END(head); \
+	    (var) = FH_SLIST_NEXT(var, field))
+
+#define FH_SLIST_FOREACH_PREVPTR(var, varp, head, field) \
+	for((varp) = &FH_SLIST_FIRST((head)); \
+	    ((var) = *(varp)) != FH_SLIST_END(head); \
+	    (varp) = &FH_SLIST_NEXT((var), field))
+
+/*
+ * Singly-linked List functions.
+ */
+#define FH_SLIST_INIT(head) { \
+	FH_SLIST_FIRST(head) = FH_SLIST_END(head); \
+}
+
+#define FH_SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (0)
+
+#define FH_SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (0)
+
+#define FH_SLIST_REMOVE_NEXT(head, elm, field) do { \
+ (elm)->field.sle_next = (elm)->field.sle_next->field.sle_next; \
+} while (0)
+
+#define FH_SLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (0)
+
+#define FH_SLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+		FH_SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = (head)->slh_first; \
+ while( curelm->field.sle_next != (elm) ) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = \
+ curelm->field.sle_next->field.sle_next; \
+ } \
+} while (0)
+
+/*
+ * Simple queue definitions.
+ */
+#define FH_SIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define FH_SIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define FH_SIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define FH_SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define FH_SIMPLEQ_END(head) NULL
+#define FH_SIMPLEQ_EMPTY(head)	    (FH_SIMPLEQ_FIRST(head) == FH_SIMPLEQ_END(head))
+#define FH_SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+#define FH_SIMPLEQ_FOREACH(var, head, field) \
+	for((var) = FH_SIMPLEQ_FIRST(head); \
+	    (var) != FH_SIMPLEQ_END(head); \
+	    (var) = FH_SIMPLEQ_NEXT(var, field))
+
+/*
+ * Simple queue functions.
+ */
+#define FH_SIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+#define FH_SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (0)
+
+#define FH_SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (0)
+
+#define FH_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (0)
+
+#define FH_SIMPLEQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define FH_TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define FH_TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define FH_TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/*
+ * tail queue access methods
+ */
+#define FH_TAILQ_FIRST(head) ((head)->tqh_first)
+#define FH_TAILQ_END(head) NULL
+#define FH_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define FH_TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+/* XXX */
+#define FH_TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define FH_TAILQ_EMPTY(head) \
+	(FH_TAILQ_FIRST(head) == FH_TAILQ_END(head))
+
+#define FH_TAILQ_FOREACH(var, head, field) \
+	for((var) = FH_TAILQ_FIRST(head); \
+	    (var) != FH_TAILQ_END(head); \
+	    (var) = FH_TAILQ_NEXT(var, field))
+
+#define FH_TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+	for((var) = FH_TAILQ_LAST(head, headname); \
+	    (var) != FH_TAILQ_END(head); \
+	    (var) = FH_TAILQ_PREV(var, headname, field))
+
+/*
+ * Tail queue functions.
+ */
+#define FH_TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (0)
+
+#define FH_TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+#define FH_TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (0)
+
+#define FH_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (0)
+
+#define FH_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+
+#define FH_TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+} while (0)
+
+#define FH_TAILQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
+ (elm2)->field.tqe_next->field.tqe_prev = \
+ &(elm2)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm2)->field.tqe_next; \
+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
+ *(elm2)->field.tqe_prev = (elm2); \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define FH_CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define FH_CIRCLEQ_HEAD_INITIALIZER(head) \
+ { FH_CIRCLEQ_END(&head), FH_CIRCLEQ_END(&head) }
+
+#define FH_CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue access methods
+ */
+#define FH_CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define FH_CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define FH_CIRCLEQ_END(head) ((void *)(head))
+#define FH_CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define FH_CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+#define FH_CIRCLEQ_EMPTY(head) \
+ (FH_CIRCLEQ_FIRST(head) == FH_CIRCLEQ_END(head))
+
+#define FH_CIRCLEQ_EMPTY_ENTRY(elm, field) (((elm)->field.cqe_next == NULL) && ((elm)->field.cqe_prev == NULL))
+
+#define FH_CIRCLEQ_FOREACH(var, head, field) \
+ for((var) = FH_CIRCLEQ_FIRST(head); \
+ (var) != FH_CIRCLEQ_END(head); \
+ (var) = FH_CIRCLEQ_NEXT(var, field))
+
+#define FH_CIRCLEQ_FOREACH_SAFE(var, var2, head, field) \
+ for((var) = FH_CIRCLEQ_FIRST(head), var2 = FH_CIRCLEQ_NEXT(var, field); \
+ (var) != FH_CIRCLEQ_END(head); \
+ (var) = var2, var2 = FH_CIRCLEQ_NEXT(var, field))
+
+#define FH_CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for((var) = FH_CIRCLEQ_LAST(head); \
+ (var) != FH_CIRCLEQ_END(head); \
+ (var) = FH_CIRCLEQ_PREV(var, field))
+
+/*
+ * Circular queue functions.
+ */
+#define FH_CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = FH_CIRCLEQ_END(head); \
+ (head)->cqh_last = FH_CIRCLEQ_END(head); \
+} while (0)
+
+#define FH_CIRCLEQ_INIT_ENTRY(elm, field) do { \
+ (elm)->field.cqe_next = NULL; \
+ (elm)->field.cqe_prev = NULL; \
+} while (0)
+
+#define FH_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == FH_CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (0)
+
+#define FH_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == FH_CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (0)
+
+#define FH_CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = FH_CIRCLEQ_END(head); \
+ if ((head)->cqh_last == FH_CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (0)
+
+#define FH_CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = FH_CIRCLEQ_END(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == FH_CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (0)
+
+#define FH_CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == FH_CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == FH_CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+} while (0)
+
+#define FH_CIRCLEQ_REMOVE_INIT(head, elm, field) do { \
+ FH_CIRCLEQ_REMOVE(head, elm, field); \
+ FH_CIRCLEQ_INIT_ENTRY(elm, field); \
+} while (0)
+
+#define FH_CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
+ FH_CIRCLEQ_END(head)) \
+		(head)->cqh_last = (elm2); \
+ else \
+ (elm2)->field.cqe_next->field.cqe_prev = (elm2); \
+ if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
+ FH_CIRCLEQ_END(head)) \
+		(head)->cqh_first = (elm2); \
+ else \
+ (elm2)->field.cqe_prev->field.cqe_next = (elm2); \
+} while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FH_LIST_H_ */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_mem.c b/drivers/usb/host/fh_otg/fh_common_port/fh_mem.c
new file mode 100644
index 00000000..d7fedb34
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_mem.c
@@ -0,0 +1,245 @@
+/* Memory Debugging */
+#ifdef FH_DEBUG_MEMORY
+
+#include "fh_os.h"
+#include "fh_list.h"
+
+struct allocation {
+ void *addr;
+ void *ctx;
+ char *func;
+ int line;
+ uint32_t size;
+ int dma;
+ FH_CIRCLEQ_ENTRY(allocation) entry;
+};
+
+FH_CIRCLEQ_HEAD(allocation_queue, allocation);
+
+struct allocation_manager {
+ void *mem_ctx;
+ struct allocation_queue allocations;
+
+ /* statistics */
+ int num;
+ int num_freed;
+ int num_active;
+ uint32_t total;
+ uint32_t cur;
+ uint32_t max;
+};
+
+static struct allocation_manager *manager = NULL;
+
+static int add_allocation(void *ctx, uint32_t size, char const *func, int line, void *addr,
+ int dma)
+{
+ struct allocation *a;
+
+ FH_ASSERT(manager != NULL, "manager not allocated");
+
+ a = __FH_ALLOC_ATOMIC(manager->mem_ctx, sizeof(*a));
+ if (!a) {
+ return -FH_E_NO_MEMORY;
+ }
+
+ a->func = __FH_ALLOC_ATOMIC(manager->mem_ctx, FH_STRLEN(func) + 1);
+ if (!a->func) {
+ __FH_FREE(manager->mem_ctx, a);
+ return -FH_E_NO_MEMORY;
+ }
+
+ FH_MEMCPY(a->func, func, FH_STRLEN(func) + 1);
+ a->addr = addr;
+ a->ctx = ctx;
+ a->line = line;
+ a->size = size;
+ a->dma = dma;
+ FH_CIRCLEQ_INSERT_TAIL(&manager->allocations, a, entry);
+
+ /* Update stats */
+ manager->num++;
+ manager->num_active++;
+ manager->total += size;
+ manager->cur += size;
+
+ if (manager->max < manager->cur) {
+ manager->max = manager->cur;
+ }
+
+ return 0;
+}
+
+static struct allocation *find_allocation(void *ctx, void *addr)
+{
+ struct allocation *a;
+
+ FH_CIRCLEQ_FOREACH(a, &manager->allocations, entry) {
+ if (a->ctx == ctx && a->addr == addr) {
+ return a;
+ }
+ }
+
+ return NULL;
+}
+
+static void free_allocation(void *ctx, void *addr, char const *func, int line)
+{
+ struct allocation *a = find_allocation(ctx, addr);
+
+ if (!a) {
+ FH_ASSERT(0,
+ "Free of address %p that was never allocated or already freed %s:%d",
+ addr, func, line);
+ return;
+ }
+
+ FH_CIRCLEQ_REMOVE(&manager->allocations, a, entry);
+
+ manager->num_active--;
+ manager->num_freed++;
+ manager->cur -= a->size;
+ __FH_FREE(manager->mem_ctx, a->func);
+ __FH_FREE(manager->mem_ctx, a);
+}
+
+int fh_memory_debug_start(void *mem_ctx)
+{
+ FH_ASSERT(manager == NULL, "Memory debugging has already started\n");
+
+ if (manager) {
+ return -FH_E_BUSY;
+ }
+
+ manager = __FH_ALLOC(mem_ctx, sizeof(*manager));
+ if (!manager) {
+ return -FH_E_NO_MEMORY;
+ }
+
+ FH_CIRCLEQ_INIT(&manager->allocations);
+ manager->mem_ctx = mem_ctx;
+ manager->num = 0;
+ manager->num_freed = 0;
+ manager->num_active = 0;
+ manager->total = 0;
+ manager->cur = 0;
+ manager->max = 0;
+
+ return 0;
+}
+
+void fh_memory_debug_stop(void)
+{
+	struct allocation *a, *a_tmp;
+
+	fh_memory_debug_report();
+
+	/* _SAFE variant: free_allocation() frees the node we stand on. */
+	FH_CIRCLEQ_FOREACH_SAFE(a, a_tmp, &manager->allocations, entry) {
+		FH_ERROR("Memory leaked from %s:%d\n", a->func, a->line);
+		free_allocation(a->ctx, a->addr, NULL, -1);
+	}
+	__FH_FREE(manager->mem_ctx, manager);
+}
+
+void fh_memory_debug_report(void)
+{
+ struct allocation *a;
+
+ FH_PRINTF("\n\n\n----------------- Memory Debugging Report -----------------\n\n");
+ FH_PRINTF("Num Allocations = %d\n", manager->num);
+ FH_PRINTF("Freed = %d\n", manager->num_freed);
+ FH_PRINTF("Active = %d\n", manager->num_active);
+ FH_PRINTF("Current Memory Used = %d\n", manager->cur);
+ FH_PRINTF("Total Memory Used = %d\n", manager->total);
+ FH_PRINTF("Maximum Memory Used at Once = %d\n", manager->max);
+ FH_PRINTF("Unfreed allocations:\n");
+
+ FH_CIRCLEQ_FOREACH(a, &manager->allocations, entry) {
+ FH_PRINTF(" addr=%p, size=%d from %s:%d, DMA=%d\n",
+ a->addr, a->size, a->func, a->line, a->dma);
+ }
+}
+
+/* The replacement functions */
+void *fh_alloc_debug(void *mem_ctx, uint32_t size, char const *func, int line)
+{
+ void *addr = __FH_ALLOC(mem_ctx, size);
+
+ if (!addr) {
+ return NULL;
+ }
+
+ if (add_allocation(mem_ctx, size, func, line, addr, 0)) {
+ __FH_FREE(mem_ctx, addr);
+ return NULL;
+ }
+
+ return addr;
+}
+
+void *fh_alloc_atomic_debug(void *mem_ctx, uint32_t size, char const *func,
+ int line)
+{
+ void *addr = __FH_ALLOC_ATOMIC(mem_ctx, size);
+
+ if (!addr) {
+ return NULL;
+ }
+
+ if (add_allocation(mem_ctx, size, func, line, addr, 0)) {
+ __FH_FREE(mem_ctx, addr);
+ return NULL;
+ }
+
+ return addr;
+}
+
+void fh_free_debug(void *mem_ctx, void *addr, char const *func, int line)
+{
+ free_allocation(mem_ctx, addr, func, line);
+ __FH_FREE(mem_ctx, addr);
+}
+
+void *fh_dma_alloc_debug(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr,
+ char const *func, int line)
+{
+ void *addr = __FH_DMA_ALLOC(dma_ctx, size, dma_addr);
+
+ if (!addr) {
+ return NULL;
+ }
+
+ if (add_allocation(dma_ctx, size, func, line, addr, 1)) {
+ __FH_DMA_FREE(dma_ctx, size, addr, *dma_addr);
+ return NULL;
+ }
+
+ return addr;
+}
+
+void *fh_dma_alloc_atomic_debug(void *dma_ctx, uint32_t size,
+ fh_dma_t *dma_addr, char const *func, int line)
+{
+ void *addr = __FH_DMA_ALLOC_ATOMIC(dma_ctx, size, dma_addr);
+
+ if (!addr) {
+ return NULL;
+ }
+
+ if (add_allocation(dma_ctx, size, func, line, addr, 1)) {
+ __FH_DMA_FREE(dma_ctx, size, addr, *dma_addr);
+ return NULL;
+ }
+
+ return addr;
+}
+
+void fh_dma_free_debug(void *dma_ctx, uint32_t size, void *virt_addr,
+ fh_dma_t dma_addr, char const *func, int line)
+{
+ free_allocation(dma_ctx, virt_addr, func, line);
+ __FH_DMA_FREE(dma_ctx, size, virt_addr, dma_addr);
+}
+
+#endif /* FH_DEBUG_MEMORY */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.c b/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.c
new file mode 100644
index 00000000..625495c7
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.c
@@ -0,0 +1,634 @@
+/* Bignum routines adapted from PUTTY sources. PuTTY copyright notice follows.
+ *
+ * PuTTY is copyright 1997-2007 Simon Tatham.
+ *
+ * Portions copyright Robert de Bath, Joris van Rantwijk, Delian
+ * Delchev, Andreas Schultz, Jeroen Massar, Wez Furlong, Nicolas Barry,
+ * Justin Bradford, Ben Harris, Malcolm Smith, Ahmad Khalifa, Markus
+ * Kuhn, and CORE SDI S.A.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE
+ * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifdef FH_CRYPTOLIB
+
+#ifndef CONFIG_MACH_IPMATE
+
+#include <asm/div64.h>
+#include "fh_modpow.h"
+
+#define BIGNUM_INT_MASK 0xFFFFFFFFUL
+#define BIGNUM_TOP_BIT 0x80000000UL
+#define BIGNUM_INT_BITS 32
+
+
+static void *snmalloc(void *mem_ctx, size_t n, size_t size)
+{
+	/* Reject n*size overflow instead of allocating a short buffer. */
+	if (n && size > (size_t)-1 / n)
+		return NULL;
+	size *= n;
+	return fh_alloc(mem_ctx, size ? size : 1);
+}
+
+#define snewn(ctx, n, type) ((type *)snmalloc((ctx), (n), sizeof(type)))
+#define sfree fh_free
+
+/*
+ * Usage notes:
+ * * Do not call the DIVMOD_WORD macro with expressions such as array
+ * subscripts, as some implementations object to this (see below).
+ * * Note that none of the division methods below will cope if the
+ * quotient won't fit into BIGNUM_INT_BITS. Callers should be careful
+ * to avoid this case.
+ * If this condition occurs, in the case of the x86 DIV instruction,
+ * an overflow exception will occur, which (according to a correspondent)
+ * will manifest on Windows as something like
+ * 0xC0000095: Integer overflow
+ * The C variant won't give the right answer, either.
+ */
+
+#define MUL_WORD(w1, w2) ((BignumDblInt)w1 * w2)
+
+#if defined __GNUC__ && defined __i386__
+#define DIVMOD_WORD(q, r, hi, lo, w) \
+ __asm__("div %2" : \
+ "=d" (r), "=a" (q) : \
+ "r" (w), "d" (hi), "a" (lo))
+#else
+#define DIVMOD_WORD(q, r, hi, lo, w) do { \
+ BignumDblInt n = (((BignumDblInt)hi) << BIGNUM_INT_BITS) | lo; \
+ r = do_div(n, w); \
+ q = n; \
+} while (0)
+#endif
+
+#define BIGNUM_INT_BYTES (BIGNUM_INT_BITS / 8)
+
+#define BIGNUM_INTERNAL
+
+static Bignum newbn(void *mem_ctx, int length)
+{
+ Bignum b = snewn(mem_ctx, length + 1, BignumInt);
+ //if (!b)
+ //abort(); /* FIXME */
+ FH_MEMSET(b, 0, (length + 1) * sizeof(*b));
+ b[0] = length;
+ return b;
+}
+
+void freebn(void *mem_ctx, Bignum b)
+{
+ /*
+ * Burn the evidence, just in case.
+ */
+ FH_MEMSET(b, 0, sizeof(b[0]) * (b[0] + 1));
+ sfree(mem_ctx, b);
+}
+
+/*
+ * Compute c = a * b.
+ * Input is in the first len words of a and b.
+ * Result is returned in the first 2*len words of c.
+ */
+static void internal_mul(BignumInt *a, BignumInt *b,
+ BignumInt *c, int len)
+{
+ int i, j;
+ BignumDblInt t;
+
+ for (j = 0; j < 2 * len; j++)
+ c[j] = 0;
+
+ for (i = len - 1; i >= 0; i--) {
+ t = 0;
+ for (j = len - 1; j >= 0; j--) {
+ t += MUL_WORD(a[i], (BignumDblInt) b[j]);
+ t += (BignumDblInt) c[i + j + 1];
+ c[i + j + 1] = (BignumInt) t;
+ t = t >> BIGNUM_INT_BITS;
+ }
+ c[i] = (BignumInt) t;
+ }
+}
+
+static void internal_add_shifted(BignumInt *number,
+ unsigned n, int shift)
+{
+ int word = 1 + (shift / BIGNUM_INT_BITS);
+ int bshift = shift % BIGNUM_INT_BITS;
+ BignumDblInt addend;
+
+ addend = (BignumDblInt)n << bshift;
+
+ while (addend) {
+ addend += number[word];
+ number[word] = (BignumInt) addend & BIGNUM_INT_MASK;
+ addend >>= BIGNUM_INT_BITS;
+ word++;
+ }
+}
+
+/*
+ * Compute a = a % m.
+ * Input in first alen words of a and first mlen words of m.
+ * Output in first alen words of a
+ * (of which first alen-mlen words will be zero).
+ * The MSW of m MUST have its high bit set.
+ * Quotient is accumulated in the `quotient' array, which is a Bignum
+ * rather than the internal bigendian format. Quotient parts are shifted
+ * left by `qshift' before adding into quot.
+ */
+static void internal_mod(BignumInt *a, int alen,
+ BignumInt *m, int mlen,
+ BignumInt *quot, int qshift)
+{
+ BignumInt m0, m1;
+ unsigned int h;
+ int i, k;
+
+ m0 = m[0];
+ if (mlen > 1)
+ m1 = m[1];
+ else
+ m1 = 0;
+
+ for (i = 0; i <= alen - mlen; i++) {
+ BignumDblInt t;
+ unsigned int q, r, c, ai1;
+
+ if (i == 0) {
+ h = 0;
+ } else {
+ h = a[i - 1];
+ a[i - 1] = 0;
+ }
+
+ if (i == alen - 1)
+ ai1 = 0;
+ else
+ ai1 = a[i + 1];
+
+ /* Find q = h:a[i] / m0 */
+ if (h >= m0) {
+ /*
+ * Special case.
+ *
+ * To illustrate it, suppose a BignumInt is 8 bits, and
+ * we are dividing (say) A1:23:45:67 by A1:B2:C3. Then
+ * our initial division will be 0xA123 / 0xA1, which
+ * will give a quotient of 0x100 and a divide overflow.
+ * However, the invariants in this division algorithm
+ * are not violated, since the full number A1:23:... is
+ * _less_ than the quotient prefix A1:B2:... and so the
+ * following correction loop would have sorted it out.
+ *
+ * In this situation we set q to be the largest
+ * quotient we _can_ stomach (0xFF, of course).
+ */
+ q = BIGNUM_INT_MASK;
+ } else {
+ /* Macro doesn't want an array subscript expression passed
+ * into it (see definition), so use a temporary. */
+ BignumInt tmplo = a[i];
+ DIVMOD_WORD(q, r, h, tmplo, m0);
+
+ /* Refine our estimate of q by looking at
+ h:a[i]:a[i+1] / m0:m1 */
+ t = MUL_WORD(m1, q);
+ if (t > ((BignumDblInt) r << BIGNUM_INT_BITS) + ai1) {
+ q--;
+ t -= m1;
+ r = (r + m0) & BIGNUM_INT_MASK; /* overflow? */
+ if (r >= (BignumDblInt) m0 &&
+ t > ((BignumDblInt) r << BIGNUM_INT_BITS) + ai1) q--;
+ }
+ }
+
+ /* Subtract q * m from a[i...] */
+ c = 0;
+ for (k = mlen - 1; k >= 0; k--) {
+ t = MUL_WORD(q, m[k]);
+ t += c;
+ c = (unsigned)(t >> BIGNUM_INT_BITS);
+ if ((BignumInt) t > a[i + k])
+ c++;
+ a[i + k] -= (BignumInt) t;
+ }
+
+ /* Add back m in case of borrow */
+ if (c != h) {
+ t = 0;
+ for (k = mlen - 1; k >= 0; k--) {
+ t += m[k];
+ t += a[i + k];
+ a[i + k] = (BignumInt) t;
+ t = t >> BIGNUM_INT_BITS;
+ }
+ q--;
+ }
+ if (quot)
+ internal_add_shifted(quot, q, qshift + BIGNUM_INT_BITS * (alen - mlen - i));
+ }
+}
+
+/*
+ * Compute p % mod.
+ * The most significant word of mod MUST be non-zero.
+ * We assume that the result array is the same size as the mod array.
+ * We optionally write out a quotient if `quotient' is non-NULL.
+ * We can avoid writing out the result if `result' is NULL.
+ */
+void bigdivmod(void *mem_ctx, Bignum p, Bignum mod, Bignum result, Bignum quotient)
+{
+ BignumInt *n, *m;
+ int mshift;
+ int plen, mlen, i, j;
+
+ /* Allocate m of size mlen, copy mod to m */
+ /* We use big endian internally */
+ mlen = mod[0];
+ m = snewn(mem_ctx, mlen, BignumInt);
+ //if (!m)
+ //abort(); /* FIXME */
+ for (j = 0; j < mlen; j++)
+ m[j] = mod[mod[0] - j];
+
+ /* Shift m left to make msb bit set */
+ for (mshift = 0; mshift < BIGNUM_INT_BITS-1; mshift++)
+ if ((m[0] << mshift) & BIGNUM_TOP_BIT)
+ break;
+ if (mshift) {
+ for (i = 0; i < mlen - 1; i++)
+ m[i] = (m[i] << mshift) | (m[i + 1] >> (BIGNUM_INT_BITS - mshift));
+ m[mlen - 1] = m[mlen - 1] << mshift;
+ }
+
+ plen = p[0];
+ /* Ensure plen > mlen */
+ if (plen <= mlen)
+ plen = mlen + 1;
+
+ /* Allocate n of size plen, copy p to n */
+ n = snewn(mem_ctx, plen, BignumInt);
+ //if (!n)
+ //abort(); /* FIXME */
+ for (j = 0; j < plen; j++)
+ n[j] = 0;
+ for (j = 1; j <= (int)p[0]; j++)
+ n[plen - j] = p[j];
+
+ /* Main computation */
+ internal_mod(n, plen, m, mlen, quotient, mshift);
+
+ /* Fixup result in case the modulus was shifted */
+ if (mshift) {
+ for (i = plen - mlen - 1; i < plen - 1; i++)
+ n[i] = (n[i] << mshift) | (n[i + 1] >> (BIGNUM_INT_BITS - mshift));
+ n[plen - 1] = n[plen - 1] << mshift;
+ internal_mod(n, plen, m, mlen, quotient, 0);
+ for (i = plen - 1; i >= plen - mlen; i--)
+ n[i] = (n[i] >> mshift) | (n[i - 1] << (BIGNUM_INT_BITS - mshift));
+ }
+
+ /* Copy result to buffer */
+ if (result) {
+ for (i = 1; i <= (int)result[0]; i++) {
+ int j = plen - i;
+ result[i] = j >= 0 ? n[j] : 0;
+ }
+ }
+
+ /* Free temporary arrays */
+ for (i = 0; i < mlen; i++)
+ m[i] = 0;
+ sfree(mem_ctx, m);
+ for (i = 0; i < plen; i++)
+ n[i] = 0;
+ sfree(mem_ctx, n);
+}
+
+/*
+ * Simple remainder.
+ */
+Bignum bigmod(void *mem_ctx, Bignum a, Bignum b)
+{
+ Bignum r = newbn(mem_ctx, b[0]);
+ bigdivmod(mem_ctx, a, b, r, NULL);
+ return r;
+}
+
+/*
+ * Compute (base ^ exp) % mod.
+ */
+Bignum fh_modpow(void *mem_ctx, Bignum base_in, Bignum exp, Bignum mod)
+{
+ BignumInt *a, *b, *n, *m;
+ int mshift;
+ int mlen, i, j;
+ Bignum base, result;
+
+ /*
+ * The most significant word of mod needs to be non-zero. It
+ * should already be, but let's make sure.
+ */
+ //assert(mod[mod[0]] != 0);
+
+ /*
+ * Make sure the base is smaller than the modulus, by reducing
+ * it modulo the modulus if not.
+ */
+ base = bigmod(mem_ctx, base_in, mod);
+
+ /* Allocate m of size mlen, copy mod to m */
+ /* We use big endian internally */
+ mlen = mod[0];
+ m = snewn(mem_ctx, mlen, BignumInt);
+ //if (!m)
+ //abort(); /* FIXME */
+ for (j = 0; j < mlen; j++)
+ m[j] = mod[mod[0] - j];
+
+ /* Shift m left to make msb bit set */
+ for (mshift = 0; mshift < BIGNUM_INT_BITS - 1; mshift++)
+ if ((m[0] << mshift) & BIGNUM_TOP_BIT)
+ break;
+ if (mshift) {
+ for (i = 0; i < mlen - 1; i++)
+ m[i] =
+ (m[i] << mshift) | (m[i + 1] >>
+ (BIGNUM_INT_BITS - mshift));
+ m[mlen - 1] = m[mlen - 1] << mshift;
+ }
+
+ /* Allocate n of size mlen, copy base to n */
+ n = snewn(mem_ctx, mlen, BignumInt);
+ //if (!n)
+ //abort(); /* FIXME */
+ i = mlen - base[0];
+ for (j = 0; j < i; j++)
+ n[j] = 0;
+ for (j = 0; j < base[0]; j++)
+ n[i + j] = base[base[0] - j];
+
+ /* Allocate a and b of size 2*mlen. Set a = 1 */
+ a = snewn(mem_ctx, 2 * mlen, BignumInt);
+ //if (!a)
+ //abort(); /* FIXME */
+ b = snewn(mem_ctx, 2 * mlen, BignumInt);
+ //if (!b)
+ //abort(); /* FIXME */
+ for (i = 0; i < 2 * mlen; i++)
+ a[i] = 0;
+ a[2 * mlen - 1] = 1;
+
+ /* Skip leading zero bits of exp. */
+ i = 0;
+ j = BIGNUM_INT_BITS - 1;
+ while (i < exp[0] && (exp[exp[0] - i] & (1 << j)) == 0) {
+ j--;
+ if (j < 0) {
+ i++;
+ j = BIGNUM_INT_BITS - 1;
+ }
+ }
+
+ /* Main computation */
+ while (i < exp[0]) {
+ while (j >= 0) {
+ internal_mul(a + mlen, a + mlen, b, mlen);
+ internal_mod(b, mlen * 2, m, mlen, NULL, 0);
+ if ((exp[exp[0] - i] & (1 << j)) != 0) {
+ internal_mul(b + mlen, n, a, mlen);
+ internal_mod(a, mlen * 2, m, mlen, NULL, 0);
+ } else {
+ BignumInt *t;
+ t = a;
+ a = b;
+ b = t;
+ }
+ j--;
+ }
+ i++;
+ j = BIGNUM_INT_BITS - 1;
+ }
+
+ /* Fixup result in case the modulus was shifted */
+ if (mshift) {
+ for (i = mlen - 1; i < 2 * mlen - 1; i++)
+ a[i] =
+ (a[i] << mshift) | (a[i + 1] >>
+ (BIGNUM_INT_BITS - mshift));
+ a[2 * mlen - 1] = a[2 * mlen - 1] << mshift;
+ internal_mod(a, mlen * 2, m, mlen, NULL, 0);
+ for (i = 2 * mlen - 1; i >= mlen; i--)
+ a[i] =
+ (a[i] >> mshift) | (a[i - 1] <<
+ (BIGNUM_INT_BITS - mshift));
+ }
+
+ /* Copy result to buffer */
+ result = newbn(mem_ctx, mod[0]);
+ for (i = 0; i < mlen; i++)
+ result[result[0] - i] = a[i + mlen];
+ while (result[0] > 1 && result[result[0]] == 0)
+ result[0]--;
+
+ /* Free temporary arrays */
+ for (i = 0; i < 2 * mlen; i++)
+ a[i] = 0;
+ sfree(mem_ctx, a);
+ for (i = 0; i < 2 * mlen; i++)
+ b[i] = 0;
+ sfree(mem_ctx, b);
+ for (i = 0; i < mlen; i++)
+ m[i] = 0;
+ sfree(mem_ctx, m);
+ for (i = 0; i < mlen; i++)
+ n[i] = 0;
+ sfree(mem_ctx, n);
+
+ freebn(mem_ctx, base);
+
+ return result;
+}
+
+
+#ifdef UNITTEST
+
+static __u32 dh_p[] = {
+ 96,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xA93AD2CA,
+ 0x4B82D120,
+ 0xE0FD108E,
+ 0x43DB5BFC,
+ 0x74E5AB31,
+ 0x08E24FA0,
+ 0xBAD946E2,
+ 0x770988C0,
+ 0x7A615D6C,
+ 0xBBE11757,
+ 0x177B200C,
+ 0x521F2B18,
+ 0x3EC86A64,
+ 0xD8760273,
+ 0xD98A0864,
+ 0xF12FFA06,
+ 0x1AD2EE6B,
+ 0xCEE3D226,
+ 0x4A25619D,
+ 0x1E8C94E0,
+ 0xDB0933D7,
+ 0xABF5AE8C,
+ 0xA6E1E4C7,
+ 0xB3970F85,
+ 0x5D060C7D,
+ 0x8AEA7157,
+ 0x58DBEF0A,
+ 0xECFB8504,
+ 0xDF1CBA64,
+ 0xA85521AB,
+ 0x04507A33,
+ 0xAD33170D,
+ 0x8AAAC42D,
+ 0x15728E5A,
+ 0x98FA0510,
+ 0x15D22618,
+ 0xEA956AE5,
+ 0x3995497C,
+ 0x95581718,
+ 0xDE2BCBF6,
+ 0x6F4C52C9,
+ 0xB5C55DF0,
+ 0xEC07A28F,
+ 0x9B2783A2,
+ 0x180E8603,
+ 0xE39E772C,
+ 0x2E36CE3B,
+ 0x32905E46,
+ 0xCA18217C,
+ 0xF1746C08,
+ 0x4ABC9804,
+ 0x670C354E,
+ 0x7096966D,
+ 0x9ED52907,
+ 0x208552BB,
+ 0x1C62F356,
+ 0xDCA3AD96,
+ 0x83655D23,
+ 0xFD24CF5F,
+ 0x69163FA8,
+ 0x1C55D39A,
+ 0x98DA4836,
+ 0xA163BF05,
+ 0xC2007CB8,
+ 0xECE45B3D,
+ 0x49286651,
+ 0x7C4B1FE6,
+ 0xAE9F2411,
+ 0x5A899FA5,
+ 0xEE386BFB,
+ 0xF406B7ED,
+ 0x0BFF5CB6,
+ 0xA637ED6B,
+ 0xF44C42E9,
+ 0x625E7EC6,
+ 0xE485B576,
+ 0x6D51C245,
+ 0x4FE1356D,
+ 0xF25F1437,
+ 0x302B0A6D,
+ 0xCD3A431B,
+ 0xEF9519B3,
+ 0x8E3404DD,
+ 0x514A0879,
+ 0x3B139B22,
+ 0x020BBEA6,
+ 0x8A67CC74,
+ 0x29024E08,
+ 0x80DC1CD1,
+ 0xC4C6628B,
+ 0x2168C234,
+ 0xC90FDAA2,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+
+static __u32 dh_a[] = {
+ 8,
+ 0xdf367516,
+ 0x86459caa,
+ 0xe2d459a4,
+ 0xd910dae0,
+ 0x8a8b5e37,
+ 0x67ab31c6,
+ 0xf0b55ea9,
+ 0x440051d6,
+};
+
+static __u32 dh_b[] = {
+ 8,
+ 0xded92656,
+ 0xe07a048a,
+ 0x6fa452cd,
+ 0x2df89d30,
+ 0xc75f1b0f,
+ 0x8ce3578f,
+ 0x7980a324,
+ 0x5daec786,
+};
+
+static __u32 dh_g[] = {
+ 1,
+ 2,
+};
+
+int main(void)
+{
+ int i;
+ __u32 *k;
+ k = fh_modpow(NULL, dh_g, dh_a, dh_p);
+
+ printf("\n\n");
+ for (i=0; i<k[0]; i++) {
+ __u32 word32 = k[k[0] - i];
+ __u16 l = word32 & 0xffff;
+ __u16 m = (word32 & 0xffff0000) >> 16;
+ printf("%04x %04x ", m, l);
+ if (!((i + 1)%13)) printf("\n");
+ }
+ printf("\n\n");
+
+ if ((k[0] == 0x60) && (k[1] == 0x28e490e5) && (k[0x60] == 0x5a0d3d4e)) {
+ printf("PASS\n\n");
+ }
+ else {
+ printf("FAIL\n\n");
+ }
+
+}
+
+#endif /* UNITTEST */
+
+#endif /* CONFIG_MACH_IPMATE */
+
+#endif /*FH_CRYPTOLIB */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.h b/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.h
new file mode 100644
index 00000000..96cdb551
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.h
@@ -0,0 +1,34 @@
+/*
+ * fh_modpow.h
+ * See fh_modpow.c for license and changes
+ */
+#ifndef _FH_MODPOW_H
+#define _FH_MODPOW_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "fh_os.h"
+
+/** @file
+ *
+ * This file defines the module exponentiation function which is only used
+ * internally by the FH UWB modules for calculation of PKs during numeric
+ * association. The routine is taken from the PUTTY, an open source terminal
+ * emulator. The PUTTY License is preserved in the fh_modpow.c file.
+ *
+ */
+
+typedef uint32_t BignumInt;
+typedef uint64_t BignumDblInt;
+typedef BignumInt *Bignum;
+
+/* Compute modular exponentiaion */
+extern Bignum fh_modpow(void *mem_ctx, Bignum base_in, Bignum exp, Bignum mod);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LINUX_BIGNUM_H */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.c b/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.c
new file mode 100644
index 00000000..a2878fe2
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.c
@@ -0,0 +1,319 @@
+#ifdef FH_NOTIFYLIB
+
+#include "fh_notifier.h"
+#include "fh_list.h"
+
+typedef struct fh_observer {
+ void *observer;
+ fh_notifier_callback_t callback;
+ void *data;
+ char *notification;
+ FH_CIRCLEQ_ENTRY(fh_observer) list_entry;
+} observer_t;
+
+FH_CIRCLEQ_HEAD(observer_queue, fh_observer);
+
+typedef struct fh_notifier {
+ void *mem_ctx;
+ void *object;
+ struct observer_queue observers;
+ FH_CIRCLEQ_ENTRY(fh_notifier) list_entry;
+} notifier_t;
+
+FH_CIRCLEQ_HEAD(notifier_queue, fh_notifier);
+
+typedef struct manager {
+ void *mem_ctx;
+ void *wkq_ctx;
+ fh_workq_t *wq;
+// fh_mutex_t *mutex;
+ struct notifier_queue notifiers;
+} manager_t;
+
+static manager_t *manager = NULL;
+
+static int create_manager(void *mem_ctx, void *wkq_ctx)
+{
+	manager = fh_alloc(mem_ctx, sizeof(manager_t));
+	if (!manager)
+		return -FH_E_NO_MEMORY;
+	manager->mem_ctx = mem_ctx;	/* free_manager() frees via this ctx */
+	manager->wkq_ctx = wkq_ctx;
+	FH_CIRCLEQ_INIT(&manager->notifiers);
+	manager->wq = fh_workq_alloc(wkq_ctx, "FH Notification WorkQ");
+	if (!manager->wq) {
+		fh_free(mem_ctx, manager);	/* don't leak on failure */
+		manager = NULL;
+		return -FH_E_NO_MEMORY;
+	}
+	return 0;
+}
+
+static void free_manager(void)
+{
+ fh_workq_free(manager->wq);
+
+ /* All notifiers must have unregistered themselves before this module
+ * can be removed. Hitting this assertion indicates a programmer
+ * error. */
+ FH_ASSERT(FH_CIRCLEQ_EMPTY(&manager->notifiers),
+ "Notification manager being freed before all notifiers have been removed");
+ fh_free(manager->mem_ctx, manager);
+}
+
+#ifdef DEBUG
+static void dump_manager(void)
+{
+ notifier_t *n;
+ observer_t *o;
+
+ FH_ASSERT(manager, "Notification manager not found");
+
+ FH_DEBUG("List of all notifiers and observers:\n");
+ FH_CIRCLEQ_FOREACH(n, &manager->notifiers, list_entry) {
+ FH_DEBUG("Notifier %p has observers:\n", n->object);
+ FH_CIRCLEQ_FOREACH(o, &n->observers, list_entry) {
+ FH_DEBUG(" %p watching %s\n", o->observer, o->notification);
+ }
+ }
+}
+#else
+#define dump_manager(...)
+#endif
+
+static observer_t *alloc_observer(void *mem_ctx, void *observer, char *notification,
+ fh_notifier_callback_t callback, void *data)
+{
+ observer_t *new_observer = fh_alloc(mem_ctx, sizeof(observer_t));
+
+ if (!new_observer) {
+ return NULL;
+ }
+
+ FH_CIRCLEQ_INIT_ENTRY(new_observer, list_entry);
+ new_observer->observer = observer;
+ new_observer->notification = notification;
+ new_observer->callback = callback;
+ new_observer->data = data;
+ return new_observer;
+}
+
+static void free_observer(void *mem_ctx, observer_t *observer)
+{
+ fh_free(mem_ctx, observer);
+}
+
+static notifier_t *alloc_notifier(void *mem_ctx, void *object)
+{
+ notifier_t *notifier;
+
+ if (!object) {
+ return NULL;
+ }
+
+ notifier = fh_alloc(mem_ctx, sizeof(notifier_t));
+ if (!notifier) {
+ return NULL;
+ }
+
+ FH_CIRCLEQ_INIT(&notifier->observers);
+ FH_CIRCLEQ_INIT_ENTRY(notifier, list_entry);
+
+ notifier->mem_ctx = mem_ctx;
+ notifier->object = object;
+ return notifier;
+}
+
+static void free_notifier(notifier_t *notifier)
+{
+	observer_t *observer, *observer_tmp;
+	/* _SAFE variant: free_observer() releases the current node. */
+	FH_CIRCLEQ_FOREACH_SAFE(observer, observer_tmp, &notifier->observers, list_entry) {
+		free_observer(notifier->mem_ctx, observer);
+	}
+
+	fh_free(notifier->mem_ctx, notifier);
+}
+
+static notifier_t *find_notifier(void *object)
+{
+ notifier_t *notifier;
+
+ FH_ASSERT(manager, "Notification manager not found");
+
+ if (!object) {
+ return NULL;
+ }
+
+ FH_CIRCLEQ_FOREACH(notifier, &manager->notifiers, list_entry) {
+ if (notifier->object == object) {
+ return notifier;
+ }
+ }
+
+ return NULL;
+}
+
+int fh_alloc_notification_manager(void *mem_ctx, void *wkq_ctx)
+{
+ return create_manager(mem_ctx, wkq_ctx);
+}
+
+void fh_free_notification_manager(void)
+{
+ free_manager();
+}
+
+fh_notifier_t *fh_register_notifier(void *mem_ctx, void *object)
+{
+ notifier_t *notifier;
+
+ FH_ASSERT(manager, "Notification manager not found");
+
+ notifier = find_notifier(object);
+ if (notifier) {
+ FH_ERROR("Notifier %p is already registered\n", object);
+ return NULL;
+ }
+
+ notifier = alloc_notifier(mem_ctx, object);
+ if (!notifier) {
+ return NULL;
+ }
+
+ FH_CIRCLEQ_INSERT_TAIL(&manager->notifiers, notifier, list_entry);
+
+ FH_INFO("Notifier %p registered", object);
+ dump_manager();
+
+ return notifier;
+}
+
+void fh_unregister_notifier(fh_notifier_t *notifier)
+{
+ FH_ASSERT(manager, "Notification manager not found");
+
+ if (!FH_CIRCLEQ_EMPTY(&notifier->observers)) {
+ observer_t *o;
+
+ FH_ERROR("Notifier %p has active observers when removing\n", notifier->object);
+ FH_CIRCLEQ_FOREACH(o, &notifier->observers, list_entry) {
+ FH_DEBUG(" %p watching %s\n", o->observer, o->notification);
+ }
+
+ FH_ASSERT(FH_CIRCLEQ_EMPTY(&notifier->observers),
+ "Notifier %p has active observers when removing", notifier);
+ }
+
+ FH_CIRCLEQ_REMOVE_INIT(&manager->notifiers, notifier, list_entry);
+ free_notifier(notifier);
+
+ FH_INFO("Notifier unregistered");
+ dump_manager();
+}
+
+/* Add an observer to observe the notifier for a particular state, event, or notification. */
+int fh_add_observer(void *observer, void *object, char *notification,
+ fh_notifier_callback_t callback, void *data)
+{
+ notifier_t *notifier = find_notifier(object);
+ observer_t *new_observer;
+
+ if (!notifier) {
+ FH_ERROR("Notifier %p is not found when adding observer\n", object);
+ return -FH_E_INVALID;
+ }
+
+ new_observer = alloc_observer(notifier->mem_ctx, observer, notification, callback, data);
+ if (!new_observer) {
+ return -FH_E_NO_MEMORY;
+ }
+
+ FH_CIRCLEQ_INSERT_TAIL(&notifier->observers, new_observer, list_entry);
+
+ FH_INFO("Added observer %p to notifier %p observing notification %s, callback=%p, data=%p",
+ observer, object, notification, callback, data);
+
+ dump_manager();
+ return 0;
+}
+
+int fh_remove_observer(void *observer)
+{
+ notifier_t *n;
+
+ FH_ASSERT(manager, "Notification manager not found");
+
+ FH_CIRCLEQ_FOREACH(n, &manager->notifiers, list_entry) {
+ observer_t *o;
+ observer_t *o2;
+
+ FH_CIRCLEQ_FOREACH_SAFE(o, o2, &n->observers, list_entry) {
+ if (o->observer == observer) {
+ FH_CIRCLEQ_REMOVE_INIT(&n->observers, o, list_entry);
+ FH_INFO("Removing observer %p from notifier %p watching notification %s:",
+ o->observer, n->object, o->notification);
+ free_observer(n->mem_ctx, o);
+ }
+ }
+ }
+
+ dump_manager();
+ return 0;
+}
+
+typedef struct callback_data {
+ void *mem_ctx;
+ fh_notifier_callback_t cb;
+ void *observer;
+ void *data;
+ void *object;
+ char *notification;
+ void *notification_data;
+} cb_data_t;
+
+static void cb_task(void *data)
+{
+ cb_data_t *cb = (cb_data_t *)data;
+
+ cb->cb(cb->object, cb->notification, cb->observer, cb->notification_data, cb->data);
+ fh_free(cb->mem_ctx, cb);
+}
+
+void fh_notify(fh_notifier_t *notifier, char *notification, void *notification_data)
+{
+ observer_t *o;
+
+ FH_ASSERT(manager, "Notification manager not found");
+
+ FH_CIRCLEQ_FOREACH(o, &notifier->observers, list_entry) {
+ int len = FH_STRLEN(notification);
+
+ if (FH_STRLEN(o->notification) != len) {
+ continue;
+ }
+
+ if (FH_STRNCMP(o->notification, notification, len) == 0) {
+ cb_data_t *cb_data = fh_alloc(notifier->mem_ctx, sizeof(cb_data_t));
+
+ if (!cb_data) {
+ FH_ERROR("Failed to allocate callback data\n");
+ return;
+ }
+
+ cb_data->mem_ctx = notifier->mem_ctx;
+ cb_data->cb = o->callback;
+ cb_data->observer = o->observer;
+ cb_data->data = o->data;
+ cb_data->object = notifier->object;
+ cb_data->notification = notification;
+ cb_data->notification_data = notification_data;
+ FH_DEBUG("Observer found %p for notification %s\n", o->observer, notification);
+ FH_WORKQ_SCHEDULE(manager->wq, cb_task, cb_data,
+ "Notify callback from %p for Notification %s, to observer %p",
+ cb_data->object, notification, cb_data->observer);
+ }
+ }
+}
+
+#endif /* FH_NOTIFYLIB */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.h b/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.h
new file mode 100644
index 00000000..97386291
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.h
@@ -0,0 +1,122 @@
+
+#ifndef __FH_NOTIFIER_H__
+#define __FH_NOTIFIER_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "fh_os.h"
+
+/** @file
+ *
+ * A simple implementation of the Observer pattern. Any "module" can
+ * register as an observer or notifier. The notion of "module" is abstract and
+ * can mean anything used to identify either an observer or notifier. Usually
+ * it will be a pointer to a data structure which contains some state, ie an
+ * object.
+ *
+ * Before any notifiers can be added, the global notification manager must be
+ * brought up with fh_alloc_notification_manager().
+ * fh_free_notification_manager() will bring it down and free all resources.
+ * These would typically be called upon module load and unload. The
+ * notification manager is a single global instance that handles all registered
+ * observable modules and observers so this should be done only once.
+ *
+ * A module can be observable by using Notifications to publicize some general
+ * information about its state or operation. It does not care who listens, or
+ * even if anyone listens, or what they do with the information. The observable
+ * modules do not need to know any information about its observers or their
+ * interfaces, or their state or data.
+ *
+ * Any module can register to emit Notifications. It should publish a list of
+ * notifications that it can emit and their behavior, such as when they will get
+ * triggered, and what information will be provided to the observer. Then it
+ * should register itself as an observable module. See fh_register_notifier().
+ *
+ * Any module can observe any observable, registered module, provided it has a
+ * handle to the other module and knows what notifications to observe. See
+ * fh_add_observer().
+ *
+ * A function of type fh_notifier_callback_t is called whenever a notification
+ * is triggered with one or more observers observing it. This function is
+ * called in its own process so it may sleep or block if needed. It is
+ * guaranteed to be called sometime after the notification has occurred and will
+ * be called once per each time the notification is triggered. It will NOT be
+ * called in the same process context used to trigger the notification.
+ *
+ * @section Limitations
+ *
+ * Keep in mind that Notifications that can be triggered in rapid succession may
+ * schedule too many processes to handle. Be aware of this limitation when
+ * designing to use notifications, and only add notifications for appropriate
+ * observable information.
+ *
+ * Also Notification callbacks are not synchronous. If you need to synchronize
+ * the behavior between module/observer you must use other means. And perhaps
+ * that will mean Notifications are not the proper solution.
+ */
+
+struct fh_notifier;
+typedef struct fh_notifier fh_notifier_t;
+
+/** The callback function must be of this type.
+ *
+ * @param object This is the object that is being observed.
+ * @param notification This is the notification that was triggered.
+ * @param observer This is the observer
+ * @param notification_data This is notification-specific data that the notifier
+ * has included in this notification. The value of this should be published in
+ * the documentation of the observable module with the notifications.
+ * @param user_data This is any custom data that the observer provided when
+ * adding itself as an observer to the notification. */
+typedef void (*fh_notifier_callback_t)(void *object, char *notification, void *observer,
+ void *notification_data, void *user_data);
+
+/** Brings up the notification manager. */
+extern int fh_alloc_notification_manager(void *mem_ctx, void *wkq_ctx);
+/** Brings down the notification manager. */
+extern void fh_free_notification_manager(void);
+
+/** This function registers an observable module. A fh_notifier_t object is
+ * returned to the observable module. This is an opaque object that is used by
+ * the observable module to trigger notifications. This object should only be
+ * accessible to functions that are authorized to trigger notifications for this
+ * module. Observers do not need this object. */
+extern fh_notifier_t *fh_register_notifier(void *mem_ctx, void *object);
+
+/** This function unregisters an observable module. All observers have to be
+ * removed prior to unregistration. */
+extern void fh_unregister_notifier(fh_notifier_t *notifier);
+
+/** Add a module as an observer to the observable module. The observable module
+ * needs to have previously registered with the notification manager.
+ *
+ * @param observer The observer module
+ * @param object The module to observe
+ * @param notification The notification to observe
+ * @param callback The callback function to call
+ * @param user_data Any additional user data to pass into the callback function */
+extern int fh_add_observer(void *observer, void *object, char *notification,
+ fh_notifier_callback_t callback, void *user_data);
+
+/** Removes the specified observer from all notifications that it is currently
+ * observing. */
+extern int fh_remove_observer(void *observer);
+
+/** This function triggers a Notification. It should be called by the
+ * observable module, or any module or library which the observable module
+ * allows to trigger notification on its behalf. Such as the fh_cc_t.
+ *
+ * fh_notify is a non-blocking function. Callbacks are scheduled to be called in
+ * their own process context for each trigger. Callbacks can be blocking.
+ * fh_notify can be called from interrupt context if needed.
+ *
+ */
+void fh_notify(fh_notifier_t *notifier, char *notification, void *notification_data);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FH_NOTIFIER_H__ */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_os.h b/drivers/usb/host/fh_otg/fh_common_port/fh_os.h
new file mode 100644
index 00000000..d73ab583
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/fh_os.h
@@ -0,0 +1,1245 @@
+/* =========================================================================
+ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_os.h $
+ * $Revision: #15 $
+ * $Date: 2015/06/12 $
+ * $Change: 2859407 $
+ *
+ * Synopsys Portability Library Software and documentation
+ * (hereinafter, "Software") is an Unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing
+ * between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for
+ * Licensed Product with Synopsys or any supplement thereto. You are
+ * permitted to use and redistribute this Software in source and binary
+ * forms, with or without modification, provided that redistributions
+ * of source code must retain this notice. You may not view, use,
+ * disclose, copy or distribute this file or any information contained
+ * herein except pursuant to this license grant from Synopsys. If you
+ * do not agree with this notice, including the disclaimer below, then
+ * you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
+ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================= */
+#ifndef _FH_OS_H_
+#define _FH_OS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file
+ *
+ * FH portability library, low level os-wrapper functions
+ *
+ */
+
+/* These basic types need to be defined by some OS header file or custom header
+ * file for your specific target architecture.
+ *
+ * uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t
+ *
+ * Any custom or alternate header file must be added and enabled here.
+ */
+
+#ifdef FH_LINUX
+# include <linux/types.h>
+# ifdef CONFIG_DEBUG_MUTEXES
+# include <linux/mutex.h>
+# endif
+# include <linux/errno.h>
+# include <stdarg.h>
+#endif
+
+#if defined(FH_FREEBSD) || defined(FH_NETBSD)
+# include <os_dep.h>
+#endif
+
+
+/** @name Primitive Types and Values */
+
+/** We define a boolean type for consistency. Can be either YES or NO */
+typedef uint8_t fh_bool_t;
+#define YES 1
+#define NO 0
+
+#ifdef FH_LINUX
+
+/** @name Error Codes */
+#define FH_E_INVALID EINVAL
+#define FH_E_NO_MEMORY ENOMEM
+#define FH_E_NO_DEVICE ENODEV
+#define FH_E_NOT_SUPPORTED EOPNOTSUPP
+#define FH_E_TIMEOUT ETIMEDOUT
+#define FH_E_BUSY EBUSY
+#define FH_E_AGAIN EAGAIN
+#define FH_E_RESTART ERESTART
+#define FH_E_ABORT ECONNABORTED
+#define FH_E_SHUTDOWN ESHUTDOWN
+#define FH_E_NO_DATA ENODATA
+#define FH_E_DISCONNECT ECONNRESET
+#define FH_E_UNKNOWN EINVAL
+#define FH_E_NO_STREAM_RES ENOSR
+#define FH_E_COMMUNICATION ECOMM
+#define FH_E_OVERFLOW EOVERFLOW
+#define FH_E_PROTOCOL EPROTO
+#define FH_E_IN_PROGRESS EINPROGRESS
+#define FH_E_PIPE EPIPE
+#define FH_E_IO EIO
+#define FH_E_NO_SPACE ENOSPC
+
+#else
+
+/** @name Error Codes */
+#define FH_E_INVALID 1001
+#define FH_E_NO_MEMORY 1002
+#define FH_E_NO_DEVICE 1003
+#define FH_E_NOT_SUPPORTED 1004
+#define FH_E_TIMEOUT 1005
+#define FH_E_BUSY 1006
+#define FH_E_AGAIN 1007
+#define FH_E_RESTART 1008
+#define FH_E_ABORT 1009
+#define FH_E_SHUTDOWN 1010
+#define FH_E_NO_DATA 1011
+#define FH_E_DISCONNECT 2000
+#define FH_E_UNKNOWN 3000
+#define FH_E_NO_STREAM_RES 4001
+#define FH_E_COMMUNICATION 4002
+#define FH_E_OVERFLOW 4003
+#define FH_E_PROTOCOL 4004
+#define FH_E_IN_PROGRESS 4005
+#define FH_E_PIPE 4006
+#define FH_E_IO 4007
+#define FH_E_NO_SPACE 4008
+
+#endif
+
+
+/** @name Tracing/Logging Functions
+ *
+ * These functions provide the capability to add tracing, debugging, and error
+ * messages, as well as exceptions and assertions. The WUDEV uses these
+ * extensively. These could be logged to the main console, the serial port, an
+ * internal buffer, etc. These functions could also be no-op if they are too
+ * expensive on your system. By default undefining the DEBUG macro already
+ * no-ops some of these functions. */
+
+/** Returns non-zero if in interrupt context. */
+extern fh_bool_t FH_IN_IRQ(void);
+#define fh_in_irq FH_IN_IRQ
+
+/** Returns "IRQ" if FH_IN_IRQ is true. */
+static inline char *fh_irq(void) {
+ return FH_IN_IRQ() ? "IRQ" : "";
+}
+
+/** Returns non-zero if in bottom-half context. */
+extern fh_bool_t FH_IN_BH(void);
+#define fh_in_bh FH_IN_BH
+
+/** Returns "BH" if FH_IN_BH is true. */
+static inline char *fh_bh(void) {
+ return FH_IN_BH() ? "BH" : "";
+}
+
+/**
+ * A vprintf() clone. Just call vprintf if you've got it.
+ */
+extern void FH_VPRINTF(char *format, va_list args);
+#define fh_vprintf FH_VPRINTF
+
+/**
+ * A vsnprintf() clone. Just call vsnprintf if you've got it.
+ */
+extern int FH_VSNPRINTF(char *str, int size, char *format, va_list args);
+#define fh_vsnprintf FH_VSNPRINTF
+
+/**
+ * printf() clone. Just call printf if you've got it.
+ */
+extern void FH_PRINTF(char *format, ...)
+/* This provides compiler level static checking of the parameters if you're
+ * using GCC. */
+#ifdef __GNUC__
+ __attribute__ ((format(printf, 1, 2)));
+#else
+ ;
+#endif
+#define fh_printf FH_PRINTF
+
+/**
+ * sprintf() clone. Just call sprintf if you've got it.
+ */
+extern int FH_SPRINTF(char *string, char *format, ...)
+#ifdef __GNUC__
+ __attribute__ ((format(printf, 2, 3)));
+#else
+ ;
+#endif
+#define fh_sprintf FH_SPRINTF
+
+/**
+ * snprintf() clone. Just call snprintf if you've got it.
+ */
+extern int FH_SNPRINTF(char *string, int size, char *format, ...)
+#ifdef __GNUC__
+ __attribute__ ((format(printf, 3, 4)));
+#else
+ ;
+#endif
+#define fh_snprintf FH_SNPRINTF
+
+/**
+ * Prints a WARNING message. On systems that don't differentiate between
+ * warnings and regular log messages, just print it. Indicates that something
+ * may be wrong with the driver. Works like printf().
+ *
+ * Use the FH_WARN macro to call this function.
+ */
+extern void __FH_WARN(char *format, ...)
+#ifdef __GNUC__
+ __attribute__ ((format(printf, 1, 2)));
+#else
+ ;
+#endif
+
+/**
+ * Prints an error message. On systems that don't differentiate between errors
+ * and regular log messages, just print it. Indicates that something went wrong
+ * with the driver. Works like printf().
+ *
+ * Use the FH_ERROR macro to call this function.
+ */
+extern void __FH_ERROR(char *format, ...)
+#ifdef __GNUC__
+ __attribute__ ((format(printf, 1, 2)));
+#else
+ ;
+#endif
+
+/**
+ * Prints an exception error message and takes some user-defined action such as
+ * print out a backtrace or trigger a breakpoint. Indicates that something went
+ * abnormally wrong with the driver such as programmer error, or other
+ * exceptional condition. It should not be ignored so even on systems without
+ * printing capability, some action should be taken to notify the developer of
+ * it. Works like printf().
+ */
+extern void FH_EXCEPTION(char *format, ...)
+#ifdef __GNUC__
+ __attribute__ ((format(printf, 1, 2)));
+#else
+ ;
+#endif
+#define fh_exception FH_EXCEPTION
+
+#ifdef DEBUG
+/**
+ * Prints out a debug message. Used for logging/trace messages.
+ *
+ * Use the FH_DEBUG macro to call this function
+ */
+extern void __FH_DEBUG(char *format, ...)
+#ifdef __GNUC__
+ __attribute__ ((format(printf, 1, 2)));
+#else
+ ;
+#endif
+#else
+#define __FH_DEBUG(...)
+#endif
+
+/**
+ * Prints out a Debug message.
+ */
+#define FH_DEBUG(_format, _args...) __FH_DEBUG("DEBUG:%s:%s: " _format "\n", \
+ __func__, fh_irq(), ## _args)
+#define fh_debug FH_DEBUG
+/**
+ * Prints out an informative message.
+ */
+#define FH_INFO(_format, _args...) FH_PRINTF("INFO:%s: " _format "\n", \
+ fh_irq(), ## _args)
+#define fh_info FH_INFO
+/**
+ * Prints out a warning message.
+ */
+#define FH_WARN(_format, _args...) __FH_WARN("WARN:%s:%s:%d: " _format "\n", \
+ fh_irq(), __func__, __LINE__, ## _args)
+#define fh_warn FH_WARN
+/**
+ * Prints out an error message.
+ */
+#define FH_ERROR(_format, _args...) __FH_ERROR("ERROR:%s:%s:%d: " _format "\n", \
+ fh_irq(), __func__, __LINE__, ## _args)
+#define fh_error FH_ERROR
+
+#define FH_PROTO_ERROR(_format, _args...) __FH_WARN("ERROR:%s:%s:%d: " _format "\n", \
+ fh_irq(), __func__, __LINE__, ## _args)
+#define fh_proto_error FH_PROTO_ERROR
+
+#ifdef DEBUG
+/** Prints out a exception error message if the _expr expression fails. Disabled
+ * if DEBUG is not enabled. */
+#define FH_ASSERT(_expr, _format, _args...) do { \
+ if (!(_expr)) { FH_EXCEPTION("%s:%s:%d: " _format "\n", fh_irq(), \
+ __FILE__, __LINE__, ## _args); } \
+ } while (0)
+#else
+#define FH_ASSERT(_x...)
+#endif
+#define fh_assert FH_ASSERT
+
+
+/** @name Byte Ordering
+ * The following functions are for conversions between processor's byte ordering
+ * and specific ordering you want.
+ */
+
+/** Converts 32 bit data in CPU byte ordering to little endian. */
+extern uint32_t FH_CPU_TO_LE32(uint32_t *p);
+#define fh_cpu_to_le32 FH_CPU_TO_LE32
+
+/** Converts 32 bit data in CPU byte ordering to big endian. */
+extern uint32_t FH_CPU_TO_BE32(uint32_t *p);
+#define fh_cpu_to_be32 FH_CPU_TO_BE32
+
+/** Converts 32 bit little endian data to CPU byte ordering. */
+extern uint32_t FH_LE32_TO_CPU(uint32_t *p);
+#define fh_le32_to_cpu FH_LE32_TO_CPU
+
+/** Converts 32 bit big endian data to CPU byte ordering. */
+extern uint32_t FH_BE32_TO_CPU(uint32_t *p);
+#define fh_be32_to_cpu FH_BE32_TO_CPU
+
+/** Converts 16 bit data in CPU byte ordering to little endian. */
+extern uint16_t FH_CPU_TO_LE16(uint16_t *p);
+#define fh_cpu_to_le16 FH_CPU_TO_LE16
+
+/** Converts 16 bit data in CPU byte ordering to big endian. */
+extern uint16_t FH_CPU_TO_BE16(uint16_t *p);
+#define fh_cpu_to_be16 FH_CPU_TO_BE16
+
+/** Converts 16 bit little endian data to CPU byte ordering. */
+extern uint16_t FH_LE16_TO_CPU(uint16_t *p);
+#define fh_le16_to_cpu FH_LE16_TO_CPU
+
+/** Converts 16 bit big endian data to CPU byte ordering. */
+extern uint16_t FH_BE16_TO_CPU(uint16_t *p);
+#define fh_be16_to_cpu FH_BE16_TO_CPU
+
+
+/** @name Register Read/Write
+ *
+ * The following six functions should be implemented to read/write registers of
+ * 32-bit and 64-bit sizes. All modules use this to read/write register values.
+ * The reg value is a pointer to the register calculated from the void *base
+ * variable passed into the driver when it is started. */
+
+#ifdef FH_LINUX
+/* Linux doesn't need any extra parameters for register read/write, so we
+ * just throw away the IO context parameter.
+ */
+/** Reads the content of a 32-bit register. */
+extern uint32_t FH_READ_REG32(uint32_t volatile *reg);
+#define fh_read_reg32(_ctx_,_reg_) FH_READ_REG32(_reg_)
+
+/** Reads the content of a 64-bit register. */
+extern uint64_t FH_READ_REG64(uint64_t volatile *reg);
+#define fh_read_reg64(_ctx_,_reg_) FH_READ_REG64(_reg_)
+
+/** Writes to a 32-bit register. */
+extern void FH_WRITE_REG32(uint32_t volatile *reg, uint32_t value);
+#define fh_write_reg32(_ctx_,_reg_,_val_) FH_WRITE_REG32(_reg_, _val_)
+
+/** Writes to a 64-bit register. */
+extern void FH_WRITE_REG64(uint64_t volatile *reg, uint64_t value);
+#define fh_write_reg64(_ctx_,_reg_,_val_) FH_WRITE_REG64(_reg_, _val_)
+
+/**
+ * Modify bit values in a register. Using the
+ * algorithm: (reg_contents & ~clear_mask) | set_mask.
+ */
+extern void FH_MODIFY_REG32(uint32_t volatile *reg, uint32_t clear_mask, uint32_t set_mask);
+#define fh_modify_reg32(_ctx_,_reg_,_cmsk_,_smsk_) FH_MODIFY_REG32(_reg_,_cmsk_,_smsk_)
+extern void FH_MODIFY_REG64(uint64_t volatile *reg, uint64_t clear_mask, uint64_t set_mask);
+#define fh_modify_reg64(_ctx_,_reg_,_cmsk_,_smsk_) FH_MODIFY_REG64(_reg_,_cmsk_,_smsk_)
+
+#endif /* FH_LINUX */
+
+#if defined(FH_FREEBSD) || defined(FH_NETBSD)
+typedef struct fh_ioctx {
+ struct device *dev;
+ bus_space_tag_t iot;
+ bus_space_handle_t ioh;
+} fh_ioctx_t;
+
+/** BSD needs two extra parameters for register read/write, so we pass
+ * them in using the IO context parameter.
+ */
+/** Reads the content of a 32-bit register. */
+extern uint32_t FH_READ_REG32(void *io_ctx, uint32_t volatile *reg);
+#define fh_read_reg32 FH_READ_REG32
+
+/** Reads the content of a 64-bit register. */
+extern uint64_t FH_READ_REG64(void *io_ctx, uint64_t volatile *reg);
+#define fh_read_reg64 FH_READ_REG64
+
+/** Writes to a 32-bit register. */
+extern void FH_WRITE_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t value);
+#define fh_write_reg32 FH_WRITE_REG32
+
+/** Writes to a 64-bit register. */
+extern void FH_WRITE_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t value);
+#define fh_write_reg64 FH_WRITE_REG64
+
+/**
+ * Modify bit values in a register. Using the
+ * algorithm: (reg_contents & ~clear_mask) | set_mask.
+ */
+extern void FH_MODIFY_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t clear_mask, uint32_t set_mask);
+#define fh_modify_reg32 FH_MODIFY_REG32
+extern void FH_MODIFY_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t clear_mask, uint64_t set_mask);
+#define fh_modify_reg64 FH_MODIFY_REG64
+
+#endif /* FH_FREEBSD || FH_NETBSD */
+
+/** @cond */
+
+/** @name Some convenience MACROS used internally. Define FH_DEBUG_REGS to log the
+ * register writes. */
+
+#ifdef FH_LINUX
+
+# ifdef FH_DEBUG_REGS
+
+#define fh_define_read_write_reg_n(_reg,_container_type) \
+static inline uint32_t fh_read_##_reg##_n(_container_type *container, int num) { \
+ return FH_READ_REG32(&container->regs->_reg[num]); \
+} \
+static inline void fh_write_##_reg##_n(_container_type *container, int num, uint32_t data) { \
+ FH_DEBUG("WRITING %8s[%d]: %p: %08x", #_reg, num, \
+ &(((uint32_t*)container->regs->_reg)[num]), data); \
+ FH_WRITE_REG32(&(((uint32_t*)container->regs->_reg)[num]), data); \
+}
+
+#define fh_define_read_write_reg(_reg,_container_type) \
+static inline uint32_t fh_read_##_reg(_container_type *container) { \
+ return FH_READ_REG32(&container->regs->_reg); \
+} \
+static inline void fh_write_##_reg(_container_type *container, uint32_t data) { \
+ FH_DEBUG("WRITING %11s: %p: %08x", #_reg, &container->regs->_reg, data); \
+ FH_WRITE_REG32(&container->regs->_reg, data); \
+}
+
+# else /* FH_DEBUG_REGS */
+
+#define fh_define_read_write_reg_n(_reg,_container_type) \
+static inline uint32_t fh_read_##_reg##_n(_container_type *container, int num) { \
+ return FH_READ_REG32(&container->regs->_reg[num]); \
+} \
+static inline void fh_write_##_reg##_n(_container_type *container, int num, uint32_t data) { \
+ FH_WRITE_REG32(&(((uint32_t*)container->regs->_reg)[num]), data); \
+}
+
+#define fh_define_read_write_reg(_reg,_container_type) \
+static inline uint32_t fh_read_##_reg(_container_type *container) { \
+ return FH_READ_REG32(&container->regs->_reg); \
+} \
+static inline void fh_write_##_reg(_container_type *container, uint32_t data) { \
+ FH_WRITE_REG32(&container->regs->_reg, data); \
+}
+
+# endif /* FH_DEBUG_REGS */
+
+#endif /* FH_LINUX */
+
+#if defined(FH_FREEBSD) || defined(FH_NETBSD)
+
+# ifdef FH_DEBUG_REGS
+
+#define fh_define_read_write_reg_n(_reg,_container_type) \
+static inline uint32_t fh_read_##_reg##_n(void *io_ctx, _container_type *container, int num) { \
+ return FH_READ_REG32(io_ctx, &container->regs->_reg[num]); \
+} \
+static inline void fh_write_##_reg##_n(void *io_ctx, _container_type *container, int num, uint32_t data) { \
+ FH_DEBUG("WRITING %8s[%d]: %p: %08x", #_reg, num, \
+ &(((uint32_t*)container->regs->_reg)[num]), data); \
+ FH_WRITE_REG32(io_ctx, &(((uint32_t*)container->regs->_reg)[num]), data); \
+}
+
+#define fh_define_read_write_reg(_reg,_container_type) \
+static inline uint32_t fh_read_##_reg(void *io_ctx, _container_type *container) { \
+ return FH_READ_REG32(io_ctx, &container->regs->_reg); \
+} \
+static inline void fh_write_##_reg(void *io_ctx, _container_type *container, uint32_t data) { \
+ FH_DEBUG("WRITING %11s: %p: %08x", #_reg, &container->regs->_reg, data); \
+ FH_WRITE_REG32(io_ctx, &container->regs->_reg, data); \
+}
+
+# else /* FH_DEBUG_REGS */
+
+#define fh_define_read_write_reg_n(_reg,_container_type) \
+static inline uint32_t fh_read_##_reg##_n(void *io_ctx, _container_type *container, int num) { \
+ return FH_READ_REG32(io_ctx, &container->regs->_reg[num]); \
+} \
+static inline void fh_write_##_reg##_n(void *io_ctx, _container_type *container, int num, uint32_t data) { \
+ FH_WRITE_REG32(io_ctx, &(((uint32_t*)container->regs->_reg)[num]), data); \
+}
+
+#define fh_define_read_write_reg(_reg,_container_type) \
+static inline uint32_t fh_read_##_reg(void *io_ctx, _container_type *container) { \
+ return FH_READ_REG32(io_ctx, &container->regs->_reg); \
+} \
+static inline void fh_write_##_reg(void *io_ctx, _container_type *container, uint32_t data) { \
+ FH_WRITE_REG32(io_ctx, &container->regs->_reg, data); \
+}
+
+# endif /* FH_DEBUG_REGS */
+
+#endif /* FH_FREEBSD || FH_NETBSD */
+
+/** @endcond */
+
+
+#ifdef FH_CRYPTOLIB
+/** @name Crypto Functions
+ *
+ * These are the low-level cryptographic functions used by the driver. */
+
+/** Perform AES CBC */
+extern int FH_AES_CBC(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t iv[16], uint8_t *out);
+#define fh_aes_cbc FH_AES_CBC
+
+/** Fill the provided buffer with random bytes. These should be cryptographic grade random numbers. */
+extern void FH_RANDOM_BYTES(uint8_t *buffer, uint32_t length);
+#define fh_random_bytes FH_RANDOM_BYTES
+
+/** Perform the SHA-256 hash function */
+extern int FH_SHA256(uint8_t *message, uint32_t len, uint8_t *out);
+#define fh_sha256 FH_SHA256
+
+/** Calculated the HMAC-SHA256 */
+extern int FH_HMAC_SHA256(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t *out);
+#define fh_hmac_sha256 FH_HMAC_SHA256
+
+#endif /* FH_CRYPTOLIB */
+
+
+/** @name Memory Allocation
+ *
+ * These functions provide access to memory allocation. There are only 2 DMA
+ * functions and 3 Regular memory functions that need to be implemented. None
+ * of the memory debugging routines need to be implemented. The allocation
+ * routines all ZERO the contents of the memory.
+ *
+ * Defining FH_DEBUG_MEMORY turns on memory debugging and statistic gathering.
+ * This checks for memory leaks, keeping track of alloc/free pairs. It also
+ * keeps track of how much memory the driver is using at any given time. */
+
+#define FH_PAGE_SIZE 4096
+#define FH_PAGE_OFFSET(addr) (((uint32_t)addr) & 0xfff)
+#define FH_PAGE_ALIGNED(addr) ((((uint32_t)addr) & 0xfff) == 0)
+
+#define FH_INVALID_DMA_ADDR 0x0
+
+#ifdef FH_LINUX
+/** Type for a DMA address */
+typedef dma_addr_t fh_dma_t;
+#endif
+
+#if defined(FH_FREEBSD) || defined(FH_NETBSD)
+typedef bus_addr_t fh_dma_t;
+#endif
+
+#ifdef FH_FREEBSD
+typedef struct fh_dmactx {
+ struct device *dev;
+ bus_dma_tag_t dma_tag;
+ bus_dmamap_t dma_map;
+ bus_addr_t dma_paddr;
+ void *dma_vaddr;
+} fh_dmactx_t;
+#endif
+
+#ifdef FH_NETBSD
+typedef struct fh_dmactx {
+ struct device *dev;
+ bus_dma_tag_t dma_tag;
+ bus_dmamap_t dma_map;
+ bus_dma_segment_t segs[1];
+ int nsegs;
+ bus_addr_t dma_paddr;
+ void *dma_vaddr;
+} fh_dmactx_t;
+#endif
+
+/* @todo these functions will be added in the future */
+#if 0
+/**
+ * Creates a DMA pool from which you can allocate DMA buffers. Buffers
+ * allocated from this pool will be guaranteed to meet the size, alignment, and
+ * boundary requirements specified.
+ *
+ * @param[in] size Specifies the size of the buffers that will be allocated from
+ * this pool.
+ * @param[in] align Specifies the byte alignment requirements of the buffers
+ * allocated from this pool. Must be a power of 2.
+ * @param[in] boundary Specifies the N-byte boundary that buffers allocated from
+ * this pool must not cross.
+ *
+ * @returns A pointer to an internal opaque structure which is not to be
+ * accessed outside of these library functions. Use this handle to specify
+ * which pools to allocate/free DMA buffers from and also to destroy the pool,
+ * when you are done with it.
+ */
+extern fh_pool_t *FH_DMA_POOL_CREATE(uint32_t size, uint32_t align, uint32_t boundary);
+
+/**
+ * Destroy a DMA pool. All buffers allocated from that pool must be freed first.
+ */
+extern void FH_DMA_POOL_DESTROY(fh_pool_t *pool);
+
+/**
+ * Allocate a buffer from the specified DMA pool and zeros its contents.
+ */
+extern void *FH_DMA_POOL_ALLOC(fh_pool_t *pool, uint64_t *dma_addr);
+
+/**
+ * Free a previously allocated buffer from the DMA pool.
+ */
+extern void FH_DMA_POOL_FREE(fh_pool_t *pool, void *vaddr, void *daddr);
+#endif
+
+/** Allocates a DMA capable buffer and zeroes its contents. */
+extern void *__FH_DMA_ALLOC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr);
+
+/** Allocates a DMA capable buffer and zeroes its contents in atomic context */
+extern void *__FH_DMA_ALLOC_ATOMIC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr);
+
+/** Frees a previously allocated buffer. */
+extern void __FH_DMA_FREE(void *dma_ctx, uint32_t size, void *virt_addr, fh_dma_t dma_addr);
+
+/** Allocates a block of memory and zeroes its contents. */
+extern void *__FH_ALLOC(void *mem_ctx, uint32_t size);
+
+/** Allocates a block of memory and zeroes its contents, in an atomic manner
+ * which can be used inside interrupt context. The size should be sufficiently
+ * small, a few KB at most, such that failures are not likely to occur. Can just call
+ * __FH_ALLOC if it is atomic. */
+extern void *__FH_ALLOC_ATOMIC(void *mem_ctx, uint32_t size);
+
+/** Frees a previously allocated buffer. */
+extern void __FH_FREE(void *mem_ctx, void *addr);
+
+#ifndef FH_DEBUG_MEMORY
+
+#define FH_ALLOC(_size_) __FH_ALLOC(NULL, _size_)
+#define FH_ALLOC_ATOMIC(_size_) __FH_ALLOC_ATOMIC(NULL, _size_)
+#define FH_FREE(_addr_) __FH_FREE(NULL, _addr_)
+
+# ifdef FH_LINUX
+#define FH_DMA_ALLOC(_size_,_dma_) __FH_DMA_ALLOC(NULL, _size_, _dma_)
+#define FH_DMA_ALLOC_ATOMIC(_size_,_dma_) __FH_DMA_ALLOC_ATOMIC(NULL, _size_,_dma_)
+#define FH_DMA_FREE(_size_,_virt_,_dma_) __FH_DMA_FREE(NULL, _size_, _virt_, _dma_)
+# endif
+
+# if defined(FH_FREEBSD) || defined(FH_NETBSD)
+#define FH_DMA_ALLOC __FH_DMA_ALLOC
+#define FH_DMA_FREE __FH_DMA_FREE
+# endif
+
+#else /* FH_DEBUG_MEMORY */
+
+extern void *fh_alloc_debug(void *mem_ctx, uint32_t size, char const *func, int line);
+extern void *fh_alloc_atomic_debug(void *mem_ctx, uint32_t size, char const *func, int line);
+extern void fh_free_debug(void *mem_ctx, void *addr, char const *func, int line);
+extern void *fh_dma_alloc_debug(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr,
+ char const *func, int line);
+extern void *fh_dma_alloc_atomic_debug(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr,
+ char const *func, int line);
+extern void fh_dma_free_debug(void *dma_ctx, uint32_t size, void *virt_addr,
+ fh_dma_t dma_addr, char const *func, int line);
+
+extern int fh_memory_debug_start(void *mem_ctx);
+extern void fh_memory_debug_stop(void);
+extern void fh_memory_debug_report(void);
+
+#define FH_ALLOC(_size_) fh_alloc_debug(NULL, _size_, __func__, __LINE__)
+#define FH_ALLOC_ATOMIC(_size_) fh_alloc_atomic_debug(NULL, _size_, \
+ __func__, __LINE__)
+#define FH_FREE(_addr_) fh_free_debug(NULL, _addr_, __func__, __LINE__)
+
+# ifdef FH_LINUX
+#define FH_DMA_ALLOC(_size_,_dma_) fh_dma_alloc_debug(NULL, _size_, \
+ _dma_, __func__, __LINE__)
+#define FH_DMA_ALLOC_ATOMIC(_size_,_dma_) fh_dma_alloc_atomic_debug(NULL, _size_, \
+ _dma_, __func__, __LINE__)
+#define FH_DMA_FREE(_size_,_virt_,_dma_) fh_dma_free_debug(NULL, _size_, \
+ _virt_, _dma_, __func__, __LINE__)
+# endif
+
+# if defined(FH_FREEBSD) || defined(FH_NETBSD)
+#define FH_DMA_ALLOC(_ctx_,_size_,_dma_) fh_dma_alloc_debug(_ctx_, _size_, \
+ _dma_, __func__, __LINE__)
+#define FH_DMA_FREE(_ctx_,_size_,_virt_,_dma_) fh_dma_free_debug(_ctx_, _size_, \
+ _virt_, _dma_, __func__, __LINE__)
+# endif
+
+#endif /* FH_DEBUG_MEMORY */
+
+#define fh_alloc(_ctx_,_size_) FH_ALLOC(_size_)
+#define fh_alloc_atomic(_ctx_,_size_) FH_ALLOC_ATOMIC(_size_)
+#define fh_free(_ctx_,_addr_) FH_FREE(_addr_)
+
+#ifdef FH_LINUX
+/* Linux doesn't need any extra parameters for DMA buffer allocation, so we
+ * just throw away the DMA context parameter.
+ */
+#define fh_dma_alloc(_ctx_,_size_,_dma_) FH_DMA_ALLOC(_size_, _dma_)
+#define fh_dma_alloc_atomic(_ctx_,_size_,_dma_) FH_DMA_ALLOC_ATOMIC(_size_, _dma_)
+#define fh_dma_free(_ctx_,_size_,_virt_,_dma_) FH_DMA_FREE(_size_, _virt_, _dma_)
+#endif
+
+#if defined(FH_FREEBSD) || defined(FH_NETBSD)
+/** BSD needs several extra parameters for DMA buffer allocation, so we pass
+ * them in using the DMA context parameter.
+ */
+#define fh_dma_alloc FH_DMA_ALLOC
+#define fh_dma_free FH_DMA_FREE
+#endif
+
+
+/** @name Memory and String Processing */
+
+/** memset() clone */
+extern void *FH_MEMSET(void *dest, uint8_t byte, uint32_t size);
+#define fh_memset FH_MEMSET
+
+/** memcpy() clone */
+extern void *FH_MEMCPY(void *dest, void const *src, uint32_t size);
+#define fh_memcpy FH_MEMCPY
+
+/** memmove() clone */
+extern void *FH_MEMMOVE(void *dest, void *src, uint32_t size);
+#define fh_memmove FH_MEMMOVE
+
+/** memcmp() clone */
+extern int FH_MEMCMP(void *m1, void *m2, uint32_t size);
+#define fh_memcmp FH_MEMCMP
+
+/** strcmp() clone */
+extern int FH_STRCMP(void *s1, void *s2);
+#define fh_strcmp FH_STRCMP
+
+/** strncmp() clone */
+extern int FH_STRNCMP(void *s1, void *s2, uint32_t size);
+#define fh_strncmp FH_STRNCMP
+
+/** strlen() clone, for NULL terminated ASCII strings */
+extern int FH_STRLEN(char const *str);
+#define fh_strlen FH_STRLEN
+
+/** strcpy() clone, for NULL terminated ASCII strings */
+extern char *FH_STRCPY(char *to, const char *from);
+#define fh_strcpy FH_STRCPY
+
+/** strdup() clone. If you wish to use memory allocation debugging, this
+ * implementation of strdup should use the FH_* memory routines instead of
+ * calling a predefined strdup. Otherwise the memory allocated by this routine
+ * will not be seen by the debugging routines. */
+extern char *FH_STRDUP(char const *str);
+#define fh_strdup(_ctx_,_str_) FH_STRDUP(_str_)
+
+/** NOT an atoi() clone. Read the description carefully. Returns an integer
+ * converted from the string str in base 10 unless the string begins with a "0x"
+ * in which case it is base 16. String must be a NULL terminated sequence of
+ * ASCII characters and may optionally begin with whitespace, a + or -, and a
+ * "0x" prefix if base 16. The remaining characters must be valid digits for
+ * the number and end with a NULL character. If any invalid characters are
+ * encountered, it returns a negative error code and the results of the
+ * conversion are undefined. On success it returns 0. Overflow conditions are
+ * undefined. An example implementation using atoi() can be referenced from the
+ * Linux implementation. */
+extern int FH_ATOI(const char *str, int32_t *value);
+#define fh_atoi FH_ATOI
+
+/** Same as above but for unsigned. */
+extern int FH_ATOUI(const char *str, uint32_t *value);
+#define fh_atoui FH_ATOUI
+
+#ifdef FH_UTFLIB
+/** This routine returns a UTF16LE unicode encoded string from a UTF8 string. */
+extern int FH_UTF8_TO_UTF16LE(uint8_t const *utf8string, uint16_t *utf16string, unsigned len);
+#define fh_utf8_to_utf16le FH_UTF8_TO_UTF16LE
+#endif
+
+
+/** @name Wait queues
+ *
+ * Wait queues provide a means of synchronizing between threads or processes. A
+ * process can block on a waitq if some condition is not true, waiting for it to
+ * become true. When the waitq is triggered all waiting processes will get
+ * unblocked and the condition will be checked again. Waitqs should be triggered
+ * every time a condition can potentially change.*/
+struct fh_waitq;
+
+/** Type for a waitq */
+typedef struct fh_waitq fh_waitq_t;
+
+/** The type of waitq condition callback function. This is called every time
+ * condition is evaluated. */
+typedef int (*fh_waitq_condition_t)(void *data);
+
+/** Allocate a waitq */
+extern fh_waitq_t *FH_WAITQ_ALLOC(void);
+#define fh_waitq_alloc(_ctx_) FH_WAITQ_ALLOC()
+
+/** Free a waitq */
+extern void FH_WAITQ_FREE(fh_waitq_t *wq);
+#define fh_waitq_free FH_WAITQ_FREE
+
+/** Check the condition and if it is false, block on the waitq. When unblocked, check the
+ * condition again. The function returns when the condition becomes true. The return value
+ * is 0 on condition true, FH_WAITQ_ABORTED on abort or killed, or FH_WAITQ_UNKNOWN on error. */
+extern int32_t FH_WAITQ_WAIT(fh_waitq_t *wq, fh_waitq_condition_t cond, void *data);
+#define fh_waitq_wait FH_WAITQ_WAIT
+
+/** Check the condition and if it is false, block on the waitq. When unblocked,
+ * check the condition again. The function returns when the condition become
+ * true or the timeout has passed. The return value is 0 on condition true or
+ * FH_TIMED_OUT on timeout, or FH_WAITQ_ABORTED, or FH_WAITQ_UNKNOWN on
+ * error. */
+extern int32_t FH_WAITQ_WAIT_TIMEOUT(fh_waitq_t *wq, fh_waitq_condition_t cond,
+ void *data, int32_t msecs);
+#define fh_waitq_wait_timeout FH_WAITQ_WAIT_TIMEOUT
+
+/** Trigger a waitq, unblocking all processes. This should be called whenever a condition
+ * has potentially changed. */
+extern void FH_WAITQ_TRIGGER(fh_waitq_t *wq);
+#define fh_waitq_trigger FH_WAITQ_TRIGGER
+
+/** Unblock all processes waiting on the waitq with an ABORTED result. */
+extern void FH_WAITQ_ABORT(fh_waitq_t *wq);
+#define fh_waitq_abort FH_WAITQ_ABORT
+
+
+/** @name Threads
+ *
+ * A thread must be explicitly stopped. It must check FH_THREAD_SHOULD_STOP
+ * whenever it is woken up, and then return. The FH_THREAD_STOP function
+ * returns the value from the thread.
+ */
+
+struct fh_thread;
+
+/** Type for a thread */
+typedef struct fh_thread fh_thread_t;
+
+/** The thread function */
+typedef int (*fh_thread_function_t)(void *data);
+
+/** Create a thread and start it running the thread_function. Returns a handle
+ * to the thread */
+extern fh_thread_t *FH_THREAD_RUN(fh_thread_function_t func, char *name, void *data);
+#define fh_thread_run(_ctx_,_func_,_name_,_data_) FH_THREAD_RUN(_func_, _name_, _data_)
+
+/** Stops a thread. Returns the value returned by the thread, or returns
+ * FH_ABORT if the thread never started. */
+extern int FH_THREAD_STOP(fh_thread_t *thread);
+#define fh_thread_stop FH_THREAD_STOP
+
+/** Signifies to the thread that it must stop. */
+#ifdef FH_LINUX
+/* Linux doesn't need any parameters for kthread_should_stop() */
+extern fh_bool_t FH_THREAD_SHOULD_STOP(void);
+#define fh_thread_should_stop(_thrd_) FH_THREAD_SHOULD_STOP()
+
+/* No thread_exit function in Linux */
+#define fh_thread_exit(_thrd_)
+#endif
+
+#if defined(FH_FREEBSD) || defined(FH_NETBSD)
+/** BSD needs the thread pointer for kthread_suspend_check() */
+extern fh_bool_t FH_THREAD_SHOULD_STOP(fh_thread_t *thread);
+#define fh_thread_should_stop FH_THREAD_SHOULD_STOP
+
+/** The thread must call this to exit. */
+extern void FH_THREAD_EXIT(fh_thread_t *thread);
+#define fh_thread_exit FH_THREAD_EXIT
+#endif
+
+
+/** @name Work queues
+ *
+ * Workqs are used to queue a callback function to be called at some later time,
+ * in another thread. */
+struct fh_workq;
+
+/** Type for a system work */
+struct work_struct;
+
+/** Type for a workq */
+typedef struct fh_workq fh_workq_t;
+
+/** The type of the callback function to be called. */
+typedef void (*fh_work_callback_t)(void *data);
+
+/** Allocate a workq */
+extern fh_workq_t *FH_WORKQ_ALLOC(char *name);
+#define fh_workq_alloc(_ctx_,_name_) FH_WORKQ_ALLOC(_name_)
+
+/** Free a workq. All work must be completed before being freed. */
+extern void FH_WORKQ_FREE(fh_workq_t *workq);
+#define fh_workq_free FH_WORKQ_FREE
+
+/** Schedule a callback on the workq, passing in data. The function will be
+ * scheduled at some later time. */
+extern void FH_WORKQ_SCHEDULE(fh_workq_t *workq, fh_work_callback_t cb,
+ void *data, char *format, ...)
+#ifdef __GNUC__
+ __attribute__ ((format(printf, 4, 5)));
+#else
+ ;
+#endif
+
+#define fh_schedule_system_work FH_SCHEDULE_SYSTEM_WORK
+/** Schedule a work on the system workq */
+extern bool FH_SCHEDULE_SYSTEM_WORK(struct work_struct *work);
+
+#define fh_workq_schedule FH_WORKQ_SCHEDULE
+
+/** Schedule a callback on the workq, that will not be called until at least
+ * the given number of milliseconds have passed. */
+extern void FH_WORKQ_SCHEDULE_DELAYED(fh_workq_t *workq, fh_work_callback_t cb,
+ void *data, uint32_t time, char *format, ...)
+#ifdef __GNUC__
+ __attribute__ ((format(printf, 5, 6)));
+#else
+ ;
+#endif
+#define fh_workq_schedule_delayed FH_WORKQ_SCHEDULE_DELAYED
+
+/** The number of processes in the workq */
+extern int FH_WORKQ_PENDING(fh_workq_t *workq);
+#define fh_workq_pending FH_WORKQ_PENDING
+
+/** Blocks until all the work in the workq is complete or timed out. Returns <
+ * 0 on timeout. */
+extern int FH_WORKQ_WAIT_WORK_DONE(fh_workq_t *workq, int timeout);
+#define fh_workq_wait_work_done FH_WORKQ_WAIT_WORK_DONE
+
+
+/** @name Tasklets
+ *
+ */
+struct fh_tasklet;
+
+/** Type for a tasklet */
+typedef struct fh_tasklet fh_tasklet_t;
+
+/** The type of the callback function to be called */
+typedef void (*fh_tasklet_callback_t)(void *data);
+
+/** Allocates a tasklet */
+extern fh_tasklet_t *FH_TASK_ALLOC(char *name, fh_tasklet_callback_t cb, void *data);
+#define fh_task_alloc(_ctx_,_name_,_cb_,_data_) FH_TASK_ALLOC(_name_, _cb_, _data_)
+
+/** Frees a tasklet */
+extern void FH_TASK_FREE(fh_tasklet_t *task);
+#define fh_task_free FH_TASK_FREE
+
+/** Schedules a tasklet to run */
+extern void FH_TASK_SCHEDULE(fh_tasklet_t *task);
+#define fh_task_schedule FH_TASK_SCHEDULE
+
+
+/** @name Timer
+ *
+ * Callbacks must be small and atomic.
+ */
+struct fh_timer;
+
+/** Type for a timer */
+typedef struct fh_timer fh_timer_t;
+
+/** The type of the callback function to be called */
+typedef void (*fh_timer_callback_t)(void *data);
+
+/** Allocates a timer */
+extern fh_timer_t *FH_TIMER_ALLOC(char *name, fh_timer_callback_t cb, void *data);
+#define fh_timer_alloc(_ctx_,_name_,_cb_,_data_) FH_TIMER_ALLOC(_name_,_cb_,_data_)
+
+/** Frees a timer */
+extern void FH_TIMER_FREE(fh_timer_t *timer);
+#define fh_timer_free FH_TIMER_FREE
+
+/** Schedules the timer to run at time ms from now, and will repeat every
+ * repeat_interval msec thereafter.
+ *
+ * Modifies a timer that is still awaiting execution to a new expiration time.
+ * The mod_time is added to the old time. */
+extern void FH_TIMER_SCHEDULE(fh_timer_t *timer, uint32_t time);
+#define fh_timer_schedule FH_TIMER_SCHEDULE
+
+/** Disables the timer from execution. */
+extern void FH_TIMER_CANCEL(fh_timer_t *timer);
+#define fh_timer_cancel FH_TIMER_CANCEL
+
+
+/** @name Spinlocks
+ *
+ * These locks are used when the work between the lock/unlock is atomic and
+ * short. Interrupts are also disabled during the lock/unlock and thus they are
+ * suitable to lock between interrupt/non-interrupt context. They also lock
+ * between processes if you have multiple CPUs or Preemption. If you don't have
+ * multiple CPUs or Preemption, then you can simply implement the
+ * FH_SPINLOCK and FH_SPINUNLOCK to disable and enable interrupts. Because
+ * the work between the lock/unlock is atomic, the process context will never
+ * change, and so you never have to lock between processes. */
+
+struct fh_spinlock;
+
+/** Type for a spinlock */
+typedef struct fh_spinlock fh_spinlock_t;
+
+/** Type for the 'flags' argument to spinlock functions */
+typedef unsigned long fh_irqflags_t;
+
+/** Returns an initialized lock variable. This function should allocate and
+ * initialize the OS-specific data structure used for locking. This data
+ * structure is to be used for the FH_LOCK and FH_UNLOCK functions and should
+ * be freed by the FH_FREE_LOCK when it is no longer used. */
+extern fh_spinlock_t *FH_SPINLOCK_ALLOC(void);
+#define fh_spinlock_alloc(_ctx_) FH_SPINLOCK_ALLOC()
+
+/** Frees an initialized lock variable. */
+extern void FH_SPINLOCK_FREE(fh_spinlock_t *lock);
+#define fh_spinlock_free(_ctx_,_lock_) FH_SPINLOCK_FREE(_lock_)
+
+/** Disables interrupts and blocks until it acquires the lock.
+ *
+ * @param lock Pointer to the spinlock.
+ * @param flags Unsigned long for irq flags storage.
+ */
+extern void FH_SPINLOCK_IRQSAVE(fh_spinlock_t *lock, fh_irqflags_t *flags);
+#define fh_spinlock_irqsave FH_SPINLOCK_IRQSAVE
+
+/** Re-enables the interrupt and releases the lock.
+ *
+ * @param lock Pointer to the spinlock.
+ * @param flags Unsigned long for irq flags storage. Must be the same as was
+ * passed into FH_LOCK.
+ */
+extern void FH_SPINUNLOCK_IRQRESTORE(fh_spinlock_t *lock, fh_irqflags_t flags);
+#define fh_spinunlock_irqrestore FH_SPINUNLOCK_IRQRESTORE
+
+/** Blocks until it acquires the lock.
+ *
+ * @param lock Pointer to the spinlock.
+ */
+extern void FH_SPINLOCK(fh_spinlock_t *lock);
+#define fh_spinlock FH_SPINLOCK
+
+/** Releases the lock.
+ *
+ * @param lock Pointer to the spinlock.
+ */
+extern void FH_SPINUNLOCK(fh_spinlock_t *lock);
+#define fh_spinunlock FH_SPINUNLOCK
+
+
+/** @name Mutexes
+ *
+ * Unlike spinlocks Mutexes lock only between processes and the work between the
+ * lock/unlock CAN block, therefore it CANNOT be called from interrupt context.
+ */
+
+struct fh_mutex;
+
+/** Type for a mutex */
+typedef struct fh_mutex fh_mutex_t;
+
+/* For Linux Mutex Debugging make it inline because the debugging routines use
+ * the symbol to determine recursive locking. This makes it falsely think
+ * recursive locking occurs. */
+#if defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES)
+#define FH_MUTEX_ALLOC_LINUX_DEBUG(__mutexp) ({ \
+ __mutexp = (fh_mutex_t *)FH_ALLOC(sizeof(struct mutex)); \
+ mutex_init((struct mutex *)__mutexp); \
+})
+#endif
+
+/** Allocate a mutex */
+extern fh_mutex_t *FH_MUTEX_ALLOC(void);
+#define fh_mutex_alloc(_ctx_) FH_MUTEX_ALLOC()
+
+/* For memory leak debugging when using Linux Mutex Debugging */
+#if defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES)
+#define FH_MUTEX_FREE(__mutexp) do { \
+ mutex_destroy((struct mutex *)__mutexp); \
+ FH_FREE(__mutexp); \
+} while(0)
+#else
+/** Free a mutex */
+extern void FH_MUTEX_FREE(fh_mutex_t *mutex);
+#define fh_mutex_free(_ctx_,_mutex_) FH_MUTEX_FREE(_mutex_)
+#endif
+
+/** Lock a mutex */
+extern void FH_MUTEX_LOCK(fh_mutex_t *mutex);
+#define fh_mutex_lock FH_MUTEX_LOCK
+
+/** Non-blocking lock returns 1 on successful lock. */
+extern int FH_MUTEX_TRYLOCK(fh_mutex_t *mutex);
+#define fh_mutex_trylock FH_MUTEX_TRYLOCK
+
+/** Unlock a mutex */
+extern void FH_MUTEX_UNLOCK(fh_mutex_t *mutex);
+#define fh_mutex_unlock FH_MUTEX_UNLOCK
+
+
+/** @name Time */
+
+/** Microsecond delay.
+ *
+ * @param usecs Microseconds to delay.
+ */
+extern void FH_UDELAY(uint32_t usecs);
+#define fh_udelay FH_UDELAY
+
+/** Millisecond delay.
+ *
+ * @param msecs Milliseconds to delay.
+ */
+extern void FH_MDELAY(uint32_t msecs);
+#define fh_mdelay FH_MDELAY
+
+/** Non-busy waiting.
+ * Sleeps for specified number of milliseconds.
+ *
+ * @param msecs Milliseconds to sleep.
+ */
+extern void FH_MSLEEP(uint32_t msecs);
+#define fh_msleep FH_MSLEEP
+
+/**
+ * Returns number of milliseconds since boot.
+ */
+extern uint32_t FH_TIME(void);
+#define fh_time FH_TIME
+
+
+
+
+/* @mainpage FH Portability and Common Library
+ *
+ * This is the documentation for the FH Portability and Common Library.
+ *
+ * @section intro Introduction
+ *
+ * The FH Portability library consists of wrapper calls and data structures to
+ * all low-level functions which are typically provided by the OS. The WUDEV
+ * driver uses only these functions. In order to port the WUDEV driver, only
+ * the functions in this library need to be re-implemented, with the same
+ * behavior as documented here.
+ *
+ * The Common library consists of higher level functions, which rely only on
+ * calling the functions from the FH Portability library. These common
+ * routines are shared across modules. Some of the common libraries need to be
+ * used directly by the driver programmer when porting WUDEV. Such as the
+ * parameter and notification libraries.
+ *
+ * @section low Portability Library OS Wrapper Functions
+ *
+ * Any function starting with FH and in all CAPS is a low-level OS-wrapper that
+ * needs to be implemented when porting, for example FH_MUTEX_ALLOC(). All of
+ * these functions are included in the fh_os.h file.
+ *
+ * There are many functions here covering a wide array of OS services. Please
+ * see fh_os.h for details, and implementation notes for each function.
+ *
+ * @section common Common Library Functions
+ *
+ * Any function starting with fh and in all lowercase is a common library
+ * routine. These functions have a portable implementation and do not need to
+ * be reimplemented when porting. The common routines can be used by any
+ * driver, and some must be used by the end user to control the drivers. For
+ * example, you must use the Parameter common library in order to set the
+ * parameters in the WUDEV module.
+ *
+ * The common libraries consist of the following:
+ *
+ * - Connection Contexts - Used internally and can be used by end-user. See fh_cc.h
+ * - Parameters - Used internally and can be used by end-user. See fh_params.h
+ * - Notifications - Used internally and can be used by end-user. See fh_notifier.h
+ * - Lists - Used internally and can be used by end-user. See fh_list.h
+ * - Memory Debugging - Used internally and can be used by end-user. See fh_os.h
+ * - Modpow - Used internally only. See fh_modpow.h
+ * - DH - Used internally only. See fh_dh.h
+ * - Crypto - Used internally only. See fh_crypto.h
+ *
+ *
+ * @section prereq Prerequisites For fh_os.h
+ * @subsection types Data Types
+ *
+ * The fh_os.h file assumes that several low-level data types are predefined for the
+ * compilation environment. These data types are:
+ *
+ * - uint8_t - unsigned 8-bit data type
+ * - int8_t - signed 8-bit data type
+ * - uint16_t - unsigned 16-bit data type
+ * - int16_t - signed 16-bit data type
+ * - uint32_t - unsigned 32-bit data type
+ * - int32_t - signed 32-bit data type
+ * - uint64_t - unsigned 64-bit data type
+ * - int64_t - signed 64-bit data type
+ *
+ * Ensure that these are defined before using fh_os.h. The easiest way to do
+ * that is to modify the top of the file to include the appropriate header.
+ * This is already done for the Linux environment. If the FH_LINUX macro is
+ * defined, the correct header will be added. A standard header <stdint.h> is
+ * also used for environments where standard C headers are available.
+ *
+ * @subsection stdarg Variable Arguments
+ *
+ * Variable arguments are provided by a standard C header <stdarg.h>. It is
+ * available in both the Linux and ANSI C environments. An equivalent must be
+ * provided in your environment in order to use fh_os.h with the debug and
+ * tracing message functionality.
+ *
+ * @subsection thread Threading
+ *
+ * WUDEV Core must be run on an operating system that provides for multiple
+ * threads/processes. Threading can be implemented in many ways, even in
+ * embedded systems without an operating system. At the bare minimum, the
+ * system should be able to start any number of processes at any time to handle
+ * special work. It need not be a pre-emptive system. Process context can
+ * change upon a call to a blocking function. The hardware interrupt context
+ * that calls the module's ISR() function must be differentiable from process
+ * context, even if your processes are implemented via a hardware interrupt.
+ * Further, locking mechanisms between processes must exist (or be implemented), and
+ * process context must have a way to disable interrupts for a period of time to
+ * lock them out. If all of this exists, the functions in fh_os.h related to
+ * threading should be able to be implemented with the defined behavior.
+ *
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FH_OS_H_ */
diff --git a/drivers/usb/host/fh_otg/fh_common_port/usb.h b/drivers/usb/host/fh_otg/fh_common_port/usb.h
new file mode 100644
index 00000000..27bda82d
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_common_port/usb.h
@@ -0,0 +1,946 @@
+/*
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net) at
+ * Carlstedt Research & Technology.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Modified by Synopsys, Inc, 12/12/2007 */
+
+
+#ifndef _USB_H_
+#define _USB_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The USB records contain some unaligned little-endian word
+ * components. The U[SG]ETW macros take care of both the alignment
+ * and endian problem and should always be used to access non-byte
+ * values.
+ */
+typedef u_int8_t uByte;
+typedef u_int8_t uWord[2];
+typedef u_int8_t uDWord[4];
+
+#define USETW2(w,h,l) ((w)[0] = (u_int8_t)(l), (w)[1] = (u_int8_t)(h))
+#define UCONSTW(x) { (x) & 0xff, ((x) >> 8) & 0xff }
+#define UCONSTDW(x) { (x) & 0xff, ((x) >> 8) & 0xff, \
+ ((x) >> 16) & 0xff, ((x) >> 24) & 0xff }
+
+#if 1
+#define UGETW(w) ((w)[0] | ((w)[1] << 8))
+#define USETW(w,v) ((w)[0] = (u_int8_t)(v), (w)[1] = (u_int8_t)((v) >> 8))
+#define UGETDW(w) ((w)[0] | ((w)[1] << 8) | ((w)[2] << 16) | ((w)[3] << 24))
+#define USETDW(w,v) ((w)[0] = (u_int8_t)(v), \
+ (w)[1] = (u_int8_t)((v) >> 8), \
+ (w)[2] = (u_int8_t)((v) >> 16), \
+ (w)[3] = (u_int8_t)((v) >> 24))
+#else
+/*
+ * On little-endian machines that can handle unaligned accesses
+ * (e.g. i386) these macros can be replaced by the following.
+ */
+#define UGETW(w) (*(u_int16_t *)(w))
+#define USETW(w,v) (*(u_int16_t *)(w) = (v))
+#define UGETDW(w) (*(u_int32_t *)(w))
+#define USETDW(w,v) (*(u_int32_t *)(w) = (v))
+#endif
+
+/*
+ * Macros for accessing UAS IU fields, which are big-endian
+ */
+#define IUSETW2(w,h,l) ((w)[0] = (u_int8_t)(h), (w)[1] = (u_int8_t)(l))
+#define IUCONSTW(x) { ((x) >> 8) & 0xff, (x) & 0xff }
+#define IUCONSTDW(x) { ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
+ ((x) >> 8) & 0xff, (x) & 0xff }
+#define IUGETW(w) (((w)[0] << 8) | (w)[1])
+#define IUSETW(w,v) ((w)[0] = (u_int8_t)((v) >> 8), (w)[1] = (u_int8_t)(v))
+#define IUGETDW(w) (((w)[0] << 24) | ((w)[1] << 16) | ((w)[2] << 8) | (w)[3])
+#define IUSETDW(w,v) ((w)[0] = (u_int8_t)((v) >> 24), \
+ (w)[1] = (u_int8_t)((v) >> 16), \
+ (w)[2] = (u_int8_t)((v) >> 8), \
+ (w)[3] = (u_int8_t)(v))
+
+#define UPACKED __attribute__((__packed__))
+
+typedef struct {
+ uByte bmRequestType;
+ uByte bRequest;
+ uWord wValue;
+ uWord wIndex;
+ uWord wLength;
+} UPACKED usb_device_request_t;
+
+#define UT_GET_DIR(a) ((a) & 0x80)
+#define UT_WRITE 0x00
+#define UT_READ 0x80
+
+#define UT_GET_TYPE(a) ((a) & 0x60)
+#define UT_STANDARD 0x00
+#define UT_CLASS 0x20
+#define UT_VENDOR 0x40
+
+#define UT_GET_RECIPIENT(a) ((a) & 0x1f)
+#define UT_DEVICE 0x00
+#define UT_INTERFACE 0x01
+#define UT_ENDPOINT 0x02
+#define UT_OTHER 0x03
+
+#define UT_READ_DEVICE (UT_READ | UT_STANDARD | UT_DEVICE)
+#define UT_READ_INTERFACE (UT_READ | UT_STANDARD | UT_INTERFACE)
+#define UT_READ_ENDPOINT (UT_READ | UT_STANDARD | UT_ENDPOINT)
+#define UT_WRITE_DEVICE (UT_WRITE | UT_STANDARD | UT_DEVICE)
+#define UT_WRITE_INTERFACE (UT_WRITE | UT_STANDARD | UT_INTERFACE)
+#define UT_WRITE_ENDPOINT (UT_WRITE | UT_STANDARD | UT_ENDPOINT)
+#define UT_READ_CLASS_DEVICE (UT_READ | UT_CLASS | UT_DEVICE)
+#define UT_READ_CLASS_INTERFACE (UT_READ | UT_CLASS | UT_INTERFACE)
+#define UT_READ_CLASS_OTHER (UT_READ | UT_CLASS | UT_OTHER)
+#define UT_READ_CLASS_ENDPOINT (UT_READ | UT_CLASS | UT_ENDPOINT)
+#define UT_WRITE_CLASS_DEVICE (UT_WRITE | UT_CLASS | UT_DEVICE)
+#define UT_WRITE_CLASS_INTERFACE (UT_WRITE | UT_CLASS | UT_INTERFACE)
+#define UT_WRITE_CLASS_OTHER (UT_WRITE | UT_CLASS | UT_OTHER)
+#define UT_WRITE_CLASS_ENDPOINT (UT_WRITE | UT_CLASS | UT_ENDPOINT)
+#define UT_READ_VENDOR_DEVICE (UT_READ | UT_VENDOR | UT_DEVICE)
+#define UT_READ_VENDOR_INTERFACE (UT_READ | UT_VENDOR | UT_INTERFACE)
+#define UT_READ_VENDOR_OTHER (UT_READ | UT_VENDOR | UT_OTHER)
+#define UT_READ_VENDOR_ENDPOINT (UT_READ | UT_VENDOR | UT_ENDPOINT)
+#define UT_WRITE_VENDOR_DEVICE (UT_WRITE | UT_VENDOR | UT_DEVICE)
+#define UT_WRITE_VENDOR_INTERFACE (UT_WRITE | UT_VENDOR | UT_INTERFACE)
+#define UT_WRITE_VENDOR_OTHER (UT_WRITE | UT_VENDOR | UT_OTHER)
+#define UT_WRITE_VENDOR_ENDPOINT (UT_WRITE | UT_VENDOR | UT_ENDPOINT)
+
+/* Requests */
+#define UR_GET_STATUS 0x00
+#define USTAT_STANDARD_STATUS 0x00
+#define WUSTAT_WUSB_FEATURE 0x01
+#define WUSTAT_CHANNEL_INFO 0x02
+#define WUSTAT_RECEIVED_DATA 0x03
+#define WUSTAT_MAS_AVAILABILITY 0x04
+#define WUSTAT_CURRENT_TRANSMIT_POWER 0x05
+#define UR_CLEAR_FEATURE 0x01
+#define UR_SET_FEATURE 0x03
+#define UR_SET_AND_TEST_FEATURE 0x0c
+#define UR_SET_ADDRESS 0x05
+#define UR_GET_DESCRIPTOR 0x06
+#define UDESC_DEVICE 0x01
+#define UDESC_CONFIG 0x02
+#define UDESC_STRING 0x03
+#define UDESC_INTERFACE 0x04
+#define UDESC_ENDPOINT 0x05
+#define UDESC_SS_USB_COMPANION 0x30
+#define UDESC_DEVICE_QUALIFIER 0x06
+#define UDESC_OTHER_SPEED_CONFIGURATION 0x07
+#define UDESC_INTERFACE_POWER 0x08
+#define UDESC_OTG 0x09
+#define WUDESC_SECURITY 0x0c
+#define WUDESC_KEY 0x0d
+#define WUD_GET_KEY_INDEX(_wValue_) ((_wValue_) & 0xf)
+#define WUD_GET_KEY_TYPE(_wValue_) (((_wValue_) & 0x30) >> 4)
+#define WUD_KEY_TYPE_ASSOC 0x01
+#define WUD_KEY_TYPE_GTK 0x02
+#define WUD_GET_KEY_ORIGIN(_wValue_) (((_wValue_) & 0x40) >> 6)
+#define WUD_KEY_ORIGIN_HOST 0x00
+#define WUD_KEY_ORIGIN_DEVICE 0x01
+#define WUDESC_ENCRYPTION_TYPE 0x0e
+#define WUDESC_BOS 0x0f
+#define WUDESC_DEVICE_CAPABILITY 0x10
+#define WUDESC_WIRELESS_ENDPOINT_COMPANION 0x11
+#define UDESC_BOS 0x0f
+#define UDESC_DEVICE_CAPABILITY 0x10
+#define UDESC_CS_DEVICE 0x21 /* class specific */
+#define UDESC_CS_CONFIG 0x22
+#define UDESC_CS_STRING 0x23
+#define UDESC_CS_INTERFACE 0x24
+#define UDESC_CS_ENDPOINT 0x25
+#define UDESC_HUB 0x29
+#define UR_SET_DESCRIPTOR 0x07
+#define UR_GET_CONFIG 0x08
+#define UR_SET_CONFIG 0x09
+#define UR_GET_INTERFACE 0x0a
+#define UR_SET_INTERFACE 0x0b
+#define UR_SYNCH_FRAME 0x0c
+#define WUR_SET_ENCRYPTION 0x0d
+#define WUR_GET_ENCRYPTION 0x0e
+#define WUR_SET_HANDSHAKE 0x0f
+#define WUR_GET_HANDSHAKE 0x10
+#define WUR_SET_CONNECTION 0x11
+#define WUR_SET_SECURITY_DATA 0x12
+#define WUR_GET_SECURITY_DATA 0x13
+#define WUR_SET_WUSB_DATA 0x14
+#define WUDATA_DRPIE_INFO 0x01
+#define WUDATA_TRANSMIT_DATA 0x02
+#define WUDATA_TRANSMIT_PARAMS 0x03
+#define WUDATA_RECEIVE_PARAMS 0x04
+#define WUDATA_TRANSMIT_POWER 0x05
+#define WUR_LOOPBACK_DATA_WRITE 0x15
+#define WUR_LOOPBACK_DATA_READ 0x16
+#define WUR_SET_INTERFACE_DS 0x17
+
+/* Feature numbers */
+#define UF_ENDPOINT_HALT 0
+#define UF_DEVICE_REMOTE_WAKEUP 1
+#define UF_TEST_MODE 2
+#define UF_DEVICE_B_HNP_ENABLE 3
+#define UF_DEVICE_A_HNP_SUPPORT 4
+#define UF_DEVICE_A_ALT_HNP_SUPPORT 5
+#define WUF_WUSB 3
+#define WUF_TX_DRPIE 0x0
+#define WUF_DEV_XMIT_PACKET 0x1
+#define WUF_COUNT_PACKETS 0x2
+#define WUF_CAPTURE_PACKETS 0x3
+#define UF_FUNCTION_SUSPEND 0
+#define UF_U1_ENABLE 48
+#define UF_U2_ENABLE 49
+#define UF_LTM_ENABLE 50
+
+/* Class requests from the USB 2.0 hub spec, table 11-15 */
+#define UCR_CLEAR_HUB_FEATURE (0x2000 | UR_CLEAR_FEATURE)
+#define UCR_CLEAR_PORT_FEATURE (0x2300 | UR_CLEAR_FEATURE)
+#define UCR_GET_HUB_DESCRIPTOR (0xa000 | UR_GET_DESCRIPTOR)
+#define UCR_GET_HUB_STATUS (0xa000 | UR_GET_STATUS)
+#define UCR_GET_PORT_STATUS (0xa300 | UR_GET_STATUS)
+#define UCR_SET_HUB_FEATURE (0x2000 | UR_SET_FEATURE)
+#define UCR_SET_PORT_FEATURE (0x2300 | UR_SET_FEATURE)
+#define UCR_SET_AND_TEST_PORT_FEATURE (0xa300 | UR_SET_AND_TEST_FEATURE)
+
+#ifdef _MSC_VER
+#include <pshpack1.h>
+#endif
+
+typedef struct {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+} UPACKED usb_descriptor_t;
+
+typedef struct {
+ uByte bLength;
+ uByte bDescriptorType;
+} UPACKED usb_descriptor_header_t;
+
+typedef struct {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord bcdUSB;
+#define UD_USB_2_0 0x0200
+#define UD_IS_USB2(d) (UGETW((d)->bcdUSB) >= UD_USB_2_0)
+ uByte bDeviceClass;
+ uByte bDeviceSubClass;
+ uByte bDeviceProtocol;
+ uByte bMaxPacketSize;
+ /* The fields below are not part of the initial descriptor. */
+ uWord idVendor;
+ uWord idProduct;
+ uWord bcdDevice;
+ uByte iManufacturer;
+ uByte iProduct;
+ uByte iSerialNumber;
+ uByte bNumConfigurations;
+} UPACKED usb_device_descriptor_t;
+#define USB_DEVICE_DESCRIPTOR_SIZE 18
+
+typedef struct {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord wTotalLength;
+ uByte bNumInterface;
+ uByte bConfigurationValue;
+ uByte iConfiguration;
+#define UC_ATT_ONE (1 << 7) /* must be set */
+#define UC_ATT_SELFPOWER (1 << 6) /* self powered */
+#define UC_ATT_WAKEUP (1 << 5) /* can wakeup */
+#define UC_ATT_BATTERY (1 << 4) /* battery powered */
+ uByte bmAttributes;
+#define UC_BUS_POWERED 0x80
+#define UC_SELF_POWERED 0x40
+#define UC_REMOTE_WAKEUP 0x20
+ uByte bMaxPower; /* max current in 2 mA units */
+#define UC_POWER_FACTOR 2
+} UPACKED usb_config_descriptor_t;
+#define USB_CONFIG_DESCRIPTOR_SIZE 9
+
+typedef struct {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bInterfaceNumber;
+ uByte bAlternateSetting;
+ uByte bNumEndpoints;
+ uByte bInterfaceClass;
+ uByte bInterfaceSubClass;
+ uByte bInterfaceProtocol;
+ uByte iInterface;
+} UPACKED usb_interface_descriptor_t;
+#define USB_INTERFACE_DESCRIPTOR_SIZE 9
+
+typedef struct {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bEndpointAddress;
+#define UE_GET_DIR(a) ((a) & 0x80)
+#define UE_SET_DIR(a,d) ((a) | (((d)&1) << 7))
+#define UE_DIR_IN 0x80
+#define UE_DIR_OUT 0x00
+#define UE_ADDR 0x0f
+#define UE_GET_ADDR(a) ((a) & UE_ADDR)
+ uByte bmAttributes;
+#define UE_XFERTYPE 0x03
+#define UE_CONTROL 0x00
+#define UE_ISOCHRONOUS 0x01
+#define UE_BULK 0x02
+#define UE_INTERRUPT 0x03
+#define UE_GET_XFERTYPE(a) ((a) & UE_XFERTYPE)
+#define UE_ISO_TYPE 0x0c
+#define UE_ISO_ASYNC 0x04
+#define UE_ISO_ADAPT 0x08
+#define UE_ISO_SYNC 0x0c
+#define UE_GET_ISO_TYPE(a) ((a) & UE_ISO_TYPE)
+ uWord wMaxPacketSize;
+ uByte bInterval;
+} UPACKED usb_endpoint_descriptor_t;
+#define USB_ENDPOINT_DESCRIPTOR_SIZE 7
+
+typedef struct ss_endpoint_companion_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bMaxBurst;
+#define USSE_GET_MAX_STREAMS(a) ((a) & 0x1f)
+#define USSE_SET_MAX_STREAMS(a, b) ((a) | ((b) & 0x1f))
+#define USSE_GET_MAX_PACKET_NUM(a) ((a) & 0x03)
+#define USSE_SET_MAX_PACKET_NUM(a, b) ((a) | ((b) & 0x03))
+ uByte bmAttributes;
+ uWord wBytesPerInterval;
+} UPACKED ss_endpoint_companion_descriptor_t;
+#define USB_SS_ENDPOINT_COMPANION_DESCRIPTOR_SIZE 6
+
+typedef struct {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord bString[127];
+} UPACKED usb_string_descriptor_t;
+#define USB_MAX_STRING_LEN 128
+#define USB_LANGUAGE_TABLE 0 /* # of the string language id table */
+
+/* Hub specific request */
+#define UR_GET_BUS_STATE 0x02
+#define UR_CLEAR_TT_BUFFER 0x08
+#define UR_RESET_TT 0x09
+#define UR_GET_TT_STATE 0x0a
+#define UR_STOP_TT 0x0b
+
+/* Hub features */
+#define UHF_C_HUB_LOCAL_POWER 0
+#define UHF_C_HUB_OVER_CURRENT 1
+#define UHF_PORT_CONNECTION 0
+#define UHF_PORT_ENABLE 1
+#define UHF_PORT_SUSPEND 2
+#define UHF_PORT_OVER_CURRENT 3
+#define UHF_PORT_RESET 4
+#define UHF_PORT_L1 5
+#define UHF_PORT_POWER 8
+#define UHF_PORT_LOW_SPEED 9
+#define UHF_PORT_HIGH_SPEED 10
+#define UHF_C_PORT_CONNECTION 16
+#define UHF_C_PORT_ENABLE 17
+#define UHF_C_PORT_SUSPEND 18
+#define UHF_C_PORT_OVER_CURRENT 19
+#define UHF_C_PORT_RESET 20
+#define UHF_C_PORT_L1 23
+#define UHF_PORT_TEST 21
+#define UHF_PORT_INDICATOR 22
+
+typedef struct {
+ uByte bDescLength;
+ uByte bDescriptorType;
+ uByte bNbrPorts;
+ uWord wHubCharacteristics;
+#define UHD_PWR 0x0003
+#define UHD_PWR_GANGED 0x0000
+#define UHD_PWR_INDIVIDUAL 0x0001
+#define UHD_PWR_NO_SWITCH 0x0002
+#define UHD_COMPOUND 0x0004
+#define UHD_OC 0x0018
+#define UHD_OC_GLOBAL 0x0000
+#define UHD_OC_INDIVIDUAL 0x0008
+#define UHD_OC_NONE 0x0010
+#define UHD_TT_THINK 0x0060
+#define UHD_TT_THINK_8 0x0000
+#define UHD_TT_THINK_16 0x0020
+#define UHD_TT_THINK_24 0x0040
+#define UHD_TT_THINK_32 0x0060
+#define UHD_PORT_IND 0x0080
+ uByte bPwrOn2PwrGood; /* delay in 2 ms units */
+#define UHD_PWRON_FACTOR 2
+ uByte bHubContrCurrent;
+ uByte DeviceRemovable[32]; /* max 255 ports */
+#define UHD_NOT_REMOV(desc, i) \
+ (((desc)->DeviceRemovable[(i)/8] >> ((i) % 8)) & 1)
+ /* deprecated */ uByte PortPowerCtrlMask[1];
+} UPACKED usb_hub_descriptor_t;
+#define USB_HUB_DESCRIPTOR_SIZE 9 /* includes deprecated PortPowerCtrlMask */
+
+typedef struct {
+	uByte bLength;
+	uByte bDescriptorType;
+	uWord bcdUSB;
+	uByte bDeviceClass;
+	uByte bDeviceSubClass;
+	uByte bDeviceProtocol;
+	uByte bMaxPacketSize0;
+	uByte bNumConfigurations;
+	uByte bReserved;
+} UPACKED usb_device_qualifier_t;	/* USB 2.0 Device Qualifier descriptor (10 bytes) */
+#define USB_DEVICE_QUALIFIER_SIZE 10
+
+typedef struct {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bmAttributes;
+#define UOTG_SRP 0x01
+#define UOTG_HNP 0x02
+} UPACKED usb_otg_descriptor_t;
+
+/* OTG feature selectors */
+#define UOTG_B_HNP_ENABLE 3
+#define UOTG_A_HNP_SUPPORT 4
+#define UOTG_A_ALT_HNP_SUPPORT 5
+
+typedef struct {
+ uWord wStatus;
+/* Device status flags */
+#define UDS_SELF_POWERED 0x0001
+#define UDS_REMOTE_WAKEUP 0x0002
+/* Endpoint status flags */
+#define UES_HALT 0x0001
+} UPACKED usb_status_t;
+
+typedef struct {
+ uWord wHubStatus;
+#define UHS_LOCAL_POWER 0x0001
+#define UHS_OVER_CURRENT 0x0002
+ uWord wHubChange;
+} UPACKED usb_hub_status_t;
+
+typedef struct {
+ uWord wPortStatus;
+#define UPS_CURRENT_CONNECT_STATUS 0x0001
+#define UPS_PORT_ENABLED 0x0002
+#define UPS_SUSPEND 0x0004
+#define UPS_OVERCURRENT_INDICATOR 0x0008
+#define UPS_RESET 0x0010
+#define UPS_PORT_POWER 0x0100
+#define UPS_LOW_SPEED 0x0200
+#define UPS_HIGH_SPEED 0x0400
+#define UPS_PORT_TEST 0x0800
+#define UPS_PORT_INDICATOR 0x1000
+ uWord wPortChange;
+#define UPS_C_CONNECT_STATUS 0x0001
+#define UPS_C_PORT_ENABLED 0x0002
+#define UPS_C_SUSPEND 0x0004
+#define UPS_C_OVERCURRENT_INDICATOR 0x0008
+#define UPS_C_PORT_RESET 0x0010
+} UPACKED usb_port_status_t;
+
+#ifdef _MSC_VER
+#include <poppack.h>
+#endif
+
+/* Device class codes */
+#define UDCLASS_IN_INTERFACE 0x00
+#define UDCLASS_COMM 0x02
+#define UDCLASS_HUB 0x09
+#define UDSUBCLASS_HUB 0x00
+#define UDPROTO_FSHUB 0x00
+#define UDPROTO_HSHUBSTT 0x01
+#define UDPROTO_HSHUBMTT 0x02
+#define UDCLASS_DIAGNOSTIC 0xdc
+#define UDCLASS_WIRELESS 0xe0
+#define UDSUBCLASS_RF 0x01
+#define UDPROTO_BLUETOOTH 0x01
+#define UDCLASS_VENDOR 0xff
+
+/* Interface class codes */
+#define UICLASS_UNSPEC 0x00
+
+#define UICLASS_AUDIO 0x01
+#define UISUBCLASS_AUDIOCONTROL 1
+#define UISUBCLASS_AUDIOSTREAM 2
+#define UISUBCLASS_MIDISTREAM 3
+
+#define UICLASS_CDC 0x02 /* communication */
+#define UISUBCLASS_DIRECT_LINE_CONTROL_MODEL 1
+#define UISUBCLASS_ABSTRACT_CONTROL_MODEL 2
+#define UISUBCLASS_TELEPHONE_CONTROL_MODEL 3
+#define UISUBCLASS_MULTICHANNEL_CONTROL_MODEL 4
+#define UISUBCLASS_CAPI_CONTROLMODEL 5
+#define UISUBCLASS_ETHERNET_NETWORKING_CONTROL_MODEL 6
+#define UISUBCLASS_ATM_NETWORKING_CONTROL_MODEL 7
+#define UIPROTO_CDC_AT 1
+
+#define UICLASS_HID 0x03
+#define UISUBCLASS_BOOT 1
+#define UIPROTO_BOOT_KEYBOARD 1
+
+#define UICLASS_PHYSICAL 0x05
+
+#define UICLASS_IMAGE 0x06
+
+#define UICLASS_PRINTER 0x07
+#define UISUBCLASS_PRINTER 1
+#define UIPROTO_PRINTER_UNI 1
+#define UIPROTO_PRINTER_BI 2
+#define UIPROTO_PRINTER_1284 3
+
+#define UICLASS_MASS 0x08
+#define UISUBCLASS_RBC 1
+#define UISUBCLASS_SFF8020I 2
+#define UISUBCLASS_QIC157 3
+#define UISUBCLASS_UFI 4
+#define UISUBCLASS_SFF8070I 5
+#define UISUBCLASS_SCSI 6
+#define UIPROTO_MASS_CBI_I 0
+#define UIPROTO_MASS_CBI 1
+#define UIPROTO_MASS_BBB_OLD 2 /* Not in the spec anymore */
+#define UIPROTO_MASS_BBB 80 /* 'P' for the Iomega Zip drive */
+
+#define UICLASS_HUB 0x09
+#define UISUBCLASS_HUB 0
+#define UIPROTO_FSHUB 0
+#define UIPROTO_HSHUBSTT 0 /* Yes, same as previous */
+#define UIPROTO_HSHUBMTT 1
+
+#define UICLASS_CDC_DATA 0x0a
+#define UISUBCLASS_DATA 0
+#define UIPROTO_DATA_ISDNBRI 0x30 /* Physical iface */
+#define UIPROTO_DATA_HDLC 0x31 /* HDLC */
+#define UIPROTO_DATA_TRANSPARENT 0x32 /* Transparent */
+#define UIPROTO_DATA_Q921M 0x50 /* Management for Q921 */
+#define UIPROTO_DATA_Q921 0x51 /* Data for Q921 */
+#define UIPROTO_DATA_Q921TM 0x52 /* TEI multiplexer for Q921 */
+#define UIPROTO_DATA_V42BIS 0x90 /* Data compression */
+#define UIPROTO_DATA_Q931 0x91 /* Euro-ISDN */
+#define UIPROTO_DATA_V120 0x92 /* V.24 rate adaption */
+#define UIPROTO_DATA_CAPI 0x93 /* CAPI 2.0 commands */
+#define UIPROTO_DATA_HOST_BASED 0xfd /* Host based driver */
+#define UIPROTO_DATA_PUF 0xfe /* see Prot. Unit Func. Desc.*/
+#define UIPROTO_DATA_VENDOR 0xff /* Vendor specific */
+
+#define UICLASS_SMARTCARD 0x0b
+
+/*#define UICLASS_FIRM_UPD 0x0c*/
+
+#define UICLASS_SECURITY 0x0d
+
+#define UICLASS_DIAGNOSTIC 0xdc
+
+#define UICLASS_WIRELESS 0xe0
+#define UISUBCLASS_RF 0x01
+#define UIPROTO_BLUETOOTH 0x01
+
+#define UICLASS_APPL_SPEC 0xfe
+#define UISUBCLASS_FIRMWARE_DOWNLOAD 1
+#define UISUBCLASS_IRDA 2
+#define UIPROTO_IRDA 0
+
+#define UICLASS_VENDOR 0xff
+
+#define USB_HUB_MAX_DEPTH 5
+
+/*
+ * Minimum time a device needs to be powered down to go through
+ * a power cycle. XXX Are these times in the spec?
+ */
+#define USB_POWER_DOWN_TIME 200 /* ms */
+#define USB_PORT_POWER_DOWN_TIME 100 /* ms */
+
+#if 0
+/* These are the values from the spec. */
+#define USB_PORT_RESET_DELAY 10 /* ms */
+#define USB_PORT_ROOT_RESET_DELAY 50 /* ms */
+#define USB_PORT_RESET_RECOVERY 10 /* ms */
+#define USB_PORT_POWERUP_DELAY 100 /* ms */
+#define USB_SET_ADDRESS_SETTLE 2 /* ms */
+#define USB_RESUME_DELAY (20*5) /* ms */
+#define USB_RESUME_WAIT 10 /* ms */
+#define USB_RESUME_RECOVERY 10 /* ms */
+#define USB_EXTRA_POWER_UP_TIME 0 /* ms */
+#else
+/* Allow for marginal (i.e. non-conforming) devices. */
+#define USB_PORT_RESET_DELAY 50 /* ms */
+#define USB_PORT_ROOT_RESET_DELAY 250 /* ms */
+#define USB_PORT_RESET_RECOVERY 250 /* ms */
+#define USB_PORT_POWERUP_DELAY 300 /* ms */
+#define USB_SET_ADDRESS_SETTLE 10 /* ms */
+#define USB_RESUME_DELAY (50*5) /* ms */
+#define USB_RESUME_WAIT 50 /* ms */
+#define USB_RESUME_RECOVERY 50 /* ms */
+#define USB_EXTRA_POWER_UP_TIME 20 /* ms */
+#endif
+
+#define USB_MIN_POWER 100 /* mA */
+#define USB_MAX_POWER 500 /* mA */
+
+#define USB_BUS_RESET_DELAY 100 /* ms XXX?*/
+
+#define USB_UNCONFIG_NO 0
+#define USB_UNCONFIG_INDEX (-1)
+
+/*** ioctl() related stuff ***/
+
+struct usb_ctl_request {
+ int ucr_addr;
+ usb_device_request_t ucr_request;
+ void *ucr_data;
+ int ucr_flags;
+#define USBD_SHORT_XFER_OK 0x04 /* allow short reads */
+ int ucr_actlen; /* actual length transferred */
+};
+
+struct usb_alt_interface {
+ int uai_config_index;
+ int uai_interface_index;
+ int uai_alt_no;
+};
+
+#define USB_CURRENT_CONFIG_INDEX (-1)
+#define USB_CURRENT_ALT_INDEX (-1)
+
+struct usb_config_desc {
+ int ucd_config_index;
+ usb_config_descriptor_t ucd_desc;
+};
+
+struct usb_interface_desc {
+ int uid_config_index;
+ int uid_interface_index;
+ int uid_alt_index;
+ usb_interface_descriptor_t uid_desc;
+};
+
+struct usb_endpoint_desc {
+ int ued_config_index;
+ int ued_interface_index;
+ int ued_alt_index;
+ int ued_endpoint_index;
+ usb_endpoint_descriptor_t ued_desc;
+};
+
+struct usb_full_desc {
+ int ufd_config_index;
+ u_int ufd_size;
+ u_char *ufd_data;
+};
+
+struct usb_string_desc {
+ int usd_string_index;
+ int usd_language_id;
+ usb_string_descriptor_t usd_desc;
+};
+
+struct usb_ctl_report_desc {
+ int ucrd_size;
+ u_char ucrd_data[1024]; /* filled data size will vary */
+};
+
+typedef struct { u_int32_t cookie; } usb_event_cookie_t;
+
+#define USB_MAX_DEVNAMES 4
+#define USB_MAX_DEVNAMELEN 16
+struct usb_device_info {
+ u_int8_t udi_bus;
+ u_int8_t udi_addr; /* device address */
+ usb_event_cookie_t udi_cookie;
+ char udi_product[USB_MAX_STRING_LEN];
+ char udi_vendor[USB_MAX_STRING_LEN];
+ char udi_release[8];
+ u_int16_t udi_productNo;
+ u_int16_t udi_vendorNo;
+ u_int16_t udi_releaseNo;
+ u_int8_t udi_class;
+ u_int8_t udi_subclass;
+ u_int8_t udi_protocol;
+ u_int8_t udi_config;
+ u_int8_t udi_speed;
+#define USB_SPEED_UNKNOWN 0
+#define USB_SPEED_LOW 1
+#define USB_SPEED_FULL 2
+#define USB_SPEED_HIGH 3
+#define USB_SPEED_VARIABLE 4
+#define USB_SPEED_SUPER 5
+ int udi_power; /* power consumption in mA, 0 if selfpowered */
+ int udi_nports;
+ char udi_devnames[USB_MAX_DEVNAMES][USB_MAX_DEVNAMELEN];
+ u_int8_t udi_ports[16];/* hub only: addresses of devices on ports */
+#define USB_PORT_ENABLED 0xff
+#define USB_PORT_SUSPENDED 0xfe
+#define USB_PORT_POWERED 0xfd
+#define USB_PORT_DISABLED 0xfc
+};
+
+struct usb_ctl_report {
+ int ucr_report;
+ u_char ucr_data[1024]; /* filled data size will vary */
+};
+
+struct usb_device_stats {
+ u_long uds_requests[4]; /* indexed by transfer type UE_* */
+};
+
+#define WUSB_MIN_IE 0x80
+#define WUSB_WCTA_IE 0x80
+#define WUSB_WCONNECTACK_IE 0x81
+#define WUSB_WHOSTINFO_IE 0x82
+#define WUHI_GET_CA(_bmAttributes_) ((_bmAttributes_) & 0x3)
+#define WUHI_CA_RECONN 0x00
+#define WUHI_CA_LIMITED 0x01
+#define WUHI_CA_ALL 0x03
+#define WUHI_GET_MLSI(_bmAttributes_) (((_bmAttributes_) & 0x38) >> 3)
+#define WUSB_WCHCHANGEANNOUNCE_IE 0x83
+#define WUSB_WDEV_DISCONNECT_IE 0x84
+#define WUSB_WHOST_DISCONNECT_IE 0x85
+#define WUSB_WRELEASE_CHANNEL_IE 0x86
+#define WUSB_WWORK_IE 0x87
+#define WUSB_WCHANNEL_STOP_IE 0x88
+#define WUSB_WDEV_KEEPALIVE_IE 0x89
+#define WUSB_WISOCH_DISCARD_IE 0x8A
+#define WUSB_WRESETDEVICE_IE 0x8B
+#define WUSB_WXMIT_PACKET_ADJUST_IE 0x8C
+#define WUSB_MAX_IE 0x8C
+
+/* Device Notification Types */
+
+#define WUSB_DN_MIN 0x01
+#define WUSB_DN_CONNECT 0x01
+# define WUSB_DA_OLDCONN 0x00
+# define WUSB_DA_NEWCONN 0x01
+# define WUSB_DA_SELF_BEACON 0x02
+# define WUSB_DA_DIR_BEACON 0x04
+# define WUSB_DA_NO_BEACON 0x06
+#define WUSB_DN_DISCONNECT 0x02
+#define WUSB_DN_EPRDY 0x03
+#define WUSB_DN_MASAVAILCHANGED 0x04
+#define WUSB_DN_REMOTEWAKEUP 0x05
+#define WUSB_DN_SLEEP 0x06
+#define WUSB_DN_ALIVE 0x07
+#define WUSB_DN_MAX 0x07
+
+#ifdef _MSC_VER
+#include <pshpack1.h>
+#endif
+
+/* WUSB Handshake Data. Used during the SET/GET HANDSHAKE requests */
+typedef struct wusb_hndshk_data {
+ uByte bMessageNumber;
+ uByte bStatus;
+ uByte tTKID[3];
+ uByte bReserved;
+ uByte CDID[16];
+ uByte Nonce[16];
+ uByte MIC[8];
+} UPACKED wusb_hndshk_data_t;
+#define WUSB_HANDSHAKE_LEN_FOR_MIC 38
+
+/* WUSB Connection Context */
+typedef struct wusb_conn_context {
+ uByte CHID [16];
+ uByte CDID [16];
+ uByte CK [16];
+} UPACKED wusb_conn_context_t;
+
+/* WUSB Security Descriptor */
+typedef struct wusb_security_desc {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord wTotalLength;
+ uByte bNumEncryptionTypes;
+} UPACKED wusb_security_desc_t;
+
+/* WUSB Encryption Type Descriptor */
+typedef struct wusb_encrypt_type_desc {
+ uByte bLength;
+ uByte bDescriptorType;
+
+ uByte bEncryptionType;
+#define WUETD_UNSECURE 0
+#define WUETD_WIRED 1
+#define WUETD_CCM_1 2
+#define WUETD_RSA_1 3
+
+ uByte bEncryptionValue;
+ uByte bAuthKeyIndex;
+} UPACKED wusb_encrypt_type_desc_t;
+
+/* WUSB Key Descriptor */
+typedef struct wusb_key_desc {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte tTKID[3];
+ uByte bReserved;
+ uByte KeyData[1]; /* variable length */
+} UPACKED wusb_key_desc_t;
+
+/* WUSB BOS Descriptor (Binary device Object Store) */
+typedef struct wusb_bos_desc {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord wTotalLength;
+ uByte bNumDeviceCaps;
+} UPACKED wusb_bos_desc_t;
+
+#define USB_DEVICE_CAPABILITY_20_EXTENSION 0x02
+typedef struct usb_dev_cap_20_ext_desc {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDevCapabilityType;
+#define USB_20_EXT_LPM 0x02
+ uDWord bmAttributes;
+} UPACKED usb_dev_cap_20_ext_desc_t;
+
+#define USB_DEVICE_CAPABILITY_SS_USB 0x03
+typedef struct usb_dev_cap_ss_usb {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDevCapabilityType;
+#define USB_DC_SS_USB_LTM_CAPABLE 0x02
+ uByte bmAttributes;
+#define USB_DC_SS_USB_SPEED_SUPPORT_LOW 0x01
+#define USB_DC_SS_USB_SPEED_SUPPORT_FULL 0x02
+#define USB_DC_SS_USB_SPEED_SUPPORT_HIGH 0x04
+#define USB_DC_SS_USB_SPEED_SUPPORT_SS 0x08
+ uWord wSpeedsSupported;
+ uByte bFunctionalitySupport;
+ uByte bU1DevExitLat;
+ uWord wU2DevExitLat;
+} UPACKED usb_dev_cap_ss_usb_t;
+
+#define USB_DEVICE_CAPABILITY_CONTAINER_ID 0x04
+typedef struct usb_dev_cap_container_id {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDevCapabilityType;
+ uByte bReserved;
+ uByte containerID[16];
+} UPACKED usb_dev_cap_container_id_t;
+
+/* Device Capability Type Codes */
+#define WUSB_DEVICE_CAPABILITY_WIRELESS_USB 0x01
+
+/* Device Capability Descriptor */
+typedef struct wusb_dev_cap_desc {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDevCapabilityType;
+ uByte caps[1]; /* Variable length */
+} UPACKED wusb_dev_cap_desc_t;
+
+/* Device Capability Descriptor */
+typedef struct wusb_dev_cap_uwb_desc {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDevCapabilityType;
+ uByte bmAttributes;
+ uWord wPHYRates; /* Bitmap */
+ uByte bmTFITXPowerInfo;
+ uByte bmFFITXPowerInfo;
+ uWord bmBandGroup;
+ uByte bReserved;
+} UPACKED wusb_dev_cap_uwb_desc_t;
+
+/* Wireless USB Endpoint Companion Descriptor */
+typedef struct wusb_endpoint_companion_desc {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bMaxBurst;
+ uByte bMaxSequence;
+ uWord wMaxStreamDelay;
+ uWord wOverTheAirPacketSize;
+ uByte bOverTheAirInterval;
+ uByte bmCompAttributes;
+} UPACKED wusb_endpoint_companion_desc_t;
+
+/* Wireless USB Numeric Association M1 Data Structure */
+typedef struct wusb_m1_data {
+ uByte version;
+ uWord langId;
+ uByte deviceFriendlyNameLength;
+ uByte sha_256_m3[32];
+ uByte deviceFriendlyName[256];
+} UPACKED wusb_m1_data_t;
+
+typedef struct wusb_m2_data {
+ uByte version;
+ uWord langId;
+ uByte hostFriendlyNameLength;
+ uByte pkh[384];
+ uByte hostFriendlyName[256];
+} UPACKED wusb_m2_data_t;
+
+typedef struct wusb_m3_data {
+ uByte pkd[384];
+ uByte nd;
+} UPACKED wusb_m3_data_t;
+
+typedef struct wusb_m4_data {
+ uDWord _attributeTypeIdAndLength_1;
+ uWord associationTypeId;
+
+ uDWord _attributeTypeIdAndLength_2;
+ uWord associationSubTypeId;
+
+ uDWord _attributeTypeIdAndLength_3;
+ uDWord length;
+
+ uDWord _attributeTypeIdAndLength_4;
+ uDWord associationStatus;
+
+ uDWord _attributeTypeIdAndLength_5;
+ uByte chid[16];
+
+ uDWord _attributeTypeIdAndLength_6;
+ uByte cdid[16];
+
+ uDWord _attributeTypeIdAndLength_7;
+ uByte bandGroups[2];
+} UPACKED wusb_m4_data_t;
+
+#ifdef _MSC_VER
+#include <poppack.h>
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _USB_H_ */
diff --git a/drivers/usb/host/fh_otg/fh_otg/Kconfig b/drivers/usb/host/fh_otg/fh_otg/Kconfig
new file mode 100644
index 00000000..d48d79a7
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/Kconfig
@@ -0,0 +1,14 @@
+config FH_HOST_ONLY
+ bool "Host only mode"
+ default y
+ depends on USB_FH_OTG
+ help
+ The USB2.0 high-speed host controller
+ integrated into many SoCs.
+
+config FH_DEVICE_ONLY
+ bool "Device only mode"
+ depends on USB_FH_OTG
+ help
+ The USB2.0 high-speed gadget controller
+ integrated into many SoCs.
diff --git a/drivers/usb/host/fh_otg/fh_otg/Makefile b/drivers/usb/host/fh_otg/fh_otg/Makefile
new file mode 100644
index 00000000..bdafb31d
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/Makefile
@@ -0,0 +1,80 @@
+#
+# Makefile for FH_otg Highspeed USB controller driver
+#
+
+ifneq ($(KERNELRELEASE),)
+
+# Use the BUS_INTERFACE variable to compile the software for either
+# PCI(PCI_INTERFACE) or LM(LM_INTERFACE) bus.
+ifeq ($(BUS_INTERFACE),)
+ BUS_INTERFACE = -DPCI_INTERFACE
+# BUS_INTERFACE = -DLM_INTERFACE
+endif
+
+#EXTRA_CFLAGS += -DDEBUG
+
+# Use one of the following flags to compile the software in host-only or
+# device-only mode.
+
+ifeq ($(CONFIG_FH_HOST_ONLY)_$(CONFIG_FH_DEVICE_ONLY), y_y)
+$(error "FH_HOST_ONLY FH_DEVICE_ONLY should be one of them or none!!!")
+endif
+
+ifneq ($(CONFIG_FH_HOST_ONLY),)
+EXTRA_CFLAGS += -DFH_HOST_ONLY
+endif
+ifneq ($(CONFIG_FH_DEVICE_ONLY),)
+EXTRA_CFLAGS += -DFH_DEVICE_ONLY
+endif
+
+EXTRA_CFLAGS += -Dlinux -DFH_HS_ELECT_TST
+#EXTRA_CFLAGS += -DFH_EN_ISOC
+EXTRA_CFLAGS += -I$(PWD)/../fh_common_port
+#EXTRA_CFLAGS += -I$(PORTLIB)
+EXTRA_CFLAGS += -DFH_LINUX
+EXTRA_CFLAGS += $(CFI)
+EXTRA_CFLAGS += $(BUS_INTERFACE)
+#EXTRA_CFLAGS += -DFH_DEV_SRPCAP
+
+obj-$(CONFIG_USB_FH_OTG):= fh_otg.o
+
+fh_otg-objs := fh_otg_driver.o fh_otg_attr.o
+fh_otg-objs += fh_otg_cil.o fh_otg_cil_intr.o
+fh_otg-objs += fh_otg_pcd_linux.o fh_otg_pcd.o fh_otg_pcd_intr.o
+fh_otg-objs += fh_otg_hcd.o fh_otg_hcd_linux.o fh_otg_hcd_intr.o fh_otg_hcd_queue.o fh_otg_hcd_ddma.o
+fh_otg-objs += fh_otg_adp.o
+ifneq ($(CFI),)
+fh_otg-objs += fh_otg_cfi.o
+endif
+
+else
+
+PWD := $(shell pwd)
+PORTLIB := $(PWD)/../fh_common_port
+
+# Command paths
+CTAGS := $(CTAGS)
+DOXYGEN := $(DOXYGEN)
+
+default: portlib
+ $(MAKE) -C$(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
+
+install: default
+ $(MAKE) -C$(KDIR) M=$(PORTLIB) modules_install
+ $(MAKE) -C$(KDIR) M=$(PWD) modules_install
+
+portlib:
+ $(MAKE) -C$(KDIR) M=$(PORTLIB) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
+ cp $(PORTLIB)/Module.symvers $(PWD)/
+
+docs: $(wildcard *.[hc]) doc/doxygen.cfg
+ $(DOXYGEN) doc/doxygen.cfg
+
+tags: $(wildcard *.[hc])
+ $(CTAGS) -e $(wildcard *.[hc]) $(wildcard linux/*.[hc]) $(wildcard $(KDIR)/include/linux/usb*.h)
+
+
+clean:
+ rm -rf *.o *.ko .*cmd *.mod.c .tmp_versions Module.symvers
+
+endif
diff --git a/drivers/usb/host/fh_otg/fh_otg/README b/drivers/usb/host/fh_otg/fh_otg/README
new file mode 100644
index 00000000..93cfd3b4
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/README
@@ -0,0 +1,17 @@
+Instructions for building HSOTG driver
+Portability library will be built on the fly
+---------------------------------------
+- Export necessary environment variables or pass them to the make command line.
+
+# Path to the installed kernel directory
+ % export KDIR=/...
+# Architecture type - for HAPS x86_64/x86, for IPMATE ARM
+ % export ARCH=x86_64
+# If BUS_INTERFACE not exported, PCI_INTERFACE is the default, for IPMATE use LM_INTERFACE
+
+- Build the driver.
+ % make
+
+- Install the driver (by default /lib/modules/x.x.xx.x/extra or export INSTALL_MOD_PATH for custom directory)
+ % make install
+
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_cfi_common.h b/drivers/usb/host/fh_otg/fh_otg/fh_cfi_common.h
new file mode 100644
index 00000000..34b453fd
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_cfi_common.h
@@ -0,0 +1,142 @@
+/* ==========================================================================
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__FH_CFI_COMMON_H__)
+#define __FH_CFI_COMMON_H__
+
+//#include <linux/types.h>
+
+/**
+ * @file
+ *
+ * This file contains the CFI specific common constants, interfaces
+ * (functions and macros) and structures for Linux. No PCD specific
+ * data structure or definition is to be included in this file.
+ *
+ */
+
+/** This is a request for all Core Features */
+#define VEN_CORE_GET_FEATURES 0xB1
+
+/** This is a request to get the value of a specific Core Feature */
+#define VEN_CORE_GET_FEATURE 0xB2
+
+/** This command allows the host to set the value of a specific Core Feature */
+#define VEN_CORE_SET_FEATURE 0xB3
+
+/** This command allows the host to set the default values of
+ * either all or any specific Core Feature
+ */
+#define VEN_CORE_RESET_FEATURES 0xB4
+
+/** This command forces the PCD to write the deferred values of a Core Features */
+#define VEN_CORE_ACTIVATE_FEATURES 0xB5
+
+/** This request reads a DWORD value from a register at the specified offset */
+#define VEN_CORE_READ_REGISTER 0xB6
+
+/** This request writes a DWORD value into a register at the specified offset */
+#define VEN_CORE_WRITE_REGISTER 0xB7
+
+/** This structure is the header of the Core Features dataset returned to
+ * the Host
+ */
+struct cfi_all_features_header {
+/** The features header structure length, in bytes */
+#define CFI_ALL_FEATURES_HDR_LEN 8
+	/**
+	 * The total length of the features dataset returned to the Host
+	 */
+	uint16_t wTotalLen;
+
+	/**
+	 * CFI version number in Binary-Coded Decimal (i.e., 1.00 is 100H).
+	 * This field identifies the version of the CFI Specification with which
+	 * the device is compliant.
+	 */
+	uint16_t wVersion;
+
+	/** The ID of the Core */
+	uint16_t wCoreID;
+#define CFI_CORE_ID_UDC		1
+#define CFI_CORE_ID_OTG		2
+#define CFI_CORE_ID_WUDEV	3
+
+	/** Number of features returned by VEN_CORE_GET_FEATURES request */
+	uint16_t wNumFeatures;
+} UPACKED;
+
+typedef struct cfi_all_features_header cfi_all_features_header_t;
+
+/** This structure is a header of the Core Feature descriptor dataset returned to
+ * the Host after the VEN_CORE_GET_FEATURES request
+ */
+struct cfi_feature_desc_header {
+#define CFI_FEATURE_DESC_HDR_LEN 8
+
+ /** The feature ID */
+ uint16_t wFeatureID;
+
+ /** Length of this feature descriptor in bytes - including the
+ * length of the feature name string
+ */
+ uint16_t wLength;
+
+ /** The data length of this feature in bytes */
+ uint16_t wDataLength;
+
+ /**
+ * Attributes of this features
+ * D0: Access rights
+ * 0 - Read/Write
+ * 1 - Read only
+ */
+ uint8_t bmAttributes;
+#define CFI_FEATURE_ATTR_RO 1
+#define CFI_FEATURE_ATTR_RW 0
+
+ /** Length of the feature name in bytes */
+ uint8_t bNameLen;
+
+ /** The feature name buffer */
+ //uint8_t *name;
+} UPACKED;
+
+typedef struct cfi_feature_desc_header cfi_feature_desc_header_t;
+
+/**
+ * This structure describes a NULL terminated string referenced by its id field.
+ * It is very similar to usb_string structure but has the id field type set to 16-bit.
+ */
+struct cfi_string {
+ uint16_t id;
+ const uint8_t *s;
+};
+typedef struct cfi_string cfi_string_t;
+
+#endif
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.c
new file mode 100644
index 00000000..55f1e9d5
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.c
@@ -0,0 +1,908 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_adp.c $
+ * $Revision: #16 $
+ * $Date: 2013/04/22 $
+ * $Change: 2211149 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#include "../fh_common_port/fh_os.h"
+#include "fh_otg_regs.h"
+#include "fh_otg_cil.h"
+#include "fh_otg_adp.h"
+
+/** @file
+ *
+ * This file contains the most of the Attach Detect Protocol implementation for
+ * the driver to support OTG Rev2.0.
+ *
+ */
+
+void fh_otg_adp_write_reg(fh_otg_core_if_t * core_if, uint32_t value)
+{	/* Write 'value' to ADPCTL via the indirect access request bits. */
+	adpctl_data_t adpctl;
+
+	adpctl.d32 = value;
+	adpctl.b.ar = 0x2;	/* ar = 2'b10 requests a write (read uses 2'b01) */
+
+	FH_WRITE_REG32(&core_if->core_global_regs->adpctl, adpctl.d32);
+
+	while (adpctl.b.ar) {	/* busy-wait: hw clears 'ar' when the access completes */
+		adpctl.d32 = FH_READ_REG32(&core_if->core_global_regs->adpctl);
+	}
+
+}
+
+/**
+ * Function is called to read ADP registers
+ */
+uint32_t fh_otg_adp_read_reg(fh_otg_core_if_t * core_if)
+{
+	adpctl_data_t adpctl;
+
+	adpctl.d32 = 0;
+	adpctl.b.ar = 0x1;	/* ar = 2'b01 requests a read access */
+
+	FH_WRITE_REG32(&core_if->core_global_regs->adpctl, adpctl.d32);
+
+	while (adpctl.b.ar) {	/* hw clears 'ar' once the value has been latched */
+		adpctl.d32 = FH_READ_REG32(&core_if->core_global_regs->adpctl);
+	}
+
+	return adpctl.d32;	/* last read holds the ADPCTL contents */
+}
+
+/**
+ * Function is called to read ADPCTL register and filter Write-clear bits
+ */
+uint32_t fh_otg_adp_read_reg_filter(fh_otg_core_if_t * core_if)
+{	/* Read ADPCTL and mask out all three write-clear interrupt bits,   */
+	adpctl_data_t adpctl;	/* so a later write-back cannot ack pending ints */
+
+	adpctl.d32 = fh_otg_adp_read_reg(core_if);
+	adpctl.b.adp_tmout_int = 0;
+	adpctl.b.adp_prb_int = 0;
+	adpctl.b.adp_sns_int = 0;	/* was a duplicated adp_tmout_int clear; sense int was never filtered */
+
+	return adpctl.d32;
+}
+
+/**
+ * Function is called to read-modify-write the ADP control register
+ */
+void fh_otg_adp_modify_reg(fh_otg_core_if_t * core_if, uint32_t clr,
+			   uint32_t set)
+{	/* read-modify-write: clear the 'clr' bits, then set the 'set' bits */
+	fh_otg_adp_write_reg(core_if,
+			     (fh_otg_adp_read_reg(core_if) & (~clr)) | set);
+}
+
+static void adp_probe_func(void * ptr)	/* workqueue callback; 'ptr' is the core_if */
+{
+	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;
+	fh_otg_adp_probe_start(core_if);	/* (re)start ADP probing */
+}
+
+static void adp_sense_timeout(void *ptr)	/* timer callback: ADP sensing window expired */
+{
+	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;
+	core_if->adp.sense_timer_started = 0;
+	FH_DEBUGPL(DBG_PCD, "ADP SENSE TIMEOUT\n");
+	if (core_if->adp_enable) {
+		fh_otg_adp_sense_stop(core_if);
+		FH_WORKQ_SCHEDULE_DELAYED(core_if->wq_otg, adp_probe_func,
+					  core_if, 100 , "start probe");	/* fall back to probing after 100 ms */
+	}
+}
+
+/**
+ * This function is called when the ADP vbus timer expires. Timeout is 1.1s.
+ */
+static void adp_vbuson_timeout(void *ptr)
+{	/* Timer callback: no port connection within 1.1 s of turning on VBUS. */
+	gpwrdn_data_t gpwrdn;
+	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;
+	hprt0_data_t hprt0 = {.d32 = 0 };
+	pcgcctl_data_t pcgcctl = {.d32 = 0 };
+	FH_PRINTF("%s: 1.1 seconds expire after turning on VBUS\n",__FUNCTION__);
+	if (core_if) {
+		core_if->adp.vbuson_timer_started = 0;
+		if(fh_otg_is_host_mode(core_if)) {
+			/* Turn off vbus and report the disconnect to the HCD */
+			hprt0.b.prtpwr = 1;
+			FH_MODIFY_REG32(core_if->host_if->hprt0, hprt0.d32, 0);
+			cil_hcd_disconnect(core_if);
+		}
+		gpwrdn.d32 = 0;
+
+		/* Prepare power-down (power_down == 2 is the core power-off path) */
+		if (core_if->power_down == 2) {
+			/* Enable Wakeup Logic */
+//			gpwrdn.b.wkupactiv = 1;
+			gpwrdn.b.pmuactv = 0;
+			gpwrdn.b.pwrdnrstn = 1;
+			gpwrdn.b.pwrdnclmp = 1;
+			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
+					gpwrdn.d32);
+
+			/* Suspend the Phy Clock */
+			pcgcctl.b.stoppclk = 1;
+			FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
+
+			/* Switch on VDD */
+//			gpwrdn.b.wkupactiv = 1;
+			gpwrdn.b.pmuactv = 1;
+			gpwrdn.b.pwrdnrstn = 1;
+			gpwrdn.b.pwrdnclmp = 1;
+			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
+					gpwrdn.d32);
+		} else {
+			/* Enable Power Down Logic */
+			gpwrdn.b.pmuintsel = 1;
+			gpwrdn.b.pmuactv = 1;
+			if(fh_otg_is_host_mode(core_if))
+				gpwrdn.b.dis_vbus = 1;
+			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+		}
+
+		/* Power off the core */
+		if (core_if->power_down == 2) {
+			gpwrdn.d32 = 0;
+			gpwrdn.b.pwrdnswtch = 1;
+			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn,
+					gpwrdn.d32, 0);
+		}
+
+		/* Unmask SRP detected interrupt from Power Down Logic */
+		gpwrdn.d32 = 0;
+		gpwrdn.b.srp_det_msk = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+
+		fh_mdelay(220);	/* settle time before restarting ADP probing */
+		fh_otg_adp_probe_start(core_if);
+	}
+
+}
+
+/**
+ * Start the ADP Initial Probe timer to detect if Port Connected interrupt is
+ * not asserted within 1.1 seconds.
+ *
+ * @param core_if the pointer to core_if strucure.
+ */
+void fh_otg_adp_vbuson_timer_start(fh_otg_core_if_t * core_if)
+{	/* Arm the 1.1 s VBUS-on timer; adp_vbuson_timeout() fires if no connect. */
+	core_if->adp.vbuson_timer_started = 1;
+	if (core_if->adp.vbuson_timer)
+	{
+		FH_PRINTF("SCHEDULING VBUSON TIMER\n");
+		/* 1.1 secs + 60ms necessary for cil_hcd_start*/
+		FH_TIMER_SCHEDULE(core_if->adp.vbuson_timer, 1160);
+	} else {
+		FH_WARN("VBUSON_TIMER = %p\n",core_if->adp.vbuson_timer);
+	}
+}
+
+#if 0
+/*
+ * NOTE(review): this entire region is compiled out (#if 0) and kept
+ * only for reference.
+ */
+/**
+ * Masks all FH OTG core interrupts
+ *
+ */
+static void mask_all_interrupts(fh_otg_core_if_t * core_if)
+{
+	int i;
+	gahbcfg_data_t ahbcfg = {.d32 = 0 };
+
+	/* Mask Host Interrupts */
+
+	/* Clear and disable HCINTs */
+	for (i = 0; i < core_if->core_params->host_channels; i++) {
+		FH_WRITE_REG32(&core_if->host_if->hc_regs[i]->hcintmsk, 0);
+		FH_WRITE_REG32(&core_if->host_if->hc_regs[i]->hcint, 0xFFFFFFFF);
+
+	}
+
+	/* Clear and disable HAINT */
+	FH_WRITE_REG32(&core_if->host_if->host_global_regs->haintmsk, 0x0000);
+	FH_WRITE_REG32(&core_if->host_if->host_global_regs->haint, 0xFFFFFFFF);
+
+	/* Mask Device Interrupts */
+	if (!core_if->multiproc_int_enable) {
+		/* Clear and disable IN Endpoint interrupts */
+		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->diepmsk, 0);
+		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+			FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->
+				       diepint, 0xFFFFFFFF);
+		}
+
+		/* Clear and disable OUT Endpoint interrupts */
+		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->doepmsk, 0);
+		for (i = 0; i <= core_if->dev_if->num_out_eps; i++) {
+			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->
+				       doepint, 0xFFFFFFFF);
+		}
+
+		/* Clear and disable DAINT */
+		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->daint,
+			       0xFFFFFFFF);
+		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->daintmsk, 0);
+	} else {
+		/* Per-endpoint interrupt masks (multiprocessor mode). */
+		for (i = 0; i < core_if->dev_if->num_in_eps; ++i) {
+			FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->
+				       diepeachintmsk[i], 0);
+			FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->
+				       diepint, 0xFFFFFFFF);
+		}
+
+		for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
+			FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->
+				       doepeachintmsk[i], 0);
+			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->
+				       doepint, 0xFFFFFFFF);
+		}
+
+		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->deachintmsk,
+			       0);
+		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->deachint,
+			       0xFFFFFFFF);
+
+	}
+
+	/* Disable interrupts */
+	ahbcfg.b.glblintrmsk = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
+
+	/* Disable all interrupts. */
+	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, 0);
+
+	/* Clear any pending interrupts */
+	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
+
+	/* Clear any pending OTG Interrupts */
+	FH_WRITE_REG32(&core_if->core_global_regs->gotgint, 0xFFFFFFFF);
+}
+
+/**
+ * Unmask Port Connection Detected interrupt
+ *
+ */
+static void unmask_conn_det_intr(fh_otg_core_if_t * core_if)
+{
+	/* NOTE(review): .d32 and .b.portintr initialize the same union;
+	 * with GCC the later initializer (.b.portintr = 1) wins. */
+	gintmsk_data_t gintmsk = {.d32 = 0,.b.portintr = 1 };
+
+	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
+}
+#endif
+
+/**
+ * Starts the ADP Probing
+ *
+ * Resets the ADP logic, programs the probe period according to the
+ * current mode (GPWRDN.IDSTS), unmasks the probe/timeout interrupts
+ * and finally enables probing.
+ *
+ * @param core_if the pointer to core_if structure.
+ */
+uint32_t fh_otg_adp_probe_start(fh_otg_core_if_t * core_if)
+{
+
+	adpctl_data_t adpctl = {.d32 = 0};
+	gpwrdn_data_t gpwrdn;
+#if 0
+	/* NOTE(review): compiled-out and not valid C - the last union
+	 * member is missing its designator dot and value. */
+	adpctl_data_t adpctl_int = {.d32 = 0, .b.adp_prb_int = 1,
+				.b.adp_sns_int = 1, b.adp_tmout_int};
+#endif
+	/* A one-shot request to abort probing takes effect here. */
+	if (core_if->stop_adpprb) {
+		core_if->stop_adpprb = 0;
+		return 0;
+	}
+
+	fh_otg_disable_global_interrupts(core_if);
+	FH_DEBUGPL(DBG_ANY, "ADP Probe Start\n");
+	core_if->adp.probe_enabled = 1;
+
+	/* Reset the ADP logic and busy-wait for the self-clearing bit. */
+	adpctl.b.adpres = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+
+	while (adpctl.b.adpres) {
+		adpctl.d32 = fh_otg_adp_read_reg(core_if);
+	}
+
+	adpctl.d32 = 0;
+	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+
+	/* In Host mode unmask SRP detected interrupt also change the
+	 * probe period accordingly */
+	if (!gpwrdn.b.idsts) {
+		gpwrdn.d32 = 0;
+		gpwrdn.b.srp_det_msk = 1;
+		adpctl.b.prb_per = 0;
+	}
+	else {
+		gpwrdn.d32 = 0;
+		gpwrdn.b.srp_det_msk = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->
+				gpwrdn, gpwrdn.d32, 0);
+		gpwrdn.d32 = 0;
+		gpwrdn.b.sts_chngint_msk = 1;
+		adpctl.b.prb_per = 1;
+	}
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+
+	/* Unmask probe/timeout interrupts, set discharge time and delta. */
+	adpctl.b.adp_tmout_int_msk = 1;
+	adpctl.b.adp_prb_int_msk = 1;
+	adpctl.b.prb_dschg = 1;
+	adpctl.b.prb_delta = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+
+	/* Finally enable ADP and start probing. */
+	adpctl.b.adpen = 1;
+	adpctl.b.enaprb = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+	FH_DEBUGPL(DBG_ANY, "ADP Probe Finish\n");
+
+	return 0;
+}
+
+/**
+ * Starts the ADP Sense timer to detect if ADP Sense interrupt is not asserted
+ * within 3 seconds.
+ *
+ * @param core_if the pointer to core_if structure.
+ */
+void fh_otg_adp_sense_timer_start(fh_otg_core_if_t * core_if)
+{
+	core_if->adp.sense_timer_started = 1;
+	if (core_if->adp.sense_timer) {
+		FH_TIMER_SCHEDULE(core_if->adp.sense_timer, 3300 /* 3.3 secs */ );
+	} else {
+		/* fh_otg_adp_init() only logs on allocation failure and
+		 * leaves this pointer NULL - guard against scheduling a
+		 * NULL timer, mirroring fh_otg_adp_vbuson_timer_start(). */
+		FH_WARN("SENSE_TIMER = %p\n", core_if->adp.sense_timer);
+	}
+}
+
+/**
+ * Starts the ADP Sense
+ *
+ * Resets the ADP logic, unmasks the sense interrupt, enables sensing
+ * and arms the 3.3 s sense watchdog timer.
+ *
+ * @param core_if the pointer to core_if structure.
+ */
+uint32_t fh_otg_adp_sense_start(fh_otg_core_if_t * core_if)
+{
+	adpctl_data_t adpctl;
+
+	FH_DEBUGPL(DBG_PCD, "ADP Sense Start\n");
+
+	/* Set ADP reset bit*/
+	adpctl.d32 = fh_otg_adp_read_reg_filter(core_if);
+	adpctl.b.adpres = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+
+	/* Busy-wait until the self-clearing reset bit drops. */
+	while (adpctl.b.adpres) {
+		adpctl.d32 = fh_otg_adp_read_reg(core_if);
+	}
+
+	/* Unmask ADP sense interrupt and mask all other from the core */
+	adpctl.d32 = fh_otg_adp_read_reg_filter(core_if);
+	adpctl.b.adp_sns_int_msk = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+	fh_otg_disable_global_interrupts(core_if);
+
+	/* Enable ADP and sensing. */
+	adpctl.b.adpres = 0;
+	adpctl.b.adpen = 1;
+	adpctl.b.enasns = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+
+	fh_otg_adp_sense_timer_start(core_if);
+
+	return 0;
+}
+
+/**
+ * Stops the ADP Probing
+ *
+ * @param core_if the pointer to core_if structure.
+ */
+uint32_t fh_otg_adp_probe_stop(fh_otg_core_if_t * core_if)
+{
+
+	adpctl_data_t adpctl;
+	FH_DEBUGPL(DBG_ANY, "Stop ADP probe\n");
+	core_if->adp.probe_enabled = 0;
+	//core_if->adp.probe_counter = 0;
+	adpctl.d32 = fh_otg_adp_read_reg(core_if);
+
+	/* Disable ADP and acknowledge all three ADP interrupt sources. */
+	adpctl.b.adpen = 0;
+	adpctl.b.adp_prb_int = 1;
+	adpctl.b.adp_tmout_int = 1;
+	adpctl.b.adp_sns_int = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+
+	return 0;
+}
+
+/**
+ * Stops the ADP Sensing
+ *
+ * @param core_if the pointer to core_if structure.
+ */
+uint32_t fh_otg_adp_sense_stop(fh_otg_core_if_t * core_if)
+{
+	adpctl_data_t ctl;
+
+	core_if->adp.sense_enabled = 0;
+
+	/* Clear the sense-enable bit and acknowledge any pending ADP
+	 * sense interrupt with a single register write. */
+	ctl.d32 = fh_otg_adp_read_reg_filter(core_if);
+	ctl.b.enasns = 0;
+	ctl.b.adp_sns_int = 1;
+	fh_otg_adp_write_reg(core_if, ctl.d32);
+
+	return 0;
+}
+
+/**
+ * Called to turn on the VBUS after initial ADP probe in host mode.
+ * If port power was already enabled in cil_hcd_start function then
+ * only schedule a timer.
+ *
+ * @param core_if the pointer to core_if structure.
+ */
+void fh_otg_adp_turnon_vbus(fh_otg_core_if_t * core_if)
+{
+	hprt0_data_t hprt0 = {.d32 = 0 };
+	hprt0.d32 = fh_otg_read_hprt0(core_if);
+	FH_PRINTF("Turn on VBUS for 1.1s, port power is %d\n", hprt0.b.prtpwr);
+
+	if (hprt0.b.prtpwr == 0) {
+		hprt0.b.prtpwr = 1;
+		/* NOTE(review): the HPRT0 write is commented out, so this
+		 * branch only updates a local copy - confirm port power is
+		 * driven elsewhere (e.g. cil_hcd_start). */
+		//FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+	}
+
+	fh_otg_adp_vbuson_timer_start(core_if);
+}
+
+/**
+ * Called right after driver is loaded
+ * to perform initial actions for ADP
+ *
+ * @param core_if the pointer to core_if structure.
+ * @param is_host - flag for current mode of operation either from GINTSTS or GPWRDN
+ */
+void fh_otg_adp_start(fh_otg_core_if_t * core_if, uint8_t is_host)
+{
+	gpwrdn_data_t gpwrdn;
+
+	FH_DEBUGPL(DBG_ANY, "ADP Initial Start\n");
+	core_if->adp.adp_started = 1;
+
+	/* Clear any stale core interrupts before deciding how to start. */
+	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
+	fh_otg_disable_global_interrupts(core_if);
+	if (is_host) {
+		FH_PRINTF("HOST MODE\n");
+		//core_if->op_state = A_HOST; - vahrama, modified checking in hcd_start()
+		/* Enable Power Down Logic Interrupt*/
+		gpwrdn.d32 = 0;
+		gpwrdn.b.pmuintsel = 1;
+		gpwrdn.b.pmuactv = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+		/* Initialize first ADP probe to obtain Ramp Time value */
+		core_if->adp.initial_probe = 1;
+		fh_otg_adp_probe_start(core_if);
+	} else {
+		gotgctl_data_t gotgctl;
+		gotgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+		FH_DEBUGPL(DBG_ANY, "DEVICE MODE\n");
+		//fh_otg_core_init(core_if);
+		if (gotgctl.b.bsesvld == 0) {
+			/* Enable Power Down Logic Interrupt*/
+			gpwrdn.d32 = 0;
+			FH_DEBUGPL(DBG_ANY, "VBUS is not valid - start ADP probe\n");
+			gpwrdn.b.pmuintsel = 1;
+			gpwrdn.b.pmuactv = 1;
+			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+			/* Do not need to return to initial probe if we are coming back to
+			 * the device mode after HNP */
+			if (core_if->op_state != B_HOST)
+				core_if->adp.initial_probe = 1;
+			fh_otg_adp_probe_start(core_if);
+		} else {
+			/* B-session already valid: skip probing, bring the
+			 * PCD up immediately. */
+			FH_PRINTF("VBUS is valid - initialize core as a Device\n");
+			core_if->op_state = B_PERIPHERAL;
+			//fh_otg_core_init(core_if);
+			fh_otg_enable_global_interrupts(core_if);
+			cil_pcd_start(core_if);
+			fh_otg_dump_global_registers(core_if);
+			fh_otg_dump_dev_registers(core_if);
+		}
+	}
+}
+
+/**
+ * Resets all ADP bookkeeping state to its defaults and allocates the
+ * sense and VBUS-on timers.
+ *
+ * NOTE: on timer allocation failure only an error is logged and the
+ * corresponding pointer remains NULL.
+ */
+void fh_otg_adp_init(fh_otg_core_if_t * core_if)
+{
+	core_if->adp.adp_started = 0;
+	core_if->adp.initial_probe = 0;
+	/* -1 marks a ramp-time sample slot as "not yet measured". */
+	core_if->adp.probe_timer_values[0] = -1;
+	core_if->adp.probe_timer_values[1] = -1;
+	core_if->adp.probe_enabled = 0;
+	core_if->adp.sense_enabled = 0;
+	core_if->adp.sense_timer_started = 0;
+	core_if->adp.vbuson_timer_started = 0;
+	core_if->adp.probe_counter = 0;
+	core_if->adp.gpwrdn = 0;
+	core_if->adp.attached = FH_OTG_ADP_UNKOWN;
+	/* Initialize timers */
+	core_if->adp.sense_timer =
+	    FH_TIMER_ALLOC("ADP SENSE TIMER", adp_sense_timeout, core_if);
+	core_if->adp.vbuson_timer =
+	    FH_TIMER_ALLOC("ADP VBUS ON TIMER", adp_vbuson_timeout, core_if);
+	if (!core_if->adp.sense_timer || !core_if->adp.vbuson_timer)
+	{
+		FH_ERROR("Could not allocate memory for ADP timers\n");
+	}
+}
+
+/**
+ * Tears down ADP: disables the Power Down Logic interrupt, stops any
+ * active probe/sense operation, cancels started timers and frees them.
+ */
+void fh_otg_adp_remove(fh_otg_core_if_t * core_if)
+{
+	gpwrdn_data_t gpwrdn = { .d32 = 0 };
+	gpwrdn.b.pmuintsel = 1;
+	gpwrdn.b.pmuactv = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+	if (core_if->adp.probe_enabled)
+		fh_otg_adp_probe_stop(core_if);
+	if (core_if->adp.sense_enabled)
+		fh_otg_adp_sense_stop(core_if);
+	if (core_if->adp.sense_timer_started)
+		FH_TIMER_CANCEL(core_if->adp.sense_timer);
+	if (core_if->adp.vbuson_timer_started)
+		FH_TIMER_CANCEL(core_if->adp.vbuson_timer);
+	FH_TIMER_FREE(core_if->adp.sense_timer);
+	FH_TIMER_FREE(core_if->adp.vbuson_timer);
+}
+
+/////////////////////////////////////////////////////////////////////
+////////////// ADP Interrupt Handlers ///////////////////////////////
+/////////////////////////////////////////////////////////////////////
+/**
+ * Records a new Ramp Timer sample.
+ *
+ * Keeps the two most recent samples in probe_timer_values[0..1].
+ * Returns 1 when this is the very first sample, 0 otherwise.
+ */
+static uint32_t set_timer_value(fh_otg_core_if_t * core_if, uint32_t val)
+{
+	int32_t *samples = core_if->adp.probe_timer_values;
+
+	if (samples[0] != -1) {
+		/* Shift the previous sample down, store the new one. */
+		samples[1] = samples[0];
+		samples[0] = val;
+		return 0;
+	}
+	samples[0] = val;
+	samples[1] = -1;
+	return 1;
+}
+
+/**
+ * This function compares Ramp Timer values
+ *
+ * Returns 1 when the absolute difference of the two most recent RTIM
+ * samples reaches the mode-specific threshold, 0 otherwise.
+ */
+static uint32_t compare_timer_values(fh_otg_core_if_t * core_if)
+{
+	uint32_t diff;
+	uint32_t thres;
+	gpwrdn_data_t gpwrdn;
+
+	/* RTIM difference threshold differs for host and device modes */
+	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+	if (!gpwrdn.b.idsts)
+		thres = HOST_RTIM_THRESHOLD;
+	else
+		thres = DEVICE_RTIM_THRESHOLD;
+
+	FH_DEBUGPL(DBG_ANY, "timer value 0 %d timer value 1 %d\n",
+		core_if->adp.probe_timer_values[0], core_if->adp.probe_timer_values[1]);
+	/* Absolute difference of the two samples. */
+	if (core_if->adp.probe_timer_values[0] >= core_if->adp.probe_timer_values[1])
+		diff = core_if->adp.probe_timer_values[0] - core_if->adp.probe_timer_values[1];
+	else
+		diff = core_if->adp.probe_timer_values[1] - core_if->adp.probe_timer_values[0];
+	if (diff < thres)
+		return 0;
+	else
+		return 1;
+}
+
+/**
+ * This function handles ADP Probe Interrupts
+ *
+ * Records the measured Ramp Time; after the initial probe (or once a
+ * ramp-time change is detected after more than two probes) it stops
+ * probing and starts the core in host or device role per GPWRDN.IDSTS.
+ */
+static int32_t fh_otg_adp_handle_prb_intr(fh_otg_core_if_t * core_if,
+					  uint32_t val)
+{
+	adpctl_data_t adpctl = {.d32 = 0 };
+	gpwrdn_data_t gpwrdn, temp;
+	adpctl.d32 = val;
+
+	temp.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+
+	core_if->adp.gpwrdn = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+	/* A zero Ramp Time is not a usable sample - just ack the irq. */
+	if (adpctl.b.rtim == 0 /*&& !temp.b.idsts*/){
+		FH_PRINTF("RTIM value is 0\n");
+		goto exit;
+	}
+	core_if->adp.probe_counter++;
+
+	/* First sample of the initial probe: pick the role and start. */
+	if (set_timer_value(core_if, adpctl.b.rtim) &&
+	    core_if->adp.initial_probe) {
+		core_if->adp.initial_probe = 0;
+		fh_otg_adp_probe_stop(core_if);
+		gpwrdn.d32 = 0;
+		gpwrdn.b.pmuactv = 1;
+		gpwrdn.b.pmuintsel = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+		FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
+
+		/* check which value is for device mode and which for Host mode */
+		if (!temp.b.idsts) {	/* considered host mode value is 0 */
+			/* Choose right op_state depending on previous one */
+			if (core_if->op_state == B_PERIPHERAL)
+				core_if->op_state = B_HOST;
+			else
+				core_if->op_state = A_HOST;
+			fh_otg_enable_global_interrupts(core_if);
+			/*
+			 * Turn on VBUS after initial ADP probe.
+			 */
+			FH_SPINUNLOCK(core_if->lock);
+			cil_hcd_start(core_if);
+			fh_otg_adp_turnon_vbus(core_if);
+			FH_SPINLOCK(core_if->lock);
+		} else {
+			/*
+			 * Initiate SRP after initial ADP probe.
+			 */
+			fh_otg_enable_global_interrupts(core_if);
+			fh_otg_initiate_srp(core_if);
+		}
+	} else if (core_if->adp.probe_counter > 2){
+		gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+		/* A ramp-time change between probes indicates an attach or
+		 * detach on the port. */
+		if (compare_timer_values(core_if)) {
+			FH_PRINTF("Difference in timer values !!! \n");
+//			core_if->adp.attached = FH_OTG_ADP_ATTACHED;
+			fh_otg_adp_probe_stop(core_if);
+
+			/* Power on the core */
+			if (core_if->power_down == 2) {
+				gpwrdn.b.pwrdnswtch = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, 0, gpwrdn.d32);
+			}
+
+			/* check which value is for device mode and which for Host mode */
+			if (!temp.b.idsts) {	/* considered host mode value is 0 */
+				/* Disable Interrupt from Power Down Logic */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.pmuintsel = 1;
+				gpwrdn.b.pmuactv = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, gpwrdn.d32, 0);
+
+				/*
+				 * Initialize the Core for Host mode.
+				 * Choose right op_state depending on previous one
+				 */
+				if (core_if->op_state == B_PERIPHERAL)
+					core_if->op_state = B_HOST;
+				else
+					core_if->op_state = A_HOST;
+
+				fh_otg_core_init(core_if);
+				fh_otg_enable_global_interrupts(core_if);
+				cil_hcd_start(core_if);
+				fh_otg_adp_turnon_vbus(core_if);
+			} else {
+				gotgctl_data_t gotgctl;
+				/* Mask SRP detected interrupt from Power Down Logic */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.srp_det_msk = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, gpwrdn.d32, 0);
+
+				/* Disable Power Down Logic */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.pmuintsel = 1;
+				gpwrdn.b.pmuactv = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, gpwrdn.d32, 0);
+
+				/*
+				 * Initialize the Core for Device mode.
+				 */
+				core_if->op_state = B_PERIPHERAL;
+				//fh_otg_core_init(core_if);
+				cil_pcd_start(core_if);
+				fh_otg_enable_global_interrupts(core_if);
+
+				gotgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+				if (!gotgctl.b.bsesvld)
+					fh_otg_initiate_srp(core_if);
+			}
+		}
+		if (core_if->power_down == 2) {
+			if (gpwrdn.b.bsessvld) {
+				/* Mask SRP detected interrupt from Power Down Logic */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.srp_det_msk = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+				/* Disable Power Down Logic */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.pmuactv = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+				/*
+				 * Initialize the Core for Device mode.
+				 */
+				core_if->op_state = B_PERIPHERAL;
+				fh_otg_core_init(core_if);
+				fh_otg_enable_global_interrupts(core_if);
+				cil_pcd_start(core_if);
+			}
+		}
+	}
+exit:
+	/* Clear interrupt */
+	adpctl.d32 = fh_otg_adp_read_reg(core_if);
+	adpctl.b.adp_prb_int = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+
+	return 0;
+}
+
+/**
+ * This function handles ADP Sense Interrupt
+ *
+ * Re-arms the sense watchdog and acknowledges the interrupt.
+ */
+static int32_t fh_otg_adp_handle_sns_intr(fh_otg_core_if_t * core_if)
+{
+	adpctl_data_t adpctl;
+	/* Stop ADP Sense timer */
+	FH_TIMER_CANCEL(core_if->adp.sense_timer);
+
+	/* Restart ADP Sense timer */
+	fh_otg_adp_sense_timer_start(core_if);
+
+	/* Clear interrupt */
+	adpctl.d32 = fh_otg_adp_read_reg(core_if);
+	adpctl.b.adp_sns_int = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+
+	return 0;
+}
+
+/**
+ * This function handles ADP Probe Timeout Interrupts
+ *
+ * The timed-out ramp value is still recorded as a sample before the
+ * interrupt is acknowledged.
+ */
+static int32_t fh_otg_adp_handle_prb_tmout_intr(fh_otg_core_if_t * core_if,
+						uint32_t val)
+{
+	adpctl_data_t adpctl = {.d32 = 0 };
+	adpctl.d32 = val;
+	set_timer_value(core_if, adpctl.b.rtim);
+
+	/* Clear interrupt */
+	adpctl.d32 = fh_otg_adp_read_reg(core_if);
+	adpctl.b.adp_tmout_int = 1;
+	fh_otg_adp_write_reg(core_if, adpctl.d32);
+
+	return 0;
+}
+
+/**
+ * ADP Interrupt handler.
+ *
+ * Reads ADPCTL once and dispatches to the sense / probe-timeout / probe
+ * sub-handlers for each interrupt that is both pending and unmasked.
+ */
+int32_t fh_otg_adp_handle_intr(fh_otg_core_if_t * core_if)
+{
+	int retval = 0;
+	adpctl_data_t adpctl = {.d32 = 0};
+
+	adpctl.d32 = fh_otg_adp_read_reg(core_if);
+	FH_DEBUGPL(DBG_ANY, "ADPCTL = %08x RAMP TIME = %d\n", adpctl.d32, adpctl.b.rtim);
+
+	/* Status AND mask: handle only pending, unmasked interrupts. */
+	if (adpctl.b.adp_sns_int & adpctl.b.adp_sns_int_msk) {
+		FH_DEBUGPL(DBG_ANY, "ADP Sense interrupt\n");
+		retval |= fh_otg_adp_handle_sns_intr(core_if);
+	}
+	if (adpctl.b.adp_tmout_int & adpctl.b.adp_tmout_int_msk) {
+		FH_DEBUGPL(DBG_ANY, "ADP timeout interrupt\n");
+		retval |= fh_otg_adp_handle_prb_tmout_intr(core_if, adpctl.d32);
+	}
+	if (adpctl.b.adp_prb_int & adpctl.b.adp_prb_int_msk) {
+		FH_DEBUGPL(DBG_ANY, "ADP Probe interrupt\n");
+		adpctl.b.adp_prb_int = 1;
+		retval |= fh_otg_adp_handle_prb_intr(core_if, adpctl.d32);
+	}
+
+//	fh_otg_adp_modify_reg(core_if, adpctl.d32, 0);
+	//fh_otg_adp_write_reg(core_if, adpctl.d32);
+	FH_DEBUGPL(DBG_ANY, "RETURN FROM ADP ISR\n");
+
+	return retval;
+}
+
+/**
+ * Handles the Power Down Logic SRP Detected interrupt: stops ADP
+ * probing and brings the core up in the role indicated by GPWRDN.IDSTS.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+int32_t fh_otg_adp_handle_srp_intr(fh_otg_core_if_t * core_if)
+{
+
+#ifndef FH_HOST_ONLY
+	/* NOTE(review): the entire body - including the host-mode branch
+	 * below - is compiled out for FH_HOST_ONLY builds; confirm this
+	 * guard is intended. */
+	hprt0_data_t hprt0;
+	gpwrdn_data_t gpwrdn;
+	FH_DEBUGPL(DBG_ANY, "++ Power Down Logic Session Request Interrupt++\n");
+
+	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+	/* check which value is for device mode and which for Host mode */
+	if (!gpwrdn.b.idsts) {	/* considered host mode value is 0 */
+		FH_PRINTF("SRP: Host mode\n");
+
+		if (core_if->adp_enable) {
+			fh_otg_adp_probe_stop(core_if);
+
+			/* Power on the core */
+			if (core_if->power_down == 2) {
+				gpwrdn.b.pwrdnswtch = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, 0, gpwrdn.d32);
+			}
+
+			core_if->op_state = A_HOST;
+			fh_otg_core_init(core_if);
+			fh_otg_enable_global_interrupts(core_if);
+			cil_hcd_start(core_if);
+		}
+
+		/* Turn on the port power bit. */
+		hprt0.d32 = fh_otg_read_hprt0(core_if);
+		hprt0.b.prtpwr = 1;
+		FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+
+		/* Start the Connection timer. So a message can be displayed
+		 * if connect does not occur within 10 seconds. */
+		cil_hcd_session_start(core_if);
+	} else {
+		FH_DEBUGPL(DBG_PCD, "SRP: Device mode %s\n", __FUNCTION__);
+		if (core_if->adp_enable) {
+			fh_otg_adp_probe_stop(core_if);
+
+			/* Power on the core */
+			if (core_if->power_down == 2) {
+				gpwrdn.b.pwrdnswtch = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, 0, gpwrdn.d32);
+			}
+
+			/* Deactivate the Power Down Logic PMU. */
+			gpwrdn.d32 = 0;
+			gpwrdn.b.pmuactv = 0;
+			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
+					gpwrdn.d32);
+
+			core_if->op_state = B_PERIPHERAL;
+			fh_otg_core_init(core_if);
+			fh_otg_enable_global_interrupts(core_if);
+			cil_pcd_start(core_if);
+		}
+	}
+#endif
+	return 1;
+}
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.h
new file mode 100644
index 00000000..3ecc22ef
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.h
@@ -0,0 +1,82 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_adp.h $
+ * $Revision: #8 $
+ * $Date: 2013/04/09 $
+ * $Change: 2201932 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef __FH_OTG_ADP_H__
+#define __FH_OTG_ADP_H__
+
+/**
+ * @file
+ *
+ * This file contains the Attach Detect Protocol interfaces and defines
+ * (functions) and structures for Linux.
+ *
+ */
+
+#define FH_OTG_ADP_UNATTACHED 0
+#define FH_OTG_ADP_ATTACHED 1
+#define FH_OTG_ADP_UNKOWN 2
+#define HOST_RTIM_THRESHOLD 5
+#define DEVICE_RTIM_THRESHOLD 3
+
+typedef struct fh_otg_adp {
+	uint32_t adp_started;		/**< ADP has been started for this core */
+	uint32_t initial_probe;		/**< first probe after start (obtains Ramp Time) */
+	int32_t probe_timer_values[2];	/**< last two RTIM samples; -1 = unset */
+	uint32_t probe_enabled;		/**< ADP probing currently enabled */
+	uint32_t sense_enabled;		/**< ADP sensing currently enabled */
+	fh_timer_t *sense_timer;	/**< fires if no sense irq within 3.3 s */
+	uint32_t sense_timer_started;	/**< sense_timer has been scheduled */
+	fh_timer_t *vbuson_timer;	/**< VBUS-on window timer (1.16 s) */
+	uint32_t vbuson_timer_started;	/**< vbuson_timer has been scheduled */
+	uint32_t attached;		/**< FH_OTG_ADP_* attach state */
+	uint32_t probe_counter;		/**< number of probe interrupts seen */
+	uint32_t gpwrdn;		/**< GPWRDN snapshot taken in the probe ISR */
+} fh_otg_adp_t;
+
+/**
+ * Attach Detect Protocol functions
+ */
+
+extern void fh_otg_adp_write_reg(fh_otg_core_if_t * core_if, uint32_t value);
+extern uint32_t fh_otg_adp_read_reg(fh_otg_core_if_t * core_if);
+extern uint32_t fh_otg_adp_probe_start(fh_otg_core_if_t * core_if);
+extern uint32_t fh_otg_adp_sense_start(fh_otg_core_if_t * core_if);
+extern uint32_t fh_otg_adp_probe_stop(fh_otg_core_if_t * core_if);
+extern uint32_t fh_otg_adp_sense_stop(fh_otg_core_if_t * core_if);
+extern void fh_otg_adp_start(fh_otg_core_if_t * core_if, uint8_t is_host);
+extern void fh_otg_adp_init(fh_otg_core_if_t * core_if);
+extern void fh_otg_adp_remove(fh_otg_core_if_t * core_if);
+extern int32_t fh_otg_adp_handle_intr(fh_otg_core_if_t * core_if);
+extern int32_t fh_otg_adp_handle_srp_intr(fh_otg_core_if_t * core_if);
+
+#endif //__FH_OTG_ADP_H__
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.c
new file mode 100644
index 00000000..648b08b9
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.c
@@ -0,0 +1,1440 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_attr.c $
+ * $Revision: #47 $
+ * $Date: 2015/08/07 $
+ * $Change: 2913245 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ *
+ * The diagnostic interface will provide access to the controller for
+ * bringing up the hardware and testing. The Linux driver attributes
+ * feature will be used to provide the Linux Diagnostic
+ * Interface. These attributes are accessed through sysfs.
+ */
+
+/** @page "Linux Module Attributes"
+ *
+ * The Linux module attributes feature is used to provide the Linux
+ * Diagnostic Interface. These attributes are accessed through sysfs.
+ * The diagnostic interface will provide access to the controller for
+ * bringing up the hardware and testing.
+
+ The following table shows the attributes.
+ <table>
+ <tr>
+ <td><b> Name</b></td>
+ <td><b> Description</b></td>
+ <td><b> Access</b></td>
+ </tr>
+
+ <tr>
+ <td> mode </td>
+ <td> Returns the current mode: 0 for device mode, 1 for host mode</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hnpcapable </td>
+ <td> Gets or sets the "HNP-capable" bit in the Core USB Configuration Register.
+ Read returns the current value.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> srpcapable </td>
+ <td> Gets or sets the "SRP-capable" bit in the Core USB Configuration Register.
+ Read returns the current value.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> hsic_connect </td>
+ <td> Gets or sets the "HSIC-Connect" bit in the GLPMCFG Register.
+ Read returns the current value.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> inv_sel_hsic </td>
+ <td> Gets or sets the "Invert Select HSIC" bit in the GLPMFG Register.
+ Read returns the current value.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> hnp </td>
+ <td> Initiates the Host Negotiation Protocol. Read returns the status.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> srp </td>
+ <td> Initiates the Session Request Protocol. Read returns the status.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> buspower </td>
+ <td> Gets or sets the Power State of the bus (0 - Off or 1 - On)</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> bussuspend </td>
+ <td> Suspends the USB bus.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> busconnected </td>
+ <td> Gets the connection status of the bus</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> gotgctl </td>
+ <td> Gets or sets the Core Control Status Register.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gusbcfg </td>
+ <td> Gets or sets the Core USB Configuration Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> grxfsiz </td>
+ <td> Gets or sets the Receive FIFO Size Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gnptxfsiz </td>
+ <td> Gets or sets the non-periodic Transmit Size Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gpvndctl </td>
+ <td> Gets or sets the PHY Vendor Control Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> ggpio </td>
+ <td> Gets the value in the lower 16-bits of the General Purpose IO Register
+ or sets the upper 16 bits.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> guid </td>
+ <td> Gets or sets the value of the User ID Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gsnpsid </td>
+ <td> Gets the value of the Synopsys ID Register</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> devspeed </td>
+ <td> Gets or sets the device speed setting in the DCFG register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> enumspeed </td>
+ <td> Gets the device enumeration Speed.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hptxfsiz </td>
+ <td> Gets the value of the Host Periodic Transmit FIFO</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hprt0 </td>
+ <td> Gets or sets the value in the Host Port Control and Status Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> regoffset </td>
+ <td> Sets the register offset for the next Register Access</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> regvalue </td>
+ <td> Gets or sets the value of the register at the offset in the regoffset attribute.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> remote_wakeup </td>
+ <td> On read, shows the status of Remote Wakeup. On write, initiates a remote
+ wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote
+ Wakeup signalling bit in the Device Control Register is set for 1
+ milli-second.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> rem_wakeup_pwrdn </td>
+ <td> On read, shows the status core - hibernated or not. On write, initiates
+ a remote wakeup of the device from Hibernation. </td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> mode_ch_tim_en </td>
+ <td> This bit is used to enable or disable the host core to wait for 200 PHY
+ clock cycles at the end of Resume to change the opmode signal to the PHY to 00
+ after Suspend or LPM. </td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> fr_interval </td>
+ <td> On read, shows the value of HFIR Frame Interval. On write, dynamically
+ reload HFIR register during runtime. The application can write a value to this
+ register only after the Port Enable bit of the Host Port Control and Status
+ register (HPRT.PrtEnaPort) has been set </td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> disconnect_us </td>
+ <td> On read, shows the status of disconnect_device_us. On write, sets disconnect_us
+ which causes soft disconnect for 100us. Applicable only for device mode of operation.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> regdump </td>
+ <td> Dumps the contents of core registers.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> spramdump </td>
+ <td> Dumps the contents of core registers.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hcddump </td>
+ <td> Dumps the current HCD state.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hcd_frrem </td>
+ <td> Shows the average value of the Frame Remaining
+ field in the Host Frame Number/Frame Remaining register when an SOF interrupt
+ occurs. This can be used to determine the average interrupt latency. Also
+ shows the average Frame Remaining value for start_transfer and the "a" and
+ "b" sample points. The "a" and "b" sample points may be used during debugging
+ to determine how long it takes to execute a section of the HCD code.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> rd_reg_test </td>
+ <td> Displays the time required to read the GNPTXFSIZ register many times
+ (the output shows the number of times the register is read).
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> wr_reg_test </td>
+ <td> Displays the time required to write the GNPTXFSIZ register many times
+ (the output shows the number of times the register is written).
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> lpm_response </td>
+ <td> Gets or sets lpm_response mode. Applicable only in device mode.
+ <td> Write</td>
+ </tr>
+
+ <tr>
+ <td> sleep_status </td>
+ <td> Shows sleep status of device.
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hird_thres </td>
+ <td> Gets or sets the "HIRD_Thres[3:0]" bits in the Core LPM Configuration Register.
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> besl_reject </td>
+ <td> Gets or sets the "besl_reject" bit in the Device Control Register.
+ <td> Read/Write</td>
+ </tr>
+
+ </table>
+
+ Example usage:
+ To get the current mode:
+ cat /sys/devices/lm0/mode
+
+ To power down the USB:
+ echo 0 > /sys/devices/lm0/buspower
+ */
+#include <linux/platform_device.h>
+
+#include "fh_otg_os_dep.h"
+#include "../fh_common_port/fh_os.h"
+#include "fh_otg_driver.h"
+#include "fh_otg_attr.h"
+#include "fh_otg_core_if.h"
+#include "fh_otg_pcd_if.h"
+#include "fh_otg_hcd_if.h"
+
+/*
+ * MACROs for defining sysfs attribute
+ */
+#ifdef LM_INTERFACE
+
+/*
+ * Expand to a sysfs "show" method: read the bit-field through the
+ * fh_otg_get_<name>() accessor and print it as "<label> = 0xVAL".
+ * The otg device is recovered from the enclosing lm_device.
+ */
+#define FH_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_string_) \
+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
+{ \
+	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \
+	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \
+	uint32_t val; \
+	val = fh_otg_get_##_otg_attr_name_ (otg_dev->core_if); \
+	return sprintf (buf, "%s = 0x%x\n", _string_, val); \
+}
+/*
+ * Expand to the matching sysfs "store" method: parse the input as hex
+ * and pass it to the fh_otg_set_<name>() accessor.
+ */
+#define FH_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_string_) \
+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \
+					const char *buf, size_t count) \
+{ \
+	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \
+	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \
+	uint32_t set = simple_strtoul(buf, NULL, 16); \
+	fh_otg_set_##_otg_attr_name_(otg_dev->core_if, set);\
+	return count; \
+}
+
+#elif defined(PCI_INTERFACE)
+
+/*
+ * PCI variants of the same show/store generators: the otg device is
+ * fetched directly from the struct device's driver data.
+ */
+#define FH_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_string_) \
+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
+{ \
+	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev); \
+	uint32_t val; \
+	val = fh_otg_get_##_otg_attr_name_ (otg_dev->core_if); \
+	return sprintf (buf, "%s = 0x%x\n", _string_, val); \
+}
+#define FH_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_string_) \
+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \
+					const char *buf, size_t count) \
+{ \
+	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev); \
+	uint32_t set = simple_strtoul(buf, NULL, 16); \
+	fh_otg_set_##_otg_attr_name_(otg_dev->core_if, set);\
+	return count; \
+}
+
+#endif
+
+/*
+ * MACROs for defining sysfs attribute for 32-bit registers
+ */
+#ifdef LM_INTERFACE
+#define FH_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_string_) \
+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \
+ uint32_t val; \
+ val = fh_otg_get_##_otg_attr_name_ (otg_dev->core_if); \
+ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \
+}
+#define FH_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_string_) \
+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \
+ uint32_t val = simple_strtoul(buf, NULL, 16); \
+ fh_otg_set_##_otg_attr_name_ (otg_dev->core_if, val); \
+ return count; \
+}
+#elif defined(PCI_INTERFACE)
+#define FH_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_string_) \
+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
+{ \
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev); \
+ uint32_t val; \
+ val = fh_otg_get_##_otg_attr_name_ (otg_dev->core_if); \
+ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \
+}
+#define FH_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_string_) \
+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev); \
+ uint32_t val = simple_strtoul(buf, NULL, 16); \
+ fh_otg_set_##_otg_attr_name_ (otg_dev->core_if, val); \
+ return count; \
+}
+
+#endif
+
+#define FH_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_string_) \
+FH_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_string_) \
+FH_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store);
+
+#define FH_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_string_) \
+FH_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL);
+
+#define FH_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \
+FH_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_string_) \
+FH_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store);
+
+#define FH_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \
+FH_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL);
+
+/** @name Functions for Show/Store of Attributes */
+/**@{*/
+
+/**
+ * Show the register offset of the Register Access.
+ */
+static ssize_t regoffset_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ return snprintf(buf, sizeof("0xFFFFFFFF\n") + 1, "0x%08x\n",
+ otg_dev->os_dep.reg_offset);
+}
+
+/**
+ * Set the register offset for the next Register Access Read/Write
+ */
+static ssize_t regoffset_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t offset = simple_strtoul(buf, NULL, 16);
+#ifdef LM_INTERFACE
+ if (offset < SZ_256K) {
+#elif defined(PCI_INTERFACE)
+ if (offset < 0x00040000) {
+#endif
+ otg_dev->os_dep.reg_offset = offset;
+ } else {
+ dev_err(_dev, "invalid offset\n");
+ }
+
+ return count;
+}
+
+DEVICE_ATTR(regoffset, S_IRUGO | S_IWUSR, regoffset_show, regoffset_store);
+
+/**
+ * Show the value of the register at the offset in the reg_offset
+ * attribute.
+ */
+static ssize_t regvalue_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t val;
+ volatile uint32_t *addr;
+
+ if (otg_dev->os_dep.reg_offset != 0xFFFFFFFF && 0 != otg_dev->os_dep.base) {
+ /* Calculate the address */
+ addr = (uint32_t *) (otg_dev->os_dep.reg_offset +
+ (uint8_t *) otg_dev->os_dep.base);
+ val = FH_READ_REG32(addr);
+ return snprintf(buf,
+ sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n") + 1,
+ "Reg@0x%06x = 0x%08x\n", otg_dev->os_dep.reg_offset,
+ val);
+ } else {
+ dev_err(_dev, "Invalid offset (0x%0x)\n", otg_dev->os_dep.reg_offset);
+ return sprintf(buf, "invalid offset\n");
+ }
+}
+
+/**
+ * Store the value in the register at the offset in the reg_offset
+ * attribute.
+ *
+ */
+static ssize_t regvalue_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ volatile uint32_t *addr;
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+ //dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val);
+ if (otg_dev->os_dep.reg_offset != 0xFFFFFFFF && 0 != otg_dev->os_dep.base) {
+ /* Calculate the address */
+ addr = (uint32_t *) (otg_dev->os_dep.reg_offset +
+ (uint8_t *) otg_dev->os_dep.base);
+ FH_WRITE_REG32(addr, val);
+ } else {
+ dev_err(_dev, "Invalid Register Offset (0x%08x)\n",
+ otg_dev->os_dep.reg_offset);
+ }
+ return count;
+}
+
+DEVICE_ATTR(regvalue, S_IRUGO | S_IWUSR, regvalue_show, regvalue_store);
+
+/*
+ * Attributes
+ */
+FH_OTG_DEVICE_ATTR_BITFIELD_RO(mode, "Mode");
+FH_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable, "HNPCapable");
+FH_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable, "SRPCapable");
+FH_OTG_DEVICE_ATTR_BITFIELD_RW(hsic_connect, "HSIC Connect");
+FH_OTG_DEVICE_ATTR_BITFIELD_RW(inv_sel_hsic, "Invert Select HSIC");
+
+//FH_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode");
+//FH_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode");
+FH_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected, "Bus Connected");
+
+FH_OTG_DEVICE_ATTR_REG32_RW(gotgctl, 0, "GOTGCTL");
+FH_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,
+ &(otg_dev->core_if->core_global_regs->gusbcfg),
+ "GUSBCFG");
+FH_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,
+ &(otg_dev->core_if->core_global_regs->grxfsiz),
+ "GRXFSIZ");
+FH_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,
+ &(otg_dev->core_if->core_global_regs->gnptxfsiz),
+ "GNPTXFSIZ");
+FH_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,
+ &(otg_dev->core_if->core_global_regs->gpvndctl),
+ "GPVNDCTL");
+FH_OTG_DEVICE_ATTR_REG32_RW(ggpio,
+ &(otg_dev->core_if->core_global_regs->ggpio),
+ "GGPIO");
+FH_OTG_DEVICE_ATTR_REG32_RW(guid, &(otg_dev->core_if->core_global_regs->guid),
+ "GUID");
+FH_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,
+ &(otg_dev->core_if->core_global_regs->gsnpsid),
+ "GSNPSID");
+FH_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed, "Device Speed");
+FH_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed, "Device Enumeration Speed");
+
+FH_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,
+ &(otg_dev->core_if->core_global_regs->hptxfsiz),
+ "HPTXFSIZ");
+FH_OTG_DEVICE_ATTR_REG32_RW(hprt0, otg_dev->core_if->host_if->hprt0, "HPRT0");
+
+/**
+ * @todo Add code to initiate the HNP.
+ */
+/**
+ * Show the HNP status bit
+ */
+static ssize_t hnp_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+ return sprintf(buf, "HstNegScs = 0x%x\n",
+ fh_otg_get_hnpstatus(otg_dev->core_if));
+}
+
+/**
+ * Set the HNP Request bit
+ */
+static ssize_t hnp_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+ uint32_t in = simple_strtoul(buf, NULL, 16);
+ fh_otg_set_hnpreq(otg_dev->core_if, in);
+ return count;
+}
+
+DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store);
+
+/**
+ * @todo Add code to initiate the SRP.
+ */
+/**
+ * Show the SRP status bit
+ */
+static ssize_t srp_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifndef FH_HOST_ONLY
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+ return sprintf(buf, "SesReqScs = 0x%x\n",
+ fh_otg_get_srpstatus(otg_dev->core_if));
+#else
+ return sprintf(buf, "Host Only Mode!\n");
+#endif
+}
+
+/**
+ * Set the SRP Request bit
+ */
+static ssize_t srp_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifndef FH_HOST_ONLY
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+ fh_otg_pcd_initiate_srp(otg_dev->pcd);
+#endif
+ return count;
+}
+
+DEVICE_ATTR(srp, 0644, srp_show, srp_store);
+
+/**
+ * @todo Need to do more for power on/off?
+ */
+/**
+ * Show the Bus Power status
+ */
+static ssize_t buspower_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+ return sprintf(buf, "Bus Power = 0x%x\n",
+ fh_otg_get_prtpower(otg_dev->core_if));
+}
+
+/**
+ * Set the Bus Power status
+ */
+static ssize_t buspower_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+ uint32_t on = simple_strtoul(buf, NULL, 16);
+ fh_otg_set_prtpower(otg_dev->core_if, on);
+ return count;
+}
+
+DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store);
+
+/**
+ * @todo Need to do more for suspend?
+ */
+/**
+ * Show the Bus Suspend status
+ */
+static ssize_t bussuspend_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ return sprintf(buf, "Bus Suspend = 0x%x\n",
+ fh_otg_get_prtsuspend(otg_dev->core_if));
+}
+
+/**
+ * Set the Bus Suspend status
+ */
+static ssize_t bussuspend_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t in = simple_strtoul(buf, NULL, 16);
+ fh_otg_set_prtsuspend(otg_dev->core_if, in);
+ return count;
+}
+
+DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store);
+
+/**
+ * Show the Mode Change Ready Timer status
+ */
+static ssize_t mode_ch_tim_en_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ return sprintf(buf, "Mode Change Ready Timer Enable = 0x%x\n",
+ fh_otg_get_mode_ch_tim(otg_dev->core_if));
+}
+
+/**
+ * Set the Mode Change Ready Timer status
+ */
+static ssize_t mode_ch_tim_en_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t in = simple_strtoul(buf, NULL, 16);
+ fh_otg_set_mode_ch_tim(otg_dev->core_if, in);
+ return count;
+}
+
+DEVICE_ATTR(mode_ch_tim_en, 0644, mode_ch_tim_en_show, mode_ch_tim_en_store);
+
+/**
+ * Show the value of HFIR Frame Interval bitfield
+ */
+static ssize_t fr_interval_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ return sprintf(buf, "Frame Interval = 0x%x\n",
+ fh_otg_get_fr_interval(otg_dev->core_if));
+}
+
+/**
+ * Set the HFIR Frame Interval value
+ */
+static ssize_t fr_interval_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t in = simple_strtoul(buf, NULL, 10);
+ fh_otg_set_fr_interval(otg_dev->core_if, in);
+ return count;
+}
+
+DEVICE_ATTR(fr_interval, 0644, fr_interval_show, fr_interval_store);
+
+/**
+ * Show the status of Remote Wakeup.
+ */
+static ssize_t remote_wakeup_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifndef FH_HOST_ONLY
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ return sprintf(buf,
+ "Remote Wakeup Sig = %d Enabled = %d LPM Remote Wakeup = %d\n",
+ fh_otg_get_remotewakesig(otg_dev->core_if),
+ fh_otg_pcd_get_rmwkup_enable(otg_dev->pcd),
+ fh_otg_get_lpm_remotewakeenabled(otg_dev->core_if));
+#else
+ return sprintf(buf, "Host Only Mode!\n");
+#endif /* FH_HOST_ONLY */
+}
+
+/**
+ * Initiate a remote wakeup of the host. The Device control register
+ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable
+ * flag is set.
+ *
+ */
+static ssize_t remote_wakeup_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifndef FH_HOST_ONLY
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+
+ if (val & 1) {
+ fh_otg_pcd_remote_wakeup(otg_dev->pcd, 1);
+ } else {
+ fh_otg_pcd_remote_wakeup(otg_dev->pcd, 0);
+ }
+#endif /* FH_HOST_ONLY */
+ return count;
+}
+
+DEVICE_ATTR(remote_wakeup, S_IRUGO | S_IWUSR, remote_wakeup_show,
+ remote_wakeup_store);
+
+/**
+ * Show the whether core is hibernated or not.
+ */
+static ssize_t rem_wakeup_pwrdn_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifndef FH_HOST_ONLY
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+ if (fh_otg_get_core_state(otg_dev->core_if)) {
+ FH_PRINTF("Core is in hibernation\n");
+ } else {
+ FH_PRINTF("Core is not in hibernation\n");
+ }
+#endif /* FH_HOST_ONLY */
+ return 0;
+}
+
+extern int fh_otg_device_hibernation_restore(fh_otg_core_if_t * core_if,
+ int rem_wakeup, int reset);
+
+/**
+ * Initiate a remote wakeup of the device to exit from hibernation.
+ */
+static ssize_t rem_wakeup_pwrdn_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifndef FH_HOST_ONLY
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+ fh_otg_device_hibernation_restore(otg_dev->core_if, 1, 0);
+#endif
+ return count;
+}
+
+DEVICE_ATTR(rem_wakeup_pwrdn, S_IRUGO | S_IWUSR, rem_wakeup_pwrdn_show,
+ rem_wakeup_pwrdn_store);
+
+static ssize_t disconnect_us(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+
+#ifndef FH_HOST_ONLY
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+ FH_PRINTF("The Passed value is %04x\n", val);
+
+ fh_otg_pcd_disconnect_us(otg_dev->pcd, 50);
+
+#endif /* FH_HOST_ONLY */
+ return count;
+}
+
+DEVICE_ATTR(disconnect_us, S_IWUSR, 0, disconnect_us);
+
+/**
+ * Dump global registers and either host or device registers (depending on the
+ * current mode of the core).
+ */
+static ssize_t regdump_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ fh_otg_dump_global_registers(otg_dev->core_if);
+ if (fh_otg_is_host_mode(otg_dev->core_if)) {
+ fh_otg_dump_host_registers(otg_dev->core_if);
+ } else {
+ fh_otg_dump_dev_registers(otg_dev->core_if);
+
+ }
+ return sprintf(buf, "Register Dump\n");
+}
+
+DEVICE_ATTR(regdump, S_IRUGO, regdump_show, 0);
+
+/**
+ * Dump global registers and either host or device registers (depending on the
+ * current mode of the core).
+ */
+static ssize_t spramdump_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ fh_otg_dump_spram(otg_dev->core_if);
+
+ return sprintf(buf, "SPRAM Dump\n");
+}
+
+DEVICE_ATTR(spramdump, S_IRUGO, spramdump_show, 0);
+
+/**
+ * Dump the current hcd state.
+ */
+static ssize_t hcddump_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifndef FH_DEVICE_ONLY
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ fh_otg_hcd_dump_state(otg_dev->hcd);
+#endif /* FH_DEVICE_ONLY */
+ return sprintf(buf, "HCD Dump\n");
+}
+
+DEVICE_ATTR(hcddump, S_IRUGO, hcddump_show, 0);
+
+/**
+ * Dump the average frame remaining at SOF. This can be used to
+ * determine average interrupt latency. Frame remaining is also shown for
+ * start transfer and two additional sample points.
+ */
+static ssize_t hcd_frrem_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifndef FH_DEVICE_ONLY
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ fh_otg_hcd_dump_frrem(otg_dev->hcd);
+#endif /* FH_DEVICE_ONLY */
+ return sprintf(buf, "HCD Dump Frame Remaining\n");
+}
+
+DEVICE_ATTR(hcd_frrem, S_IRUGO, hcd_frrem_show, 0);
+
+/**
+ * Displays the time required to read the GNPTXFSIZ register many times (the
+ * output shows the number of times the register is read).
+ */
+#define RW_REG_COUNT 10000000
+#define MSEC_PER_JIFFIE 1000/HZ
+static ssize_t rd_reg_test_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ int i;
+ int time;
+ int start_jiffies;
+
+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
+ start_jiffies = jiffies;
+ for (i = 0; i < RW_REG_COUNT; i++) {
+ fh_otg_get_gnptxfsiz(otg_dev->core_if);
+ }
+ time = jiffies - start_jiffies;
+ return sprintf(buf,
+ "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n",
+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time);
+}
+
+DEVICE_ATTR(rd_reg_test, S_IRUGO, rd_reg_test_show, 0);
+
+/**
+ * Displays the time required to write the GNPTXFSIZ register many times (the
+ * output shows the number of times the register is written).
+ */
+static ssize_t wr_reg_test_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t reg_val;
+ int i;
+ int time;
+ int start_jiffies;
+
+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
+ reg_val = fh_otg_get_gnptxfsiz(otg_dev->core_if);
+ start_jiffies = jiffies;
+ for (i = 0; i < RW_REG_COUNT; i++) {
+ fh_otg_set_gnptxfsiz(otg_dev->core_if, reg_val);
+ }
+ time = jiffies - start_jiffies;
+ return sprintf(buf,
+ "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n",
+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time);
+}
+
+DEVICE_ATTR(wr_reg_test, S_IRUGO, wr_reg_test_show, 0);
+
+#ifdef CONFIG_USB_FH_OTG_LPM
+
+/**
+* Show the lpm_response attribute.
+*/
+static ssize_t lpmresp_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ if (!fh_otg_get_param_lpm_enable(otg_dev->core_if))
+ return sprintf(buf, "** LPM is DISABLED **\n");
+
+ if (!fh_otg_is_device_mode(otg_dev->core_if)) {
+ return sprintf(buf, "** Current mode is not device mode\n");
+ }
+ return sprintf(buf, "lpm_response = %d\n",
+ fh_otg_get_lpmresponse(otg_dev->core_if));
+}
+
+/**
+* Store the lpm_response attribute.
+*/
+static ssize_t lpmresp_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+
+ if (!fh_otg_get_param_lpm_enable(otg_dev->core_if)) {
+ return 0;
+ }
+
+ if (!fh_otg_is_device_mode(otg_dev->core_if)) {
+ return 0;
+ }
+
+ fh_otg_set_lpmresponse(otg_dev->core_if, val);
+ return count;
+}
+
+DEVICE_ATTR(lpm_response, S_IRUGO | S_IWUSR, lpmresp_show, lpmresp_store);
+
+/**
+* Show the besl_reject attribute.
+*/
+static ssize_t beslreject_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ if (!fh_otg_get_param_lpm_enable(otg_dev->core_if))
+ return sprintf(buf, "** LPM is DISABLED **\n");
+ if (!fh_otg_get_param_besl_enable(otg_dev->core_if))
+ return sprintf(buf, "** EnBesl is DISABLED **\n");
+
+ if (!fh_otg_is_device_mode(otg_dev->core_if)) {
+ return sprintf(buf, "** Current mode is not device mode\n");
+ }
+
+ return sprintf(buf, "besl_reject = %d\n",
+ fh_otg_get_beslreject(otg_dev->core_if));
+}
+
+/**
+* Store the besl_reject attribute.
+*/
+static ssize_t beslreject_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+
+ if (!fh_otg_get_param_lpm_enable(otg_dev->core_if)) {
+ return 0;
+ }
+
+ if (!fh_otg_get_param_besl_enable(otg_dev->core_if)) {
+ return 0;
+ }
+
+ if (!fh_otg_is_device_mode(otg_dev->core_if)) {
+ return 0;
+ }
+
+ fh_otg_set_beslreject(otg_dev->core_if,val);
+
+ return count;
+}
+
+DEVICE_ATTR(besl_reject, S_IRUGO | S_IWUSR, beslreject_show, beslreject_store);
+
+/**
+* Show the hird_thresh attribute.
+*/
+static ssize_t hirdthresh_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ if (!fh_otg_get_param_lpm_enable(otg_dev->core_if))
+ return sprintf(buf, "** LPM is DISABLED **\n");
+
+ if (!fh_otg_is_device_mode(otg_dev->core_if)) {
+ return sprintf(buf, "** Current mode is not device mode\n");
+ }
+
+ return sprintf(buf, "hirdthresh = 0x%x\n",
+ fh_otg_get_hirdthresh(otg_dev->core_if));
+}
+
+/**
+* Store the hird_thresh attribute.
+*/
+static ssize_t hirdthresh_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+
+ if (!fh_otg_get_param_lpm_enable(otg_dev->core_if)) {
+ return 0;
+ }
+
+ if (!fh_otg_is_device_mode(otg_dev->core_if)) {
+ return 0;
+ }
+
+ fh_otg_set_hirdthresh(otg_dev->core_if,val);
+
+ return count;
+}
+
+DEVICE_ATTR(hird_thres, S_IRUGO | S_IWUSR, hirdthresh_show, hirdthresh_store);
+
+/**
+* Show the sleep_status attribute.
+*/
+static ssize_t sleepstatus_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ return sprintf(buf, "Sleep Status = %d\n",
+ fh_otg_get_lpm_portsleepstatus(otg_dev->core_if));
+}
+
+/**
+ * Store the sleep_status attribute.
+ */
+static ssize_t sleepstatus_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+#ifdef LM_INTERFACE
+ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
+ fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
+#elif defined(PCI_INTERFACE)
+ fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+#endif
+
+ fh_otg_core_if_t *core_if = otg_dev->core_if;
+
+ if (fh_otg_get_lpm_portsleepstatus(otg_dev->core_if)) {
+ if (fh_otg_is_host_mode(core_if)) {
+
+ FH_PRINTF("Host initiated resume\n");
+ fh_otg_set_prtresume(otg_dev->core_if, 1);
+ }
+ }
+
+ return count;
+}
+
+DEVICE_ATTR(sleep_status, S_IRUGO | S_IWUSR, sleepstatus_show,
+ sleepstatus_store);
+
+#endif /* CONFIG_USB_FH_OTG_LPM */
+
+/**@}*/
+
+/**
+ * Create the device files
+ */
+void fh_otg_attr_create(struct platform_device *dev)
+{
+ int error;
+
+ error = device_create_file(&dev->dev, &dev_attr_regoffset);
+ error = device_create_file(&dev->dev, &dev_attr_regvalue);
+ error = device_create_file(&dev->dev, &dev_attr_mode);
+ error = device_create_file(&dev->dev, &dev_attr_hnpcapable);
+ error = device_create_file(&dev->dev, &dev_attr_srpcapable);
+ error = device_create_file(&dev->dev, &dev_attr_hsic_connect);
+ error = device_create_file(&dev->dev, &dev_attr_inv_sel_hsic);
+ error = device_create_file(&dev->dev, &dev_attr_hnp);
+ error = device_create_file(&dev->dev, &dev_attr_srp);
+ error = device_create_file(&dev->dev, &dev_attr_buspower);
+ error = device_create_file(&dev->dev, &dev_attr_bussuspend);
+ error = device_create_file(&dev->dev, &dev_attr_mode_ch_tim_en);
+ error = device_create_file(&dev->dev, &dev_attr_fr_interval);
+ error = device_create_file(&dev->dev, &dev_attr_busconnected);
+ error = device_create_file(&dev->dev, &dev_attr_gotgctl);
+ error = device_create_file(&dev->dev, &dev_attr_gusbcfg);
+ error = device_create_file(&dev->dev, &dev_attr_grxfsiz);
+ error = device_create_file(&dev->dev, &dev_attr_gnptxfsiz);
+ error = device_create_file(&dev->dev, &dev_attr_gpvndctl);
+ error = device_create_file(&dev->dev, &dev_attr_ggpio);
+ error = device_create_file(&dev->dev, &dev_attr_guid);
+ error = device_create_file(&dev->dev, &dev_attr_gsnpsid);
+ error = device_create_file(&dev->dev, &dev_attr_devspeed);
+ error = device_create_file(&dev->dev, &dev_attr_enumspeed);
+ error = device_create_file(&dev->dev, &dev_attr_hptxfsiz);
+ error = device_create_file(&dev->dev, &dev_attr_hprt0);
+ error = device_create_file(&dev->dev, &dev_attr_remote_wakeup);
+ error = device_create_file(&dev->dev, &dev_attr_rem_wakeup_pwrdn);
+ error = device_create_file(&dev->dev, &dev_attr_disconnect_us);
+ error = device_create_file(&dev->dev, &dev_attr_regdump);
+ error = device_create_file(&dev->dev, &dev_attr_spramdump);
+ error = device_create_file(&dev->dev, &dev_attr_hcddump);
+ error = device_create_file(&dev->dev, &dev_attr_hcd_frrem);
+ error = device_create_file(&dev->dev, &dev_attr_rd_reg_test);
+ error = device_create_file(&dev->dev, &dev_attr_wr_reg_test);
+#ifdef CONFIG_USB_FH_OTG_LPM
+ error = device_create_file(&dev->dev, &dev_attr_lpm_response);
+ error = device_create_file(&dev->dev, &dev_attr_sleep_status);
+ error = device_create_file(&dev->dev, &dev_attr_besl_reject);
+ error = device_create_file(&dev->dev, &dev_attr_hird_thres);
+#endif
+}
+
+/**
+ * Remove the device files
+ */
+void fh_otg_attr_remove(struct platform_device *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_regoffset);
+ device_remove_file(&dev->dev, &dev_attr_regvalue);
+ device_remove_file(&dev->dev, &dev_attr_mode);
+ device_remove_file(&dev->dev, &dev_attr_hnpcapable);
+ device_remove_file(&dev->dev, &dev_attr_srpcapable);
+ device_remove_file(&dev->dev, &dev_attr_hsic_connect);
+ device_remove_file(&dev->dev, &dev_attr_inv_sel_hsic);
+ device_remove_file(&dev->dev, &dev_attr_hnp);
+ device_remove_file(&dev->dev, &dev_attr_srp);
+ device_remove_file(&dev->dev, &dev_attr_buspower);
+ device_remove_file(&dev->dev, &dev_attr_bussuspend);
+ device_remove_file(&dev->dev, &dev_attr_mode_ch_tim_en);
+ device_remove_file(&dev->dev, &dev_attr_fr_interval);
+ device_remove_file(&dev->dev, &dev_attr_busconnected);
+ device_remove_file(&dev->dev, &dev_attr_gotgctl);
+ device_remove_file(&dev->dev, &dev_attr_gusbcfg);
+ device_remove_file(&dev->dev, &dev_attr_grxfsiz);
+ device_remove_file(&dev->dev, &dev_attr_gnptxfsiz);
+ device_remove_file(&dev->dev, &dev_attr_gpvndctl);
+ device_remove_file(&dev->dev, &dev_attr_ggpio);
+ device_remove_file(&dev->dev, &dev_attr_guid);
+ device_remove_file(&dev->dev, &dev_attr_gsnpsid);
+ device_remove_file(&dev->dev, &dev_attr_devspeed);
+ device_remove_file(&dev->dev, &dev_attr_enumspeed);
+ device_remove_file(&dev->dev, &dev_attr_hptxfsiz);
+ device_remove_file(&dev->dev, &dev_attr_hprt0);
+ device_remove_file(&dev->dev, &dev_attr_remote_wakeup);
+ device_remove_file(&dev->dev, &dev_attr_rem_wakeup_pwrdn);
+ device_remove_file(&dev->dev, &dev_attr_disconnect_us);
+ device_remove_file(&dev->dev, &dev_attr_regdump);
+ device_remove_file(&dev->dev, &dev_attr_spramdump);
+ device_remove_file(&dev->dev, &dev_attr_hcddump);
+ device_remove_file(&dev->dev, &dev_attr_hcd_frrem);
+ device_remove_file(&dev->dev, &dev_attr_rd_reg_test);
+ device_remove_file(&dev->dev, &dev_attr_wr_reg_test);
+#ifdef CONFIG_USB_FH_OTG_LPM
+ device_remove_file(&dev->dev, &dev_attr_lpm_response);
+ device_remove_file(&dev->dev, &dev_attr_sleep_status);
+ device_remove_file(&dev->dev, &dev_attr_besl_reject);
+ device_remove_file(&dev->dev, &dev_attr_hird_thres);
+#endif
+}
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.h
new file mode 100644
index 00000000..cee86fd4
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.h
@@ -0,0 +1,76 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_attr.h $
+ * $Revision: #13 $
+ * $Date: 2010/06/21 $
+ * $Change: 1532021 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__FH_OTG_ATTR_H__)
+#define __FH_OTG_ATTR_H__
+
+#include <linux/platform_device.h>
+
+/** @file
+ * This file contains the interface to the Linux device attributes.
+ */
+extern struct device_attribute dev_attr_regoffset;
+extern struct device_attribute dev_attr_regvalue;
+
+extern struct device_attribute dev_attr_mode;
+extern struct device_attribute dev_attr_hnpcapable;
+extern struct device_attribute dev_attr_srpcapable;
+extern struct device_attribute dev_attr_hnp;
+extern struct device_attribute dev_attr_srp;
+extern struct device_attribute dev_attr_buspower;
+extern struct device_attribute dev_attr_bussuspend;
+extern struct device_attribute dev_attr_mode_ch_tim_en;
+extern struct device_attribute dev_attr_fr_interval;
+extern struct device_attribute dev_attr_busconnected;
+extern struct device_attribute dev_attr_gotgctl;
+extern struct device_attribute dev_attr_gusbcfg;
+extern struct device_attribute dev_attr_grxfsiz;
+extern struct device_attribute dev_attr_gnptxfsiz;
+extern struct device_attribute dev_attr_gpvndctl;
+extern struct device_attribute dev_attr_ggpio;
+extern struct device_attribute dev_attr_guid;
+extern struct device_attribute dev_attr_gsnpsid;
+extern struct device_attribute dev_attr_devspeed;
+extern struct device_attribute dev_attr_enumspeed;
+extern struct device_attribute dev_attr_hptxfsiz;
+extern struct device_attribute dev_attr_hprt0;
+#ifdef CONFIG_USB_FH_OTG_LPM
+extern struct device_attribute dev_attr_lpm_response;
+/* BUGFIX: was "devi_attr_sleep_status" - a typo declaring a symbol that
+ * does not exist; the attribute actually defined and removed by the
+ * driver (see fh_otg_attr_remove) is dev_attr_sleep_status. */
+extern struct device_attribute dev_attr_sleep_status;
+#endif
+
+/** Creates the sysfs attribute files for the OTG platform device. */
+void fh_otg_attr_create(struct platform_device *dev);
+
+/** Removes the sysfs attribute files created by fh_otg_attr_create(). */
+void fh_otg_attr_remove(struct platform_device *dev);
+
+#endif
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.c
new file mode 100644
index 00000000..d4f3cb87
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.c
@@ -0,0 +1,1876 @@
+/* ==========================================================================
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ *
+ * This file contains the most of the CFI(Core Feature Interface)
+ * implementation for the OTG.
+ */
+
+#ifdef FH_UTE_CFI
+
+#include "fh_otg_pcd.h"
+#include "fh_otg_cfi.h"
+
+/** This definition should actually migrate to the Portability Library */
+#define FH_CONSTANT_CPU_TO_LE16(x) (x)
+
+extern fh_otg_pcd_ep_t *get_ep_by_addr(fh_otg_pcd_t * pcd, u16 wIndex);
+
+static int cfi_core_features_buf(uint8_t * buf, uint16_t buflen);
+static int cfi_get_feature_value(uint8_t * buf, uint16_t buflen,
+ struct fh_otg_pcd *pcd,
+ struct cfi_usb_ctrlrequest *ctrl_req);
+static int cfi_set_feature_value(struct fh_otg_pcd *pcd);
+static int cfi_ep_get_sg_val(uint8_t * buf, struct fh_otg_pcd *pcd,
+ struct cfi_usb_ctrlrequest *req);
+static int cfi_ep_get_concat_val(uint8_t * buf, struct fh_otg_pcd *pcd,
+ struct cfi_usb_ctrlrequest *req);
+static int cfi_ep_get_align_val(uint8_t * buf, struct fh_otg_pcd *pcd,
+ struct cfi_usb_ctrlrequest *req);
+static int cfi_preproc_reset(struct fh_otg_pcd *pcd,
+ struct cfi_usb_ctrlrequest *req);
+static void cfi_free_ep_bs_dyn_data(cfi_ep_t * cfiep);
+
+static uint16_t get_dfifo_size(fh_otg_core_if_t * core_if);
+static int32_t get_rxfifo_size(fh_otg_core_if_t * core_if, uint16_t wValue);
+static int32_t get_txfifo_size(struct fh_otg_pcd *pcd, uint16_t wValue);
+
+static uint8_t resize_fifos(fh_otg_core_if_t * core_if);
+
+/** This is the header of the all features descriptor */
+static cfi_all_features_header_t all_props_desc_header = {
+ .wVersion = FH_CONSTANT_CPU_TO_LE16(0x100),
+ .wCoreID = FH_CONSTANT_CPU_TO_LE16(CFI_CORE_ID_OTG),
+ .wNumFeatures = FH_CONSTANT_CPU_TO_LE16(9),
+};
+
+/** This is an array of statically allocated feature descriptors */
+static cfi_feature_desc_header_t prop_descs[] = {
+
+ /* FT_ID_DMA_MODE */
+ {
+ .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_MODE),
+ .bmAttributes = CFI_FEATURE_ATTR_RW,
+ .wDataLength = FH_CONSTANT_CPU_TO_LE16(1),
+ },
+
+ /* FT_ID_DMA_BUFFER_SETUP */
+ {
+ .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_BUFFER_SETUP),
+ .bmAttributes = CFI_FEATURE_ATTR_RW,
+ .wDataLength = FH_CONSTANT_CPU_TO_LE16(6),
+ },
+
+ /* FT_ID_DMA_BUFF_ALIGN */
+ {
+ .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_BUFF_ALIGN),
+ .bmAttributes = CFI_FEATURE_ATTR_RW,
+ .wDataLength = FH_CONSTANT_CPU_TO_LE16(2),
+ },
+
+ /* FT_ID_DMA_CONCAT_SETUP */
+ {
+ .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_CONCAT_SETUP),
+ .bmAttributes = CFI_FEATURE_ATTR_RW,
+ //.wDataLength = FH_CONSTANT_CPU_TO_LE16(6),
+ },
+
+ /* FT_ID_DMA_CIRCULAR */
+ {
+ .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_CIRCULAR),
+ .bmAttributes = CFI_FEATURE_ATTR_RW,
+ .wDataLength = FH_CONSTANT_CPU_TO_LE16(6),
+ },
+
+ /* FT_ID_THRESHOLD_SETUP */
+ {
+ .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_THRESHOLD_SETUP),
+ .bmAttributes = CFI_FEATURE_ATTR_RW,
+ .wDataLength = FH_CONSTANT_CPU_TO_LE16(6),
+ },
+
+ /* FT_ID_DFIFO_DEPTH */
+ {
+ .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DFIFO_DEPTH),
+ .bmAttributes = CFI_FEATURE_ATTR_RO,
+ .wDataLength = FH_CONSTANT_CPU_TO_LE16(2),
+ },
+
+ /* FT_ID_TX_FIFO_DEPTH */
+ {
+ .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_TX_FIFO_DEPTH),
+ .bmAttributes = CFI_FEATURE_ATTR_RW,
+ .wDataLength = FH_CONSTANT_CPU_TO_LE16(2),
+ },
+
+ /* FT_ID_RX_FIFO_DEPTH */
+ {
+ .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_RX_FIFO_DEPTH),
+ .bmAttributes = CFI_FEATURE_ATTR_RW,
+ .wDataLength = FH_CONSTANT_CPU_TO_LE16(2),
+ }
+};
+
+/** The table of feature names */
+cfi_string_t prop_name_table[] = {
+ {FT_ID_DMA_MODE, "dma_mode"},
+ {FT_ID_DMA_BUFFER_SETUP, "buffer_setup"},
+ {FT_ID_DMA_BUFF_ALIGN, "buffer_align"},
+ {FT_ID_DMA_CONCAT_SETUP, "concat_setup"},
+ {FT_ID_DMA_CIRCULAR, "buffer_circular"},
+ {FT_ID_THRESHOLD_SETUP, "threshold_setup"},
+ {FT_ID_DFIFO_DEPTH, "dfifo_depth"},
+ {FT_ID_TX_FIFO_DEPTH, "txfifo_depth"},
+ {FT_ID_RX_FIFO_DEPTH, "rxfifo_depth"},
+ {}
+};
+
+/************************************************************************/
+
+/**
+ * Looks up the printable name of a feature by its ID.
+ *
+ * @param prop_id feature ID to search for.
+ * @param len     out: length of the returned name; 0 when not found.
+ * @return pointer to the name string, or NULL if the ID is unknown.
+ */
+const uint8_t *get_prop_name(uint16_t prop_id, int *len)
+{
+	cfi_string_t *entry = prop_name_table;
+
+	*len = 0;
+
+	/* Linear scan of the sentinel-terminated name table. */
+	while (entry && entry->s) {
+		if (entry->id == prop_id) {
+			*len = FH_STRLEN(entry->s);
+			return entry->s;
+		}
+		entry++;
+	}
+	return NULL;
+}
+
+/**
+ * This function handles all CFI specific control requests.
+ *
+ * Returns 0 on success or a negative error code; a negative value makes
+ * the caller stall the control endpoint (DCE).
+ */
+int cfi_setup(struct fh_otg_pcd *pcd, struct cfi_usb_ctrlrequest *ctrl)
+{
+	int retval = 0;
+	fh_otg_pcd_ep_t *ep = NULL;
+	cfiobject_t *cfi = pcd->cfi;
+	struct fh_otg_core_if *coreif = GET_CORE_IF(pcd);
+	uint16_t wLen = FH_LE16_TO_CPU(&ctrl->wLength);
+	uint16_t wValue = FH_LE16_TO_CPU(&ctrl->wValue);
+	uint16_t wIndex = FH_LE16_TO_CPU(&ctrl->wIndex);
+	uint32_t regaddr = 0;
+	uint32_t regval = 0;
+
+	/* Save this Control Request in the CFI object.
+	 * The data field will be assigned in the data stage completion CB function.
+	 */
+	cfi->ctrl_req = *ctrl;
+	cfi->ctrl_req.data = NULL;
+
+	cfi->need_gadget_att = 0;
+	cfi->need_status_in_complete = 0;
+
+	switch (ctrl->bRequest) {
+	case VEN_CORE_GET_FEATURES:
+		retval = cfi_core_features_buf(cfi->buf_in.buf, CFI_IN_BUF_LEN);
+		if (retval >= 0) {
+			ep = &pcd->ep0;
+
+			retval = min((uint16_t) retval, wLen);
+			/* Transfer this buffer to the host through the EP0-IN EP */
+			ep->fh_ep.dma_addr = cfi->buf_in.addr;
+			ep->fh_ep.start_xfer_buff = cfi->buf_in.buf;
+			ep->fh_ep.xfer_buff = cfi->buf_in.buf;
+			ep->fh_ep.xfer_len = retval;
+			ep->fh_ep.xfer_count = 0;
+			ep->fh_ep.sent_zlp = 0;
+			ep->fh_ep.total_len = ep->fh_ep.xfer_len;
+
+			pcd->ep0_pending = 1;
+			fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
+			/* BUGFIX: report success only when the features
+			 * buffer was actually built.  Previously retval was
+			 * unconditionally forced to 0 after this block, so a
+			 * negative error from cfi_core_features_buf() never
+			 * stalled EP0. */
+			retval = 0;
+		}
+		break;
+
+	case VEN_CORE_GET_FEATURE:
+		CFI_INFO("VEN_CORE_GET_FEATURE\n");
+		retval = cfi_get_feature_value(cfi->buf_in.buf, CFI_IN_BUF_LEN,
+					       pcd, ctrl);
+		if (retval >= 0) {
+			ep = &pcd->ep0;
+
+			retval = min((uint16_t) retval, wLen);
+			/* Transfer this buffer to the host through the EP0-IN EP */
+			ep->fh_ep.dma_addr = cfi->buf_in.addr;
+			ep->fh_ep.start_xfer_buff = cfi->buf_in.buf;
+			ep->fh_ep.xfer_buff = cfi->buf_in.buf;
+			ep->fh_ep.xfer_len = retval;
+			ep->fh_ep.xfer_count = 0;
+			ep->fh_ep.sent_zlp = 0;
+			ep->fh_ep.total_len = ep->fh_ep.xfer_len;
+
+			pcd->ep0_pending = 1;
+			fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
+		}
+		CFI_INFO("VEN_CORE_GET_FEATURE=%d\n", retval);
+		/* BUGFIX: do not dump the buffer with a negative length. */
+		if (retval >= 0)
+			dump_msg(cfi->buf_in.buf, retval);
+		break;
+
+	case VEN_CORE_SET_FEATURE:
+		CFI_INFO("VEN_CORE_SET_FEATURE\n");
+		/* Set up an XFER to get the data stage of the control request,
+		 * which is the new value of the feature to be modified.
+		 */
+		ep = &pcd->ep0;
+		ep->fh_ep.is_in = 0;
+		ep->fh_ep.dma_addr = cfi->buf_out.addr;
+		ep->fh_ep.start_xfer_buff = cfi->buf_out.buf;
+		ep->fh_ep.xfer_buff = cfi->buf_out.buf;
+		ep->fh_ep.xfer_len = wLen;
+		ep->fh_ep.xfer_count = 0;
+		ep->fh_ep.sent_zlp = 0;
+		ep->fh_ep.total_len = ep->fh_ep.xfer_len;
+
+		pcd->ep0_pending = 1;
+		/* Read the control write's data stage */
+		fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
+		retval = 0;
+		break;
+
+	case VEN_CORE_RESET_FEATURES:
+		CFI_INFO("VEN_CORE_RESET_FEATURES\n");
+		cfi->need_gadget_att = 1;
+		cfi->need_status_in_complete = 1;
+		retval = cfi_preproc_reset(pcd, ctrl);
+		CFI_INFO("VEN_CORE_RESET_FEATURES = (%d)\n", retval);
+		break;
+
+	case VEN_CORE_ACTIVATE_FEATURES:
+		CFI_INFO("VEN_CORE_ACTIVATE_FEATURES\n");
+		break;
+
+	case VEN_CORE_READ_REGISTER:
+		CFI_INFO("VEN_CORE_READ_REGISTER\n");
+		/* wValue optionally contains the HI WORD of the register offset and
+		 * wIndex contains the LOW WORD of the register offset
+		 */
+		/* NOTE(security): the address comes straight from the
+		 * host-supplied SETUP packet, so this vendor request allows
+		 * an arbitrary 32-bit memory read.  Acceptable only in
+		 * debug builds; consider gating behind a config option. */
+		if (wValue == 0) {
+			/* @TODO - MAS - fix the access to the base field */
+			regaddr = 0;
+			regaddr |= wIndex;
+		} else {
+			regaddr = (wValue << 16) | wIndex;
+		}
+
+		/* Read a 32-bit value of the memory at the regaddr */
+		regval = FH_READ_REG32((uint32_t *) regaddr);
+
+		ep = &pcd->ep0;
+		fh_memcpy(cfi->buf_in.buf, &regval, sizeof(uint32_t));
+		ep->fh_ep.is_in = 1;
+		ep->fh_ep.dma_addr = cfi->buf_in.addr;
+		ep->fh_ep.start_xfer_buff = cfi->buf_in.buf;
+		ep->fh_ep.xfer_buff = cfi->buf_in.buf;
+		ep->fh_ep.xfer_len = wLen;
+		ep->fh_ep.xfer_count = 0;
+		ep->fh_ep.sent_zlp = 0;
+		ep->fh_ep.total_len = ep->fh_ep.xfer_len;
+
+		pcd->ep0_pending = 1;
+		fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
+		cfi->need_gadget_att = 0;
+		retval = 0;
+		break;
+
+	case VEN_CORE_WRITE_REGISTER:
+		CFI_INFO("VEN_CORE_WRITE_REGISTER\n");
+		/* Set up an XFER to get the data stage of the control request,
+		 * which is the new value of the register to be modified.
+		 */
+		ep = &pcd->ep0;
+		ep->fh_ep.is_in = 0;
+		ep->fh_ep.dma_addr = cfi->buf_out.addr;
+		ep->fh_ep.start_xfer_buff = cfi->buf_out.buf;
+		ep->fh_ep.xfer_buff = cfi->buf_out.buf;
+		ep->fh_ep.xfer_len = wLen;
+		ep->fh_ep.xfer_count = 0;
+		ep->fh_ep.sent_zlp = 0;
+		ep->fh_ep.total_len = ep->fh_ep.xfer_len;
+
+		pcd->ep0_pending = 1;
+		/* Read the control write's data stage */
+		fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
+		retval = 0;
+		break;
+
+	default:
+		retval = -FH_E_NOT_SUPPORTED;
+		break;
+	}
+
+	return retval;
+}
+
+/**
+ * This function prepares the core features descriptors and copies its
+ * raw representation into the buffer <buf>.
+ *
+ * The buffer structure is as follows:
+ *	all_features_header (8 bytes)
+ *	features_#1 (8 bytes + feature name string length)
+ *	features_#2 (8 bytes + feature name string length)
+ *	.....
+ *	features_#n - where n=the total count of feature descriptors
+ *
+ * Returns the total number of bytes written into <buf>.
+ */
+static int cfi_core_features_buf(uint8_t * buf, uint16_t buflen)
+{
+	cfi_feature_desc_header_t *prop_hdr = prop_descs;
+	cfi_feature_desc_header_t *prop;
+	cfi_all_features_header_t *all_props_hdr = &all_props_desc_header;
+	cfi_all_features_header_t *tmp;
+	uint8_t *tmpbuf = buf;
+	const uint8_t *pname = NULL;
+	int i, j, namelen = 0, totlen;
+
+	/* Prepare and copy the core features into the buffer */
+	CFI_INFO("%s:\n", __func__);
+
+	tmp = (cfi_all_features_header_t *) tmpbuf;
+	*tmp = *all_props_hdr;
+	tmpbuf += CFI_ALL_FEATURES_HDR_LEN;
+
+	/* BUGFIX: the element count was computed with
+	 * sizeof(cfi_all_features_header_t) - the size of a different
+	 * struct - which only yielded the right count while the two structs
+	 * happened to have the same size.  Divide by the size of an actual
+	 * array element instead. */
+	j = sizeof(prop_descs) / sizeof(prop_descs[0]);
+	for (i = 0; i < j; i++, prop_hdr++) {
+		pname = get_prop_name(prop_hdr->wFeatureID, &namelen);
+		prop = (cfi_feature_desc_header_t *) tmpbuf;
+		*prop = *prop_hdr;
+
+		prop->bNameLen = namelen;
+		prop->wLength =
+		    FH_CONSTANT_CPU_TO_LE16(CFI_FEATURE_DESC_HDR_LEN +
+					    namelen);
+
+		tmpbuf += CFI_FEATURE_DESC_HDR_LEN;
+		fh_memcpy(tmpbuf, pname, namelen);
+		tmpbuf += namelen;
+	}
+
+	totlen = tmpbuf - buf;
+
+	if (totlen > 0) {
+		tmp = (cfi_all_features_header_t *) buf;
+		tmp->wTotalLen = FH_CONSTANT_CPU_TO_LE16(totlen);
+	}
+
+	return totlen;
+}
+
+/**
+ * Releases all dynamic memory held by the CFI object: the IN/OUT DMA
+ * buffers and the buffer-setup data of every active endpoint.
+ */
+static void cfi_release(cfiobject_t * cfiobj)
+{
+	fh_list_link_t *link;
+	cfi_ep_t *ep_wrapper;
+
+	CFI_INFO("%s\n", __func__);
+
+	if (cfiobj->buf_in.buf != NULL) {
+		FH_DMA_FREE(CFI_IN_BUF_LEN, cfiobj->buf_in.buf,
+			    cfiobj->buf_in.addr);
+		cfiobj->buf_in.buf = NULL;
+	}
+
+	if (cfiobj->buf_out.buf != NULL) {
+		FH_DMA_FREE(CFI_OUT_BUF_LEN, cfiobj->buf_out.buf,
+			    cfiobj->buf_out.addr);
+		cfiobj->buf_out.buf = NULL;
+	}
+
+	/* Drop the per-EP buffer-setup values of every active endpoint. */
+	FH_LIST_FOREACH(link, &cfiobj->active_eps) {
+		ep_wrapper = FH_LIST_ENTRY(link, struct cfi_ep, lh);
+		cfi_free_ep_bs_dyn_data(ep_wrapper);
+	}
+}
+
+/**
+ * Frees the dynamically allocated buffer-setup data attached to one CFI
+ * endpoint wrapper and clears the pointers so a repeated call is safe.
+ */
+static void cfi_free_ep_bs_dyn_data(cfi_ep_t * cfiep)
+{
+	if (cfiep->bm_sg != NULL) {
+		FH_FREE(cfiep->bm_sg);
+		cfiep->bm_sg = NULL;
+	}
+
+	if (cfiep->bm_align != NULL) {
+		FH_FREE(cfiep->bm_align);
+		cfiep->bm_align = NULL;
+	}
+
+	if (cfiep->bm_concat != NULL) {
+		/* The concat setup owns a separately allocated wTxBytes array. */
+		if (cfiep->bm_concat->wTxBytes != NULL) {
+			FH_FREE(cfiep->bm_concat->wTxBytes);
+			cfiep->bm_concat->wTxBytes = NULL;
+		}
+		FH_FREE(cfiep->bm_concat);
+		cfiep->bm_concat = NULL;
+	}
+}
+
+/**
+ * This function initializes the default values of the buffer-setup
+ * features for a specific endpoint and should be called only once, when
+ * the EP is enabled the first time.
+ *
+ * Returns 0 on success or -FH_E_NO_MEMORY if an allocation fails; on
+ * failure everything allocated so far is freed AND reset to NULL so a
+ * later cfi_free_ep_bs_dyn_data() cannot double-free.
+ */
+static int cfi_ep_init_defaults(struct fh_otg_pcd *pcd, cfi_ep_t * cfiep)
+{
+	int retval = 0;
+
+	cfiep->bm_sg = FH_ALLOC(sizeof(ddma_sg_buffer_setup_t));
+	if (NULL == cfiep->bm_sg) {
+		CFI_INFO("Failed to allocate memory for SG feature value\n");
+		return -FH_E_NO_MEMORY;
+	}
+	fh_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
+
+	/* For the Concatenation feature's default value we do not allocate
+	 * memory for the wTxBytes field - it will be done in the
+	 * set_feature_value request handler.
+	 */
+	cfiep->bm_concat = FH_ALLOC(sizeof(ddma_concat_buffer_setup_t));
+	if (NULL == cfiep->bm_concat) {
+		CFI_INFO
+		    ("Failed to allocate memory for CONCATENATION feature value\n");
+		FH_FREE(cfiep->bm_sg);
+		/* BUGFIX: clear the freed pointer - it used to dangle. */
+		cfiep->bm_sg = NULL;
+		return -FH_E_NO_MEMORY;
+	}
+	fh_memset(cfiep->bm_concat, 0, sizeof(ddma_concat_buffer_setup_t));
+
+	cfiep->bm_align = FH_ALLOC(sizeof(ddma_align_buffer_setup_t));
+	if (NULL == cfiep->bm_align) {
+		CFI_INFO
+		    ("Failed to allocate memory for Alignment feature value\n");
+		FH_FREE(cfiep->bm_sg);
+		FH_FREE(cfiep->bm_concat);
+		/* BUGFIX: clear the freed pointers - they used to dangle. */
+		cfiep->bm_sg = NULL;
+		cfiep->bm_concat = NULL;
+		return -FH_E_NO_MEMORY;
+	}
+	fh_memset(cfiep->bm_align, 0, sizeof(ddma_align_buffer_setup_t));
+
+	return retval;
+}
+
+/**
+ * The callback function that notifies the CFI on the activation of
+ * an endpoint in the PCD.  The following steps are done in this function:
+ *
+ *	Create a dynamically allocated cfi_ep_t object (a CFI wrapper to
+ *	the PCD's active endpoint)
+ *	Create MAX_DMA_DESCS_PER_EP count DMA Descriptors for the EP
+ *	Set the Buffer Mode to standard
+ *	Initialize the default values for all EP modes (SG, Circular,
+ *	Concat, Align)
+ *	Add the cfi_ep_t object to the list of active endpoints in the
+ *	CFI object
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int cfi_ep_enable(struct cfiobject *cfi, struct fh_otg_pcd *pcd,
+			 struct fh_otg_pcd_ep *ep)
+{
+	cfi_ep_t *cfiep;
+	int retval = -FH_E_NOT_SUPPORTED;
+
+	CFI_INFO("%s: epname=%s; epnum=0x%02x\n", __func__,
+		 "EP_" /*ep->ep.name */ , ep->desc->bEndpointAddress);
+	/* MAS - Check whether this endpoint already is in the list */
+	cfiep = get_cfi_ep_by_pcd_ep(cfi, ep);
+
+	if (NULL == cfiep) {
+		/* Allocate a cfi_ep_t object */
+		cfiep = FH_ALLOC(sizeof(cfi_ep_t));
+		if (NULL == cfiep) {
+			CFI_INFO
+			    ("Unable to allocate memory for <cfiep> in function %s\n",
+			     __func__);
+			return -FH_E_NO_MEMORY;
+		}
+		fh_memset(cfiep, 0, sizeof(cfi_ep_t));
+
+		/* Save the fh_otg_pcd_ep pointer in the cfiep object */
+		cfiep->ep = ep;
+
+		/* Allocate the DMA Descriptors chain of MAX_DMA_DESCS_PER_EP count */
+		ep->fh_ep.descs =
+		    FH_DMA_ALLOC(MAX_DMA_DESCS_PER_EP *
+				 sizeof(fh_otg_dma_desc_t),
+				 &ep->fh_ep.descs_dma_addr);
+
+		if (NULL == ep->fh_ep.descs) {
+			FH_FREE(cfiep);
+			return -FH_E_NO_MEMORY;
+		}
+
+		FH_LIST_INIT(&cfiep->lh);
+
+		/* Set the buffer mode to BM_STANDARD. It will be modified
+		 * when building descriptors for a specific buffer mode */
+		ep->fh_ep.buff_mode = BM_STANDARD;
+
+		/* Create and initialize the default values for this EP's Buffer modes */
+		if ((retval = cfi_ep_init_defaults(pcd, cfiep)) < 0) {
+			/* BUGFIX: release the descriptor chain and the
+			 * wrapper on failure - both used to leak here. */
+			FH_DMA_FREE(MAX_DMA_DESCS_PER_EP *
+				    sizeof(fh_otg_dma_desc_t),
+				    ep->fh_ep.descs,
+				    ep->fh_ep.descs_dma_addr);
+			ep->fh_ep.descs = NULL;
+			FH_FREE(cfiep);
+			return retval;
+		}
+
+		/* Add the cfi_ep_t object to the CFI object's list of active endpoints */
+		FH_LIST_INSERT_TAIL(&cfi->active_eps, &cfiep->lh);
+		retval = 0;
+	} else {		/* The sought EP already is in the list */
+		CFI_INFO("%s: The sought EP already is in the list\n",
+			 __func__);
+	}
+
+	return retval;
+}
+
+/**
+ * Called when the data stage of a 3-stage Control Write request has
+ * completed.  Dispatches on the request code saved during the command
+ * stage and finishes the request using the received data.
+ */
+static int cfi_ctrl_write_complete(struct cfiobject *cfi,
+				   struct fh_otg_pcd *pcd)
+{
+	struct cfi_usb_ctrlrequest *saved_req = &cfi->ctrl_req;
+	uint8_t *data_buf = cfi->buf_out.buf;
+	uint32_t addr, reg_value;
+	uint16_t wIndex, wValue;
+	uint8_t bRequest;
+	int retval = -FH_E_NOT_SUPPORTED;
+
+	CFI_INFO("%s\n", __func__);
+
+	bRequest = saved_req->bRequest;
+	wIndex = FH_CONSTANT_CPU_TO_LE16(saved_req->wIndex);
+	wValue = FH_CONSTANT_CPU_TO_LE16(saved_req->wValue);
+
+	/* The command stage already stored the request itself; attach the
+	 * just-received data stage buffer to it now. */
+	saved_req->data = cfi->buf_out.buf;
+	cfi->need_status_in_complete = 0;
+	cfi->need_gadget_att = 0;
+
+	switch (bRequest) {
+	case VEN_CORE_WRITE_REGISTER:
+		/* The buffer holds the raw new value for the register.
+		 * NOTE(review): the actual register write is currently
+		 * disabled in the original code - the address is computed
+		 * but never dereferenced. */
+		reg_value = *((uint32_t *) data_buf);
+		if (wValue == 0) {
+			addr = 0;
+			addr += wIndex;
+		} else {
+			addr = (wValue << 16) | wIndex;
+		}
+
+		retval = 0;
+		cfi->need_status_in_complete = 1;
+		break;
+
+	case VEN_CORE_SET_FEATURE:
+		/* The buffer holds the raw new value of the feature. */
+		retval = cfi_set_feature_value(pcd);
+		if (retval < 0)
+			return retval;
+
+		cfi->need_status_in_complete = 1;
+		break;
+
+	default:
+		break;
+	}
+
+	return retval;
+}
+
+/**
+ * Builds the DMA descriptor chain for the Scatter/Gather buffer mode:
+ * one descriptor per SG entry, only the final one raising an interrupt.
+ */
+static void cfi_build_sg_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
+			       fh_otg_pcd_request_t * req)
+{
+	struct fh_otg_pcd_ep *pcd_ep = cfiep->ep;
+	ddma_sg_buffer_setup_t *setup = cfiep->bm_sg;
+	struct fh_otg_dma_desc *dd = cfiep->ep->fh_ep.descs;
+	struct fh_otg_dma_desc *last = cfiep->ep->fh_ep.descs;
+	dma_addr_t next_buf = req->dma;
+	uint32_t chunk_size, gap;
+	int n;
+
+	chunk_size = setup->wSize;
+	gap = setup->bOffset;
+
+	for (n = 0; n < setup->bCount; n++) {
+		dd->status.b.bs = BS_HOST_BUSY;
+		dd->buf = next_buf;
+		dd->status.b.l = 0;
+		dd->status.b.ioc = 0;
+		dd->status.b.sp = 0;
+		dd->status.b.bytes = chunk_size;
+		dd->status.b.bs = BS_HOST_READY;
+
+		/* Advance to the start of the next SG chunk. */
+		next_buf += chunk_size + gap;
+		last = dd;
+		dd++;
+	}
+
+	/* Close the chain: last, interrupt-on-complete, short-packet flag
+	 * taken from the EP state. */
+	last->status.b.l = 1;
+	last->status.b.ioc = 1;
+	last->status.b.sp = pcd_ep->fh_ep.sent_zlp;
+	/* Remember the tail of the chain for completion handling. */
+	cfiep->dma_desc_last = last;
+	cfiep->desc_count = setup->bCount;
+}
+
+/**
+ * Builds the DMA descriptor chain for the Concatenation buffer mode.
+ * Each descriptor transfers its own byte count (wTxBytes[i]) while the
+ * buffer pointer advances by exactly one wMaxPacketSize per descriptor.
+ */
+static void cfi_build_concat_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
+				   fh_otg_pcd_request_t * req)
+{
+	struct fh_otg_pcd_ep *pcd_ep = cfiep->ep;
+	ddma_concat_buffer_setup_t *setup = cfiep->bm_concat;
+	struct fh_otg_dma_desc *dd = cfiep->ep->fh_ep.descs;
+	struct fh_otg_dma_desc *last = cfiep->ep->fh_ep.descs;
+	dma_addr_t next_buf = req->dma;
+	uint16_t *bytes_per_desc = setup->wTxBytes;
+	int n;
+
+	for (n = 0; n < setup->hdr.bDescCount; n++) {
+		dd->buf = next_buf;
+		dd->status.b.bs = BS_HOST_BUSY;
+		dd->status.b.l = 0;
+		dd->status.b.ioc = 0;
+		dd->status.b.sp = 0;
+		dd->status.b.bytes = *bytes_per_desc;
+		dd->status.b.bs = BS_HOST_READY;
+
+		bytes_per_desc++;
+		/* The next chunk starts one max-packet further on. */
+		next_buf += UGETW(pcd_ep->desc->wMaxPacketSize);
+		last = dd;
+		dd++;
+	}
+
+	/* Close the chain on the final descriptor. */
+	last->status.b.l = 1;
+	last->status.b.ioc = 1;
+	last->status.b.sp = pcd_ep->fh_ep.sent_zlp;
+	cfiep->dma_desc_last = last;
+	cfiep->desc_count = setup->hdr.bDescCount;
+}
+
+/**
+ * Builds the DMA descriptors for the Circular buffer mode.
+ * Intentionally empty: not implemented yet (original @todo by MAS).
+ */
+static void cfi_build_circ_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
+				 fh_otg_pcd_request_t * req)
+{
+	/* @todo: MAS - add implementation when this feature needs to be tested */
+}
+
+/**
+ * Builds the single DMA descriptor used by the Alignment buffer mode:
+ * the whole request goes out in one descriptor whose buffer address is
+ * shifted by the configured alignment offset.
+ */
+static void cfi_build_align_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
+				  fh_otg_pcd_request_t * req)
+{
+	struct fh_otg_pcd_ep *pcd_ep = cfiep->ep;
+	ddma_align_buffer_setup_t *setup = cfiep->bm_align;
+	struct fh_otg_dma_desc *dd = cfiep->ep->fh_ep.descs;
+	dma_addr_t base = req->dma;
+
+	dd->status.b.bs = BS_HOST_BUSY;
+	dd->status.b.l = 1;
+	dd->status.b.ioc = 1;
+	dd->status.b.sp = pcd_ep->fh_ep.sent_zlp;
+	dd->status.b.bytes = req->length;
+	/* Apply the configured alignment offset to the buffer address. */
+	dd->buf = (base + setup->bAlign);
+	dd->status.b.bs = BS_HOST_READY;
+	cfiep->dma_desc_last = dd;
+	cfiep->desc_count = 1;
+}
+
+/**
+ * Builds the DMA descriptor chain for whichever buffer mode is currently
+ * configured on the endpoint.  Unknown or standard modes build nothing.
+ */
+static void cfi_build_descriptors(struct cfiobject *cfi,
+				  struct fh_otg_pcd *pcd,
+				  struct fh_otg_pcd_ep *ep,
+				  fh_otg_pcd_request_t * req)
+{
+	/* Translate the PCD endpoint into its CFI wrapper. */
+	cfi_ep_t *wrapper = get_cfi_ep_by_pcd_ep(cfi, ep);
+
+	if (NULL == wrapper) {
+		CFI_INFO("%s: Unable to find a matching active endpoint\n",
+			 __func__);
+		return;
+	}
+
+	wrapper->xfer_len = req->length;
+
+	/* Dispatch to the builder for the EP's configured buffer mode. */
+	switch (wrapper->ep->fh_ep.buff_mode) {
+	case BM_SG:
+		cfi_build_sg_descs(cfi, wrapper, req);
+		break;
+
+	case BM_CONCAT:
+		cfi_build_concat_descs(cfi, wrapper, req);
+		break;
+
+	case BM_CIRCULAR:
+		cfi_build_circ_descs(cfi, wrapper, req);
+		break;
+
+	case BM_ALIGN:
+		cfi_build_align_descs(cfi, wrapper, req);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/**
+ * Allocates a DMA-coherent buffer for the various buffer modes.
+ * The cfi/pcd/ep/flags arguments are currently unused; allocation is
+ * delegated entirely to FH_DMA_ALLOC.
+ */
+static void *cfi_ep_alloc_buf(struct cfiobject *cfi, struct fh_otg_pcd *pcd,
+			      struct fh_otg_pcd_ep *ep, dma_addr_t * dma,
+			      unsigned size, gfp_t flags)
+{
+	return FH_DMA_ALLOC(size, dma);
+}
+
+/**
+ * This function initializes the CFI object: allocates the IN/OUT DMA
+ * buffers, wires up the callback table and initializes the list of
+ * active endpoints.
+ *
+ * Returns 0 on success or -FH_E_NO_MEMORY; nothing is leaked on failure.
+ */
+int init_cfi(cfiobject_t * cfiobj)
+{
+	CFI_INFO("%s\n", __func__);
+
+	/* Allocate a buffer for IN XFERs */
+	cfiobj->buf_in.buf =
+	    FH_DMA_ALLOC(CFI_IN_BUF_LEN, &cfiobj->buf_in.addr);
+	if (NULL == cfiobj->buf_in.buf) {
+		CFI_INFO("Unable to allocate buffer for INs\n");
+		return -FH_E_NO_MEMORY;
+	}
+
+	/* Allocate a buffer for OUT XFERs */
+	cfiobj->buf_out.buf =
+	    FH_DMA_ALLOC(CFI_OUT_BUF_LEN, &cfiobj->buf_out.addr);
+	if (NULL == cfiobj->buf_out.buf) {
+		CFI_INFO("Unable to allocate buffer for OUT\n");
+		/* BUGFIX: the IN buffer used to leak on this path. */
+		FH_DMA_FREE(CFI_IN_BUF_LEN, cfiobj->buf_in.buf,
+			    cfiobj->buf_in.addr);
+		cfiobj->buf_in.buf = NULL;
+		return -FH_E_NO_MEMORY;
+	}
+
+	/* Initialize the callback function pointers */
+	cfiobj->ops.release = cfi_release;
+	cfiobj->ops.ep_enable = cfi_ep_enable;
+	cfiobj->ops.ctrl_write_complete = cfi_ctrl_write_complete;
+	cfiobj->ops.build_descriptors = cfi_build_descriptors;
+	cfiobj->ops.ep_alloc_buf = cfi_ep_alloc_buf;
+
+	/* Initialize the list of active endpoints in the CFI object */
+	FH_LIST_INIT(&cfiobj->active_eps);
+
+	return 0;
+}
+
+/**
+ * This function reads the required feature's current value into the buffer.
+ *
+ * @retval Returns negative as error, or the data length of the feature.
+ */
+static int cfi_get_feature_value(uint8_t * buf, uint16_t buflen,
+				 struct fh_otg_pcd *pcd,
+				 struct cfi_usb_ctrlrequest *ctrl_req)
+{
+	int retval = -FH_E_NOT_SUPPORTED;
+	struct fh_otg_core_if *coreif = GET_CORE_IF(pcd);
+	uint16_t dfifo, rxfifo, txfifo;
+
+	switch (ctrl_req->wIndex) {
+		/* Whether the DDMA is enabled or not */
+	case FT_ID_DMA_MODE:
+		*buf = (coreif->dma_enable && coreif->dma_desc_enable) ? 1 : 0;
+		retval = 1;
+		break;
+
+	case FT_ID_DMA_BUFFER_SETUP:
+		retval = cfi_ep_get_sg_val(buf, pcd, ctrl_req);
+		break;
+
+	case FT_ID_DMA_BUFF_ALIGN:
+		retval = cfi_ep_get_align_val(buf, pcd, ctrl_req);
+		break;
+
+	case FT_ID_DMA_CONCAT_SETUP:
+		retval = cfi_ep_get_concat_val(buf, pcd, ctrl_req);
+		break;
+
+	case FT_ID_DMA_CIRCULAR:
+		/* Not implemented; keeps -FH_E_NOT_SUPPORTED. */
+		CFI_INFO("GetFeature value (FT_ID_DMA_CIRCULAR)\n");
+		break;
+
+	case FT_ID_THRESHOLD_SETUP:
+		/* Not implemented; keeps -FH_E_NOT_SUPPORTED. */
+		CFI_INFO("GetFeature value (FT_ID_THRESHOLD_SETUP)\n");
+		break;
+
+	case FT_ID_DFIFO_DEPTH:
+		dfifo = get_dfifo_size(coreif);
+		*((uint16_t *) buf) = dfifo;
+		retval = sizeof(uint16_t);
+		break;
+
+	case FT_ID_TX_FIFO_DEPTH:
+		retval = get_txfifo_size(pcd, ctrl_req->wValue);
+		if (retval >= 0) {
+			txfifo = retval;
+			*((uint16_t *) buf) = txfifo;
+			retval = sizeof(uint16_t);
+		}
+		break;
+
+	case FT_ID_RX_FIFO_DEPTH:
+		retval = get_rxfifo_size(coreif, ctrl_req->wValue);
+		if (retval >= 0) {
+			rxfifo = retval;
+			*((uint16_t *) buf) = rxfifo;
+			retval = sizeof(uint16_t);
+		}
+		break;
+
+	default:
+		/* IDIOM: explicit default - unknown feature IDs keep the
+		 * initial -FH_E_NOT_SUPPORTED and stall the request. */
+		break;
+	}
+
+	return retval;
+}
+
+/**
+ * Resets the Scatter/Gather setup of the given EP back to all zeroes.
+ */
+static int cfi_reset_sg_val(cfi_ep_t * cfiep)
+{
+	fh_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
+	return 0;
+}
+
+/**
+ * This function resets the Alignment setup for the specified EP to its
+ * default (all-zero) value.
+ */
+static int cfi_reset_align_val(cfi_ep_t * cfiep)
+{
+	/* BUGFIX: this used to clear bm_sg with the SG setup size (a
+	 * copy/paste of cfi_reset_sg_val), leaving the Alignment setup
+	 * untouched and so never actually reset. */
+	fh_memset(cfiep->bm_align, 0, sizeof(ddma_align_buffer_setup_t));
+	return 0;
+}
+
+/**
+ * Resets the Concatenation setup of the given EP to its default value,
+ * releasing the separately allocated wTxBytes array first and leaving
+ * its pointer NULL.
+ */
+static int cfi_reset_concat_val(cfi_ep_t * cfiep)
+{
+	/* The wTxBytes array is owned by the concat setup - free it. */
+	if (cfiep->bm_concat->wTxBytes != NULL) {
+		FH_FREE(cfiep->bm_concat->wTxBytes);
+		cfiep->bm_concat->wTxBytes = NULL;
+	}
+
+	fh_memset(cfiep->bm_concat, 0, sizeof(ddma_concat_buffer_setup_t));
+	return 0;
+}
+
+/**
+ * Resets every buffer-setup feature (SG, Alignment, Concatenation) of
+ * the specified endpoint to its default value.
+ */
+static int cfi_ep_reset_all_setup_vals(cfi_ep_t * cfiep)
+{
+	cfi_reset_sg_val(cfiep);
+	cfi_reset_align_val(cfiep);
+	cfi_reset_concat_val(cfiep);
+	return 0;
+}
+
+/**
+ * Restores the default RX and/or TX FIFO sizes and re-applies them to
+ * the core.  @ep_addr == 0 means "all IN endpoints" for the TX reset;
+ * otherwise only the FIFO of the endpoint with that address is restored.
+ * If resize_fifos() fails, the previous sizes are rolled back.
+ */
+static int cfi_handle_reset_fifo_val(struct fh_otg_pcd *pcd, uint8_t ep_addr,
+				     uint8_t rx_rst, uint8_t tx_rst)
+{
+	int retval = -FH_E_INVALID;
+	uint16_t tx_siz[15];
+	uint16_t rx_siz = 0;
+	fh_otg_pcd_ep_t *ep = NULL;
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	fh_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
+	int num_in_eps = core_if->hwcfg4.b.num_in_eps;
+
+	/* ROBUSTNESS: clamp to the size of the local backup array so an
+	 * unexpected hwcfg4 value cannot overflow tx_siz[] on the stack. */
+	if (num_in_eps > 15)
+		num_in_eps = 15;
+
+	if (rx_rst) {
+		/* Remember the current size so it can be rolled back. */
+		rx_siz = params->dev_rx_fifo_size;
+		params->dev_rx_fifo_size = GET_CORE_IF(pcd)->init_rxfsiz;
+	}
+
+	if (tx_rst) {
+		if (ep_addr == 0) {
+			int i;
+
+			for (i = 0; i < num_in_eps; i++) {
+				tx_siz[i] =
+				    core_if->core_params->dev_tx_fifo_size[i];
+				core_if->core_params->dev_tx_fifo_size[i] =
+				    core_if->init_txfsiz[i];
+			}
+		} else {
+
+			ep = get_ep_by_addr(pcd, ep_addr);
+
+			if (NULL == ep) {
+				CFI_INFO
+				    ("%s: Unable to get the endpoint addr=0x%02x\n",
+				     __func__, ep_addr);
+				return -FH_E_INVALID;
+			}
+
+			tx_siz[0] =
+			    params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num -
+						     1];
+			params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num - 1] =
+			    GET_CORE_IF(pcd)->init_txfsiz[ep->
+							  fh_ep.tx_fifo_num -
+							  1];
+		}
+	}
+
+	if (resize_fifos(GET_CORE_IF(pcd))) {
+		retval = 0;
+	} else {
+		CFI_INFO
+		    ("%s: Error resetting the feature Reset All(FIFO size)\n",
+		     __func__);
+		/* Resize failed - roll back to the sizes saved above. */
+		if (rx_rst) {
+			params->dev_rx_fifo_size = rx_siz;
+		}
+
+		if (tx_rst) {
+			if (ep_addr == 0) {
+				int i;
+				for (i = 0; i < num_in_eps; i++) {
+					core_if->
+					    core_params->dev_tx_fifo_size[i] =
+					    tx_siz[i];
+				}
+			} else {
+				params->dev_tx_fifo_size[ep->
+							 fh_ep.tx_fifo_num -
+							 1] = tx_siz[0];
+			}
+		}
+		retval = -FH_E_INVALID;
+	}
+	return retval;
+}
+
+static int cfi_handle_reset_all(struct fh_otg_pcd *pcd, uint8_t addr)
+{
+ int retval = 0;
+ cfi_ep_t *cfiep;
+ cfiobject_t *cfi = pcd->cfi;
+ fh_list_link_t *tmp;
+
+ retval = cfi_handle_reset_fifo_val(pcd, addr, 1, 1);
+ if (retval < 0) {
+ return retval;
+ }
+
+ /* If the EP address is known then reset the features for only that EP */
+ if (addr) {
+ cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
+ if (NULL == cfiep) {
+ CFI_INFO("%s: Error getting the EP address 0x%02x\n",
+ __func__, addr);
+ return -FH_E_INVALID;
+ }
+ retval = cfi_ep_reset_all_setup_vals(cfiep);
+ cfiep->ep->fh_ep.buff_mode = BM_STANDARD;
+ }
+ /* Otherwise (wValue == 0), reset all features of all EP's */
+ else {
+ /* Traverse all the active EP's and reset the feature(s) value(s) */
+ //list_for_each_entry(cfiep, &cfi->active_eps, lh) {
+ FH_LIST_FOREACH(tmp, &cfi->active_eps) {
+ cfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
+ retval = cfi_ep_reset_all_setup_vals(cfiep);
+ cfiep->ep->fh_ep.buff_mode = BM_STANDARD;
+ if (retval < 0) {
+ CFI_INFO
+ ("%s: Error resetting the feature Reset All\n",
+ __func__);
+ return retval;
+ }
+ }
+ }
+ return retval;
+}
+
+static int cfi_handle_reset_dma_buff_setup(struct fh_otg_pcd *pcd,
+ uint8_t addr)
+{
+ int retval = 0;
+ cfi_ep_t *cfiep;
+ cfiobject_t *cfi = pcd->cfi;
+ fh_list_link_t *tmp;
+
+ /* If the EP address is known then reset the features for only that EP */
+ if (addr) {
+ cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
+ if (NULL == cfiep) {
+ CFI_INFO("%s: Error getting the EP address 0x%02x\n",
+ __func__, addr);
+ return -FH_E_INVALID;
+ }
+ retval = cfi_reset_sg_val(cfiep);
+ }
+ /* Otherwise (wValue == 0), reset all features of all EP's */
+ else {
+ /* Traverse all the active EP's and reset the feature(s) value(s) */
+ //list_for_each_entry(cfiep, &cfi->active_eps, lh) {
+ FH_LIST_FOREACH(tmp, &cfi->active_eps) {
+ cfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
+ retval = cfi_reset_sg_val(cfiep);
+ if (retval < 0) {
+ CFI_INFO
+ ("%s: Error resetting the feature Buffer Setup\n",
+ __func__);
+ return retval;
+ }
+ }
+ }
+ return retval;
+}
+
+static int cfi_handle_reset_concat_val(struct fh_otg_pcd *pcd, uint8_t addr)
+{
+ int retval = 0;
+ cfi_ep_t *cfiep;
+ cfiobject_t *cfi = pcd->cfi;
+ fh_list_link_t *tmp;
+
+ /* If the EP address is known then reset the features for only that EP */
+ if (addr) {
+ cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
+ if (NULL == cfiep) {
+ CFI_INFO("%s: Error getting the EP address 0x%02x\n",
+ __func__, addr);
+ return -FH_E_INVALID;
+ }
+ retval = cfi_reset_concat_val(cfiep);
+ }
+ /* Otherwise (wValue == 0), reset all features of all EP's */
+ else {
+ /* Traverse all the active EP's and reset the feature(s) value(s) */
+ //list_for_each_entry(cfiep, &cfi->active_eps, lh) {
+ FH_LIST_FOREACH(tmp, &cfi->active_eps) {
+ cfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
+ retval = cfi_reset_concat_val(cfiep);
+ if (retval < 0) {
+ CFI_INFO
+ ("%s: Error resetting the feature Concatenation Value\n",
+ __func__);
+ return retval;
+ }
+ }
+ }
+ return retval;
+}
+
+static int cfi_handle_reset_align_val(struct fh_otg_pcd *pcd, uint8_t addr)
+{
+	int retval = 0;
+	cfi_ep_t *cfiep;
+	cfiobject_t *cfi = pcd->cfi;
+	fh_list_link_t *tmp;
+
+	/* If the EP address is known then reset the features for only that EP */
+	if (addr) {
+		cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
+		if (NULL == cfiep) {
+			CFI_INFO("%s: Error getting the EP address 0x%02x\n",
+				 __func__, addr);
+			return -FH_E_INVALID;
+		}
+		retval = cfi_reset_align_val(cfiep);
+	}
+	/* Otherwise (wValue == 0), reset all features of all EP's */
+	else {
+		/* Traverse all the active EP's and reset the feature(s) value(s) */
+		//list_for_each_entry(cfiep, &cfi->active_eps, lh) {
+		FH_LIST_FOREACH(tmp, &cfi->active_eps) {
+			cfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
+			retval = cfi_reset_align_val(cfiep);
+			if (retval < 0) {
+				CFI_INFO
+				    ("%s: Error resetting the feature Alignment Value\n",
+				     __func__);
+				return retval;
+			}
+		}
+	}
+	return retval;
+
+}
+
+static int cfi_preproc_reset(struct fh_otg_pcd *pcd,
+ struct cfi_usb_ctrlrequest *req)
+{
+ int retval = 0;
+
+ switch (req->wIndex) {
+ case 0:
+ /* Reset all features */
+ retval = cfi_handle_reset_all(pcd, req->wValue & 0xff);
+ break;
+
+ case FT_ID_DMA_BUFFER_SETUP:
+ /* Reset the SG buffer setup */
+ retval =
+ cfi_handle_reset_dma_buff_setup(pcd, req->wValue & 0xff);
+ break;
+
+ case FT_ID_DMA_CONCAT_SETUP:
+ /* Reset the Concatenation buffer setup */
+ retval = cfi_handle_reset_concat_val(pcd, req->wValue & 0xff);
+ break;
+
+ case FT_ID_DMA_BUFF_ALIGN:
+ /* Reset the Alignment buffer setup */
+ retval = cfi_handle_reset_align_val(pcd, req->wValue & 0xff);
+ break;
+
+ case FT_ID_TX_FIFO_DEPTH:
+ retval =
+ cfi_handle_reset_fifo_val(pcd, req->wValue & 0xff, 0, 1);
+ pcd->cfi->need_gadget_att = 0;
+ break;
+
+ case FT_ID_RX_FIFO_DEPTH:
+ retval = cfi_handle_reset_fifo_val(pcd, 0, 1, 0);
+ pcd->cfi->need_gadget_att = 0;
+ break;
+ default:
+ break;
+ }
+ return retval;
+}
+
+/**
+ * This function sets a new value for the SG buffer setup.
+ */
+static int cfi_ep_set_sg_val(uint8_t * buf, struct fh_otg_pcd *pcd)
+{
+ uint8_t inaddr, outaddr;
+ cfi_ep_t *epin, *epout;
+ ddma_sg_buffer_setup_t *psgval;
+ uint32_t desccount, size;
+
+ CFI_INFO("%s\n", __func__);
+
+ psgval = (ddma_sg_buffer_setup_t *) buf;
+ desccount = (uint32_t) psgval->bCount;
+ size = (uint32_t) psgval->wSize;
+
+ /* Check the DMA descriptor count */
+ if ((desccount > MAX_DMA_DESCS_PER_EP) || (desccount == 0)) {
+ CFI_INFO
+ ("%s: The count of DMA Descriptors should be between 1 and %d\n",
+ __func__, MAX_DMA_DESCS_PER_EP);
+ return -FH_E_INVALID;
+ }
+
+ /* Check the DMA descriptor count */
+
+ if (size == 0) {
+
+ CFI_INFO("%s: The transfer size should be at least 1 byte\n",
+ __func__);
+
+ return -FH_E_INVALID;
+
+ }
+
+ inaddr = psgval->bInEndpointAddress;
+ outaddr = psgval->bOutEndpointAddress;
+
+ epin = get_cfi_ep_by_addr(pcd->cfi, inaddr);
+ epout = get_cfi_ep_by_addr(pcd->cfi, outaddr);
+
+ if (NULL == epin || NULL == epout) {
+ CFI_INFO
+ ("%s: Unable to get the endpoints inaddr=0x%02x outaddr=0x%02x\n",
+ __func__, inaddr, outaddr);
+ return -FH_E_INVALID;
+ }
+
+ epin->ep->fh_ep.buff_mode = BM_SG;
+ fh_memcpy(epin->bm_sg, psgval, sizeof(ddma_sg_buffer_setup_t));
+
+ epout->ep->fh_ep.buff_mode = BM_SG;
+ fh_memcpy(epout->bm_sg, psgval, sizeof(ddma_sg_buffer_setup_t));
+
+ return 0;
+}
+
+/**
+ * This function sets a new value for the buffer Alignment setup.
+ */
+static int cfi_ep_set_alignment_val(uint8_t * buf, struct fh_otg_pcd *pcd)
+{
+ cfi_ep_t *ep;
+ uint8_t addr;
+ ddma_align_buffer_setup_t *palignval;
+
+ palignval = (ddma_align_buffer_setup_t *) buf;
+ addr = palignval->bEndpointAddress;
+
+ ep = get_cfi_ep_by_addr(pcd->cfi, addr);
+
+ if (NULL == ep) {
+ CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
+ __func__, addr);
+ return -FH_E_INVALID;
+ }
+
+ ep->ep->fh_ep.buff_mode = BM_ALIGN;
+ fh_memcpy(ep->bm_align, palignval, sizeof(ddma_align_buffer_setup_t));
+
+ return 0;
+}
+
+/**
+ * This function sets a new value for the Concatenation buffer setup.
+ */
+static int cfi_ep_set_concat_val(uint8_t * buf, struct fh_otg_pcd *pcd)
+{
+ uint8_t addr;
+ cfi_ep_t *ep;
+ struct _ddma_concat_buffer_setup_hdr *pConcatValHdr;
+ uint16_t *pVals;
+ uint32_t desccount;
+ int i;
+ uint16_t mps;
+
+ pConcatValHdr = (struct _ddma_concat_buffer_setup_hdr *)buf;
+ desccount = (uint32_t) pConcatValHdr->bDescCount;
+ pVals = (uint16_t *) (buf + BS_CONCAT_VAL_HDR_LEN);
+
+ /* Check the DMA descriptor count */
+ if (desccount > MAX_DMA_DESCS_PER_EP) {
+ CFI_INFO("%s: Maximum DMA Descriptor count should be %d\n",
+ __func__, MAX_DMA_DESCS_PER_EP);
+ return -FH_E_INVALID;
+ }
+
+ addr = pConcatValHdr->bEndpointAddress;
+ ep = get_cfi_ep_by_addr(pcd->cfi, addr);
+ if (NULL == ep) {
+ CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
+ __func__, addr);
+ return -FH_E_INVALID;
+ }
+
+ mps = UGETW(ep->ep->desc->wMaxPacketSize);
+
+#if 0
+ for (i = 0; i < desccount; i++) {
+ CFI_INFO("%s: wTxSize[%d]=0x%04x\n", __func__, i, pVals[i]);
+ }
+ CFI_INFO("%s: epname=%s; mps=%d\n", __func__, ep->ep->ep.name, mps);
+#endif
+
+ /* Check the wTxSizes to be less than or equal to the mps */
+ for (i = 0; i < desccount; i++) {
+ if (pVals[i] > mps) {
+ CFI_INFO
+ ("%s: ERROR - the wTxSize[%d] should be <= MPS (wTxSize=%d)\n",
+ __func__, i, pVals[i]);
+ return -FH_E_INVALID;
+ }
+ }
+
+ ep->ep->fh_ep.buff_mode = BM_CONCAT;
+ fh_memcpy(ep->bm_concat, pConcatValHdr, BS_CONCAT_VAL_HDR_LEN);
+
+ /* Free the previously allocated storage for the wTxBytes */
+ if (ep->bm_concat->wTxBytes) {
+ FH_FREE(ep->bm_concat->wTxBytes);
+ }
+
+ /* Allocate a new storage for the wTxBytes field */
+ ep->bm_concat->wTxBytes =
+ FH_ALLOC(sizeof(uint16_t) * pConcatValHdr->bDescCount);
+ if (NULL == ep->bm_concat->wTxBytes) {
+ CFI_INFO("%s: Unable to allocate memory\n", __func__);
+ return -FH_E_NO_MEMORY;
+ }
+
+ /* Copy the new values into the wTxBytes filed */
+ fh_memcpy(ep->bm_concat->wTxBytes, buf + BS_CONCAT_VAL_HDR_LEN,
+ sizeof(uint16_t) * pConcatValHdr->bDescCount);
+
+ return 0;
+}
+
+/**
+ * This function calculates the total of all FIFO sizes
+ *
+ * @param core_if Programming view of FH_otg controller
+ *
+ * @return The total of data FIFO sizes.
+ *
+ */
+static uint16_t get_dfifo_size(fh_otg_core_if_t * core_if)
+{
+ fh_otg_core_params_t *params = core_if->core_params;
+ uint16_t dfifo_total = 0;
+ int i;
+
+ /* The shared RxFIFO size */
+ dfifo_total =
+ params->dev_rx_fifo_size + params->dev_nperio_tx_fifo_size;
+
+ /* Add up each TxFIFO size to the total */
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+ dfifo_total += params->dev_tx_fifo_size[i];
+ }
+
+ return dfifo_total;
+}
+
+/**
+ * This function returns Rx FIFO size
+ *
+ * @param core_if Programming view of FH_otg controller
+ *
+ * @return The total of data FIFO sizes.
+ *
+ */
+static int32_t get_rxfifo_size(fh_otg_core_if_t * core_if, uint16_t wValue)
+{
+ switch (wValue >> 8) {
+ case 0:
+ return (core_if->pwron_rxfsiz <
+ 32768) ? core_if->pwron_rxfsiz : 32768;
+ break;
+ case 1:
+ return core_if->core_params->dev_rx_fifo_size;
+ break;
+ default:
+ return -FH_E_INVALID;
+ break;
+ }
+}
+
+/**
+ * This function returns Tx FIFO size for IN EP
+ *
+ * @param core_if Programming view of FH_otg controller
+ *
+ * @return The total of data FIFO sizes.
+ *
+ */
+static int32_t get_txfifo_size(struct fh_otg_pcd *pcd, uint16_t wValue)
+{
+	fh_otg_pcd_ep_t *ep;
+
+	ep = get_ep_by_addr(pcd, wValue & 0xff);
+
+	if (NULL == ep) {
+		CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
+			 __func__, wValue & 0xff);
+		return -FH_E_INVALID;
+	}
+
+	if (!ep->fh_ep.is_in) {
+		CFI_INFO
+		    ("%s: No Tx FIFO assigned to the Out endpoint addr=0x%02x\n",
+		     __func__, wValue & 0xff);
+		return -FH_E_INVALID;
+	}
+
+	switch (wValue >> 8) {
+	case 0:
+		return (GET_CORE_IF(pcd)->pwron_txfsiz
+			[ep->fh_ep.tx_fifo_num - 1] <
+			768) ? GET_CORE_IF(pcd)->pwron_txfsiz[ep->
+							      fh_ep.tx_fifo_num
+							      - 1] : 32768;
+		break;
+	case 1:
+		return GET_CORE_IF(pcd)->core_params->
+		    dev_tx_fifo_size[ep->fh_ep.num - 1];
+		break;
+	default:
+		return -FH_E_INVALID;
+		break;
+	}
+}
+
+/**
+ * This function checks if the submitted combination of
+ * device mode FIFO sizes is possible or not.
+ *
+ * @param core_if Programming view of FH_otg controller
+ *
+ * @return 1 if possible, 0 otherwise.
+ *
+ */
+static uint8_t check_fifo_sizes(fh_otg_core_if_t * core_if)
+{
+ uint16_t dfifo_actual = 0;
+ fh_otg_core_params_t *params = core_if->core_params;
+ uint16_t start_addr = 0;
+ int i;
+
+ dfifo_actual =
+ params->dev_rx_fifo_size + params->dev_nperio_tx_fifo_size;
+
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+ dfifo_actual += params->dev_tx_fifo_size[i];
+ }
+
+ if (dfifo_actual > core_if->total_fifo_size) {
+ return 0;
+ }
+
+ if (params->dev_rx_fifo_size > 32768 || params->dev_rx_fifo_size < 16)
+ return 0;
+
+ if (params->dev_nperio_tx_fifo_size > 32768
+ || params->dev_nperio_tx_fifo_size < 16)
+ return 0;
+
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+
+ if (params->dev_tx_fifo_size[i] > 768
+ || params->dev_tx_fifo_size[i] < 4)
+ return 0;
+ }
+
+ if (params->dev_rx_fifo_size > core_if->pwron_rxfsiz)
+ return 0;
+ start_addr = params->dev_rx_fifo_size;
+
+ if (params->dev_nperio_tx_fifo_size > core_if->pwron_gnptxfsiz)
+ return 0;
+ start_addr += params->dev_nperio_tx_fifo_size;
+
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+
+ if (params->dev_tx_fifo_size[i] > core_if->pwron_txfsiz[i])
+ return 0;
+ start_addr += params->dev_tx_fifo_size[i];
+ }
+
+ return 1;
+}
+
+/**
+ * This function resizes Device mode FIFOs
+ *
+ * @param core_if Programming view of FH_otg controller
+ *
+ * @return 1 if successful, 0 otherwise
+ *
+ */
+static uint8_t resize_fifos(fh_otg_core_if_t * core_if)
+{
+ int i = 0;
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+ fh_otg_core_params_t *params = core_if->core_params;
+ uint32_t rx_fifo_size;
+ fifosize_data_t nptxfifosize;
+ fifosize_data_t txfifosize[15];
+
+ uint32_t rx_fsz_bak;
+ uint32_t nptxfsz_bak;
+ uint32_t txfsz_bak[15];
+
+ uint16_t start_address;
+ uint8_t retval = 1;
+
+ if (!check_fifo_sizes(core_if)) {
+ return 0;
+ }
+
+ /* Configure data FIFO sizes */
+ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+ rx_fsz_bak = FH_READ_REG32(&global_regs->grxfsiz);
+ rx_fifo_size = params->dev_rx_fifo_size;
+ FH_WRITE_REG32(&global_regs->grxfsiz, rx_fifo_size);
+
+ /*
+ * Tx FIFOs These FIFOs are numbered from 1 to 15.
+ * Indexes of the FIFO size module parameters in the
+ * dev_tx_fifo_size array and the FIFO size registers in
+ * the dtxfsiz array run from 0 to 14.
+ */
+
+ /* Non-periodic Tx FIFO */
+ nptxfsz_bak = FH_READ_REG32(&global_regs->gnptxfsiz);
+ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
+ start_address = params->dev_rx_fifo_size;
+ nptxfifosize.b.startaddr = start_address;
+
+ FH_WRITE_REG32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+
+ start_address += nptxfifosize.b.depth;
+
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+ txfsz_bak[i] = FH_READ_REG32(&global_regs->dtxfsiz[i]);
+
+ txfifosize[i].b.depth = params->dev_tx_fifo_size[i];
+ txfifosize[i].b.startaddr = start_address;
+ FH_WRITE_REG32(&global_regs->dtxfsiz[i],
+ txfifosize[i].d32);
+
+ start_address += txfifosize[i].b.depth;
+ }
+
+ /** Check if register values are set correctly */
+ if (rx_fifo_size != FH_READ_REG32(&global_regs->grxfsiz)) {
+ retval = 0;
+ }
+
+ if (nptxfifosize.d32 != FH_READ_REG32(&global_regs->gnptxfsiz)) {
+ retval = 0;
+ }
+
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+ if (txfifosize[i].d32 !=
+ FH_READ_REG32(&global_regs->dtxfsiz[i])) {
+ retval = 0;
+ }
+ }
+
+ /** If register values are not set correctly, reset old values */
+ if (retval == 0) {
+ FH_WRITE_REG32(&global_regs->grxfsiz, rx_fsz_bak);
+
+ /* Non-periodic Tx FIFO */
+ FH_WRITE_REG32(&global_regs->gnptxfsiz, nptxfsz_bak);
+
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+ FH_WRITE_REG32(&global_regs->dtxfsiz[i],
+ txfsz_bak[i]);
+ }
+ }
+ } else {
+ return 0;
+ }
+
+ /* Flush the FIFOs */
+ fh_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */
+ fh_otg_flush_rx_fifo(core_if);
+
+ return retval;
+}
+
+/**
+ * This function sets a new depth value for an IN endpoint's Tx FIFO.
+ */
+static int cfi_ep_set_tx_fifo_val(uint8_t * buf, fh_otg_pcd_t * pcd)
+{
+	int retval;
+	uint32_t fsiz;
+	uint16_t size;
+	uint16_t ep_addr;
+	fh_otg_pcd_ep_t *ep;
+	fh_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
+	tx_fifo_size_setup_t *ptxfifoval;
+
+	ptxfifoval = (tx_fifo_size_setup_t *) buf;
+	ep_addr = ptxfifoval->bEndpointAddress;
+	size = ptxfifoval->wDepth;
+
+	ep = get_ep_by_addr(pcd, ep_addr);
+
+	if (NULL == ep) {
+		CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
+			 __func__, ep_addr);
+		return -FH_E_INVALID;
+	}
+
+	CFI_INFO
+	    ("%s: Set Tx FIFO size: endpoint addr=0x%02x, depth=%d, FIFO Num=%d\n",
+	     __func__, ep_addr, size, ep->fh_ep.tx_fifo_num);
+
+	fsiz = params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num - 1];
+	params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num - 1] = size;
+
+	if (resize_fifos(GET_CORE_IF(pcd))) {
+		retval = 0;
+	} else {
+		CFI_INFO
+		    ("%s: Error setting the feature Tx FIFO Size for EP%d\n",
+		     __func__, ep_addr);
+		params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num - 1] = fsiz;
+		retval = -FH_E_INVALID;
+	}
+
+	return retval;
+}
+
+/**
+ * This function sets a new depth value for the shared Rx FIFO.
+ */
+static int cfi_set_rx_fifo_val(uint8_t * buf, fh_otg_pcd_t * pcd)
+{
+	int retval;
+	uint32_t fsiz;
+	uint16_t size;
+	fh_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
+	rx_fifo_size_setup_t *prxfifoval;
+
+	prxfifoval = (rx_fifo_size_setup_t *) buf;
+	size = prxfifoval->wDepth;
+
+	fsiz = params->dev_rx_fifo_size;
+	params->dev_rx_fifo_size = size;
+
+	if (resize_fifos(GET_CORE_IF(pcd))) {
+		retval = 0;
+	} else {
+		CFI_INFO("%s: Error setting the feature Rx FIFO Size\n",
+			 __func__);
+		params->dev_rx_fifo_size = fsiz;
+		retval = -FH_E_INVALID;
+	}
+
+	return retval;
+}
+
+/**
+ * This function reads the SG of an EP's buffer setup into the buffer buf
+ */
+static int cfi_ep_get_sg_val(uint8_t * buf, struct fh_otg_pcd *pcd,
+ struct cfi_usb_ctrlrequest *req)
+{
+ int retval = -FH_E_INVALID;
+ uint8_t addr;
+ cfi_ep_t *ep;
+
+ /* The Low Byte of the wValue contains a non-zero address of the endpoint */
+ addr = req->wValue & 0xFF;
+ if (addr == 0) /* The address should be non-zero */
+ return retval;
+
+ ep = get_cfi_ep_by_addr(pcd->cfi, addr);
+ if (NULL == ep) {
+ CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
+ __func__, addr);
+ return retval;
+ }
+
+ fh_memcpy(buf, ep->bm_sg, BS_SG_VAL_DESC_LEN);
+ retval = BS_SG_VAL_DESC_LEN;
+ return retval;
+}
+
+/**
+ * This function reads the Concatenation value of an EP's buffer mode into
+ * the buffer buf
+ */
+static int cfi_ep_get_concat_val(uint8_t * buf, struct fh_otg_pcd *pcd,
+				 struct cfi_usb_ctrlrequest *req)
+{
+	int retval = -FH_E_INVALID;
+	uint8_t addr;
+	cfi_ep_t *ep;
+	uint8_t desc_count;
+
+	/* The Low Byte of the wValue contains a non-zero address of the endpoint */
+	addr = req->wValue & 0xFF;
+	if (addr == 0)		/* The address should be non-zero */
+		return retval;
+
+	ep = get_cfi_ep_by_addr(pcd->cfi, addr);
+	if (NULL == ep) {
+		CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
+			 __func__, addr);
+		return retval;
+	}
+
+	/* Copy the header to the buffer */
+	fh_memcpy(buf, ep->bm_concat, BS_CONCAT_VAL_HDR_LEN);
+	/* Advance the buffer pointer by the header size */
+	buf += BS_CONCAT_VAL_HDR_LEN;
+
+	desc_count = ep->bm_concat->hdr.bDescCount;
+	/* Copy all the wTxBytes to the buffer */
+	fh_memcpy(buf, ep->bm_concat->wTxBytes, sizeof(uint16_t) * desc_count);
+
+	retval = BS_CONCAT_VAL_HDR_LEN + sizeof(uint16_t) * desc_count;
+	return retval;
+}
+
+/**
+ * This function reads the buffer Alignment value of an EP's buffer mode into
+ * the buffer buf
+ *
+ * @return The total number of bytes copied to the buffer or negative error code.
+ */
+static int cfi_ep_get_align_val(uint8_t * buf, struct fh_otg_pcd *pcd,
+ struct cfi_usb_ctrlrequest *req)
+{
+ int retval = -FH_E_INVALID;
+ uint8_t addr;
+ cfi_ep_t *ep;
+
+ /* The Low Byte of the wValue contains a non-zero address of the endpoint */
+ addr = req->wValue & 0xFF;
+ if (addr == 0) /* The address should be non-zero */
+ return retval;
+
+ ep = get_cfi_ep_by_addr(pcd->cfi, addr);
+ if (NULL == ep) {
+ CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
+ __func__, addr);
+ return retval;
+ }
+
+ fh_memcpy(buf, ep->bm_align, BS_ALIGN_VAL_HDR_LEN);
+ retval = BS_ALIGN_VAL_HDR_LEN;
+
+ return retval;
+}
+
+/**
+ * This function sets a new value for the specified feature
+ *
+ * @param pcd A pointer to the PCD object
+ *
+ * @return 0 if successful, negative error code otherwise to stall the DCE.
+ */
+static int cfi_set_feature_value(struct fh_otg_pcd *pcd)
+{
+ int retval = -FH_E_NOT_SUPPORTED;
+ uint16_t wIndex, wValue;
+ uint8_t bRequest;
+ struct fh_otg_core_if *coreif;
+ cfiobject_t *cfi = pcd->cfi;
+ struct cfi_usb_ctrlrequest *ctrl_req;
+ uint8_t *buf;
+ ctrl_req = &cfi->ctrl_req;
+
+ buf = pcd->cfi->ctrl_req.data;
+
+ coreif = GET_CORE_IF(pcd);
+ bRequest = ctrl_req->bRequest;
+ wIndex = FH_CONSTANT_CPU_TO_LE16(ctrl_req->wIndex);
+ wValue = FH_CONSTANT_CPU_TO_LE16(ctrl_req->wValue);
+
+ /* See which feature is to be modified */
+ switch (wIndex) {
+ case FT_ID_DMA_BUFFER_SETUP:
+ /* Modify the feature */
+ if ((retval = cfi_ep_set_sg_val(buf, pcd)) < 0)
+ return retval;
+
+ /* And send this request to the gadget */
+ cfi->need_gadget_att = 1;
+ break;
+
+ case FT_ID_DMA_BUFF_ALIGN:
+ if ((retval = cfi_ep_set_alignment_val(buf, pcd)) < 0)
+ return retval;
+ cfi->need_gadget_att = 1;
+ break;
+
+ case FT_ID_DMA_CONCAT_SETUP:
+ /* Modify the feature */
+ if ((retval = cfi_ep_set_concat_val(buf, pcd)) < 0)
+ return retval;
+ cfi->need_gadget_att = 1;
+ break;
+
+ case FT_ID_DMA_CIRCULAR:
+ CFI_INFO("FT_ID_DMA_CIRCULAR\n");
+ break;
+
+ case FT_ID_THRESHOLD_SETUP:
+ CFI_INFO("FT_ID_THRESHOLD_SETUP\n");
+ break;
+
+ case FT_ID_DFIFO_DEPTH:
+ CFI_INFO("FT_ID_DFIFO_DEPTH\n");
+ break;
+
+ case FT_ID_TX_FIFO_DEPTH:
+ CFI_INFO("FT_ID_TX_FIFO_DEPTH\n");
+ if ((retval = cfi_ep_set_tx_fifo_val(buf, pcd)) < 0)
+ return retval;
+ cfi->need_gadget_att = 0;
+ break;
+
+ case FT_ID_RX_FIFO_DEPTH:
+ CFI_INFO("FT_ID_RX_FIFO_DEPTH\n");
+ if ((retval = cfi_set_rx_fifo_val(buf, pcd)) < 0)
+ return retval;
+ cfi->need_gadget_att = 0;
+ break;
+ }
+
+ return retval;
+}
+
+#endif //FH_UTE_CFI
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.h
new file mode 100644
index 00000000..97a05fa5
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.h
@@ -0,0 +1,320 @@
+/* ==========================================================================
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__FH_OTG_CFI_H__)
+#define __FH_OTG_CFI_H__
+
+#include "fh_otg_pcd.h"
+#include "fh_cfi_common.h"
+
+/**
+ * @file
+ * This file contains the CFI related OTG PCD specific common constants,
+ * interfaces(functions and macros) and data structures.The CFI Protocol is an
+ * optional interface for internal testing purposes that a DUT may implement to
+ * support testing of configurable features.
+ *
+ */
+
+struct fh_otg_pcd;
+struct fh_otg_pcd_ep;
+
+/** OTG CFI Features (properties) ID constants */
+/** This is a request for all Core Features */
+#define FT_ID_DMA_MODE 0x0001
+#define FT_ID_DMA_BUFFER_SETUP 0x0002
+#define FT_ID_DMA_BUFF_ALIGN 0x0003
+#define FT_ID_DMA_CONCAT_SETUP 0x0004
+#define FT_ID_DMA_CIRCULAR 0x0005
+#define FT_ID_THRESHOLD_SETUP 0x0006
+#define FT_ID_DFIFO_DEPTH 0x0007
+#define FT_ID_TX_FIFO_DEPTH 0x0008
+#define FT_ID_RX_FIFO_DEPTH 0x0009
+
+/**********************************************************/
+#define CFI_INFO_DEF
+
+#ifdef CFI_INFO_DEF
+#define CFI_INFO(fmt...) FH_PRINTF("CFI: " fmt)
+#else
+#define CFI_INFO(fmt...)
+#endif
+
+#define min(x,y) \
+	((x) < (y) ? (x) : (y))
+
+#define max(x,y) \
+	((x) > (y) ? (x) : (y))
+
+/**
+ * Descriptor DMA SG Buffer setup structure (SG buffer). This structure is
+ * also used for setting up a buffer for Circular DDMA.
+ */
+struct _ddma_sg_buffer_setup {
+#define BS_SG_VAL_DESC_LEN 6
+ /* The OUT EP address */
+ uint8_t bOutEndpointAddress;
+ /* The IN EP address */
+ uint8_t bInEndpointAddress;
+ /* Number of bytes to put between transfer segments (must be DWORD boundaries) */
+ uint8_t bOffset;
+ /* The number of transfer segments (a DMA descriptors per each segment) */
+ uint8_t bCount;
+ /* Size (in byte) of each transfer segment */
+ uint16_t wSize;
+} __attribute__ ((packed));
+typedef struct _ddma_sg_buffer_setup ddma_sg_buffer_setup_t;
+
+/** Descriptor DMA Concatenation Buffer setup structure */
+struct _ddma_concat_buffer_setup_hdr {
+#define BS_CONCAT_VAL_HDR_LEN 4
+ /* The endpoint for which the buffer is to be set up */
+ uint8_t bEndpointAddress;
+ /* The count of descriptors to be used */
+ uint8_t bDescCount;
+ /* The total size of the transfer */
+ uint16_t wSize;
+} __attribute__ ((packed));
+typedef struct _ddma_concat_buffer_setup_hdr ddma_concat_buffer_setup_hdr_t;
+
+/** Descriptor DMA Concatenation Buffer setup structure */
+struct _ddma_concat_buffer_setup {
+ /* The SG header */
+ ddma_concat_buffer_setup_hdr_t hdr;
+
+ /* The XFER sizes pointer (allocated dynamically) */
+ uint16_t *wTxBytes;
+} __attribute__ ((packed));
+typedef struct _ddma_concat_buffer_setup ddma_concat_buffer_setup_t;
+
+/** Descriptor DMA Alignment Buffer setup structure */
+struct _ddma_align_buffer_setup {
+#define BS_ALIGN_VAL_HDR_LEN 2
+ uint8_t bEndpointAddress;
+ uint8_t bAlign;
+} __attribute__ ((packed));
+typedef struct _ddma_align_buffer_setup ddma_align_buffer_setup_t;
+
+/** Transmit FIFO Size setup structure */
+struct _tx_fifo_size_setup {
+ uint8_t bEndpointAddress;
+ uint16_t wDepth;
+} __attribute__ ((packed));
+typedef struct _tx_fifo_size_setup tx_fifo_size_setup_t;
+
+/** Transmit FIFO Size setup structure */
+struct _rx_fifo_size_setup {
+ uint16_t wDepth;
+} __attribute__ ((packed));
+typedef struct _rx_fifo_size_setup rx_fifo_size_setup_t;
+
+/**
+ * struct cfi_usb_ctrlrequest - the CFI implementation of the struct usb_ctrlrequest
+ * This structure encapsulates the standard usb_ctrlrequest and adds a pointer
+ * to the data returned in the data stage of a 3-stage Control Write requests.
+ */
+struct cfi_usb_ctrlrequest {
+ uint8_t bRequestType;
+ uint8_t bRequest;
+ uint16_t wValue;
+ uint16_t wIndex;
+ uint16_t wLength;
+ uint8_t *data;
+} UPACKED;
+
+/*---------------------------------------------------------------------------*/
+
+/**
+ * The CFI wrapper of the enabled and activated fh_otg_pcd_ep structures.
+ * This structure is used to store the buffer setup data for any
+ * enabled endpoint in the PCD.
+ */
+struct cfi_ep {
+ /* Entry for the list container */
+ fh_list_link_t lh;
+ /* Pointer to the active PCD endpoint structure */
+ struct fh_otg_pcd_ep *ep;
+ /* The last descriptor in the chain of DMA descriptors of the endpoint */
+ struct fh_otg_dma_desc *dma_desc_last;
+ /* The SG feature value */
+ ddma_sg_buffer_setup_t *bm_sg;
+ /* The Circular feature value */
+ ddma_sg_buffer_setup_t *bm_circ;
+ /* The Concatenation feature value */
+ ddma_concat_buffer_setup_t *bm_concat;
+ /* The Alignment feature value */
+ ddma_align_buffer_setup_t *bm_align;
+ /* XFER length */
+ uint32_t xfer_len;
+ /*
+ * Count of DMA descriptors currently used.
+ * The total should not exceed the MAX_DMA_DESCS_PER_EP value
+ * defined in the fh_otg_cil.h
+ */
+ uint32_t desc_count;
+};
+typedef struct cfi_ep cfi_ep_t;
+
+typedef struct cfi_dma_buff {
+#define CFI_IN_BUF_LEN 1024
+#define CFI_OUT_BUF_LEN 1024
+ dma_addr_t addr;
+ uint8_t *buf;
+} cfi_dma_buff_t;
+
+struct cfiobject;
+
+/**
+ * This is the interface for the CFI operations.
+ *
+ * @param ep_enable Called when any endpoint is enabled and activated.
+ * @param release Called when the CFI object is released and it needs to correctly
+ * deallocate the dynamic memory
+ * @param ctrl_write_complete Called when the data stage of the request is complete
+ */
+typedef struct cfi_ops {
+ int (*ep_enable) (struct cfiobject * cfi, struct fh_otg_pcd * pcd,
+ struct fh_otg_pcd_ep * ep);
+ void *(*ep_alloc_buf) (struct cfiobject * cfi, struct fh_otg_pcd * pcd,
+ struct fh_otg_pcd_ep * ep, dma_addr_t * dma,
+ unsigned size, gfp_t flags);
+ void (*release) (struct cfiobject * cfi);
+ int (*ctrl_write_complete) (struct cfiobject * cfi,
+ struct fh_otg_pcd * pcd);
+ void (*build_descriptors) (struct cfiobject * cfi,
+ struct fh_otg_pcd * pcd,
+ struct fh_otg_pcd_ep * ep,
+ fh_otg_pcd_request_t * req);
+} cfi_ops_t;
+
+struct cfiobject {
+ cfi_ops_t ops;
+ struct fh_otg_pcd *pcd;
+ struct usb_gadget *gadget;
+
+ /* Buffers used to send/receive CFI-related request data */
+ cfi_dma_buff_t buf_in;
+ cfi_dma_buff_t buf_out;
+
+ /* CFI specific Control request wrapper */
+ struct cfi_usb_ctrlrequest ctrl_req;
+
+ /* The list of active EP's in the PCD of type cfi_ep_t */
+ fh_list_link_t active_eps;
+
+ /* This flag shall control the propagation of a specific request
+ * to the gadget's processing routines.
+ * 0 - no gadget handling
+ * 1 - the gadget needs to know about this request (w/o completing a status
+ * phase - just return a 0 to the _setup callback)
+ */
+ uint8_t need_gadget_att;
+
+ /* Flag indicating whether the status IN phase needs to be
+ * completed by the PCD
+ */
+ uint8_t need_status_in_complete;
+};
+typedef struct cfiobject cfiobject_t;
+
+#define DUMP_MSG
+
+#if defined(DUMP_MSG)
+static inline void dump_msg(const u8 * buf, unsigned int length)
+{
+ unsigned int start, num, i;
+ char line[52], *p;
+
+ if (length >= 512)
+ return;
+
+ start = 0;
+ while (length > 0) {
+ num = min(length, 16u);
+ p = line;
+ for (i = 0; i < num; ++i) {
+ if (i == 8)
+ *p++ = ' ';
+ FH_SPRINTF(p, " %02x", buf[i]);
+ p += 3;
+ }
+ *p = 0;
+ FH_DEBUG("%6x: %s\n", start, line);
+ buf += num;
+ start += num;
+ length -= num;
+ }
+}
+#else
+static inline void dump_msg(const u8 * buf, unsigned int length)
+{
+}
+#endif
+
+/**
+ * This function returns a pointer to cfi_ep_t object with the addr address.
+ */
+static inline struct cfi_ep *get_cfi_ep_by_addr(struct cfiobject *cfi,
+ uint8_t addr)
+{
+ struct cfi_ep *pcfiep;
+ fh_list_link_t *tmp;
+
+ FH_LIST_FOREACH(tmp, &cfi->active_eps) {
+ pcfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
+
+ if (pcfiep->ep->desc->bEndpointAddress == addr) {
+ return pcfiep;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * This function returns a pointer to cfi_ep_t object that matches
+ * the fh_otg_pcd_ep object.
+ */
+static inline struct cfi_ep *get_cfi_ep_by_pcd_ep(struct cfiobject *cfi,
+ struct fh_otg_pcd_ep *ep)
+{
+ struct cfi_ep *pcfiep = NULL;
+ fh_list_link_t *tmp;
+
+ FH_LIST_FOREACH(tmp, &cfi->active_eps) {
+ pcfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
+ if (pcfiep->ep == ep) {
+ return pcfiep;
+ }
+ }
+ return NULL;
+}
+
+int cfi_setup(struct fh_otg_pcd *pcd, struct cfi_usb_ctrlrequest *ctrl);
+
+#endif /* (__FH_OTG_CFI_H__) */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.c
new file mode 100644
index 00000000..04ba1ad2
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.c
@@ -0,0 +1,7595 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_cil.c $
+ * $Revision: #216 $
+ * $Date: 2015/10/12 $
+ * $Change: 2972621 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ *
+ * The Core Interface Layer provides basic services for accessing and
+ * managing the FH_otg hardware. These services are used by both the
+ * Host Controller Driver and the Peripheral Controller Driver.
+ *
+ * The CIL manages the memory map for the core so that the HCD and PCD
+ * don't have to do this separately. It also handles basic tasks like
+ * reading/writing the registers and data FIFOs in the controller.
+ * Some of the data access functions provide encapsulation of several
+ * operations required to perform a task, such as writing multiple
+ * registers to start a transfer. Finally, the CIL performs basic
+ * services that are not specific to either the host or device modes
+ * of operation. These services include management of the OTG Host
+ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
+ * Diagnostic API is also provided to allow testing of the controller
+ * hardware.
+ *
+ * The Core Interface Layer has the following requirements:
+ * - Provides basic controller operations.
+ * - Minimal use of OS services.
+ * - The OS services used will be abstracted by using inline functions
+ * or macros.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <mach/gpio.h>
+#include <mach/board_config.h>
+#include <mach/pmu.h>
+#include "../fh_common_port/fh_os.h"
+#include "fh_otg_regs.h"
+#include "fh_otg_cil.h"
+
+static int fh_otg_setup_params(fh_otg_core_if_t * core_if);
+
+/**
+ * This function is called to initialize the FH_otg CSR data
+ * structures. The register addresses in the device and host
+ * structures are initialized from the base address supplied by the
+ * caller. The calling function must make the OS calls to get the
+ * base address of the FH_otg controller registers. The core_params
+ * argument holds the parameters that specify how the core should be
+ * configured.
+ *
+ * @param reg_base_addr Base address of FH_otg core registers
+ *
+ */
+fh_otg_core_if_t *fh_otg_cil_init(const uint32_t * reg_base_addr,
+				struct fh_usb_platform_data *fh_usb_data)
+{
+	fh_otg_core_if_t *core_if = 0;
+	fh_otg_dev_if_t *dev_if = 0;
+	fh_otg_host_if_t *host_if = 0;
+	uint8_t *reg_base = (uint8_t *) reg_base_addr;
+	int i = 0;
+
+	FH_DEBUGPL(DBG_CILV, "%s(%p)\n", __func__, reg_base_addr);
+
+	core_if = FH_ALLOC(sizeof(fh_otg_core_if_t));
+
+	if (core_if == NULL) {
+		FH_DEBUGPL(DBG_CIL,
+			   "Allocation of fh_otg_core_if_t failed\n");
+		return 0;
+	}
+	core_if->core_global_regs = (fh_otg_core_global_regs_t *) reg_base;
+
+	/*
+	 * Allocate the Device Mode structures.
+	 */
+	dev_if = FH_ALLOC(sizeof(fh_otg_dev_if_t));
+
+	if (dev_if == NULL) {
+		FH_DEBUGPL(DBG_CIL, "Allocation of fh_otg_dev_if_t failed\n");
+		FH_FREE(core_if);
+		return 0;
+	}
+
+	dev_if->dev_global_regs =
+	    (fh_otg_device_global_regs_t *) (reg_base +
+					     FH_DEV_GLOBAL_REG_OFFSET);
+
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		dev_if->in_ep_regs[i] = (fh_otg_dev_in_ep_regs_t *)
+		    (reg_base + FH_DEV_IN_EP_REG_OFFSET +
+		     (i * FH_EP_REG_OFFSET));
+
+		dev_if->out_ep_regs[i] = (fh_otg_dev_out_ep_regs_t *)
+		    (reg_base + FH_DEV_OUT_EP_REG_OFFSET +
+		     (i * FH_EP_REG_OFFSET));
+		FH_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n",
+			   i, &dev_if->in_ep_regs[i]->diepctl);
+		FH_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n",
+			   i, &dev_if->out_ep_regs[i]->doepctl);
+	}
+
+	dev_if->speed = 0;	// unknown
+
+	core_if->dev_if = dev_if;
+
+	/*
+	 * Allocate the Host Mode structures.
+	 */
+	host_if = FH_ALLOC(sizeof(fh_otg_host_if_t));
+
+	if (host_if == NULL) {
+		FH_DEBUGPL(DBG_CIL,
+			   "Allocation of fh_otg_host_if_t failed\n");
+		FH_FREE(dev_if);
+		FH_FREE(core_if);
+		return 0;
+	}
+
+	host_if->host_global_regs = (fh_otg_host_global_regs_t *)
+	    (reg_base + FH_OTG_HOST_GLOBAL_REG_OFFSET);
+
+	host_if->hprt0 =
+	    (uint32_t *) (reg_base + FH_OTG_HOST_PORT_REGS_OFFSET);
+
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		host_if->hc_regs[i] = (fh_otg_hc_regs_t *)
+		    (reg_base + FH_OTG_HOST_CHAN_REGS_OFFSET +
+		     (i * FH_OTG_CHAN_REGS_OFFSET));
+		FH_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
+			   i, &host_if->hc_regs[i]->hcchar);
+	}
+
+	host_if->num_host_channels = MAX_EPS_CHANNELS;
+	core_if->host_if = host_if;
+
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		core_if->data_fifo[i] =
+		    (uint32_t *) (reg_base + FH_OTG_DATA_FIFO_OFFSET +
+				  (i * FH_OTG_DATA_FIFO_SIZE));
+		FH_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08lx\n",
+			   i, (unsigned long)core_if->data_fifo[i]);
+	}
+
+	core_if->pcgcctl = (uint32_t *) (reg_base + FH_OTG_PCGCCTL_OFFSET);
+
+	/* Initiate lx_state to L3 disconnected state */
+	core_if->lx_state = FH_OTG_L3;
+	/*
+	 * Store the contents of the hardware configuration registers here for
+	 * easy access later.
+	 */
+	core_if->hwcfg1.d32 =
+	    FH_READ_REG32(&core_if->core_global_regs->ghwcfg1);
+	core_if->hwcfg2.d32 =
+	    FH_READ_REG32(&core_if->core_global_regs->ghwcfg2);
+	core_if->hwcfg3.d32 =
+	    FH_READ_REG32(&core_if->core_global_regs->ghwcfg3);
+	core_if->hwcfg4.d32 =
+	    FH_READ_REG32(&core_if->core_global_regs->ghwcfg4);
+
+	/* Force host mode to get HPTXFSIZ exact power on value */
+	{
+		gusbcfg_data_t gusbcfg = {.d32 = 0 };
+		gusbcfg.d32 =
+		    FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+		gusbcfg.b.force_host_mode = 1;
+		FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
+			       gusbcfg.d32);
+		fh_mdelay(100);
+
+		/* get host periodic tx fifo size */
+		core_if->hptxfsiz.d32 =
+		    FH_READ_REG32(&core_if->core_global_regs->hptxfsiz);
+		/* restore host rx fifo size */
+		FH_WRITE_REG32(&core_if->core_global_regs->grxfsiz,
+			       fh_usb_data->grxfsiz_pwron_val);
+		/* restore host non-periodic tx fifo size */
+		FH_WRITE_REG32(&core_if->core_global_regs->gnptxfsiz,
+			       fh_usb_data->gnptxfsiz_pwron_val);
+
+#ifndef FH_HOST_ONLY
+		gusbcfg.d32 =
+		    FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+		gusbcfg.b.force_host_mode = 0;
+		FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
+			       gusbcfg.d32);
+		fh_mdelay(100);
+
+		/* restore device rx fifo size */
+		FH_WRITE_REG32(&core_if->core_global_regs->grxfsiz,
+			       fh_usb_data->grxfsiz_pwron_val);
+		/* restore device non-periodic tx fifo size */
+		FH_WRITE_REG32(&core_if->core_global_regs->gnptxfsiz,
+			       fh_usb_data->gnptxfsiz_pwron_val);
+#endif
+	}
+
+	FH_DEBUGPL(DBG_CILV, "hwcfg1=%08x\n", core_if->hwcfg1.d32);
+	FH_DEBUGPL(DBG_CILV, "hwcfg2=%08x\n", core_if->hwcfg2.d32);
+	FH_DEBUGPL(DBG_CILV, "hwcfg3=%08x\n", core_if->hwcfg3.d32);
+	FH_DEBUGPL(DBG_CILV, "hwcfg4=%08x\n", core_if->hwcfg4.d32);
+
+	core_if->hcfg.d32 =
+	    FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
+	core_if->dcfg.d32 =
+	    FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
+
+	FH_DEBUGPL(DBG_CILV, "hcfg=%08x\n", core_if->hcfg.d32);
+	FH_DEBUGPL(DBG_CILV, "dcfg=%08x\n", core_if->dcfg.d32);
+
+	FH_DEBUGPL(DBG_CILV, "op_mode=%0x\n", core_if->hwcfg2.b.op_mode);
+	FH_DEBUGPL(DBG_CILV, "arch=%0x\n", core_if->hwcfg2.b.architecture);
+	FH_DEBUGPL(DBG_CILV, "num_dev_ep=%d\n", core_if->hwcfg2.b.num_dev_ep);
+	FH_DEBUGPL(DBG_CILV, "num_host_chan=%d\n",
+		   core_if->hwcfg2.b.num_host_chan);
+	FH_DEBUGPL(DBG_CILV, "nonperio_tx_q_depth=0x%0x\n",
+		   core_if->hwcfg2.b.nonperio_tx_q_depth);
+	FH_DEBUGPL(DBG_CILV, "host_perio_tx_q_depth=0x%0x\n",
+		   core_if->hwcfg2.b.host_perio_tx_q_depth);
+	FH_DEBUGPL(DBG_CILV, "dev_token_q_depth=0x%0x\n",
+		   core_if->hwcfg2.b.dev_token_q_depth);
+
+	FH_DEBUGPL(DBG_CILV, "Total FIFO SZ=%d\n",
+		   core_if->hwcfg3.b.dfifo_depth);
+	FH_DEBUGPL(DBG_CILV, "xfer_size_cntr_width=%0x\n",
+		   core_if->hwcfg3.b.xfer_size_cntr_width);
+
+	/*
+	 * Set the SRP success bit for FS-I2c
+	 */
+	core_if->srp_success = 0;
+	core_if->srp_timer_started = 0;
+
+	/*
+	 * Create new workqueue and init works
+	 */
+	core_if->wq_otg = FH_WORKQ_ALLOC("fh_otg");
+	if (core_if->wq_otg == 0) {
+		FH_WARN("FH_WORKQ_ALLOC failed\n");
+		FH_FREE(host_if);
+		FH_FREE(dev_if);
+		FH_FREE(core_if);
+		return 0;
+	}
+
+	/*
+	 * Allocates hibernation backup registers
+	 */
+	if (core_if->hwcfg4.b.hiber==1 || core_if->hwcfg4.b.xhiber==1) {
+		if(!core_if->gr_backup){
+			core_if->gr_backup = FH_ALLOC(sizeof(*core_if->gr_backup));
+			if(!core_if->gr_backup){
+				FH_WARN("can't alloc mem for gr_backup register \n");
+				FH_FREE(host_if);
+				FH_FREE(dev_if);
+				FH_WORKQ_FREE(core_if->wq_otg);
+				FH_FREE(core_if);
+				return 0;
+			}
+		}
+		if(!core_if->dr_backup){
+			core_if->dr_backup = FH_ALLOC(sizeof(*core_if->dr_backup));
+			if(!core_if->dr_backup){
+				FH_WARN("can't alloc mem for dr_backup register \n");
+				FH_FREE(host_if);
+				FH_FREE(dev_if);
+				FH_WORKQ_FREE(core_if->wq_otg);
+				FH_FREE(core_if->gr_backup);	/* free backups BEFORE core_if (was a use-after-free) */
+				FH_FREE(core_if);
+				return 0;
+			}
+		}
+		if(!core_if->hr_backup){
+			core_if->hr_backup = FH_ALLOC(sizeof(*core_if->hr_backup));
+			if(!core_if->hr_backup){
+				FH_WARN("can't alloc mem for hr_backup register \n");
+				FH_FREE(host_if);
+				FH_FREE(dev_if);
+				FH_WORKQ_FREE(core_if->wq_otg);
+				FH_FREE(core_if->gr_backup);	/* free backups BEFORE core_if (was a use-after-free) */
+				FH_FREE(core_if->dr_backup);
+				FH_FREE(core_if);
+				return 0;
+			}
+		}
+	}
+
+	core_if->snpsid = FH_READ_REG32(&core_if->core_global_regs->gsnpsid);
+
+	FH_PRINTF("Core Release: %x.%x%x%x\n",
+		  (core_if->snpsid >> 12 & 0xF),
+		  (core_if->snpsid >> 8 & 0xF),
+		  (core_if->snpsid >> 4 & 0xF), (core_if->snpsid & 0xF));
+
+	core_if->wkp_timer = FH_TIMER_ALLOC("Wake Up Timer",
+					    w_wakeup_detected, core_if);
+	if (core_if->wkp_timer == 0) {
+		FH_WARN("FH_TIMER_ALLOC failed\n"); /* NOTE(review): gr/dr/hr_backup leak on this path */
+		FH_FREE(host_if);
+		FH_FREE(dev_if);
+		FH_WORKQ_FREE(core_if->wq_otg);
+		FH_FREE(core_if);
+		return 0;
+	}
+
+	if (fh_otg_setup_params(core_if))
+		FH_WARN("Error while setting core params\n");
+
+	core_if->hibernation_suspend = 0;
+	if (core_if->otg_ver)
+		core_if->test_mode = 0;
+
+	/** ADP initialization */
+	fh_otg_adp_init(core_if);
+
+	return core_if;
+}
+
+/**
+ * This function frees the structures allocated by fh_otg_cil_init().
+ *
+ * @param core_if The core interface pointer returned from
+ * fh_otg_cil_init().
+ *
+ */
+void fh_otg_cil_remove(fh_otg_core_if_t * core_if)
+{
+ dctl_data_t dctl = {.d32 = 0 };
+ /* Disable all interrupts */
+ FH_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, 1, 0);
+ FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, 0);
+
+ dctl.b.sftdiscon = 1;
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0,
+ dctl.d32);
+ }
+
+ if (core_if->wq_otg) {
+ FH_WORKQ_WAIT_WORK_DONE(core_if->wq_otg, 500);
+ FH_WORKQ_FREE(core_if->wq_otg);
+ }
+ if (core_if->dev_if) {
+ FH_FREE(core_if->dev_if);
+ }
+ if (core_if->host_if) {
+ FH_FREE(core_if->host_if);
+ }
+
+ /** Remove hibernation recovery registers **/
+ if(core_if->gr_backup){
+ FH_FREE(core_if->gr_backup);
+ }
+ if(core_if->dr_backup){
+ FH_FREE(core_if->dr_backup);
+ }
+ if(core_if->hr_backup){
+ FH_FREE(core_if->hr_backup);
+ }
+
+ /** Remove ADP Stuff */
+ fh_otg_adp_remove(core_if);
+ if (core_if->core_params) {
+ FH_FREE(core_if->core_params);
+ }
+ if (core_if->wkp_timer) {
+ FH_TIMER_FREE(core_if->wkp_timer);
+ }
+ if (core_if->srp_timer) {
+ FH_TIMER_FREE(core_if->srp_timer);
+ }
+ FH_FREE(core_if);
+}
+
+/**
+ * This function enables the controller's Global Interrupt in the AHB Config
+ * register.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+void fh_otg_enable_global_interrupts(fh_otg_core_if_t * core_if)
+{
+ gahbcfg_data_t ahbcfg = {.d32 = 0 };
+ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
+ FH_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
+}
+
+/**
+ * This function disables the controller's Global Interrupt in the AHB Config
+ * register.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+void fh_otg_disable_global_interrupts(fh_otg_core_if_t * core_if)
+{
+ gahbcfg_data_t ahbcfg = {.d32 = 0 };
+ ahbcfg.b.glblintrmsk = 1; /* Disable interrupts */
+ FH_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
+}
+
+/**
+ * This function initializes the common interrupts, used in both
+ * device and host modes.
+ *
+ * @param core_if Programming view of the FH_otg controller
+ *
+ */
+static void fh_otg_enable_common_interrupts(fh_otg_core_if_t * core_if)
+{
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+
+ /* Clear any pending OTG Interrupts */
+ FH_WRITE_REG32(&global_regs->gotgint, 0xFFFFFFFF);
+
+ /* Clear any pending interrupts */
+ FH_WRITE_REG32(&global_regs->gintsts, 0xFFFFFFFF);
+
+ /*
+ * Enable the interrupts in the GINTMSK.
+ */
+ if (!core_if->core_params->otg_ver)
+ /* To avoid system hang during OTG 2.0 role switch */
+ intr_mask.b.modemismatch = 1;
+ intr_mask.b.otgintr = 1;
+
+ if (!core_if->dma_enable) {
+ intr_mask.b.rxstsqlvl = 1;
+ }
+
+ intr_mask.b.conidstschng = 1;
+ intr_mask.b.wkupintr = 1;
+ intr_mask.b.disconnect = 0;
+ intr_mask.b.usbsuspend = 1;
+ intr_mask.b.sessreqintr = 1;
+#ifdef CONFIG_USB_FH_OTG_LPM
+ if (core_if->core_params->lpm_enable) {
+ intr_mask.b.lpmtranrcvd = 1;
+ }
+#endif
+ FH_WRITE_REG32(&global_regs->gintmsk, intr_mask.d32);
+}
+
+/*
+ * The restore operation is modified to support Synopsys Emulated Powerdown and
+ * Hibernation. This function is for exiting from Device mode hibernation by
+ * Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
+ * @param core_if Programming view of FH_otg controller.
+ * @param rem_wakeup - indicates whether resume is initiated by Device or Host.
+ * @param reset - indicates whether resume is initiated by Reset.
+ */
+int fh_otg_device_hibernation_restore(fh_otg_core_if_t * core_if,
+ int rem_wakeup, int reset)
+{
+ gpwrdn_data_t gpwrdn = {.d32 = 0 };
+ pcgcctl_data_t pcgcctl = {.d32 = 0 };
+ dctl_data_t dctl = {.d32 = 0 };
+
+ int timeout = 2000;
+
+ if (!core_if->hibernation_suspend) {
+ FH_PRINTF("Already exited from Hibernation\n");
+ return 1;
+ }
+
+ FH_DEBUGPL(DBG_PCD, "%s called\n", __FUNCTION__);
+ /* Switch-on voltage to the core */
+ gpwrdn.b.pwrdnswtch = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Reset core */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnrstn = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Assert Restore signal */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.restore = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+
+ /* Disable power clamps */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnclmp = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+ if (rem_wakeup) {
+ fh_udelay(70);
+ }
+
+ /* Deassert Reset core */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnrstn = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+
+ /* Disable PMU interrupt */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuintsel = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+ /* Mask interrupts from gpwrdn */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.connect_det_msk = 1;
+ gpwrdn.b.srp_det_msk = 1;
+ gpwrdn.b.disconn_det_msk = 1;
+ gpwrdn.b.rst_det_msk = 1;
+ gpwrdn.b.lnstchng_msk = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+ /* Indicates that we are going out from hibernation */
+ core_if->hibernation_suspend = 0;
+
+ /*
+ * Set Restore Essential Regs bit in PCGCCTL register, restore_mode = 1
+ * indicates restore from remote_wakeup
+ */
+ restore_essential_regs(core_if, rem_wakeup, 0);
+
+ /*
+ * Wait a little for seeing new value of variable hibernation_suspend if
+ * Restore done interrupt received before polling
+ */
+ fh_udelay(10);
+
+ if (core_if->hibernation_suspend == 0) {
+ /*
+ * Wait For Restore_done Interrupt. This mechanism of polling the
+ * interrupt is introduced to avoid any possible race conditions
+ */
+ do {
+ gintsts_data_t gintsts;
+ gintsts.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->gintsts);
+ if (gintsts.b.restoredone) {
+ gintsts.d32 = 0;
+ gintsts.b.restoredone = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->
+ gintsts, gintsts.d32);
+ FH_PRINTF("Restore Done Interrupt seen\n");
+ break;
+ }
+ fh_udelay(10);
+ } while (--timeout);
+ if (!timeout) {
+ FH_PRINTF("Restore Done interrupt wasn't generated here\n");
+ }
+ }
+ /* Clear all pending interupts */
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
+
+ /* De-assert Restore */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.restore = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ if (!rem_wakeup) {
+ pcgcctl.d32 = 0;
+ pcgcctl.b.rstpdwnmodule = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+ }
+
+ /* Restore GUSBCFG,DCFG and DCTL */
+ FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
+ core_if->gr_backup->gusbcfg_local);
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg,
+ core_if->dr_backup->dcfg);
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
+ core_if->dr_backup->dctl);
+
+ /* De-assert Wakeup Logic */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ if (!rem_wakeup) {
+ /* Set Device programming done bit */
+ dctl.b.pwronprgdone = 1;
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
+ } else {
+ /* Start Remote Wakeup Signaling */
+ dctl.d32 = core_if->dr_backup->dctl;
+ dctl.b.rmtwkupsig = 1;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
+ }
+
+ fh_mdelay(2);
+ /* Clear all pending interupts */
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
+
+ /* Restore global registers */
+ fh_otg_restore_global_regs(core_if);
+ /* Restore device global registers */
+ fh_otg_restore_dev_regs(core_if, rem_wakeup);
+
+ if (rem_wakeup) {
+ fh_mdelay(7);
+ dctl.d32 = 0;
+ dctl.b.rmtwkupsig = 1;
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
+ }
+
+ core_if->hibernation_suspend = 0;
+ /* The core will be in ON STATE */
+ core_if->lx_state = FH_OTG_L0;
+ FH_PRINTF("Hibernation recovery completes here\n");
+
+ return 1;
+}
+
+/*
+ * The restore operation is modified to support Synopsys Emulated Powerdown and
+ * Hibernation. This function is for exiting from Host mode hibernation by
+ * Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
+ * @param core_if Programming view of FH_otg controller.
+ * @param rem_wakeup - indicates whether resume is initiated by Device or Host.
+ * @param reset - indicates whether resume is initiated by Reset.
+ */
+int fh_otg_host_hibernation_restore(fh_otg_core_if_t * core_if,
+ int rem_wakeup, int reset)
+{
+ gpwrdn_data_t gpwrdn = {.d32 = 0 };
+ hprt0_data_t hprt0 = {.d32 = 0 };
+
+ int timeout = 2000;
+
+ FH_DEBUGPL(DBG_HCD, "%s called\n", __FUNCTION__);
+ /* Switch-on voltage to the core */
+ gpwrdn.b.pwrdnswtch = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Reset core */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnrstn = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Assert Restore signal */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.restore = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+
+ /* Disable power clamps */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnclmp = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+ if (!rem_wakeup) {
+ fh_udelay(50);
+ }
+
+ /* Deassert Reset core */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnrstn = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+
+ /* Disable PMU interrupt */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuintsel = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+ gpwrdn.d32 = 0;
+ gpwrdn.b.connect_det_msk = 1;
+ gpwrdn.b.srp_det_msk = 1;
+ gpwrdn.b.disconn_det_msk = 1;
+ gpwrdn.b.rst_det_msk = 1;
+ gpwrdn.b.lnstchng_msk = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+ /* Indicates that we are going out from hibernation */
+ core_if->hibernation_suspend = 0;
+
+ /* Set Restore Essential Regs bit in PCGCCTL register */
+ restore_essential_regs(core_if, rem_wakeup, 1);
+
+ /* Wait a little for seeing new value of variable hibernation_suspend if
+ * Restore done interrupt received before polling */
+ fh_udelay(10);
+
+ if (core_if->hibernation_suspend == 0) {
+ /* Wait For Restore_done Interrupt. This mechanism of polling the
+ * interrupt is introduced to avoid any possible race conditions
+ */
+ do {
+ gintsts_data_t gintsts;
+ gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+ if (gintsts.b.restoredone) {
+ gintsts.d32 = 0;
+ gintsts.b.restoredone = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+ FH_DEBUGPL(DBG_HCD,"Restore Done Interrupt seen\n");
+ break;
+ }
+ fh_udelay(10);
+ } while (--timeout);
+ if (!timeout) {
+ FH_WARN("Restore Done interrupt wasn't generated\n");
+ }
+ }
+
+ /* Set the flag's value to 0 again after receiving restore done interrupt */
+ core_if->hibernation_suspend = 0;
+
+ /* This step is not described in functional spec but if not wait for this
+ * delay, mismatch interrupts occurred because just after restore core is
+ * in Device mode(gintsts.curmode == 0) */
+ fh_mdelay(100);
+
+ /* Clear all pending interrupts */
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
+
+ /* De-assert Restore */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.restore = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Restore GUSBCFG and HCFG */
+ FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
+ core_if->gr_backup->gusbcfg_local);
+ FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg,
+ core_if->hr_backup->hcfg_local);
+
+ /* De-assert Wakeup Logic */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Start the Resume operation by programming HPRT0 */
+ hprt0.d32 = core_if->hr_backup->hprt0_local;
+ hprt0.b.prtpwr = 1;
+ hprt0.b.prtena = 0;
+ hprt0.b.prtsusp = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+
+ FH_PRINTF("Resume Starts Now\n");
+ if (!reset) { // Indicates it is Resume Operation
+ hprt0.d32 = core_if->hr_backup->hprt0_local;
+ hprt0.b.prtres = 1;
+ hprt0.b.prtpwr = 1;
+ hprt0.b.prtena = 0;
+ hprt0.b.prtsusp = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+
+ if (!rem_wakeup)
+ hprt0.b.prtres = 0;
+ /* Wait for Resume time and then program HPRT again */
+ fh_mdelay(100);
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+
+ } else { // Indicates it is Reset Operation
+ hprt0.d32 = core_if->hr_backup->hprt0_local;
+ hprt0.b.prtrst = 1;
+ hprt0.b.prtpwr = 1;
+ hprt0.b.prtena = 0;
+ hprt0.b.prtsusp = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ /* Wait for Reset time and then program HPRT again */
+ fh_mdelay(60);
+ hprt0.b.prtrst = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ }
+ /* Clear all interrupt status */
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtconndet = 1;
+ hprt0.b.prtenchng = 1;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+
+ /* Clear all pending interupts */
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
+
+
+ /* Restore global registers */
+ fh_otg_restore_global_regs(core_if);
+ /* Restore host global registers */
+ fh_otg_restore_host_regs(core_if, reset);
+
+ /* The core will be in ON STATE */
+ core_if->lx_state = FH_OTG_L0;
+ FH_PRINTF("Hibernation recovery is complete here\n");
+ return 0;
+}
+
+/** Saves some register values into system memory. */
+int fh_otg_save_global_regs(fh_otg_core_if_t * core_if)
+{
+ struct fh_otg_global_regs_backup *gr;
+ int i;
+
+ gr = core_if->gr_backup;
+ if (!gr) {
+ FH_WARN("gr_backup is not allocated!\n");
+ return -FH_E_NO_MEMORY;
+ }
+
+ gr->gotgctl_local = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+ gr->gintmsk_local = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
+ gr->gahbcfg_local = FH_READ_REG32(&core_if->core_global_regs->gahbcfg);
+ gr->gusbcfg_local = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+ gr->grxfsiz_local = FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
+ gr->gnptxfsiz_local = FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz);
+ gr->hptxfsiz_local = FH_READ_REG32(&core_if->core_global_regs->hptxfsiz);
+#ifdef CONFIG_USB_FH_OTG_LPM
+ gr->glpmcfg_local = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+#endif
+ gr->gi2cctl_local = FH_READ_REG32(&core_if->core_global_regs->gi2cctl);
+ gr->pcgcctl_local = FH_READ_REG32(core_if->pcgcctl);
+ gr->gdfifocfg_local =
+ FH_READ_REG32(&core_if->core_global_regs->gdfifocfg);
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ gr->dtxfsiz_local[i] =
+ FH_READ_REG32(&(core_if->core_global_regs->dtxfsiz[i]));
+ }
+
+ FH_DEBUGPL(DBG_ANY, "===========Backing Global registers==========\n");
+ FH_DEBUGPL(DBG_ANY, "Backed up gotgctl = %08x\n", gr->gotgctl_local);
+ FH_DEBUGPL(DBG_ANY, "Backed up gintmsk = %08x\n", gr->gintmsk_local);
+ FH_DEBUGPL(DBG_ANY, "Backed up gahbcfg = %08x\n", gr->gahbcfg_local);
+ FH_DEBUGPL(DBG_ANY, "Backed up gusbcfg = %08x\n", gr->gusbcfg_local);
+ FH_DEBUGPL(DBG_ANY, "Backed up grxfsiz = %08x\n", gr->grxfsiz_local);
+ FH_DEBUGPL(DBG_ANY, "Backed up gnptxfsiz = %08x\n",
+ gr->gnptxfsiz_local);
+ FH_DEBUGPL(DBG_ANY, "Backed up hptxfsiz = %08x\n",
+ gr->hptxfsiz_local);
+#ifdef CONFIG_USB_FH_OTG_LPM
+ FH_DEBUGPL(DBG_ANY, "Backed up glpmcfg = %08x\n", gr->glpmcfg_local);
+#endif
+ FH_DEBUGPL(DBG_ANY, "Backed up gi2cctl = %08x\n", gr->gi2cctl_local);
+ FH_DEBUGPL(DBG_ANY, "Backed up pcgcctl = %08x\n", gr->pcgcctl_local);
+ FH_DEBUGPL(DBG_ANY,"Backed up gdfifocfg = %08x\n",gr->gdfifocfg_local);
+
+ return 0;
+}
+
+/** Saves GINTMSK register before setting the msk bits. */
+int fh_otg_save_gintmsk_reg(fh_otg_core_if_t * core_if)
+{
+ struct fh_otg_global_regs_backup *gr;
+
+ gr = core_if->gr_backup;
+ if (!gr) {
+ FH_WARN("gr_backup is not allocated!\n");
+ return -FH_E_NO_MEMORY;
+ }
+
+ gr->gintmsk_local = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
+
+ FH_DEBUGPL(DBG_ANY,"=============Backing GINTMSK registers============\n");
+ FH_DEBUGPL(DBG_ANY, "Backed up gintmsk = %08x\n", gr->gintmsk_local);
+
+ return 0;
+}
+
+int fh_otg_save_dev_regs(fh_otg_core_if_t * core_if)
+{
+ struct fh_otg_dev_regs_backup *dr;
+ int i;
+
+ dr = core_if->dr_backup;
+ if (!dr) {
+ FH_WARN("dr_backup is not allocated!\n");
+ return -FH_E_NO_MEMORY;
+ }
+
+ dr->dcfg = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
+ dr->dctl = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
+ dr->daintmsk =
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->daintmsk);
+ dr->diepmsk =
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->diepmsk);
+ dr->doepmsk =
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->doepmsk);
+
+ for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
+ dr->diepctl[i] =
+ FH_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
+ dr->dieptsiz[i] =
+ FH_READ_REG32(&core_if->dev_if->in_ep_regs[i]->dieptsiz);
+ dr->diepdma[i] =
+ FH_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepdma);
+ }
+
+ for (i = 0; i <= core_if->dev_if->num_out_eps; ++i) {
+ dr->doepctl[i] =
+ FH_READ_REG32(&core_if->dev_if->out_ep_regs[i]->doepctl);
+ dr->doeptsiz[i] =
+ FH_READ_REG32(&core_if->dev_if->out_ep_regs[i]->doeptsiz);
+ dr->doepdma[i] =
+ FH_READ_REG32(&core_if->dev_if->out_ep_regs[i]->doepdma);
+ }
+
+
+
+ FH_DEBUGPL(DBG_ANY,
+ "=============Backing Device registers==============\n");
+ FH_DEBUGPL(DBG_ANY, "Backed up dcfg = %08x\n", dr->dcfg);
+ FH_DEBUGPL(DBG_ANY, "Backed up dctl = %08x\n", dr->dctl);
+ FH_DEBUGPL(DBG_ANY, "Backed up daintmsk = %08x\n",
+ dr->daintmsk);
+ FH_DEBUGPL(DBG_ANY, "Backed up diepmsk = %08x\n", dr->diepmsk);
+ FH_DEBUGPL(DBG_ANY, "Backed up doepmsk = %08x\n", dr->doepmsk);
+ for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
+ FH_DEBUGPL(DBG_ANY, "Backed up diepctl[%d] = %08x\n", i,
+ dr->diepctl[i]);
+ FH_DEBUGPL(DBG_ANY, "Backed up dieptsiz[%d] = %08x\n",
+ i, dr->dieptsiz[i]);
+ FH_DEBUGPL(DBG_ANY, "Backed up diepdma[%d] = %08x\n", i,
+ dr->diepdma[i]);
+ }
+
+ for (i = 0; i <= core_if->dev_if->num_out_eps; ++i) {
+ FH_DEBUGPL(DBG_ANY, "Backed up doepctl[%d] = %08x\n", i,
+ dr->doepctl[i]);
+ FH_DEBUGPL(DBG_ANY, "Backed up doeptsiz[%d] = %08x\n",
+ i, dr->doeptsiz[i]);
+ FH_DEBUGPL(DBG_ANY, "Backed up doepdma[%d] = %08x\n", i,
+ dr->doepdma[i]);
+ }
+
+ return 0;
+}
+
+int fh_otg_save_host_regs(fh_otg_core_if_t * core_if)
+{
+	struct fh_otg_host_regs_backup *hr;
+	int i;
+
+	hr = core_if->hr_backup;
+	if (!hr) {
+		FH_WARN("hr_backup is not allocated!\n");
+		return -FH_E_NO_MEMORY;
+	}
+
+	hr->hcfg_local =
+	    FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
+	hr->haintmsk_local =
+	    FH_READ_REG32(&core_if->host_if->host_global_regs->haintmsk);
+	for (i = 0; i < fh_otg_get_param_host_channels(core_if); ++i) {
+		hr->hcintmsk_local[i] =
+		    FH_READ_REG32(&core_if->host_if->hc_regs[i]->hcintmsk);
+	}
+	hr->hprt0_local = FH_READ_REG32(core_if->host_if->hprt0);
+	hr->hfir_local =
+	    FH_READ_REG32(&core_if->host_if->host_global_regs->hfir);
+
+	FH_DEBUGPL(DBG_ANY,
+		   "=============Backing Host registers===============\n");
+	FH_DEBUGPL(DBG_ANY, "Backed up hcfg		= %08x\n",
+		   hr->hcfg_local);
+	FH_DEBUGPL(DBG_ANY, "Backed up haintmsk = %08x\n", hr->haintmsk_local);
+	/* '<' (not '<='): only [0, host_channels) entries were saved above */
+	for (i = 0; i < fh_otg_get_param_host_channels(core_if); ++i) {
+		FH_DEBUGPL(DBG_ANY, "Backed up hcintmsk[%02d]=%08x\n", i,
+			   hr->hcintmsk_local[i]);
+	}
+	FH_DEBUGPL(DBG_ANY, "Backed up hprt0		= %08x\n",
+		   hr->hprt0_local);
+	FH_DEBUGPL(DBG_ANY, "Backed up hfir		= %08x\n",
+		   hr->hfir_local);
+
+	return 0;
+}
+
+/**
+ * Restores the core global registers from the backup in
+ * core_if->gr_backup (used when coming out of hibernation).
+ *
+ * @param core_if Programming view of the FH_otg controller
+ * @return 0 on success, -FH_E_INVALID if no backup exists.
+ */
+int fh_otg_restore_global_regs(fh_otg_core_if_t * core_if)
+{
+	struct fh_otg_global_regs_backup *gr;
+	int i;
+
+	gr = core_if->gr_backup;
+	if (!gr) {
+		return -FH_E_INVALID;
+	}
+
+	FH_WRITE_REG32(&core_if->core_global_regs->gotgctl, gr->gotgctl_local);
+	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gr->gintmsk_local);
+	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gr->gusbcfg_local);
+	FH_WRITE_REG32(&core_if->core_global_regs->gahbcfg, gr->gahbcfg_local);
+	FH_WRITE_REG32(&core_if->core_global_regs->grxfsiz, gr->grxfsiz_local);
+	FH_WRITE_REG32(&core_if->core_global_regs->gnptxfsiz, gr->gnptxfsiz_local);
+	FH_WRITE_REG32(&core_if->core_global_regs->hptxfsiz, gr->hptxfsiz_local);
+	FH_WRITE_REG32(&core_if->core_global_regs->gdfifocfg, gr->gdfifocfg_local);
+	/* Restore every device IN endpoint Tx FIFO size register. */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		FH_WRITE_REG32(&core_if->core_global_regs->dtxfsiz[i], gr->dtxfsiz_local[i]);
+	}
+
+// FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
+// FH_WRITE_REG32(core_if->host_if->hprt0, 0x0000100A); Done in fh_otg_host_hibernation_restore no need here //mvardan
+	/* NOTE(review): GAHBCFG is written a second time here with the same
+	 * value as above - kept from the vendor code; confirm whether the
+	 * duplicate write is required before removing. */
+	FH_WRITE_REG32(&core_if->core_global_regs->gahbcfg, (gr->gahbcfg_local));
+	return 0;
+}
+
+/**
+ * Restores the device-mode registers from the backup in
+ * core_if->dr_backup.
+ *
+ * @param core_if    Programming view of the FH_otg controller
+ * @param rem_wakeup Non-zero when resuming due to remote wakeup; in that
+ *                   case DCTL is not restored, so the remote-wakeup
+ *                   signalling state in DCTL is left untouched.
+ * @return 0 on success, -FH_E_INVALID if no backup exists.
+ */
+int fh_otg_restore_dev_regs(fh_otg_core_if_t * core_if, int rem_wakeup)
+{
+	struct fh_otg_dev_regs_backup *dr;
+	int i;
+
+	dr = core_if->dr_backup;
+
+	if (!dr) {
+		return -FH_E_INVALID;
+	}
+
+	if (!rem_wakeup) {
+		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
+			       dr->dctl);
+	}
+
+	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->daintmsk, dr->daintmsk);
+	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->diepmsk, dr->diepmsk);
+	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->doepmsk, dr->doepmsk);
+
+	/* IN endpoints: restore transfer size and DMA address before the
+	 * control register (which may re-enable the endpoint). */
+	for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
+		FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->dieptsiz, dr->dieptsiz[i]);
+		FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->diepdma, dr->diepdma[i]);
+		FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl, dr->diepctl[i]);
+	}
+
+	/* OUT endpoints: same ordering as the IN endpoints above. */
+	for (i = 0; i <= core_if->dev_if->num_out_eps; ++i) {
+		FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doeptsiz, dr->doeptsiz[i]);
+		FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doepdma, dr->doepdma[i]);
+		FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doepctl, dr->doepctl[i]);
+	}
+
+	return 0;
+}
+
+/**
+ * Restores the host-mode registers (HCFG, HAINTMSK and the per-channel
+ * HCINTMSK) from the backup in core_if->hr_backup.
+ *
+ * @param core_if Programming view of the FH_otg controller
+ * @param reset   Currently unused: the conditional HFIR restore that
+ *                depended on it is commented out below.
+ * @return 0 on success, -FH_E_INVALID if no backup exists.
+ */
+int fh_otg_restore_host_regs(fh_otg_core_if_t * core_if, int reset)
+{
+	struct fh_otg_host_regs_backup *hr;
+	int i;
+	hr = core_if->hr_backup;
+
+	if (!hr) {
+		return -FH_E_INVALID;
+	}
+
+	FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg, hr->hcfg_local);
+	//if (!reset)
+	//{
+	//      FH_WRITE_REG32(&core_if->host_if->host_global_regs->hfir, hr->hfir_local);
+	//}
+
+	FH_WRITE_REG32(&core_if->host_if->host_global_regs->haintmsk,
+		       hr->haintmsk_local);
+	for (i = 0; i < fh_otg_get_param_host_channels(core_if); ++i) {
+		FH_WRITE_REG32(&core_if->host_if->hc_regs[i]->hcintmsk,
+			       hr->hcintmsk_local[i]);
+	}
+
+	return 0;
+}
+
+/**
+ * Restores the LPM register (GLPMCFG, only when CONFIG_USB_FH_OTG_LPM is
+ * enabled) and the I2C control register (GI2CCTL) from the global backup.
+ *
+ * NOTE(review): unlike the other restore helpers, gr_backup is not
+ * checked for NULL here - callers must guarantee a backup exists.
+ *
+ * @param core_if Programming view of the FH_otg controller
+ * @return always 0.
+ */
+int restore_lpm_i2c_regs(fh_otg_core_if_t * core_if)
+{
+	struct fh_otg_global_regs_backup *gr;
+
+	gr = core_if->gr_backup;
+
+	/* Restore values for LPM and I2C */
+#ifdef CONFIG_USB_FH_OTG_LPM
+	FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, gr->glpmcfg_local);
+#endif
+	FH_WRITE_REG32(&core_if->core_global_regs->gi2cctl, gr->gi2cctl_local);
+
+	return 0;
+}
+
+/**
+ * Restores the minimal ("essential") register set needed to bring the
+ * core out of hibernation: LPM/I2C registers, PCGCCTL, GAHBCFG, GUSBCFG
+ * and the mode-specific configuration register (HCFG for host, DCFG for
+ * device). The write ordering follows the vendor restore sequence and
+ * must not be changed casually.
+ *
+ * @param core_if Programming view of the FH_otg controller
+ * @param rmode   Non-zero to set PCGCCTL.restoremode (host path).
+ * @param is_host Non-zero to restore host registers, zero for device.
+ * @return always 0.
+ */
+int restore_essential_regs(fh_otg_core_if_t * core_if, int rmode, int is_host)
+{
+	struct fh_otg_global_regs_backup *gr;
+	pcgcctl_data_t pcgcctl = {.d32 = 0 };
+	gahbcfg_data_t gahbcfg = {.d32 = 0 };
+	gusbcfg_data_t gusbcfg = {.d32 = 0 };
+	gintmsk_data_t gintmsk = {.d32 = 0 };
+
+	/* Restore LPM and I2C registers */
+	restore_lpm_i2c_regs(core_if);
+
+	/* Set PCGCCTL to 0 */
+	FH_WRITE_REG32(core_if->pcgcctl, 0x00000000);
+
+	gr = core_if->gr_backup;
+	/* Load restore values for [31:14] bits */
+	FH_WRITE_REG32(core_if->pcgcctl,
+		       ((gr->pcgcctl_local & 0xffffc000) | 0x00020000));
+
+	/* Unmask global Interrupt in GAHBCFG and restore it */
+	gahbcfg.d32 = gr->gahbcfg_local;
+	gahbcfg.b.glblintrmsk = 1;
+	FH_WRITE_REG32(&core_if->core_global_regs->gahbcfg, gahbcfg.d32);
+
+	/* Clear all pending interrupts */
+	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
+
+	/* Unmask restore done interrupt */
+	gintmsk.b.restoredone = 1;
+	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
+
+	/* Restore GUSBCFG and HCFG/DCFG */
+	gusbcfg.d32 = core_if->gr_backup->gusbcfg_local;
+	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
+
+	if (is_host) {
+		hcfg_data_t hcfg = {.d32 = 0 };
+		hcfg.d32 = core_if->hr_backup->hcfg_local;
+		FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg,
+			       hcfg.d32);
+
+		/* Load restore values for [31:14] bits */
+		/* NOTE(review): the second assignment overwrites the first,
+		 * so the value written is (pcgcctl_local | 0x00020000), not
+		 * the masked form. If a masked restore was intended this is
+		 * a latent bug - confirm against the databook. */
+		pcgcctl.d32 = gr->pcgcctl_local & 0xffffc000;
+		pcgcctl.d32 = gr->pcgcctl_local | 0x00020000;
+
+		if (rmode)
+			pcgcctl.b.restoremode = 1;
+		FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
+		fh_udelay(10);
+
+		/* Load restore values for [31:14] bits and set EssRegRestored bit */
+		/* NOTE(review): same overwrite pattern as above - the final
+		 * value is the masked form here; verify intent. */
+		pcgcctl.d32 = gr->pcgcctl_local | 0xffffc000;
+		pcgcctl.d32 = gr->pcgcctl_local & 0xffffc000;
+		pcgcctl.b.ess_reg_restored = 1;
+		if (rmode)
+			pcgcctl.b.restoremode = 1;
+		FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
+	} else {
+		dcfg_data_t dcfg = {.d32 = 0 };
+		dcfg.d32 = core_if->dr_backup->dcfg;
+		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+		/* Load restore values for [31:14] bits */
+		pcgcctl.d32 = gr->pcgcctl_local & 0xffffc000;
+		pcgcctl.d32 = gr->pcgcctl_local | 0x00020000;
+		if (!rmode) {
+			pcgcctl.d32 |= 0x208;
+		}
+		FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
+		fh_udelay(10);
+
+		/* Load restore values for [31:14] bits */
+		pcgcctl.d32 = gr->pcgcctl_local & 0xffffc000;
+		pcgcctl.d32 = gr->pcgcctl_local | 0x00020000;
+		pcgcctl.b.ess_reg_restored = 1;
+		if (!rmode)
+			pcgcctl.d32 |= 0x208;
+		FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
+	}
+
+	return 0;
+}
+
+/**
+ * Programs HCFG.FSLSPClkSel based on the PHY configuration: a dedicated
+ * full-speed PHY (or a ULPI PHY operating in FS/LS-only mode) uses the
+ * 48 MHz clock select, otherwise the 30/60 MHz high-speed select is used.
+ */
+static void init_fslspclksel(fh_otg_core_if_t * core_if)
+{
+	hcfg_data_t hcfg;
+	uint32_t clksel = FH_HCFG_30_60_MHZ;	/* default: HS PHY clock */
+	int ulpi_fs_ls = (core_if->hwcfg2.b.hs_phy_type == 2) &&
+	    (core_if->hwcfg2.b.fs_phy_type == 1) &&
+	    (core_if->core_params->ulpi_fs_ls);
+
+	if (ulpi_fs_ls ||
+	    (core_if->core_params->phy_type == FH_PHY_TYPE_PARAM_FS)) {
+		/* Full speed PHY: run the FS/LS clock at 48 MHz */
+		clksel = FH_HCFG_48_MHZ;
+	}
+
+	FH_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", clksel);
+	hcfg.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
+	hcfg.b.fslspclksel = clksel;
+	FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+}
+
+/**
+ * Programs DCFG.DevSpd from the PHY type and the configured enumeration
+ * speed: 0x3 for a dedicated FS PHY (or ULPI FS/LS mode), 0x1 for an HS
+ * PHY limited to full speed, 0x0 for an HS PHY at high speed.
+ */
+static void init_devspd(fh_otg_core_if_t * core_if)
+{
+	dcfg_data_t dcfg;
+	uint32_t devspd;
+	int fs_phy = (core_if->core_params->phy_type == FH_PHY_TYPE_PARAM_FS);
+	int ulpi_fs_ls = (core_if->hwcfg2.b.hs_phy_type == 2) &&
+	    (core_if->hwcfg2.b.fs_phy_type == 1) &&
+	    (core_if->core_params->ulpi_fs_ls);
+
+	if (ulpi_fs_ls || fs_phy) {
+		/* Full speed PHY */
+		devspd = 0x3;
+	} else if (core_if->core_params->speed == FH_SPEED_PARAM_FULL) {
+		/* High speed PHY running at full speed */
+		devspd = 0x1;
+	} else {
+		/* High speed PHY running at high speed */
+		devspd = 0x0;
+	}
+
+	FH_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", devspd);
+
+	dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
+	dcfg.b.devspd = devspd;
+	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
+}
+
+/**
+ * Counts the IN endpoints described by GHWCFG1 (two direction bits per
+ * endpoint, starting at bit 3 for EP1's IN bit). When dedicated Tx
+ * FIFOs are enabled the count is additionally capped by the number of
+ * IN endpoints reported in GHWCFG4.
+ *
+ * @param core_if Programming view of the FH_otg controller
+ * @return number of usable IN endpoints.
+ */
+static uint32_t calc_num_in_eps(fh_otg_core_if_t * core_if)
+{
+	uint32_t count = 0;
+	uint32_t dir_bits = core_if->hwcfg1.d32 >> 3;
+	uint32_t total_eps = core_if->hwcfg2.b.num_dev_ep;
+	uint32_t fifo_limit = core_if->hwcfg4.b.num_in_eps;
+	uint32_t ep;
+
+	for (ep = 0; ep < total_eps; ep++, dir_bits >>= 2) {
+		/* A cleared bit means the endpoint supports IN. */
+		if ((dir_bits & 0x1) == 0)
+			count++;
+	}
+
+	/* With dedicated FIFOs there cannot be more IN EPs than Tx FIFOs. */
+	if (core_if->hwcfg4.b.ded_fifo_en && count > fifo_limit)
+		count = fifo_limit;
+
+	return count;
+}
+
+/**
+ * Counts the OUT endpoints described by GHWCFG1 (two direction bits per
+ * endpoint, starting at bit 2 for EP1's OUT bit).
+ *
+ * @param core_if Programming view of the FH_otg controller
+ * @return number of usable OUT endpoints.
+ */
+static uint32_t calc_num_out_eps(fh_otg_core_if_t * core_if)
+{
+	uint32_t count = 0;
+	uint32_t dir_bits = core_if->hwcfg1.d32 >> 2;
+	uint32_t total_eps = core_if->hwcfg2.b.num_dev_ep;
+	uint32_t ep;
+
+	for (ep = 0; ep < total_eps; ep++, dir_bits >>= 2) {
+		/* A cleared bit means the endpoint supports OUT. */
+		if ((dir_bits & 0x1) == 0)
+			count++;
+	}
+	return count;
+}
+
+/**
+ * This function initializes the FH_otg controller registers and
+ * prepares the core for device mode or host mode operation.
+ *
+ * Sequence: program GUSBCFG basics, soft-reset the core, read the FIFO
+ * geometry from the hardware config registers, select and configure the
+ * PHY (with a reset after any PHY change), program GAHBCFG (DMA mode and
+ * burst length), program the OTG capability bits, then enable the common
+ * interrupts and record the initial operating mode.
+ *
+ * @param core_if Programming view of the FH_otg controller
+ *
+ */
+void fh_otg_core_init(fh_otg_core_if_t * core_if)
+{
+	int i = 0;
+	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+	fh_otg_dev_if_t *dev_if = core_if->dev_if;
+	gahbcfg_data_t ahbcfg = {.d32 = 0 };
+	gusbcfg_data_t usbcfg = {.d32 = 0 };
+	gi2cctl_data_t i2cctl = {.d32 = 0 };
+
+	FH_DEBUGPL(DBG_CILV, "fh_otg_core_init(%p)\n", core_if);
+
+	/* Common Initialization */
+	usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
+
+	/* Program the ULPI External VBUS bit if needed */
+	usbcfg.b.ulpi_ext_vbus_drv =
+	    (core_if->core_params->phy_ulpi_ext_vbus ==
+	     FH_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
+
+	/* Set external TS Dline pulsing */
+	usbcfg.b.term_sel_dl_pulse =
+	    (core_if->core_params->ts_dline == 1) ? 1 : 0;
+	FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
+
+	/* Reset the Controller */
+	fh_otg_core_reset(core_if);
+
+	core_if->adp_enable = core_if->core_params->adp_supp_enable;
+	core_if->power_down = core_if->core_params->power_down;
+
+	/* Initialize parameters from Hardware configuration registers. */
+	dev_if->num_in_eps = calc_num_in_eps(core_if);
+	dev_if->num_out_eps = calc_num_out_eps(core_if);
+
+	FH_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",
+		   core_if->hwcfg4.b.num_dev_perio_in_ep);
+
+	/* Cache the power-on periodic Tx FIFO sizes (depth is in the upper
+	 * half of each DTXFSIZ register). */
+	for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+		dev_if->perio_tx_fifo_size[i] =
+		    FH_READ_REG32(&global_regs->dtxfsiz[i]) >> 16;
+		FH_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n",
+			   i, dev_if->perio_tx_fifo_size[i]);
+	}
+
+	for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+		dev_if->tx_fifo_size[i] =
+		    FH_READ_REG32(&global_regs->dtxfsiz[i]) >> 16;
+		FH_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n",
+			   i, dev_if->tx_fifo_size[i]);
+	}
+
+	core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth;
+	core_if->rx_fifo_size = FH_READ_REG32(&global_regs->grxfsiz);
+	core_if->nperio_tx_fifo_size =
+	    FH_READ_REG32(&global_regs->gnptxfsiz) >> 16;
+
+	FH_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size);
+	FH_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size);
+	FH_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n",
+		   core_if->nperio_tx_fifo_size);
+
+	/*
+	   fh_otg_dump_global_registers(core_if);
+	   fh_otg_dump_dev_registers(core_if);
+	 */
+
+
+	/* This programming sequence needs to happen in FS mode before any other
+	 * programming occurs */
+	if ((core_if->core_params->speed == FH_SPEED_PARAM_FULL) &&
+	    (core_if->core_params->phy_type == FH_PHY_TYPE_PARAM_FS)) {
+		/* If FS mode with FS PHY */
+
+		/* core_init() is now called on every switch so only call the
+		 * following for the first time through. */
+		if (!core_if->phy_init_done) {
+			core_if->phy_init_done = 1;
+			FH_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
+			usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
+			usbcfg.b.physel = 1;
+			FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
+
+			/* Reset after a PHY select */
+			fh_otg_core_reset(core_if);
+		}
+
+		/* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
+		 * do this on HNP Dev/Host mode switches (done in dev_init and
+		 * host_init). */
+		if (fh_otg_is_host_mode(core_if)) {
+			init_fslspclksel(core_if);
+		} else {
+			init_devspd(core_if);
+		}
+
+		if (core_if->core_params->i2c_enable) {
+			FH_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
+			/* Program GUSBCFG.OtgUtmifsSel to I2C */
+			usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
+			usbcfg.b.otgutmifssel = 1;
+			FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
+
+			/* Program GI2CCTL.I2CEn: device address first with
+			 * I2CEn cleared, then set I2CEn in a second write. */
+			i2cctl.d32 = FH_READ_REG32(&global_regs->gi2cctl);
+			i2cctl.b.i2cdevaddr = 1;
+			i2cctl.b.i2cen = 0;
+			FH_WRITE_REG32(&global_regs->gi2cctl, i2cctl.d32);
+			i2cctl.b.i2cen = 1;
+			FH_WRITE_REG32(&global_regs->gi2cctl, i2cctl.d32);
+		}
+
+	} /* endif speed == FH_SPEED_PARAM_FULL */
+	else {
+		/* High speed PHY. */
+		if (!core_if->phy_init_done) {
+			core_if->phy_init_done = 1;
+			/* HS PHY parameters. These parameters are preserved
+			 * during soft reset so only program the first time. Do
+			 * a soft reset immediately after setting phyif. */
+
+			if (core_if->core_params->phy_type == 2) {
+				/* ULPI interface */
+				usbcfg.b.ulpi_utmi_sel = 1;
+				usbcfg.b.phyif = 0;
+				usbcfg.b.ddrsel =
+				    core_if->core_params->phy_ulpi_ddr;
+			} else if (core_if->core_params->phy_type == 1) {
+				/* UTMI+ interface */
+				usbcfg.b.ulpi_utmi_sel = 0;
+				if (core_if->core_params->phy_utmi_width == 16) {
+					usbcfg.b.phyif = 1;
+
+				} else {
+					usbcfg.b.phyif = 0;
+				}
+			} else {
+				FH_ERROR("FS PHY TYPE\n");
+			}
+			FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
+			/* Reset after setting the PHY parameters */
+			fh_otg_core_reset(core_if);
+		}
+	}
+
+	/* ULPI FS/LS mode: serial FS/LS traffic over the ULPI interface. */
+	if ((core_if->hwcfg2.b.hs_phy_type == 2) &&
+	    (core_if->hwcfg2.b.fs_phy_type == 1) &&
+	    (core_if->core_params->ulpi_fs_ls)) {
+		FH_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
+		usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
+		usbcfg.b.ulpi_fsls = 1;
+		usbcfg.b.ulpi_clk_sus_m = 1;
+		FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
+	} else {
+		usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
+		usbcfg.b.ulpi_fsls = 0;
+		usbcfg.b.ulpi_clk_sus_m = 0;
+		FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
+	}
+
+	/* Program the GAHBCFG Register. */
+	switch (core_if->hwcfg2.b.architecture) {
+
+	case FH_SLAVE_ONLY_ARCH:
+		FH_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
+		ahbcfg.b.nptxfemplvl_txfemplvl =
+		    FH_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+		ahbcfg.b.ptxfemplvl = FH_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+		core_if->dma_enable = 0;
+		core_if->dma_desc_enable = 0;
+		break;
+
+	case FH_EXT_DMA_ARCH:
+		FH_DEBUGPL(DBG_CIL, "External DMA Mode\n");
+		{
+			/* hburstlen = log2(dma_burst_size) */
+			uint8_t brst_sz = core_if->core_params->dma_burst_size;
+			ahbcfg.b.hburstlen = 0;
+			while (brst_sz > 1) {
+				ahbcfg.b.hburstlen++;
+				brst_sz >>= 1;
+			}
+		}
+		core_if->dma_enable = (core_if->core_params->dma_enable != 0);
+		core_if->dma_desc_enable =
+		    (core_if->core_params->dma_desc_enable != 0);
+		break;
+
+	case FH_INT_DMA_ARCH:
+		FH_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
+		/* Old value was FH_GAHBCFG_INT_DMA_BURST_INCR - done for
+		   Host mode ISOC in issue fix - vahrama */
+		ahbcfg.b.hburstlen = FH_GAHBCFG_INT_DMA_BURST_INCR4;
+		core_if->dma_enable = (core_if->core_params->dma_enable != 0);
+		core_if->dma_desc_enable =
+		    (core_if->core_params->dma_desc_enable != 0);
+		break;
+
+	}
+	if (core_if->dma_enable) {
+		if (core_if->dma_desc_enable) {
+			FH_PRINTF("Using Descriptor DMA mode\n");
+		} else {
+			FH_PRINTF("Using Buffer DMA mode\n");
+		}
+	} else {
+		FH_PRINTF("Using Slave mode\n");
+		core_if->dma_desc_enable = 0;
+	}
+
+	if (core_if->core_params->ahb_single) {
+		ahbcfg.b.ahbsingle = 1;
+	}
+
+	ahbcfg.b.dmaenable = core_if->dma_enable;
+	FH_WRITE_REG32(&global_regs->gahbcfg, ahbcfg.d32);
+
+	core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en;
+
+	core_if->pti_enh_enable = core_if->core_params->pti_enable != 0;
+	core_if->multiproc_int_enable = core_if->core_params->mpi_enable;
+	FH_PRINTF("Periodic Transfer Interrupt Enhancement - %s\n",
+		  ((core_if->pti_enh_enable) ? "enabled" : "disabled"));
+	FH_PRINTF("Multiprocessor Interrupt Enhancement - %s\n",
+		  ((core_if->multiproc_int_enable) ? "enabled" : "disabled"));
+
+	/*
+	 * Program the GUSBCFG register.
+	 */
+	usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
+
+	/* Map the hardware's OTG capability onto the configured otg_cap
+	 * parameter (HNP/SRP capability bits). */
+	switch (core_if->hwcfg2.b.op_mode) {
+	case FH_MODE_HNP_SRP_CAPABLE:
+		usbcfg.b.hnpcap = (core_if->core_params->otg_cap ==
+				   FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
+		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
+				   FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+
+	case FH_MODE_SRP_ONLY_CAPABLE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
+				   FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+
+	case FH_MODE_NO_HNP_SRP_CAPABLE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = 0;
+		break;
+
+	case FH_MODE_SRP_CAPABLE_DEVICE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
+				   FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+
+	case FH_MODE_NO_SRP_CAPABLE_DEVICE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = 0;
+		break;
+
+	case FH_MODE_SRP_CAPABLE_HOST:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
+				   FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+
+	case FH_MODE_NO_SRP_CAPABLE_HOST:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = 0;
+		break;
+	}
+
+	FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
+
+#ifdef CONFIG_USB_FH_OTG_LPM
+	if (core_if->core_params->lpm_enable) {
+		glpmcfg_data_t lpmcfg = {.d32 = 0 };
+
+		/* To enable LPM support set lpm_cap_en bit */
+		lpmcfg.b.lpm_cap_en = 1;
+
+		/* Make AppL1Res ACK */
+		lpmcfg.b.appl_resp = 1;
+
+		/* Retry 3 times */
+		lpmcfg.b.retry_count = 3;
+
+		FH_MODIFY_REG32(&core_if->core_global_regs->glpmcfg,
+				0, lpmcfg.d32);
+
+	}
+#endif
+	if (core_if->core_params->ic_usb_cap) {
+		gusbcfg_data_t gusbcfg = {.d32 = 0 };
+		gusbcfg.b.ic_usb_cap = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gusbcfg,
+				0, gusbcfg.d32);
+	}
+	{
+		gotgctl_data_t gotgctl = {.d32 = 0 };
+		gotgctl.b.otgver = core_if->core_params->otg_ver;
+#ifdef FH_HOST_ONLY
+		gotgctl.b.vbvalidoven = 1;
+		gotgctl.b.vbvalidovval = 1;
+#endif
+		FH_MODIFY_REG32(&core_if->core_global_regs->gotgctl, 0,
+				gotgctl.d32);
+		/* Set OTG version supported */
+		core_if->otg_ver = core_if->core_params->otg_ver;
+		FH_PRINTF("OTG VER PARAM: %d, OTG VER FLAG: %d\n",
+			  core_if->core_params->otg_ver, core_if->otg_ver);
+	}
+
+	/* Enable common interrupts */
+	fh_otg_enable_common_interrupts(core_if);
+
+	/* Do device or host intialization based on mode during PCD
+	 * and HCD initialization */
+	if (fh_otg_is_host_mode(core_if)) {
+		FH_DEBUGPL(DBG_ANY, "Host Mode\n");
+		core_if->op_state = A_HOST;
+	} else {
+		FH_DEBUGPL(DBG_ANY, "Device Mode\n");
+		core_if->op_state = B_PERIPHERAL;
+#ifdef FH_DEVICE_ONLY
+		fh_otg_core_dev_init(core_if);
+#endif
+	}
+}
+
+
+/**
+ * This function enables the Device mode interrupts.
+ *
+ * Masks everything, clears pending interrupts, re-enables the common
+ * interrupts, then unmasks the device-mode set (USB reset, enum done,
+ * early suspend, and - depending on configuration - EP interrupts, EP
+ * mismatch and incomplete-ISO interrupts).
+ *
+ * @param core_if Programming view of FH_otg controller
+ */
+void fh_otg_enable_device_interrupts(fh_otg_core_if_t * core_if)
+{
+	gintmsk_data_t intr_mask = {.d32 = 0 };
+	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+
+	FH_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+	/* Disable all interrupts. */
+	FH_WRITE_REG32(&global_regs->gintmsk, 0);
+
+	/* Clear any pending interrupts */
+	FH_WRITE_REG32(&global_regs->gintsts, 0xFFFFFFFF);
+
+	/* Enable the common interrupts */
+	fh_otg_enable_common_interrupts(core_if);
+
+	/* Enable interrupts */
+	intr_mask.b.usbreset = 1;
+	intr_mask.b.enumdone = 1;
+	/* Disable Disconnect interrupt in Device mode */
+	intr_mask.b.disconnect = 0;
+
+	/* With multiproc interrupts, per-EP interrupts are routed through
+	 * DEACHINT instead of the global IN/OUT EP interrupt bits. */
+	if (!core_if->multiproc_int_enable) {
+		intr_mask.b.inepintr = 1;
+		intr_mask.b.outepintr = 1;
+	}
+
+	intr_mask.b.erlysuspend = 1;
+
+	/* EP mismatch is only relevant with a shared Tx FIFO. */
+	if (core_if->en_multiple_tx_fifo == 0) {
+		intr_mask.b.epmismatch = 1;
+	}
+
+	if (core_if->dma_enable) {
+		if (core_if->dma_desc_enable) {
+			dctl_data_t dctl1 = {.d32 = 0 };
+			dctl1.b.ifrmnum = 1;
+			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+					dctl, 0, dctl1.d32);
+		}
+	}
+
+	//intr_mask.b.incomplisoout = 1;
+	if (!core_if->dma_desc_enable)
+		intr_mask.b.incomplisoin = 1;
+
+/* Enable the ignore frame number for ISOC xfers - MAS */
+/* Disable to support high bandwith ISOC transfers - manukz */
+#if 0
+#ifdef FH_UTE_PER_IO
+	if (core_if->dma_enable) {
+		if (core_if->dma_desc_enable) {
+			dctl_data_t dctl1 = {.d32 = 0 };
+			dctl1.b.ifrmnum = 1;
+			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+					dctl, 0, dctl1.d32);
+			FH_DEBUG("----Enabled Ignore frame number (0x%08x)",
+				 FH_READ_REG32(&core_if->dev_if->
+					       dev_global_regs->dctl));
+		}
+	}
+#endif
+#endif
+#ifdef FH_EN_ISOC
+	if (core_if->dma_enable) {
+		if (core_if->dma_desc_enable == 0) {
+			if (core_if->pti_enh_enable) {
+				dctl_data_t dctl = {.d32 = 0 };
+				dctl.b.ifrmnum = 1;
+				FH_MODIFY_REG32(&core_if->
+						dev_if->dev_global_regs->dctl,
+						0, dctl.d32);
+			} else {
+				intr_mask.b.incomplisoin = 1;
+				intr_mask.b.incomplisoout = 1;
+			}
+		}
+	} else {
+		intr_mask.b.incomplisoin = 1;
+		intr_mask.b.incomplisoout = 1;
+	}
+#endif /* FH_EN_ISOC */
+
+	/** @todo NGS: Should this be a module parameter? */
+#ifdef USE_PERIODIC_EP
+	intr_mask.b.isooutdrop = 1;
+	intr_mask.b.eopframe = 1;
+	intr_mask.b.incomplisoin = 1;
+	intr_mask.b.incomplisoout = 1;
+#endif
+
+	FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+
+	FH_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
+		   FH_READ_REG32(&global_regs->gintmsk));
+}
+
+/**
+ * This function initializes the FH_otg controller registers for
+ * device mode.
+ *
+ * Restarts the PHY clock, programs DCFG, sizes and flushes the data
+ * FIFOs, resets all endpoint registers (disabling any enabled endpoint
+ * with the required NAK handshake sequence), programs the thresholding
+ * registers, and finally enables the device-mode interrupts.
+ *
+ * Fix: the wait loop for DOEPINT.EPDisabled below never incremented its
+ * loop counter, so its 10s timeout could never trigger and the loop
+ * could spin forever if the endpoint failed to disable.
+ *
+ * @param core_if Programming view of FH_otg controller
+ *
+ */
+void fh_otg_core_dev_init(fh_otg_core_if_t * core_if)
+{
+	int i;
+	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+	fh_otg_dev_if_t *dev_if = core_if->dev_if;
+	fh_otg_core_params_t *params = core_if->core_params;
+	dcfg_data_t dcfg = {.d32 = 0 };
+	depctl_data_t diepctl = {.d32 = 0 };
+	grstctl_t resetctl = {.d32 = 0 };
+	uint32_t rx_fifo_size;
+	fifosize_data_t nptxfifosize;
+	fifosize_data_t txfifosize;
+	dthrctl_data_t dthrctl;
+	fifosize_data_t ptxfifosize;
+	uint16_t rxfsiz, nptxfsiz;
+	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
+	hwcfg3_data_t hwcfg3 = {.d32 = 0 };
+	gotgctl_data_t gotgctl = {.d32 = 0 };
+
+	/* Restart the Phy Clock */
+	pcgcctl_data_t pcgcctl = {.d32 = 0 };
+	/* Restart the Phy Clock */
+	pcgcctl.b.stoppclk = 1;
+	FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+	fh_udelay(10);
+
+	/* Device configuration register */
+	init_devspd(core_if);
+	dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
+	dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0;
+	dcfg.b.perfrint = FH_DCFG_FRAME_INTERVAL_80;
+	/* Enable Device OUT NAK in case of DDMA mode */
+	if (core_if->core_params->dev_out_nak) {
+		dcfg.b.endevoutnak = 1;
+	}
+
+	if (core_if->core_params->cont_on_bna) {
+		dctl_data_t dctl = {.d32 = 0 };
+		dctl.b.encontonbna = 1;
+		FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
+	}
+	/** should be done before every reset */
+	if (core_if->otg_ver) {
+		core_if->otg_sts = 0;
+		gotgctl.b.devhnpen = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gotgctl, gotgctl.d32, 0);
+	}
+
+	FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+	/* Configure data FIFO sizes */
+	if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+		FH_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
+			   core_if->total_fifo_size);
+		FH_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n",
+			   params->dev_rx_fifo_size);
+		FH_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",
+			   params->dev_nperio_tx_fifo_size);
+
+		/* Rx FIFO */
+		FH_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
+			   FH_READ_REG32(&global_regs->grxfsiz));
+
+#ifdef FH_UTE_CFI
+		core_if->pwron_rxfsiz = FH_READ_REG32(&global_regs->grxfsiz);
+		core_if->init_rxfsiz = params->dev_rx_fifo_size;
+#endif
+		rx_fifo_size = params->dev_rx_fifo_size;
+		FH_WRITE_REG32(&global_regs->grxfsiz, rx_fifo_size);
+
+		FH_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
+			   FH_READ_REG32(&global_regs->grxfsiz));
+
+		/** Set Periodic Tx FIFO Mask all bits 0 */
+		core_if->p_tx_msk = 0;
+
+		/** Set Tx FIFO Mask all bits 0 */
+		core_if->tx_msk = 0;
+
+		if (core_if->en_multiple_tx_fifo == 0) {
+			/* Non-periodic Tx FIFO */
+			FH_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+				   FH_READ_REG32(&global_regs->gnptxfsiz));
+
+			nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
+			nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
+
+			FH_WRITE_REG32(&global_regs->gnptxfsiz,
+				       nptxfifosize.d32);
+
+			FH_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+				   FH_READ_REG32(&global_regs->gnptxfsiz));
+
+			/**@todo NGS: Fix Periodic FIFO Sizing! */
+			/*
+			 * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15.
+			 * Indexes of the FIFO size module parameters in the
+			 * dev_perio_tx_fifo_size array and the FIFO size registers in
+			 * the dptxfsiz array run from 0 to 14.
+			 */
+			/** @todo Finish debug of this */
+			ptxfifosize.b.startaddr =
+			    nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+			for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+				ptxfifosize.b.depth =
+				    params->dev_perio_tx_fifo_size[i];
+				FH_DEBUGPL(DBG_CIL,
+					   "initial dtxfsiz[%d]=%08x\n", i,
+					   FH_READ_REG32(&global_regs->dtxfsiz
+							 [i]));
+				FH_WRITE_REG32(&global_regs->dtxfsiz[i],
+					       ptxfifosize.d32);
+				FH_DEBUGPL(DBG_CIL, "new dtxfsiz[%d]=%08x\n",
+					   i,
+					   FH_READ_REG32(&global_regs->dtxfsiz
+							 [i]));
+				ptxfifosize.b.startaddr += ptxfifosize.b.depth;
+			}
+		} else {
+			/*
+			 * Tx FIFOs These FIFOs are numbered from 1 to 15.
+			 * Indexes of the FIFO size module parameters in the
+			 * dev_tx_fifo_size array and the FIFO size registers in
+			 * the dtxfsiz array run from 0 to 14.
+			 */
+
+			/* Non-periodic Tx FIFO */
+			FH_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+				   FH_READ_REG32(&global_regs->gnptxfsiz));
+
+#ifdef FH_UTE_CFI
+			core_if->pwron_gnptxfsiz =
+			    (FH_READ_REG32(&global_regs->gnptxfsiz) >> 16);
+			core_if->init_gnptxfsiz =
+			    params->dev_nperio_tx_fifo_size;
+#endif
+			nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
+			nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
+
+			FH_WRITE_REG32(&global_regs->gnptxfsiz,
+				       nptxfifosize.d32);
+
+			FH_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+				   FH_READ_REG32(&global_regs->gnptxfsiz));
+
+			txfifosize.b.startaddr =
+			    nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+
+			for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
+
+				txfifosize.b.depth =
+				    params->dev_tx_fifo_size[i];
+
+#ifdef FH_UTE_CFI
+				core_if->pwron_txfsiz[i] =
+				    (FH_READ_REG32
+				     (&global_regs->dtxfsiz[i]) >> 16);
+				core_if->init_txfsiz[i] =
+				    params->dev_tx_fifo_size[i];
+#endif
+				FH_WRITE_REG32(&global_regs->dtxfsiz[i],
+					       txfifosize.d32);
+
+				FH_DEBUGPL(DBG_CIL,
+					   "new dtxfsiz[%d]=%08x\n",
+					   i,
+					   FH_READ_REG32(&global_regs->dtxfsiz
+							 [i]));
+
+				txfifosize.b.startaddr += txfifosize.b.depth;
+			}
+
+			/* Calculating DFIFOCFG for Device mode to include RxFIFO and NPTXFIFO
+			 * Before 3.00a EpInfoBase was being configured in ep enable/disable
+			 * routine as well. Starting from 3.00a it will be set to the end of
+			 * allocated FIFO space here due to ep 0 OUT always keeping enabled
+			 */
+			gdfifocfg.d32 = FH_READ_REG32(&global_regs->gdfifocfg);
+			hwcfg3.d32 = FH_READ_REG32(&global_regs->ghwcfg3);
+			gdfifocfg.b.gdfifocfg = (FH_READ_REG32(&global_regs->ghwcfg3) >> 16);
+			FH_WRITE_REG32(&global_regs->gdfifocfg, gdfifocfg.d32);
+			if (core_if->snpsid <= OTG_CORE_REV_2_94a) {
+				rxfsiz = (FH_READ_REG32(&global_regs->grxfsiz) & 0x0000ffff);
+				nptxfsiz = (FH_READ_REG32(&global_regs->gnptxfsiz) >> 16);
+				gdfifocfg.b.epinfobase = rxfsiz + nptxfsiz;
+			} else {
+				gdfifocfg.b.epinfobase = txfifosize.b.startaddr;
+			}
+			FH_WRITE_REG32(&global_regs->gdfifocfg, gdfifocfg.d32);
+		}
+	}
+
+	/* Flush the FIFOs */
+	fh_otg_flush_tx_fifo(core_if, 0x10);	/* all Tx FIFOs */
+	fh_otg_flush_rx_fifo(core_if);
+
+	/* Flush the Learning Queue. */
+	resetctl.b.intknqflsh = 1;
+	FH_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
+
+	if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable) {
+		core_if->start_predict = 0;
+		for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
+			core_if->nextep_seq[i] = 0xff;	// 0xff - EP not active
+		}
+		core_if->nextep_seq[0] = 0;
+		core_if->first_in_nextep_seq = 0;
+		diepctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl);
+		diepctl.b.nextep = 0;
+		FH_WRITE_REG32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
+
+		/* Update IN Endpoint Mismatch Count by active IN NP EP count + 1 */
+		dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
+		dcfg.b.epmscnt = 2;
+		FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+		FH_DEBUGPL(DBG_CILV,
+			   "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
+			   __func__, core_if->first_in_nextep_seq);
+		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+			FH_DEBUGPL(DBG_CILV, "%2d ", core_if->nextep_seq[i]);
+		}
+		FH_DEBUGPL(DBG_CILV, "\n");
+	}
+
+	/* Clear all pending Device Interrupts */
+	/** @todo - if the condition needed to be checked
+	 *  or in any case all pending interrutps should be cleared?
+	 */
+	if (core_if->multiproc_int_enable) {
+		for (i = 0; i < core_if->dev_if->num_in_eps; ++i) {
+			FH_WRITE_REG32(&dev_if->dev_global_regs->
+				       diepeachintmsk[i], 0);
+		}
+
+		for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
+			FH_WRITE_REG32(&dev_if->dev_global_regs->
+				       doepeachintmsk[i], 0);
+		}
+
+		FH_WRITE_REG32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF);
+		FH_WRITE_REG32(&dev_if->dev_global_regs->deachintmsk, 0);
+	} else {
+		FH_WRITE_REG32(&dev_if->dev_global_regs->diepmsk, 0);
+		FH_WRITE_REG32(&dev_if->dev_global_regs->doepmsk, 0);
+		FH_WRITE_REG32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF);
+		FH_WRITE_REG32(&dev_if->dev_global_regs->daintmsk, 0);
+	}
+
+	/* Reset all IN endpoint registers (EP0 included). */
+	for (i = 0; i <= dev_if->num_in_eps; i++) {
+		depctl_data_t depctl;
+		depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
+		if (depctl.b.epena) {
+			depctl.d32 = 0;
+			depctl.b.epdis = 1;
+			depctl.b.snak = 1;
+		} else {
+			depctl.d32 = 0;
+		}
+
+		FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
+
+		FH_WRITE_REG32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
+		FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepdma, 0);
+		FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
+	}
+
+	/* Reset all OUT endpoint registers (EP0 excluded); an enabled OUT
+	 * endpoint must be disabled via the Global OUT NAK handshake. */
+	for (i = 1; i <= dev_if->num_out_eps; i++) {
+		depctl_data_t depctl;
+		depctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
+		if (depctl.b.epena) {
+			int j = 0;
+			dctl_data_t dctl = {.d32 = 0 };
+			gintmsk_data_t gintsts = {.d32 = 0 };
+			doepint_data_t doepint = {.d32 = 0 };
+			device_grxsts_data_t status;
+			dctl.b.sgoutnak = 1;
+			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
+			if (!core_if->dma_enable) {
+				do {
+					j++;
+					fh_udelay(10);
+					gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+					if (j == 100000) {
+						FH_ERROR("SNAK is not set during 10s\n");
+						break;
+					}
+				} while (!gintsts.b.rxstsqlvl);
+				status.d32 = FH_READ_REG32(&global_regs->grxstsp);
+				if (status.b.pktsts == FH_DSTS_GOUT_NAK)
+					FH_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
+				gintsts.d32 = 0;
+				gintsts.b.rxstsqlvl = 1;
+				FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+			}
+			j = 0;
+			do {
+				j++;
+				fh_udelay(10);
+				gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+				if (j == 100000) {
+					FH_ERROR("SNAK is not set during 10s\n");
+					break;
+				}
+			} while (!gintsts.b.goutnakeff);
+			gintsts.d32 = 0;
+			gintsts.b.goutnakeff = 1;
+			FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+			depctl.d32 = 0;
+			depctl.b.epdis = 1;
+			depctl.b.snak = 1;
+			j = 0;
+			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doepctl, depctl.d32);
+			do {
+				/* Fix: increment the counter so the timeout
+				 * below can actually fire (previously j was
+				 * never incremented here). */
+				j++;
+				fh_udelay(10);
+				doepint.d32 = FH_READ_REG32(&core_if->dev_if->
+							    out_ep_regs[i]->doepint);
+				if (j == 100000) {
+					FH_ERROR("EPDIS was not set during 10s\n");
+					break;
+				}
+			} while (!doepint.b.epdisabled);
+
+			doepint.b.epdisabled = 1;
+			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doepint, doepint.d32);
+
+			dctl.d32 = 0;
+			dctl.b.cgoutnak = 1;
+			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
+		} else {
+			depctl.d32 = 0;
+		}
+
+		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32);
+		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doeptsiz, 0);
+		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepdma, 0);
+		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepint, 0xFF);
+	}
+
+	if (core_if->en_multiple_tx_fifo && core_if->dma_enable) {
+		dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1;
+		dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1;
+		dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1;
+
+		dev_if->rx_thr_length = params->rx_thr_length;
+		dev_if->tx_thr_length = params->tx_thr_length;
+
+		dev_if->setup_desc_index = 0;
+
+		dthrctl.d32 = 0;
+		dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
+		dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
+		dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
+		dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
+		dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
+		dthrctl.b.ahb_thr_ratio = params->ahb_thr_ratio;
+
+		FH_WRITE_REG32(&dev_if->dev_global_regs->dtknqr3_dthrctl,
+			       dthrctl.d32);
+
+		FH_DEBUGPL(DBG_CIL,
+			   "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
+			   dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en,
+			   dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len,
+			   dthrctl.b.rx_thr_len);
+
+	}
+
+	fh_otg_enable_device_interrupts(core_if);
+
+	{
+		diepmsk_data_t msk = {.d32 = 0 };
+		msk.b.txfifoundrn = 1;
+		if (core_if->multiproc_int_enable) {
+			FH_MODIFY_REG32(&dev_if->dev_global_regs->
+					diepeachintmsk[0], msk.d32, msk.d32);
+		} else {
+			FH_MODIFY_REG32(&dev_if->dev_global_regs->diepmsk,
+					msk.d32, msk.d32);
+		}
+	}
+
+	if (core_if->multiproc_int_enable) {
+		/* Set NAK on Babble */
+		dctl_data_t dctl = {.d32 = 0 };
+		dctl.b.nakonbble = 1;
+		FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
+	}
+
+	if (core_if->snpsid >= OTG_CORE_REV_2_94a) {
+		dctl_data_t dctl = {.d32 = 0 };
+		dctl.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dctl);
+		dctl.b.sftdiscon = 0;
+		FH_WRITE_REG32(&dev_if->dev_global_regs->dctl, dctl.d32);
+	}
+}
+
+/**
+ * This function enables the Host mode interrupts.
+ *
+ * The sequence is: mask everything, acknowledge anything pending, then
+ * re-enable the common (mode-independent) interrupts and finally unmask
+ * the host-specific ones (disconnect, port, host channel).
+ *
+ * @param core_if Programming view of FH_otg controller
+ */
+void fh_otg_enable_host_interrupts(fh_otg_core_if_t * core_if)
+{
+	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+	gintmsk_data_t intr_mask = {.d32 = 0 };
+
+	FH_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+	/* Disable all interrupts. */
+	FH_WRITE_REG32(&global_regs->gintmsk, 0);
+
+	/* Clear any pending interrupts. */
+	FH_WRITE_REG32(&global_regs->gintsts, 0xFFFFFFFF);
+
+	/* Enable the common interrupts */
+	fh_otg_enable_common_interrupts(core_if);
+
+	/*
+	 * Enable host mode interrupts without disturbing common
+	 * interrupts.
+	 */
+	intr_mask.b.disconnect = 1;
+	intr_mask.b.portintr = 1;
+	intr_mask.b.hcintr = 1;
+
+	FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+}
+
+/**
+ * This function disables the Host Mode interrupts.
+ *
+ * Only the host-specific mask bits (SOF, port, host channel, periodic and
+ * non-periodic Tx FIFO empty) are cleared; interrupts shared with device
+ * mode are left enabled.
+ *
+ * @param core_if Programming view of FH_otg controller
+ */
+void fh_otg_disable_host_interrupts(fh_otg_core_if_t * core_if)
+{
+	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+	gintmsk_data_t intr_mask = {.d32 = 0 };
+
+	FH_DEBUGPL(DBG_CILV, "%s()\n", __func__);
+
+	/*
+	 * Disable host mode interrupts without disturbing common
+	 * interrupts.
+	 */
+	intr_mask.b.sofintr = 1;
+	intr_mask.b.portintr = 1;
+	intr_mask.b.hcintr = 1;
+	intr_mask.b.ptxfempty = 1;
+	intr_mask.b.nptxfempty = 1;
+
+	/* Clear only the bits selected above; leave the rest of GINTMSK. */
+	FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32, 0);
+}
+
+/**
+ * This function initializes the FH_otg controller registers for
+ * host mode.
+ *
+ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the
+ * request queues. Host channels are reset to ensure that they are ready for
+ * performing transfers.
+ *
+ * The register programming order below (PHY clock, HCFG, HFIR, FIFO
+ * sizes, FIFO flush, channel halt, port power, interrupts) is
+ * deliberate and must not be rearranged.
+ *
+ * @param core_if Programming view of FH_otg controller
+ *
+ */
+void fh_otg_core_host_init(fh_otg_core_if_t * core_if)
+{
+	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+	fh_otg_host_if_t *host_if = core_if->host_if;
+	fh_otg_core_params_t *params = core_if->core_params;
+	hprt0_data_t hprt0 = {.d32 = 0 };
+	fifosize_data_t nptxfifosize;
+	fifosize_data_t ptxfifosize;
+	uint16_t rxfsiz, nptxfsiz, hptxfsiz;
+	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
+	int i;
+	hcchar_data_t hcchar;
+	hcfg_data_t hcfg;
+	hfir_data_t hfir;
+	fh_otg_hc_regs_t *hc_regs;
+	int num_channels;
+	gotgctl_data_t gotgctl = {.d32 = 0 };
+	pcgcctl_data_t pcgcctl = {.d32 = 0 };
+	gintsts_data_t gintsts;
+
+	FH_DEBUGPL(DBG_CILV, "%s(%p)\n", __func__, core_if);
+
+	/* Restart the Phy Clock */
+	pcgcctl.b.stoppclk = 1;
+	FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+	fh_udelay(10);
+
+	/* OTG-2.0 style cores (otg_ver == 1) power the port here, early. */
+	if ((core_if->otg_ver == 1) && (core_if->op_state == A_HOST)) {
+		FH_PRINTF("Init: Port Power? op_state=%d\n", core_if->op_state);
+		hprt0.d32 = fh_otg_read_hprt0(core_if);
+		FH_PRINTF("Init: Power Port (%d)\n", hprt0.b.prtpwr);
+		if (hprt0.b.prtpwr == 0) {
+			hprt0.b.prtpwr = 1;
+			FH_WRITE_REG32(host_if->hprt0, hprt0.d32);
+		}
+	}
+
+	/* Initialize Host Configuration Register */
+	init_fslspclksel(core_if);
+	if (core_if->core_params->speed == FH_SPEED_PARAM_FULL) {
+		/* Restrict the port to FS/LS operation only. */
+		hcfg.d32 = FH_READ_REG32(&host_if->host_global_regs->hcfg);
+		hcfg.b.fslssupp = 1;
+		FH_WRITE_REG32(&host_if->host_global_regs->hcfg, hcfg.d32);
+
+	}
+
+	/* This bit allows dynamic reloading of the HFIR register
+	 * during runtime. This bit needs to be programmed during
+	 * initial configuration and its value must not be changed
+	 * during runtime.*/
+	if (core_if->core_params->reload_ctl == 1) {
+		hfir.d32 = FH_READ_REG32(&host_if->host_global_regs->hfir);
+		hfir.b.hfirrldctrl = 1;
+		FH_WRITE_REG32(&host_if->host_global_regs->hfir, hfir.d32);
+	}
+
+	if (core_if->core_params->dma_desc_enable) {
+		/*
+		 * Descriptor DMA requires a >= 2.90a core whose synthesis
+		 * options (GHWCFG2 op mode, GHWCFG4 desc_dma) allow it;
+		 * otherwise bail out and ask for Buffer DMA.
+		 */
+		uint8_t op_mode = core_if->hwcfg2.b.op_mode;
+		if (!
+		    (core_if->hwcfg4.b.desc_dma
+		     && (core_if->snpsid >= OTG_CORE_REV_2_90a)
+		     && ((op_mode == FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+			 || (op_mode == FH_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
+			 || (op_mode ==
+			     FH_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG)
+			 || (op_mode == FH_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)
+			 || (op_mode ==
+			     FH_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST)))) {
+
+			FH_ERROR("Host can't operate in Descriptor DMA mode.\n"
+				 "Either core version is below 2.90a or "
+				 "GHWCFG2, GHWCFG4 registers' values do not allow Descriptor DMA in host mode.\n"
+				 "To run the driver in Buffer DMA host mode set dma_desc_enable "
+				 "module parameter to 0.\n");
+			return;
+		}
+		hcfg.d32 = FH_READ_REG32(&host_if->host_global_regs->hcfg);
+		hcfg.b.descdma = 1;
+		FH_WRITE_REG32(&host_if->host_global_regs->hcfg, hcfg.d32);
+	}
+
+	/* Configure data FIFO sizes */
+	if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+		FH_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
+			   core_if->total_fifo_size);
+		FH_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n",
+			   params->host_rx_fifo_size);
+		FH_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",
+			   params->host_nperio_tx_fifo_size);
+		FH_DEBUGPL(DBG_CIL, "P Tx FIFO Size=%d\n",
+			   params->host_perio_tx_fifo_size);
+
+		/* Rx FIFO */
+		FH_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
+			   FH_READ_REG32(&global_regs->grxfsiz));
+		FH_WRITE_REG32(&global_regs->grxfsiz,
+			       params->host_rx_fifo_size);
+		FH_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
+			   FH_READ_REG32(&global_regs->grxfsiz));
+
+		/* Non-periodic Tx FIFO, placed right after the Rx FIFO. */
+		FH_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+			   FH_READ_REG32(&global_regs->gnptxfsiz));
+		nptxfifosize.b.depth = params->host_nperio_tx_fifo_size;
+		nptxfifosize.b.startaddr = params->host_rx_fifo_size;
+		FH_WRITE_REG32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+		FH_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+			   FH_READ_REG32(&global_regs->gnptxfsiz));
+
+		/* Periodic Tx FIFO, placed after the non-periodic one. */
+		FH_DEBUGPL(DBG_CIL, "initial hptxfsiz=%08x\n",
+			   FH_READ_REG32(&global_regs->hptxfsiz));
+		ptxfifosize.b.depth = params->host_perio_tx_fifo_size;
+		ptxfifosize.b.startaddr =
+		    nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+		FH_WRITE_REG32(&global_regs->hptxfsiz, ptxfifosize.d32);
+		FH_DEBUGPL(DBG_CIL, "new hptxfsiz=%08x\n",
+			   FH_READ_REG32(&global_regs->hptxfsiz));
+
+		if (core_if->en_multiple_tx_fifo) {
+			/* Global DFIFOCFG calculation for Host mode - include RxFIFO, NPTXFIFO and HPTXFIFO */
+			gdfifocfg.d32 = FH_READ_REG32(&global_regs->gdfifocfg);
+			rxfsiz = (FH_READ_REG32(&global_regs->grxfsiz) & 0x0000ffff);
+			nptxfsiz = (FH_READ_REG32(&global_regs->gnptxfsiz) >> 16);
+			hptxfsiz = (FH_READ_REG32(&global_regs->hptxfsiz) >> 16);
+			gdfifocfg.b.epinfobase = rxfsiz + nptxfsiz + hptxfsiz;
+			FH_WRITE_REG32(&global_regs->gdfifocfg, gdfifocfg.d32);
+		}
+	}
+
+	/* TODO - check this */
+	/* Clear Host Set HNP Enable in the OTG Control Register */
+	gotgctl.b.hstsethnpen = 1;
+	FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
+	/* Make sure the FIFOs are flushed. */
+	fh_otg_flush_tx_fifo(core_if, 0x10 /* all TX FIFOs */ );
+	fh_otg_flush_rx_fifo(core_if);
+
+	/* Clear Host Set HNP Enable in the OTG Control Register */
+	/* NOTE(review): intentionally repeated after the FIFO flush;
+	 * confirm against the original Synopsys reference driver. */
+	gotgctl.b.hstsethnpen = 1;
+	FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
+
+	if (!core_if->core_params->dma_desc_enable) {
+		/* Flush out any leftover queued requests. */
+		num_channels = core_if->core_params->host_channels;
+
+		/* First pass: disable every channel without enabling it. */
+		for (i = 0; i < num_channels; i++) {
+			hc_regs = core_if->host_if->hc_regs[i];
+			hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+			hcchar.b.chen = 0;
+			hcchar.b.chdis = 1;
+			hcchar.b.epdir = 0;
+			FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+		}
+
+
+		/* Halt all channels to put them into a known state. */
+		for (i = 0; i < num_channels; i++) {
+			int count = 0;
+			hc_regs = core_if->host_if->hc_regs[i];
+			hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+			/* chen + chdis together request a channel halt. */
+			hcchar.b.chen = 1;
+			hcchar.b.chdis = 1;
+			hcchar.b.epdir = 0;
+			FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+
+			if(!core_if->core_params->dma_enable) {
+				/* Slave mode: wait for the status-queue entry
+				 * the halt produces, then pop it via GRXSTSP. */
+				do {
+					gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+					if (++count > 1000) {
+						FH_ERROR
+						    ("%s: RxSTSQLVL interrupt wasn't seen for channel %d\n",
+						     __func__, i);
+						break;
+					}
+					fh_udelay(1);
+				} while (!gintsts.b.rxstsqlvl);
+
+				if (count<=1000)
+					FH_READ_REG32(&core_if->core_global_regs->grxstsp);
+				count=0;
+			}
+
+			/* Poll until the core clears chen (channel halted). */
+			FH_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
+			do {
+				hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+				if (++count > 1000) {
+					FH_ERROR
+					    ("%s: Unable to clear halt on channel %d\n",
+					     __func__, i);
+					break;
+				}
+				fh_udelay(1);
+			} while (hcchar.b.chen);
+		}
+
+	}
+
+	/* Turn on the vbus power. */
+	/* Legacy cores (otg_ver == 0) power the port here instead. */
+	if ((core_if->otg_ver == 0) && (core_if->op_state == A_HOST)) {
+		hprt0.d32 = fh_otg_read_hprt0(core_if);
+		FH_PRINTF("Init: Power Port (%d)\n", hprt0.b.prtpwr);
+		if (hprt0.b.prtpwr == 0) {
+			hprt0.b.prtpwr = 1;
+			FH_WRITE_REG32(host_if->hprt0, hprt0.d32);
+		}
+	}
+
+	fh_otg_enable_host_interrupts(core_if);
+}
+
+/**
+ * Prepares a host channel for transferring packets to/from a specific
+ * endpoint. The HCCHARn register is set up with the characteristics specified
+ * in _hc. Host channel interrupts that may need to be serviced while this
+ * transfer is in progress are enabled.
+ *
+ * The interrupt mask depends on the transfer mode: in DMA mode the core
+ * handles most protocol events itself, so only a few interrupts are
+ * needed; in Slave mode the mask is built per endpoint type.
+ *
+ * @param core_if Programming view of FH_otg controller
+ * @param hc Information needed to initialize the host channel
+ */
+void fh_otg_hc_init(fh_otg_core_if_t * core_if, fh_hc_t * hc)
+{
+	uint32_t intr_enable;
+	hcintmsk_data_t hc_intr_mask;
+	gintmsk_data_t gintmsk = {.d32 = 0 };
+	hcchar_data_t hcchar;
+	hcsplt_data_t hcsplt;
+
+	uint8_t hc_num = hc->hc_num;
+	fh_otg_host_if_t *host_if = core_if->host_if;
+	fh_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num];
+
+	/* Clear old interrupt conditions for this host channel. */
+	hc_intr_mask.d32 = 0xFFFFFFFF;
+	hc_intr_mask.b.reserved14_31 = 0;
+	FH_WRITE_REG32(&hc_regs->hcint, hc_intr_mask.d32);
+
+	/* Enable channel interrupts required for this transfer. */
+	hc_intr_mask.d32 = 0;
+	hc_intr_mask.b.chhltd = 1;
+	if (core_if->dma_enable) {
+		/* For Descriptor DMA mode core halts the channel on AHB error. Interrupt is not required */
+		if (!core_if->dma_desc_enable)
+			hc_intr_mask.b.ahberr = 1;
+		else {
+			if (hc->ep_type == FH_OTG_EP_TYPE_ISOC)
+				hc_intr_mask.b.xfercompl = 1;
+		}
+
+		/* Error recovery: watch ACK/NAK/toggle to re-arm the
+		 * transfer after a transaction error. */
+		if (hc->error_state && !hc->do_split &&
+		    hc->ep_type != FH_OTG_EP_TYPE_ISOC) {
+			hc_intr_mask.b.ack = 1;
+			if (hc->ep_is_in) {
+				hc_intr_mask.b.datatglerr = 1;
+				if (hc->ep_type != FH_OTG_EP_TYPE_INTR) {
+					hc_intr_mask.b.nak = 1;
+				}
+			}
+		}
+	} else {
+		/* Slave mode: choose interrupts by endpoint type. */
+		switch (hc->ep_type) {
+		case FH_OTG_EP_TYPE_CONTROL:
+		case FH_OTG_EP_TYPE_BULK:
+			hc_intr_mask.b.xfercompl = 1;
+			hc_intr_mask.b.stall = 1;
+			hc_intr_mask.b.xacterr = 1;
+			hc_intr_mask.b.datatglerr = 1;
+			if (hc->ep_is_in) {
+				hc_intr_mask.b.bblerr = 1;
+			} else {
+				hc_intr_mask.b.nak = 1;
+				hc_intr_mask.b.nyet = 1;
+				if (hc->do_ping) {
+					hc_intr_mask.b.ack = 1;
+				}
+			}
+
+			if (hc->do_split) {
+				hc_intr_mask.b.nak = 1;
+				if (hc->complete_split) {
+					hc_intr_mask.b.nyet = 1;
+				} else {
+					hc_intr_mask.b.ack = 1;
+				}
+			}
+
+			if (hc->error_state) {
+				hc_intr_mask.b.ack = 1;
+			}
+			break;
+		case FH_OTG_EP_TYPE_INTR:
+			hc_intr_mask.b.xfercompl = 1;
+			hc_intr_mask.b.nak = 1;
+			hc_intr_mask.b.stall = 1;
+			hc_intr_mask.b.xacterr = 1;
+			hc_intr_mask.b.datatglerr = 1;
+			hc_intr_mask.b.frmovrun = 1;
+
+			if (hc->ep_is_in) {
+				hc_intr_mask.b.bblerr = 1;
+			}
+			if (hc->error_state) {
+				hc_intr_mask.b.ack = 1;
+			}
+			if (hc->do_split) {
+				if (hc->complete_split) {
+					hc_intr_mask.b.nyet = 1;
+				} else {
+					hc_intr_mask.b.ack = 1;
+				}
+			}
+			break;
+		case FH_OTG_EP_TYPE_ISOC:
+			hc_intr_mask.b.xfercompl = 1;
+			hc_intr_mask.b.frmovrun = 1;
+			hc_intr_mask.b.ack = 1;
+
+			if (hc->ep_is_in) {
+				hc_intr_mask.b.xacterr = 1;
+				hc_intr_mask.b.bblerr = 1;
+			}
+			break;
+		}
+	}
+	FH_WRITE_REG32(&hc_regs->hcintmsk, hc_intr_mask.d32);
+
+	/* Enable the top level host channel interrupt. */
+	intr_enable = (1 << hc_num);
+	FH_MODIFY_REG32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
+
+	/* Make sure host channel interrupts are enabled. */
+	gintmsk.b.hcintr = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
+
+	/*
+	 * Program the HCCHARn register with the endpoint characteristics for
+	 * the current transfer.
+	 */
+	hcchar.d32 = 0;
+	hcchar.b.devaddr = hc->dev_addr;
+	hcchar.b.epnum = hc->ep_num;
+	hcchar.b.epdir = hc->ep_is_in;
+	hcchar.b.lspddev = (hc->speed == FH_OTG_EP_SPEED_LOW);
+	hcchar.b.eptype = hc->ep_type;
+	hcchar.b.mps = hc->max_packet;
+
+	FH_WRITE_REG32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
+
+	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+	FH_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr);
+	FH_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum);
+	FH_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir);
+	FH_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev);
+	FH_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype);
+	FH_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
+	FH_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt);
+
+	/*
+	 * Program the HCSPLIT register for SPLITs
+	 */
+	hcsplt.d32 = 0;
+	if (hc->do_split) {
+		FH_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n",
+			   hc->hc_num,
+			   hc->complete_split ? "CSPLIT" : "SSPLIT");
+		hcsplt.b.compsplt = hc->complete_split;
+		hcsplt.b.xactpos = hc->xact_pos;
+		hcsplt.b.hubaddr = hc->hub_addr;
+		hcsplt.b.prtaddr = hc->port_addr;
+		FH_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split);
+		FH_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos);
+		FH_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr);
+		FH_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr);
+		FH_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in);
+		FH_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
+		FH_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len);
+	}
+	/* Written even for non-split transfers to clear any stale SPLIT setup. */
+	FH_WRITE_REG32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
+
+}
+
+/**
+ * Attempts to halt a host channel. This function should only be called in
+ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
+ * normal circumstances in DMA mode, the controller halts the channel when the
+ * transfer is complete or a condition occurs that requires application
+ * intervention.
+ *
+ * In slave mode, checks for a free request queue entry, then sets the Channel
+ * Enable and Channel Disable bits of the Host Channel Characteristics
+ * register of the specified channel to intiate the halt. If there is no free
+ * request queue entry, sets only the Channel Disable bit of the HCCHARn
+ * register to flush requests for this channel. In the latter case, sets a
+ * flag to indicate that the host channel needs to be halted when a request
+ * queue slot is open.
+ *
+ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
+ * HCCHARn register. The controller ensures there is space in the request
+ * queue before submitting the halt request.
+ *
+ * Some time may elapse before the core flushes any posted requests for this
+ * host channel and halts. The Channel Halted interrupt handler completes the
+ * deactivation of the host channel.
+ *
+ * @param core_if Controller register interface.
+ * @param hc Host channel to halt.
+ * @param halt_status Reason for halting the channel.
+ */
+void fh_otg_hc_halt(fh_otg_core_if_t * core_if,
+		    fh_hc_t * hc, fh_otg_halt_status_e halt_status)
+{
+	gnptxsts_data_t nptxsts;
+	hptxsts_data_t hptxsts;
+	hcchar_data_t hcchar;
+	fh_otg_hc_regs_t *hc_regs;
+	fh_otg_core_global_regs_t *global_regs;
+	fh_otg_host_global_regs_t *host_global_regs;
+
+	hc_regs = core_if->host_if->hc_regs[hc->hc_num];
+	global_regs = core_if->core_global_regs;
+	host_global_regs = core_if->host_if->host_global_regs;
+
+	FH_ASSERT(!(halt_status == FH_OTG_HC_XFER_NO_HALT_STATUS),
+		  "halt_status = %d\n", halt_status);
+
+	if (halt_status == FH_OTG_HC_XFER_URB_DEQUEUE ||
+	    halt_status == FH_OTG_HC_XFER_AHB_ERR) {
+		/*
+		 * Disable all channel interrupts except Ch Halted. The QTD
+		 * and QH state associated with this transfer has been cleared
+		 * (in the case of URB_DEQUEUE), so the channel needs to be
+		 * shut down carefully to prevent crashes.
+		 */
+		int wtd = 10000;	/* bounded busy-wait, no delay per spin */
+		hcintmsk_data_t hcintmsk;
+		hcintmsk.d32 = 0;
+		hcintmsk.b.chhltd = 1;
+		FH_WRITE_REG32(&hc_regs->hcintmsk, hcintmsk.d32);
+
+		/*
+		 * Make sure no other interrupts besides halt are currently
+		 * pending. Handling another interrupt could cause a crash due
+		 * to the QTD and QH state.
+		 */
+		FH_WRITE_REG32(&hc_regs->hcint, ~hcintmsk.d32);
+
+		/*
+		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
+		 * even if the channel was already halted for some other
+		 * reason.
+		 */
+		hc->halt_status = halt_status;
+
+		/* Give the channel a chance to go idle on its own. */
+		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+		while (wtd--) {
+			hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+			if (hcchar.b.chen == 0)
+				break;
+		}
+
+		if (hcchar.b.chen == 0) {
+			/*
+			 * The channel is either already halted or it hasn't
+			 * started yet. In DMA mode, the transfer may halt if
+			 * it finishes normally or a condition occurs that
+			 * requires driver intervention. Don't want to halt
+			 * the channel again. In either Slave or DMA mode,
+			 * it's possible that the transfer has been assigned
+			 * to a channel, but not started yet when an URB is
+			 * dequeued. Don't want to halt a channel that hasn't
+			 * started yet.
+			 */
+			return;
+		}
+	}
+	if (hc->halt_pending) {
+		/*
+		 * A halt has already been issued for this channel. This might
+		 * happen when a transfer is aborted by a higher level in
+		 * the stack.
+		 */
+#ifdef DEBUG
+		FH_PRINTF
+		    ("*** %s: Channel %d, _hc->halt_pending already set ***\n",
+		     __func__, hc->hc_num);
+
+#endif
+		return;
+	}
+
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+	/* No need to set the bit in DDMA for disabling the channel */
+	//TODO check it everywhere channel is disabled
+	if (!core_if->core_params->dma_desc_enable)
+		hcchar.b.chen = 1;
+	hcchar.b.chdis = 1;
+
+	if (!core_if->dma_enable) {
+		/* Check for space in the request queue to issue the halt. */
+		if (hc->ep_type == FH_OTG_EP_TYPE_CONTROL ||
+		    hc->ep_type == FH_OTG_EP_TYPE_BULK) {
+			nptxsts.d32 = FH_READ_REG32(&global_regs->gnptxsts);
+			if (nptxsts.b.nptxqspcavail == 0) {
+				hcchar.b.chen = 0;
+			}
+		} else {
+			/* Periodic (INTR/ISOC) transfers use the periodic
+			 * Tx queue instead. */
+			hptxsts.d32 =
+			    FH_READ_REG32(&host_global_regs->hptxsts);
+			if ((hptxsts.b.ptxqspcavail == 0)
+			    || (core_if->queuing_high_bandwidth)) {
+				hcchar.b.chen = 0;
+			}
+		}
+	}
+	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+
+	hc->halt_status = halt_status;
+
+	if (hcchar.b.chen) {
+		/* Halt was issued; wait for the Channel Halted interrupt. */
+		hc->halt_pending = 1;
+		hc->halt_on_queue = 0;
+	} else {
+		/* No queue space: re-issue the halt when a slot opens. */
+		hc->halt_on_queue = 1;
+	}
+
+	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+	FH_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32);
+	FH_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending);
+	FH_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue);
+	FH_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status);
+
+	return;
+}
+
+/**
+ * Releases a host channel after its transfer has finished: marks the
+ * channel as idle, masks all of its interrupts and acknowledges any that
+ * are still pending.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param hc Host channel being released.
+ */
+void fh_otg_hc_cleanup(fh_otg_core_if_t * core_if, fh_hc_t * hc)
+{
+	fh_otg_hc_regs_t *regs = core_if->host_if->hc_regs[hc->hc_num];
+
+	hc->xfer_started = 0;
+
+	/* Mask every channel interrupt, then ack anything left pending. */
+	FH_WRITE_REG32(&regs->hcintmsk, 0);
+	FH_WRITE_REG32(&regs->hcint, 0xFFFFFFFF);
+#ifdef DEBUG
+	/* The per-channel transfer watchdog is no longer needed. */
+	FH_TIMER_CANCEL(core_if->hc_xfer_timer[hc->hc_num]);
+#endif
+}
+
+/**
+ * Sets the channel property that indicates in which frame a periodic transfer
+ * should occur. This is always set to the _next_ frame. This function has no
+ * effect on non-periodic transfers.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param hc Identifies the host channel to set up and its properties.
+ * @param hcchar Current value of the HCCHAR register for the specified host
+ * channel.
+ */
+static inline void hc_set_even_odd_frame(fh_otg_core_if_t * core_if,
+					 fh_hc_t * hc, hcchar_data_t * hcchar)
+{
+	hfnum_data_t hfnum;
+
+	/* Only interrupt and isochronous transfers are frame-scheduled. */
+	if (hc->ep_type != FH_OTG_EP_TYPE_INTR &&
+	    hc->ep_type != FH_OTG_EP_TYPE_ISOC)
+		return;
+
+	hfnum.d32 =
+	    FH_READ_REG32(&core_if->host_if->host_global_regs->hfnum);
+
+	/* The _next_ frame is odd exactly when the current one is even. */
+	hcchar->b.oddfrm = !(hfnum.b.frnum & 0x1);
+#ifdef DEBUG
+	/* Collect SSPLIT scheduling statistics for interrupt endpoints. */
+	if (hc->ep_type == FH_OTG_EP_TYPE_INTR && hc->do_split
+	    && !hc->complete_split) {
+		switch (hfnum.b.frnum & 0x7) {
+		case 7:
+			core_if->hfnum_7_samples++;
+			core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
+			break;
+		case 0:
+			core_if->hfnum_0_samples++;
+			core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
+			break;
+		default:
+			core_if->hfnum_other_samples++;
+			core_if->hfnum_other_frrem_accum +=
+			    hfnum.b.frrem;
+			break;
+		}
+	}
+#endif
+}
+
+#ifdef DEBUG
+/**
+ * Debug timer callback fired when a host channel transfer takes too long.
+ * Logs the channel number and the HCCHAR value the transfer was started
+ * with.
+ *
+ * @param ptr Pointer to the hc_xfer_info_t for the timed-out transfer.
+ */
+void hc_xfer_timeout(void *ptr)
+{
+	hc_xfer_info_t *xfer_info;
+	int hc_num;
+
+	/* Fix: the original only guarded the assignment, then dereferenced
+	 * xfer_info unconditionally - a NULL ptr crashed here. */
+	if (!ptr) {
+		FH_ERROR("%s: ptr is NULL\n", __func__);
+		return;
+	}
+	xfer_info = (hc_xfer_info_t *) ptr;
+
+	if (!xfer_info->hc) {
+		FH_ERROR("xfer_info->hc = %p\n", xfer_info->hc);
+		return;
+	}
+
+	hc_num = xfer_info->hc->hc_num;
+	FH_WARN("%s: timeout on channel %d\n", __func__, hc_num);
+	FH_WARN(" start_hcchar_val 0x%08x\n",
+		xfer_info->core_if->start_hcchar_val[hc_num]);
+}
+#endif
+
+/**
+ * Timer callback fired when an endpoint transfer times out. Marks the
+ * transfer state as timed out (state = 2), unmasks the Global OUT NAK
+ * effective interrupt if it is masked, and requests a Global OUT NAK
+ * (SGOUTNak) unless one is already in effect, so the endpoint can be
+ * disabled safely by the interrupt handler.
+ *
+ * @param ptr Pointer to the ep_xfer_info_t for the timed-out endpoint.
+ */
+void ep_xfer_timeout(void *ptr)
+{
+	ep_xfer_info_t *xfer_info;
+	int ep_num;
+	dctl_data_t dctl = {.d32 = 0 };
+	gintsts_data_t gintsts = {.d32 = 0 };
+	gintmsk_data_t gintmsk = {.d32 = 0 };
+
+	/* Fix: the original only guarded the assignment, then dereferenced
+	 * xfer_info unconditionally - a NULL ptr crashed here. */
+	if (!ptr) {
+		FH_ERROR("%s: ptr is NULL\n", __func__);
+		return;
+	}
+	xfer_info = (ep_xfer_info_t *) ptr;
+
+	if (!xfer_info->ep) {
+		FH_ERROR("xfer_info->ep = %p\n", xfer_info->ep);
+		return;
+	}
+
+	ep_num = xfer_info->ep->num;
+	FH_WARN("%s: timeout on endpoint %d\n", __func__, ep_num);
+	/* Put the state to 2 as it was timed out */
+	xfer_info->state = 2;
+
+	dctl.d32 =
+	    FH_READ_REG32(&xfer_info->core_if->dev_if->dev_global_regs->dctl);
+	gintsts.d32 =
+	    FH_READ_REG32(&xfer_info->core_if->core_global_regs->gintsts);
+	gintmsk.d32 =
+	    FH_READ_REG32(&xfer_info->core_if->core_global_regs->gintmsk);
+
+	if (!gintmsk.b.goutnakeff) {
+		/* Unmask the Global OUT NAK effective interrupt. */
+		gintmsk.b.goutnakeff = 1;
+		FH_WRITE_REG32(&xfer_info->core_if->core_global_regs->gintmsk,
+			       gintmsk.d32);
+	}
+
+	/* Request a Global OUT NAK unless it is already in effect. */
+	if (!gintsts.b.goutnakeff) {
+		dctl.b.sgoutnak = 1;
+	}
+	FH_WRITE_REG32(&xfer_info->core_if->dev_if->dev_global_regs->dctl,
+		       dctl.d32);
+
+}
+
+/**
+ * Sets up the initial data PID for an isochronous transfer, based on the
+ * channel speed, direction and transactions-per-microframe count.
+ */
+void set_pid_isoc(fh_hc_t * hc)
+{
+	if (hc->speed != FH_OTG_EP_SPEED_HIGH) {
+		/* Full/low speed ISOC always starts with DATA0. */
+		hc->data_pid_start = FH_OTG_HC_PID_DATA0;
+		return;
+	}
+
+	if (hc->ep_is_in) {
+		/* High-speed IN: the PID depends on the number of
+		 * transactions per microframe. */
+		switch (hc->multi_count) {
+		case 1:
+			hc->data_pid_start = FH_OTG_HC_PID_DATA0;
+			break;
+		case 2:
+			hc->data_pid_start = FH_OTG_HC_PID_DATA1;
+			break;
+		default:
+			hc->data_pid_start = FH_OTG_HC_PID_DATA2;
+			break;
+		}
+	} else {
+		/* High-speed OUT: a single packet uses DATA0, multiple
+		 * transactions start with MDATA. */
+		hc->data_pid_start = (hc->multi_count == 1) ?
+		    FH_OTG_HC_PID_DATA0 : FH_OTG_HC_PID_MDATA;
+	}
+}
+
+/**
+ * This function does the setup for a data transfer for a host channel and
+ * starts the transfer. May be called in either Slave mode or DMA mode. In
+ * Slave mode, the caller must ensure that there is sufficient space in the
+ * request queue and Tx Data FIFO.
+ *
+ * For an OUT transfer in Slave mode, it loads a data packet into the
+ * appropriate FIFO. If necessary, additional data packets will be loaded in
+ * the Host ISR.
+ *
+ * For an IN transfer in Slave mode, a data packet is requested. The data
+ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
+ * additional data packets are requested in the Host ISR.
+ *
+ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
+ * register along with a packet count of 1 and the channel is enabled. This
+ * causes a single PING transaction to occur. Other fields in HCTSIZ are
+ * simply set to 0 since no data transfer occurs in this case.
+ *
+ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
+ * all the information required to perform the subsequent data transfer. In
+ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
+ * controller performs the entire PING protocol, then starts the data
+ * transfer.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param hc Information needed to initialize the host channel. The xfer_len
+ * value may be reduced to accommodate the max widths of the XferSize and
+ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed
+ * to reflect the final xfer_len value.
+ */
+void fh_otg_hc_start_transfer(fh_otg_core_if_t * core_if, fh_hc_t * hc)
+{
+	hcchar_data_t hcchar;
+	hctsiz_data_t hctsiz;
+	uint16_t num_packets;
+	uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size;
+	uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count;
+	fh_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
+
+	hctsiz.d32 = 0;
+
+	if (hc->do_ping) {
+		/* Slave-mode PING is a dedicated code path; DMA-mode PING
+		 * just sets the Do Ping bit and continues below. */
+		if (!core_if->dma_enable) {
+			fh_otg_hc_do_ping(core_if, hc);
+			hc->xfer_started = 1;
+			return;
+		} else {
+			hctsiz.b.dopng = 1;
+		}
+	}
+
+	if (hc->do_split) {
+		/* Split transactions carry at most one packet per frame. */
+		num_packets = 1;
+
+		if (hc->complete_split && !hc->ep_is_in) {
+			/* For CSPLIT OUT Transfer, set the size to 0 so the
+			 * core doesn't expect any data written to the FIFO */
+			hc->xfer_len = 0;
+		} else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) {
+			hc->xfer_len = hc->max_packet;
+		} else if (!hc->ep_is_in && (hc->xfer_len > 188)) {
+			/* 188 bytes is the per-microframe SSPLIT OUT limit. */
+			hc->xfer_len = 188;
+		}
+
+		hctsiz.b.xfersize = hc->xfer_len;
+	} else {
+		/*
+		 * Ensure that the transfer length and packet count will fit
+		 * in the widths allocated for them in the HCTSIZn register.
+		 */
+		if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
+		    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
+			/*
+			 * Make sure the transfer size is no larger than one
+			 * (micro)frame's worth of data. (A check was done
+			 * when the periodic transfer was accepted to ensure
+			 * that a (micro)frame's worth of data can be
+			 * programmed into a channel.)
+			 */
+			uint32_t max_periodic_len =
+			    hc->multi_count * hc->max_packet;
+			if (hc->xfer_len > max_periodic_len) {
+				hc->xfer_len = max_periodic_len;
+			} else {
+			}
+		} else if (hc->xfer_len > max_hc_xfer_size) {
+			/* Make sure that xfer_len is a multiple of max packet size. */
+			hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1;
+		}
+
+		if (hc->xfer_len > 0) {
+			/* Round up to whole packets, then clamp to the
+			 * PktCnt field width. */
+			num_packets =
+			    (hc->xfer_len + hc->max_packet -
+			     1) / hc->max_packet;
+			if (num_packets > max_hc_pkt_count) {
+				num_packets = max_hc_pkt_count;
+				hc->xfer_len = num_packets * hc->max_packet;
+			}
+		} else {
+			/* Need 1 packet for transfer length of 0. */
+			num_packets = 1;
+		}
+
+		if (hc->ep_is_in) {
+			/* Always program an integral # of max packets for IN transfers. */
+			hc->xfer_len = num_packets * hc->max_packet;
+		}
+
+		if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
+		    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
+			/*
+			 * Make sure that the multi_count field matches the
+			 * actual transfer length.
+			 */
+			hc->multi_count = num_packets;
+		}
+
+		if (hc->ep_type == FH_OTG_EP_TYPE_ISOC)
+			set_pid_isoc(hc);
+
+		hctsiz.b.xfersize = hc->xfer_len;
+	}
+
+	hc->start_pkt_count = num_packets;
+	hctsiz.b.pktcnt = num_packets;
+	hctsiz.b.pid = hc->data_pid_start;
+	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
+
+	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+	FH_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize);
+	FH_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt);
+	FH_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid);
+
+	if (core_if->dma_enable) {
+		/* Use the bounce buffer when the URB buffer was not
+		 * suitably aligned for DMA. */
+		fh_dma_t dma_addr;
+		if (hc->align_buff) {
+			dma_addr = hc->align_buff;
+		} else {
+			dma_addr = ((unsigned long)hc->xfer_buff & 0xffffffff);
+		}
+		FH_WRITE_REG32(&hc_regs->hcdma, dma_addr);
+	}
+
+	/* Start the split */
+	if (hc->do_split) {
+		hcsplt_data_t hcsplt;
+		hcsplt.d32 = FH_READ_REG32(&hc_regs->hcsplt);
+		hcsplt.b.spltena = 1;
+		FH_WRITE_REG32(&hc_regs->hcsplt, hcsplt.d32);
+	}
+
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	hcchar.b.multicnt = hc->multi_count;
+	hc_set_even_odd_frame(core_if, hc, &hcchar);
+#ifdef DEBUG
+	core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
+	if (hcchar.b.chdis) {
+		FH_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+			__func__, hc->hc_num, hcchar.d32);
+	}
+#endif
+
+	/* Set host channel enable after all other setup is complete. */
+	hcchar.b.chen = 1;
+	hcchar.b.chdis = 0;
+	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+
+	hc->xfer_started = 1;
+	hc->requests++;
+
+	if (!core_if->dma_enable && !hc->ep_is_in && hc->xfer_len > 0) {
+		/* Load OUT packet into the appropriate Tx FIFO. */
+		fh_otg_hc_write_packet(core_if, hc);
+	}
+#ifdef DEBUG
+	if (hc->ep_type != FH_OTG_EP_TYPE_INTR) {
+		core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
+		core_if->hc_xfer_info[hc->hc_num].hc = hc;
+
+		/* Start a timer for this transfer. */
+		FH_TIMER_SCHEDULE(core_if->hc_xfer_timer[hc->hc_num], 10000);
+	}
+#endif
+}
+
+/**
+ * This function does the setup for a data transfer for a host channel
+ * and starts the transfer in Descriptor DMA mode.
+ *
+ * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
+ * Sets PID and NTD values. For periodic transfers
+ * initializes SCHED_INFO field with micro-frame bitmap.
+ *
+ * Initializes HCDMA register with descriptor list address and CTD value
+ * then starts the transfer via enabling the channel.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param hc Information needed to initialize the host channel.
+ */
+void fh_otg_hc_start_transfer_ddma(fh_otg_core_if_t * core_if, fh_hc_t * hc)
+{
+	fh_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
+	hcchar_data_t hcchar;
+	hctsiz_data_t hctsiz;
+	hcdma_data_t hcdma;
+
+	hctsiz.d32 = 0;
+
+	if (hc->do_ping)
+		hctsiz.b_ddma.dopng = 1;
+
+	if (hc->ep_type == FH_OTG_EP_TYPE_ISOC)
+		set_pid_isoc(hc);
+
+	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
+	hctsiz.b_ddma.pid = hc->data_pid_start;
+	hctsiz.b_ddma.ntd = hc->ntd - 1;	/* 0 - 1 descriptor, 1 - 2 descriptors, etc. */
+	hctsiz.b_ddma.schinfo = hc->schinfo;	/* Non-zero only for high-speed interrupt endpoints */
+
+	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+	/* NOTE(review): prints hctsiz.b.pid although the field was written
+	 * via b_ddma.pid - confirm both union layouts place pid in the
+	 * same bits, otherwise this debug value is wrong. */
+	FH_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid);
+	FH_DEBUGPL(DBG_HCDV, " NTD: %d\n", hctsiz.b_ddma.ntd);
+
+	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Descriptor list base must be 2KB aligned; only bits [31:11]
+	 * are programmed. */
+	hcdma.d32 = 0;
+	hcdma.b.dma_addr = ((uint32_t) hc->desc_list_addr) >> 11;
+
+	/* Always start from first descriptor. */
+	hcdma.b.ctd = 0;
+	FH_WRITE_REG32(&hc_regs->hcdma, hcdma.d32);
+
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	hcchar.b.multicnt = hc->multi_count;
+
+#ifdef DEBUG
+	core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
+	if (hcchar.b.chdis) {
+		FH_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+			__func__, hc->hc_num, hcchar.d32);
+	}
+#endif
+
+	/* Set host channel enable after all other setup is complete. */
+	hcchar.b.chen = 1;
+	hcchar.b.chdis = 0;
+
+	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+
+	hc->xfer_started = 1;
+	hc->requests++;
+
+#ifdef DEBUG
+	if ((hc->ep_type != FH_OTG_EP_TYPE_INTR)
+	    && (hc->ep_type != FH_OTG_EP_TYPE_ISOC)) {
+		core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
+		core_if->hc_xfer_info[hc->hc_num].hc = hc;
+		/* Start a timer for this transfer. */
+		FH_TIMER_SCHEDULE(core_if->hc_xfer_timer[hc->hc_num], 10000);
+	}
+#endif
+
+}
+
+/**
+ * This function continues a data transfer that was started by previous call
+ * to <code>fh_otg_hc_start_transfer</code>. The caller must ensure there is
+ * sufficient space in the request queue and Tx Data FIFO. This function
+ * should only be called in Slave mode. In DMA mode, the controller acts
+ * autonomously to complete transfers programmed to a host channel.
+ *
+ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
+ * if there is any data remaining to be queued. For an IN transfer, another
+ * data packet is always requested. For the SETUP phase of a control transfer,
+ * this function does nothing.
+ *
+ * @return 1 if a new request is queued, 0 if no more requests are required
+ * for this transfer.
+ */
+int fh_otg_hc_continue_transfer(fh_otg_core_if_t * core_if, fh_hc_t * hc)
+{
+ FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+
+ if (hc->do_split) {
+ /* SPLITs always queue just once per channel */
+ return 0;
+ } else if (hc->data_pid_start == FH_OTG_HC_PID_SETUP) {
+ /* SETUPs are queued only once since they can't be NAKed. */
+ return 0;
+ } else if (hc->ep_is_in) {
+ /*
+ * Always queue another request for other IN transfers. If
+ * back-to-back INs are issued and NAKs are received for both,
+ * the driver may still be processing the first NAK when the
+ * second NAK is received. When the interrupt handler clears
+ * the NAK interrupt for the first NAK, the second NAK will
+ * not be seen. So we can't depend on the NAK interrupt
+ * handler to requeue a NAKed request. Instead, IN requests
+ * are issued each time this function is called. When the
+ * transfer completes, the extra requests for the channel will
+ * be flushed.
+ */
+ hcchar_data_t hcchar;
+ fh_otg_hc_regs_t *hc_regs =
+ core_if->host_if->hc_regs[hc->hc_num];
+
+ hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+ hc_set_even_odd_frame(core_if, hc, &hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ FH_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n",
+ hcchar.d32);
+ FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+ hc->requests++;
+ return 1;
+ } else {
+ /* OUT transfers. */
+ if (hc->xfer_count < hc->xfer_len) {
+ if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
+ hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
+ hcchar_data_t hcchar;
+ fh_otg_hc_regs_t *hc_regs;
+ hc_regs = core_if->host_if->hc_regs[hc->hc_num];
+ hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+ hc_set_even_odd_frame(core_if, hc, &hcchar);
+ }
+
+ /* Load OUT packet into the appropriate Tx FIFO. */
+ fh_otg_hc_write_packet(core_if, hc);
+ hc->requests++;
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+}
+
+/**
+ * Starts a PING transfer. This function should only be called in Slave mode.
+ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled.
+ */
+void fh_otg_hc_do_ping(fh_otg_core_if_t * core_if, fh_hc_t * hc)
+{
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ fh_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
+
+ FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
+
+ hctsiz.d32 = 0;
+ hctsiz.b.dopng = 1;
+ hctsiz.b.pktcnt = 1;
+ FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
+
+ hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+}
+
+/*
+ * This function writes a packet into the Tx FIFO associated with the Host
+ * Channel. For a channel associated with a non-periodic EP, the non-periodic
+ * Tx FIFO is written. For a channel associated with a periodic EP, the
+ * periodic Tx FIFO is written. This function should only be called in Slave
+ * mode.
+ *
+ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by
+ * the number of bytes written to the Tx FIFO.
+ */
+void fh_otg_hc_write_packet(fh_otg_core_if_t * core_if, fh_hc_t * hc)
+{
+ uint32_t i;
+ uint32_t remaining_count;
+ uint32_t byte_count;
+ uint32_t dword_count;
+
+ uint32_t *data_buff = (uint32_t *) (hc->xfer_buff);
+ uint32_t *data_fifo = core_if->data_fifo[hc->hc_num];
+
+ remaining_count = hc->xfer_len - hc->xfer_count;
+ if (remaining_count > hc->max_packet) {
+ byte_count = hc->max_packet;
+ } else {
+ byte_count = remaining_count;
+ }
+
+ dword_count = (byte_count + 3) / 4;
+
+ if ((((unsigned long)data_buff) & 0x3) == 0) {
+ /* xfer_buff is DWORD aligned. */
+ for (i = 0; i < dword_count; i++, data_buff++) {
+ FH_WRITE_REG32(data_fifo, *data_buff);
+ }
+ } else {
+ /* xfer_buff is not DWORD aligned. */
+ for (i = 0; i < dword_count; i++, data_buff++) {
+ uint32_t data;
+ data =
+ (data_buff[0] | data_buff[1] << 8 | data_buff[2] <<
+ 16 | data_buff[3] << 24);
+ FH_WRITE_REG32(data_fifo, data);
+ }
+ }
+
+ hc->xfer_count += byte_count;
+ hc->xfer_buff += byte_count;
+}
+
+/**
+ * Gets the current USB frame number. This is the frame number from the last
+ * SOF packet.
+ */
+uint32_t fh_otg_get_frame_number(fh_otg_core_if_t * core_if)
+{
+ dsts_data_t dsts;
+ dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+
+ /* read current frame/microframe number from DSTS register */
+ return dsts.b.soffn;
+}
+
+/**
+ * Calculates and gets the frame Interval value of the HFIR register according
+ * to the PHY type and speed. The application can modify the value of the HFIR register only after
+ * the Port Enable bit of the Host Port Control and Status register
+ * (HPRT.PrtEnaPort) has been set.
+*/
+
+uint32_t calc_frame_interval(fh_otg_core_if_t * core_if)
+{
+ gusbcfg_data_t usbcfg;
+ hwcfg2_data_t hwcfg2;
+ hprt0_data_t hprt0;
+ int clock = 60; // default value
+ usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+ hwcfg2.d32 = FH_READ_REG32(&core_if->core_global_regs->ghwcfg2);
+ hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
+ if (!usbcfg.b.physel && usbcfg.b.ulpi_utmi_sel && !usbcfg.b.phyif)
+ clock = 60;
+ if (usbcfg.b.physel && hwcfg2.b.fs_phy_type == 3)
+ clock = 48;
+ if (!usbcfg.b.phylpwrclksel && !usbcfg.b.physel &&
+ !usbcfg.b.ulpi_utmi_sel && usbcfg.b.phyif)
+ clock = 30;
+ if (!usbcfg.b.phylpwrclksel && !usbcfg.b.physel &&
+ !usbcfg.b.ulpi_utmi_sel && !usbcfg.b.phyif)
+ clock = 60;
+ if (usbcfg.b.phylpwrclksel && !usbcfg.b.physel &&
+ !usbcfg.b.ulpi_utmi_sel && usbcfg.b.phyif)
+ clock = 48;
+ if (usbcfg.b.physel && !usbcfg.b.phyif && hwcfg2.b.fs_phy_type == 2)
+ clock = 48;
+ if (usbcfg.b.physel && hwcfg2.b.fs_phy_type == 1)
+ clock = 48;
+ if (hprt0.b.prtspd == 0)
+ /* High speed case */
+ return 125 * clock;
+ else
+ /* FS/LS case */
+ return 1000 * clock;
+}
+
+/**
+ * This function reads a setup packet from the Rx FIFO into the destination
+ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl)
+ * Interrupt routine when a SETUP packet has been received in Slave mode.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param dest Destination buffer for packet data.
+ */
+void fh_otg_read_setup_packet(fh_otg_core_if_t * core_if, uint32_t * dest)
+{
+ device_grxsts_data_t status;
+ /* Get the 8 bytes of a setup transaction data */
+
+ /* Pop 2 DWORDS off the receive data FIFO into memory */
+ dest[0] = FH_READ_REG32(core_if->data_fifo[0]);
+ dest[1] = FH_READ_REG32(core_if->data_fifo[0]);
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a && core_if->snpsid < OTG_CORE_REV_3_30a) {
+ status.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->grxstsp);
+ FH_DEBUGPL(DBG_ANY,
+ "EP:%d BCnt:%d " "pktsts:%x Frame:%d(0x%0x)\n",
+ status.b.epnum, status.b.bcnt, status.b.pktsts,
+ status.b.fn, status.b.fn);
+ }
+}
+
+/**
+ * This function enables EP0 OUT to receive SETUP packets and configures EP0
+ * IN for transmitting packets. It is normally called when the
+ * "Enumeration Done" interrupt occurs.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP0 data.
+ */
+void fh_otg_ep0_activate(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ dsts_data_t dsts;
+ depctl_data_t diepctl;
+ depctl_data_t doepctl;
+ dctl_data_t dctl = {.d32 = 0 };
+
+ ep->stp_rollover = 0;
+ /* Read the Device Status and Endpoint 0 Control registers */
+ dsts.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dsts);
+ diepctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl);
+ doepctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl);
+
+ /* Set the MPS of the IN EP based on the enumeration speed */
+ switch (dsts.b.enumspd) {
+ case FH_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
+ case FH_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
+ case FH_DSTS_ENUMSPD_FS_PHY_48MHZ:
+ diepctl.b.mps = FH_DEP0CTL_MPS_64;
+ break;
+ case FH_DSTS_ENUMSPD_LS_PHY_6MHZ:
+ diepctl.b.mps = FH_DEP0CTL_MPS_8;
+ break;
+ }
+
+ FH_WRITE_REG32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
+
+ /* Enable OUT EP for receive */
+ if (core_if->snpsid <= OTG_CORE_REV_2_94a) {
+ doepctl.b.epena = 1;
+ FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
+ }
+#ifdef VERBOSE
+ FH_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
+ FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
+ FH_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
+ FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl));
+#endif
+ dctl.b.cgnpinnak = 1;
+
+ FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+ FH_DEBUGPL(DBG_PCDV, "dctl=%0x\n",
+ FH_READ_REG32(&dev_if->dev_global_regs->dctl));
+
+}
+
+/**
+ * This function activates an EP. The Device EP control register for
+ * the EP is configured as defined in the ep structure. Note: This
+ * function is not used for EP0.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to activate.
+ */
+void fh_otg_ep_activate(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ depctl_data_t depctl;
+ volatile uint32_t *addr;
+ daint_data_t daintmsk = {.d32 = 0 };
+ dcfg_data_t dcfg;
+ uint8_t i;
+
+ FH_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num,
+ (ep->is_in ? "IN" : "OUT"));
+
+#ifdef FH_UTE_PER_IO
+ ep->xiso_frame_num = 0xFFFFFFFF;
+ ep->xiso_active_xfers = 0;
+ ep->xiso_queued_xfers = 0;
+#endif
+ /* Read DEPCTLn register */
+ if (ep->is_in == 1) {
+ addr = &dev_if->in_ep_regs[ep->num]->diepctl;
+ daintmsk.ep.in = 1 << ep->num;
+ } else {
+ addr = &dev_if->out_ep_regs[ep->num]->doepctl;
+ daintmsk.ep.out = 1 << ep->num;
+ }
+
+ /* If the EP is already active don't change the EP Control
+ * register. */
+ depctl.d32 = FH_READ_REG32(addr);
+ if (!depctl.b.usbactep) {
+ depctl.b.mps = ep->maxpacket;
+ depctl.b.eptype = ep->type;
+ depctl.b.txfnum = ep->tx_fifo_num;
+
+ if (ep->type == FH_OTG_EP_TYPE_ISOC) {
+ depctl.b.setd0pid = 1; // ???
+ } else {
+ depctl.b.setd0pid = 1;
+ }
+ depctl.b.usbactep = 1;
+
+ /* Update nextep_seq array and EPMSCNT in DCFG */
+ if (!(depctl.b.eptype & 1) && (ep->is_in == 1)) { // NP IN EP
+ for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+ if (core_if->nextep_seq[i] == core_if->first_in_nextep_seq)
+ break;
+ }
+ core_if->nextep_seq[i] = ep->num;
+ core_if->nextep_seq[ep->num] = core_if->first_in_nextep_seq;
+ depctl.b.nextep = core_if->nextep_seq[ep->num];
+ dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
+ dcfg.b.epmscnt++;
+ FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+ FH_DEBUGPL(DBG_PCDV,
+ "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
+ __func__, core_if->first_in_nextep_seq);
+ for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+ FH_DEBUGPL(DBG_PCDV, "%2d\n",
+ core_if->nextep_seq[i]);
+ }
+
+ }
+
+
+ FH_WRITE_REG32(addr, depctl.d32);
+ FH_DEBUGPL(DBG_PCDV, "DEPCTL=%08x\n", FH_READ_REG32(addr));
+ }
+
+ /* Enable the Interrupt for this EP */
+ if (core_if->multiproc_int_enable) {
+ if (ep->is_in == 1) {
+ diepmsk_data_t diepmsk = {.d32 = 0 };
+ diepmsk.b.xfercompl = 1;
+ diepmsk.b.timeout = 1;
+ diepmsk.b.epdisabled = 1;
+ diepmsk.b.ahberr = 1;
+ diepmsk.b.intknepmis = 1;
+ if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
+ diepmsk.b.intknepmis = 0;
+ diepmsk.b.txfifoundrn = 1; //?????
+ if (ep->type == FH_OTG_EP_TYPE_ISOC) {
+ diepmsk.b.nak = 1;
+ }
+
+/*
+ if (core_if->dma_desc_enable) {
+ diepmsk.b.bna = 1;
+ }
+*/
+/*
+ if (core_if->dma_enable) {
+ doepmsk.b.nak = 1;
+ }
+*/
+ FH_WRITE_REG32(&dev_if->dev_global_regs->
+ diepeachintmsk[ep->num], diepmsk.d32);
+
+ } else {
+ doepmsk_data_t doepmsk = {.d32 = 0 };
+ doepmsk.b.xfercompl = 1;
+ doepmsk.b.ahberr = 1;
+ doepmsk.b.epdisabled = 1;
+ if (ep->type == FH_OTG_EP_TYPE_ISOC)
+ doepmsk.b.outtknepdis = 1;
+
+/*
+
+ if (core_if->dma_desc_enable) {
+ doepmsk.b.bna = 1;
+ }
+*/
+/*
+ doepmsk.b.babble = 1;
+ doepmsk.b.nyet = 1;
+ doepmsk.b.nak = 1;
+*/
+ FH_WRITE_REG32(&dev_if->dev_global_regs->
+ doepeachintmsk[ep->num], doepmsk.d32);
+ }
+ FH_MODIFY_REG32(&dev_if->dev_global_regs->deachintmsk,
+ 0, daintmsk.d32);
+ } else {
+ if (ep->type == FH_OTG_EP_TYPE_ISOC) {
+ if (ep->is_in) {
+ diepmsk_data_t diepmsk = {.d32 = 0 };
+ diepmsk.b.nak = 1;
+ FH_MODIFY_REG32(&dev_if->dev_global_regs->diepmsk, 0, diepmsk.d32);
+ } else {
+ doepmsk_data_t doepmsk = {.d32 = 0 };
+ doepmsk.b.outtknepdis = 1;
+ FH_MODIFY_REG32(&dev_if->dev_global_regs->doepmsk, 0, doepmsk.d32);
+ }
+ }
+ FH_MODIFY_REG32(&dev_if->dev_global_regs->daintmsk,
+ 0, daintmsk.d32);
+ }
+
+ FH_DEBUGPL(DBG_PCDV, "DAINTMSK=%0x\n",
+ FH_READ_REG32(&dev_if->dev_global_regs->daintmsk));
+
+ ep->stall_clear_flag = 0;
+
+ return;
+}
+
+/**
+ * This function deactivates an EP. This is done by clearing the USB Active
+ * EP bit in the Device EP control register. Note: This function is not used
+ * for EP0. EP0 cannot be deactivated.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to deactivate.
+ */
+void fh_otg_ep_deactivate(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ depctl_data_t depctl = {.d32 = 0 };
+ volatile uint32_t *addr;
+ daint_data_t daintmsk = {.d32 = 0 };
+ dcfg_data_t dcfg;
+ uint8_t i = 0;
+ uint32_t timeout = 1000;
+
+#ifdef FH_UTE_PER_IO
+ ep->xiso_frame_num = 0xFFFFFFFF;
+ ep->xiso_active_xfers = 0;
+ ep->xiso_queued_xfers = 0;
+#endif
+
+ /* Read DEPCTLn register */
+ if (ep->is_in == 1) {
+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
+ daintmsk.ep.in = 1 << ep->num;
+ } else {
+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
+ daintmsk.ep.out = 1 << ep->num;
+ }
+
+ depctl.d32 = FH_READ_REG32(addr);
+
+ depctl.b.usbactep = 0;
+
+ /* Update nextep_seq array and EPMSCNT in DCFG */
+ if (!(depctl.b.eptype & 1) && ep->is_in == 1) { // NP EP IN
+ for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+ if (core_if->nextep_seq[i] == ep->num)
+ break;
+ }
+ core_if->nextep_seq[i] = core_if->nextep_seq[ep->num];
+ if (core_if->first_in_nextep_seq == ep->num)
+ core_if->first_in_nextep_seq = i;
+ core_if->nextep_seq[ep->num] = 0xff;
+ depctl.b.nextep = 0;
+ dcfg.d32 =
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
+ dcfg.b.epmscnt--;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg,
+ dcfg.d32);
+
+ FH_DEBUGPL(DBG_PCDV,
+ "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
+ __func__, core_if->first_in_nextep_seq);
+ for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+ FH_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
+ }
+ }
+
+ if (ep->is_in == 1)
+ depctl.b.txfnum = 0;
+
+ if (core_if->dma_desc_enable)
+ depctl.b.epdis = 1;
+
+ FH_WRITE_REG32(addr, depctl.d32);
+ depctl.d32 = FH_READ_REG32(addr);
+ if (core_if->dma_enable && ep->type == FH_OTG_EP_TYPE_ISOC
+ && depctl.b.epena) {
+ depctl_data_t depctl = {.d32 = 0 };
+ if (ep->is_in) {
+ diepint_data_t diepint = {.d32 = 0 };
+
+ depctl.b.snak = 1;
+ FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
+ diepctl, depctl.d32);
+ do {
+ fh_udelay(10);
+ diepint.d32 =
+ FH_READ_REG32(&core_if->
+ dev_if->in_ep_regs[ep->num]->
+ diepint);
+ } while (!diepint.b.inepnakeff && timeout--);
+ diepint.b.inepnakeff = 1;
+ FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
+ diepint, diepint.d32);
+ depctl.d32 = 0;
+ depctl.b.epdis = 1;
+ FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
+ diepctl, depctl.d32);
+ do {
+ fh_udelay(10);
+ diepint.d32 =
+ FH_READ_REG32(&core_if->
+ dev_if->in_ep_regs[ep->num]->
+ diepint);
+ } while (!diepint.b.epdisabled);
+ diepint.b.epdisabled = 1;
+ FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
+ diepint, diepint.d32);
+ } else {
+ dctl_data_t dctl = {.d32 = 0};
+ gintmsk_data_t gintsts = {.d32 = 0};
+ doepint_data_t doepint = {.d32 = 0};
+ dctl.b.sgoutnak = 1;
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+ dctl, 0, dctl.d32);
+ do {
+ fh_udelay(10);
+ gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+ } while (!gintsts.b.goutnakeff);
+ gintsts.d32 = 0;
+ gintsts.b.goutnakeff = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+ depctl.d32 = 0;
+ depctl.b.epdis = 1;
+ depctl.b.snak = 1;
+ FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->doepctl, depctl.d32);
+ do
+ {
+ fh_udelay(10);
+ doepint.d32 = FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[ep->num]->doepint);
+ } while (!doepint.b.epdisabled);
+
+ doepint.b.epdisabled = 1;
+ FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->doepint, doepint.d32);
+
+ dctl.d32 = 0;
+ dctl.b.cgoutnak = 1;
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
+ }
+ }
+
+ /* Disable the Interrupt for this EP */
+ if (core_if->multiproc_int_enable) {
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->deachintmsk,
+ daintmsk.d32, 0);
+
+ if (ep->is_in == 1) {
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->
+ diepeachintmsk[ep->num], 0);
+ } else {
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->
+ doepeachintmsk[ep->num], 0);
+ }
+ } else {
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->daintmsk,
+ daintmsk.d32, 0);
+ }
+
+}
+
+/**
+ * This function initializes dma descriptor chain.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ */
+static void init_dma_desc_chain(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ fh_otg_dev_dma_desc_t *dma_desc;
+ uint32_t offset;
+ uint32_t xfer_est;
+ int i;
+ unsigned maxxfer_local, total_len;
+
+ if (!ep->is_in && ep->type == FH_OTG_EP_TYPE_INTR &&
+ (ep->maxpacket % 4)) {
+ maxxfer_local = ep->maxpacket;
+ total_len = ep->xfer_len;
+ } else {
+ maxxfer_local = ep->maxxfer;
+ total_len = ep->total_len;
+ }
+
+ ep->desc_cnt = (total_len / maxxfer_local) +
+ ((total_len % maxxfer_local) ? 1 : 0);
+
+ if (!ep->desc_cnt)
+ ep->desc_cnt = 1;
+
+ if (ep->desc_cnt > MAX_DMA_DESC_CNT)
+ ep->desc_cnt = MAX_DMA_DESC_CNT;
+
+ dma_desc = ep->desc_addr;
+ if (maxxfer_local == ep->maxpacket) {
+ if ((total_len % maxxfer_local) &&
+ (total_len / maxxfer_local < MAX_DMA_DESC_CNT)) {
+ xfer_est = (ep->desc_cnt - 1) * maxxfer_local +
+ (total_len % maxxfer_local);
+ } else
+ xfer_est = ep->desc_cnt * maxxfer_local;
+ } else
+ xfer_est = total_len;
+ offset = 0;
+ for (i = 0; i < ep->desc_cnt; ++i) {
+ if (ep->type != FH_OTG_EP_TYPE_ISOC) {
+
+ /** DMA Descriptor Setup */
+ if (xfer_est > maxxfer_local) {
+ dma_desc->status.b.bs = BS_HOST_BUSY;
+ dma_desc->status.b.l = 0;
+ dma_desc->status.b.ioc = 0;
+ dma_desc->status.b.sp = 0;
+ dma_desc->status.b.bytes = maxxfer_local;
+ dma_desc->buf = ep->dma_addr + offset;
+ dma_desc->status.b.sts = 0;
+ dma_desc->status.b.bs = BS_HOST_READY;
+
+ xfer_est -= maxxfer_local;
+ offset += maxxfer_local;
+ } else {
+ dma_desc->status.b.bs = BS_HOST_BUSY;
+ dma_desc->status.b.l = 1;
+ dma_desc->status.b.ioc = 1;
+ if (ep->is_in) {
+ dma_desc->status.b.sp =
+ (xfer_est %
+ ep->maxpacket) ? 1 : ((ep->
+ sent_zlp) ? 1 : 0);
+ dma_desc->status.b.bytes = xfer_est;
+ } else {
+ if (maxxfer_local == ep->maxpacket)
+ dma_desc->status.b.bytes = xfer_est;
+ else
+ dma_desc->status.b.bytes =
+ xfer_est + ((4 - (xfer_est & 0x3)) & 0x3);
+ }
+
+ dma_desc->buf = ep->dma_addr + offset;
+ dma_desc->status.b.sts = 0;
+ dma_desc->status.b.bs = BS_HOST_READY;
+ }
+
+ } else {
+
+ if (ep->is_in) {
+ uint32_t soffn = fh_otg_get_frame_number(core_if);
+
+ /** DMA Descriptor Setup */
+ if (xfer_est > ep->maxxfer) {
+ dma_desc->status.b_iso_in.bs = BS_HOST_BUSY;
+ dma_desc->status.b_iso_in.l = 0;
+ dma_desc->status.b_iso_in.ioc = 0;
+ dma_desc->status.b_iso_in.sp = 0;
+ dma_desc->status.b_iso_in.pid = 1;
+ dma_desc->status.b_iso_in.framenum = soffn;
+ dma_desc->status.b_iso_in.txbytes = ep->maxxfer;
+ dma_desc->buf = ep->dma_addr + offset;
+ dma_desc->status.b_iso_in.bs = BS_HOST_READY;
+
+ xfer_est -= ep->maxxfer;
+ offset += ep->maxxfer;
+ } else {
+ dma_desc->status.b_iso_in.bs = BS_HOST_BUSY;
+ dma_desc->status.b_iso_in.l = 1;
+ dma_desc->status.b_iso_in.ioc = 1;
+ dma_desc->status.b_iso_in.pid = 1;
+ dma_desc->status.b_iso_in.framenum = soffn ;
+ dma_desc->status.b_iso_in.sp =
+ (xfer_est % ep->maxpacket) ? 1 :
+ ((ep->sent_zlp) ? 1 : 0);
+ dma_desc->status.b_iso_in.txbytes = xfer_est;
+ dma_desc->buf = ep->dma_addr + offset;
+ dma_desc->status.b_iso_in.bs = BS_HOST_READY;
+ }
+ } else {
+ uint32_t soffn = fh_otg_get_frame_number(core_if);
+
+ if (xfer_est > ep->maxxfer) {
+ dma_desc->status.b_iso_out.bs = BS_HOST_BUSY;
+ dma_desc->status.b_iso_out.l = 0;
+ dma_desc->status.b_iso_out.ioc = 0;
+ /*dma_desc->status.b_iso_in.sp = 0;*/
+ dma_desc->status.b_iso_out.pid = 1;
+ dma_desc->status.b_iso_out.framenum = soffn;
+ dma_desc->status.b_iso_out.rxbytes
+ = ep->maxxfer;
+ dma_desc->buf = ep->dma_addr + offset;
+ dma_desc->status.b_iso_out.bs = BS_HOST_READY;
+
+ xfer_est -= ep->maxxfer;
+ offset += ep->maxxfer;
+ } else {
+ dma_desc->status.b_iso_out.bs = BS_HOST_BUSY;
+ dma_desc->status.b_iso_out.l = 1;
+ dma_desc->status.b_iso_out.ioc = 1;
+ /*dma_desc->status.b_iso_in.sp = 0;*/
+ dma_desc->status.b_iso_out.pid = 1;
+ dma_desc->status.b_iso_out.framenum = soffn;
+ dma_desc->status.b_iso_out.rxbytes = xfer_est;
+ dma_desc->buf = ep->dma_addr + offset;
+ dma_desc->status.b_iso_out.bs = BS_HOST_READY;
+ }
+ }
+
+ }
+ /*FH_PRINTF("desc: %d:0x%08X:0x%08X\n", i, dma_desc->status, dma_desc->buf );*/
+ dma_desc++;
+ }
+}
+
+/**
+ * This function is called to write ISOC data into the appropriate dedicated
+ * periodic FIFO.
+ */
+static int32_t write_isoc_tx_fifo(fh_otg_core_if_t * core_if, fh_ep_t * fh_ep)
+{
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ fh_otg_dev_in_ep_regs_t *ep_regs;
+ dtxfsts_data_t txstatus = {.d32 = 0 };
+ uint32_t len = 0;
+ int epnum = fh_ep->num;
+ int dwords;
+
+ FH_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %d \n", epnum);
+
+ ep_regs = core_if->dev_if->in_ep_regs[epnum];
+
+ len = fh_ep->xfer_len - fh_ep->xfer_count;
+
+ if (len > fh_ep->maxpacket) {
+ len = fh_ep->maxpacket;
+ }
+
+ dwords = (len + 3) / 4;
+
+ /* While there is space in the queue and space in the FIFO and
+ * more data to transfer, write packets to the Tx FIFO */
+ txstatus.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
+ FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
+
+ while (txstatus.b.txfspcavail >= dwords &&
+ fh_ep->xfer_count < fh_ep->xfer_len && fh_ep->xfer_len != 0) {
+ /* Write the FIFO */
+ fh_otg_ep_write_packet(core_if, fh_ep, 0);
+
+ len = fh_ep->xfer_len - fh_ep->xfer_count;
+ if (len > fh_ep->maxpacket) {
+ len = fh_ep->maxpacket;
+ }
+
+ dwords = (len + 3) / 4;
+ txstatus.d32 =
+ FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
+ FH_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
+ txstatus.d32);
+ }
+
+ FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum,
+ FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts));
+
+ return 1;
+}
+
+/**
+ * This function does the setup for a data transfer for an EP and
+ * starts the transfer. For an IN transfer, the packets will be
+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
+ * the packets are unloaded from the Rx FIFO in the ISR.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ */
+
+void fh_otg_ep_start_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ depctl_data_t depctl;
+ deptsiz_data_t deptsiz;
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+
+ FH_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
+ FH_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+ "xfer_buff=%p start_xfer_buff=%p, total_len = %d\n",
+ ep->num, (ep->is_in ? "IN" : "OUT"), ep->xfer_len,
+ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff,
+ ep->total_len);
+
+ /* IN endpoint */
+ if (ep->is_in == 1) {
+ fh_otg_dev_in_ep_regs_t *in_regs =
+ core_if->dev_if->in_ep_regs[ep->num];
+
+ gnptxsts_data_t gtxstatus;
+
+ gtxstatus.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->gnptxsts);
+
+ if (core_if->en_multiple_tx_fifo == 0
+ && gtxstatus.b.nptxqspcavail == 0 && !core_if->dma_enable) {
+#ifdef DEBUG
+ FH_PRINTF("TX Queue Full (0x%0x)\n", gtxstatus.d32);
+#endif
+ return;
+ }
+
+ depctl.d32 = FH_READ_REG32(&(in_regs->diepctl));
+ deptsiz.d32 = FH_READ_REG32(&(in_regs->dieptsiz));
+
+ if (ep->maxpacket > ep->maxxfer / MAX_PKT_CNT)
+ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
+ ep->maxxfer : (ep->total_len - ep->xfer_len);
+ else
+ ep->xfer_len += (MAX_PKT_CNT * ep->maxpacket < (ep->total_len - ep->xfer_len)) ?
+ MAX_PKT_CNT * ep->maxpacket : (ep->total_len - ep->xfer_len);
+
+ /* Zero Length Packet? */
+ if ((ep->xfer_len - ep->xfer_count) == 0) {
+ deptsiz.b.xfersize = 0;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ /* Program the transfer size and packet count
+ * as follows: xfersize = N * maxpacket +
+ * short_packet pktcnt = N + (short_packet
+ * exist ? 1 : 0)
+ */
+ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
+ deptsiz.b.pktcnt =
+ (ep->xfer_len - ep->xfer_count - 1 +
+ ep->maxpacket) / ep->maxpacket;
+ if (deptsiz.b.pktcnt > MAX_PKT_CNT) {
+ deptsiz.b.pktcnt = MAX_PKT_CNT;
+ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
+ }
+ if (ep->type == FH_OTG_EP_TYPE_ISOC)
+ deptsiz.b.mc = deptsiz.b.pktcnt;
+ }
+
+ /* Write the DMA register */
+ if (core_if->dma_enable) {
+ if (core_if->dma_desc_enable == 0) {
+ if (ep->type != FH_OTG_EP_TYPE_ISOC)
+ deptsiz.b.mc = 1;
+ FH_WRITE_REG32(&in_regs->dieptsiz,
+ deptsiz.d32);
+ FH_WRITE_REG32(&(in_regs->diepdma),
+ (uint32_t) ep->dma_addr);
+ } else {
+#ifdef FH_UTE_CFI
+ /* The descriptor chain should be already initialized by now */
+ if (ep->buff_mode != BM_STANDARD) {
+ FH_WRITE_REG32(&in_regs->diepdma,
+ ep->descs_dma_addr);
+ } else {
+#endif
+ init_dma_desc_chain(core_if, ep);
+ /** DIEPDMAn Register write */
+ FH_WRITE_REG32(&in_regs->diepdma,
+ ep->dma_desc_addr);
+#ifdef FH_UTE_CFI
+ }
+#endif
+ }
+ } else {
+ FH_WRITE_REG32(&in_regs->dieptsiz, deptsiz.d32);
+ if (ep->type != FH_OTG_EP_TYPE_ISOC) {
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt,
+				 * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode,
+ * the data will be written into the fifo by the ISR.
+ */
+ if (core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ FH_MODIFY_REG32
+ (&core_if->core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (ep->xfer_len > 0) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk = 1 << ep->num;
+ FH_MODIFY_REG32
+ (&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
+ 0, fifoemptymsk);
+
+ }
+ }
+ }
+ }
+ if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable)
+ depctl.b.nextep = core_if->nextep_seq[ep->num];
+
+ if (ep->type == FH_OTG_EP_TYPE_ISOC) {
+ dsts_data_t dsts = {.d32 = 0 };
+ if (ep->bInterval == 1) {
+ dsts.d32 =
+ FH_READ_REG32(&core_if->dev_if->
+ dev_global_regs->dsts);
+ ep->frame_num = dsts.b.soffn + ep->bInterval;
+ if (ep->frame_num > 0x3FFF) {
+ ep->frm_overrun = 1;
+ ep->frame_num &= 0x3FFF;
+ } else
+ ep->frm_overrun = 0;
+ if (ep->frame_num & 0x1) {
+ depctl.b.setd1pid = 1;
+ } else {
+ depctl.b.setd0pid = 1;
+ }
+ }
+ }
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ FH_WRITE_REG32(&in_regs->diepctl, depctl.d32);
+
+ if (!core_if->dma_enable && ep->type == FH_OTG_EP_TYPE_ISOC) {
+ write_isoc_tx_fifo(core_if, ep);
+ }
+
+ } else {
+ /* OUT endpoint */
+ fh_otg_dev_out_ep_regs_t *out_regs =
+ core_if->dev_if->out_ep_regs[ep->num];
+
+ depctl.d32 = FH_READ_REG32(&(out_regs->doepctl));
+ deptsiz.d32 = FH_READ_REG32(&(out_regs->doeptsiz));
+
+ if (!core_if->dma_desc_enable) {
+ if (ep->maxpacket > ep->maxxfer / MAX_PKT_CNT)
+ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
+ ep->maxxfer : (ep->total_len - ep->xfer_len);
+ else
+ ep->xfer_len += (MAX_PKT_CNT * ep->maxpacket < (ep->total_len
+ - ep->xfer_len)) ? MAX_PKT_CNT * ep->maxpacket : (ep->total_len - ep->xfer_len);
+ }
+
+ /* Program the transfer size and packet count as follows:
+ *
+ * pktcnt = N
+ * xfersize = N * maxpacket
+ */
+ if ((ep->xfer_len - ep->xfer_count) == 0) {
+ /* Zero Length Packet */
+ deptsiz.b.xfersize = ep->maxpacket;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ deptsiz.b.pktcnt =
+ (ep->xfer_len - ep->xfer_count +
+ (ep->maxpacket - 1)) / ep->maxpacket;
+ if (deptsiz.b.pktcnt > MAX_PKT_CNT) {
+ deptsiz.b.pktcnt = MAX_PKT_CNT;
+ }
+ if (!core_if->dma_desc_enable) {
+ ep->xfer_len =
+ deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count;
+ }
+ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
+ }
+
+ FH_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n",
+ ep->num, deptsiz.b.xfersize, deptsiz.b.pktcnt);
+
+ if (core_if->dma_enable) {
+ if (!core_if->dma_desc_enable) {
+ FH_WRITE_REG32(&out_regs->doeptsiz,
+ deptsiz.d32);
+
+ FH_WRITE_REG32(&(out_regs->doepdma),
+ (uint32_t) ep->dma_addr);
+ } else {
+#ifdef FH_UTE_CFI
+ /* The descriptor chain should be already initialized by now */
+ if (ep->buff_mode != BM_STANDARD) {
+ FH_WRITE_REG32(&out_regs->doepdma,
+ ep->descs_dma_addr);
+ } else {
+#endif
+ /** This is used for interrupt out transfers*/
+ if (!ep->xfer_len)
+ ep->xfer_len = ep->total_len;
+ init_dma_desc_chain(core_if, ep);
+
+ if (core_if->core_params->dev_out_nak) {
+ if (ep->type == FH_OTG_EP_TYPE_BULK) {
+ deptsiz.b.pktcnt = (ep->total_len +
+ (ep->maxpacket - 1)) / ep->maxpacket;
+ deptsiz.b.xfersize = ep->total_len;
+ /* Remember initial value of doeptsiz */
+ core_if->start_doeptsiz_val[ep->num] = deptsiz.d32;
+ FH_WRITE_REG32(&out_regs->doeptsiz,
+ deptsiz.d32);
+ }
+ }
+ /** DOEPDMAn Register write */
+ FH_WRITE_REG32(&out_regs->doepdma,
+ ep->dma_desc_addr);
+#ifdef FH_UTE_CFI
+ }
+#endif
+ }
+ } else {
+ FH_WRITE_REG32(&out_regs->doeptsiz, deptsiz.d32);
+ }
+
+ if (ep->type == FH_OTG_EP_TYPE_ISOC) {
+ dsts_data_t dsts = {.d32 = 0 };
+ if (ep->bInterval == 1) {
+ dsts.d32 =
+ FH_READ_REG32(&core_if->dev_if->
+ dev_global_regs->dsts);
+ ep->frame_num = dsts.b.soffn + ep->bInterval;
+ if (ep->frame_num > 0x3FFF) {
+ ep->frm_overrun = 1;
+ ep->frame_num &= 0x3FFF;
+ } else
+ ep->frm_overrun = 0;
+
+ if (ep->frame_num & 0x1) {
+ depctl.b.setd1pid = 1;
+ } else {
+ depctl.b.setd0pid = 1;
+ }
+ }
+ }
+
+ /* EP enable */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+
+ FH_WRITE_REG32(&out_regs->doepctl, depctl.d32);
+
+ FH_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
+ FH_READ_REG32(&out_regs->doepctl),
+ FH_READ_REG32(&out_regs->doeptsiz));
+ FH_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->
+ daintmsk),
+ FH_READ_REG32(&core_if->core_global_regs->
+ gintmsk));
+
+ /* Timer is scheduling only for out bulk transfers for
+ * "Device DDMA OUT NAK Enhancement" feature to inform user
+ * about received data payload in case of timeout
+ */
+ if (core_if->core_params->dev_out_nak) {
+ if (ep->type == FH_OTG_EP_TYPE_BULK) {
+ core_if->ep_xfer_info[ep->num].core_if = core_if;
+ core_if->ep_xfer_info[ep->num].ep = ep;
+ core_if->ep_xfer_info[ep->num].state = 1;
+
+ /* Start a timer for this transfer. */
+ FH_TIMER_SCHEDULE(core_if->ep_xfer_timer[ep->num], 10000);
+ }
+ }
+ }
+}
+
+/**
+ * This function setup a zero length transfer in Buffer DMA and
+ * Slave modes for usb requests with zero field set
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ *
+ */
+void fh_otg_ep_start_zl_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+
+ depctl_data_t depctl;
+ deptsiz_data_t deptsiz;
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+
+ FH_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
+ FH_PRINTF("zero length transfer is called\n");
+
+ /* IN endpoint */
+ if (ep->is_in == 1) {
+ fh_otg_dev_in_ep_regs_t *in_regs =
+ core_if->dev_if->in_ep_regs[ep->num];
+
+ depctl.d32 = FH_READ_REG32(&(in_regs->diepctl));
+ deptsiz.d32 = FH_READ_REG32(&(in_regs->dieptsiz));
+
+ deptsiz.b.xfersize = 0;
+ deptsiz.b.pktcnt = 1;
+
+ /* Write the DMA register */
+ if (core_if->dma_enable) {
+ if (core_if->dma_desc_enable == 0) {
+ deptsiz.b.mc = 1;
+ FH_WRITE_REG32(&in_regs->dieptsiz,
+ deptsiz.d32);
+ FH_WRITE_REG32(&(in_regs->diepdma),
+ (uint32_t) ep->dma_addr);
+ }
+ } else {
+ FH_WRITE_REG32(&in_regs->dieptsiz, deptsiz.d32);
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt,
+			 * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode,
+ * the data will be written into the fifo by the ISR.
+ */
+ if (core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ FH_MODIFY_REG32(&core_if->
+ core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (ep->xfer_len > 0) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk = 1 << ep->num;
+ FH_MODIFY_REG32(&core_if->
+ dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
+ 0, fifoemptymsk);
+ }
+ }
+ }
+
+ if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable)
+ depctl.b.nextep = core_if->nextep_seq[ep->num];
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ FH_WRITE_REG32(&in_regs->diepctl, depctl.d32);
+
+ } else {
+ /* OUT endpoint */
+ fh_otg_dev_out_ep_regs_t *out_regs =
+ core_if->dev_if->out_ep_regs[ep->num];
+
+ depctl.d32 = FH_READ_REG32(&(out_regs->doepctl));
+ deptsiz.d32 = FH_READ_REG32(&(out_regs->doeptsiz));
+
+ /* Zero Length Packet */
+ deptsiz.b.xfersize = ep->maxpacket;
+ deptsiz.b.pktcnt = 1;
+
+ if (core_if->dma_enable) {
+ if (!core_if->dma_desc_enable) {
+ FH_WRITE_REG32(&out_regs->doeptsiz,
+ deptsiz.d32);
+
+ FH_WRITE_REG32(&(out_regs->doepdma),
+ (uint32_t) ep->dma_addr);
+ }
+ } else {
+ FH_WRITE_REG32(&out_regs->doeptsiz, deptsiz.d32);
+ }
+
+ /* EP enable */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+
+ FH_WRITE_REG32(&out_regs->doepctl, depctl.d32);
+
+ }
+}
+
+/**
+ * This function does the setup for a data transfer for EP0 and starts
+ * the transfer. For an IN transfer, the packets will be loaded into
+ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are
+ * unloaded from the Rx FIFO in the ISR.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP0 data.
+ */
+void fh_otg_ep0_start_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ depctl_data_t depctl;
+ deptsiz0_data_t deptsiz;
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+ fh_otg_dev_dma_desc_t *dma_desc;
+
+ FH_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+ "xfer_buff=%p start_xfer_buff=%p \n",
+ ep->num, (ep->is_in ? "IN" : "OUT"), ep->xfer_len,
+ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff);
+
+ ep->total_len = ep->xfer_len;
+
+ /* IN endpoint */
+ if (ep->is_in == 1) {
+ fh_otg_dev_in_ep_regs_t *in_regs =
+ core_if->dev_if->in_ep_regs[0];
+
+ gnptxsts_data_t gtxstatus;
+
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
+ depctl.d32 = FH_READ_REG32(&in_regs->diepctl);
+ if (depctl.b.epena)
+ return;
+ }
+
+ gtxstatus.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->gnptxsts);
+
+ /* If dedicated FIFO every time flush fifo before enable ep*/
+ if (core_if->en_multiple_tx_fifo && core_if->snpsid >= OTG_CORE_REV_3_00a)
+ fh_otg_flush_tx_fifo(core_if, ep->tx_fifo_num);
+
+ if (core_if->en_multiple_tx_fifo == 0
+ && gtxstatus.b.nptxqspcavail == 0
+ && !core_if->dma_enable) {
+#ifdef DEBUG
+ deptsiz.d32 = FH_READ_REG32(&in_regs->dieptsiz);
+ FH_DEBUGPL(DBG_PCD, "DIEPCTL0=%0x\n",
+ FH_READ_REG32(&in_regs->diepctl));
+ FH_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n",
+ deptsiz.d32,
+ deptsiz.b.xfersize, deptsiz.b.pktcnt);
+ FH_PRINTF("TX Queue or FIFO Full (0x%0x)\n",
+ gtxstatus.d32);
+#endif
+ return;
+ }
+
+ depctl.d32 = FH_READ_REG32(&in_regs->diepctl);
+ deptsiz.d32 = FH_READ_REG32(&in_regs->dieptsiz);
+
+ /* Zero Length Packet? */
+ if (ep->xfer_len == 0) {
+ deptsiz.b.xfersize = 0;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ /* Program the transfer size and packet count
+ * as follows: xfersize = N * maxpacket +
+ * short_packet pktcnt = N + (short_packet
+ * exist ? 1 : 0)
+ */
+ if (ep->xfer_len > ep->maxpacket) {
+ ep->xfer_len = ep->maxpacket;
+ deptsiz.b.xfersize = ep->maxpacket;
+ } else {
+ deptsiz.b.xfersize = ep->xfer_len;
+ }
+ deptsiz.b.pktcnt = 1;
+
+ }
+ FH_DEBUGPL(DBG_PCDV,
+ "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
+ ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt,
+ deptsiz.d32);
+
+ /* Write the DMA register */
+ if (core_if->dma_enable) {
+ if (core_if->dma_desc_enable == 0) {
+ FH_WRITE_REG32(&in_regs->dieptsiz,
+ deptsiz.d32);
+
+ FH_WRITE_REG32(&(in_regs->diepdma),
+ (uint32_t) ep->dma_addr);
+ } else {
+ dma_desc = core_if->dev_if->in_desc_addr;
+
+ /** DMA Descriptor Setup */
+ dma_desc->status.b.bs = BS_HOST_BUSY;
+ dma_desc->status.b.l = 1;
+ dma_desc->status.b.ioc = 1;
+ dma_desc->status.b.sp =
+ (ep->xfer_len == ep->maxpacket) ? 0 : 1;
+ dma_desc->status.b.bytes = ep->xfer_len;
+ dma_desc->buf = ep->dma_addr;
+ dma_desc->status.b.sts = 0;
+ dma_desc->status.b.bs = BS_HOST_READY;
+
+ /** DIEPDMA0 Register write */
+ FH_WRITE_REG32(&in_regs->diepdma,
+ core_if->
+ dev_if->dma_in_desc_addr);
+ }
+ } else {
+ FH_WRITE_REG32(&in_regs->dieptsiz, deptsiz.d32);
+ }
+
+ if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable)
+ depctl.b.nextep = core_if->nextep_seq[ep->num];
+
+ // Unexpected Back-to-Back SETUP Interrupt Issue:
+ // RxFIFO thresholding enabled: thr_ctl >= 4,
+ // Control IN EP in status or data stage,
+ // Affected versions v3.00a, v3.10a & v3.20a
+ if ((core_if->core_params->thr_ctl & 4) && (ep->is_in) &&
+ ((core_if->snpsid == OTG_CORE_REV_3_00a) ||
+ (core_if->snpsid == OTG_CORE_REV_3_10a) ||
+ (core_if->snpsid == OTG_CORE_REV_3_20a))) {
+
+ int j = 0;
+ dctl_data_t dctl = {.d32 = 0};
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ gintsts_data_t gintsts = {.d32 = 0};
+
+ gintmsk.d32 = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
+ if (gintmsk.b.goutnakeff) {
+ gintmsk.b.goutnakeff = 0;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
+ gintmsk.b.goutnakeff = 1; // restore initial value of gintmsk.b.goutnakeff
+ }
+
+ dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
+ dctl.b.sgoutnak = 1;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
+
+ j = 0;
+ do {
+ j++;
+ fh_udelay(100);
+ gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+ if (j == 100000) {
+ FH_ERROR("GOUTNAKEFF is not set during 10s\n");
+ break;
+ }
+ } while (!gintsts.b.goutnakeff); // while not set
+
+ dctl.b.cgoutnak = 1;
+ dctl.b.sgoutnak = 0;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
+
+ j = 0;
+ do {
+ j++;
+ fh_udelay(100);
+ gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+ if (j == 100000) {
+ FH_ERROR("GOUTNAKEFF is not cleared during 10s\n");
+ break;
+ }
+ } while (gintsts.b.goutnakeff); // while not cleared
+
+ // restore saved gintmsk
+ FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
+ }
+ // END of WA for Unexpected Back-to-Back SETUP Interrupt Issue
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ FH_WRITE_REG32(&in_regs->diepctl, depctl.d32);
+
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt, the
+ * data will be written into the fifo by the ISR.
+ */
+ if (!core_if->dma_enable) {
+ if (core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ FH_MODIFY_REG32(&core_if->
+ core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (ep->xfer_len > 0) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk |= 1 << ep->num;
+ FH_MODIFY_REG32(&core_if->
+ dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
+ 0, fifoemptymsk);
+ }
+ }
+ }
+ } else {
+ /* OUT endpoint */
+ fh_otg_dev_out_ep_regs_t *out_regs =
+ core_if->dev_if->out_ep_regs[0];
+
+ depctl.d32 = FH_READ_REG32(&out_regs->doepctl);
+ deptsiz.d32 = FH_READ_REG32(&out_regs->doeptsiz);
+
+ /* Program the transfer size and packet count as follows:
+ * xfersize = N * (maxpacket + 4 - (maxpacket % 4))
+ * pktcnt = N */
+ /* Zero Length Packet */
+ deptsiz.b.xfersize = ep->maxpacket;
+ deptsiz.b.pktcnt = 1;
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a)
+ deptsiz.b.supcnt = 3;
+
+ FH_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n",
+ ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt);
+
+ if (core_if->dma_enable) {
+ if (!core_if->dma_desc_enable) {
+ FH_WRITE_REG32(&out_regs->doeptsiz,
+ deptsiz.d32);
+
+ FH_WRITE_REG32(&(out_regs->doepdma),
+ (uint32_t) ep->dma_addr);
+ } else {
+ dma_desc = core_if->dev_if->out_desc_addr;
+
+ /** DMA Descriptor Setup */
+ dma_desc->status.b.bs = BS_HOST_BUSY;
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
+ dma_desc->status.b.mtrf = 0;
+ dma_desc->status.b.sr = 0;
+ }
+ dma_desc->status.b.l = 1;
+ dma_desc->status.b.ioc = 1;
+ dma_desc->status.b.bytes = ep->maxpacket;
+ dma_desc->buf = ep->dma_addr;
+ dma_desc->status.b.sts = 0;
+ dma_desc->status.b.bs = BS_HOST_READY;
+
+ /** DOEPDMA0 Register write */
+ FH_WRITE_REG32(&out_regs->doepdma,
+ core_if->dev_if->
+ dma_out_desc_addr);
+ }
+ } else {
+ FH_WRITE_REG32(&out_regs->doeptsiz, deptsiz.d32);
+ }
+
+ /* EP enable */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ FH_WRITE_REG32(&(out_regs->doepctl), depctl.d32);
+ }
+}
+
+/**
+ * This function continues control IN transfers started by
+ * fh_otg_ep0_start_transfer, when the transfer does not fit in a
+ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one
+ * bit for the packet count.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP0 data.
+ */
+void fh_otg_ep0_continue_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ depctl_data_t depctl;
+ deptsiz0_data_t deptsiz;
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+ fh_otg_dev_dma_desc_t *dma_desc;
+
+ if (ep->is_in == 1) {
+ fh_otg_dev_in_ep_regs_t *in_regs =
+ core_if->dev_if->in_ep_regs[0];
+ gnptxsts_data_t tx_status = {.d32 = 0 };
+
+ tx_status.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->gnptxsts);
+ /** @todo Should there be check for room in the Tx
+ * Status Queue. If not remove the code above this comment. */
+
+ depctl.d32 = FH_READ_REG32(&in_regs->diepctl);
+ deptsiz.d32 = FH_READ_REG32(&in_regs->dieptsiz);
+
+ /* Program the transfer size and packet count
+ * as follows: xfersize = N * maxpacket +
+ * short_packet pktcnt = N + (short_packet
+ * exist ? 1 : 0)
+ */
+
+ if (core_if->dma_desc_enable == 0) {
+ deptsiz.b.xfersize =
+ (ep->total_len - ep->xfer_count) >
+ ep->maxpacket ? ep->maxpacket : (ep->total_len -
+ ep->xfer_count);
+ deptsiz.b.pktcnt = 1;
+ if (core_if->dma_enable == 0) {
+ ep->xfer_len += deptsiz.b.xfersize;
+ } else {
+ ep->xfer_len = deptsiz.b.xfersize;
+ }
+ FH_WRITE_REG32(&in_regs->dieptsiz, deptsiz.d32);
+ } else {
+ ep->xfer_len =
+ (ep->total_len - ep->xfer_count) >
+ ep->maxpacket ? ep->maxpacket : (ep->total_len -
+ ep->xfer_count);
+
+ dma_desc = core_if->dev_if->in_desc_addr;
+
+ /** DMA Descriptor Setup */
+ dma_desc->status.b.bs = BS_HOST_BUSY;
+ dma_desc->status.b.l = 1;
+ dma_desc->status.b.ioc = 1;
+ dma_desc->status.b.sp =
+ (ep->xfer_len == ep->maxpacket) ? 0 : 1;
+ dma_desc->status.b.bytes = ep->xfer_len;
+ dma_desc->buf = ep->dma_addr;
+ dma_desc->status.b.sts = 0;
+ dma_desc->status.b.bs = BS_HOST_READY;
+
+ /** DIEPDMA0 Register write */
+ FH_WRITE_REG32(&in_regs->diepdma,
+ core_if->dev_if->dma_in_desc_addr);
+ }
+
+ FH_DEBUGPL(DBG_PCDV,
+ "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
+ ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt,
+ deptsiz.d32);
+
+ /* Write the DMA register */
+ if (core_if->hwcfg2.b.architecture == FH_INT_DMA_ARCH) {
+ if (core_if->dma_desc_enable == 0)
+ FH_WRITE_REG32(&(in_regs->diepdma),
+ (uint32_t) ep->dma_addr);
+ }
+ if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable)
+ depctl.b.nextep = core_if->nextep_seq[ep->num];
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ FH_WRITE_REG32(&in_regs->diepctl, depctl.d32);
+
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt, the
+ * data will be written into the fifo by the ISR.
+ */
+ if (!core_if->dma_enable) {
+ if (core_if->en_multiple_tx_fifo == 0) {
+ /* First clear it from GINTSTS */
+ intr_mask.b.nptxfempty = 1;
+ FH_MODIFY_REG32(&core_if->
+ core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (ep->xfer_len > 0) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk |= 1 << ep->num;
+ FH_MODIFY_REG32(&core_if->
+ dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
+ 0, fifoemptymsk);
+ }
+ }
+ }
+ } else {
+ fh_otg_dev_out_ep_regs_t *out_regs =
+ core_if->dev_if->out_ep_regs[0];
+
+ depctl.d32 = FH_READ_REG32(&out_regs->doepctl);
+ deptsiz.d32 = FH_READ_REG32(&out_regs->doeptsiz);
+
+ /* Program the transfer size and packet count
+ * as follows: xfersize = N * maxpacket +
+ * short_packet pktcnt = N + (short_packet
+ * exist ? 1 : 0)
+ */
+ deptsiz.b.xfersize = ep->maxpacket;
+ deptsiz.b.pktcnt = 1;
+
+ if (core_if->dma_desc_enable == 0) {
+ FH_WRITE_REG32(&out_regs->doeptsiz, deptsiz.d32);
+ } else {
+ dma_desc = core_if->dev_if->out_desc_addr;
+
+ /** DMA Descriptor Setup */
+ dma_desc->status.b.bs = BS_HOST_BUSY;
+ dma_desc->status.b.l = 1;
+ dma_desc->status.b.ioc = 1;
+ dma_desc->status.b.bytes = ep->maxpacket;
+ dma_desc->buf = ep->dma_addr;
+ dma_desc->status.b.sts = 0;
+ dma_desc->status.b.bs = BS_HOST_READY;
+
+ /** DOEPDMA0 Register write */
+ FH_WRITE_REG32(&out_regs->doepdma,
+ core_if->dev_if->dma_out_desc_addr);
+ }
+
+ FH_DEBUGPL(DBG_PCDV,
+ "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
+ ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt,
+ deptsiz.d32);
+
+ /* Write the DMA register */
+ if (core_if->hwcfg2.b.architecture == FH_INT_DMA_ARCH) {
+ if (core_if->dma_desc_enable == 0)
+ FH_WRITE_REG32(&(out_regs->doepdma),
+ (uint32_t) ep->dma_addr);
+
+ }
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ FH_WRITE_REG32(&out_regs->doepctl, depctl.d32);
+
+ }
+}
+
+#ifdef DEBUG
+void dump_msg(const u8 * buf, unsigned int length)
+{
+ unsigned int start, num, i;
+ char line[52], *p;
+
+ if (length >= 512)
+ return;
+ start = 0;
+ while (length > 0) {
+ num = length < 16u ? length : 16u;
+ p = line;
+ for (i = 0; i < num; ++i) {
+ if (i == 8)
+ *p++ = ' ';
+ FH_SPRINTF(p, " %02x", buf[i]);
+ p += 3;
+ }
+ *p = 0;
+ FH_PRINTF("%6x: %s\n", start, line);
+ buf += num;
+ start += num;
+ length -= num;
+ }
+}
+#else
+static inline void dump_msg(const u8 * buf, unsigned int length)
+{
+}
+#endif
+
+/**
+ * This function writes a packet into the Tx FIFO associated with the
+ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For
+ * periodic EPs the periodic Tx FIFO associated with the EP is written
+ * with all packets for the next micro-frame.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to write packet for.
+ * @param dma Indicates if DMA is being used.
+ */
+void fh_otg_ep_write_packet(fh_otg_core_if_t * core_if, fh_ep_t * ep,
+ int dma)
+{
+ /**
+ * The buffer is padded to DWORD on a per packet basis in
+ * slave/dma mode if the MPS is not DWORD aligned. The last
+ * packet, if short, is also padded to a multiple of DWORD.
+ *
+ * ep->xfer_buff always starts DWORD aligned in memory and is a
+ * multiple of DWORD in length
+ *
+ * ep->xfer_len can be any number of bytes
+ *
+ * ep->xfer_count is a multiple of ep->maxpacket until the last
+ * packet
+ *
+ * FIFO access is DWORD */
+
+ uint32_t i;
+ uint32_t byte_count;
+ uint32_t dword_count;
+ uint32_t *fifo;
+ uint32_t *data_buff = (uint32_t *) ep->xfer_buff;
+
+ FH_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if,
+ ep);
+ if (ep->xfer_count >= ep->xfer_len) {
+ FH_WARN("%s() No data for EP%d!!!\n", __func__, ep->num);
+ return;
+ }
+
+ /* Find the byte length of the packet either short packet or MPS */
+ if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) {
+ byte_count = ep->xfer_len - ep->xfer_count;
+ } else {
+ byte_count = ep->maxpacket;
+ }
+
+	/* Find the DWORD length, padded by extra bytes as necessary if MPS
+ * is not a multiple of DWORD */
+ dword_count = (byte_count + 3) / 4;
+
+#ifdef VERBOSE
+ dump_msg(ep->xfer_buff, byte_count);
+#endif
+
+	/**@todo NGS Where are the Periodic Tx FIFO addresses
+	 * initialized? What should this be? */
+
+ fifo = core_if->data_fifo[ep->num];
+
+ FH_DEBUGPL((DBG_PCDV | DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n",
+ fifo, data_buff, *data_buff, byte_count);
+
+ if (!dma) {
+ for (i = 0; i < dword_count; i++, data_buff++) {
+ FH_WRITE_REG32(fifo, *data_buff);
+ }
+ }
+
+ ep->xfer_count += byte_count;
+ ep->xfer_buff += byte_count;
+ ep->dma_addr += byte_count;
+}
+
+/**
+ * Set the EP STALL.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to set the stall on.
+ */
+void fh_otg_ep_set_stall(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ depctl_data_t depctl;
+ volatile uint32_t *depctl_addr;
+
+ FH_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
+ (ep->is_in ? "IN" : "OUT"));
+
+ if (ep->is_in == 1) {
+ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
+ depctl.d32 = FH_READ_REG32(depctl_addr);
+
+ /* set the disable and stall bits */
+ if (depctl.b.epena) {
+ depctl.b.epdis = 1;
+ }
+ depctl.b.stall = 1;
+ FH_WRITE_REG32(depctl_addr, depctl.d32);
+ } else {
+ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
+ depctl.d32 = FH_READ_REG32(depctl_addr);
+
+ /* set the stall bit */
+ depctl.b.stall = 1;
+ FH_WRITE_REG32(depctl_addr, depctl.d32);
+ }
+
+ FH_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", FH_READ_REG32(depctl_addr));
+
+ return;
+}
+
+/**
+ * Clear the EP STALL.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to clear stall from.
+ */
+void fh_otg_ep_clear_stall(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ depctl_data_t depctl;
+ volatile uint32_t *depctl_addr;
+
+ FH_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
+ (ep->is_in ? "IN" : "OUT"));
+
+ if (ep->is_in == 1) {
+ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
+ } else {
+ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
+ }
+
+ depctl.d32 = FH_READ_REG32(depctl_addr);
+
+ /* clear the stall bits */
+ depctl.b.stall = 0;
+
+ /*
+ * USB Spec 9.4.5: For endpoints using data toggle, regardless
+ * of whether an endpoint has the Halt feature set, a
+ * ClearFeature(ENDPOINT_HALT) request always results in the
+ * data toggle being reinitialized to DATA0.
+ */
+ if (ep->type == FH_OTG_EP_TYPE_INTR ||
+ ep->type == FH_OTG_EP_TYPE_BULK) {
+ depctl.b.setd0pid = 1; /* DATA0 */
+ }
+
+ FH_WRITE_REG32(depctl_addr, depctl.d32);
+ FH_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", FH_READ_REG32(depctl_addr));
+ return;
+}
+
+/**
+ * This function reads a packet from the Rx FIFO into the destination
+ * buffer. To read SETUP data use fh_otg_read_setup_packet.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param dest Destination buffer for the packet.
+ * @param bytes Number of bytes to copy to the destination.
+ */
+void fh_otg_read_packet(fh_otg_core_if_t * core_if,
+ uint8_t * dest, uint16_t bytes)
+{
+ int i;
+ int word_count = (bytes + 3) / 4;
+
+ volatile uint32_t *fifo = core_if->data_fifo[0];
+ uint32_t *data_buff = (uint32_t *) dest;
+
+ /**
+ * @todo Account for the case where _dest is not dword aligned. This
+ * requires reading data from the FIFO into a uint32_t temp buffer,
+ * then moving it into the data buffer.
+ */
+
+ FH_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__,
+ core_if, dest, bytes);
+
+ for (i = 0; i < word_count; i++, data_buff++) {
+ *data_buff = FH_READ_REG32(fifo);
+ }
+
+ return;
+}
+
+/**
+ * This functions reads the device registers and prints them
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+void fh_otg_dump_dev_registers(fh_otg_core_if_t * core_if)
+{
+ int i;
+ volatile uint32_t *addr;
+
+ FH_PRINTF("Device Global Registers\n");
+ addr = &core_if->dev_if->dev_global_regs->dcfg;
+ FH_PRINTF("DCFG @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->dev_global_regs->dctl;
+ FH_PRINTF("DCTL @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->dev_global_regs->dsts;
+ FH_PRINTF("DSTS @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->dev_global_regs->diepmsk;
+ FH_PRINTF("DIEPMSK @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->dev_if->dev_global_regs->doepmsk;
+ FH_PRINTF("DOEPMSK @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->dev_if->dev_global_regs->daint;
+ FH_PRINTF("DAINT @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->dev_if->dev_global_regs->daintmsk;
+ FH_PRINTF("DAINTMSK @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->dev_if->dev_global_regs->dtknqr1;
+ FH_PRINTF("DTKNQR1 @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ if (core_if->hwcfg2.b.dev_token_q_depth > 6) {
+ addr = &core_if->dev_if->dev_global_regs->dtknqr2;
+ FH_PRINTF("DTKNQR2 @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ }
+
+ addr = &core_if->dev_if->dev_global_regs->dvbusdis;
+ FH_PRINTF("DVBUSID @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+
+ addr = &core_if->dev_if->dev_global_regs->dvbuspulse;
+ FH_PRINTF("DVBUSPULSE @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+
+ addr = &core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
+ FH_PRINTF("DTKNQR3_DTHRCTL @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+
+ if (core_if->hwcfg2.b.dev_token_q_depth > 22) {
+ addr = &core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
+ FH_PRINTF("DTKNQR4 @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ }
+
+ addr = &core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
+ FH_PRINTF("FIFOEMPMSK @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+
+ if (core_if->hwcfg2.b.multi_proc_int) {
+
+ addr = &core_if->dev_if->dev_global_regs->deachint;
+ FH_PRINTF("DEACHINT @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->dev_global_regs->deachintmsk;
+ FH_PRINTF("DEACHINTMSK @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+
+ for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+ addr =
+ &core_if->dev_if->
+ dev_global_regs->diepeachintmsk[i];
+ FH_PRINTF("DIEPEACHINTMSK[%d] @0x%08lX : 0x%08X\n",
+ i, (unsigned long)addr,
+ FH_READ_REG32(addr));
+ }
+
+ for (i = 0; i <= core_if->dev_if->num_out_eps; i++) {
+ addr =
+ &core_if->dev_if->
+ dev_global_regs->doepeachintmsk[i];
+ FH_PRINTF("DOEPEACHINTMSK[%d] @0x%08lX : 0x%08X\n",
+ i, (unsigned long)addr,
+ FH_READ_REG32(addr));
+ }
+ }
+
+ for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+ FH_PRINTF("Device IN EP %d Registers\n", i);
+ addr = &core_if->dev_if->in_ep_regs[i]->diepctl;
+ FH_PRINTF("DIEPCTL @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->in_ep_regs[i]->diepint;
+ FH_PRINTF("DIEPINT @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->in_ep_regs[i]->dieptsiz;
+ FH_PRINTF("DIETSIZ @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->in_ep_regs[i]->diepdma;
+ FH_PRINTF("DIEPDMA @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->in_ep_regs[i]->dtxfsts;
+ FH_PRINTF("DTXFSTS @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->in_ep_regs[i]->diepdmab;
+ FH_PRINTF("DIEPDMAB @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, 0 /*FH_READ_REG32(addr) */ );
+ }
+
+ for (i = 0; i <= core_if->dev_if->num_out_eps; i++) {
+ FH_PRINTF("Device OUT EP %d Registers\n", i);
+ addr = &core_if->dev_if->out_ep_regs[i]->doepctl;
+ FH_PRINTF("DOEPCTL @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->out_ep_regs[i]->doepint;
+ FH_PRINTF("DOEPINT @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->out_ep_regs[i]->doeptsiz;
+ FH_PRINTF("DOETSIZ @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->dev_if->out_ep_regs[i]->doepdma;
+ FH_PRINTF("DOEPDMA @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ if (core_if->dma_enable) { /* Don't access this register in SLAVE mode */
+ addr = &core_if->dev_if->out_ep_regs[i]->doepdmab;
+ FH_PRINTF("DOEPDMAB @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ }
+
+ }
+}
+
+/**
+ * This functions reads the SPRAM and prints its content
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+void fh_otg_dump_spram(fh_otg_core_if_t * core_if)
+{
+ volatile uint8_t *addr, *start_addr, *end_addr;
+
+ FH_PRINTF("SPRAM Data:\n");
+ start_addr = (void *)core_if->core_global_regs;
+ FH_PRINTF("Base Address: 0x%8lX\n", (unsigned long)start_addr);
+ start_addr += 0x00028000;
+ end_addr = (void *)core_if->core_global_regs;
+ end_addr += 0x000280e0;
+
+ for (addr = start_addr; addr < end_addr; addr += 16) {
+ FH_PRINTF
+ ("0x%8lX:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n",
+ (unsigned long)addr, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], addr[6], addr[7], addr[8], addr[9],
+ addr[10], addr[11], addr[12], addr[13], addr[14], addr[15]
+ );
+ }
+
+ return;
+}
+
+/**
+ * This function reads the host registers and prints them
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+void fh_otg_dump_host_registers(fh_otg_core_if_t * core_if)
+{
+ int i;
+ volatile uint32_t *addr;
+
+ FH_PRINTF("Host Global Registers\n");
+ addr = &core_if->host_if->host_global_regs->hcfg;
+ FH_PRINTF("HCFG @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->host_if->host_global_regs->hfir;
+ FH_PRINTF("HFIR @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->host_if->host_global_regs->hfnum;
+ FH_PRINTF("HFNUM @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->host_if->host_global_regs->hptxsts;
+ FH_PRINTF("HPTXSTS @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->host_if->host_global_regs->haint;
+ FH_PRINTF("HAINT @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->host_if->host_global_regs->haintmsk;
+ FH_PRINTF("HAINTMSK @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ if (core_if->dma_desc_enable) {
+ addr = &core_if->host_if->host_global_regs->hflbaddr;
+ FH_PRINTF("HFLBADDR @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ }
+
+ addr = core_if->host_if->hprt0;
+ FH_PRINTF("HPRT0 @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+
+ for (i = 0; i < core_if->core_params->host_channels; i++) {
+ FH_PRINTF("Host Channel %d Specific Registers\n", i);
+ addr = &core_if->host_if->hc_regs[i]->hcchar;
+ FH_PRINTF("HCCHAR @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->host_if->hc_regs[i]->hcsplt;
+ FH_PRINTF("HCSPLT @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->host_if->hc_regs[i]->hcint;
+ FH_PRINTF("HCINT @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->host_if->hc_regs[i]->hcintmsk;
+ FH_PRINTF("HCINTMSK @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->host_if->hc_regs[i]->hctsiz;
+ FH_PRINTF("HCTSIZ @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->host_if->hc_regs[i]->hcdma;
+ FH_PRINTF("HCDMA @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ if (core_if->dma_desc_enable) {
+ addr = &core_if->host_if->hc_regs[i]->hcdmab;
+ FH_PRINTF("HCDMAB @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ }
+
+ }
+ return;
+}
+
+/**
+ * This function reads the core global registers and prints them
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+void fh_otg_dump_global_registers(fh_otg_core_if_t * core_if)
+{
+ int i, ep_num;
+ volatile uint32_t *addr;
+ char *txfsiz;
+
+ FH_PRINTF("Core Global Registers\n");
+ addr = &core_if->core_global_regs->gotgctl;
+ FH_PRINTF("GOTGCTL @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gotgint;
+ FH_PRINTF("GOTGINT @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gahbcfg;
+ FH_PRINTF("GAHBCFG @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gusbcfg;
+ FH_PRINTF("GUSBCFG @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->grstctl;
+ FH_PRINTF("GRSTCTL @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gintsts;
+ FH_PRINTF("GINTSTS @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gintmsk;
+ FH_PRINTF("GINTMSK @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->grxstsr;
+ FH_PRINTF("GRXSTSR @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->grxfsiz;
+ FH_PRINTF("GRXFSIZ @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gnptxfsiz;
+ FH_PRINTF("GNPTXFSIZ @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gnptxsts;
+ FH_PRINTF("GNPTXSTS @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gi2cctl;
+ FH_PRINTF("GI2CCTL @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gpvndctl;
+ FH_PRINTF("GPVNDCTL @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->ggpio;
+ FH_PRINTF("GGPIO @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->guid;
+ FH_PRINTF("GUID @0x%08lX : 0x%08X\n",
+ (unsigned long)addr, FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gsnpsid;
+ FH_PRINTF("GSNPSID @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->ghwcfg1;
+ FH_PRINTF("GHWCFG1 @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->ghwcfg2;
+ FH_PRINTF("GHWCFG2 @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->ghwcfg3;
+ FH_PRINTF("GHWCFG3 @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->ghwcfg4;
+ FH_PRINTF("GHWCFG4 @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->glpmcfg;
+ FH_PRINTF("GLPMCFG @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gpwrdn;
+ FH_PRINTF("GPWRDN @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->gdfifocfg;
+ FH_PRINTF("GDFIFOCFG @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+ addr = &core_if->core_global_regs->adpctl;
+ FH_PRINTF("ADPCTL @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ fh_otg_adp_read_reg(core_if));
+ addr = &core_if->core_global_regs->hptxfsiz;
+ FH_PRINTF("HPTXFSIZ @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+
+ if (core_if->en_multiple_tx_fifo == 0) {
+ ep_num = core_if->hwcfg4.b.num_dev_perio_in_ep;
+ txfsiz = "DPTXFSIZ";
+ } else {
+ ep_num = core_if->hwcfg4.b.num_in_eps;
+ txfsiz = "DIENPTXF";
+ }
+ for (i = 0; i < ep_num; i++) {
+ addr = &core_if->core_global_regs->dtxfsiz[i];
+ FH_PRINTF("%s[%d] @0x%08lX : 0x%08X\n", txfsiz, i + 1,
+ (unsigned long)addr, FH_READ_REG32(addr));
+ }
+ addr = core_if->pcgcctl;
+ FH_PRINTF("PCGCCTL @0x%08lX : 0x%08X\n", (unsigned long)addr,
+ FH_READ_REG32(addr));
+}
+
+/**
+ * Flush a Tx FIFO.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param num Tx FIFO to flush.
+ */
+void fh_otg_flush_tx_fifo(fh_otg_core_if_t * core_if, const int num)
+{
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+ volatile grstctl_t greset = {.d32 = 0 };
+ int count = 0;
+
+ FH_DEBUGPL((DBG_CIL | DBG_PCDV), "Flush Tx FIFO %d\n", num);
+
+ greset.b.txfflsh = 1;
+ greset.b.txfnum = num;
+ FH_WRITE_REG32(&global_regs->grstctl, greset.d32);
+
+ do {
+ greset.d32 = FH_READ_REG32(&global_regs->grstctl);
+ if (++count > 10000) {
+ FH_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
+ __func__, greset.d32,
+ FH_READ_REG32(&global_regs->gnptxsts));
+ break;
+ }
+ fh_udelay(1);
+ } while (greset.b.txfflsh == 1);
+
+ /* Wait for 3 PHY Clocks */
+ fh_udelay(1);
+}
+
+/**
+ * Flush Rx FIFO.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+void fh_otg_flush_rx_fifo(fh_otg_core_if_t * core_if)
+{
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+ volatile grstctl_t greset = {.d32 = 0 };
+ int count = 0;
+
+ FH_DEBUGPL((DBG_CIL | DBG_PCDV), "%s\n", __func__);
+ /*
+ *
+ */
+ greset.b.rxfflsh = 1;
+ FH_WRITE_REG32(&global_regs->grstctl, greset.d32);
+
+ do {
+ greset.d32 = FH_READ_REG32(&global_regs->grstctl);
+ if (++count > 10000) {
+ FH_WARN("%s() HANG! GRSTCTL=%0x\n", __func__,
+ greset.d32);
+ break;
+ }
+ fh_udelay(1);
+ } while (greset.b.rxfflsh == 1);
+
+ /* Wait for 3 PHY Clocks */
+ fh_udelay(1);
+}
+
+/**
+ * Do core a soft reset of the core. Be careful with this because it
+ * resets all the internal state machines of the core.
+ */
+void fh_otg_core_reset(fh_otg_core_if_t * core_if)
+{
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+ volatile grstctl_t greset = {.d32 = 0 };
+ int count = 0;
+
+ FH_DEBUGPL(DBG_CILV, "%s\n", __func__);
+ /* Wait for AHB master IDLE state. */
+ do {
+ fh_udelay(10);
+ greset.d32 = FH_READ_REG32(&global_regs->grstctl);
+ if (++count > 100000) {
+ FH_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__,
+ greset.d32);
+ return;
+ }
+ }
+ while (greset.b.ahbidle == 0);
+
+ /* Core Soft Reset */
+ count = 0;
+ greset.b.csftrst = 1;
+ FH_WRITE_REG32(&global_regs->grstctl, greset.d32);
+ do {
+ greset.d32 = FH_READ_REG32(&global_regs->grstctl);
+ if (++count > 10000) {
+ FH_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n",
+ __func__, greset.d32);
+ break;
+ }
+ fh_udelay(1);
+ }
+ while (greset.b.csftrst == 1);
+
+ /* Wait for 3 PHY Clocks */
+ fh_mdelay(100);
+}
+
+uint8_t fh_otg_is_device_mode(fh_otg_core_if_t * _core_if)
+{
+	return (fh_otg_mode(_core_if) != FH_HOST_MODE);	/* nonzero when the core is operating as a USB device */
+}
+
+uint8_t fh_otg_is_host_mode(fh_otg_core_if_t * _core_if)
+{
+	return (fh_otg_mode(_core_if) == FH_HOST_MODE);	/* nonzero when the core is operating as a USB host */
+}
+
+/**
+ * Register HCD callbacks. The callbacks are used to start and stop
+ * the HCD for interrupt processing.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param cb the HCD callback structure.
+ * @param p pointer to be passed to callback function (usb_hcd*).
+ */
+void fh_otg_cil_register_hcd_callbacks(fh_otg_core_if_t * core_if,
+ fh_otg_cil_callbacks_t * cb, void *p)
+{
+ core_if->hcd_cb = cb;
+ cb->p = p;
+}
+
+/**
+ * Register PCD callbacks. The callbacks are used to start and stop
+ * the PCD for interrupt processing.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param cb the PCD callback structure.
+ * @param p pointer to be passed to callback function (pcd*).
+ */
+void fh_otg_cil_register_pcd_callbacks(fh_otg_core_if_t * core_if,
+ fh_otg_cil_callbacks_t * cb, void *p)
+{
+ core_if->pcd_cb = cb;
+ cb->p = p;
+}
+
+#ifdef FH_EN_ISOC
+
+/**
+ * This function writes isoc data per 1 (micro)frame into tx fifo
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ *
+ */
+void write_isoc_frame_data(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ fh_otg_dev_in_ep_regs_t *ep_regs;
+ dtxfsts_data_t txstatus = {.d32 = 0 };
+ uint32_t len = 0;
+ uint32_t dwords;
+
+ ep->xfer_len = ep->data_per_frame;
+ ep->xfer_count = 0;
+
+ ep_regs = core_if->dev_if->in_ep_regs[ep->num];
+
+ len = ep->xfer_len - ep->xfer_count;
+
+ if (len > ep->maxpacket) {
+ len = ep->maxpacket;
+ }
+
+ dwords = (len + 3) / 4;
+
+ /* While there is space in the queue and space in the FIFO and
+ * More data to tranfer, Write packets to the Tx FIFO */
+ txstatus.d32 =
+ FH_READ_REG32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts);
+ FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32);
+
+ while (txstatus.b.txfspcavail > dwords &&
+ ep->xfer_count < ep->xfer_len && ep->xfer_len != 0) {
+ /* Write the FIFO */
+ fh_otg_ep_write_packet(core_if, ep, 0);
+
+ len = ep->xfer_len - ep->xfer_count;
+ if (len > ep->maxpacket) {
+ len = ep->maxpacket;
+ }
+
+ dwords = (len + 3) / 4;
+ txstatus.d32 =
+ FH_READ_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
+ dtxfsts);
+ FH_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", ep->num,
+ txstatus.d32);
+ }
+}
+
+/**
+ * This function initializes a descriptor chain for Isochronous transfer
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ *
+ */
+void fh_otg_iso_ep_start_frm_transfer(fh_otg_core_if_t * core_if,
+ fh_ep_t * ep)
+{
+ deptsiz_data_t deptsiz = {.d32 = 0 };
+ depctl_data_t depctl = {.d32 = 0 };
+ dsts_data_t dsts = {.d32 = 0 };
+ volatile uint32_t *addr;
+
+ if (ep->is_in) {
+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
+ } else {
+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
+ }
+
+ ep->xfer_len = ep->data_per_frame;
+ ep->xfer_count = 0;
+ ep->xfer_buff = ep->cur_pkt_addr;
+ ep->dma_addr = ep->cur_pkt_dma_addr;
+
+ if (ep->is_in) {
+ /* Program the transfer size and packet count
+ * as follows: xfersize = N * maxpacket +
+ * short_packet pktcnt = N + (short_packet
+ * exist ? 1 : 0)
+ */
+ deptsiz.b.xfersize = ep->xfer_len;
+ deptsiz.b.pktcnt =
+ (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
+ deptsiz.b.mc = deptsiz.b.pktcnt;
+ FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz,
+ deptsiz.d32);
+
+ /* Write the DMA register */
+ if (core_if->dma_enable) {
+ FH_WRITE_REG32(&
+ (core_if->dev_if->in_ep_regs[ep->num]->
+ diepdma), (uint32_t) ep->dma_addr);
+ }
+ } else {
+ deptsiz.b.pktcnt =
+ (ep->xfer_len + (ep->maxpacket - 1)) / ep->maxpacket;
+ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
+
+ FH_WRITE_REG32(&core_if->dev_if->
+ out_ep_regs[ep->num]->doeptsiz, deptsiz.d32);
+
+ if (core_if->dma_enable) {
+ FH_WRITE_REG32(&
+ (core_if->dev_if->
+ out_ep_regs[ep->num]->doepdma),
+ (uint32_t) ep->dma_addr);
+ }
+ }
+
+ /** Enable endpoint, clear nak */
+
+ depctl.d32 = 0;
+ if (ep->bInterval == 1) {
+ dsts.d32 =
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+ ep->next_frame = dsts.b.soffn + ep->bInterval;
+
+ if (ep->next_frame & 0x1) {
+ depctl.b.setd1pid = 1;
+ } else {
+ depctl.b.setd0pid = 1;
+ }
+ } else {
+ ep->next_frame += ep->bInterval;
+
+ if (ep->next_frame & 0x1) {
+ depctl.b.setd1pid = 1;
+ } else {
+ depctl.b.setd0pid = 1;
+ }
+ }
+ depctl.b.epena = 1;
+ depctl.b.cnak = 1;
+
+ FH_MODIFY_REG32(addr, 0, depctl.d32);
+ depctl.d32 = FH_READ_REG32(addr);
+
+ if (ep->is_in && core_if->dma_enable == 0) {
+ write_isoc_frame_data(core_if, ep);
+ }
+
+}
+#endif /* FH_EN_ISOC */
+
+/* Fill 'size' int32 slots at 'p' with -1, the "not yet configured" sentinel. */
+static void fh_otg_set_uninitialized(int32_t * p, int size)
+{
+	int idx;
+	for (idx = size - 1; idx >= 0; idx--)
+		p[idx] = -1;
+}
+
+static int fh_otg_param_initialized(int32_t val)
+{
+	return val != -1;	/* -1 is the sentinel written by fh_otg_set_uninitialized() */
+}
+
+static int fh_otg_setup_params(fh_otg_core_if_t * core_if)
+{
+ int i;
+ gintsts_data_t gintsts;
+ gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+
+ core_if->core_params = FH_ALLOC(sizeof(*core_if->core_params));
+ if (!core_if->core_params) {
+ return -FH_E_NO_MEMORY;
+ }
+ fh_otg_set_uninitialized((int32_t *) core_if->core_params,
+ sizeof(*core_if->core_params) /
+ sizeof(int32_t));
+ FH_PRINTF("Setting default values for core params\n");
+ fh_otg_set_param_otg_cap(core_if, fh_param_otg_cap_default);
+ fh_otg_set_param_dma_enable(core_if, fh_param_dma_enable_default);
+ fh_otg_set_param_dma_desc_enable(core_if,
+ fh_param_dma_desc_enable_default);
+ fh_otg_set_param_opt(core_if, fh_param_opt_default);
+ fh_otg_set_param_dma_burst_size(core_if,
+ fh_param_dma_burst_size_default);
+ fh_otg_set_param_host_support_fs_ls_low_power(core_if,
+ fh_param_host_support_fs_ls_low_power_default);
+ fh_otg_set_param_enable_dynamic_fifo(core_if,
+ fh_param_enable_dynamic_fifo_default);
+ fh_otg_set_param_data_fifo_size(core_if,
+ fh_param_data_fifo_size_default);
+ fh_otg_set_param_dev_rx_fifo_size(core_if,
+ fh_param_dev_rx_fifo_size_default);
+ fh_otg_set_param_dev_nperio_tx_fifo_size(core_if,
+ fh_param_dev_nperio_tx_fifo_size_default);
+ fh_otg_set_param_host_rx_fifo_size(core_if,
+ fh_param_host_rx_fifo_size_default);
+ fh_otg_set_param_host_nperio_tx_fifo_size(core_if,
+ fh_param_host_nperio_tx_fifo_size_default);
+ fh_otg_set_param_host_perio_tx_fifo_size(core_if,
+ fh_param_host_perio_tx_fifo_size_default);
+ fh_otg_set_param_max_transfer_size(core_if,
+ fh_param_max_transfer_size_default);
+ fh_otg_set_param_max_packet_count(core_if,
+ fh_param_max_packet_count_default);
+ fh_otg_set_param_host_channels(core_if,
+ fh_param_host_channels_default);
+ fh_otg_set_param_dev_endpoints(core_if,
+ fh_param_dev_endpoints_default);
+ fh_otg_set_param_phy_type(core_if, fh_param_phy_type_default);
+ fh_otg_set_param_speed(core_if, fh_param_speed_default);
+ fh_otg_set_param_host_ls_low_power_phy_clk(core_if,
+ fh_param_host_ls_low_power_phy_clk_default);
+ fh_otg_set_param_phy_ulpi_ddr(core_if, fh_param_phy_ulpi_ddr_default);
+ fh_otg_set_param_phy_ulpi_ext_vbus(core_if,
+ fh_param_phy_ulpi_ext_vbus_default);
+ fh_otg_set_param_phy_utmi_width(core_if,
+ fh_param_phy_utmi_width_default);
+ fh_otg_set_param_ts_dline(core_if, fh_param_ts_dline_default);
+ fh_otg_set_param_i2c_enable(core_if, fh_param_i2c_enable_default);
+ fh_otg_set_param_ulpi_fs_ls(core_if, fh_param_ulpi_fs_ls_default);
+ fh_otg_set_param_en_multiple_tx_fifo(core_if,
+ fh_param_en_multiple_tx_fifo_default);
+
+ if (gintsts.b.curmode) {
+ /* Force device mode to get power-on values of device FIFOs */
+ gusbcfg_data_t gusbcfg = {.d32 = 0 };
+ gusbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+ gusbcfg.b.force_dev_mode = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
+ fh_mdelay(100);
+ for (i = 0; i < 15; i++) {
+ fh_otg_set_param_dev_perio_tx_fifo_size(core_if,
+ fh_param_dev_perio_tx_fifo_size_default, i);
+ }
+ for (i = 0; i < 15; i++) {
+ fh_otg_set_param_dev_tx_fifo_size(core_if,
+ fh_param_dev_tx_fifo_size_default, i);
+ }
+ gusbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+ gusbcfg.b.force_dev_mode = 0;
+ FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
+ fh_mdelay(100);
+ } else {
+ for (i = 0; i < 15; i++) {
+ fh_otg_set_param_dev_perio_tx_fifo_size(core_if,
+ fh_param_dev_perio_tx_fifo_size_default, i);
+ }
+ for (i = 0; i < 15; i++) {
+ fh_otg_set_param_dev_tx_fifo_size(core_if,
+ fh_param_dev_tx_fifo_size_default, i);
+ }
+ }
+
+ fh_otg_set_param_thr_ctl(core_if, fh_param_thr_ctl_default);
+ fh_otg_set_param_mpi_enable(core_if, fh_param_mpi_enable_default);
+ fh_otg_set_param_pti_enable(core_if, fh_param_pti_enable_default);
+ fh_otg_set_param_lpm_enable(core_if, fh_param_lpm_enable_default);
+
+ fh_otg_set_param_besl_enable(core_if, fh_param_besl_enable_default);
+ fh_otg_set_param_baseline_besl(core_if, fh_param_baseline_besl_default);
+ fh_otg_set_param_deep_besl(core_if, fh_param_deep_besl_default);
+
+ fh_otg_set_param_ic_usb_cap(core_if, fh_param_ic_usb_cap_default);
+ fh_otg_set_param_tx_thr_length(core_if,
+ fh_param_tx_thr_length_default);
+ fh_otg_set_param_rx_thr_length(core_if,
+ fh_param_rx_thr_length_default);
+ fh_otg_set_param_ahb_thr_ratio(core_if,
+ fh_param_ahb_thr_ratio_default);
+ fh_otg_set_param_power_down(core_if, fh_param_power_down_default);
+ fh_otg_set_param_reload_ctl(core_if, fh_param_reload_ctl_default);
+ fh_otg_set_param_dev_out_nak(core_if, fh_param_dev_out_nak_default);
+ fh_otg_set_param_cont_on_bna(core_if, fh_param_cont_on_bna_default);
+ fh_otg_set_param_ahb_single(core_if, fh_param_ahb_single_default);
+ fh_otg_set_param_otg_ver(core_if, fh_param_otg_ver_default);
+ fh_otg_set_param_adp_enable(core_if, fh_param_adp_enable_default);
+ return 0;
+}
+
+uint8_t fh_otg_is_dma_enable(fh_otg_core_if_t * core_if)
+{
+ return core_if->dma_enable;
+}
+
+/* Checks if the parameter is outside of its valid range of values */
+#define FH_OTG_PARAM_TEST(_param_, _low_, _high_) \
+ (((_param_) < (_low_)) || \
+ ((_param_) > (_high_)))
+
+/* Parameter access functions */
+int fh_otg_set_param_otg_cap(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int valid;
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 0, 2)) {
+ FH_WARN("Wrong value for otg_cap parameter\n");
+ FH_WARN("otg_cap parameter must be 0,1 or 2\n");
+ retval = -FH_E_INVALID;
+ goto out;
+ }
+
+ valid = 1;
+ switch (val) {
+ case FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE:
+ if (core_if->hwcfg2.b.op_mode !=
+ FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+ valid = 0;
+ break;
+ case FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE:
+ if ((core_if->hwcfg2.b.op_mode !=
+ FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+ && (core_if->hwcfg2.b.op_mode !=
+ FH_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
+ && (core_if->hwcfg2.b.op_mode !=
+ FH_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE)
+ && (core_if->hwcfg2.b.op_mode !=
+ FH_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) {
+ valid = 0;
+ }
+ break;
+ case FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE:
+ /* always valid */
+ break;
+ }
+ if (!valid) {
+ if (fh_otg_param_initialized(core_if->core_params->otg_cap)) {
+ FH_ERROR
+ ("%d invalid for otg_cap paremter. Check HW configuration.\n",
+ val);
+ }
+ val =
+ (((core_if->hwcfg2.b.op_mode ==
+ FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+ || (core_if->hwcfg2.b.op_mode ==
+ FH_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
+ || (core_if->hwcfg2.b.op_mode ==
+ FH_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE)
+ || (core_if->hwcfg2.b.op_mode ==
+ FH_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ?
+ FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE :
+ FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->otg_cap = val;
+out:
+ return retval;
+}
+
+int32_t fh_otg_get_param_otg_cap(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->otg_cap;
+}
+
+int fh_otg_set_param_opt(fh_otg_core_if_t * core_if, int32_t val)
+{
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("Wrong value for opt parameter\n");
+ return -FH_E_INVALID;
+ }
+ core_if->core_params->opt = val;
+ return 0;
+}
+
+int32_t fh_otg_get_param_opt(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->opt;
+}
+
+int fh_otg_set_param_dma_enable(fh_otg_core_if_t * core_if, int32_t val)
+{
+	int retval = 0;
+	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+		FH_WARN("Wrong value for dma enable\n");
+		return -FH_E_INVALID;
+	}
+
+	if ((val == 1) && (core_if->hwcfg2.b.architecture == 0)) {	/* HW has no DMA support */
+		if (fh_otg_param_initialized(core_if->core_params->dma_enable)) {
+			FH_ERROR
+			    ("%d invalid for dma_enable parameter. Check HW configuration.\n",
+			     val);
+		}
+		val = 0;
+		retval = -FH_E_INVALID;
+	}
+
+	core_if->core_params->dma_enable = val;
+	if (val == 0) {
+		fh_otg_set_param_dma_desc_enable(core_if, 0);	/* descriptor DMA requires buffer DMA */
+	}
+	return retval;
+}
+
+int32_t fh_otg_get_param_dma_enable(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->dma_enable;
+}
+
+int fh_otg_set_param_dma_desc_enable(fh_otg_core_if_t * core_if, int32_t val)
+{
+	int retval = 0;
+	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+		FH_WARN("Wrong value for dma_desc_enable\n");
+		FH_WARN("dma_desc_enable must be 0 or 1\n");
+		return -FH_E_INVALID;
+	}
+
+	if ((val == 1)	/* descriptor DMA needs buffer DMA on and desc-DMA capable HW */
+	    && ((fh_otg_get_param_dma_enable(core_if) == 0)
+		|| (core_if->hwcfg4.b.desc_dma == 0))) {
+		if (fh_otg_param_initialized
+		    (core_if->core_params->dma_desc_enable)) {
+			FH_ERROR
+			    ("%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
+			     val);
+		}
+		val = 0;
+		retval = -FH_E_INVALID;
+	}
+	core_if->core_params->dma_desc_enable = val;
+	return retval;
+}
+
+int32_t fh_otg_get_param_dma_desc_enable(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->dma_desc_enable;
+}
+
+int fh_otg_set_param_host_support_fs_ls_low_power(fh_otg_core_if_t * core_if,
+ int32_t val)
+{
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("Wrong value for host_support_fs_low_power\n");
+ FH_WARN("host_support_fs_low_power must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+ core_if->core_params->host_support_fs_ls_low_power = val;
+ return 0;
+}
+
+int32_t fh_otg_get_param_host_support_fs_ls_low_power(fh_otg_core_if_t *
+ core_if)
+{
+ return core_if->core_params->host_support_fs_ls_low_power;
+}
+
+int fh_otg_set_param_enable_dynamic_fifo(fh_otg_core_if_t * core_if,
+ int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("Wrong value for enable_dynamic_fifo\n");
+ FH_WARN("enable_dynamic_fifo must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ if ((val == 1) && (core_if->hwcfg2.b.dynamic_fifo == 0)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->enable_dynamic_fifo)) {
+ FH_ERROR
+ ("%d invalid for enable_dynamic_fifo paremter. Check HW configuration.\n",
+ val);
+ }
+ val = 0;
+ retval = -FH_E_INVALID;
+ }
+ core_if->core_params->enable_dynamic_fifo = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_enable_dynamic_fifo(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->enable_dynamic_fifo;
+}
+
+int fh_otg_set_param_data_fifo_size(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 32, 32768)) {
+ FH_WARN("Wrong value for data_fifo_size\n");
+ FH_WARN("data_fifo_size must be 32-32768\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val > core_if->hwcfg3.b.dfifo_depth) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->data_fifo_size)) {
+ FH_ERROR
+ ("%d invalid for data_fifo_size parameter. Check HW configuration.%d\n",
+ val, core_if->hwcfg3.b.dfifo_depth);
+ }
+ val = core_if->hwcfg3.b.dfifo_depth;
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->data_fifo_size = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_data_fifo_size(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->data_fifo_size;
+}
+
+int fh_otg_set_param_dev_rx_fifo_size(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
+ FH_WARN("Wrong value for dev_rx_fifo_size\n");
+ FH_WARN("dev_rx_fifo_size must be 16-32768\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val > FH_READ_REG32(&core_if->core_global_regs->grxfsiz)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->dev_rx_fifo_size)) {
+ FH_WARN("%d invalid for dev_rx_fifo_size parameter\n", val);
+ }
+ val = FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->dev_rx_fifo_size = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_dev_rx_fifo_size(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->dev_rx_fifo_size;
+}
+
+int fh_otg_set_param_dev_nperio_tx_fifo_size(fh_otg_core_if_t * core_if,
+ int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
+ FH_WARN("Wrong value for dev_nperio_tx_fifo\n");
+ FH_WARN("dev_nperio_tx_fifo must be 16-32768\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val > (FH_READ_REG32
+ (&core_if->core_global_regs->gnptxfsiz) >> 16)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->dev_nperio_tx_fifo_size)) {
+ FH_ERROR
+ ("%d invalid for dev_nperio_tx_fifo_size. Check HW configuration.\n",
+ val);
+ }
+ val =
+ (FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz) >>
+ 16);
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->dev_nperio_tx_fifo_size = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_dev_nperio_tx_fifo_size(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->dev_nperio_tx_fifo_size;
+}
+
+int fh_otg_set_param_host_rx_fifo_size(fh_otg_core_if_t * core_if,
+ int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
+ FH_WARN("Wrong value for host_rx_fifo_size\n");
+ FH_WARN("host_rx_fifo_size must be 16-32768\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val > FH_READ_REG32(&core_if->core_global_regs->grxfsiz)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->host_rx_fifo_size)) {
+ FH_ERROR
+ ("%d invalid for host_rx_fifo_size. Check HW configuration.\n",
+ val);
+ }
+ val = FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->host_rx_fifo_size = val;
+ return retval;
+
+}
+
+int32_t fh_otg_get_param_host_rx_fifo_size(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->host_rx_fifo_size;
+}
+
+int fh_otg_set_param_host_nperio_tx_fifo_size(fh_otg_core_if_t * core_if,
+ int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
+ FH_WARN("Wrong value for host_nperio_tx_fifo_size\n");
+ FH_WARN("host_nperio_tx_fifo_size must be 16-32768\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val > (FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz) >> 16)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->host_nperio_tx_fifo_size)) {
+ FH_ERROR
+ ("%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
+ val);
+ }
+ val =
+ (FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz) >>
+ 16);
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->host_nperio_tx_fifo_size = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_host_nperio_tx_fifo_size(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->host_nperio_tx_fifo_size;
+}
+
+int fh_otg_set_param_host_perio_tx_fifo_size(fh_otg_core_if_t * core_if,
+ int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
+ FH_WARN("Wrong value for host_perio_tx_fifo_size\n");
+ FH_WARN("host_perio_tx_fifo_size must be 16-32768\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val > ((core_if->hptxfsiz.d32) >> 16)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->host_perio_tx_fifo_size)) {
+ FH_ERROR
+ ("%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
+ val);
+ }
+ val = (core_if->hptxfsiz.d32) >> 16;
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->host_perio_tx_fifo_size = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_host_perio_tx_fifo_size(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->host_perio_tx_fifo_size;
+}
+
+int fh_otg_set_param_max_transfer_size(fh_otg_core_if_t * core_if,
+ int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 2047, 524288)) {
+ FH_WARN("Wrong value for max_transfer_size\n");
+ FH_WARN("max_transfer_size must be 2047-524288\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val >= (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->max_transfer_size)) {
+ FH_ERROR
+ ("%d invalid for max_transfer_size. Check HW configuration.\n",
+ val);
+ }
+ val =
+ ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 11)) -
+ 1);
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->max_transfer_size = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_max_transfer_size(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->max_transfer_size;
+}
+
+int fh_otg_set_param_max_packet_count(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 15, 511)) {
+ FH_WARN("Wrong value for max_packet_count\n");
+ FH_WARN("max_packet_count must be 15-511\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val > (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->max_packet_count)) {
+ FH_ERROR
+ ("%d invalid for max_packet_count. Check HW configuration.\n",
+ val);
+ }
+ val =
+ ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1);
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->max_packet_count = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_max_packet_count(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->max_packet_count;
+}
+
+int fh_otg_set_param_host_channels(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 1, 16)) {
+ FH_WARN("Wrong value for host_channels\n");
+ FH_WARN("host_channels must be 1-16\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val > (core_if->hwcfg2.b.num_host_chan + 1)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->host_channels)) {
+ FH_ERROR
+ ("%d invalid for host_channels. Check HW configurations.\n",
+ val);
+ }
+ val = (core_if->hwcfg2.b.num_host_chan + 1);
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->host_channels = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_host_channels(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->host_channels;
+}
+
+int fh_otg_set_param_dev_endpoints(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 1, 15)) {
+ FH_WARN("Wrong value for dev_endpoints\n");
+ FH_WARN("dev_endpoints must be 1-15\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val > (core_if->hwcfg2.b.num_dev_ep)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->dev_endpoints)) {
+ FH_ERROR
+ ("%d invalid for dev_endpoints. Check HW configurations.\n",
+ val);
+ }
+ val = core_if->hwcfg2.b.num_dev_ep;
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->dev_endpoints = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_dev_endpoints(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->dev_endpoints;
+}
+
+int fh_otg_set_param_phy_type(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ int valid = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 2)) {
+ FH_WARN("Wrong value for phy_type\n");
+ FH_WARN("phy_type must be 0,1 or 2\n");
+ return -FH_E_INVALID;
+ }
+#ifndef NO_FS_PHY_HW_CHECKS
+ if ((val == FH_PHY_TYPE_PARAM_UTMI) &&
+ ((core_if->hwcfg2.b.hs_phy_type == 1) ||
+ (core_if->hwcfg2.b.hs_phy_type == 3))) {
+ valid = 1;
+ } else if ((val == FH_PHY_TYPE_PARAM_ULPI) &&
+ ((core_if->hwcfg2.b.hs_phy_type == 2) ||
+ (core_if->hwcfg2.b.hs_phy_type == 3))) {
+ valid = 1;
+ } else if ((val == FH_PHY_TYPE_PARAM_FS) &&
+ (core_if->hwcfg2.b.fs_phy_type == 1)) {
+ valid = 1;
+ }
+ if (!valid) {
+ if (fh_otg_param_initialized(core_if->core_params->phy_type)) {
+ FH_ERROR
+ ("%d invalid for phy_type. Check HW configurations.\n",
+ val);
+ }
+ if (core_if->hwcfg2.b.hs_phy_type) {
+ if ((core_if->hwcfg2.b.hs_phy_type == 3) ||
+ (core_if->hwcfg2.b.hs_phy_type == 1)) {
+ val = FH_PHY_TYPE_PARAM_UTMI;
+ } else {
+ val = FH_PHY_TYPE_PARAM_ULPI;
+ }
+ }
+ retval = -FH_E_INVALID;
+ }
+#endif
+ core_if->core_params->phy_type = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_phy_type(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->phy_type;
+}
+
+int fh_otg_set_param_speed(fh_otg_core_if_t * core_if, int32_t val)
+{
+	int retval = 0;
+	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+		FH_WARN("Wrong value for speed parameter\n");
+		FH_WARN("max_speed parameter must be 0 or 1\n");
+		return -FH_E_INVALID;
+	}
+	if ((val == 0)	/* speed 0 is rejected on an FS-only PHY; forced to 1 below */
+	    && fh_otg_get_param_phy_type(core_if) == FH_PHY_TYPE_PARAM_FS) {
+		if (fh_otg_param_initialized(core_if->core_params->speed)) {
+			FH_ERROR
+			    ("%d invalid for speed parameter. Check HW configuration.\n",
+			     val);
+		}
+		val =
+		    (fh_otg_get_param_phy_type(core_if) ==
+		     FH_PHY_TYPE_PARAM_FS ? 1 : 0);
+		retval = -FH_E_INVALID;
+	}
+	core_if->core_params->speed = val;
+	return retval;
+}
+
+int32_t fh_otg_get_param_speed(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->speed;
+}
+
+int fh_otg_set_param_host_ls_low_power_phy_clk(fh_otg_core_if_t * core_if,
+ int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN
+ ("Wrong value for host_ls_low_power_phy_clk parameter\n");
+ FH_WARN("host_ls_low_power_phy_clk must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ if ((val == FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ)
+ && (fh_otg_get_param_phy_type(core_if) == FH_PHY_TYPE_PARAM_FS)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->host_ls_low_power_phy_clk)) {
+ FH_ERROR
+ ("%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
+ val);
+ }
+ val =
+ (fh_otg_get_param_phy_type(core_if) ==
+ FH_PHY_TYPE_PARAM_FS) ?
+ FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ :
+ FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->host_ls_low_power_phy_clk = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_host_ls_low_power_phy_clk(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->host_ls_low_power_phy_clk;
+}
+
+int fh_otg_set_param_phy_ulpi_ddr(fh_otg_core_if_t * core_if, int32_t val)
+{
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("Wrong value for phy_ulpi_ddr\n");
+ FH_WARN("phy_upli_ddr must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ core_if->core_params->phy_ulpi_ddr = val;
+ return 0;
+}
+
+int32_t fh_otg_get_param_phy_ulpi_ddr(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->phy_ulpi_ddr;
+}
+
+int fh_otg_set_param_phy_ulpi_ext_vbus(fh_otg_core_if_t * core_if,
+				       int32_t val)
+{
+	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+		FH_WARN("Wrong value for phy_ulpi_ext_vbus\n");
+		FH_WARN("phy_ulpi_ext_vbus must be 0 or 1\n");
+		return -FH_E_INVALID;
+	}
+
+	core_if->core_params->phy_ulpi_ext_vbus = val;
+	return 0;
+}
+
+int32_t fh_otg_get_param_phy_ulpi_ext_vbus(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->phy_ulpi_ext_vbus;
+}
+
+int fh_otg_set_param_phy_utmi_width(fh_otg_core_if_t * core_if, int32_t val)
+{
+	if ((val != 8) && (val != 16)) {	/* only 8- and 16-bit UTMI+ data widths are accepted */
+		FH_WARN("Wrong value for phy_utmi_width\n");
+		FH_WARN("phy_utmi_width must be 8 or 16\n");
+		return -FH_E_INVALID;
+	}
+
+	core_if->core_params->phy_utmi_width = val;
+	return 0;
+}
+
+int32_t fh_otg_get_param_phy_utmi_width(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->phy_utmi_width;
+}
+
+int fh_otg_set_param_ulpi_fs_ls(fh_otg_core_if_t * core_if, int32_t val)
+{
+	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+		FH_WARN("Wrong value for ulpi_fs_ls\n");
+		FH_WARN("ulpi_fs_ls must be 0 or 1\n");
+		return -FH_E_INVALID;
+	}
+
+	core_if->core_params->ulpi_fs_ls = val;
+	return 0;
+}
+
+int32_t fh_otg_get_param_ulpi_fs_ls(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->ulpi_fs_ls;
+}
+
+int fh_otg_set_param_ts_dline(fh_otg_core_if_t * core_if, int32_t val)
+{
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+		FH_WARN("Wrong value for ts_dline\n");
+ FH_WARN("ts_dline must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ core_if->core_params->ts_dline = val;
+ return 0;
+}
+
+int32_t fh_otg_get_param_ts_dline(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->ts_dline;
+}
+
+int fh_otg_set_param_i2c_enable(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+		FH_WARN("Wrong value for i2c_enable\n");
+ FH_WARN("i2c_enable must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+#ifndef NO_FS_PHY_HW_CHECK
+ if (val == 1 && core_if->hwcfg3.b.i2c == 0) {
+ if (fh_otg_param_initialized(core_if->core_params->i2c_enable)) {
+ FH_ERROR
+ ("%d invalid for i2c_enable. Check HW configuration.\n",
+ val);
+ }
+ val = 0;
+ retval = -FH_E_INVALID;
+ }
+#endif
+
+ core_if->core_params->i2c_enable = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_i2c_enable(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->i2c_enable;
+}
+
+int fh_otg_set_param_dev_perio_tx_fifo_size(fh_otg_core_if_t * core_if,
+ int32_t val, int fifo_num)
+{
+ int retval = 0;
+ gintsts_data_t gintsts;
+ gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+
+ if (FH_OTG_PARAM_TEST(val, 4, 768)) {
+ FH_WARN("Wrong value for dev_perio_tx_fifo_size\n");
+ FH_WARN("dev_perio_tx_fifo_size must be 4-768\n");
+ return -FH_E_INVALID;
+ }
+
+ /*
+ if (val >
+ (FH_READ_REG32(&core_if->core_global_regs->dtxfsiz[fifo_num]) >> 16)) {
+ FH_WARN("Value is larger then power-on FIFO size\n");
+ if (fh_otg_param_initialized
+ (core_if->core_params->dev_perio_tx_fifo_size[fifo_num])) {
+ FH_ERROR
+ ("`%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n",
+ val, fifo_num);
+ }
+ val = (FH_READ_REG32(&core_if->core_global_regs->dtxfsiz[fifo_num]) >> 16);
+ retval = -FH_E_INVALID;
+ }
+ */
+
+ core_if->core_params->dev_perio_tx_fifo_size[fifo_num] = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_dev_perio_tx_fifo_size(fh_otg_core_if_t * core_if,
+ int fifo_num)
+{
+ return core_if->core_params->dev_perio_tx_fifo_size[fifo_num];
+}
+
+int fh_otg_set_param_en_multiple_tx_fifo(fh_otg_core_if_t * core_if,
+ int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+		FH_WARN("Wrong value for en_multiple_tx_fifo\n");
+ FH_WARN("en_multiple_tx_fifo must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val == 1 && core_if->hwcfg4.b.ded_fifo_en == 0) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->en_multiple_tx_fifo)) {
+ FH_ERROR
+ ("%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
+ val);
+ }
+ val = 0;
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->en_multiple_tx_fifo = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_en_multiple_tx_fifo(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->en_multiple_tx_fifo;
+}
+
+int fh_otg_set_param_dev_tx_fifo_size(fh_otg_core_if_t * core_if, int32_t val,
+ int fifo_num)
+{
+ int retval = 0;
+ fifosize_data_t txfifosize;
+ txfifosize.d32 = FH_READ_REG32(&core_if->core_global_regs->dtxfsiz[fifo_num]);
+
+ if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
+ FH_WARN("Wrong value for dev_tx_fifo_size\n");
+ FH_WARN("dev_tx_fifo_size must be 16-32768\n");
+ return -FH_E_INVALID;
+ }
+
+ /*
+ if (val > txfifosize.b.depth) {
+ FH_WARN("Value is larger then power-on FIFO size\n");
+ if (fh_otg_param_initialized
+ (core_if->core_params->dev_tx_fifo_size[fifo_num])) {
+ FH_ERROR
+ ("`%d' invalid for parameter `dev_tx_fifo_size_%d'. Check HW configuration.\n",
+ val, fifo_num);
+ }
+ val = txfifosize.b.depth;
+ retval = -FH_E_INVALID;
+ }
+ */
+
+ core_if->core_params->dev_tx_fifo_size[fifo_num] = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_dev_tx_fifo_size(fh_otg_core_if_t * core_if,
+ int fifo_num)
+{
+ return core_if->core_params->dev_tx_fifo_size[fifo_num];
+}
+
+int fh_otg_set_param_thr_ctl(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 7)) {
+ FH_WARN("Wrong value for thr_ctl\n");
+ FH_WARN("thr_ctl must be 0-7\n");
+ return -FH_E_INVALID;
+ }
+
+ if ((val != 0) &&
+ (!fh_otg_get_param_dma_enable(core_if) ||
+ !core_if->hwcfg4.b.ded_fifo_en)) {
+ if (fh_otg_param_initialized(core_if->core_params->thr_ctl)) {
+ FH_ERROR
+ ("%d invalid for parameter thr_ctl. Check HW configuration.\n",
+ val);
+ }
+ val = 0;
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->thr_ctl = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_thr_ctl(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->thr_ctl;
+}
+
+int fh_otg_set_param_lpm_enable(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("Wrong value for lpm_enable\n");
+ FH_WARN("lpm_enable must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val && !core_if->hwcfg3.b.otg_lpm_en) {
+ if (fh_otg_param_initialized(core_if->core_params->lpm_enable)) {
+ FH_ERROR
+ ("%d invalid for parameter lpm_enable. Check HW configuration.\n",
+ val);
+ }
+ val = 0;
+ retval = -FH_E_INVALID;
+ }
+
+ core_if->core_params->lpm_enable = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_lpm_enable(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->lpm_enable;
+}
+
+int fh_otg_set_param_besl_enable(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("Wrong value for besl_enable\n");
+ FH_WARN("besl_enable must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ core_if->core_params->besl_enable = val;
+
+ if(val)
+ {
+ retval += fh_otg_set_param_lpm_enable(core_if,val);
+ }
+
+ return retval;
+}
+
+int32_t fh_otg_get_param_besl_enable(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->besl_enable;
+}
+
+int fh_otg_set_param_baseline_besl(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 15)) {
+ FH_WARN("Wrong value for baseline_besl\n");
+ FH_WARN("baseline_besl must be 0-15\n");
+ return -FH_E_INVALID;
+ }
+
+ core_if->core_params->baseline_besl = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_baseline_besl(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->baseline_besl;
+}
+
+int fh_otg_set_param_deep_besl(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 15)) {
+ FH_WARN("Wrong value for deep_besl\n");
+ FH_WARN("deep_besl must be 0-15\n");
+ return -FH_E_INVALID;
+ }
+
+ core_if->core_params->deep_besl = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_deep_besl(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->deep_besl;
+}
+
+int fh_otg_set_param_tx_thr_length(fh_otg_core_if_t * core_if, int32_t val)
+{
+ if (FH_OTG_PARAM_TEST(val, 8, 128)) {
+		FH_WARN("Wrong value for tx_thr_length\n");
+ FH_WARN("tx_thr_length must be 8 - 128\n");
+ return -FH_E_INVALID;
+ }
+
+ core_if->core_params->tx_thr_length = val;
+ return 0;
+}
+
+int32_t fh_otg_get_param_tx_thr_length(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->tx_thr_length;
+}
+
+int fh_otg_set_param_rx_thr_length(fh_otg_core_if_t * core_if, int32_t val)
+{
+ if (FH_OTG_PARAM_TEST(val, 8, 128)) {
+		FH_WARN("Wrong value for rx_thr_length\n");
+ FH_WARN("rx_thr_length must be 8 - 128\n");
+ return -FH_E_INVALID;
+ }
+
+ core_if->core_params->rx_thr_length = val;
+ return 0;
+}
+
+int32_t fh_otg_get_param_rx_thr_length(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->rx_thr_length;
+}
+
+int fh_otg_set_param_dma_burst_size(fh_otg_core_if_t * core_if, int32_t val)
+{
+ if (FH_OTG_PARAM_TEST(val, 1, 1) &&
+ FH_OTG_PARAM_TEST(val, 4, 4) &&
+ FH_OTG_PARAM_TEST(val, 8, 8) &&
+ FH_OTG_PARAM_TEST(val, 16, 16) &&
+ FH_OTG_PARAM_TEST(val, 32, 32) &&
+ FH_OTG_PARAM_TEST(val, 64, 64) &&
+ FH_OTG_PARAM_TEST(val, 128, 128) &&
+ FH_OTG_PARAM_TEST(val, 256, 256)) {
+ FH_WARN("`%d' invalid for parameter `dma_burst_size'\n", val);
+ return -FH_E_INVALID;
+ }
+ core_if->core_params->dma_burst_size = val;
+ return 0;
+}
+
+int32_t fh_otg_get_param_dma_burst_size(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->dma_burst_size;
+}
+
+int fh_otg_set_param_pti_enable(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("`%d' invalid for parameter `pti_enable'\n", val);
+ return -FH_E_INVALID;
+ }
+ if (val && (core_if->snpsid < OTG_CORE_REV_2_72a)) {
+ if (fh_otg_param_initialized(core_if->core_params->pti_enable)) {
+ FH_ERROR
+ ("%d invalid for parameter pti_enable. Check HW configuration.\n",
+ val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+ core_if->core_params->pti_enable = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_pti_enable(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->pti_enable;
+}
+
+int fh_otg_set_param_mpi_enable(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("`%d' invalid for parameter `mpi_enable'\n", val);
+ return -FH_E_INVALID;
+ }
+ if (val && (core_if->hwcfg2.b.multi_proc_int == 0)) {
+ if (fh_otg_param_initialized(core_if->core_params->mpi_enable)) {
+ FH_ERROR
+ ("%d invalid for parameter mpi_enable. Check HW configuration.\n",
+ val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+ core_if->core_params->mpi_enable = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_mpi_enable(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->mpi_enable;
+}
+
+int fh_otg_set_param_adp_enable(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("`%d' invalid for parameter `adp_enable'\n", val);
+ return -FH_E_INVALID;
+ }
+ if (val && (core_if->hwcfg3.b.adp_supp == 0)) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->adp_supp_enable)) {
+ FH_ERROR
+ ("%d invalid for parameter adp_enable. Check HW configuration.\n",
+ val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+ core_if->core_params->adp_supp_enable = val;
+ /* Set OTG version 2.0 in case of enabling ADP */
+ if (val)
+ fh_otg_set_param_otg_ver(core_if, 1);
+
+ return retval;
+}
+
+int32_t fh_otg_get_param_adp_enable(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->adp_supp_enable;
+}
+
+int fh_otg_set_param_ic_usb_cap(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("`%d' invalid for parameter `ic_usb_cap'\n", val);
+ FH_WARN("ic_usb_cap must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val && (core_if->hwcfg2.b.otg_enable_ic_usb == 0)) {
+ if (fh_otg_param_initialized(core_if->core_params->ic_usb_cap)) {
+ FH_ERROR
+ ("%d invalid for parameter ic_usb_cap. Check HW configuration.\n",
+ val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+ core_if->core_params->ic_usb_cap = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_ic_usb_cap(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->ic_usb_cap;
+}
+
+int fh_otg_set_param_ahb_thr_ratio(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ int valid = 1;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 3)) {
+ FH_WARN("`%d' invalid for parameter `ahb_thr_ratio'\n", val);
+ FH_WARN("ahb_thr_ratio must be 0 - 3\n");
+ return -FH_E_INVALID;
+ }
+
+ if (val
+ && (core_if->snpsid < OTG_CORE_REV_2_81a
+ || !fh_otg_get_param_thr_ctl(core_if))) {
+ valid = 0;
+ } else if (val
+ && ((fh_otg_get_param_tx_thr_length(core_if) / (1 << val)) <
+ 4)) {
+ valid = 0;
+ }
+ if (valid == 0) {
+ if (fh_otg_param_initialized
+ (core_if->core_params->ahb_thr_ratio)) {
+ FH_ERROR
+ ("%d invalid for parameter ahb_thr_ratio. Check HW configuration.\n",
+ val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+
+ core_if->core_params->ahb_thr_ratio = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_ahb_thr_ratio(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->ahb_thr_ratio;
+}
+
+int fh_otg_set_param_power_down(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ int valid = 1;
+ hwcfg4_data_t hwcfg4 = {.d32 = 0 };
+ hwcfg4.d32 = FH_READ_REG32(&core_if->core_global_regs->ghwcfg4);
+
+ if (FH_OTG_PARAM_TEST(val, 0, 3)) {
+ FH_WARN("`%d' invalid for parameter `power_down'\n", val);
+ FH_WARN("power_down must be 0 - 3\n");
+ return -FH_E_INVALID;
+ }
+ if ((val == 1) && (hwcfg4.b.power_optimiz == 0)) {
+ val = 0;
+ }
+ if ((val == 2) && ((core_if->snpsid < OTG_CORE_REV_2_91a) || (hwcfg4.b.hiber == 0)) ) {
+ valid = 0;
+ }
+ if ((val == 3)
+ && ((core_if->snpsid < OTG_CORE_REV_3_00a)
+ || (hwcfg4.b.xhiber == 0))) {
+ valid = 0;
+ }
+ if (valid == 0) {
+ if (fh_otg_param_initialized(core_if->core_params->power_down)) {
+ FH_ERROR
+ ("%d invalid for parameter power_down. Check HW configuration.\n",
+ val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+ core_if->core_params->power_down = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_power_down(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->power_down;
+}
+
+int fh_otg_set_param_reload_ctl(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ int valid = 1;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("`%d' invalid for parameter `reload_ctl'\n", val);
+ FH_WARN("reload_ctl must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ if ((val == 1) && (core_if->snpsid < OTG_CORE_REV_2_92a)) {
+ valid = 0;
+ }
+ if (valid == 0) {
+ if (fh_otg_param_initialized(core_if->core_params->reload_ctl)) {
+ FH_ERROR("%d invalid for parameter reload_ctl."
+ "Check HW configuration.\n", val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+ core_if->core_params->reload_ctl = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_reload_ctl(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->reload_ctl;
+}
+
+int fh_otg_set_param_dev_out_nak(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ int valid = 1;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("`%d' invalid for parameter `dev_out_nak'\n", val);
+ FH_WARN("dev_out_nak must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ if ((val == 1) && ((core_if->snpsid < OTG_CORE_REV_2_93a) ||
+ !(core_if->core_params->dma_desc_enable))) {
+ valid = 0;
+ }
+ if (valid == 0) {
+ if (fh_otg_param_initialized(core_if->core_params->dev_out_nak)) {
+ FH_ERROR("%d invalid for parameter dev_out_nak."
+ "Check HW configuration.\n", val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+ core_if->core_params->dev_out_nak = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_dev_out_nak(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->dev_out_nak;
+}
+
+int fh_otg_set_param_cont_on_bna(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ int valid = 1;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("`%d' invalid for parameter `cont_on_bna'\n", val);
+ FH_WARN("cont_on_bna must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ if ((val == 1) && ((core_if->snpsid < OTG_CORE_REV_2_94a) ||
+ !(core_if->core_params->dma_desc_enable))) {
+ valid = 0;
+ }
+ if (valid == 0) {
+ if (fh_otg_param_initialized(core_if->core_params->cont_on_bna)) {
+ FH_ERROR("%d invalid for parameter cont_on_bna."
+ "Check HW configuration.\n", val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+ core_if->core_params->cont_on_bna = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_cont_on_bna(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->cont_on_bna;
+}
+
+int fh_otg_set_param_ahb_single(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+ int valid = 1;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("`%d' invalid for parameter `ahb_single'\n", val);
+ FH_WARN("ahb_single must be 0 or 1\n");
+ return -FH_E_INVALID;
+ }
+
+ if ((val == 1) && (core_if->snpsid < OTG_CORE_REV_2_94a)) {
+ valid = 0;
+ }
+ if (valid == 0) {
+ if (fh_otg_param_initialized(core_if->core_params->ahb_single)) {
+ FH_ERROR("%d invalid for parameter ahb_single."
+ "Check HW configuration.\n", val);
+ }
+ retval = -FH_E_INVALID;
+ val = 0;
+ }
+ core_if->core_params->ahb_single = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_ahb_single(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->ahb_single;
+}
+
+int fh_otg_set_param_otg_ver(fh_otg_core_if_t * core_if, int32_t val)
+{
+ int retval = 0;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 1)) {
+ FH_WARN("`%d' invalid for parameter `otg_ver'\n", val);
+ FH_WARN
+ ("otg_ver must be 0(for OTG 1.3 support) or 1(for OTG 2.0 support)\n");
+ return -FH_E_INVALID;
+ }
+
+ core_if->core_params->otg_ver = val;
+ return retval;
+}
+
+int32_t fh_otg_get_param_otg_ver(fh_otg_core_if_t * core_if)
+{
+ return core_if->core_params->otg_ver;
+}
+
+uint32_t fh_otg_get_hnpstatus(fh_otg_core_if_t * core_if)
+{
+ gotgctl_data_t otgctl;
+ otgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+ return otgctl.b.hstnegscs;
+}
+
+uint32_t fh_otg_get_srpstatus(fh_otg_core_if_t * core_if)
+{
+ gotgctl_data_t otgctl;
+ otgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+ return otgctl.b.sesreqscs;
+}
+
+void fh_otg_set_hnpreq(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ if(core_if->otg_ver == 0) {
+ gotgctl_data_t otgctl;
+ otgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+ otgctl.b.hnpreq = val;
+ FH_WRITE_REG32(&core_if->core_global_regs->gotgctl, otgctl.d32);
+ } else {
+ core_if->otg_sts = val;
+ }
+}
+
+uint32_t fh_otg_get_gsnpsid(fh_otg_core_if_t * core_if)
+{
+ return core_if->snpsid;
+}
+
+uint32_t fh_otg_get_mode(fh_otg_core_if_t * core_if)
+{
+ gintsts_data_t gintsts;
+ gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+ return gintsts.b.curmode;
+}
+
+uint32_t fh_otg_get_hnpcapable(fh_otg_core_if_t * core_if)
+{
+ gusbcfg_data_t usbcfg;
+ usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+ return usbcfg.b.hnpcap;
+}
+
+void fh_otg_set_hnpcapable(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ gusbcfg_data_t usbcfg;
+ usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+ usbcfg.b.hnpcap = val;
+ FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, usbcfg.d32);
+}
+
+uint32_t fh_otg_get_srpcapable(fh_otg_core_if_t * core_if)
+{
+ gusbcfg_data_t usbcfg;
+ usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+ return usbcfg.b.srpcap;
+}
+
+void fh_otg_set_srpcapable(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ gusbcfg_data_t usbcfg;
+ usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+ usbcfg.b.srpcap = val;
+ FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, usbcfg.d32);
+}
+
+uint32_t fh_otg_get_devspeed(fh_otg_core_if_t * core_if)
+{
+ dcfg_data_t dcfg;
+ dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
+ return dcfg.b.devspd;
+}
+
+void fh_otg_set_devspeed(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ dcfg_data_t dcfg;
+ dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
+ dcfg.b.devspd = val;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
+}
+
+uint32_t fh_otg_get_busconnected(fh_otg_core_if_t * core_if)
+{
+ hprt0_data_t hprt0;
+ hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
+ return hprt0.b.prtconnsts;
+}
+
+uint32_t fh_otg_get_enumspeed(fh_otg_core_if_t * core_if)
+{
+ dsts_data_t dsts;
+ dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+ return dsts.b.enumspd;
+}
+
+uint32_t fh_otg_get_prtpower(fh_otg_core_if_t * core_if)
+{
+ hprt0_data_t hprt0;
+ hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
+ return hprt0.b.prtpwr;
+
+}
+
+uint32_t fh_otg_get_core_state(fh_otg_core_if_t * core_if)
+{
+ return core_if->hibernation_suspend;
+}
+
+void fh_otg_set_prtpower(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ hprt0_data_t hprt0;
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtpwr = val;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+}
+
+uint32_t fh_otg_get_prtsuspend(fh_otg_core_if_t * core_if)
+{
+ hprt0_data_t hprt0;
+ hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
+ return hprt0.b.prtsusp;
+
+}
+
+void fh_otg_set_prtsuspend(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ hprt0_data_t hprt0;
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = val;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+}
+
+uint32_t fh_otg_get_fr_interval(fh_otg_core_if_t * core_if)
+{
+ hfir_data_t hfir;
+ hfir.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hfir);
+ return hfir.b.frint;
+
+}
+
+void fh_otg_set_fr_interval(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ hfir_data_t hfir;
+ uint32_t fram_int;
+ fram_int = calc_frame_interval(core_if);
+ hfir.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hfir);
+ if (!core_if->core_params->reload_ctl) {
+ FH_WARN("\nCannot reload HFIR register.HFIR.HFIRRldCtrl bit is"
+ "not set to 1.\nShould load driver with reload_ctl=1"
+ " module parameter\n");
+ return;
+ }
+ switch (fram_int) {
+ case 3750:
+ if ((val < 3350) || (val > 4150)) {
+ FH_WARN("HFIR interval for HS core and 30 MHz"
+ "clock freq should be from 3350 to 4150\n");
+ return;
+ }
+ break;
+ case 30000:
+ if ((val < 26820) || (val > 33180)) {
+ FH_WARN("HFIR interval for FS/LS core and 30 MHz"
+ "clock freq should be from 26820 to 33180\n");
+ return;
+ }
+ break;
+ case 6000:
+ if ((val < 5360) || (val > 6640)) {
+ FH_WARN("HFIR interval for HS core and 48 MHz"
+ "clock freq should be from 5360 to 6640\n");
+ return;
+ }
+ break;
+ case 48000:
+ if ((val < 42912) || (val > 53088)) {
+ FH_WARN("HFIR interval for FS/LS core and 48 MHz"
+ "clock freq should be from 42912 to 53088\n");
+ return;
+ }
+ break;
+ case 7500:
+ if ((val < 6700) || (val > 8300)) {
+ FH_WARN("HFIR interval for HS core and 60 MHz"
+ "clock freq should be from 6700 to 8300\n");
+ return;
+ }
+ break;
+ case 60000:
+ if ((val < 53640) || (val > 65536)) {
+ FH_WARN("HFIR interval for FS/LS core and 60 MHz"
+ "clock freq should be from 53640 to 65536\n");
+ return;
+ }
+ break;
+ default:
+ FH_WARN("Unknown frame interval\n");
+ return;
+ break;
+
+ }
+ hfir.b.frint = val;
+ FH_WRITE_REG32(&core_if->host_if->host_global_regs->hfir, hfir.d32);
+}
+
+uint32_t fh_otg_get_mode_ch_tim(fh_otg_core_if_t * core_if)
+{
+ hcfg_data_t hcfg;
+ hcfg.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
+ return hcfg.b.modechtimen;
+
+}
+
+void fh_otg_set_mode_ch_tim(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ hcfg_data_t hcfg;
+ hcfg.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
+ hcfg.b.modechtimen = val;
+ FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+}
+
+void fh_otg_set_prtresume(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ hprt0_data_t hprt0;
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtres = val;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+}
+
+uint32_t fh_otg_get_remotewakesig(fh_otg_core_if_t * core_if)
+{
+ dctl_data_t dctl;
+ dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
+ return dctl.b.rmtwkupsig;
+}
+
+uint32_t fh_otg_get_beslreject(fh_otg_core_if_t * core_if)
+{
+ dctl_data_t dctl;
+ dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
+ return dctl.b.besl_reject;
+}
+
+void fh_otg_set_beslreject(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ dctl_data_t dctl;
+ dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
+ dctl.b.besl_reject = val;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
+}
+uint32_t fh_otg_get_hirdthresh(fh_otg_core_if_t * core_if)
+{
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ return lpmcfg.b.hird_thres;
+}
+
+void fh_otg_set_hirdthresh(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ glpmcfg_data_t lpmcfg;
+
+ if (FH_OTG_PARAM_TEST(val, 0, 15)) {
+		FH_WARN("Wrong value for hird_thres\n");
+ FH_WARN("hird_thres must be 0-f\n");
+ return ;
+ }
+
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ lpmcfg.b.hird_thres &= (1<<4);
+ lpmcfg.b.hird_thres |= val;
+ FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+}
+
+uint32_t fh_otg_get_lpm_portsleepstatus(fh_otg_core_if_t * core_if)
+{
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+
+ FH_ASSERT(!
+ ((core_if->lx_state == FH_OTG_L1) ^ lpmcfg.b.prt_sleep_sts),
+ "lx_state = %d, lmpcfg.prt_sleep_sts = %d\n",
+ core_if->lx_state, lpmcfg.b.prt_sleep_sts);
+
+ return lpmcfg.b.prt_sleep_sts;
+}
+
+uint32_t fh_otg_get_lpm_remotewakeenabled(fh_otg_core_if_t * core_if)
+{
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ return lpmcfg.b.rem_wkup_en;
+}
+
+uint32_t fh_otg_get_lpmresponse(fh_otg_core_if_t * core_if)
+{
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ return lpmcfg.b.appl_resp;
+}
+
+void fh_otg_set_lpmresponse(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ lpmcfg.b.appl_resp = val;
+ FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+}
+
+uint32_t fh_otg_get_hsic_connect(fh_otg_core_if_t * core_if)
+{
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ return lpmcfg.b.hsic_connect;
+}
+
+void fh_otg_set_hsic_connect(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ lpmcfg.b.hsic_connect = val;
+ FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+}
+
+uint32_t fh_otg_get_inv_sel_hsic(fh_otg_core_if_t * core_if)
+{
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ return lpmcfg.b.inv_sel_hsic;
+
+}
+
+void fh_otg_set_inv_sel_hsic(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ lpmcfg.b.inv_sel_hsic = val;
+ FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+}
+
+uint32_t fh_otg_get_gotgctl(fh_otg_core_if_t * core_if)
+{
+ return FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+}
+
+void fh_otg_set_gotgctl(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ FH_WRITE_REG32(&core_if->core_global_regs->gotgctl, val);
+}
+
+uint32_t fh_otg_get_gusbcfg(fh_otg_core_if_t * core_if)
+{
+ return FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+}
+
+void fh_otg_set_gusbcfg(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, val);
+}
+
+uint32_t fh_otg_get_grxfsiz(fh_otg_core_if_t * core_if)
+{
+ return FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
+}
+
+void fh_otg_set_grxfsiz(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ FH_WRITE_REG32(&core_if->core_global_regs->grxfsiz, val);
+}
+
+uint32_t fh_otg_get_gnptxfsiz(fh_otg_core_if_t * core_if)
+{
+ return FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz);
+}
+
+void fh_otg_set_gnptxfsiz(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ FH_WRITE_REG32(&core_if->core_global_regs->gnptxfsiz, val);
+}
+
+uint32_t fh_otg_get_gpvndctl(fh_otg_core_if_t * core_if)
+{
+ return FH_READ_REG32(&core_if->core_global_regs->gpvndctl);
+}
+
+void fh_otg_set_gpvndctl(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ FH_WRITE_REG32(&core_if->core_global_regs->gpvndctl, val);
+}
+
+uint32_t fh_otg_get_ggpio(fh_otg_core_if_t * core_if)
+{
+ return FH_READ_REG32(&core_if->core_global_regs->ggpio);
+}
+
+void fh_otg_set_ggpio(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ FH_WRITE_REG32(&core_if->core_global_regs->ggpio, val);
+}
+
+uint32_t fh_otg_get_hprt0(fh_otg_core_if_t * core_if)
+{
+ return FH_READ_REG32(core_if->host_if->hprt0);
+
+}
+
+void fh_otg_set_hprt0(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ FH_WRITE_REG32(core_if->host_if->hprt0, val);
+}
+
+uint32_t fh_otg_get_guid(fh_otg_core_if_t * core_if)
+{
+ return FH_READ_REG32(&core_if->core_global_regs->guid);
+}
+
+void fh_otg_set_guid(fh_otg_core_if_t * core_if, uint32_t val)
+{
+ FH_WRITE_REG32(&core_if->core_global_regs->guid, val);
+}
+
+uint32_t fh_otg_get_hptxfsiz(fh_otg_core_if_t * core_if)
+{
+ return FH_READ_REG32(&core_if->core_global_regs->hptxfsiz);
+}
+
+uint16_t fh_otg_get_otg_version(fh_otg_core_if_t * core_if)
+{
+ return ((core_if->otg_ver == 1) ? (uint16_t)0x0200 : (uint16_t)0x0103);
+}
+
+/**
+ * Start the SRP timer to detect when the SRP does not complete within
+ * 6 seconds.
+ *
+ * @param core_if the pointer to core_if strucure.
+ */
+void fh_otg_pcd_start_srp_timer(fh_otg_core_if_t * core_if)
+{
+ core_if->srp_timer_started = 1;
+ FH_TIMER_SCHEDULE(core_if->srp_timer, 6000 /* 6 secs */ );
+}
+
+void fh_otg_initiate_srp(void * p)
+{
+ fh_otg_core_if_t * core_if = p;
+ uint32_t *addr = (uint32_t *) & (core_if->core_global_regs->gotgctl);
+ gotgctl_data_t mem;
+ gotgctl_data_t val;
+
+ val.d32 = FH_READ_REG32(addr);
+ if (val.b.sesreq) {
+ FH_ERROR("Session Request Already active!\n");
+ return;
+ }
+
+	FH_INFO("Session Request Initiated\n");	//NOTICE
+ mem.d32 = FH_READ_REG32(addr);
+ mem.b.sesreq = 1;
+ FH_WRITE_REG32(addr, mem.d32);
+
+ /* Start the SRP timer */
+ fh_otg_pcd_start_srp_timer(core_if);
+ return;
+}
+
+int fh_otg_check_haps_status(fh_otg_core_if_t * core_if)
+{
+ int retval = 0;
+
+ if(FH_READ_REG32(&core_if->core_global_regs->gsnpsid) == 0xffffffff)
+ {
+ return -1;
+ } else {
+ return retval;
+ }
+
+}
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.h
new file mode 100644
index 00000000..bf5e773a
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.h
@@ -0,0 +1,1503 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_cil.h $
+ * $Revision: #136 $
+ * $Date: 2015/10/12 $
+ * $Change: 2972621 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__FH_CIL_H__)
+#define __FH_CIL_H__
+
+#include "../fh_common_port/fh_list.h"
+#include "fh_otg_dbg.h"
+#include "fh_otg_regs.h"
+
+#include "fh_otg_core_if.h"
+#include "fh_otg_adp.h"
+
+/**
+ * @file
+ * This file contains the interface to the Core Interface Layer.
+ */
+
+#ifdef FH_UTE_CFI
+
+#define MAX_DMA_DESCS_PER_EP 256
+
+/**
+ * Enumeration for the data buffer mode
+ */
+typedef enum _data_buffer_mode {
+ BM_STANDARD = 0, /* data buffer is in normal mode */
+ BM_SG = 1, /* data buffer uses the scatter/gather mode */
+ BM_CONCAT = 2, /* data buffer uses the concatenation mode */
+ BM_CIRCULAR = 3, /* data buffer uses the circular DMA mode */
+ BM_ALIGN = 4 /* data buffer is in buffer alignment mode */
+} data_buffer_mode_e;
+#endif //FH_UTE_CFI
+
+/** Macros defined for FH OTG HW Release version */
+
+#define OTG_CORE_REV_2_60a 0x4F54260A
+#define OTG_CORE_REV_2_71a 0x4F54271A
+#define OTG_CORE_REV_2_72a 0x4F54272A
+#define OTG_CORE_REV_2_80a 0x4F54280A
+#define OTG_CORE_REV_2_81a 0x4F54281A
+#define OTG_CORE_REV_2_90a 0x4F54290A
+#define OTG_CORE_REV_2_91a 0x4F54291A
+#define OTG_CORE_REV_2_92a 0x4F54292A
+#define OTG_CORE_REV_2_93a 0x4F54293A
+#define OTG_CORE_REV_2_94a 0x4F54294A
+#define OTG_CORE_REV_3_00a 0x4F54300A
+#define OTG_CORE_REV_3_10a 0x4F54310A
+#define OTG_CORE_REV_3_20a 0x4F54320A
+#define OTG_CORE_REV_3_30a 0x4F54330A
+
+/**
+ * Information for each ISOC packet.
+ */
+typedef struct iso_pkt_info {
+	uint32_t offset;	/**< packet data offset -- presumably within the transfer buffer */
+	uint32_t length;	/**< packet length in bytes */
+	int32_t status;		/**< per-packet completion status */
+} iso_pkt_info_t;
+
+/**
+ * The <code>fh_ep</code> structure represents the state of a single
+ * endpoint when acting in device mode. It contains the data items
+ * needed for an endpoint to be activated and transfer packets.
+ */
+typedef struct fh_ep {
+ /** EP number used for register address lookup */
+ uint8_t num;
+ /** EP direction 0 = OUT */
+ unsigned is_in:1;
+ /** EP active. */
+ unsigned active:1;
+
+ /**
+ * Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use non-periodic
+ * Tx FIFO. If dedicated Tx FIFOs are enabled Tx FIFO # FOR IN EPs*/
+ unsigned tx_fifo_num:4;
+ /** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */
+ unsigned type:2;
+#define FH_OTG_EP_TYPE_CONTROL 0
+#define FH_OTG_EP_TYPE_ISOC 1
+#define FH_OTG_EP_TYPE_BULK 2
+#define FH_OTG_EP_TYPE_INTR 3
+
+ /** DATA start PID for INTR and BULK EP */
+ unsigned data_pid_start:1;
+ /** Frame (even/odd) for ISOC EP */
+ unsigned even_odd_frame:1;
+ /** Max Packet bytes */
+ unsigned maxpacket:11;
+
+ /** Max Transfer size */
+ uint32_t maxxfer;
+
+ /** @name Transfer state */
+ /** @{ */
+
+ /**
+ * Pointer to the beginning of the transfer buffer -- do not modify
+ * during transfer.
+ */
+ fh_dma_t dma_addr;
+
+ fh_dma_t dma_desc_addr;
+ fh_otg_dev_dma_desc_t *desc_addr;
+
+ /* Additional desc chain for ISO transfers */
+ fh_dma_t dma_desc_addr1;
+ fh_otg_dev_dma_desc_t *desc_addr1;
+ /* Flag indicating which one of two ISO desc chains currently is in use */
+ uint8_t use_add_buf;
+
+ uint8_t *start_xfer_buff;
+ /** pointer to the transfer buffer */
+ uint8_t *xfer_buff;
+ /** Number of bytes to transfer */
+ unsigned xfer_len:19;
+ /** Number of bytes transferred. */
+ unsigned xfer_count:19;
+ /** Sent ZLP */
+ unsigned sent_zlp:1;
+ /** Total len for control transfer */
+ unsigned total_len:19;
+
+ /** stall clear flag */
+ unsigned stall_clear_flag:1;
+
+ /** SETUP pkt cnt rollover flag for EP0 out*/
+ unsigned stp_rollover;
+
+#ifdef FH_UTE_CFI
+ /* The buffer mode */
+ data_buffer_mode_e buff_mode;
+
+ /* The chain of DMA descriptors.
+ * MAX_DMA_DESCS_PER_EP will be allocated for each active EP.
+ */
+ fh_otg_dma_desc_t *descs;
+
+ /* The DMA address of the descriptors chain start */
+ dma_addr_t descs_dma_addr;
+ /** This variable stores the length of the last enqueued request */
+ uint32_t cfi_req_len;
+#endif //FH_UTE_CFI
+
+/** Max DMA Descriptor count for any EP */
+#define MAX_DMA_DESC_CNT 256
+ /** Allocated DMA Desc count */
+ uint32_t desc_cnt;
+
+ /** First ISO Desc in use in the first chain*/
+ uint32_t iso_desc_first;
+ /** Last ISO Desc in use in the second chain */
+ uint32_t iso_desc_second;
+ /** Flag indicated that iso transfers were started */
+ uint8_t iso_transfer_started;
+
+ /** bInterval */
+ uint32_t bInterval;
+ /** Next frame num to setup next ISOC transfer */
+ uint32_t frame_num;
+ /** Indicates SOF number overrun in DSTS */
+ uint8_t frm_overrun;
+
+#ifdef FH_UTE_PER_IO
+ /** Next frame num for which will be setup DMA Desc */
+ uint32_t xiso_frame_num;
+ /** bInterval */
+ uint32_t xiso_bInterval;
+ /** Count of currently active transfers - shall be either 0 or 1 */
+ int xiso_active_xfers;
+ int xiso_queued_xfers;
+#endif
+#ifdef FH_EN_ISOC
+ /**
+ * Variables specific for ISOC EPs
+ *
+ */
+ /** DMA addresses of ISOC buffers */
+ fh_dma_t dma_addr0;
+ fh_dma_t dma_addr1;
+
+ fh_dma_t iso_dma_desc_addr;
+ fh_otg_dev_dma_desc_t *iso_desc_addr;
+
+ /** pointer to the transfer buffers */
+ uint8_t *xfer_buff0;
+ uint8_t *xfer_buff1;
+
+ /** number of ISOC Buffer is processing */
+ uint32_t proc_buf_num;
+ /** Interval of ISOC Buffer processing */
+ uint32_t buf_proc_intrvl;
+ /** Data size for regular frame */
+ uint32_t data_per_frame;
+
+ /* todo - pattern data support is to be implemented in the future */
+ /** Data size for pattern frame */
+ uint32_t data_pattern_frame;
+ /** Frame number of pattern data */
+ uint32_t sync_frame;
+
+ /** bInterval */
+ uint32_t bInterval;
+ /** ISO Packet number per frame */
+ uint32_t pkt_per_frm;
+ /** Next frame num for which will be setup DMA Desc */
+ uint32_t next_frame;
+ /** Number of packets per buffer processing */
+ uint32_t pkt_cnt;
+ /** Info for all isoc packets */
+ iso_pkt_info_t *pkt_info;
+ /** current pkt number */
+ uint32_t cur_pkt;
+ /** current pkt number */
+ uint8_t *cur_pkt_addr;
+ /** current pkt number */
+ uint32_t cur_pkt_dma_addr;
+#endif /* FH_EN_ISOC */
+
+/** @} */
+} fh_ep_t;
+
+/*
+ * Reasons for halting a host channel; stored in fh_hc.halt_status.
+ */
+typedef enum fh_otg_halt_status {
+	FH_OTG_HC_XFER_NO_HALT_STATUS,
+	FH_OTG_HC_XFER_COMPLETE,
+	FH_OTG_HC_XFER_URB_COMPLETE,
+	FH_OTG_HC_XFER_ACK,
+	FH_OTG_HC_XFER_NAK,
+	FH_OTG_HC_XFER_NYET,
+	FH_OTG_HC_XFER_STALL,
+	FH_OTG_HC_XFER_XACT_ERR,
+	FH_OTG_HC_XFER_FRAME_OVERRUN,
+	FH_OTG_HC_XFER_BABBLE_ERR,
+	FH_OTG_HC_XFER_DATA_TOGGLE_ERR,
+	FH_OTG_HC_XFER_AHB_ERR,
+	FH_OTG_HC_XFER_PERIODIC_INCOMPLETE,
+	FH_OTG_HC_XFER_URB_DEQUEUE
+} fh_otg_halt_status_e;
+
+/**
+ * Host channel descriptor. This structure represents the state of a single
+ * host channel when acting in host mode. It contains the data items needed to
+ * transfer packets to an endpoint via a host channel.
+ */
+typedef struct fh_hc {
+ /** Host channel number used for register address lookup */
+ uint8_t hc_num;
+
+ /** Device to access */
+ unsigned dev_addr:7;
+
+ /** EP to access */
+ unsigned ep_num:4;
+
+ /** EP direction. 0: OUT, 1: IN */
+ unsigned ep_is_in:1;
+
+ /**
+ * EP speed.
+ * One of the following values:
+ * - FH_OTG_EP_SPEED_LOW
+ * - FH_OTG_EP_SPEED_FULL
+ * - FH_OTG_EP_SPEED_HIGH
+ */
+ unsigned speed:2;
+#define FH_OTG_EP_SPEED_LOW 0
+#define FH_OTG_EP_SPEED_FULL 1
+#define FH_OTG_EP_SPEED_HIGH 2
+
+ /**
+ * Endpoint type.
+ * One of the following values:
+ * - FH_OTG_EP_TYPE_CONTROL: 0
+ * - FH_OTG_EP_TYPE_ISOC: 1
+ * - FH_OTG_EP_TYPE_BULK: 2
+ * - FH_OTG_EP_TYPE_INTR: 3
+ */
+ unsigned ep_type:2;
+
+ /** Max packet size in bytes */
+ unsigned max_packet:11;
+
+ /**
+ * PID for initial transaction.
+ * 0: DATA0,<br>
+ * 1: DATA2,<br>
+ * 2: DATA1,<br>
+ * 3: MDATA (non-Control EP),
+ * SETUP (Control EP)
+ */
+ unsigned data_pid_start:2;
+#define FH_OTG_HC_PID_DATA0 0
+#define FH_OTG_HC_PID_DATA2 1
+#define FH_OTG_HC_PID_DATA1 2
+#define FH_OTG_HC_PID_MDATA 3
+#define FH_OTG_HC_PID_SETUP 3
+
+ /** Number of periodic transactions per (micro)frame */
+ unsigned multi_count:2;
+
+ /** @name Transfer State */
+ /** @{ */
+
+ /** Pointer to the current transfer buffer position. */
+ uint8_t *xfer_buff;
+ /**
+ * In Buffer DMA mode this buffer will be used
+ * if xfer_buff is not DWORD aligned.
+ */
+ fh_dma_t align_buff;
+ /** Total number of bytes to transfer. */
+ uint32_t xfer_len;
+ /** Number of bytes transferred so far. */
+ uint32_t xfer_count;
+ /** Packet count at start of transfer.*/
+ uint16_t start_pkt_count;
+
+ /**
+ * Flag to indicate whether the transfer has been started. Set to 1 if
+ * it has been started, 0 otherwise.
+ */
+ uint8_t xfer_started;
+
+ /**
+ * Set to 1 to indicate that a PING request should be issued on this
+ * channel. If 0, process normally.
+ */
+ uint8_t do_ping;
+
+ /**
+ * Set to 1 to indicate that the error count for this transaction is
+ * non-zero. Set to 0 if the error count is 0.
+ */
+ uint8_t error_state;
+
+ /**
+ * Set to 1 to indicate that this channel should be halted the next
+ * time a request is queued for the channel. This is necessary in
+ * slave mode if no request queue space is available when an attempt
+ * is made to halt the channel.
+ */
+ uint8_t halt_on_queue;
+
+ /**
+ * Set to 1 if the host channel has been halted, but the core is not
+ * finished flushing queued requests. Otherwise 0.
+ */
+ uint8_t halt_pending;
+
+ /**
+ * Reason for halting the host channel.
+ */
+ fh_otg_halt_status_e halt_status;
+
+ /*
+ * Split settings for the host channel
+ */
+ uint8_t do_split; /**< Enable split for the channel */
+ uint8_t complete_split; /**< Enable complete split */
+ uint8_t hub_addr; /**< Address of high speed hub */
+
+ uint8_t port_addr; /**< Port of the low/full speed device */
+ /** Split transaction position
+ * One of the following values:
+ * - FH_HCSPLIT_XACTPOS_MID
+ * - FH_HCSPLIT_XACTPOS_BEGIN
+ * - FH_HCSPLIT_XACTPOS_END
+ * - FH_HCSPLIT_XACTPOS_ALL */
+ uint8_t xact_pos;
+
+ /** Set when the host channel does a short read. */
+ uint8_t short_read;
+
+ /**
+ * Number of requests issued for this channel since it was assigned to
+ * the current transfer (not counting PINGs).
+ */
+ uint8_t requests;
+
+ /**
+ * Queue Head for the transfer being processed by this channel.
+ */
+ struct fh_otg_qh *qh;
+
+ /** @} */
+
+ /** Entry in list of host channels. */
+ FH_CIRCLEQ_ENTRY(fh_hc) hc_list_entry;
+
+ /** @name Descriptor DMA support */
+ /** @{ */
+
+ /** Number of Transfer Descriptors */
+ uint16_t ntd;
+
+ /** Descriptor List DMA address */
+ fh_dma_t desc_list_addr;
+
+ /** Scheduling micro-frame bitmap. */
+ uint8_t schinfo;
+
+ /** @} */
+} fh_hc_t;
+
+/**
+ * The following parameters may be specified when starting the module. These
+ * parameters define how the FH_otg controller should be configured.
+ */
+typedef struct fh_otg_core_params {
+ int32_t opt;
+
+ /**
+ * Specifies the OTG capabilities. The driver will automatically
+ * detect the value for this parameter if none is specified.
+ * 0 - HNP and SRP capable (default)
+ * 1 - SRP Only capable
+ * 2 - No HNP/SRP capable
+ */
+ int32_t otg_cap;
+
+ /**
+ * Specifies whether to use slave or DMA mode for accessing the data
+ * FIFOs. The driver will automatically detect the value for this
+ * parameter if none is specified.
+ * 0 - Slave
+ * 1 - DMA (default, if available)
+ */
+ int32_t dma_enable;
+
+ /**
+ * When DMA mode is enabled specifies whether to use address DMA or DMA
+ * Descriptor mode for accessing the data FIFOs in device mode. The driver
+ * will automatically detect the value for this if none is specified.
+ * 0 - address DMA
+ * 1 - DMA Descriptor(default, if available)
+ */
+ int32_t dma_desc_enable;
+	/** The DMA Burst size (applicable only for External DMA
+	 * Mode). 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
+	 */
+ int32_t dma_burst_size; /* Translate this to GAHBCFG values */
+
+ /**
+ * Specifies the maximum speed of operation in host and device mode.
+ * The actual speed depends on the speed of the attached device and
+ * the value of phy_type. The actual speed depends on the speed of the
+ * attached device.
+ * 0 - High Speed (default)
+ * 1 - Full Speed
+ */
+ int32_t speed;
+ /** Specifies whether low power mode is supported when attached
+ * to a Full Speed or Low Speed device in host mode.
+ * 0 - Don't support low power mode (default)
+ * 1 - Support low power mode
+ */
+ int32_t host_support_fs_ls_low_power;
+
+ /** Specifies the PHY clock rate in low power mode when connected to a
+ * Low Speed device in host mode. This parameter is applicable only if
+ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
+ * then defaults to 6 MHZ otherwise 48 MHZ.
+ *
+ * 0 - 48 MHz
+ * 1 - 6 MHz
+ */
+ int32_t host_ls_low_power_phy_clk;
+
+ /**
+ * 0 - Use cC FIFO size parameters
+ * 1 - Allow dynamic FIFO sizing (default)
+ */
+ int32_t enable_dynamic_fifo;
+
+ /** Total number of 4-byte words in the data FIFO memory. This
+ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
+ * Tx FIFOs.
+ * 32 to 32768 (default 8192)
+ * Note: The total FIFO memory depth in the FPGA configuration is 8192.
+ */
+ int32_t data_fifo_size;
+
+ /** Number of 4-byte words in the Rx FIFO in device mode when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1064)
+ */
+ int32_t dev_rx_fifo_size;
+
+ /** Number of 4-byte words in the non-periodic Tx FIFO in device mode
+ * when dynamic FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+ int32_t dev_nperio_tx_fifo_size;
+
+ /** Number of 4-byte words in each of the periodic Tx FIFOs in device
+ * mode when dynamic FIFO sizing is enabled.
+ * 4 to 768 (default 256)
+ */
+ uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS];
+
+ /** Number of 4-byte words in the Rx FIFO in host mode when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+ int32_t host_rx_fifo_size;
+
+ /** Number of 4-byte words in the non-periodic Tx FIFO in host mode
+ * when Dynamic FIFO sizing is enabled in the core.
+ * 16 to 32768 (default 1024)
+ */
+ int32_t host_nperio_tx_fifo_size;
+
+ /** Number of 4-byte words in the host periodic Tx FIFO when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+ int32_t host_perio_tx_fifo_size;
+
+ /** The maximum transfer size supported in bytes.
+ * 2047 to 65,535 (default 65,535)
+ */
+ int32_t max_transfer_size;
+
+ /** The maximum number of packets in a transfer.
+ * 15 to 511 (default 511)
+ */
+ int32_t max_packet_count;
+
+ /** The number of host channel registers to use.
+ * 1 to 16 (default 12)
+ * Note: The FPGA configuration supports a maximum of 12 host channels.
+ */
+ int32_t host_channels;
+
+ /** The number of endpoints in addition to EP0 available for device
+ * mode operations.
+ * 1 to 15 (default 6 IN and OUT)
+ * Note: The FPGA configuration supports a maximum of 6 IN and OUT
+ * endpoints in addition to EP0.
+ */
+ int32_t dev_endpoints;
+
+ /**
+ * Specifies the type of PHY interface to use. By default, the driver
+ * will automatically detect the phy_type.
+ *
+ * 0 - Full Speed PHY
+ * 1 - UTMI+ (default)
+ * 2 - ULPI
+ */
+ int32_t phy_type;
+
+ /**
+ * Specifies the UTMI+ Data Width. This parameter is
+ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
+ * PHY_TYPE, this parameter indicates the data width between
+ * the MAC and the ULPI Wrapper.) Also, this parameter is
+ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
+ * to "8 and 16 bits", meaning that the core has been
+ * configured to work at either data path width.
+ *
+ * 8 or 16 bits (default 16)
+ */
+ int32_t phy_utmi_width;
+
+ /**
+ * Specifies whether the ULPI operates at double or single
+ * data rate. This parameter is only applicable if PHY_TYPE is
+ * ULPI.
+ *
+ * 0 - single data rate ULPI interface with 8 bit wide data
+ * bus (default)
+ * 1 - double data rate ULPI interface with 4 bit wide data
+ * bus
+ */
+ int32_t phy_ulpi_ddr;
+
+ /**
+ * Specifies whether to use the internal or external supply to
+ * drive the vbus with a ULPI phy.
+ */
+ int32_t phy_ulpi_ext_vbus;
+
+ /**
+ * Specifies whether to use the I2Cinterface for full speed PHY. This
+ * parameter is only applicable if PHY_TYPE is FS.
+ * 0 - No (default)
+ * 1 - Yes
+ */
+ int32_t i2c_enable;
+
+ int32_t ulpi_fs_ls;
+
+ int32_t ts_dline;
+
+ /**
+ * Specifies whether dedicated transmit FIFOs are
+ * enabled for non periodic IN endpoints in device mode
+ * 0 - No
+ * 1 - Yes
+ */
+ int32_t en_multiple_tx_fifo;
+
+ /** Number of 4-byte words in each of the Tx FIFOs in device
+ * mode when dynamic FIFO sizing is enabled.
+ * 4 to 768 (default 256)
+ */
+ uint32_t dev_tx_fifo_size[MAX_TX_FIFOS];
+
+ /** Thresholding enable flag-
+ * bit 0 - enable non-ISO Tx thresholding
+ * bit 1 - enable ISO Tx thresholding
+ * bit 2 - enable Rx thresholding
+ */
+ uint32_t thr_ctl;
+
+ /** Thresholding length for Tx
+ * FIFOs in 32 bit DWORDs
+ */
+ uint32_t tx_thr_length;
+
+ /** Thresholding length for Rx
+ * FIFOs in 32 bit DWORDs
+ */
+ uint32_t rx_thr_length;
+
+ /**
+ * Specifies whether LPM (Link Power Management) support is enabled
+ */
+ int32_t lpm_enable;
+
+ /**
+ * Specifies whether LPM Errata (Link Power Management) support is enabled
+ */
+ int32_t besl_enable;
+
+ /**
+ * Specifies the baseline besl value
+ */
+ int32_t baseline_besl;
+
+ /**
+ * Specifies the deep besl value
+ */
+ int32_t deep_besl;
+ /** Per Transfer Interrupt
+ * mode enable flag
+ * 1 - Enabled
+ * 0 - Disabled
+ */
+ int32_t pti_enable;
+
+ /** Multi Processor Interrupt
+ * mode enable flag
+ * 1 - Enabled
+ * 0 - Disabled
+ */
+ int32_t mpi_enable;
+
+ /** IS_USB Capability
+ * 1 - Enabled
+ * 0 - Disabled
+ */
+ int32_t ic_usb_cap;
+
+ /** AHB Threshold Ratio
+ * 2'b00 AHB Threshold = MAC Threshold
+ * 2'b01 AHB Threshold = 1/2 MAC Threshold
+ * 2'b10 AHB Threshold = 1/4 MAC Threshold
+ * 2'b11 AHB Threshold = 1/8 MAC Threshold
+ */
+ int32_t ahb_thr_ratio;
+
+ /** ADP Support
+ * 1 - Enabled
+ * 0 - Disabled
+ */
+ int32_t adp_supp_enable;
+
+ /** HFIR Reload Control
+ * 0 - The HFIR cannot be reloaded dynamically.
+ * 1 - Allow dynamic reloading of the HFIR register during runtime.
+ */
+ int32_t reload_ctl;
+
+ /** DCFG: Enable device Out NAK
+ * 0 - The core does not set NAK after Bulk Out transfer complete.
+ * 1 - The core sets NAK after Bulk OUT transfer complete.
+ */
+ int32_t dev_out_nak;
+
+ /** DCFG: Enable Continue on BNA
+ * After receiving BNA interrupt the core disables the endpoint,when the
+ * endpoint is re-enabled by the application the core starts processing
+ * 0 - from the DOEPDMA descriptor
+ * 1 - from the descriptor which received the BNA.
+ */
+ int32_t cont_on_bna;
+
+ /** GAHBCFG: AHB Single Support
+ * This bit when programmed supports SINGLE transfers for remainder
+ * data in a transfer for DMA mode of operation.
+ * 0 - in this case the remainder data will be sent using INCR burst size.
+ * 1 - in this case the remainder data will be sent using SINGLE burst size.
+ */
+ int32_t ahb_single;
+
+ /** Core Power down mode
+ * 0 - No Power Down is enabled
+ * 1 - Reserved
+ * 2 - Complete Power Down (Hibernation)
+ */
+ int32_t power_down;
+
+ /** OTG revision supported
+ * 0 - OTG 1.3 revision
+ * 1 - OTG 2.0 revision
+ */
+ int32_t otg_ver;
+
+} fh_otg_core_params_t;
+
+#ifdef DEBUG
+struct fh_otg_core_if;
+typedef struct hc_xfer_info {	/* context for hc_xfer_timeout() / hc_xfer_timer[] (DEBUG builds only) */
+	struct fh_otg_core_if *core_if;
+	fh_hc_t *hc;
+} hc_xfer_info_t;
+#endif
+
+typedef struct ep_xfer_info {	/* context for ep_xfer_timeout() / ep_xfer_timer[] */
+	struct fh_otg_core_if *core_if;
+	fh_ep_t *ep;
+	uint8_t state;
+} ep_xfer_info_t;
+/*
+ * Device States
+ */
+typedef enum fh_otg_lx_state {
+	/** On state */
+	FH_OTG_L0,
+	/** LPM sleep state */
+	FH_OTG_L1,
+	/** USB suspend state */
+	FH_OTG_L2,
+	/** Off state */
+	FH_OTG_L3
+} fh_otg_lx_state_e;
+
+struct fh_otg_global_regs_backup {	/* shadow of core global registers for save/restore */
+	uint32_t gotgctl_local;
+	uint32_t gintmsk_local;
+	uint32_t gahbcfg_local;
+	uint32_t gusbcfg_local;
+	uint32_t grxfsiz_local;
+	uint32_t gnptxfsiz_local;
+#ifdef CONFIG_USB_FH_OTG_LPM
+	uint32_t glpmcfg_local;
+#endif
+	uint32_t gi2cctl_local;
+	uint32_t hptxfsiz_local;
+	uint32_t pcgcctl_local;
+	uint32_t gdfifocfg_local;
+	uint32_t dtxfsiz_local[MAX_EPS_CHANNELS];
+	uint32_t gpwrdn_local;
+	uint32_t xhib_pcgcctl;	/* extended-hibernation copies */
+	uint32_t xhib_gpwrdn;
+};
+
+struct fh_otg_host_regs_backup {	/* shadow of host-mode registers for save/restore */
+	uint32_t hcfg_local;
+	uint32_t haintmsk_local;
+	uint32_t hcintmsk_local[MAX_EPS_CHANNELS];
+	uint32_t hprt0_local;
+	uint32_t hfir_local;
+};
+
+struct fh_otg_dev_regs_backup {	/* shadow of device-mode registers for save/restore */
+	uint32_t dcfg;
+	uint32_t dctl;
+	uint32_t daintmsk;
+	uint32_t diepmsk;
+	uint32_t doepmsk;
+	uint32_t diepctl[MAX_EPS_CHANNELS];
+	uint32_t dieptsiz[MAX_EPS_CHANNELS];
+	uint32_t diepdma[MAX_EPS_CHANNELS];
+	uint32_t doepctl[MAX_EPS_CHANNELS];
+	uint32_t doeptsiz[MAX_EPS_CHANNELS];
+	uint32_t doepdma[MAX_EPS_CHANNELS];
+};
+/**
+ * The <code>fh_otg_core_if</code> structure contains information needed to manage
+ * the FH_otg controller acting in either host or device mode. It
+ * represents the programming view of the controller as a whole.
+ */
+struct fh_otg_core_if {
+ /** Parameters that define how the core should be configured.*/
+ fh_otg_core_params_t *core_params;
+
+ /** Core Global registers starting at offset 000h. */
+ fh_otg_core_global_regs_t *core_global_regs;
+
+ /** Device-specific information */
+ fh_otg_dev_if_t *dev_if;
+ /** Host-specific information */
+ fh_otg_host_if_t *host_if;
+
+ /** Value from SNPSID register */
+ uint32_t snpsid;
+
+ /*
+ * Set to 1 if the core PHY interface bits in USBCFG have been
+ * initialized.
+ */
+ uint8_t phy_init_done;
+
+ /*
+ * SRP Success flag, set by srp success interrupt in FS I2C mode
+ */
+ uint8_t srp_success;
+ uint8_t srp_timer_started;
+ /** Timer for SRP. If it expires before SRP is successful
+ * clear the SRP. */
+ fh_timer_t *srp_timer;
+
+#ifdef FH_DEV_SRPCAP
+ /* This timer is needed to power on the hibernated host core if SRP is not
+ * initiated on connected SRP capable device for limited period of time
+ */
+ uint8_t pwron_timer_started;
+ fh_timer_t *pwron_timer;
+#endif
+ /* Common configuration information */
+ /** Power and Clock Gating Control Register */
+ volatile uint32_t *pcgcctl;
+#define FH_OTG_PCGCCTL_OFFSET 0xE00
+
+ /** Push/pop addresses for endpoints or host channels.*/
+ uint32_t *data_fifo[MAX_EPS_CHANNELS];
+#define FH_OTG_DATA_FIFO_OFFSET 0x1000
+#define FH_OTG_DATA_FIFO_SIZE 0x1000
+
+ /** Total RAM for FIFOs (Bytes) */
+ uint16_t total_fifo_size;
+ /** Size of Rx FIFO (Bytes) */
+ uint16_t rx_fifo_size;
+ /** Size of Non-periodic Tx FIFO (Bytes) */
+ uint16_t nperio_tx_fifo_size;
+
+ /** 1 if DMA is enabled, 0 otherwise. */
+ uint8_t dma_enable;
+
+ /** 1 if DMA descriptor is enabled, 0 otherwise. */
+ uint8_t dma_desc_enable;
+
+ /** 1 if PTI Enhancement mode is enabled, 0 otherwise. */
+ uint8_t pti_enh_enable;
+
+ /** 1 if MPI Enhancement mode is enabled, 0 otherwise. */
+ uint8_t multiproc_int_enable;
+
+ /** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */
+ uint8_t en_multiple_tx_fifo;
+
+ /** Set to 1 if multiple packets of a high-bandwidth transfer is in
+ * process of being queued */
+ uint8_t queuing_high_bandwidth;
+
+ /** Hardware Configuration -- stored here for convenience.*/
+ hwcfg1_data_t hwcfg1;
+ hwcfg2_data_t hwcfg2;
+ hwcfg3_data_t hwcfg3;
+ hwcfg4_data_t hwcfg4;
+ fifosize_data_t hptxfsiz;
+
+ /** Host and Device Configuration -- stored here for convenience.*/
+ hcfg_data_t hcfg;
+ dcfg_data_t dcfg;
+
+	/** The operational State, during transitions
+	 * (a_host=>a_peripheral and b_device=>b_host) this may not
+ * match the core but allows the software to determine
+ * transitions.
+ */
+ uint8_t op_state;
+
+ /** Test mode for PET testing */
+ uint8_t test_mode;
+
+ /**
+ * Set to 1 if the HCD needs to be restarted on a session request
+ * interrupt. This is required if no connector ID status change has
+ * occurred since the HCD was last disconnected.
+ */
+ uint8_t restart_hcd_on_session_req;
+
+ /** HCD callbacks */
+ /** A-Device is a_host */
+#define A_HOST (1)
+ /** A-Device is a_suspend */
+#define A_SUSPEND (2)
+	/** A-Device is a_peripheral */
+#define A_PERIPHERAL (3)
+ /** B-Device is operating as a Peripheral. */
+#define B_PERIPHERAL (4)
+ /** B-Device is operating as a Host. */
+#define B_HOST (5)
+
+ /** HCD callbacks */
+ struct fh_otg_cil_callbacks *hcd_cb;
+ /** PCD callbacks */
+ struct fh_otg_cil_callbacks *pcd_cb;
+
+ /** Device mode Periodic Tx FIFO Mask */
+ uint32_t p_tx_msk;
+ /** Device mode Periodic Tx FIFO Mask */
+ uint32_t tx_msk;
+
+ /** Workqueue object used for handling several interrupts */
+ fh_workq_t *wq_otg;
+
+ /** Timer object used for handling "Wakeup Detected" Interrupt */
+ fh_timer_t *wkp_timer;
+ /** This arrays used for debug purposes for DEV OUT NAK enhancement */
+ uint32_t start_doeptsiz_val[MAX_EPS_CHANNELS];
+ ep_xfer_info_t ep_xfer_info[MAX_EPS_CHANNELS];
+ fh_timer_t *ep_xfer_timer[MAX_EPS_CHANNELS];
+#ifdef DEBUG
+ uint32_t start_hcchar_val[MAX_EPS_CHANNELS];
+
+ hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS];
+ fh_timer_t *hc_xfer_timer[MAX_EPS_CHANNELS];
+
+ uint32_t hfnum_7_samples;
+ uint64_t hfnum_7_frrem_accum;
+ uint32_t hfnum_0_samples;
+ uint64_t hfnum_0_frrem_accum;
+ uint32_t hfnum_other_samples;
+ uint64_t hfnum_other_frrem_accum;
+#endif
+
+#ifdef FH_UTE_CFI
+ uint16_t pwron_rxfsiz;
+ uint16_t pwron_gnptxfsiz;
+ uint16_t pwron_txfsiz[15];
+
+ uint16_t init_rxfsiz;
+ uint16_t init_gnptxfsiz;
+ uint16_t init_txfsiz[15];
+#endif
+
+ /** Lx state of device */
+ fh_otg_lx_state_e lx_state;
+
+ /** Saved Core Global registers */
+ struct fh_otg_global_regs_backup *gr_backup;
+ /** Saved Host registers */
+ struct fh_otg_host_regs_backup *hr_backup;
+ /** Saved Device registers */
+ struct fh_otg_dev_regs_backup *dr_backup;
+
+ /** Power Down Enable */
+ uint32_t power_down;
+
+ /** ADP support Enable */
+ uint32_t adp_enable;
+
+ /** ADP structure object */
+ fh_otg_adp_t adp;
+
+ /** hibernation/suspend flag */
+ int hibernation_suspend;
+
+ /** Device mode extended hibernation flag */
+ int xhib;
+
+ /** OTG revision supported */
+ uint32_t otg_ver;
+
+ /** OTG status flag used for HNP polling */
+ uint8_t otg_sts;
+
+ /** Pointer to either hcd->lock or pcd->lock */
+ fh_spinlock_t *lock;
+
+ /** Start predict NextEP based on Learning Queue if equal 1,
+ * also used as counter of disabled NP IN EP's */
+ uint8_t start_predict;
+
+ /** NextEp sequence, including EP0: nextep_seq[] = EP if non-periodic and
+ * active, 0xff otherwise */
+ uint8_t nextep_seq[MAX_EPS_CHANNELS];
+
+	/** Index of first EP in nextep_seq array which should be re-enabled **/
+ uint8_t first_in_nextep_seq;
+
+ /** Frame number while entering to ISR - needed for ISOCs **/
+ uint32_t frame_num;
+
+ /** Flag to not perform ADP probing if IDSTS event happened */
+ uint8_t stop_adpprb;
+
+};
+
+#ifdef DEBUG
+/*
+ * This function is called when transfer is timed out.
+ */
+extern void hc_xfer_timeout(void *ptr);
+#endif
+
+/*
+ * This function is called when transfer is timed out on endpoint.
+ */
+extern void ep_xfer_timeout(void *ptr);
+
+/*
+ * The following functions are functions for works
+ * using during handling some interrupts
+ */
+extern void w_conn_id_status_change(void *p);
+
+extern void w_wakeup_detected(void *p);
+
+/** Saves global register values into system memory. */
+extern int fh_otg_save_global_regs(fh_otg_core_if_t * core_if);
+/** Saves device register values into system memory. */
+extern int fh_otg_save_dev_regs(fh_otg_core_if_t * core_if);
+/** Saves host register values into system memory. */
+extern int fh_otg_save_host_regs(fh_otg_core_if_t * core_if);
+/** Restore global register values. */
+extern int fh_otg_restore_global_regs(fh_otg_core_if_t * core_if);
+/** Restore host register values. */
+extern int fh_otg_restore_host_regs(fh_otg_core_if_t * core_if, int reset);
+/** Restore device register values. */
+extern int fh_otg_restore_dev_regs(fh_otg_core_if_t * core_if,
+ int rem_wakeup);
+extern int restore_lpm_i2c_regs(fh_otg_core_if_t * core_if);
+extern int restore_essential_regs(fh_otg_core_if_t * core_if, int rmode,
+ int is_host);
+
+extern int fh_otg_host_hibernation_restore(fh_otg_core_if_t * core_if,
+ int restore_mode, int reset);
+extern int fh_otg_device_hibernation_restore(fh_otg_core_if_t * core_if,
+ int rem_wakeup, int reset);
+
+/*
+ * The following functions support initialization of the CIL driver component
+ * and the FH_otg controller.
+ */
+extern void fh_otg_core_host_init(fh_otg_core_if_t * _core_if);
+extern void fh_otg_core_dev_init(fh_otg_core_if_t * _core_if);
+
+/** @name Device CIL Functions
+ * The following functions support managing the FH_otg controller in device
+ * mode.
+ */
+/**@{*/
+extern void fh_otg_wakeup(fh_otg_core_if_t * _core_if);
+extern void fh_otg_read_setup_packet(fh_otg_core_if_t * _core_if,
+ uint32_t * _dest);
+extern uint32_t fh_otg_get_frame_number(fh_otg_core_if_t * _core_if);
+extern void fh_otg_ep0_activate(fh_otg_core_if_t * _core_if, fh_ep_t * _ep);
+extern void fh_otg_ep_activate(fh_otg_core_if_t * _core_if, fh_ep_t * _ep);
+extern void fh_otg_ep_deactivate(fh_otg_core_if_t * _core_if, fh_ep_t * _ep);
+extern void fh_otg_ep_start_transfer(fh_otg_core_if_t * _core_if,
+ fh_ep_t * _ep);
+extern void fh_otg_ep_start_zl_transfer(fh_otg_core_if_t * _core_if,
+ fh_ep_t * _ep);
+extern void fh_otg_ep0_start_transfer(fh_otg_core_if_t * _core_if,
+ fh_ep_t * _ep);
+extern void fh_otg_ep0_continue_transfer(fh_otg_core_if_t * _core_if,
+ fh_ep_t * _ep);
+extern void fh_otg_ep_write_packet(fh_otg_core_if_t * _core_if,
+ fh_ep_t * _ep, int _dma);
+extern void fh_otg_ep_set_stall(fh_otg_core_if_t * _core_if, fh_ep_t * _ep);
+extern void fh_otg_ep_clear_stall(fh_otg_core_if_t * _core_if,
+ fh_ep_t * _ep);
+extern void fh_otg_enable_device_interrupts(fh_otg_core_if_t * _core_if);
+
+#ifdef FH_EN_ISOC
+extern void fh_otg_iso_ep_start_frm_transfer(fh_otg_core_if_t * core_if,
+ fh_ep_t * ep);
+extern void fh_otg_iso_ep_start_buf_transfer(fh_otg_core_if_t * core_if,
+ fh_ep_t * ep);
+#endif /* FH_EN_ISOC */
+/**@}*/
+
+/** @name Host CIL Functions
+ * The following functions support managing the FH_otg controller in host
+ * mode.
+ */
+/**@{*/
+extern void fh_otg_hc_init(fh_otg_core_if_t * _core_if, fh_hc_t * _hc);
+extern void fh_otg_hc_halt(fh_otg_core_if_t * _core_if,
+ fh_hc_t * _hc, fh_otg_halt_status_e _halt_status);
+extern void fh_otg_hc_cleanup(fh_otg_core_if_t * _core_if, fh_hc_t * _hc);
+extern void fh_otg_hc_start_transfer(fh_otg_core_if_t * _core_if,
+ fh_hc_t * _hc);
+extern int fh_otg_hc_continue_transfer(fh_otg_core_if_t * _core_if,
+ fh_hc_t * _hc);
+extern void fh_otg_hc_do_ping(fh_otg_core_if_t * _core_if, fh_hc_t * _hc);
+extern void fh_otg_hc_write_packet(fh_otg_core_if_t * _core_if,
+ fh_hc_t * _hc);
+extern void fh_otg_enable_host_interrupts(fh_otg_core_if_t * _core_if);
+extern void fh_otg_disable_host_interrupts(fh_otg_core_if_t * _core_if);
+
+extern void fh_otg_hc_start_transfer_ddma(fh_otg_core_if_t * core_if,
+ fh_hc_t * hc);
+
+extern uint32_t calc_frame_interval(fh_otg_core_if_t * core_if);
+extern int fh_otg_check_haps_status(fh_otg_core_if_t * core_if);
+
+/* Macro used to clear one channel interrupt */
+#define clear_hc_int(_hc_regs_, _intr_) \
+do { \
+ hcint_data_t hcint_clear = {.d32 = 0}; \
+ hcint_clear.b._intr_ = 1; \
+ FH_WRITE_REG32(&(_hc_regs_)->hcint, hcint_clear.d32); \
+} while (0)
+
+/*
+ * Macro used to disable one channel interrupt. Channel interrupts are
+ * disabled when the channel is halted or released by the interrupt handler.
+ * There is no need to handle further interrupts of that type until the
+ * channel is re-assigned. In fact, subsequent handling may cause crashes
+ * because the channel structures are cleaned up when the channel is released.
+ */
+#define disable_hc_int(_hc_regs_, _intr_) \
+do { \
+ hcintmsk_data_t hcintmsk = {.d32 = 0}; \
+ hcintmsk.b._intr_ = 1; \
+ FH_MODIFY_REG32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \
+} while (0)
+
+/**
+ * This function reads HPRT0 in preparation to modify it. It keeps the
+ * WC bits 0 so that if they are read as 1, they won't clear when you
+ * write it back
+ */
+static inline uint32_t fh_otg_read_hprt0(fh_otg_core_if_t * _core_if)
+{
+ hprt0_data_t hprt0;
+ hprt0.d32 = FH_READ_REG32(_core_if->host_if->hprt0);
+ hprt0.b.prtena = 0;
+ hprt0.b.prtconndet = 0;
+ hprt0.b.prtenchng = 0;
+ hprt0.b.prtovrcurrchng = 0;
+ return hprt0.d32;
+}
+
+/**@}*/
+
+/** @name Common CIL Functions
+ * The following functions support managing the FH_otg controller in either
+ * device or host mode.
+ */
+/**@{*/
+
+extern void fh_otg_read_packet(fh_otg_core_if_t * core_if,
+ uint8_t * dest, uint16_t bytes);
+
+extern void fh_otg_flush_tx_fifo(fh_otg_core_if_t * _core_if, const int _num);
+extern void fh_otg_flush_rx_fifo(fh_otg_core_if_t * _core_if);
+extern void fh_otg_core_reset(fh_otg_core_if_t * _core_if);
+
+/**
+ * This function returns the Core Interrupt register.
+ */
+static inline uint32_t fh_otg_read_core_intr(fh_otg_core_if_t * core_if)
+{
+ return (FH_READ_REG32(&core_if->core_global_regs->gintsts) &
+ FH_READ_REG32(&core_if->core_global_regs->gintmsk));
+}
+
+/**
+ * This function returns the OTG Interrupt register.
+ */
+static inline uint32_t fh_otg_read_otg_intr(fh_otg_core_if_t * core_if)
+{
+ return (FH_READ_REG32(&core_if->core_global_regs->gotgint));
+}
+
+/**
+ * This function reads the Device All Endpoints Interrupt register and
+ * returns the IN endpoint interrupt bits.
+ */
+static inline uint32_t fh_otg_read_dev_all_in_ep_intr(fh_otg_core_if_t *
+ core_if)
+{
+
+ uint32_t v;
+
+ if (core_if->multiproc_int_enable) {
+ v = FH_READ_REG32(&core_if->dev_if->
+ dev_global_regs->deachint) &
+ FH_READ_REG32(&core_if->
+ dev_if->dev_global_regs->deachintmsk);
+ } else {
+ v = FH_READ_REG32(&core_if->dev_if->dev_global_regs->daint) &
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->daintmsk);
+ }
+ return (v & 0xffff);
+}
+
+/**
+ * This function reads the Device All Endpoints Interrupt register and
+ * returns the OUT endpoint interrupt bits.
+ */
+static inline uint32_t fh_otg_read_dev_all_out_ep_intr(fh_otg_core_if_t *
+ core_if)
+{
+ uint32_t v;
+
+ if (core_if->multiproc_int_enable) {
+ v = FH_READ_REG32(&core_if->dev_if->
+ dev_global_regs->deachint) &
+ FH_READ_REG32(&core_if->
+ dev_if->dev_global_regs->deachintmsk);
+ } else {
+ v = FH_READ_REG32(&core_if->dev_if->dev_global_regs->daint) &
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->daintmsk);
+ }
+
+ return ((v & 0xffff0000) >> 16);
+}
+
+/**
+ * This function returns the Device IN EP Interrupt register
+ */
+static inline uint32_t fh_otg_read_dev_in_ep_intr(fh_otg_core_if_t * core_if,
+ fh_ep_t * ep)
+{
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ uint32_t v, msk, emp;
+
+ if (core_if->multiproc_int_enable) {
+ msk =
+ FH_READ_REG32(&dev_if->
+ dev_global_regs->diepeachintmsk[ep->num]);
+ emp =
+ FH_READ_REG32(&dev_if->
+ dev_global_regs->dtknqr4_fifoemptymsk);
+ msk |= ((emp >> ep->num) & 0x1) << 7;
+ v = FH_READ_REG32(&dev_if->in_ep_regs[ep->num]->diepint) & msk;
+ } else {
+ msk = FH_READ_REG32(&dev_if->dev_global_regs->diepmsk);
+ emp =
+ FH_READ_REG32(&dev_if->
+ dev_global_regs->dtknqr4_fifoemptymsk);
+ msk |= ((emp >> ep->num) & 0x1) << 7;
+ v = FH_READ_REG32(&dev_if->in_ep_regs[ep->num]->diepint) & msk;
+ }
+
+ return v;
+}
+
+/**
+ * This function returns the Device OUT EP Interrupt register
+ */
+static inline uint32_t fh_otg_read_dev_out_ep_intr(fh_otg_core_if_t *
+ _core_if, fh_ep_t * _ep)
+{
+ fh_otg_dev_if_t *dev_if = _core_if->dev_if;
+ uint32_t v;
+ doepmsk_data_t msk = {.d32 = 0 };
+
+ if (_core_if->multiproc_int_enable) {
+ msk.d32 =
+ FH_READ_REG32(&dev_if->
+ dev_global_regs->doepeachintmsk[_ep->num]);
+ if (_core_if->pti_enh_enable) {
+ msk.b.pktdrpsts = 1;
+ }
+ v = FH_READ_REG32(&dev_if->
+ out_ep_regs[_ep->num]->doepint) & msk.d32;
+ } else {
+ msk.d32 = FH_READ_REG32(&dev_if->dev_global_regs->doepmsk);
+ if (_core_if->pti_enh_enable) {
+ msk.b.pktdrpsts = 1;
+ }
+ v = FH_READ_REG32(&dev_if->
+ out_ep_regs[_ep->num]->doepint) & msk.d32;
+ }
+ return v;
+}
+
+/**
+ * This function returns the Host All Channel Interrupt register
+ */
+static inline uint32_t fh_otg_read_host_all_channels_intr(fh_otg_core_if_t *
+ _core_if)
+{
+ return (FH_READ_REG32(&_core_if->host_if->host_global_regs->haint));
+}
+
+static inline uint32_t fh_otg_read_host_channel_intr(fh_otg_core_if_t *
+ _core_if, fh_hc_t * _hc)
+{
+ return (FH_READ_REG32
+ (&_core_if->host_if->hc_regs[_hc->hc_num]->hcint));
+}
+
+/**
+ * This function returns the mode of the operation, host or device.
+ *
+ * @return 0 - Device Mode, 1 - Host Mode
+ */
+static inline uint32_t fh_otg_mode(fh_otg_core_if_t * _core_if)
+{
+ return (FH_READ_REG32(&_core_if->core_global_regs->gintsts) & 0x1);
+}
+
+/**@}*/
+
+/**
+ * FH_otg CIL callback structure. This structure allows the HCD and
+ * PCD to register functions used for starting and stopping the PCD
+ * and HCD for role change on for a DRD.
+ */
+typedef struct fh_otg_cil_callbacks {
+ /** Start function for role change */
+ int (*start) (void *_p);
+ /** Stop Function for role change */
+ int (*stop) (void *_p);
+ /** Disconnect Function for role change */
+ int (*disconnect) (void *_p);
+ /** Resume/Remote wakeup Function */
+ int (*resume_wakeup) (void *_p);
+ /** Suspend function */
+ int (*suspend) (void *_p);
+ /** Session Start (SRP) */
+ int (*session_start) (void *_p);
+#ifdef CONFIG_USB_FH_OTG_LPM
+ /** Sleep (switch to L0 state) */
+ int (*sleep) (void *_p);
+#endif
+ /** Pointer passed to start() and stop() */
+ void *p;
+} fh_otg_cil_callbacks_t;
+
+extern void fh_otg_cil_register_pcd_callbacks(fh_otg_core_if_t * _core_if,
+ fh_otg_cil_callbacks_t * _cb,
+ void *_p);
+extern void fh_otg_cil_register_hcd_callbacks(fh_otg_core_if_t * _core_if,
+ fh_otg_cil_callbacks_t * _cb,
+ void *_p);
+
+void fh_otg_initiate_srp(void * core_if);
+
+//////////////////////////////////////////////////////////////////////
+/** Start the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_hcd_start(fh_otg_core_if_t * core_if)
+{
+ if (core_if->hcd_cb && core_if->hcd_cb->start) {
+ core_if->hcd_cb->start(core_if->hcd_cb->p);
+ }
+}
+
+/** Stop the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_hcd_stop(fh_otg_core_if_t * core_if)
+{
+ if (core_if->hcd_cb && core_if->hcd_cb->stop) {
+ core_if->hcd_cb->stop(core_if->hcd_cb->p);
+ }
+}
+
+/** Disconnect the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_hcd_disconnect(fh_otg_core_if_t * core_if)
+{
+ if (core_if->hcd_cb && core_if->hcd_cb->disconnect) {
+ core_if->hcd_cb->disconnect(core_if->hcd_cb->p);
+ }
+}
+
+/** Inform the HCD that a New Session has begun. Helper function for
+ * using the HCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_hcd_session_start(fh_otg_core_if_t * core_if)
+{
+ if (core_if->hcd_cb && core_if->hcd_cb->session_start) {
+ core_if->hcd_cb->session_start(core_if->hcd_cb->p);
+ }
+}
+
+#ifdef CONFIG_USB_FH_OTG_LPM
+/**
+ * Inform the HCD about LPM sleep.
+ * Helper function for using the HCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_hcd_sleep(fh_otg_core_if_t * core_if)
+{
+ if (core_if->hcd_cb && core_if->hcd_cb->sleep) {
+ core_if->hcd_cb->sleep(core_if->hcd_cb->p);
+ }
+}
+#endif
+
+/** Resume the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_hcd_resume(fh_otg_core_if_t * core_if)
+{
+ if (core_if->hcd_cb && core_if->hcd_cb->resume_wakeup) {
+ core_if->hcd_cb->resume_wakeup(core_if->hcd_cb->p);
+ }
+}
+
+/** Start the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_pcd_start(fh_otg_core_if_t * core_if)
+{
+ if (core_if->pcd_cb && core_if->pcd_cb->start) {
+ core_if->pcd_cb->start(core_if->pcd_cb->p);
+ }
+}
+
+/** Stop the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_pcd_stop(fh_otg_core_if_t * core_if)
+{
+ if (core_if->pcd_cb && core_if->pcd_cb->stop) {
+ core_if->pcd_cb->stop(core_if->pcd_cb->p);
+ }
+}
+
+/** Suspend the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_pcd_suspend(fh_otg_core_if_t * core_if)
+{
+ if (core_if->pcd_cb && core_if->pcd_cb->suspend) {
+ core_if->pcd_cb->suspend(core_if->pcd_cb->p);
+ }
+}
+
+/** Resume the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static inline void cil_pcd_resume(fh_otg_core_if_t * core_if)
+{
+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////
+
+#endif
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil_intr.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil_intr.c
new file mode 100644
index 00000000..4b1cd2e4
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil_intr.c
@@ -0,0 +1,1739 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_cil_intr.c $
+ * $Revision: #40 $
+ * $Date: 2015/10/12 $
+ * $Change: 2972621 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ *
+ * The Core Interface Layer provides basic services for accessing and
+ * managing the FH_otg hardware. These services are used by both the
+ * Host Controller Driver and the Peripheral Controller Driver.
+ *
+ * This file contains the Common Interrupt handlers.
+ */
+#include "../fh_common_port/fh_os.h"
+#include "fh_otg_regs.h"
+#include "fh_otg_cil.h"
+#include "fh_otg_driver.h"
+#include "fh_otg_pcd.h"
+#include "fh_otg_hcd.h"
+
+#ifdef DEBUG
+inline const char *op_state_str(fh_otg_core_if_t * core_if)
+{
+ return (core_if->op_state == A_HOST ? "a_host" :
+ (core_if->op_state == A_SUSPEND ? "a_suspend" :
+ (core_if->op_state == A_PERIPHERAL ? "a_peripheral" :
+ (core_if->op_state == B_PERIPHERAL ? "b_peripheral" :
+ (core_if->op_state == B_HOST ? "b_host" : "unknown")))));
+}
+#endif
+
+/** This function will log a debug message
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+int32_t fh_otg_handle_mode_mismatch_intr(fh_otg_core_if_t * core_if)
+{
+ gintsts_data_t gintsts;
+ FH_WARN("Mode Mismatch Interrupt: currently in %s mode\n",
+ fh_otg_mode(core_if) ? "Host" : "Device");
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.modemismatch = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/**
+ * This function handles the OTG Interrupts. It reads the OTG
+ * Interrupt Register (GOTGINT) to determine what interrupt has
+ * occurred.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+int32_t fh_otg_handle_otg_intr(fh_otg_core_if_t * core_if)
+{
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+ gotgint_data_t gotgint;
+ gotgctl_data_t gotgctl;
+ gintmsk_data_t gintmsk;
+ gpwrdn_data_t gpwrdn;
+
+ gotgint.d32 = FH_READ_REG32(&global_regs->gotgint);
+ gotgctl.d32 = FH_READ_REG32(&global_regs->gotgctl);
+ FH_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint.d32,
+ op_state_str(core_if));
+
+ if (gotgint.b.sesenddet) {
+ FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+ "Session End Detected++ (%s)\n",
+ op_state_str(core_if));
+ gotgctl.d32 = FH_READ_REG32(&global_regs->gotgctl);
+
+ if (core_if->op_state == B_HOST) {
+ if (core_if->adp_enable && FH_WORKQ_PENDING(core_if->wq_otg)) {
+
+ /* During ST_B_ADP test after HNP HSOTG tries to go to B_HOST
+ * mode but PET is not expecting fully functional host at that
+ * point and switches off the VBUS expecting immediate ADP probe */
+ gpwrdn.b.pmuintsel = 1;
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+ fh_mdelay(20);
+ fh_otg_adp_probe_start(core_if);
+ goto exit_interrupt;
+ }
+ cil_pcd_start(core_if);
+ core_if->op_state = B_PERIPHERAL;
+ } else {
+ /* If not B_HOST and Device HNP still set. HNP
+ * Did not succeed!*/
+ if (gotgctl.b.devhnpen) {
+ FH_DEBUGPL(DBG_ANY, "Session End Detected\n");
+ __FH_ERROR("Device Not Connected/Responding!\n");
+ }
+
+ /* If Session End Detected the B-Cable has
+ * been disconnected. */
+ /* Reset PCD and Gadget driver to a
+ * clean state. */
+ core_if->lx_state = FH_OTG_L0;
+ FH_SPINUNLOCK(core_if->lock);
+ cil_pcd_stop(core_if);
+ FH_SPINLOCK(core_if->lock);
+
+ if (core_if->otg_ver) {
+ /** PET testing*/
+ gotgctl.d32 = 0;
+ gotgctl.b.devhnpen = 1;
+ FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
+ if (core_if->test_mode == 6) {
+ FH_WORKQ_SCHEDULE_DELAYED(core_if->wq_otg, fh_otg_initiate_srp,
+ core_if, 3000, "initate SRP"); //manukz: old value was 50
+ core_if->test_mode = 0;
+ } else if (core_if->adp_enable) {
+ if (core_if->power_down == 2) {
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnswtch = 1;
+ FH_MODIFY_REG32(&core_if->
+ core_global_regs->
+ gpwrdn, gpwrdn.d32, 0);
+ }
+
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuintsel = 1;
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+ fh_otg_adp_sense_start(core_if);
+ }
+ }
+ }
+exit_interrupt:
+ if (core_if->otg_ver == 0) {
+ gotgctl.d32 = 0;
+ gotgctl.b.devhnpen = 1;
+ FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
+ }
+ }
+ if (gotgint.b.sesreqsucstschng) {
+ FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+ "Session Reqeust Success Status Change++\n");
+ gotgctl.d32 = FH_READ_REG32(&global_regs->gotgctl);
+ if (gotgctl.b.sesreqscs) {
+
+ if ((core_if->core_params->phy_type ==
+ FH_PHY_TYPE_PARAM_FS) && (core_if->core_params->i2c_enable)) {
+ core_if->srp_success = 1;
+ } else {
+ FH_SPINUNLOCK(core_if->lock);
+ cil_pcd_resume(core_if);
+ FH_SPINLOCK(core_if->lock);
+ /* Clear Session Request */
+ gotgctl.d32 = 0;
+ gotgctl.b.sesreq = 1;
+ FH_MODIFY_REG32(&global_regs->gotgctl,
+ gotgctl.d32, 0);
+ }
+ }
+ }
+ if (gotgint.b.hstnegsucstschng) {
+ /* Print statements during the HNP interrupt handling
+ * can cause it to fail.*/
+ gotgctl.d32 = FH_READ_REG32(&global_regs->gotgctl);
+ /* WA for 3.00a- HW is not setting cur_mode, even sometimes
+ * this does not help*/
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a)
+ fh_udelay(100);
+ if (gotgctl.b.hstnegscs) {
+ if (fh_otg_is_host_mode(core_if)) {
+ core_if->op_state = B_HOST;
+ /*
+ * Need to disable SOF interrupt immediately.
+ * When switching from device to host, the PCD
+ * interrupt handler won't handle the
+ * interrupt if host mode is already set. The
+ * HCD interrupt handler won't get called if
+ * the HCD state is HALT. This means that the
+ * interrupt does not get handled and Linux
+ * complains loudly.
+ */
+ gintmsk.d32 = 0;
+ gintmsk.b.sofintr = 1;
+ /* To avoid multiple USB Suspend interrupts during
+ * OTG 2.0 role change */
+ if (core_if->otg_ver)
+ gintmsk.b.usbsuspend = 1;
+ FH_MODIFY_REG32(&global_regs->gintmsk,
+ gintmsk.d32, 0);
+ /* Call callback function with spin lock released */
+ FH_SPINUNLOCK(core_if->lock);
+ cil_pcd_stop(core_if);
+ /*
+ * Initialize the Core for Host mode.
+ */
+ if (core_if->otg_ver) {
+ fh_mdelay(100);
+ cil_hcd_start(core_if);
+ cil_hcd_session_start(core_if);
+ } else {
+ cil_hcd_start(core_if);
+ }
+ FH_SPINLOCK(core_if->lock);
+ }
+ } else {
+ gotgctl.d32 = 0;
+ gotgctl.b.hnpreq = 1;
+ gotgctl.b.devhnpen = 1;
+ FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
+ FH_DEBUGPL(DBG_ANY, "HNP Failed\n");
+ __FH_ERROR("Device Not Connected/Responding\n");
+ }
+ }
+ if (gotgint.b.hstnegdet) {
+ /* The disconnect interrupt is set at the same time as
+ * Host Negotiation Detected. During the mode
+ * switch all interrupts are cleared so the disconnect
+ * interrupt handler will not get executed.
+ */
+ FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+ "Host Negotiation Detected++ (%s)\n",
+ (fh_otg_is_host_mode(core_if) ? "Host" :
+ "Device"));
+ if (fh_otg_is_device_mode(core_if)) {
+ FH_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n",
+ core_if->op_state);
+ FH_SPINUNLOCK(core_if->lock);
+ cil_hcd_disconnect(core_if);
+ cil_pcd_start(core_if);
+ FH_SPINLOCK(core_if->lock);
+ core_if->op_state = A_PERIPHERAL;
+ } else {
+ /*
+ * Need to disable SOF interrupt immediately. When
+ * switching from device to host, the PCD interrupt
+ * handler won't handle the interrupt if host mode is
+ * already set. The HCD interrupt handler won't get
+ * called if the HCD state is HALT. This means that
+ * the interrupt does not get handled and Linux
+ * complains loudly.
+ */
+ gintmsk.d32 = 0;
+ gintmsk.b.sofintr = 1;
+ FH_MODIFY_REG32(&global_regs->gintmsk, gintmsk.d32, 0);
+ FH_SPINUNLOCK(core_if->lock);
+ cil_pcd_stop(core_if);
+ cil_hcd_start(core_if);
+ FH_SPINLOCK(core_if->lock);
+ core_if->op_state = A_HOST;
+ }
+ }
+ if (gotgint.b.adevtoutchng) {
+ FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+ "A-Device Timeout Change++\n");
+ }
+ if (gotgint.b.debdone) {
+ FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " "Debounce Done++\n");
+ /* Need to power off VBUS after 10s if OTG2 non-hnp capable host*/
+ if (core_if->otg_ver && core_if->op_state == A_PERIPHERAL) {
+ FH_DEBUGPL(DBG_ANY, "a_peripheral->a_host\n");
+ /* Clear the a_peripheral flag, back to a_host. */
+ FH_SPINUNLOCK(core_if->lock);
+ cil_pcd_stop(core_if);
+ cil_hcd_start(core_if);
+ FH_SPINLOCK(core_if->lock);
+ core_if->op_state = A_HOST;
+ }
+
+ //if(core_if->otg_ver == 1)
+ //cil_hcd_session_start(core_if); mvardan (for ADP issue)
+ }
+
+ /* Clear GOTGINT */
+ FH_WRITE_REG32(&core_if->core_global_regs->gotgint, gotgint.d32);
+
+ return 1;
+}
+
+void w_conn_id_status_change(void *p)
+{
+ fh_otg_core_if_t *core_if = p;
+ uint32_t count = 0;
+ gotgctl_data_t gotgctl = {.d32 = 0 };
+
+ gotgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+ FH_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32);
+ FH_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts);
+
+ /* B-Device connector (Device Mode) */
+ if (gotgctl.b.conidsts) {
+ gotgctl_data_t gotgctl_local;
+ /* Wait for switch to device mode. */
+ while (!fh_otg_is_device_mode(core_if)) {
+ gotgctl_local.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+ FH_DEBUGPL(DBG_ANY, "Waiting for Peripheral Mode, Mode=%s count = %d gotgctl=%08x\n",
+ (fh_otg_is_host_mode(core_if) ? "Host" :
+ "Peripheral"), count, gotgctl_local.d32);
+ fh_mdelay(1); //vahrama previous value was 100
+ if(!gotgctl_local.b.conidsts)
+ goto host;
+ if (++count > 10000)
+ break;
+ }
+ FH_ASSERT(++count < 10000,
+ "Connection id status change timed out");
+ core_if->op_state = B_PERIPHERAL;
+ if(core_if->otg_ver == 0)
+ fh_otg_core_init(core_if);
+ fh_otg_enable_global_interrupts(core_if);
+ cil_pcd_start(core_if);
+ } else {
+host:
+ /* A-Device connector (Host Mode) */
+ while (!fh_otg_is_host_mode(core_if)) {
+ FH_DEBUGPL(DBG_ANY,"Waiting for Host Mode, Mode=%s\n",
+ (fh_otg_is_host_mode(core_if) ? "Host" :
+ "Peripheral"));
+ fh_mdelay(1); //vahrama previously was 100
+ if (++count > 10000)
+ break;
+ }
+ FH_ASSERT(++count < 10000,
+ "Connection id status change timed out");
+ core_if->op_state = A_HOST;
+ /*
+ * Initialize the Core for Host mode.
+ */
+ if (core_if->otg_ver)
+ /* To power off the bus in 10s from the beginning
+			 * of test while debounce has not come yet */
+ cil_hcd_session_start(core_if);
+ else
+ fh_otg_core_init(core_if);
+ fh_otg_enable_global_interrupts(core_if);
+ cil_hcd_start(core_if);
+ }
+}
+
+/**
+ * This function handles the Connector ID Status Change Interrupt. It
+ * reads the OTG Control and Status Register (GOTGCTL) to determine whether this
+ * is a Device to Host Mode transition or a Host Mode to Device
+ * Transition.
+ *
+ * This only occurs when the cable is connected/removed from the PHY
+ * connector.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+int32_t fh_otg_handle_conn_id_status_change_intr(fh_otg_core_if_t * core_if)
+{
+
+ /*
+ * Need to disable SOF interrupt immediately. If switching from device
+ * to host, the PCD interrupt handler won't handle the interrupt if
+ * host mode is already set. The HCD interrupt handler won't get
+ * called if the HCD state is HALT. This means that the interrupt does
+ * not get handled and Linux complains loudly.
+ */
+ gintmsk_data_t gintmsk = {.d32 = 0 };
+ gintsts_data_t gintsts = {.d32 = 0 };
+
+ gintmsk.b.sofintr = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
+
+ FH_DEBUGPL(DBG_CIL,
+ " ++Connector ID Status Change Interrupt++ (%s)\n",
+ (fh_otg_is_host_mode(core_if) ? "Host" : "Device"));
+
+ FH_SPINUNLOCK(core_if->lock);
+
+	/* Needed to avoid conn_id_status change duplication */
+ //if (core_if->otg_ver)
+ //fh_mdelay(50);
+ /*
+ * Need to schedule a work, as there are possible DELAY function calls
+ * Release lock before scheduling workq as it holds spinlock during scheduling
+ */
+
+ FH_WORKQ_SCHEDULE(core_if->wq_otg, w_conn_id_status_change,
+ core_if, "connection id status change");
+ FH_SPINLOCK(core_if->lock);
+
+ /* Set flag and clear interrupt */
+ gintsts.b.conidstschng = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This interrupt indicates that a device is initiating the Session
+ * Request Protocol to request the host to turn on bus power so a new
+ * session can begin. The handler responds by turning on bus power. If
+ * the FH_otg controller is in low power mode, the handler brings the
+ * controller out of low power mode before turning on bus power.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+int32_t fh_otg_handle_session_req_intr(fh_otg_core_if_t * core_if)
+{
+ gintsts_data_t gintsts;
+
+#ifndef FH_HOST_ONLY
+ FH_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n");
+
+ if (fh_otg_is_device_mode(core_if)) {
+ gotgctl_data_t gotgctl = {.d32 = 0 };
+ FH_DEBUGPL(DBG_PCD, "SRP: Device mode\n");
+ gotgctl.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+ if (gotgctl.b.sesreqscs)
+ FH_PRINTF("SRP Success\n");
+ else
+ FH_PRINTF("SRP Fail\n");
+ if (core_if->otg_ver) {
+ gotgctl.d32 = 0 ;
+ gotgctl.b.devhnpen = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gotgctl, gotgctl.d32, 0);
+ }
+ } else {
+ hprt0_data_t hprt0;
+ FH_PRINTF("SRP: Host mode\n");
+
+ /* Turn on the port power bit. */
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtpwr = 1;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+
+ /* Start the Connection timer. So a message can be displayed
+ * if connect does not occur within 10 seconds. */
+ cil_hcd_session_start(core_if);
+ }
+#endif
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.sessreqintr = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+void w_wakeup_detected(void *p)
+{
+ fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) p;
+ /*
+ * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
+ * so that OPT tests pass with all PHYs).
+ */
+ hprt0_data_t hprt0 = {.d32 = 0 };
+#if 0
+ pcgcctl_data_t pcgcctl = {.d32 = 0 };
+ /* Restart the Phy Clock */
+ pcgcctl.b.stoppclk = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+ fh_udelay(10);
+#endif //0
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ FH_DEBUGPL(DBG_ANY, "Resume: HPRT0=%0x\n", hprt0.d32);
+// fh_mdelay(70);
+ hprt0.b.prtres = 0; /* Resume */
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ FH_DEBUGPL(DBG_ANY, "Clear Resume: HPRT0=%0x\n",
+ FH_READ_REG32(core_if->host_if->hprt0));
+
+ cil_hcd_resume(core_if);
+
+ /** Change to L0 state*/
+ core_if->lx_state = FH_OTG_L0;
+}
+
+/**
+ * This interrupt indicates that the FH_otg controller has detected a
+ * resume or remote wakeup sequence. If the FH_otg controller is in
+ * low power mode, the handler must bring the controller out of low
+ * power mode. The controller automatically begins resume
+ * signaling. The handler schedules a time to stop resume signaling.
+ */
+int32_t fh_otg_handle_wakeup_detected_intr(fh_otg_core_if_t * core_if)
+{
+ gintsts_data_t gintsts;
+
+ FH_DEBUGPL(DBG_ANY,
+ "++Resume and Remote Wakeup Detected Interrupt++\n");
+
+ FH_PRINTF("%s lxstate = %d\n", __func__, core_if->lx_state);
+
+ if (fh_otg_is_device_mode(core_if)) {
+ dctl_data_t dctl = {.d32 = 0 };
+ FH_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n",
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->
+ dsts));
+ if (core_if->lx_state == FH_OTG_L2) {
+#ifdef PARTIAL_POWER_DOWN
+ if (core_if->hwcfg4.b.power_optimiz) {
+ pcgcctl_data_t power = {.d32 = 0 };
+
+ power.d32 = FH_READ_REG32(core_if->pcgcctl);
+ FH_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n",
+ power.d32);
+
+ power.b.stoppclk = 0;
+ FH_WRITE_REG32(core_if->pcgcctl, power.d32);
+
+ power.b.pwrclmp = 0;
+ FH_WRITE_REG32(core_if->pcgcctl, power.d32);
+
+ power.b.rstpdwnmodule = 0;
+ FH_WRITE_REG32(core_if->pcgcctl, power.d32);
+ }
+#endif
+ /* Clear the Remote Wakeup Signaling */
+ dctl.b.rmtwkupsig = 1;
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+ dctl, dctl.d32, 0);
+
+ FH_SPINUNLOCK(core_if->lock);
+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
+ }
+ FH_SPINLOCK(core_if->lock);
+ } else {
+ glpmcfg_data_t lpmcfg;
+ pcgcctl_data_t pcgcctl = {.d32 = 0 };
+
+ lpmcfg.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ lpmcfg.b.hird_thres &= (~(1 << 4));
+ lpmcfg.b.en_utmi_sleep = 0;
+
+ /* Clear Enbl_L1Gating bit. */
+ pcgcctl.b.enbl_sleep_gating = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32,0);
+
+ FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg,
+ lpmcfg.d32);
+ }
+ /** Change to L0 state*/
+ core_if->lx_state = FH_OTG_L0;
+ } else {
+ if (core_if->lx_state != FH_OTG_L1) {
+ pcgcctl_data_t pcgcctl = {.d32 = 0 };
+
+ /* Restart the Phy Clock */
+ pcgcctl.b.stoppclk = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+ FH_TIMER_SCHEDULE(core_if->wkp_timer, 71);
+ } else {
+ /** Change to L0 state*/
+ core_if->lx_state = FH_OTG_L0;
+ }
+ }
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.wkupintr = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * Handles the Wakeup Logic (PMU) "device disconnect" interrupt raised
+ * while the core is hibernating: powers the core back up, disables the
+ * PMU, and restarts host or device mode per the saved ID-pin state.
+ */
+static int32_t fh_otg_handle_pwrdn_disconnect_intr(fh_otg_core_if_t * core_if)
+{
+	gpwrdn_data_t gpwrdn = {.d32 = 0 };
+	gpwrdn_data_t gpwrdn_temp = {.d32 = 0 };
+	gpwrdn_temp.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn); /* snapshot; idsts consulted below */
+
+	FH_PRINTF("%s called\n", __FUNCTION__);
+
+	if (!core_if->hibernation_suspend) {
+		FH_PRINTF("Already exited from Hibernation\n");
+		return 1; /* nothing to restore */
+	}
+
+	/* Switch on the voltage to the core */
+	gpwrdn.b.pwrdnswtch = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+	fh_udelay(10);
+
+	/* Reset the core */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pwrdnrstn = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+	fh_udelay(10);
+
+	/* Disable power clamps */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pwrdnclmp = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+	/* Remove reset the core signal (set bit = deassert reset) */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pwrdnrstn = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+	fh_udelay(10);
+
+	/* Disable PMU interrupt */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pmuintsel = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+	core_if->hibernation_suspend = 0; /* mark hibernation exit complete */
+
+	/* Disable PMU */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pmuactv = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+	fh_udelay(10);
+
+	if (gpwrdn_temp.b.idsts) { /* ID high: resume as B-device */
+		core_if->op_state = B_PERIPHERAL;
+		fh_otg_core_init(core_if);
+		fh_otg_enable_global_interrupts(core_if);
+		cil_pcd_start(core_if);
+	} else { /* ID low: resume as A-host */
+		core_if->op_state = A_HOST;
+		fh_otg_core_init(core_if);
+		fh_otg_enable_global_interrupts(core_if);
+		cil_hcd_start(core_if);
+	}
+
+	return 1;
+}
+
<![CDATA[+/**
+ * Handles the Wakeup Logic's remote-wakeup-detected interrupt: if the
+ * core is hibernating (power_down == 2), restores it in the current mode.
+ */
+static int32_t fh_otg_handle_pwrdn_wakeup_detected_intr(fh_otg_core_if_t * core_if)
+{
+	gpwrdn_data_t gpwrdn = {.d32 = 0 };
+	FH_DEBUGPL(DBG_ANY,
+		   "++Powerdown Remote Wakeup Detected Interrupt++\n");
+
+	if (!core_if->hibernation_suspend) {
+		FH_PRINTF("Already exited from Hibernation\n");
+		return 1;
+	}
+
+	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+	if (gpwrdn.b.idsts) {	// Device Mode
+		if ((core_if->power_down == 2)
+		    && (core_if->hibernation_suspend == 1)) {
+			fh_otg_device_hibernation_restore(core_if, 0, 0); /* no rem_wakeup, no reset */
+		}
+	} else {
+		if ((core_if->power_down == 2)
+		    && (core_if->hibernation_suspend == 1)) {
+			fh_otg_host_hibernation_restore(core_if, 1, 0); /* rem_wakeup = 1 */
+		}
+	}
+	return 1;
+}]]>
+
+static int32_t fh_otg_handle_pwrdn_idsts_change(fh_otg_device_t * otg_dev)
+{
+ gpwrdn_data_t gpwrdn = {.d32 = 0 };
+ gpwrdn_data_t gpwrdn_temp = {.d32 = 0 };
+ fh_otg_core_if_t *core_if = otg_dev->core_if;
+
+ FH_DEBUGPL(DBG_ANY, "%s called\n", __FUNCTION__);
+ gpwrdn_temp.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+ if (core_if->power_down == 2) {
+ if (!core_if->hibernation_suspend) {
+ FH_PRINTF("Already exited from Hibernation\n");
+ return 1;
+ }
+ FH_DEBUGPL(DBG_ANY, "Exit from hibernation on ID sts change\n");
+ /* Switch on the voltage to the core */
+ gpwrdn.b.pwrdnswtch = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Reset the core */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnrstn = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Disable power clamps */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnclmp = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+ /* Remove reset the core signal */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnrstn = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+
+ /* Disable PMU interrupt */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuintsel = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+ /*Indicates that we are exiting from hibernation */
+ core_if->hibernation_suspend = 0;
+
+ /* Disable PMU */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ gpwrdn.d32 = core_if->gr_backup->gpwrdn_local;
+ if (gpwrdn.b.dis_vbus == 1) {
+ gpwrdn.d32 = 0;
+ gpwrdn.b.dis_vbus = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ }
+
+ if (gpwrdn_temp.b.idsts) {
+ core_if->op_state = B_PERIPHERAL;
+ fh_otg_core_init(core_if);
+ fh_otg_enable_global_interrupts(core_if);
+ cil_pcd_start(core_if);
+ } else {
+ core_if->op_state = A_HOST;
+ fh_otg_core_init(core_if);
+ fh_otg_enable_global_interrupts(core_if);
+ cil_hcd_start(core_if);
+ }
+ }
+
+ if (core_if->adp_enable) {
+ uint8_t is_host = 0;
+ FH_SPINUNLOCK(core_if->lock);
+ /* Change the core_if's lock to hcd/pcd lock depend on mode? */
+#ifndef FH_HOST_ONLY
+ if (gpwrdn_temp.b.idsts)
+ core_if->lock = otg_dev->pcd->lock;
+#endif
+#ifndef FH_DEVICE_ONLY
+ if (!gpwrdn_temp.b.idsts) {
+ core_if->lock = otg_dev->hcd->lock;
+ is_host = 1;
+ }
+#endif
+ FH_DEBUGPL(DBG_ANY, "RESTART ADP\n");
+ if (core_if->adp.probe_enabled)
+ fh_otg_adp_probe_stop(core_if);
+ if (core_if->adp.sense_enabled)
+ fh_otg_adp_sense_stop(core_if);
+ if (core_if->adp.sense_timer_started)
+ FH_TIMER_CANCEL(core_if->adp.sense_timer);
+ if (core_if->adp.vbuson_timer_started)
+ FH_TIMER_CANCEL(core_if->adp.vbuson_timer);
+ /* Do not need to reset ADP if we are coming back
+ * to the device mode after HNP. This is needed
+ * not to perform SRP after reverse, just do ADP
+ * probe and compare the RTIM values with the one
+ * before HNP */
+ if (core_if->op_state != B_HOST) {
+ core_if->adp.probe_timer_values[0] = -1;
+ core_if->adp.probe_timer_values[1] = -1;
+ core_if->adp.probe_counter = 0;
+ core_if->adp.gpwrdn = 0;
+ }
+ core_if->adp.sense_timer_started = 0;
+ core_if->adp.vbuson_timer_started = 0;
+
+ /* Disable PMU and restart ADP */
+ gpwrdn_temp.d32 = 0;
+ gpwrdn_temp.b.pmuactv = 1;
+ gpwrdn_temp.b.pmuintsel = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_mdelay(110);
+ fh_otg_adp_start(core_if, is_host);
+ FH_SPINLOCK(core_if->lock);
+ }
+
+ return 1;
+}
+
+static int32_t fh_otg_handle_pwrdn_session_change(fh_otg_core_if_t * core_if)
+{
+ gpwrdn_data_t gpwrdn = {.d32 = 0 };
+ int32_t otg_cap_param = core_if->core_params->otg_cap;
+ FH_DEBUGPL(DBG_ANY, "%s called\n", __FUNCTION__);
+
+ gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+ if (core_if->power_down == 2) {
+ if (!core_if->hibernation_suspend) {
+ FH_PRINTF("Already exited from Hibernation\n");
+ return 1;
+ }
+
+ if ((otg_cap_param != FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE ||
+ otg_cap_param != FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE) &&
+ gpwrdn.b.bsessvld == 0) {
+ /* Save gpwrdn register for further usage if stschng interrupt */
+ core_if->gr_backup->gpwrdn_local =
+ FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+ /*Exit from ISR and wait for stschng interrupt with bsessvld = 1 */
+ return 1;
+ }
+
+ /* Switch on the voltage to the core */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnswtch = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Reset the core */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnrstn = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /* Disable power clamps */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnclmp = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+ /* Remove reset the core signal */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnrstn = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+
+ /* Disable PMU interrupt */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuintsel = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ /*Indicates that we are exiting from hibernation */
+ core_if->hibernation_suspend = 0;
+
+ /* Disable PMU */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+ fh_udelay(10);
+
+ core_if->op_state = B_PERIPHERAL;
+ fh_otg_core_init(core_if);
+ fh_otg_enable_global_interrupts(core_if);
+ cil_pcd_start(core_if);
+
+ if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE ||
+ otg_cap_param == FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE) {
+ /*
+ * Initiate SRP after initial ADP probe.
+ */
+ fh_otg_initiate_srp(core_if);
+ }
+ } else if (core_if->adp_enable && core_if->op_state != A_HOST){
+ fh_otg_adp_probe_stop(core_if);
+ if (FH_WORKQ_PENDING(core_if->wq_otg))
+ core_if->stop_adpprb = 1;
+ /* Disable Power Down Logic */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuintsel = 1;
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, gpwrdn.d32, 0);
+
+ /*
+ * Initialize the Core for Device mode.
+ */
+ core_if->op_state = B_PERIPHERAL;
+ cil_pcd_start(core_if);
+ fh_otg_enable_global_interrupts(core_if);
+ }
+
+ return 1;
+}
+
+/**
+ * This interrupt indicates that the Wakeup Logic has detected a
+ * status change either on IDDIG or BSessVld.
+ */
+static uint32_t fh_otg_handle_pwrdn_stschng_intr(fh_otg_device_t * otg_dev)
+{
+ int retval;
+ gpwrdn_data_t gpwrdn = {.d32 = 0 };
+ gpwrdn_data_t gpwrdn_temp = {.d32 = 0 };
+ fh_otg_core_if_t *core_if = otg_dev->core_if;
+
+ FH_DEBUGPL(DBG_CIL, "%s called\n", __FUNCTION__);
+
+ if (core_if->power_down == 2) {
+ if (core_if->hibernation_suspend <= 0) {
+ FH_PRINTF("Already exited from Hibernation\n");
+ return 1;
+ } else
+ gpwrdn_temp.d32 = core_if->gr_backup->gpwrdn_local;
+
+ } else {
+ gpwrdn_temp.d32 = core_if->adp.gpwrdn;
+ }
+
+ gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+
+ if (gpwrdn.b.idsts ^ gpwrdn_temp.b.idsts) {
+ retval = fh_otg_handle_pwrdn_idsts_change(otg_dev);
+ } else if (gpwrdn.b.bsessvld ^ gpwrdn_temp.b.bsessvld) {
+ retval = fh_otg_handle_pwrdn_session_change(core_if);
+ }
+
+ return retval;
+}
+
<![CDATA[+/**
+ * Handles the Wakeup Logic's SRP-detected interrupt: exits hibernation
+ * (power_down == 2) and/or stops ADP probing, then starts host mode.
+ */
+static int32_t fh_otg_handle_pwrdn_srp_intr(fh_otg_core_if_t * core_if)
+{
+	gpwrdn_data_t gpwrdn = {.d32 = 0 };
+
+	FH_PRINTF("%s called\n", __FUNCTION__);
+
+	if (core_if->power_down == 2) {
+		if (!core_if->hibernation_suspend) {
+			FH_PRINTF("Already exited from Hibernation\n");
+			return 1;
+		}
+#ifdef FH_DEV_SRPCAP
+		if (core_if->pwron_timer_started) {
+			core_if->pwron_timer_started = 0;
+			FH_TIMER_CANCEL(core_if->pwron_timer);
+		}
+#endif
+
+		/* Switch on the voltage to the core */
+		gpwrdn.b.pwrdnswtch = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+		fh_udelay(10);
+
+		/* Reset the core */
+		gpwrdn.d32 = 0;
+		gpwrdn.b.pwrdnrstn = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+		fh_udelay(10);
+
+		/* Disable power clamps */
+		gpwrdn.d32 = 0;
+		gpwrdn.b.pwrdnclmp = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+		/* Remove reset the core signal (set bit = deassert reset) */
+		gpwrdn.d32 = 0;
+		gpwrdn.b.pwrdnrstn = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+		fh_udelay(10);
+
+		/* Disable PMU interrupt */
+		gpwrdn.d32 = 0;
+		gpwrdn.b.pmuintsel = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+		/* Indicates that we are exiting from hibernation */
+		core_if->hibernation_suspend = 0;
+
+		/* Disable PMU */
+		gpwrdn.d32 = 0;
+		gpwrdn.b.pmuactv = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+		fh_udelay(10);
+
+		/* Program Disable VBUS to 0 */
+		gpwrdn.d32 = 0;
+		gpwrdn.b.dis_vbus = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+		/*Initialize the core as Host */
+		core_if->op_state = A_HOST;
+		fh_otg_core_init(core_if);
+		fh_otg_enable_global_interrupts(core_if);
+		cil_hcd_start(core_if);
+	}
+	/* Do not need to do anything if this is "old" SRP and we are already
+	 * in the normal mode of operation */
+	if(core_if->adp_enable) {
+		gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+		if (!gpwrdn.b.pmuactv) {
+			return 1; /* PMU already off: normal operation, nothing to do */
+		}
+
+		fh_otg_adp_probe_stop(core_if);
+		/* Disable Interrupt from Power Down Logic */
+		gpwrdn.d32 = 0;
+		gpwrdn.b.pmuintsel = 1;
+		gpwrdn.b.pmuactv = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->
+				gpwrdn, gpwrdn.d32, 0);
+
+		/*
+		 * Initialize the Core for Host mode.
+		 */
+		core_if->op_state = A_HOST;
+		fh_otg_core_init(core_if);
+		fh_otg_enable_global_interrupts(core_if);
+		cil_hcd_start(core_if);
+		/* Start the Connection timer. So a message can be displayed
+		 * if connect does not occur within 10 seconds. */
+		cil_hcd_session_start(core_if);
+	}
+
+	return 1;
+}]]>
+
<![CDATA[+/** Handles the "restore done" interrupt: the core has completed the
+ * restore command issued after Hibernation exit. */
+int32_t fh_otg_handle_restore_done_intr(fh_otg_core_if_t * core_if)
+{
+	pcgcctl_data_t pcgcctl;
+	FH_DEBUGPL(DBG_ANY, "++Restore Done Interrupt++\n");
+
+	//TODO De-assert restore signal. 8.a
+	pcgcctl.d32 = FH_READ_REG32(core_if->pcgcctl);
+	if (pcgcctl.b.restoremode == 1) {
+		gintmsk_data_t gintmsk = {.d32 = 0 };
+		/*
+		 * If restore mode is Remote Wakeup,
+		 * unmask Remote Wakeup interrupt.
+		 */
+		gintmsk.b.wkupintr = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
+				0, gintmsk.d32);
+	}
+
+	return 1;
+}]]>
+
<![CDATA[+/**
+ * Handles the root-port disconnect interrupt: switches HCD/PCD roles as
+ * required by the current OTG op_state, then enters the L3 (OFF) state.
+ */
+int32_t fh_otg_handle_disconnect_intr(fh_otg_core_if_t * core_if)
+{
+	gintsts_data_t gintsts;
+
+	FH_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n",
+		   (fh_otg_is_host_mode(core_if) ? "Host" : "Device"),
+		   op_state_str(core_if));
+
+/** @todo Consolidate this if statement. */
+#ifndef FH_HOST_ONLY
+	if (core_if->op_state == B_HOST) {
+		/* If in device mode Disconnect and stop the HCD, then
+		 * start the PCD. */
+		FH_SPINUNLOCK(core_if->lock); /* callbacks take their own locks */
+		cil_hcd_disconnect(core_if);
+		cil_pcd_start(core_if);
+		FH_SPINLOCK(core_if->lock);
+		core_if->op_state = B_PERIPHERAL;
+	} else if (fh_otg_is_device_mode(core_if)) {
+		gotgctl_data_t gotgctl = {.d32 = 0 };
+		gotgctl.d32 =
+		    FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+		if (gotgctl.b.hstsethnpen == 1) {
+			/* Do nothing, if HNP in process the OTG
+			 * interrupt "Host Negotiation Detected"
+			 * interrupt will do the mode switch.
+			 */
+		} else if (gotgctl.b.devhnpen == 0) {
+			/* If in device mode Disconnect and stop the HCD, then
+			 * start the PCD. */
+			FH_SPINUNLOCK(core_if->lock);
+			cil_hcd_disconnect(core_if);
+			cil_pcd_start(core_if);
+			FH_SPINLOCK(core_if->lock);
+			core_if->op_state = B_PERIPHERAL;
+		} else {
+			FH_DEBUGPL(DBG_ANY, "!a_peripheral && !devhnpen\n");
+		}
+	} else {
+		if (core_if->op_state == A_HOST) {
+			/* A-Cable still connected but device disconnected. */
+			cil_hcd_disconnect(core_if);
+			if (core_if->adp_enable) {
+				gpwrdn_data_t gpwrdn = {.d32 = 0 };
+				cil_hcd_stop(core_if);
+				/* Enable Power Down Logic */
+				gpwrdn.b.pmuintsel = 1;
+				gpwrdn.b.pmuactv = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, 0, gpwrdn.d32);
+				fh_otg_adp_probe_start(core_if);
+
+				/* Power off the core */
+				if (core_if->power_down == 2) {
+					gpwrdn.d32 = 0;
+					gpwrdn.b.pwrdnswtch = 1;
+					FH_MODIFY_REG32
+					    (&core_if->core_global_regs->gpwrdn,
+					     gpwrdn.d32, 0);
+				}
+			}
+		}
+	}
+#endif
+	/* Change to L3(OFF) state */
+	core_if->lx_state = FH_OTG_L3;
+
+	gintsts.d32 = 0;
+	gintsts.b.disconnect = 1;
+	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32); /* ack interrupt */
+	return 1;
+}]]>
+
<![CDATA[+/**
+ * This interrupt indicates that SUSPEND state has been detected on
+ * the USB.
+ *
+ * For HNP the USB Suspend interrupt signals the change from
+ * "a_peripheral" to "a_host".
+ *
+ * When power management is enabled (power_down == 2 hibernation, or
+ * == 3 extended hibernation) the core will be put in low power mode.
+ */
+int32_t fh_otg_handle_usb_suspend_intr(fh_otg_core_if_t * core_if)
+{
+	dsts_data_t dsts;
+	gintsts_data_t gintsts;
+	dcfg_data_t dcfg;
+
+	FH_DEBUGPL(DBG_ANY, "USB SUSPEND\n");
+
+	if ((core_if->otg_ver == 1) && (core_if->op_state == A_PERIPHERAL)) {
+		core_if->lx_state = FH_OTG_L2;
+
+		/* Clear interrupt */
+		gintsts.d32 = 0;
+		gintsts.b.usbsuspend = 1;
+		FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+		return 1;
+	}
+
+	if (fh_otg_is_device_mode(core_if)) {
+		/* Check the Device status register to determine if the Suspend
+		 * state is active. */
+		dsts.d32 =
+		    FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+		FH_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32);
+		FH_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d "
+			   "HWCFG4.power Optimize=%d\n",
+			   dsts.b.suspsts, core_if->hwcfg4.b.power_optimiz);
+
+#ifdef PARTIAL_POWER_DOWN
+/** @todo Add a module parameter for power management. */
+
+		if (dsts.b.suspsts && core_if->hwcfg4.b.power_optimiz) {
+			pcgcctl_data_t power = {.d32 = 0 };
+			FH_DEBUGPL(DBG_CIL, "suspend\n");
+
+			power.b.pwrclmp = 1;
+			FH_WRITE_REG32(core_if->pcgcctl, power.d32);
+
+			power.b.rstpdwnmodule = 1;
+			FH_MODIFY_REG32(core_if->pcgcctl, 0, power.d32);
+
+			power.b.stoppclk = 1;
+			FH_MODIFY_REG32(core_if->pcgcctl, 0, power.d32);
+
+		} else {
+			FH_DEBUGPL(DBG_ANY, "disconnect?\n");
+		}
+#endif
+		/* PCD callback for suspend. Release the lock inside of callback function */
+		cil_pcd_suspend(core_if);
+		if (core_if->power_down == 2) {
+			dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
+			FH_DEBUGPL(DBG_ANY,"lx_state = %08x\n",core_if->lx_state);
+			FH_DEBUGPL(DBG_ANY," device address = %08d\n",dcfg.b.devaddr);
+
+			/* only hibernate once enumerated (devaddr != 0) and not already off */
+			if (core_if->lx_state != FH_OTG_L3 && dcfg.b.devaddr) {
+				pcgcctl_data_t pcgcctl = {.d32 = 0 };
+				gpwrdn_data_t gpwrdn = {.d32 = 0 };
+				gusbcfg_data_t gusbcfg = {.d32 = 0 };
+
+				/* Change to L2(suspend) state */
+				core_if->lx_state = FH_OTG_L2;
+
+				/* Clear interrupt in gintsts */
+				gintsts.d32 = 0;
+				gintsts.b.usbsuspend = 1;
+				FH_WRITE_REG32(&core_if->core_global_regs->
+					       gintsts, gintsts.d32);
+				FH_PRINTF("Start of hibernation completed\n");
+				fh_otg_save_global_regs(core_if);
+				fh_otg_save_dev_regs(core_if);
+
+				gusbcfg.d32 =
+				    FH_READ_REG32(&core_if->core_global_regs->
+						  gusbcfg);
+				if (gusbcfg.b.ulpi_utmi_sel == 1) {
+					/* ULPI interface: stop PHY clock before PMU enable */
+					/* Suspend the Phy Clock */
+					pcgcctl.d32 = 0;
+					pcgcctl.b.stoppclk = 1;
+					FH_MODIFY_REG32(core_if->pcgcctl, 0,
+							pcgcctl.d32);
+					fh_udelay(10);
+					gpwrdn.b.pmuactv = 1;
+					FH_MODIFY_REG32(&core_if->
+							core_global_regs->
+							gpwrdn, 0, gpwrdn.d32);
+				} else {
+					/* UTMI+ Interface: enable PMU before stopping PHY clock */
+					gpwrdn.b.pmuactv = 1;
+					FH_MODIFY_REG32(&core_if->
+							core_global_regs->
+							gpwrdn, 0, gpwrdn.d32);
+					fh_udelay(10);
+					pcgcctl.b.stoppclk = 1;
+					FH_MODIFY_REG32(core_if->pcgcctl, 0,
+							pcgcctl.d32);
+					fh_udelay(10);
+				}
+
+				/* Set flag to indicate that we are in hibernation */
+				core_if->hibernation_suspend = 1;
+				/* Enable interrupts from wake up logic */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.pmuintsel = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, 0, gpwrdn.d32);
+				fh_udelay(10);
+
+				/* Unmask device mode interrupts in GPWRDN */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.rst_det_msk = 1;
+				gpwrdn.b.lnstchng_msk = 1;
+				gpwrdn.b.sts_chngint_msk = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, 0, gpwrdn.d32);
+				fh_udelay(10);
+
+				/* Enable Power Down Clamp */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.pwrdnclmp = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, 0, gpwrdn.d32);
+				fh_udelay(10);
+
+				/* Switch off VDD */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.pwrdnswtch = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->
+						gpwrdn, 0, gpwrdn.d32);
+
+				/* Save gpwrdn register for further usage if stschng interrupt */
+				core_if->gr_backup->gpwrdn_local =
+				    FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+				FH_PRINTF("Hibernation completed\n");
+
+				return 1;
+			}
+		} else if (core_if->power_down == 3) {
+			pcgcctl_data_t pcgcctl = {.d32 = 0 };
+			dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
+			FH_DEBUGPL(DBG_ANY, "lx_state = %08x\n",core_if->lx_state);
+			FH_DEBUGPL(DBG_ANY, " device address = %08d\n",dcfg.b.devaddr);
+
+			if (core_if->lx_state != FH_OTG_L3 && dcfg.b.devaddr) {
+				FH_DEBUGPL(DBG_ANY, "Start entering to extended hibernation\n");
+				core_if->xhib = 1;
+
+				/* Clear interrupt in gintsts */
+				gintsts.d32 = 0;
+				gintsts.b.usbsuspend = 1;
+				FH_WRITE_REG32(&core_if->core_global_regs->
+					       gintsts, gintsts.d32);
+
+				fh_otg_save_global_regs(core_if);
+				fh_otg_save_dev_regs(core_if);
+
+				/* Wait for 10 PHY clocks */
+				fh_udelay(10);
+
+				/* Program GPIO register while entering to xHib */
+				FH_WRITE_REG32(&core_if->core_global_regs->ggpio, 0x1);
+
+				pcgcctl.b.enbl_extnd_hiber = 1;
+				FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
+				FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32); /* NOTE(review): written twice — confirm intentional */
+
+				pcgcctl.d32 = 0;
+				pcgcctl.b.extnd_hiber_pwrclmp = 1;
+				FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
+
+				pcgcctl.d32 = 0;
+				pcgcctl.b.extnd_hiber_switch = 1;
+				core_if->gr_backup->xhib_gpwrdn = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+				core_if->gr_backup->xhib_pcgcctl = FH_READ_REG32(core_if->pcgcctl) | pcgcctl.d32;
+				FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
+
+				FH_DEBUGPL(DBG_ANY, "Finished entering to extended hibernation\n");
+
+				return 1;
+			}
+		}
+		if ((core_if->otg_ver == 1) && (core_if->core_params->otg_cap == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE)) {
+			gotgctl_data_t gotgctl = {.d32 = 0 };
+			gotgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+			if (gotgctl.b.devhnpen && core_if->otg_ver == 1){
+				gotgctl_data_t gotgctl = {.d32 = 0 }; /* NOTE(review): shadows outer gotgctl — confirm intentional */
+				fh_mdelay(5);
+				/**@todo Is the gotgctl.devhnpen cleared
+				 * by a USB Reset? */
+				gotgctl.b.devhnpen = 1;
+				gotgctl.b.hnpreq = 1;
+				FH_WRITE_REG32(&core_if->core_global_regs->gotgctl,
+					       gotgctl.d32);
+			}
+		}
+	} else {
+		if (core_if->op_state == A_PERIPHERAL) {
+			FH_DEBUGPL(DBG_ANY, "a_peripheral->a_host\n");
+			/* Clear the a_peripheral flag, back to a_host. */
+			FH_SPINUNLOCK(core_if->lock);
+			cil_pcd_stop(core_if);
+			cil_hcd_start(core_if);
+			FH_SPINLOCK(core_if->lock);
+			core_if->op_state = A_HOST;
+		}
+	}
+
+	/* Change to L2(suspend) state */
+	core_if->lx_state = FH_OTG_L2;
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.usbsuspend = 1;
+	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+	return 1;
+}]]>
+
<![CDATA[+/* Restore the core when exiting extended hibernation (xHib). */
+static int32_t fh_otg_handle_xhib_exit_intr(fh_otg_core_if_t * core_if)
+{
+	gpwrdn_data_t gpwrdn = {.d32 = 0 };
+	pcgcctl_data_t pcgcctl = {.d32 = 0 };
+	gahbcfg_data_t gahbcfg = {.d32 = 0 };
+
+	fh_udelay(10);
+
+	/* Program GPIO register while exiting from xHib (undo the 0x1 written on entry) */
+	FH_WRITE_REG32(&core_if->core_global_regs->ggpio, 0x0);
+
+	pcgcctl.d32 = core_if->gr_backup->xhib_pcgcctl;
+	pcgcctl.b.extnd_hiber_pwrclmp = 0;
+	FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
+	fh_udelay(10);
+
+	gpwrdn.d32 = core_if->gr_backup->xhib_gpwrdn;
+	gpwrdn.b.restore = 1;
+	FH_WRITE_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32);
+	fh_udelay(10);
+
+	restore_lpm_i2c_regs(core_if);
+
+	pcgcctl.d32 = core_if->gr_backup->pcgcctl_local & (0x3FFFF << 14); /* keep only backed-up high field */
+	pcgcctl.b.max_xcvrselect = 1;
+	pcgcctl.b.ess_reg_restored = 0;
+	pcgcctl.b.extnd_hiber_switch = 0;
+	pcgcctl.b.extnd_hiber_pwrclmp = 0;
+	pcgcctl.b.enbl_extnd_hiber = 1;
+	FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
+
+	gahbcfg.d32 = core_if->gr_backup->gahbcfg_local;
+	gahbcfg.b.glblintrmsk = 1;
+	FH_WRITE_REG32(&core_if->core_global_regs->gahbcfg, gahbcfg.d32);
+
+	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF); /* clear all pending */
+	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, 0x1 << 16); /* unmask restoredone only; TODO confirm bit */
+
+	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
+		       core_if->gr_backup->gusbcfg_local);
+	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg,
+		       core_if->dr_backup->dcfg);
+
+	pcgcctl.d32 = 0;
+	pcgcctl.d32 = core_if->gr_backup->pcgcctl_local & (0x3FFFF << 14);
+	pcgcctl.b.max_xcvrselect = 1;
+	pcgcctl.d32 |= 0x608; /* magic restore bits — TODO confirm against databook */
+	FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
+	fh_udelay(10);
+
+	pcgcctl.d32 = 0;
+	pcgcctl.d32 = core_if->gr_backup->pcgcctl_local & (0x3FFFF << 14);
+	pcgcctl.b.max_xcvrselect = 1;
+	pcgcctl.b.ess_reg_restored = 1;
+	pcgcctl.b.enbl_extnd_hiber = 1;
+	pcgcctl.b.rstpdwnmodule = 1;
+	pcgcctl.b.restoremode = 1;
+	FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
+
+	FH_DEBUGPL(DBG_ANY, "%s called\n", __FUNCTION__);
+
+	return 1;
+}]]>
+
<![CDATA[+#ifdef CONFIG_USB_FH_OTG_LPM
+/**
+ * This function handles the LPM transaction received interrupt.
+ */
+static int32_t fh_otg_handle_lpm_intr(fh_otg_core_if_t * core_if)
+{
+	glpmcfg_data_t lpmcfg;
+	gintsts_data_t gintsts;
+
+	if (!core_if->core_params->lpm_enable) {
+		FH_PRINTF("Unexpected LPM interrupt\n");
+	}
+
+	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+	FH_PRINTF("LPM config register = 0x%08x\n", lpmcfg.d32);
+
+	if (fh_otg_is_host_mode(core_if)) {
+		cil_hcd_sleep(core_if);
+	} else {
+
+		pcgcctl_data_t pcgcctl = {.d32 = 0 };
+
+		lpmcfg.b.hird_thres |= (1 << 4); /* set HIRD threshold enable bit */
+		lpmcfg.b.en_utmi_sleep = 1;
+
+		pcgcctl.b.enbl_sleep_gating = 1;
+		FH_MODIFY_REG32(core_if->pcgcctl,0,pcgcctl.d32);
+
+		if(fh_otg_get_param_besl_enable(core_if)) {
+			lpmcfg.b.en_besl = 1;
+		}
+
+		FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg,
+			       lpmcfg.d32);
+	}
+
+	/* Examine prt_sleep_sts after TL1TokenRetry period max (10 us) */
+	fh_udelay(10);
+	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+	if (lpmcfg.b.prt_sleep_sts) {
+		/* Save the current state */
+		core_if->lx_state = FH_OTG_L1;
+	}
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.lpmtranrcvd = 1;
+	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+	return 1;
+}
+#endif /* CONFIG_USB_FH_OTG_LPM */]]>
+
<![CDATA[+/**
+ * Returns the pending, unmasked common-mode interrupts: GINTSTS masked by
+ * GINTMSK and filtered to the interrupts shared by host and device mode;
+ * 0 when the global interrupt mask (GAHBCFG.GlblIntrMsk) is off.
+ */
+static inline uint32_t fh_otg_read_common_intr(fh_otg_core_if_t * core_if)
+{
+	gahbcfg_data_t gahbcfg = {.d32 = 0 };
+	gintsts_data_t gintsts;
+	gintmsk_data_t gintmsk;
+	gintmsk_data_t gintmsk_common = {.d32 = 0 };
+	gintmsk_common.b.wkupintr = 1;
+	gintmsk_common.b.sessreqintr = 1;
+	gintmsk_common.b.conidstschng = 1;
+	gintmsk_common.b.otgintr = 1;
+	gintmsk_common.b.modemismatch = 1;
+	gintmsk_common.b.disconnect = 1;
+	gintmsk_common.b.usbsuspend = 1;
+#ifdef CONFIG_USB_FH_OTG_LPM
+	gintmsk_common.b.lpmtranrcvd = 1;
+#endif
+	gintmsk_common.b.restoredone = 1;
+	/** @todo: The port interrupt occurs while in device
+	 * mode. Added code to CIL to clear the interrupt for now!
+	 */
+	gintmsk_common.b.portintr = 1;
+
+	gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+	gintmsk.d32 = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
+	gahbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gahbcfg);
+
+#ifdef DEBUG
+	/* if any common interrupts set */
+	if (gintsts.d32 & gintmsk_common.d32) {
+		FH_DEBUGPL(DBG_ANY, "gintsts=%08x  gintmsk=%08x\n",
+			   gintsts.d32, gintmsk.d32);
+	}
+#endif
+	if (gahbcfg.b.glblintrmsk)
+		return ((gintsts.d32 & gintmsk.d32) & gintmsk_common.d32);
+	else
+		return 0;
+
+}]]>
+
<![CDATA[+/* MACRO for clearing interrupt bits in GPWRDN register (write-1-to-clear) */
+#define CLEAR_GPWRDN_INTR(__core_if,__intr) \
+do { \
+	gpwrdn_data_t gpwrdn = {.d32=0}; \
+	gpwrdn.b.__intr = 1; \
+	FH_MODIFY_REG32(&__core_if->core_global_regs->gpwrdn, \
+			0, gpwrdn.d32); \
+} while (0)]]>
+
+
<![CDATA[+/**
+ * Common interrupt handler.
+ *
+ * The common interrupts are those that occur in both Host and Device mode.
+ * This handler handles the following interrupts:
+ * - Mode Mismatch Interrupt
+ * - Disconnect Interrupt
+ * - OTG Interrupt
+ * - Connector ID Status Change Interrupt
+ * - Session Request Interrupt.
+ * - Resume / Remote Wakeup Detected Interrupt.
+ * - LPM Transaction Received Interrupt
+ * - ADP Transaction Received Interrupt
+ * While hibernating (hibernation_suspend > 0) it instead services the
+ * GPWRDN wake-up-logic interrupts.
+ */
+int32_t fh_otg_handle_common_intr(void *dev)
+{
+	int retval = 0;
+	gintsts_data_t gintsts;
+	gpwrdn_data_t gpwrdn = {.d32 = 0 };
+	fh_otg_device_t *otg_dev = dev;
+	fh_otg_core_if_t *core_if = otg_dev->core_if;
+	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn); /* snapshot used throughout */
+
+	if (fh_otg_check_haps_status(core_if) == -1 ) {
+		FH_WARN("HAPS is disconnected");
+		return retval;
+	}
+
+	if (fh_otg_is_device_mode(core_if))
+		core_if->frame_num = fh_otg_get_frame_number(core_if);
+
+	if (core_if->lock)
+		FH_SPINLOCK(core_if->lock);
+
+	if (core_if->power_down == 3 && core_if->xhib == 1) {
+		FH_DEBUGPL(DBG_ANY, "Exiting from xHIB state\n");
+		retval |= fh_otg_handle_xhib_exit_intr(core_if);
+		core_if->xhib = 2; /* restoredone path below completes the exit */
+		if (core_if->lock)
+			FH_SPINUNLOCK(core_if->lock);
+
+		return retval;
+	}
+
+	if (core_if->hibernation_suspend <= 0) {
+		gintsts.d32 = fh_otg_read_common_intr(core_if);
+
+		if (gintsts.b.modemismatch) {
+			retval |= fh_otg_handle_mode_mismatch_intr(core_if);
+		}
+		if (gintsts.b.otgintr) {
+			retval |= fh_otg_handle_otg_intr(core_if);
+		}
+		if (gintsts.b.conidstschng) {
+			retval |=
+			    fh_otg_handle_conn_id_status_change_intr(core_if);
+		}
+		if (gintsts.b.disconnect) {
+			retval |= fh_otg_handle_disconnect_intr(core_if);
+		}
+		if (gintsts.b.sessreqintr) {
+			retval |= fh_otg_handle_session_req_intr(core_if);
+		}
+		if (gintsts.b.wkupintr) {
+			retval |= fh_otg_handle_wakeup_detected_intr(core_if);
+		}
+		if (gintsts.b.usbsuspend) {
+			retval |= fh_otg_handle_usb_suspend_intr(core_if);
+		}
+#ifdef CONFIG_USB_FH_OTG_LPM
+		if (gintsts.b.lpmtranrcvd) {
+			retval |= fh_otg_handle_lpm_intr(core_if);
+		}
+#endif
+		if (gintsts.b.restoredone) {
+			gintsts.d32 = 0;
+			if (core_if->power_down == 2)
+				core_if->hibernation_suspend = -1; /* sentinel: restore in progress */
+			else if (core_if->power_down == 3 && core_if->xhib == 2) {
+				gpwrdn_data_t gpwrdn = {.d32 = 0 };
+				pcgcctl_data_t pcgcctl = {.d32 = 0 };
+				dctl_data_t dctl = {.d32 = 0 };
+
+				FH_WRITE_REG32(&core_if->core_global_regs->
+					       gintsts, 0xFFFFFFFF);
+
+				FH_DEBUGPL(DBG_ANY,
+					   "RESTORE DONE generated\n");
+
+				gpwrdn.b.restore = 1;
+				FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0); /* clear restore bit */
+				fh_udelay(10);
+
+				pcgcctl.b.rstpdwnmodule = 1;
+				FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+
+				FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, core_if->gr_backup->gusbcfg_local);
+				FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, core_if->dr_backup->dcfg);
+				FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, core_if->dr_backup->dctl);
+				fh_udelay(50);
+
+				dctl.b.pwronprgdone = 1;
+				FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
+				fh_udelay(10);
+
+				fh_otg_restore_global_regs(core_if);
+				fh_otg_restore_dev_regs(core_if, 0);
+
+				dctl.d32 = 0;
+				dctl.b.pwronprgdone = 1;
+				FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0); /* clear pwronprgdone */
+				fh_udelay(10);
+
+				pcgcctl.d32 = 0;
+				pcgcctl.b.enbl_extnd_hiber = 1;
+				FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+
+				/* The core will be in ON STATE */
+				core_if->lx_state = FH_OTG_L0;
+				core_if->xhib = 0;
+
+				FH_SPINUNLOCK(core_if->lock);
+				if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
+					core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
+				}
+				FH_SPINLOCK(core_if->lock);
+
+			}
+
+			gintsts.b.restoredone = 1;
+			FH_WRITE_REG32(&core_if->core_global_regs->gintsts,gintsts.d32);
+			FH_PRINTF(" --Restore done interrupt received-- \n");
+			retval |= 1;
+		}
+		if (gintsts.b.portintr && fh_otg_is_device_mode(core_if)) {
+			/* The port interrupt occurs while in device mode with HPRT0
+			 * Port Enable/Disable.
+			 */
+			gintsts.d32 = 0;
+			gintsts.b.portintr = 1;
+			FH_WRITE_REG32(&core_if->core_global_regs->gintsts,gintsts.d32);
+			retval |= 1;
+
+		}
+	} else {
+		FH_DEBUGPL(DBG_ANY, "gpwrdn=%08x\n", gpwrdn.d32);
+
+		if (gpwrdn.b.disconn_det && gpwrdn.b.disconn_det_msk) {
+			CLEAR_GPWRDN_INTR(core_if, disconn_det);
+			if (gpwrdn.b.linestate == 0) {
+				fh_otg_handle_pwrdn_disconnect_intr(core_if);
+			} else {
+				FH_PRINTF("Disconnect detected while linestate is not 0\n");
+			}
+
+			retval |= 1;
+		}
+		if (gpwrdn.b.lnstschng && gpwrdn.b.lnstchng_msk) {
+			CLEAR_GPWRDN_INTR(core_if, lnstschng);
+			/* remote wakeup from hibernation */
+			if (gpwrdn.b.linestate == 2 || gpwrdn.b.linestate == 1) {
+				fh_otg_handle_pwrdn_wakeup_detected_intr(core_if);
+			} else {
+				FH_PRINTF("gpwrdn.linestate = %d\n", gpwrdn.b.linestate);
+			}
+			retval |= 1;
+		}
+		if (gpwrdn.b.rst_det && gpwrdn.b.rst_det_msk) {
+			CLEAR_GPWRDN_INTR(core_if, rst_det);
+			if (gpwrdn.b.linestate == 0) {
+				FH_PRINTF("Reset detected\n");
+				retval |= fh_otg_device_hibernation_restore(core_if, 0, 1);
+			}
+		}
+		if (gpwrdn.b.srp_det && gpwrdn.b.srp_det_msk) {
+			CLEAR_GPWRDN_INTR(core_if, srp_det);
+			fh_otg_handle_pwrdn_srp_intr(core_if);
+			retval |= 1;
+		}
+	}
+	/* Handle ADP interrupt here */
+	if (gpwrdn.b.adp_int) {
+		CLEAR_GPWRDN_INTR(core_if, adp_int);
+		fh_otg_adp_handle_intr(core_if);
+		retval |= 1;
+	}
+	if (gpwrdn.b.sts_chngint && gpwrdn.b.sts_chngint_msk) {
+		CLEAR_GPWRDN_INTR(core_if, sts_chngint);
+		fh_otg_handle_pwrdn_stschng_intr(otg_dev);
+
+		retval |= 1;
+	}
+	if (gpwrdn.b.srp_det && gpwrdn.b.srp_det_msk) { /* NOTE(review): duplicates the srp_det handling above (stale snapshot re-fires) — confirm intentional */
+		CLEAR_GPWRDN_INTR(core_if, srp_det);
+		fh_otg_handle_pwrdn_srp_intr(core_if);
+		retval |= 1;
+	}
+	if (core_if->lock)
+		FH_SPINUNLOCK(core_if->lock);
+
+	return retval;
+}]]>
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_core_if.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_core_if.h
new file mode 100644
index 00000000..e4c4e172
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_core_if.h
@@ -0,0 +1,748 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_core_if.h $
+ * $Revision: #20 $
+ * $Date: 2015/10/12 $
+ * $Change: 2972621 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#if !defined(__FH_CORE_IF_H__)
+#define __FH_CORE_IF_H__
+
+#include "../fh_common_port/fh_os.h"
+#include <mach/fh_usb.h>
+
+/** @file
+ * This file defines FH_OTG Core API
+ */
+
+struct fh_otg_core_if;
+typedef struct fh_otg_core_if fh_otg_core_if_t;
+
+/** Maximum number of Periodic FIFOs */
+#define MAX_PERIO_FIFOS 15
+/** Maximum number of Tx FIFOs */
+#define MAX_TX_FIFOS 15
+
+/** Maximum number of Endpoints/HostChannels */
+#define MAX_EPS_CHANNELS 16
+
+extern fh_otg_core_if_t *fh_otg_cil_init(const uint32_t * _reg_base_addr,
+ struct fh_usb_platform_data *fh_usb_data);
+extern void fh_otg_core_init(fh_otg_core_if_t * _core_if);
+extern void fh_otg_cil_remove(fh_otg_core_if_t * _core_if);
+
+extern void fh_otg_enable_global_interrupts(fh_otg_core_if_t * _core_if);
+extern void fh_otg_disable_global_interrupts(fh_otg_core_if_t * _core_if);
+
+extern uint8_t fh_otg_is_device_mode(fh_otg_core_if_t * _core_if);
+extern uint8_t fh_otg_is_host_mode(fh_otg_core_if_t * _core_if);
+
+extern uint8_t fh_otg_is_dma_enable(fh_otg_core_if_t * core_if);
+
+/** This function should be called on every hardware interrupt. */
+extern int32_t fh_otg_handle_common_intr(void *otg_dev);
+
+
+/** @name OTG Core Parameters */
+/** @{ */
+
+/**
+ * Specifies the OTG capabilities. The driver will automatically
+ * detect the value for this parameter if none is specified.
+ * 0 - HNP and SRP capable (default)
+ * 1 - SRP Only capable
+ * 2 - No HNP/SRP capable
+ */
+extern int fh_otg_set_param_otg_cap(fh_otg_core_if_t * core_if, int32_t val);
+extern int32_t fh_otg_get_param_otg_cap(fh_otg_core_if_t * core_if);
+#define FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0
+#define FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1
+#define FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
+#define fh_param_otg_cap_default FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE
+
+extern int fh_otg_set_param_opt(fh_otg_core_if_t * core_if, int32_t val);
+extern int32_t fh_otg_get_param_opt(fh_otg_core_if_t * core_if);
+#define fh_param_opt_default 1
+
+/**
+ * Specifies whether to use slave or DMA mode for accessing the data
+ * FIFOs. The driver will automatically detect the value for this
+ * parameter if none is specified.
+ * 0 - Slave
+ * 1 - DMA (default, if available)
+ */
+extern int fh_otg_set_param_dma_enable(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_dma_enable(fh_otg_core_if_t * core_if);
+#define fh_param_dma_enable_default 1
+
+/**
+ * When DMA mode is enabled specifies whether to use
+ * address DMA or DMA Descriptor mode for accessing the data
+ * FIFOs in device mode. The driver will automatically detect
+ * the value for this parameter if none is specified.
+ * 0 - address DMA
+ * 1 - DMA Descriptor(default, if available)
+ */
+extern int fh_otg_set_param_dma_desc_enable(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_dma_desc_enable(fh_otg_core_if_t * core_if);
+#define fh_param_dma_desc_enable_default 1
+
+/** The DMA Burst size (applicable only for External DMA
+ * Mode). 1, 4, 8 16, 32, 64, 128, 256 (default 32)
+ */
+extern int fh_otg_set_param_dma_burst_size(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_dma_burst_size(fh_otg_core_if_t * core_if);
+#define fh_param_dma_burst_size_default 32
+
+/**
+ * Specifies the maximum speed of operation in host and device mode.
+ * The actual speed depends on the speed of the attached device and
+ * the value of phy_type. The actual speed depends on the speed of the
+ * attached device.
+ * 0 - High Speed (default)
+ * 1 - Full Speed
+ */
+extern int fh_otg_set_param_speed(fh_otg_core_if_t * core_if, int32_t val);
+extern int32_t fh_otg_get_param_speed(fh_otg_core_if_t * core_if);
+#define fh_param_speed_default 0
+#define FH_SPEED_PARAM_HIGH 0
+#define FH_SPEED_PARAM_FULL 1
+
+/** Specifies whether low power mode is supported when attached
+ * to a Full Speed or Low Speed device in host mode.
+ * 0 - Don't support low power mode (default)
+ * 1 - Support low power mode
+ */
+extern int fh_otg_set_param_host_support_fs_ls_low_power(fh_otg_core_if_t *
+ core_if, int32_t val);
+extern int32_t fh_otg_get_param_host_support_fs_ls_low_power(fh_otg_core_if_t
+ * core_if);
+#define fh_param_host_support_fs_ls_low_power_default 0
+
+/** Specifies the PHY clock rate in low power mode when connected to a
+ * Low Speed device in host mode. This parameter is applicable only if
+ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
+ * then defaults to 6 MHZ otherwise 48 MHZ.
+ *
+ * 0 - 48 MHz
+ * 1 - 6 MHz
+ */
+extern int fh_otg_set_param_host_ls_low_power_phy_clk(fh_otg_core_if_t *
+ core_if, int32_t val);
+extern int32_t fh_otg_get_param_host_ls_low_power_phy_clk(fh_otg_core_if_t *
+ core_if);
+#define fh_param_host_ls_low_power_phy_clk_default 0
+#define FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+
+#define FH_CC_CLK_FREQ_30MHZ 0x7530
+
+/**
+ * 0 - Use cC FIFO size parameters
+ * 1 - Allow dynamic FIFO sizing (default)
+ */
+extern int fh_otg_set_param_enable_dynamic_fifo(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_enable_dynamic_fifo(fh_otg_core_if_t *
+ core_if);
+#define fh_param_enable_dynamic_fifo_default 1
+
+/** Total number of 4-byte words in the data FIFO memory. This
+ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
+ * Tx FIFOs.
+ * 32 to 32768 (default 8192)
+ * Note: The total FIFO memory depth in the FPGA configuration is 8192.
+ */
+extern int fh_otg_set_param_data_fifo_size(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_data_fifo_size(fh_otg_core_if_t * core_if);
+#define fh_param_data_fifo_size_default 2560
+
+/** Number of 4-byte words in the Rx FIFO in device mode when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1064)
+ */
+extern int fh_otg_set_param_dev_rx_fifo_size(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_dev_rx_fifo_size(fh_otg_core_if_t * core_if);
+#define fh_param_dev_rx_fifo_size_default 1064
+
+/** Number of 4-byte words in the non-periodic Tx FIFO in device mode
+ * when dynamic FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+extern int fh_otg_set_param_dev_nperio_tx_fifo_size(fh_otg_core_if_t *
+ core_if, int32_t val);
+extern int32_t fh_otg_get_param_dev_nperio_tx_fifo_size(fh_otg_core_if_t *
+ core_if);
+#define fh_param_dev_nperio_tx_fifo_size_default 128
+
+/** Number of 4-byte words in each of the periodic Tx FIFOs in device
+ * mode when dynamic FIFO sizing is enabled.
+ * 4 to 768 (default 256)
+ */
+extern int fh_otg_set_param_dev_perio_tx_fifo_size(fh_otg_core_if_t * core_if,
+ int32_t val, int fifo_num);
+extern int32_t fh_otg_get_param_dev_perio_tx_fifo_size(fh_otg_core_if_t *
+ core_if, int fifo_num);
+#define fh_param_dev_perio_tx_fifo_size_default 256
+
+/** Number of 4-byte words in the Rx FIFO in host mode when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+extern int fh_otg_set_param_host_rx_fifo_size(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_host_rx_fifo_size(fh_otg_core_if_t * core_if);
+#define fh_param_host_rx_fifo_size_default 512
+
+/** Number of 4-byte words in the non-periodic Tx FIFO in host mode
+ * when Dynamic FIFO sizing is enabled in the core.
+ * 16 to 32768 (default 1024)
+ */
+extern int fh_otg_set_param_host_nperio_tx_fifo_size(fh_otg_core_if_t *
+ core_if, int32_t val);
+extern int32_t fh_otg_get_param_host_nperio_tx_fifo_size(fh_otg_core_if_t *
+ core_if);
+#define fh_param_host_nperio_tx_fifo_size_default 128
+
+/** Number of 4-byte words in the host periodic Tx FIFO when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+extern int fh_otg_set_param_host_perio_tx_fifo_size(fh_otg_core_if_t *
+ core_if, int32_t val);
+extern int32_t fh_otg_get_param_host_perio_tx_fifo_size(fh_otg_core_if_t *
+ core_if);
+#define fh_param_host_perio_tx_fifo_size_default 256
+
+/** The maximum transfer size supported in bytes.
+ * 2047 to 65,535 (default 65,535)
+ */
+extern int fh_otg_set_param_max_transfer_size(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_max_transfer_size(fh_otg_core_if_t * core_if);
+#define fh_param_max_transfer_size_default 65535
+
+/** The maximum number of packets in a transfer.
+ * 15 to 511 (default 511)
+ */
+extern int fh_otg_set_param_max_packet_count(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_max_packet_count(fh_otg_core_if_t * core_if);
+#define fh_param_max_packet_count_default 511
+
+/** The number of host channel registers to use.
+ * 1 to 16 (default 12)
+ * Note: The FPGA configuration supports a maximum of 12 host channels.
+ */
+extern int fh_otg_set_param_host_channels(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_host_channels(fh_otg_core_if_t * core_if);
+#define fh_param_host_channels_default 16
+
+/** The number of endpoints in addition to EP0 available for device
+ * mode operations.
+ * 1 to 15 (default 6 IN and OUT)
+ * Note: The FPGA configuration supports a maximum of 6 IN and OUT
+ * endpoints in addition to EP0.
+ */
+extern int fh_otg_set_param_dev_endpoints(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_dev_endpoints(fh_otg_core_if_t * core_if);
+#define fh_param_dev_endpoints_default 8
+
+/**
+ * Specifies the type of PHY interface to use. By default, the driver
+ * will automatically detect the phy_type.
+ *
+ * 0 - Full Speed PHY
+ * 1 - UTMI+ (default)
+ * 2 - ULPI
+ */
+extern int fh_otg_set_param_phy_type(fh_otg_core_if_t * core_if, int32_t val);
+extern int32_t fh_otg_get_param_phy_type(fh_otg_core_if_t * core_if);
+#define FH_PHY_TYPE_PARAM_FS 0
+#define FH_PHY_TYPE_PARAM_UTMI 1
+#define FH_PHY_TYPE_PARAM_ULPI 2
+#define fh_param_phy_type_default FH_PHY_TYPE_PARAM_UTMI
+
+/**
+ * Specifies the UTMI+ Data Width. This parameter is
+ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
+ * PHY_TYPE, this parameter indicates the data width between
+ * the MAC and the ULPI Wrapper.) Also, this parameter is
+ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
+ * to "8 and 16 bits", meaning that the core has been
+ * configured to work at either data path width.
+ *
+ * 8 or 16 bits (default 16)
+ */
+extern int fh_otg_set_param_phy_utmi_width(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_phy_utmi_width(fh_otg_core_if_t * core_if);
+#define fh_param_phy_utmi_width_default 16
+
+/**
+ * Specifies whether the ULPI operates at double or single
+ * data rate. This parameter is only applicable if PHY_TYPE is
+ * ULPI.
+ *
+ * 0 - single data rate ULPI interface with 8 bit wide data
+ * bus (default)
+ * 1 - double data rate ULPI interface with 4 bit wide data
+ * bus
+ */
+extern int fh_otg_set_param_phy_ulpi_ddr(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_phy_ulpi_ddr(fh_otg_core_if_t * core_if);
+#define fh_param_phy_ulpi_ddr_default 0
+
+/**
+ * Specifies whether to use the internal or external supply to
+ * drive the vbus with a ULPI phy.
+ */
+extern int fh_otg_set_param_phy_ulpi_ext_vbus(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_phy_ulpi_ext_vbus(fh_otg_core_if_t * core_if);
+#define FH_PHY_ULPI_INTERNAL_VBUS 0
+#define FH_PHY_ULPI_EXTERNAL_VBUS 1
+#define fh_param_phy_ulpi_ext_vbus_default FH_PHY_ULPI_INTERNAL_VBUS
+
+/**
+ * Specifies whether to use the I2C interface for full speed PHY. This
+ * parameter is only applicable if PHY_TYPE is FS.
+ * 0 - No (default)
+ * 1 - Yes
+ */
+extern int fh_otg_set_param_i2c_enable(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_i2c_enable(fh_otg_core_if_t * core_if);
+#define fh_param_i2c_enable_default 0
+
+extern int fh_otg_set_param_ulpi_fs_ls(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_ulpi_fs_ls(fh_otg_core_if_t * core_if);
+#define fh_param_ulpi_fs_ls_default 0
+
+extern int fh_otg_set_param_ts_dline(fh_otg_core_if_t * core_if, int32_t val);
+extern int32_t fh_otg_get_param_ts_dline(fh_otg_core_if_t * core_if);
+#define fh_param_ts_dline_default 0
+
+/**
+ * Specifies whether dedicated transmit FIFOs are
+ * enabled for non periodic IN endpoints in device mode
+ * 0 - No
+ * 1 - Yes
+ */
+extern int fh_otg_set_param_en_multiple_tx_fifo(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_en_multiple_tx_fifo(fh_otg_core_if_t *
+ core_if);
+#define fh_param_en_multiple_tx_fifo_default 1
+
+/** Number of 4-byte words in each of the Tx FIFOs in device
+ * mode when dynamic FIFO sizing is enabled.
+ * 4 to 768 (default 256)
+ */
+extern int fh_otg_set_param_dev_tx_fifo_size(fh_otg_core_if_t * core_if,
+ int fifo_num, int32_t val);
+extern int32_t fh_otg_get_param_dev_tx_fifo_size(fh_otg_core_if_t * core_if,
+ int fifo_num);
+#define fh_param_dev_tx_fifo_size_default 256
+
+/** Thresholding enable flag-
+ * bit 0 - enable non-ISO Tx thresholding
+ * bit 1 - enable ISO Tx thresholding
+ * bit 2 - enable Rx thresholding
+ */
+extern int fh_otg_set_param_thr_ctl(fh_otg_core_if_t * core_if, int32_t val);
+extern int32_t fh_otg_get_thr_ctl(fh_otg_core_if_t * core_if, int fifo_num);
+#define fh_param_thr_ctl_default 0
+
+/** Thresholding length for Tx
+ * FIFOs in 32 bit DWORDs
+ */
+extern int fh_otg_set_param_tx_thr_length(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_tx_thr_length(fh_otg_core_if_t * core_if);
+#define fh_param_tx_thr_length_default 64
+
+/** Thresholding length for Rx
+ * FIFOs in 32 bit DWORDs
+ */
+extern int fh_otg_set_param_rx_thr_length(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_rx_thr_length(fh_otg_core_if_t * core_if);
+#define fh_param_rx_thr_length_default 64
+
+/**
+ * Specifies whether LPM (Link Power Management) support is enabled
+ */
+extern int fh_otg_set_param_lpm_enable(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_lpm_enable(fh_otg_core_if_t * core_if);
+#define fh_param_lpm_enable_default 1
+
+/**
+ * Specifies whether LPM Errata (Link Power Management) support is enabled
+ */
+extern int fh_otg_set_param_besl_enable(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_besl_enable(fh_otg_core_if_t * core_if);
+#define fh_param_besl_enable_default 0
+
+/**
+ * Specifies baseline_besl default value
+ */
+extern int fh_otg_set_param_baseline_besl(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_baseline_besl(fh_otg_core_if_t * core_if);
+#define fh_param_baseline_besl_default 0
+
+/**
+ * Specifies deep_besl default value
+ */
+extern int fh_otg_set_param_deep_besl(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_deep_besl(fh_otg_core_if_t * core_if);
+#define fh_param_deep_besl_default 15
+
+/**
+ * Specifies whether PTI enhancement is enabled
+ */
+extern int fh_otg_set_param_pti_enable(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_pti_enable(fh_otg_core_if_t * core_if);
+#define fh_param_pti_enable_default 0
+
+/**
+ * Specifies whether MPI enhancement is enabled
+ */
+extern int fh_otg_set_param_mpi_enable(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_mpi_enable(fh_otg_core_if_t * core_if);
+#define fh_param_mpi_enable_default 0
+
+/**
+ * Specifies whether ADP capability is enabled
+ */
+extern int fh_otg_set_param_adp_enable(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_adp_enable(fh_otg_core_if_t * core_if);
+#define fh_param_adp_enable_default 0
+
+/**
+ * Specifies whether IC_USB capability is enabled
+ */
+
+extern int fh_otg_set_param_ic_usb_cap(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_ic_usb_cap(fh_otg_core_if_t * core_if);
+#define fh_param_ic_usb_cap_default 0
+
+extern int fh_otg_set_param_ahb_thr_ratio(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_ahb_thr_ratio(fh_otg_core_if_t * core_if);
+#define fh_param_ahb_thr_ratio_default 0
+
+extern int fh_otg_set_param_power_down(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_power_down(fh_otg_core_if_t * core_if);
+#define fh_param_power_down_default 0
+
+extern int fh_otg_set_param_reload_ctl(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_reload_ctl(fh_otg_core_if_t * core_if);
+#define fh_param_reload_ctl_default 0
+
+extern int fh_otg_set_param_dev_out_nak(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_dev_out_nak(fh_otg_core_if_t * core_if);
+#define fh_param_dev_out_nak_default 0
+
+extern int fh_otg_set_param_cont_on_bna(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_cont_on_bna(fh_otg_core_if_t * core_if);
+#define fh_param_cont_on_bna_default 0
+
+extern int fh_otg_set_param_ahb_single(fh_otg_core_if_t * core_if,
+ int32_t val);
+extern int32_t fh_otg_get_param_ahb_single(fh_otg_core_if_t * core_if);
+#define fh_param_ahb_single_default 0
+
+extern int fh_otg_set_param_otg_ver(fh_otg_core_if_t * core_if, int32_t val);
+extern int32_t fh_otg_get_param_otg_ver(fh_otg_core_if_t * core_if);
+#define fh_param_otg_ver_default 0
+
+/** @} */
+
+/** @name Access to registers and bit-fields */
+
+/**
+ * Dump core registers and SPRAM
+ */
+extern void fh_otg_dump_dev_registers(fh_otg_core_if_t * _core_if);
+extern void fh_otg_dump_spram(fh_otg_core_if_t * _core_if);
+extern void fh_otg_dump_host_registers(fh_otg_core_if_t * _core_if);
+extern void fh_otg_dump_global_registers(fh_otg_core_if_t * _core_if);
+
+/**
+ * Get host negotiation status.
+ */
+extern uint32_t fh_otg_get_hnpstatus(fh_otg_core_if_t * core_if);
+
+/**
+ * Get srp status
+ */
+extern uint32_t fh_otg_get_srpstatus(fh_otg_core_if_t * core_if);
+
+/**
+ * Set hnpreq bit in the GOTGCTL register.
+ */
+extern void fh_otg_set_hnpreq(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get Content of SNPSID register.
+ */
+extern uint32_t fh_otg_get_gsnpsid(fh_otg_core_if_t * core_if);
+
+/**
+ * Get current mode.
+ * Returns 0 if in device mode, and 1 if in host mode.
+ */
+extern uint32_t fh_otg_get_mode(fh_otg_core_if_t * core_if);
+
+/**
+ * Get value of hnpcapable field in the GUSBCFG register
+ */
+extern uint32_t fh_otg_get_hnpcapable(fh_otg_core_if_t * core_if);
+/**
+ * Set value of hnpcapable field in the GUSBCFG register
+ */
+extern void fh_otg_set_hnpcapable(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get value of srpcapable field in the GUSBCFG register
+ */
+extern uint32_t fh_otg_get_srpcapable(fh_otg_core_if_t * core_if);
+/**
+ * Set value of srpcapable field in the GUSBCFG register
+ */
+extern void fh_otg_set_srpcapable(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get value of devspeed field in the DCFG register
+ */
+extern uint32_t fh_otg_get_devspeed(fh_otg_core_if_t * core_if);
+/**
+ * Set value of devspeed field in the DCFG register
+ */
+extern void fh_otg_set_devspeed(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get the value of busconnected field from the HPRT0 register
+ */
+extern uint32_t fh_otg_get_busconnected(fh_otg_core_if_t * core_if);
+
+/**
+ * Gets the device enumeration Speed.
+ */
+extern uint32_t fh_otg_get_enumspeed(fh_otg_core_if_t * core_if);
+
+/**
+ * Get value of prtpwr field from the HPRT0 register
+ */
+extern uint32_t fh_otg_get_prtpower(fh_otg_core_if_t * core_if);
+
+/**
+ * Get value of flag indicating core state - hibernated or not
+ */
+extern uint32_t fh_otg_get_core_state(fh_otg_core_if_t * core_if);
+
+/**
+ * Set value of prtpwr field from the HPRT0 register
+ */
+extern void fh_otg_set_prtpower(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get value of prtsusp field from the HPRT0 register
+ */
+extern uint32_t fh_otg_get_prtsuspend(fh_otg_core_if_t * core_if);
+/**
+ * Set value of prtpwr field from the HPRT0 register
+ */
+extern void fh_otg_set_prtsuspend(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get value of ModeChTimEn field from the HCFG register
+ */
+extern uint32_t fh_otg_get_mode_ch_tim(fh_otg_core_if_t * core_if);
+/**
+ * Set value of ModeChTimEn field from the HCFG register
+ */
+extern void fh_otg_set_mode_ch_tim(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get value of Frame Interval field from the HFIR register
+ */
+extern uint32_t fh_otg_get_fr_interval(fh_otg_core_if_t * core_if);
+/**
+ * Set value of Frame Interval field from the HFIR register
+ */
+extern void fh_otg_set_fr_interval(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Set value of prtres field from the HPRT0 register
+ *FIXME Remove?
+ */
+extern void fh_otg_set_prtresume(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get value of rmtwkupsig bit in DCTL register
+ */
+extern uint32_t fh_otg_get_remotewakesig(fh_otg_core_if_t * core_if);
+
+/**
+ * Get value of besl_reject bit in DCTL register
+ */
+
+extern uint32_t fh_otg_get_beslreject(fh_otg_core_if_t * core_if);
+
+/**
+ * Set value of besl_reject bit in DCTL register
+ */
+
+extern void fh_otg_set_beslreject(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get value of prt_sleep_sts field from the GLPMCFG register
+ */
+extern uint32_t fh_otg_get_lpm_portsleepstatus(fh_otg_core_if_t * core_if);
+
+/**
+ * Get value of rem_wkup_en field from the GLPMCFG register
+ */
+extern uint32_t fh_otg_get_lpm_remotewakeenabled(fh_otg_core_if_t * core_if);
+
+/**
+ * Get value of appl_resp field from the GLPMCFG register
+ */
+extern uint32_t fh_otg_get_lpmresponse(fh_otg_core_if_t * core_if);
+/**
+ * Set value of appl_resp field from the GLPMCFG register
+ */
+extern void fh_otg_set_lpmresponse(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get value of hsic_connect field from the GLPMCFG register
+ */
+extern uint32_t fh_otg_get_hsic_connect(fh_otg_core_if_t * core_if);
+/**
+ * Set value of hsic_connect field from the GLPMCFG register
+ */
+extern void fh_otg_set_hsic_connect(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * Get value of inv_sel_hsic field from the GLPMCFG register.
+ */
+extern uint32_t fh_otg_get_inv_sel_hsic(fh_otg_core_if_t * core_if);
+/**
+ * Set value of inv_sel_hsic field from the GLPMCFG register.
+ */
+extern void fh_otg_set_inv_sel_hsic(fh_otg_core_if_t * core_if, uint32_t val);
+/**
+ * Set value of hird_thresh field from the GLPMCFG register.
+ */
+extern void fh_otg_set_hirdthresh(fh_otg_core_if_t * core_if, uint32_t val);
+/**
+ * Get value of hird_thresh field from the GLPMCFG register.
+ */
+extern uint32_t fh_otg_get_hirdthresh(fh_otg_core_if_t * core_if);
+
+
+/*
+ * Some functions for accessing registers
+ */
+
+/**
+ * GOTGCTL register
+ */
+extern uint32_t fh_otg_get_gotgctl(fh_otg_core_if_t * core_if);
+extern void fh_otg_set_gotgctl(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * GUSBCFG register
+ */
+extern uint32_t fh_otg_get_gusbcfg(fh_otg_core_if_t * core_if);
+extern void fh_otg_set_gusbcfg(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * GRXFSIZ register
+ */
+extern uint32_t fh_otg_get_grxfsiz(fh_otg_core_if_t * core_if);
+extern void fh_otg_set_grxfsiz(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * GNPTXFSIZ register
+ */
+extern uint32_t fh_otg_get_gnptxfsiz(fh_otg_core_if_t * core_if);
+extern void fh_otg_set_gnptxfsiz(fh_otg_core_if_t * core_if, uint32_t val);
+
+extern uint32_t fh_otg_get_gpvndctl(fh_otg_core_if_t * core_if);
+extern void fh_otg_set_gpvndctl(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * GGPIO register
+ */
+extern uint32_t fh_otg_get_ggpio(fh_otg_core_if_t * core_if);
+extern void fh_otg_set_ggpio(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * GUID register
+ */
+extern uint32_t fh_otg_get_guid(fh_otg_core_if_t * core_if);
+extern void fh_otg_set_guid(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * HPRT0 register
+ */
+extern uint32_t fh_otg_get_hprt0(fh_otg_core_if_t * core_if);
+extern void fh_otg_set_hprt0(fh_otg_core_if_t * core_if, uint32_t val);
+
+/**
+ * GHPTXFSIZE
+ */
+extern uint32_t fh_otg_get_hptxfsiz(fh_otg_core_if_t * core_if);
+
+/** @} */
+
+#endif /* __FH_CORE_IF_H__ */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_dbg.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_dbg.h
new file mode 100644
index 00000000..661bb020
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_dbg.h
@@ -0,0 +1,113 @@
+/* ==========================================================================
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef __FH_OTG_DBG_H__
+#define __FH_OTG_DBG_H__
+
+/** @file
+ * This file defines debug levels.
+ * Debugging support vanishes in non-debug builds.
+ */
+
+/**
+ * The Debug Level bit-mask variable.
+ */
+extern uint32_t g_dbg_lvl;
+/**
+ * Set the Debug Level variable.
+ */
+static inline uint32_t SET_DEBUG_LEVEL(const uint32_t new)
+{
+	uint32_t old = g_dbg_lvl;	/* save the current bit-mask so the caller can restore it */
+	g_dbg_lvl = new;		/* install the new debug-level bit-mask */
+	return old;			/* previous debug level */
+}
+
+/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */
+#define DBG_CIL (0x2)
+/** When debug level has the DBG_CILV bit set, display CIL Verbose debug
+ * messages */
+#define DBG_CILV (0x20)
+/** When debug level has the DBG_PCD bit set, display PCD (Device) debug
+ * messages */
+#define DBG_PCD (0x4)
+/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug
+ * messages */
+#define DBG_PCDV (0x40)
+/** When debug level has the DBG_HCD bit set, display Host debug messages */
+#define DBG_HCD (0x8)
+/** When debug level has the DBG_HCDV bit set, display Verbose Host debug
+ * messages */
+#define DBG_HCDV (0x80)
+/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host
+ * mode. */
+#define DBG_HCD_URB (0x800)
+
+/** When debug level has any bit set, display debug messages */
+#define DBG_ANY (0xFF)
+
+/** All debug messages off */
+#define DBG_OFF 0
+
+/** Prefix string for FH_DEBUG print macros. */
+#define USB_FH "FH_otg: "
+
+/**
+ * Print a debug message when the Global debug level variable contains
+ * the bit defined in <code>lvl</code>.
+ *
+ * @param[in] lvl - Debug level, use one of the DBG_ constants above.
+ * @param[in] x - like printf
+ *
+ * Example:<p>
+ * <code>
+ * FH_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr);
+ * </code>
+ * <br>
+ * results in:<br>
+ * <code>
+ * FH_otg: fh_otg_cil_init(ca867000)
+ * </code>
+ */
+#ifdef DEBUG
+
+# define FH_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)__FH_DEBUG(USB_FH x ); }while(0)
+# define FH_DEBUGP(x...) FH_DEBUGPL(DBG_ANY, x )
+
+# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl)
+
+#else
+
+# define FH_DEBUGPL(lvl, x...) do{}while(0)
+# define FH_DEBUGP(x...)
+
+# define CHK_DEBUG_LEVEL(level) (0)
+
+#endif /*DEBUG*/
+#endif
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.c
new file mode 100644
index 00000000..da5896bc
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.c
@@ -0,0 +1,1523 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_driver.c $
+ * $Revision: #105 $
+ * $Date: 2015/10/13 $
+ * $Change: 2974245 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ * The fh_otg_driver module provides the initialization and cleanup entry
+ * points for the FH_otg driver. This module will be dynamically installed
+ * after Linux is booted using the insmod command. When the module is
+ * installed, the fh_otg_driver_init function is called. When the module is
+ * removed (using rmmod), the fh_otg_driver_cleanup function is called.
+ *
+ * This module also defines a data structure for the fh_otg_driver, which is
+ * used in conjunction with the standard ARM lm_device structure. These
+ * structures allow the OTG driver to comply with the standard Linux driver
+ * model in which devices and drivers are registered with a bus driver. This
+ * has the benefit that Linux can expose attributes of the driver and device
+ * in its special sysfs file system. Users can then read or write files in
+ * this file system to perform diagnostics on the driver components or the
+ * device.
+ */
+
+#include <linux/platform_device.h>
+
+#include "fh_otg_os_dep.h"
+#include "../fh_common_port/fh_os.h"
+#include "fh_otg_dbg.h"
+#include "fh_otg_driver.h"
+#include "fh_otg_attr.h"
+#include "fh_otg_core_if.h"
+#include "fh_otg_pcd_if.h"
+#include "fh_otg_hcd_if.h"
+#include <linux/clk.h>
+
+
+#define FH_DRIVER_VERSION "3.30a 13-OCT-2015"
+#define FH_DRIVER_DESC "HS OTG USB Controller driver"
+
+static const char fh_driver_name[] = "fh_otg";
+int g_irq;
+
+extern int pcd_init(struct platform_device *dev, int irq);
+
+extern int hcd_init(struct platform_device *dev, int irq);
+
+extern int pcd_remove(struct platform_device *dev, int irq);
+
+extern void hcd_remove(struct platform_device *dev);
+
+extern void fh_otg_adp_start(fh_otg_core_if_t * core_if, uint8_t is_host);
+
+/*-------------------------------------------------------------------------*/
+/* Encapsulate the module parameter settings */
+
+/*
+ * Per-parameter overrides passed from the module command line to the
+ * FH_OTG core at probe time.  For every field, -1 means "do not
+ * override the core default" (set_parameters() skips fields equal to
+ * -1); any other value is pushed into the core via the matching
+ * fh_otg_set_param_*() accessor.  FIFO sizes are in 32-bit words.
+ */
+struct fh_otg_driver_module_params {
+	int32_t opt;
+	int32_t otg_cap;
+	/* DMA configuration */
+	int32_t dma_enable;
+	int32_t dma_desc_enable;
+	int32_t dma_burst_size;
+	int32_t speed;
+	int32_t host_support_fs_ls_low_power;
+	int32_t host_ls_low_power_phy_clk;
+	/* FIFO sizing (device and host side) */
+	int32_t enable_dynamic_fifo;
+	int32_t data_fifo_size;
+	int32_t dev_rx_fifo_size;
+	int32_t dev_nperio_tx_fifo_size;
+	uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS];
+	int32_t host_rx_fifo_size;
+	int32_t host_nperio_tx_fifo_size;
+	int32_t host_perio_tx_fifo_size;
+	int32_t max_transfer_size;
+	int32_t max_packet_count;
+	int32_t host_channels;
+	int32_t dev_endpoints;
+	/* PHY interface selection and geometry */
+	int32_t phy_type;
+	int32_t phy_utmi_width;
+	int32_t phy_ulpi_ddr;
+	int32_t phy_ulpi_ext_vbus;
+	int32_t i2c_enable;
+	int32_t ulpi_fs_ls;
+	int32_t ts_dline;
+	int32_t en_multiple_tx_fifo;
+	uint32_t dev_tx_fifo_size[MAX_TX_FIFOS];
+	/* Thresholding */
+	uint32_t thr_ctl;
+	uint32_t tx_thr_length;
+	uint32_t rx_thr_length;
+	int32_t pti_enable;
+	int32_t mpi_enable;
+	/* Link power management (LPM/BESL) */
+	int32_t lpm_enable;
+	int32_t besl_enable;
+	int32_t baseline_besl;
+	int32_t deep_besl;
+	int32_t ic_usb_cap;
+	int32_t ahb_thr_ratio;
+	int32_t power_down;
+	int32_t reload_ctl;
+	int32_t dev_out_nak;
+	int32_t cont_on_bna;
+	int32_t ahb_single;
+	int32_t otg_ver;
+	int32_t adp_enable;
+};
+
+/*
+ * Default module parameter values.  -1 fields are left to the core's
+ * own defaults (set_parameters() skips them); everything else is
+ * applied at probe time.  FIFO sizes are in 32-bit words.
+ */
+static struct fh_otg_driver_module_params fh_otg_module_params = {
+	.opt = -1,
+	.otg_cap = 0,
+	.dma_enable = 1,
+	.dma_desc_enable = 1,
+	.dma_burst_size = -1,
+	.speed = 0,
+	.host_support_fs_ls_low_power = 0,
+	.host_ls_low_power_phy_clk = 0,
+	.enable_dynamic_fifo = 1,
+	.data_fifo_size = -1,
+	.dev_rx_fifo_size = 549,
+	.dev_nperio_tx_fifo_size = 256,
+	.dev_perio_tx_fifo_size = {
+				   /* dev_perio_tx_fifo_size_1 */
+				   256,
+				   256,
+				   256,
+				   256,
+				   256,
+				   32,
+				   16,
+				   16,
+				   16,
+				   16,
+				   16,
+				   16,
+				   16,
+				   16,
+				   16
+				   /* 15 */
+				   },
+	.host_rx_fifo_size = 542,
+	.host_nperio_tx_fifo_size = 256,
+	.host_perio_tx_fifo_size = 512,
+	.max_transfer_size = 65535,
+	.max_packet_count = 511,
+	.host_channels = 8,
+	.dev_endpoints = 5,
+	.phy_type = 1,		/* UTMI+ (see phy_type module param) */
+	.phy_utmi_width = 16,
+	.phy_ulpi_ddr = 0,
+	.phy_ulpi_ext_vbus = 0,
+	.i2c_enable = 0,
+	.ulpi_fs_ls = 0,
+	.ts_dline = 0,
+	.en_multiple_tx_fifo = 1,
+	.dev_tx_fifo_size = {
+			     /* dev_tx_fifo_size */
+			     256,
+			     256,
+			     256,
+			     256,
+			     256,
+			     32,
+			     16,
+			     16,
+			     16,
+			     16,
+			     16,
+			     16,
+			     16,
+			     16,
+			     16
+			     /* 15 */
+			     },
+	.thr_ctl = 0,
+	.tx_thr_length = -1,
+	.rx_thr_length = -1,
+	.pti_enable = 0,
+	.mpi_enable = 0,
+	.lpm_enable = 0,
+	.besl_enable = 0,
+	.baseline_besl = 0,
+	.deep_besl = -1,
+	.ic_usb_cap = 0,
+	.ahb_thr_ratio = 0,
+	.power_down = 0,
+	.reload_ctl = 0,
+	.dev_out_nak = 0,
+	.cont_on_bna = 0,
+	.ahb_single = 0,
+	.otg_ver = 0,
+	.adp_enable = -1,
+};
+
+/**
+ * Global Debug Level Mask.
+ *
+ * Bitmask of DBG_* flags consumed by FH_DEBUGPL()/CHK_DEBUG_LEVEL();
+ * 0 disables all debug output.  Settable via the "debug" module
+ * parameter and, in DEBUG builds, the sysfs "debuglevel" attribute.
+ */
+uint32_t g_dbg_lvl = 0;		/* OFF */
+
+#ifdef DEBUG
+/**
+ * This function shows the Driver Version.
+ *
+ * Read handler for the sysfs "version" driver attribute; emits
+ * FH_DRIVER_VERSION plus a trailing newline (sizeof already counts the
+ * NUL, +2 leaves room for the '\n').
+ */
+static ssize_t version_show(struct device_driver *dev, char *buf)
+{
+	return snprintf(buf, sizeof(FH_DRIVER_VERSION) + 2, "%s\n",
+			FH_DRIVER_VERSION);
+}
+
+static DRIVER_ATTR(version, S_IRUGO, version_show, NULL);
+
+/**
+ * This function shows the driver Debug Level.
+ *
+ * Read handler for the sysfs "debuglevel" attribute; reports
+ * g_dbg_lvl in hexadecimal.
+ */
+static ssize_t dbg_level_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "0x%0x\n", g_dbg_lvl);
+}
+
+/**
+ * This function stores the driver Debug Level.
+ *
+ * Write handler for "debuglevel": parses the buffer as a hex bitmask
+ * of DBG_* flags.  NOTE(review): simple_strtoul() silently yields 0 on
+ * non-hex input — malformed writes disable debugging rather than fail.
+ */
+static ssize_t dbg_level_store(struct device_driver *drv, const char *buf,
+			       size_t count)
+{
+	g_dbg_lvl = simple_strtoul(buf, NULL, 16);
+	return count;
+}
+
+static DRIVER_ATTR(debuglevel, S_IRUGO | S_IWUSR, dbg_level_show,
+		   dbg_level_store);
+#endif
+/**
+ * This function is called during module intialization
+ * to pass module parameters to the FH_OTG CORE.
+ */
+static int set_parameters(fh_otg_core_if_t * core_if)
+{
+ int retval = 0;
+ int i;
+
+ if (fh_otg_module_params.otg_cap != -1) {
+ retval +=
+ fh_otg_set_param_otg_cap(core_if,
+ fh_otg_module_params.otg_cap);
+ }
+ printk(KERN_ERR "dma_enable :%d\n", fh_otg_module_params.dma_enable);
+ if (fh_otg_module_params.dma_enable != -1) {
+ retval +=
+ fh_otg_set_param_dma_enable(core_if,
+ fh_otg_module_params.
+ dma_enable);
+ }
+ printk(KERN_ERR "dma_desc_enable :%d\n", fh_otg_module_params.dma_desc_enable);
+ if (fh_otg_module_params.dma_desc_enable != -1) {
+ retval +=
+ fh_otg_set_param_dma_desc_enable(core_if,
+ fh_otg_module_params.
+ dma_desc_enable);
+ }
+ if (fh_otg_module_params.opt != -1) {
+ retval +=
+ fh_otg_set_param_opt(core_if, fh_otg_module_params.opt);
+ }
+ if (fh_otg_module_params.dma_burst_size != -1) {
+ retval +=
+ fh_otg_set_param_dma_burst_size(core_if,
+ fh_otg_module_params.
+ dma_burst_size);
+ }
+ if (fh_otg_module_params.host_support_fs_ls_low_power != -1) {
+ retval +=
+ fh_otg_set_param_host_support_fs_ls_low_power(core_if,
+ fh_otg_module_params.
+ host_support_fs_ls_low_power);
+ }
+ if (fh_otg_module_params.enable_dynamic_fifo != -1) {
+ retval +=
+ fh_otg_set_param_enable_dynamic_fifo(core_if,
+ fh_otg_module_params.
+ enable_dynamic_fifo);
+ }
+ if (fh_otg_module_params.data_fifo_size != -1) {
+ retval +=
+ fh_otg_set_param_data_fifo_size(core_if,
+ fh_otg_module_params.
+ data_fifo_size);
+ }
+ if (fh_otg_module_params.dev_rx_fifo_size != -1) {
+ retval +=
+ fh_otg_set_param_dev_rx_fifo_size(core_if,
+ fh_otg_module_params.
+ dev_rx_fifo_size);
+ }
+ if (fh_otg_module_params.dev_nperio_tx_fifo_size != -1) {
+ retval +=
+ fh_otg_set_param_dev_nperio_tx_fifo_size(core_if,
+ fh_otg_module_params.
+ dev_nperio_tx_fifo_size);
+ }
+ if (fh_otg_module_params.host_rx_fifo_size != -1) {
+ retval +=
+ fh_otg_set_param_host_rx_fifo_size(core_if,
+ fh_otg_module_params.host_rx_fifo_size);
+ }
+ if (fh_otg_module_params.host_nperio_tx_fifo_size != -1) {
+ retval +=
+ fh_otg_set_param_host_nperio_tx_fifo_size(core_if,
+ fh_otg_module_params.
+ host_nperio_tx_fifo_size);
+ }
+ if (fh_otg_module_params.host_perio_tx_fifo_size != -1) {
+ retval +=
+ fh_otg_set_param_host_perio_tx_fifo_size(core_if,
+ fh_otg_module_params.
+ host_perio_tx_fifo_size);
+ }
+ if (fh_otg_module_params.max_transfer_size != -1) {
+ retval +=
+ fh_otg_set_param_max_transfer_size(core_if,
+ fh_otg_module_params.
+ max_transfer_size);
+ }
+ if (fh_otg_module_params.max_packet_count != -1) {
+ retval +=
+ fh_otg_set_param_max_packet_count(core_if,
+ fh_otg_module_params.
+ max_packet_count);
+ }
+ if (fh_otg_module_params.host_channels != -1) {
+ retval +=
+ fh_otg_set_param_host_channels(core_if,
+ fh_otg_module_params.
+ host_channels);
+ }
+ if (fh_otg_module_params.dev_endpoints != -1) {
+ retval +=
+ fh_otg_set_param_dev_endpoints(core_if,
+ fh_otg_module_params.
+ dev_endpoints);
+ }
+ if (fh_otg_module_params.phy_type != -1) {
+ retval +=
+ fh_otg_set_param_phy_type(core_if,
+ fh_otg_module_params.phy_type);
+ }
+ if (fh_otg_module_params.speed != -1) {
+ retval +=
+ fh_otg_set_param_speed(core_if,
+ fh_otg_module_params.speed);
+ }
+ if (fh_otg_module_params.host_ls_low_power_phy_clk != -1) {
+ retval +=
+ fh_otg_set_param_host_ls_low_power_phy_clk(core_if,
+ fh_otg_module_params.
+ host_ls_low_power_phy_clk);
+ }
+ if (fh_otg_module_params.phy_ulpi_ddr != -1) {
+ retval +=
+ fh_otg_set_param_phy_ulpi_ddr(core_if,
+ fh_otg_module_params.
+ phy_ulpi_ddr);
+ }
+ if (fh_otg_module_params.phy_ulpi_ext_vbus != -1) {
+ retval +=
+ fh_otg_set_param_phy_ulpi_ext_vbus(core_if,
+ fh_otg_module_params.
+ phy_ulpi_ext_vbus);
+ }
+ if (fh_otg_module_params.phy_utmi_width != -1) {
+ retval +=
+ fh_otg_set_param_phy_utmi_width(core_if,
+ fh_otg_module_params.
+ phy_utmi_width);
+ }
+ if (fh_otg_module_params.ulpi_fs_ls != -1) {
+ retval +=
+ fh_otg_set_param_ulpi_fs_ls(core_if,
+ fh_otg_module_params.ulpi_fs_ls);
+ }
+ if (fh_otg_module_params.ts_dline != -1) {
+ retval +=
+ fh_otg_set_param_ts_dline(core_if,
+ fh_otg_module_params.ts_dline);
+ }
+ if (fh_otg_module_params.i2c_enable != -1) {
+ retval +=
+ fh_otg_set_param_i2c_enable(core_if,
+ fh_otg_module_params.
+ i2c_enable);
+ }
+ if (fh_otg_module_params.en_multiple_tx_fifo != -1) {
+ retval +=
+ fh_otg_set_param_en_multiple_tx_fifo(core_if,
+ fh_otg_module_params.
+ en_multiple_tx_fifo);
+ }
+ for (i = 0; i < 15; i++) {
+ if (fh_otg_module_params.dev_perio_tx_fifo_size[i] != -1) {
+ retval +=
+ fh_otg_set_param_dev_perio_tx_fifo_size(core_if,
+ fh_otg_module_params.
+ dev_perio_tx_fifo_size
+ [i], i);
+ }
+ }
+
+ for (i = 0; i < 15; i++) {
+ if (fh_otg_module_params.dev_tx_fifo_size[i] != -1) {
+ retval += fh_otg_set_param_dev_tx_fifo_size(core_if,
+ fh_otg_module_params.
+ dev_tx_fifo_size
+ [i], i);
+ }
+ }
+ if (fh_otg_module_params.thr_ctl != -1) {
+ retval +=
+ fh_otg_set_param_thr_ctl(core_if,
+ fh_otg_module_params.thr_ctl);
+ }
+ if (fh_otg_module_params.mpi_enable != -1) {
+ retval +=
+ fh_otg_set_param_mpi_enable(core_if,
+ fh_otg_module_params.
+ mpi_enable);
+ }
+ if (fh_otg_module_params.pti_enable != -1) {
+ retval +=
+ fh_otg_set_param_pti_enable(core_if,
+ fh_otg_module_params.
+ pti_enable);
+ }
+ if (fh_otg_module_params.lpm_enable != -1) {
+ retval +=
+ fh_otg_set_param_lpm_enable(core_if,
+ fh_otg_module_params.
+ lpm_enable);
+ }
+ if (fh_otg_module_params.besl_enable != -1) {
+ retval +=
+ fh_otg_set_param_besl_enable(core_if,
+ fh_otg_module_params.
+ besl_enable);
+ }
+ if (fh_otg_module_params.baseline_besl != -1) {
+ retval +=
+ fh_otg_set_param_baseline_besl(core_if,
+ fh_otg_module_params.
+ baseline_besl);
+ }
+ if (fh_otg_module_params.deep_besl != -1) {
+ retval +=
+ fh_otg_set_param_deep_besl(core_if,
+ fh_otg_module_params.
+ deep_besl);
+ }
+ if (fh_otg_module_params.ic_usb_cap != -1) {
+ retval +=
+ fh_otg_set_param_ic_usb_cap(core_if,
+ fh_otg_module_params.
+ ic_usb_cap);
+ }
+ if (fh_otg_module_params.tx_thr_length != -1) {
+ retval +=
+ fh_otg_set_param_tx_thr_length(core_if,
+ fh_otg_module_params.tx_thr_length);
+ }
+ if (fh_otg_module_params.rx_thr_length != -1) {
+ retval +=
+ fh_otg_set_param_rx_thr_length(core_if,
+ fh_otg_module_params.
+ rx_thr_length);
+ }
+ if (fh_otg_module_params.ahb_thr_ratio != -1) {
+ retval +=
+ fh_otg_set_param_ahb_thr_ratio(core_if,
+ fh_otg_module_params.ahb_thr_ratio);
+ }
+ if (fh_otg_module_params.power_down != -1) {
+ retval +=
+ fh_otg_set_param_power_down(core_if,
+ fh_otg_module_params.power_down);
+ }
+ if (fh_otg_module_params.reload_ctl != -1) {
+ retval +=
+ fh_otg_set_param_reload_ctl(core_if,
+ fh_otg_module_params.reload_ctl);
+ }
+
+ if (fh_otg_module_params.dev_out_nak != -1) {
+ retval +=
+ fh_otg_set_param_dev_out_nak(core_if,
+ fh_otg_module_params.dev_out_nak);
+ }
+
+ if (fh_otg_module_params.cont_on_bna != -1) {
+ retval +=
+ fh_otg_set_param_cont_on_bna(core_if,
+ fh_otg_module_params.cont_on_bna);
+ }
+
+ if (fh_otg_module_params.ahb_single != -1) {
+ retval +=
+ fh_otg_set_param_ahb_single(core_if,
+ fh_otg_module_params.ahb_single);
+ }
+
+ if (fh_otg_module_params.otg_ver != -1) {
+ retval +=
+ fh_otg_set_param_otg_ver(core_if,
+ fh_otg_module_params.otg_ver);
+ }
+ if (fh_otg_module_params.adp_enable != -1) {
+ retval +=
+ fh_otg_set_param_adp_enable(core_if,
+ fh_otg_module_params.
+ adp_enable);
+ }
+ return retval;
+}
+
+
+/**
+ * Top level interrupt handler for the common (device and host mode)
+ * interrupts.
+ *
+ * Delegates to fh_otg_handle_common_intr(); when any interrupt was
+ * serviced, clears the external interrupt pending latch as well.
+ */
+static irqreturn_t fh_otg_common_irq(int irq, void *dev)
+{
+	int32_t handled = fh_otg_handle_common_intr(dev);
+
+	if (handled != 0)
+		S3C2410X_CLEAR_EINTPEND();
+
+	return IRQ_RETVAL(handled);
+}
+
+/**
+ * This function is called when a platform device is unregistered from
+ * the fh_otg_driver — for example when the rmmod command is executed.
+ * The device may or may not be electrically present.  If it is
+ * present, the driver stops device processing.  Any resources used on
+ * behalf of this device are freed.
+ *
+ * Fix: release_mem_region() used to be called with bounds read from
+ * otg_dev AFTER FH_FREE(otg_dev) — a use-after-free.  The register
+ * window is now unmapped and released before the device struct is
+ * freed.
+ *
+ * @param dev The platform device being removed.
+ * @return 0 on success, -EINVAL if no driver data was attached.
+ */
+static int fh_otg_driver_remove(struct platform_device *dev)
+{
+	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
+
+	printk(KERN_INFO "%s (%p)\n", __func__, dev);
+
+	if (!otg_dev) {
+		/* Memory allocation for the fh_otg_device failed. */
+		FH_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__);
+		return -EINVAL;
+	}
+#ifndef FH_DEVICE_ONLY
+	if (otg_dev->hcd)
+		hcd_remove(dev);
+	else
+		FH_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__);
+#endif
+
+#ifndef FH_HOST_ONLY
+	if (otg_dev->pcd)
+		pcd_remove(dev, g_irq);
+	else
+		FH_DEBUGPL(DBG_ANY, "%s: otg_dev->pcd NULL!\n", __func__);
+#endif
+
+	/* Free the IRQ only if probe actually installed it. */
+	if (otg_dev->common_irq_installed)
+		free_irq(g_irq, otg_dev);
+	else
+		FH_DEBUGPL(DBG_ANY, "%s: There is no installed irq!\n",
+			   __func__);
+
+	if (otg_dev->core_if)
+		fh_otg_cil_remove(otg_dev->core_if);
+	else
+		FH_DEBUGPL(DBG_ANY, "%s: otg_dev->core_if NULL!\n", __func__);
+
+	/*
+	 * Return the memory: unmap and release the register window
+	 * before freeing otg_dev, because the resource bounds are
+	 * stored inside it.
+	 */
+	if (otg_dev->os_dep.base)
+		iounmap(otg_dev->os_dep.base);
+	release_mem_region(otg_dev->os_dep.rsrc_start,
+			   otg_dev->os_dep.rsrc_len);
+	FH_FREE(otg_dev);
+
+	/* Clear the drvdata pointer. */
+	platform_set_drvdata(dev, NULL);
+	g_irq = 0;
+	return 0;
+}
+
+/**
+ * This function is called when a platform device is bound to the
+ * fh_otg_driver.  It creates the driver components required to control
+ * the device (CIL, HCD, and PCD) and initializes the device.  The
+ * driver components are stored in a fh_otg_device structure; a
+ * reference to it is saved in the platform device's drvdata, which
+ * allows the driver to reach it in subsequent calls for this device.
+ *
+ * Fixes over the previous version:
+ *  - fh_otg_device was leaked on the clk_get / missing-IRQ /
+ *    missing-MEM early-exit paths;
+ *  - drvdata was only published after core init, so every failure
+ *    before that point made fh_otg_driver_remove() bail out with
+ *    NULL drvdata and leak the mapping, mem region and device struct;
+ *  - the SNPSID register was read up to four times; it is read once.
+ *
+ * @param dev Bus device
+ * @return 0 on success, negative errno on failure.
+ */
+static int fh_otg_driver_probe(struct platform_device *dev)
+{
+	int retval = 0;
+	fh_otg_device_t *fh_otg_device;
+	struct resource *res;
+	int irq;
+	uint32_t gsnpsid;
+	struct fh_usb_platform_data *fh_usb_data;
+#if !defined(CONFIG_ARCH_FH8856)
+	struct clk *usb_clk;
+#endif
+
+	fh_otg_device = FH_ALLOC(sizeof(fh_otg_device_t));
+	if (!fh_otg_device) {
+		dev_err(&dev->dev, "kmalloc of fh_otg_device failed\n");
+		return -ENOMEM;
+	}
+
+#if !defined(CONFIG_ARCH_FH8856)
+	usb_clk = clk_get(NULL, "usb_clk");
+	if (IS_ERR(usb_clk)) {
+		/* Was leaked here: free the device struct on failure. */
+		FH_FREE(fh_otg_device);
+		return -EPERM;
+	}
+	clk_enable(usb_clk);
+#endif
+
+	/* Optional board hooks: power rail, UTMI/PHY reset, host resume. */
+	fh_usb_data = dev->dev.platform_data;
+	if (fh_usb_data != NULL && fh_usb_data->power_on)
+		fh_usb_data->power_on();
+
+	if (fh_usb_data != NULL && fh_usb_data->utmi_rst)
+		fh_usb_data->utmi_rst();
+
+	if (fh_usb_data != NULL && fh_usb_data->phy_rst)
+		fh_usb_data->phy_rst();
+
+	if (fh_usb_data != NULL && fh_usb_data->hcd_resume)
+		fh_usb_data->hcd_resume();
+
+	memset(fh_otg_device, 0, sizeof(*fh_otg_device));
+	fh_otg_device->os_dep.reg_offset = 0xFFFFFFFF;
+
+	/*
+	 * Fetch the IRQ and the register window, then map the FH_otg
+	 * core memory into virtual address space.
+	 */
+	res = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&dev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&dev->dev));
+		FH_FREE(fh_otg_device);
+		return -ENODEV;
+	}
+	irq = res->start;
+	g_irq = irq;
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&dev->dev,
+			"Found HC with no register addr. Check %s setup!\n",
+			dev_name(&dev->dev));
+		FH_FREE(fh_otg_device);
+		return -ENODEV;
+	}
+	fh_otg_device->os_dep.rsrc_start = res->start;
+	fh_otg_device->os_dep.rsrc_len = res->end - res->start + 1;
+
+	printk(KERN_INFO "resource: start=%08x, len=%08x\n",
+	       (unsigned)fh_otg_device->os_dep.rsrc_start,
+	       (unsigned)fh_otg_device->os_dep.rsrc_len);
+
+	if (!request_mem_region
+	    (fh_otg_device->os_dep.rsrc_start, fh_otg_device->os_dep.rsrc_len,
+	     "fh_otg")) {
+		dev_dbg(&dev->dev, "error requesting memory\n");
+		FH_FREE(fh_otg_device);
+		return -EFAULT;
+	}
+
+	fh_otg_device->os_dep.base =
+	    ioremap_nocache(fh_otg_device->os_dep.rsrc_start,
+			    fh_otg_device->os_dep.rsrc_len);
+	if (fh_otg_device->os_dep.base == NULL) {
+		dev_dbg(&dev->dev, "error mapping memory\n");
+		release_mem_region(fh_otg_device->os_dep.rsrc_start,
+				   fh_otg_device->os_dep.rsrc_len);
+		FH_FREE(fh_otg_device);
+		return -EFAULT;
+	}
+	printk(KERN_INFO "%s: mapped PA 0x%x to VA 0x%p\n", __func__,
+	       (unsigned)fh_otg_device->os_dep.rsrc_start,
+	       fh_otg_device->os_dep.base);
+
+	/*
+	 * Publish the driver data now so the error path (which calls
+	 * fh_otg_driver_remove()) can find and release everything
+	 * allocated from here on.  All fields read by remove() are
+	 * already valid: the struct was zeroed and the resource
+	 * bounds/base are set above.
+	 */
+	fh_otg_device->os_dep.pdev = dev;
+	platform_set_drvdata(dev, fh_otg_device);
+
+	dev_dbg(&dev->dev, "fh_otg_device=0x%p\n", fh_otg_device);
+
+	fh_otg_device->core_if = fh_otg_cil_init(fh_otg_device->os_dep.base,
+						 fh_usb_data);
+	if (!fh_otg_device->core_if) {
+		dev_err(&dev->dev, "CIL initialization failed!\n");
+		retval = -ENOMEM;
+		goto fail;
+	}
+
+	/*
+	 * Attempt to ensure this device is really a FH_otg controller.
+	 * Read and verify the SNPSID register contents.  The masked
+	 * value should be 0x4F542XXX, 0x4F543XXX or 0x4F544XXX ("OT2",
+	 * "OT3", "OT4"), i.e. an OTG version 2.xx/3.xx/4.xx core.
+	 */
+	gsnpsid = fh_otg_get_gsnpsid(fh_otg_device->core_if);
+	if (((gsnpsid & 0xFFFFF000) != 0x4F542000) &&
+	    ((gsnpsid & 0xFFFFF000) != 0x4F543000) &&
+	    ((gsnpsid & 0xFFFFF000) != 0x4F544000)) {
+		dev_err(&dev->dev, "Bad value for SNPSID: 0x%08x\n", gsnpsid);
+		retval = -EINVAL;
+		goto fail;
+	}
+
+	/* Validate parameter values. */
+	if (set_parameters(fh_otg_device->core_if)) {
+		retval = -EINVAL;
+		goto fail;
+	}
+
+	/*
+	 * Disable the global interrupt until all the interrupt
+	 * handlers are installed, then install the handler for the
+	 * common interrupts before enabling them in core_init below.
+	 * NOTE(review): IRQ_LEVEL is an irq status bit rather than an
+	 * IRQF_* flag; kept as-is to preserve platform behavior.
+	 */
+	fh_otg_disable_global_interrupts(fh_otg_device->core_if);
+
+	retval = request_irq(irq, fh_otg_common_irq,
+			     IRQF_SHARED | IRQF_DISABLED | IRQ_LEVEL, "fh_otg",
+			     fh_otg_device);
+	if (retval) {
+		FH_ERROR("request of irq%d failed\n", irq);
+		retval = -EBUSY;
+		goto fail;
+	}
+	fh_otg_device->common_irq_installed = 1;
+
+	/* Initialize the FH_otg core. */
+	fh_otg_core_init(fh_otg_device->core_if);
+
+#ifndef FH_HOST_ONLY
+	/* Initialize the PCD (device mode). */
+	retval = pcd_init(dev, irq);
+	if (retval != 0) {
+		FH_ERROR("pcd_init failed\n");
+		fh_otg_device->pcd = NULL;
+		goto fail;
+	}
+#endif
+
+#ifndef FH_DEVICE_ONLY
+	/* Initialize the HCD (host mode). */
+	retval = hcd_init(dev, irq);
+	if (retval != 0) {
+		FH_ERROR("hcd_init failed\n");
+		fh_otg_device->hcd = NULL;
+		goto fail;
+	}
+#endif
+
+	/*
+	 * Enable the global interrupt after all the interrupt handlers
+	 * are installed if there is no ADP support; otherwise perform
+	 * the initial actions required by the internal ADP logic.
+	 */
+	if (!fh_otg_get_param_adp_enable(fh_otg_device->core_if))
+		fh_otg_enable_global_interrupts(fh_otg_device->core_if);
+	else
+		fh_otg_adp_start(fh_otg_device->core_if,
+				 fh_otg_is_host_mode(fh_otg_device->core_if));
+
+	return 0;
+
+fail:
+	fh_otg_driver_remove(dev);
+	return retval;
+}
+
+/*
+ * Device-tree match table.  Lets the driver bind to the FullHan node
+ * as well as other boards that integrate the same Synopsys HS OTG
+ * core (bcm2835, rk3066, s3c6400).
+ */
+static const struct of_device_id fh_of_match_table[] = {
+	{ .compatible = "brcm,bcm2835-usb", .data = NULL },
+	{ .compatible = "rockchip,rk3066-usb", .data = NULL },
+	{ .compatible = "fullhan,fh2", .data = NULL },
+	{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
+	{},
+};
+MODULE_DEVICE_TABLE(of, fh_of_match_table);
+
+/*
+ * Platform driver glue: matches either by name ("fh_otg", board-file
+ * registration) or via fh_of_match_table (device-tree probing).
+ */
+static struct platform_driver fh_otg_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = fh_driver_name,
+		.of_match_table = fh_of_match_table,
+	},
+
+	.probe = fh_otg_driver_probe,
+	.remove = fh_otg_driver_remove,
+};
+
+
+/** Module entry point: register the fh_otg platform driver. */
+static int __init fh_otg_driver_init(void)
+{
+	return platform_driver_register(&fh_otg_driver);
+}
+module_init(fh_otg_driver_init);
+
+/** Module exit point: unregister the fh_otg platform driver. */
+static void __exit fh_otg_driver_cleanup(void)
+{
+	platform_driver_unregister(&fh_otg_driver);
+}
+
+module_exit(fh_otg_driver_cleanup);
+
+MODULE_DESCRIPTION(FH_DRIVER_DESC);
+MODULE_AUTHOR("Synopsys Inc.");
+MODULE_LICENSE("GPL");
+
+module_param_named(otg_cap, fh_otg_module_params.otg_cap, int, 0444);
+MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None");
+module_param_named(opt, fh_otg_module_params.opt, int, 0444);
+MODULE_PARM_DESC(opt, "OPT Mode");
+module_param_named(dma_enable, fh_otg_module_params.dma_enable, int, 0444);
+MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled");
+
+module_param_named(dma_desc_enable, fh_otg_module_params.dma_desc_enable, int,
+ 0444);
+MODULE_PARM_DESC(dma_desc_enable,
+ "DMA Desc Mode 0=Address DMA 1=DMA Descriptor enabled");
+
+module_param_named(dma_burst_size, fh_otg_module_params.dma_burst_size, int,
+ 0444);
+MODULE_PARM_DESC(dma_burst_size,
+ "DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256");
+module_param_named(speed, fh_otg_module_params.speed, int, 0444);
+MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed");
+module_param_named(host_support_fs_ls_low_power,
+ fh_otg_module_params.host_support_fs_ls_low_power, int,
+ 0444);
+MODULE_PARM_DESC(host_support_fs_ls_low_power,
+ "Support Low Power w/FS or LS 0=Support 1=Don't Support");
+module_param_named(host_ls_low_power_phy_clk,
+ fh_otg_module_params.host_ls_low_power_phy_clk, int, 0444);
+MODULE_PARM_DESC(host_ls_low_power_phy_clk,
+ "Low Speed Low Power Clock 0=48Mhz 1=6Mhz");
+module_param_named(enable_dynamic_fifo,
+ fh_otg_module_params.enable_dynamic_fifo, int, 0444);
+MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing");
+module_param_named(data_fifo_size, fh_otg_module_params.data_fifo_size, int,
+ 0444);
+MODULE_PARM_DESC(data_fifo_size,
+ "Total number of words in the data FIFO memory 32-32768");
+module_param_named(dev_rx_fifo_size, fh_otg_module_params.dev_rx_fifo_size,
+ int, 0444);
+MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+module_param_named(dev_nperio_tx_fifo_size,
+ fh_otg_module_params.dev_nperio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(dev_nperio_tx_fifo_size,
+ "Number of words in the non-periodic Tx FIFO 16-32768");
+module_param_named(dev_perio_tx_fifo_size_1,
+ fh_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_1,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_2,
+ fh_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_2,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_3,
+ fh_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_3,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_4,
+ fh_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_4,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_5,
+ fh_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_5,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_6,
+ fh_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_6,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_7,
+ fh_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_7,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_8,
+ fh_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_8,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_9,
+ fh_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_9,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_10,
+ fh_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_10,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_11,
+ fh_otg_module_params.dev_perio_tx_fifo_size[10], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_11,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_12,
+ fh_otg_module_params.dev_perio_tx_fifo_size[11], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_12,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_13,
+ fh_otg_module_params.dev_perio_tx_fifo_size[12], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_13,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_14,
+ fh_otg_module_params.dev_perio_tx_fifo_size[13], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_14,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_15,
+ fh_otg_module_params.dev_perio_tx_fifo_size[14], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_15,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(host_rx_fifo_size, fh_otg_module_params.host_rx_fifo_size,
+ int, 0444);
+MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+module_param_named(host_nperio_tx_fifo_size,
+ fh_otg_module_params.host_nperio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(host_nperio_tx_fifo_size,
+ "Number of words in the non-periodic Tx FIFO 16-32768");
+module_param_named(host_perio_tx_fifo_size,
+ fh_otg_module_params.host_perio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(host_perio_tx_fifo_size,
+ "Number of words in the host periodic Tx FIFO 16-32768");
+module_param_named(max_transfer_size, fh_otg_module_params.max_transfer_size,
+ int, 0444);
+/** @todo Set the max to 512K, modify checks */
+MODULE_PARM_DESC(max_transfer_size,
+ "The maximum transfer size supported in bytes 2047-65535");
+module_param_named(max_packet_count, fh_otg_module_params.max_packet_count,
+ int, 0444);
+MODULE_PARM_DESC(max_packet_count,
+ "The maximum number of packets in a transfer 15-511");
+module_param_named(host_channels, fh_otg_module_params.host_channels, int,
+ 0444);
+MODULE_PARM_DESC(host_channels,
+ "The number of host channel registers to use 1-16");
+module_param_named(dev_endpoints, fh_otg_module_params.dev_endpoints, int,
+ 0444);
+MODULE_PARM_DESC(dev_endpoints,
+ "The number of endpoints in addition to EP0 available for device mode 1-15");
+module_param_named(phy_type, fh_otg_module_params.phy_type, int, 0444);
+MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI");
+module_param_named(phy_utmi_width, fh_otg_module_params.phy_utmi_width, int,
+ 0444);
+MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ Data Width 8 or 16 bits");
+module_param_named(phy_ulpi_ddr, fh_otg_module_params.phy_ulpi_ddr, int, 0444);
+MODULE_PARM_DESC(phy_ulpi_ddr,
+ "ULPI at double or single data rate 0=Single 1=Double");
+module_param_named(phy_ulpi_ext_vbus, fh_otg_module_params.phy_ulpi_ext_vbus,
+ int, 0444);
+MODULE_PARM_DESC(phy_ulpi_ext_vbus,
+ "ULPI PHY using internal or external vbus 0=Internal");
+module_param_named(i2c_enable, fh_otg_module_params.i2c_enable, int, 0444);
+MODULE_PARM_DESC(i2c_enable, "FS PHY Interface");
+module_param_named(ulpi_fs_ls, fh_otg_module_params.ulpi_fs_ls, int, 0444);
+MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only");
+module_param_named(ts_dline, fh_otg_module_params.ts_dline, int, 0444);
+MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs");
+module_param_named(debug, g_dbg_lvl, int, 0444);
+MODULE_PARM_DESC(debug, "");
+
+module_param_named(en_multiple_tx_fifo,
+ fh_otg_module_params.en_multiple_tx_fifo, int, 0444);
+MODULE_PARM_DESC(en_multiple_tx_fifo,
+ "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled");
+module_param_named(dev_tx_fifo_size_1,
+ fh_otg_module_params.dev_tx_fifo_size[0], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_2,
+ fh_otg_module_params.dev_tx_fifo_size[1], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_3,
+ fh_otg_module_params.dev_tx_fifo_size[2], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_4,
+ fh_otg_module_params.dev_tx_fifo_size[3], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_5,
+ fh_otg_module_params.dev_tx_fifo_size[4], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_6,
+ fh_otg_module_params.dev_tx_fifo_size[5], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_7,
+ fh_otg_module_params.dev_tx_fifo_size[6], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_8,
+ fh_otg_module_params.dev_tx_fifo_size[7], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_9,
+ fh_otg_module_params.dev_tx_fifo_size[8], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_10,
+ fh_otg_module_params.dev_tx_fifo_size[9], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_11,
+ fh_otg_module_params.dev_tx_fifo_size[10], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_12,
+ fh_otg_module_params.dev_tx_fifo_size[11], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_13,
+ fh_otg_module_params.dev_tx_fifo_size[12], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_14,
+ fh_otg_module_params.dev_tx_fifo_size[13], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_15,
+ fh_otg_module_params.dev_tx_fifo_size[14], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768");
+
+module_param_named(thr_ctl, fh_otg_module_params.thr_ctl, int, 0444);
+MODULE_PARM_DESC(thr_ctl,
+ "Thresholding enable flag bit 0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- bit 0=disabled 1=enabled");
+module_param_named(tx_thr_length, fh_otg_module_params.tx_thr_length, int,
+ 0444);
+MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs");
+module_param_named(rx_thr_length, fh_otg_module_params.rx_thr_length, int,
+ 0444);
+MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs");
+
+module_param_named(pti_enable, fh_otg_module_params.pti_enable, int, 0444);
+module_param_named(mpi_enable, fh_otg_module_params.mpi_enable, int, 0444);
+module_param_named(lpm_enable, fh_otg_module_params.lpm_enable, int, 0444);
+MODULE_PARM_DESC(lpm_enable, "LPM Enable 0=LPM Disabled 1=LPM Enabled");
+
+module_param_named(besl_enable, fh_otg_module_params.besl_enable, int, 0444);
+MODULE_PARM_DESC(besl_enable, "BESL Enable 0=BESL Disabled 1=BESL Enabled");
+module_param_named(baseline_besl, fh_otg_module_params.baseline_besl, int, 0444);
+MODULE_PARM_DESC(baseline_besl, "Set the baseline besl value");
+module_param_named(deep_besl, fh_otg_module_params.deep_besl, int, 0444);
+MODULE_PARM_DESC(deep_besl, "Set the deep besl value");
+
+module_param_named(ic_usb_cap, fh_otg_module_params.ic_usb_cap, int, 0444);
+MODULE_PARM_DESC(ic_usb_cap,
+ "IC_USB Capability 0=IC_USB Disabled 1=IC_USB Enabled");
+module_param_named(ahb_thr_ratio, fh_otg_module_params.ahb_thr_ratio, int,
+ 0444);
+MODULE_PARM_DESC(ahb_thr_ratio, "AHB Threshold Ratio");
+module_param_named(power_down, fh_otg_module_params.power_down, int, 0444);
+MODULE_PARM_DESC(power_down, "Power Down Mode");
+module_param_named(reload_ctl, fh_otg_module_params.reload_ctl, int, 0444);
+MODULE_PARM_DESC(reload_ctl, "HFIR Reload Control");
+module_param_named(dev_out_nak, fh_otg_module_params.dev_out_nak, int, 0444);
+MODULE_PARM_DESC(dev_out_nak, "Enable Device OUT NAK");
+module_param_named(cont_on_bna, fh_otg_module_params.cont_on_bna, int, 0444);
+MODULE_PARM_DESC(cont_on_bna, "Enable Enable Continue on BNA");
+module_param_named(ahb_single, fh_otg_module_params.ahb_single, int, 0444);
+MODULE_PARM_DESC(ahb_single, "Enable AHB Single Support");
+module_param_named(adp_enable, fh_otg_module_params.adp_enable, int, 0444);
+MODULE_PARM_DESC(adp_enable, "ADP Enable 0=ADP Disabled 1=ADP Enabled");
+module_param_named(otg_ver, fh_otg_module_params.otg_ver, int, 0444);
+MODULE_PARM_DESC(otg_ver, "OTG revision supported 0=OTG 1.3 1=OTG 2.0");
+
+/** @page "Module Parameters"
+ *
+ * The following parameters may be specified when starting the module.
+ * These parameters define how the FH_otg controller should be
+ * configured. Parameter values are passed to the CIL initialization
+ * function fh_otg_cil_init
+ *
+ * Example: <code>modprobe fh_otg speed=1 otg_cap=1</code>
+ *
+
+ <table>
+ <tr><td>Parameter Name</td><td>Meaning</td></tr>
+
+ <tr>
+ <td>otg_cap</td>
+ <td>Specifies the OTG capabilities. The driver will automatically detect the
+ value for this parameter if none is specified.
+ - 0: HNP and SRP capable (default, if available)
+ - 1: SRP Only capable
+ - 2: No HNP/SRP capable
+ </td></tr>
+
+ <tr>
+ <td>dma_enable</td>
+ <td>Specifies whether to use slave or DMA mode for accessing the data FIFOs.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: Slave
+ - 1: DMA (default, if available)
+ </td></tr>
+
+ <tr>
+ <td>dma_burst_size</td>
+ <td>The DMA Burst size (applicable only for External DMA Mode).
+ - Values: 1, 4, 8 16, 32, 64, 128, 256 (default 32)
+ </td></tr>
+
+ <tr>
+ <td>speed</td>
+ <td>Specifies the maximum speed of operation in host and device mode. The
+ actual speed depends on the speed of the attached device and the value of
+ phy_type.
+ - 0: High Speed (default)
+ - 1: Full Speed
+ </td></tr>
+
+ <tr>
+ <td>host_support_fs_ls_low_power</td>
+ <td>Specifies whether low power mode is supported when attached to a Full
+ Speed or Low Speed device in host mode.
+ - 0: Don't support low power mode (default)
+ - 1: Support low power mode
+ </td></tr>
+
+ <tr>
+ <td>host_ls_low_power_phy_clk</td>
+ <td>Specifies the PHY clock rate in low power mode when connected to a Low
+ Speed device in host mode. This parameter is applicable only if
+ HOST_SUPPORT_FS_LS_LOW_POWER is enabled.
+ - 0: 48 MHz (default)
+ - 1: 6 MHz
+ </td></tr>
+
+ <tr>
+ <td>enable_dynamic_fifo</td>
+ <td> Specifies whether FIFOs may be resized by the driver software.
+ - 0: Use cC FIFO size parameters
+ - 1: Allow dynamic FIFO sizing (default)
+ </td></tr>
+
+ <tr>
+ <td>data_fifo_size</td>
+ <td>Total number of 4-byte words in the data FIFO memory. This memory
+ includes the Rx FIFO, non-periodic Tx FIFO, and periodic Tx FIFOs.
+ - Values: 32 to 32768 (default 8192)
+
+ Note: The total FIFO memory depth in the FPGA configuration is 8192.
+ </td></tr>
+
+ <tr>
+ <td>dev_rx_fifo_size</td>
+ <td>Number of 4-byte words in the Rx FIFO in device mode when dynamic
+ FIFO sizing is enabled.
+ - Values: 16 to 32768 (default 1064)
+ </td></tr>
+
+ <tr>
+ <td>dev_nperio_tx_fifo_size</td>
+ <td>Number of 4-byte words in the non-periodic Tx FIFO in device mode when
+ dynamic FIFO sizing is enabled.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>dev_perio_tx_fifo_size_n (n = 1 to 15)</td>
+ <td>Number of 4-byte words in each of the periodic Tx FIFOs in device mode
+ when dynamic FIFO sizing is enabled.
+ - Values: 4 to 768 (default 256)
+ </td></tr>
+
+ <tr>
+ <td>host_rx_fifo_size</td>
+ <td>Number of 4-byte words in the Rx FIFO in host mode when dynamic FIFO
+ sizing is enabled.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>host_nperio_tx_fifo_size</td>
+ <td>Number of 4-byte words in the non-periodic Tx FIFO in host mode when
+ dynamic FIFO sizing is enabled in the core.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>host_perio_tx_fifo_size</td>
+ <td>Number of 4-byte words in the host periodic Tx FIFO when dynamic FIFO
+ sizing is enabled.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>max_transfer_size</td>
+ <td>The maximum transfer size supported in bytes.
+ - Values: 2047 to 65,535 (default 65,535)
+ </td></tr>
+
+ <tr>
+ <td>max_packet_count</td>
+ <td>The maximum number of packets in a transfer.
+ - Values: 15 to 511 (default 511)
+ </td></tr>
+
+ <tr>
+ <td>host_channels</td>
+ <td>The number of host channel registers to use.
+ - Values: 1 to 16 (default 12)
+
+ Note: The FPGA configuration supports a maximum of 12 host channels.
+ </td></tr>
+
+ <tr>
+ <td>dev_endpoints</td>
+ <td>The number of endpoints in addition to EP0 available for device mode
+ operations.
+ - Values: 1 to 15 (default 6 IN and OUT)
+
+ Note: The FPGA configuration supports a maximum of 6 IN and OUT endpoints in
+ addition to EP0.
+ </td></tr>
+
+ <tr>
+ <td>phy_type</td>
+ <td>Specifies the type of PHY interface to use. By default, the driver will
+ automatically detect the phy_type.
+ - 0: Full Speed
+ - 1: UTMI+ (default, if available)
+ - 2: ULPI
+ </td></tr>
+
+ <tr>
+ <td>phy_utmi_width</td>
+ <td>Specifies the UTMI+ Data Width. This parameter is applicable for a
+ phy_type of UTMI+. Also, this parameter is applicable only if the
+ OTG_HSPHY_WIDTH cC parameter was set to "8 and 16 bits", meaning that the
+ core has been configured to work at either data path width.
+ - Values: 8 or 16 bits (default 16)
+ </td></tr>
+
+ <tr>
+ <td>phy_ulpi_ddr</td>
+ <td>Specifies whether the ULPI operates at double or single data rate. This
+ parameter is only applicable if phy_type is ULPI.
+ - 0: single data rate ULPI interface with 8 bit wide data bus (default)
+ - 1: double data rate ULPI interface with 4 bit wide data bus
+ </td></tr>
+
+ <tr>
+ <td>i2c_enable</td>
+ <td>Specifies whether to use the I2C interface for full speed PHY. This
+ parameter is only applicable if PHY_TYPE is FS.
+ - 0: Disabled (default)
+ - 1: Enabled
+ </td></tr>
+
+ <tr>
+ <td>ulpi_fs_ls</td>
+ <td>Specifies whether to use ULPI FS/LS mode only.
+ - 0: Disabled (default)
+ - 1: Enabled
+ </td></tr>
+
+ <tr>
+ <td>ts_dline</td>
+ <td>Specifies whether term select D-Line pulsing for all PHYs is enabled.
+ - 0: Disabled (default)
+ - 1: Enabled
+ </td></tr>
+
+ <tr>
+ <td>en_multiple_tx_fifo</td>
+ <td>Specifies whether dedicated tx fifos are enabled for non periodic IN EPs.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: Disabled
+ - 1: Enabled (default, if available)
+ </td></tr>
+
+ <tr>
+ <td>dev_tx_fifo_size_n (n = 1 to 15)</td>
+ <td>Number of 4-byte words in each of the Tx FIFOs in device mode
+ when dynamic FIFO sizing is enabled.
+ - Values: 4 to 768 (default 256)
+ </td></tr>
+
+ <tr>
+ <td>tx_thr_length</td>
+ <td>Transmit Threshold length in 32 bit double words
+ - Values: 8 to 128 (default 64)
+ </td></tr>
+
+ <tr>
+ <td>rx_thr_length</td>
+ <td>Receive Threshold length in 32 bit double words
+ - Values: 8 to 128 (default 64)
+ </td></tr>
+
+<tr>
+ <td>thr_ctl</td>
+ <td>Specifies whether to enable Thresholding for Device mode. Bits 0, 1, 2 of
+ this parameter specifies if thresholding is enabled for non-Iso Tx, Iso Tx and
+ Rx transfers accordingly.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - Values: 0 to 7 (default 0)
+ Bit values indicate:
+ - 0: Thresholding disabled
+ - 1: Thresholding enabled
+ </td></tr>
+
+<tr>
+ <td>dma_desc_enable</td>
+ <td>Specifies whether to enable Descriptor DMA mode.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: Descriptor DMA disabled
+ - 1: Descriptor DMA (default, if available)
+ </td></tr>
+
+<tr>
+ <td>mpi_enable</td>
+ <td>Specifies whether to enable MPI enhancement mode.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: MPI disabled (default)
+ - 1: MPI enable
+ </td></tr>
+
+<tr>
+ <td>pti_enable</td>
+ <td>Specifies whether to enable PTI enhancement support.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: PTI disabled (default)
+ - 1: PTI enable
+ </td></tr>
+
+<tr>
+ <td>lpm_enable</td>
+ <td>Specifies whether to enable LPM support.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: LPM disabled
+ - 1: LPM enable (default, if available)
+ </td></tr>
+
+ <tr>
+ <td>besl_enable</td>
+ <td>Specifies whether to enable LPM Errata support.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: LPM Errata disabled (default)
+ - 1: LPM Errata enable
+ </td></tr>
+
+ <tr>
+ <td>baseline_besl</td>
+ <td>Specifies the baseline besl value.
+ - Values: 0 to 15 (default 0)
+ </td></tr>
+
+ <tr>
+ <td>deep_besl</td>
+ <td>Specifies the deep besl value.
+ - Values: 0 to 15 (default 15)
+ </td></tr>
+
+<tr>
+ <td>ic_usb_cap</td>
+ <td>Specifies whether to enable IC_USB capability.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: IC_USB disabled (default, if available)
+ - 1: IC_USB enable
+ </td></tr>
+
+<tr>
+ <td>ahb_thr_ratio</td>
+ <td>Specifies AHB Threshold ratio.
+ - Values: 0 to 3 (default 0)
+ </td></tr>
+
+<tr>
+ <td>power_down</td>
+ <td>Specifies Power Down(Hibernation) Mode.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: Power Down disabled (default)
+ - 2: Power Down enabled
+ </td></tr>
+
+ <tr>
+ <td>reload_ctl</td>
+ <td>Specifies whether dynamic reloading of the HFIR register is allowed during
+ run time. The driver will automatically detect the value for this parameter if
+ none is specified. In case the HFIR value is reloaded when HFIR.RldCtrl == 1'b0
+ the core might misbehave.
+ - 0: Reload Control disabled (default)
+ - 1: Reload Control enabled
+ </td></tr>
+
+ <tr>
+ <td>dev_out_nak</td>
+ <td>Specifies whether Device OUT NAK enhancement enabled or not.
+ The driver will automatically detect the value for this parameter if
+ none is specified. This parameter is valid only when OTG_EN_DESC_DMA == 1'b1.
+ - 0: The core does not set NAK after Bulk OUT transfer complete (default)
+ - 1: The core sets NAK after Bulk OUT transfer complete
+ </td></tr>
+
+ <tr>
+ <td>cont_on_bna</td>
+ <td>Specifies whether Enable Continue on BNA enabled or not.
+ After receiving BNA interrupt the core disables the endpoint, when the
+ endpoint is re-enabled by the application the
+ - 0: Core starts processing from the DOEPDMA descriptor (default)
+ - 1: Core starts processing from the descriptor which received the BNA.
+ This parameter is valid only when OTG_EN_DESC_DMA == 1'b1.
+ </td></tr>
+
+ <tr>
+ <td>ahb_single</td>
+ <td>This bit when programmed supports SINGLE transfers for remainder data
+ in a transfer for DMA mode of operation.
+ - 0: The remainder data will be sent using INCR burst size (default)
+ - 1: The remainder data will be sent using SINGLE burst size.
+ </td></tr>
+
+<tr>
+ <td>adp_enable</td>
+ <td>Specifies whether ADP feature is enabled.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: ADP feature disabled (default)
+ - 1: ADP feature enabled
+ </td></tr>
+
+ <tr>
+ <td>otg_ver</td>
+ <td>Specifies whether OTG is performing as USB OTG Revision 2.0 or Revision 1.3
+ USB OTG device.
+ - 0: OTG 2.0 support disabled (default)
+ - 1: OTG 2.0 support enabled
+ </td></tr>
+
+*/
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.h
new file mode 100644
index 00000000..cddb6347
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.h
@@ -0,0 +1,86 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_driver.h $
+ * $Revision: #21 $
+ * $Date: 2015/10/12 $
+ * $Change: 2972621 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef __FH_OTG_DRIVER_H__
+#define __FH_OTG_DRIVER_H__
+
+/** @file
+ * This file contains the interface to the Linux driver.
+ */
+#include "fh_otg_os_dep.h"
+#include "fh_otg_core_if.h"
+
+/* Type declarations */
+struct fh_otg_pcd;
+struct fh_otg_hcd;
+
+/**
+ * This structure is a wrapper that encapsulates the driver components used to
+ * manage a single FH_otg controller.
+ */
+typedef struct fh_otg_device {
+ /** Structure containing OS-dependent stuff. KEEP THIS STRUCT AT THE
+ * VERY BEGINNING OF THE DEVICE STRUCT. OSes such as FreeBSD and NetBSD
+ * require this. */
+ struct os_dependent os_dep;
+
+ /** Pointer to the core interface structure. */
+ fh_otg_core_if_t *core_if;
+
+ /** Pointer to the PCD structure. */
+ struct fh_otg_pcd *pcd;
+
+ /** Pointer to the HCD structure. */
+ struct fh_otg_hcd *hcd;
+
+ /** Flag to indicate whether the common IRQ handler is installed. */
+ uint8_t common_irq_installed;
+
+} fh_otg_device_t;
+
+/*We must clear S3C24XX_EINTPEND external interrupt register
+ * because after clearing in this register trigerred IRQ from
+ * H/W core in kernel interrupt can occur again before OTG
+ * handlers clear all IRQ sources of Core registers because of
+ * timing latencies and Low Level IRQ Type.
+ */
+#ifdef CONFIG_MACH_IPMATE
+#define S3C2410X_CLEAR_EINTPEND() \
+do { \
+ __raw_writel(1UL << 11,S3C24XX_EINTPEND); \
+} while (0)
+#else
+#define S3C2410X_CLEAR_EINTPEND() do { } while (0)
+#endif
+
+#endif
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.c
new file mode 100755
index 00000000..449dccc5
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.c
@@ -0,0 +1,3459 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd.c $
+ * $Revision: #110 $
+ * $Date: 2013/05/19 $
+ * $Change: 2234022 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_DEVICE_ONLY
+
+/** @file
+ * This file implements HCD Core. All code in this file is portable and doesn't
+ * use any OS specific functions.
+ * Interface provided by HCD Core is defined in <code><hcd_if.h></code>
+ * header file.
+ */
+
+#include "fh_otg_hcd.h"
+#include "fh_otg_regs.h"
+
+fh_otg_hcd_t *fh_otg_hcd_alloc_hcd(void)
+{
+ return FH_ALLOC(sizeof(fh_otg_hcd_t));
+}
+
+/**
+ * Connection timeout function. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds.
+ */
+void fh_otg_hcd_connect_timeout(void *ptr)
+{
+ fh_otg_hcd_t *hcd;
+ gpwrdn_data_t gpwrdn;
+ FH_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, ptr);
+ FH_PRINTF("Connect Timeout\n");
+ __FH_ERROR("Device Not Connected/Responding\n");
+ /** Remove buspower after 10s */
+ hcd = ptr;
+ if (hcd->core_if->otg_ver)
+ fh_otg_set_prtpower(hcd->core_if, 0);
+ if (hcd->core_if->adp_enable && !hcd->core_if->adp.probe_enabled) {
+ cil_hcd_disconnect(hcd->core_if);
+ gpwrdn.d32 = 0;
+ /* Enable Power Down Logic */
+ gpwrdn.b.pmuintsel = 1;
+ gpwrdn.b.pmuactv = 1;
+ gpwrdn.b.dis_vbus = 1;
+ FH_MODIFY_REG32(&hcd->core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+
+ /* Unmask SRP detected interrupt from Power Down Logic */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.srp_det_msk = 1;
+ FH_MODIFY_REG32(&hcd->core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+
+ fh_mdelay(220);
+ fh_otg_adp_probe_start(hcd->core_if);
+ }
+}
+
+#ifdef DEBUG
+static void dump_channel_info(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+ if (qh->channel != NULL) {
+ fh_hc_t *hc = qh->channel;
+ fh_list_link_t *item;
+ fh_otg_qh_t *qh_item;
+ int num_channels = hcd->core_if->core_params->host_channels;
+ int i;
+
+ fh_otg_hc_regs_t *hc_regs;
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+ hctsiz_data_t hctsiz;
+ uint32_t hcdma;
+
+ hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num];
+ hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+ hcsplt.d32 = FH_READ_REG32(&hc_regs->hcsplt);
+ hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
+ hcdma = FH_READ_REG32(&hc_regs->hcdma);
+
+ FH_PRINTF(" Assigned to channel %p:\n", hc);
+ FH_PRINTF(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32,
+ hcsplt.d32);
+ FH_PRINTF(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32,
+ hcdma);
+ FH_PRINTF(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+ hc->dev_addr, hc->ep_num, hc->ep_is_in);
+ FH_PRINTF(" ep_type: %d\n", hc->ep_type);
+ FH_PRINTF(" max_packet: %d\n", hc->max_packet);
+ FH_PRINTF(" data_pid_start: %d\n", hc->data_pid_start);
+ FH_PRINTF(" xfer_started: %d\n", hc->xfer_started);
+ FH_PRINTF(" halt_status: %d\n", hc->halt_status);
+ FH_PRINTF(" xfer_buff: %p\n", hc->xfer_buff);
+ FH_PRINTF(" xfer_len: %d\n", hc->xfer_len);
+ FH_PRINTF(" qh: %p\n", hc->qh);
+ FH_PRINTF(" NP inactive sched:\n");
+ FH_LIST_FOREACH(item, &hcd->non_periodic_sched_inactive) {
+ qh_item =
+ FH_LIST_ENTRY(item, fh_otg_qh_t, qh_list_entry);
+ FH_PRINTF(" %p\n", qh_item);
+ }
+ FH_PRINTF(" NP active sched:\n");
+ FH_LIST_FOREACH(item, &hcd->non_periodic_sched_active) {
+ qh_item =
+ FH_LIST_ENTRY(item, fh_otg_qh_t, qh_list_entry);
+ FH_PRINTF(" %p\n", qh_item);
+ }
+ FH_PRINTF(" Channels: \n");
+ for (i = 0; i < num_channels; i++) {
+ fh_hc_t *hc = hcd->hc_ptr_array[i];
+ FH_PRINTF(" %2d: %p\n", i, hc);
+ }
+ }
+}
+#endif /* DEBUG */
+
+/**
+ * Work queue function for starting the HCD when A-Cable is connected.
+ * The hcd_start() must be called in a process context.
+ */
+static void hcd_start_func(void *_vp)
+{
+ fh_otg_hcd_t *hcd = (fh_otg_hcd_t *) _vp;
+
+ FH_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, hcd);
+ if (hcd) {
+ hcd->fops->start(hcd);
+ }
+}
+
+static void del_xfer_timers(fh_otg_hcd_t * hcd)
+{
+#ifdef DEBUG
+ int i;
+ int num_channels = hcd->core_if->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ FH_TIMER_CANCEL(hcd->core_if->hc_xfer_timer[i]);
+ }
+#endif
+}
+
+static void del_timers(fh_otg_hcd_t * hcd)
+{
+ del_xfer_timers(hcd);
+ FH_TIMER_CANCEL(hcd->conn_timer);
+}
+
+/**
+ * Processes all the URBs in a single list of QHs. Completes them with
+ * -ETIMEDOUT and frees the QTD.
+ */
+static void kill_urbs_in_qh_list(fh_otg_hcd_t * hcd, fh_list_link_t * qh_list)
+{
+ fh_list_link_t *qh_item;
+ fh_otg_qh_t *qh;
+ fh_otg_qtd_t *qtd, *qtd_tmp;
+
+ FH_LIST_FOREACH(qh_item, qh_list) {
+ qh = FH_LIST_ENTRY(qh_item, fh_otg_qh_t, qh_list_entry);
+ FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp,
+ &qh->qtd_list, qtd_list_entry) {
+ qtd = FH_CIRCLEQ_FIRST(&qh->qtd_list);
+ if (qtd->urb != NULL) {
+ if(!qtd->urb->priv) {
+ FH_ERROR("urb->priv is NULL !!!!\n");
+ return;
+ }
+ if(!hcd->fops)
+ FH_ERROR("hcd->fops is NULL !!!!!\n");
+ if(!hcd->fops->complete)
+ FH_ERROR("fops->complete is NULL !!!!\n");
+ hcd->fops->complete(hcd, qtd->urb->priv,
+ qtd->urb, -FH_E_TIMEOUT);
+ fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
+ }
+
+ }
+ }
+}
+
+/**
+ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
+ * and periodic schedules. The QTD associated with each URB is removed from
+ * the schedule and freed. This function may be called when a disconnect is
+ * detected or when the HCD is being stopped.
+ */
+static void kill_all_urbs(fh_otg_hcd_t * hcd)
+{
+ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive);
+ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active);
+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive);
+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready);
+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned);
+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued);
+}
+
+/**
+ * Start the connection timer. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds. The
+ * timer is deleted if a port connect interrupt occurs before the
+ * timer expires.
+ */
+static void fh_otg_hcd_start_connect_timer(fh_otg_hcd_t * hcd)
+{
+ FH_TIMER_SCHEDULE(hcd->conn_timer, 10000 /* 10 secs */ );
+}
+
+/**
+ * HCD Callback function for disconnect of the HCD.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t fh_otg_hcd_session_start_cb(void *p)
+{
+ fh_otg_hcd_t *fh_otg_hcd;
+ FH_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
+ fh_otg_hcd = p;
+ fh_otg_hcd_start_connect_timer(fh_otg_hcd);
+ return 1;
+}
+
+/**
+ * HCD Callback function for starting the HCD when A-Cable is
+ * connected.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t fh_otg_hcd_start_cb(void *p)
+{
+ fh_otg_hcd_t *fh_otg_hcd = p;
+ fh_otg_core_if_t *core_if;
+ hprt0_data_t hprt0;
+ uint32_t timeout = 50;
+
+ core_if = fh_otg_hcd->core_if;
+ /**@todo vahrama: Check the timeout value for OTG 2.0 */
+ if (core_if->otg_ver)
+ timeout = 25;
+ if (core_if->op_state == B_HOST) {
+ /*
+ * Reset the port. During a HNP mode switch the reset
+ * needs to occur within 1ms and have a duration of at
+ * least 50ms.
+ */
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtrst = 1;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ if (core_if->otg_ver) {
+ fh_mdelay(60);
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtrst = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ }
+ }
+ FH_WORKQ_SCHEDULE_DELAYED(core_if->wq_otg,
+ hcd_start_func, fh_otg_hcd, timeout,
+ "start hcd");
+
+ return 1;
+}
+
+/**
+ * HCD Callback function for disconnect of the HCD.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t fh_otg_hcd_disconnect_cb(void *p)
+{
+ gintsts_data_t intr;
+ fh_otg_hcd_t *fh_otg_hcd = p;
+
+ /*
+ * Set status flags for the hub driver.
+ */
+ fh_otg_hcd->flags.b.port_connect_status_change = 1;
+ fh_otg_hcd->flags.b.port_connect_status = 0;
+
+ /*
+ * Shutdown any transfers in process by clearing the Tx FIFO Empty
+ * interrupt mask and status bits and disabling subsequent host
+ * channel interrupts.
+ */
+ intr.d32 = 0;
+ intr.b.nptxfempty = 1;
+ intr.b.ptxfempty = 1;
+ intr.b.hcintr = 1;
+ FH_MODIFY_REG32(&fh_otg_hcd->core_if->core_global_regs->gintmsk,
+ intr.d32, 0);
+ FH_MODIFY_REG32(&fh_otg_hcd->core_if->core_global_regs->gintsts,
+ intr.d32, 0);
+
+ /*
+ * Turn off the vbus power only if the core has transitioned to device
+ * mode. If still in host mode, need to keep power on to detect a
+ * reconnection.
+ */
+ if (fh_otg_is_device_mode(fh_otg_hcd->core_if)) {
+ if (fh_otg_hcd->core_if->op_state != A_SUSPEND) {
+ hprt0_data_t hprt0 = {.d32 = 0 };
+ FH_PRINTF("Disconnect: PortPower off\n");
+ hprt0.b.prtpwr = 0;
+ FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0,
+ hprt0.d32);
+ }
+ /** Delete timers if become device */
+ del_timers(fh_otg_hcd);
+ fh_otg_disable_host_interrupts(fh_otg_hcd->core_if);
+ }
+
+ /* Respond with an error status to all URBs in the schedule. */
+ kill_all_urbs(fh_otg_hcd);
+
+ if (fh_otg_is_host_mode(fh_otg_hcd->core_if)) {
+ /* Clean up any host channels that were in use. */
+ int num_channels;
+ int i;
+ fh_hc_t *channel;
+ fh_otg_hc_regs_t *hc_regs;
+ hcchar_data_t hcchar;
+
+ if (fh_otg_hcd->core_if->otg_ver == 1)
+ del_xfer_timers(fh_otg_hcd);
+ else
+ del_timers(fh_otg_hcd);
+
+ num_channels = fh_otg_hcd->core_if->core_params->host_channels;
+
+ if (!fh_otg_hcd->core_if->dma_enable) {
+ /* Flush out any channel requests in slave mode. */
+ for (i = 0; i < num_channels; i++) {
+ channel = fh_otg_hcd->hc_ptr_array[i];
+ if (FH_CIRCLEQ_EMPTY_ENTRY
+ (channel, hc_list_entry)) {
+ hc_regs =
+ fh_otg_hcd->core_if->
+ host_if->hc_regs[i];
+ hcchar.d32 =
+ FH_READ_REG32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ hcchar.b.chen = 0;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ FH_WRITE_REG32
+ (&hc_regs->hcchar,
+ hcchar.d32);
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < num_channels; i++) {
+ channel = fh_otg_hcd->hc_ptr_array[i];
+ if (FH_CIRCLEQ_EMPTY_ENTRY(channel, hc_list_entry)) {
+ hc_regs =
+ fh_otg_hcd->core_if->host_if->hc_regs[i];
+ hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ /* Halt the channel. */
+ hcchar.b.chdis = 1;
+ FH_WRITE_REG32(&hc_regs->hcchar,
+ hcchar.d32);
+ }
+
+ fh_otg_hc_cleanup(fh_otg_hcd->core_if,
+ channel);
+ FH_CIRCLEQ_INSERT_TAIL
+ (&fh_otg_hcd->free_hc_list, channel,
+ hc_list_entry);
+ /*
+ * Added for Descriptor DMA to prevent channel double cleanup
+ * in release_channel_ddma(). Which called from ep_disable
+ * when device disconnect.
+ */
+ channel->qh = NULL;
+ }
+ }
+ }
+
+ if (fh_otg_hcd->fops->disconnect) {
+ fh_otg_hcd->fops->disconnect(fh_otg_hcd);
+ }
+
+ return 1;
+}
+
+/**
+ * HCD Callback function for stopping the HCD.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t fh_otg_hcd_stop_cb(void *p)
+{
+	/* CIL callback: the opaque context is the HCD state structure. */
+	fh_otg_hcd_t *hcd = (fh_otg_hcd_t *) p;
+
+	FH_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
+	fh_otg_hcd_stop(hcd);
+	return 1;
+}
+
+#ifdef CONFIG_USB_FH_OTG_LPM
+/**
+ * HCD Callback function for sleep of HCD.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ */
+static int fh_otg_hcd_sleep_cb(void *p)
+{
+	fh_otg_hcd_t *fh_hcd = (fh_otg_hcd_t *) p;
+
+	/* Release host channels parked by LPM before the core sleeps. */
+	fh_otg_hcd_free_hc_from_lpm(fh_hcd);
+	return 0;
+}
+#endif
+
+/**
+ * HCD Callback function for Remote Wakeup.
+ *
+ * @param p void pointer to the <code>struct usb_hcd</code>
+ */
+static int fh_otg_hcd_rem_wakeup_cb(void *p)
+{
+	fh_otg_hcd_t *fh_hcd = (fh_otg_hcd_t *) p;
+
+	/*
+	 * Record which kind of resume occurred so the root-hub status
+	 * machinery can report the port change later.
+	 */
+	if (fh_hcd->core_if->lx_state == FH_OTG_L2) {
+		fh_hcd->flags.b.port_suspend_change = 1;
+	}
+#ifdef CONFIG_USB_FH_OTG_LPM
+	else {
+		fh_hcd->flags.b.port_l1_change = 1;
+	}
+#endif
+	return 0;
+}
+
+/**
+ * Halts the FH_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ *
+ * @param hcd The HCD state structure. The root hub must already have been
+ *            disconnected (see note in the body).
+ */
+void fh_otg_hcd_stop(fh_otg_hcd_t * hcd)
+{
+	hprt0_data_t hprt0 = {.d32 = 0 };
+
+	FH_DEBUGPL(DBG_HCD, "FH OTG HCD STOP\n");
+
+	/*
+	 * The root hub should be disconnected before this function is called.
+	 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
+	 * and the QH lists (via ..._hcd_endpoint_disable).
+	 */
+
+	/* Turn off all host-specific interrupts. */
+	fh_otg_disable_host_interrupts(hcd->core_if);
+
+	/* Turn off the vbus power by clearing HPRT0.PrtPwr. */
+	FH_PRINTF("PortPower off\n");
+	hprt0.b.prtpwr = 0;
+	FH_WRITE_REG32(hcd->core_if->host_if->hprt0, hprt0.d32);
+	/* Brief delay after dropping port power. */
+	fh_mdelay(1);
+}
+
+/**
+ * Queues an URB to the HCD. Wraps the URB in a QTD, links the QTD onto the
+ * endpoint's QH (creating/locating it via @p ep_handle), then kicks the
+ * transaction scheduler unless the SOF interrupt is enabled (in which case
+ * the SOF handler drives scheduling instead).
+ *
+ * @param hcd          The HCD state structure.
+ * @param fh_otg_urb   The URB to enqueue.
+ * @param ep_handle    In/out endpoint handle; holds the QH pointer.
+ * @param atomic_alloc Non-zero to use atomic (non-sleeping) allocation.
+ *
+ * @return 0 on success, -FH_E_NO_DEVICE if the port is disconnected,
+ *         -FH_E_NO_MEMORY on QTD allocation failure, or the negative
+ *         status returned by fh_otg_hcd_qtd_add().
+ */
+int fh_otg_hcd_urb_enqueue(fh_otg_hcd_t * hcd,
+			   fh_otg_hcd_urb_t * fh_otg_urb, void **ep_handle,
+			   int atomic_alloc)
+{
+	fh_irqflags_t flags;
+	int retval = 0;
+	fh_otg_qtd_t *qtd;
+	gintmsk_data_t intr_mask = {.d32 = 0 };
+
+	if (!hcd->flags.b.port_connect_status) {
+		/* No longer connected. */
+		FH_ERROR("Not connected\n");
+		return -FH_E_NO_DEVICE;
+	}
+
+	qtd = fh_otg_hcd_qtd_create(fh_otg_urb, atomic_alloc);
+	if (qtd == NULL) {
+		FH_ERROR("FH OTG HCD URB Enqueue failed creating QTD\n");
+		return -FH_E_NO_MEMORY;
+	}
+
+	retval =
+	    fh_otg_hcd_qtd_add(qtd, hcd, (fh_otg_qh_t **) ep_handle, atomic_alloc);
+	if (retval < 0) {
+		FH_ERROR("FH OTG HCD URB Enqueue failed adding QTD. "
+			 "Error status %d\n", retval);
+		/* qtd is freed here; it is only used below when retval == 0. */
+		fh_otg_hcd_qtd_free(qtd);
+	} else {
+		qtd->qh = *ep_handle;
+	}
+	intr_mask.d32 = FH_READ_REG32(&hcd->core_if->core_global_regs->gintmsk);
+	if (!intr_mask.b.sofintr && retval == 0) {
+		fh_otg_transaction_type_e tr_type;
+		if ((qtd->qh->ep_type == UE_BULK)
+		    && !(qtd->urb->flags & URB_GIVEBACK_ASAP)) {
+			/* Do not schedule SG transactions until qtd has URB_GIVEBACK_ASAP set */
+			return 0;
+		}
+		FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
+		tr_type = fh_otg_hcd_select_transactions(hcd);
+		if (tr_type != FH_OTG_TRANSACTION_NONE) {
+			fh_otg_hcd_queue_transactions(hcd, tr_type);
+		}
+		FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
+	}
+
+	return retval;
+}
+
+/**
+ * Dequeues (cancels) an URB. If the URB's QTD has already been assigned to
+ * a host channel, the channel is halted first; then the QTD is freed and
+ * the QH is deactivated or removed from the schedule as appropriate.
+ *
+ * @param hcd        The HCD state structure.
+ * @param fh_otg_urb The URB to cancel; its qtd field links it to the
+ *                   schedule.
+ *
+ * @return 0 on success, -EINVAL if the URB has no QTD or the QTD has no QH.
+ */
+int fh_otg_hcd_urb_dequeue(fh_otg_hcd_t * hcd,
+			   fh_otg_hcd_urb_t * fh_otg_urb)
+{
+	fh_otg_qh_t *qh;
+	fh_otg_qtd_t *urb_qtd;
+
+	urb_qtd = fh_otg_urb->qtd;
+	/*
+	 * Validate the QTD before dereferencing it. The original code read
+	 * urb_qtd->qh ahead of this NULL test, which defeated the check and
+	 * could oops on a NULL QTD.
+	 */
+	if (!urb_qtd) {
+		printk(KERN_ERR "## Urb QTD is NULL ##\n");
+		return -EINVAL;
+	}
+
+	qh = urb_qtd->qh;
+	if (!qh) {
+		printk(KERN_ERR "## Urb QH is NULL ##\n");
+		return -EINVAL;
+	}
+#ifdef DEBUG
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		if (urb_qtd->in_process) {
+			dump_channel_info(hcd, qh);
+		}
+	}
+#endif
+	if (urb_qtd->in_process && qh->channel) {
+		/* The QTD is in process (it has been assigned to a channel). */
+		if (hcd->flags.b.port_connect_status) {
+			/*
+			 * If still connected (i.e. in host mode), halt the
+			 * channel so it can be used for other transfers. If
+			 * no longer connected, the host registers can't be
+			 * written to halt the channel since the core is in
+			 * device mode.
+			 */
+			fh_otg_hc_halt(hcd->core_if, qh->channel,
+				       FH_OTG_HC_XFER_URB_DEQUEUE);
+		}
+	}
+
+	/*
+	 * Free the QTD and clean up the associated QH. Leave the QH in the
+	 * schedule if it has any remaining QTDs.
+	 */
+	if (!hcd->core_if->dma_desc_enable) {
+		uint8_t b = urb_qtd->in_process;
+		fh_otg_hcd_qtd_remove_and_free(hcd, urb_qtd, qh);
+		if (b) {
+			fh_otg_hcd_qh_deactivate(hcd, qh, 0);
+			qh->channel = NULL;
+		} else if (FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
+			fh_otg_hcd_qh_remove(hcd, qh);
+		}
+	} else {
+		fh_otg_hcd_qtd_remove_and_free(hcd, urb_qtd, qh);
+	}
+	return 0;
+}
+
+/**
+ * Disables an endpoint. Polls up to @p retry times (5 ms apart, with the
+ * HCD lock dropped while sleeping) for the endpoint's QTD list to drain,
+ * then removes the QH from the schedule and frees it. If the retries are
+ * exhausted the QH is removed anyway.
+ *
+ * @param hcd       The HCD state structure.
+ * @param ep_handle QH pointer for the endpoint.
+ * @param retry     Maximum number of 5 ms waits; negative is invalid.
+ *
+ * @return 0 on success, -FH_E_INVALID on a negative retry count or a NULL
+ *         QH handle.
+ */
+int fh_otg_hcd_endpoint_disable(fh_otg_hcd_t * hcd, void *ep_handle,
+				int retry)
+{
+	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;
+	int retval = 0;
+	fh_irqflags_t flags;
+
+	if (retry < 0) {
+		retval = -FH_E_INVALID;
+		goto done;
+	}
+
+	if (!qh) {
+		retval = -FH_E_INVALID;
+		goto done;
+	}
+
+	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
+
+	/* Drop the lock around each sleep so the interrupt path can drain
+	 * the QTD list while we wait. */
+	while (!FH_CIRCLEQ_EMPTY(&qh->qtd_list) && retry) {
+		FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
+		retry--;
+		fh_msleep(5);
+		FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
+	}
+
+	fh_otg_hcd_qh_remove(hcd, qh);
+
+	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
+	/*
+	 * Split fh_otg_hcd_qh_remove_and_free() into qh_remove
+	 * and qh_free to prevent stack dump on FH_DMA_FREE() with
+	 * irq_disabled (spinlock_irqsave) in fh_otg_hcd_desc_list_free()
+	 * and fh_otg_hcd_frame_list_alloc().
+	 */
+	fh_otg_hcd_qh_free(hcd, qh);
+
+done:
+	return retval;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+int fh_otg_hcd_endpoint_reset(fh_otg_hcd_t * hcd, void *ep_handle)
+{
+	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;
+
+	if (qh == NULL)
+		return -FH_E_INVALID;
+
+	/* Restart the endpoint on DATA0 after a reset/clear-halt. */
+	qh->data_toggle = FH_OTG_HC_PID_DATA0;
+	return 0;
+}
+#endif
+
+/**
+ * HCD Callback structure for handling mode switching.
+ *
+ * Registered with the CIL in fh_otg_hcd_init(); the CIL invokes these
+ * entries on core events, passing back the fh_otg_hcd_t context in .p.
+ */
+static fh_otg_cil_callbacks_t hcd_cil_callbacks = {
+	.start = fh_otg_hcd_start_cb,
+	.stop = fh_otg_hcd_stop_cb,
+	.disconnect = fh_otg_hcd_disconnect_cb,
+	.session_start = fh_otg_hcd_session_start_cb,
+	.resume_wakeup = fh_otg_hcd_rem_wakeup_cb,
+#ifdef CONFIG_USB_FH_OTG_LPM
+	.sleep = fh_otg_hcd_sleep_cb,
+#endif
+	.p = 0,			/* context pointer, filled in at registration */
+};
+
+/**
+ * Reset tasklet function. Asserts port reset via HPRT0.PrtRst, holds it
+ * for 60 ms, deasserts it, and flags the reset change so the root-hub
+ * status path reports it.
+ *
+ * @param data The fh_otg_hcd_t context passed at tasklet creation.
+ */
+static void reset_tasklet_func(void *data)
+{
+	fh_otg_hcd_t *fh_otg_hcd = (fh_otg_hcd_t *) data;
+	fh_otg_core_if_t *core_if = fh_otg_hcd->core_if;
+	hprt0_data_t hprt0;
+
+	FH_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
+
+	hprt0.d32 = fh_otg_read_hprt0(core_if);
+	hprt0.b.prtrst = 1;
+	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+	/* Hold the port in reset for 60 ms before releasing it. */
+	fh_mdelay(60);
+
+	hprt0.b.prtrst = 0;
+	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+	fh_otg_hcd->flags.b.port_reset_change = 1;
+}
+
+/**
+ * Kills all URBs on every QH in @p qh_list, then removes and frees each QH.
+ * A list whose head was never initialized (next == NULL) is skipped.
+ *
+ * @param hcd     The HCD state structure.
+ * @param qh_list One of the HCD's periodic/non-periodic schedule lists.
+ */
+static void qh_list_free(fh_otg_hcd_t * hcd, fh_list_link_t * qh_list)
+{
+	fh_list_link_t *item;
+	fh_otg_qh_t *qh;
+	fh_irqflags_t flags;
+
+	if (!qh_list->next) {
+		/* The list hasn't been initialized yet. */
+		return;
+	}
+	/*
+	 * Hold spinlock here. Not needed in that case if bellow
+	 * function is being called from ISR
+	 */
+	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
+	/* Ensure there are no QTDs or URBs left. */
+	kill_urbs_in_qh_list(hcd, qh_list);
+	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
+
+	/*
+	 * NOTE(review): entries are removed inside the loop while
+	 * FH_LIST_FOREACH is still iterating; this relies on the removed
+	 * link's next pointer staying valid -- confirm a _SAFE iteration
+	 * variant is not required here.
+	 */
+	FH_LIST_FOREACH(item, qh_list) {
+		qh = FH_LIST_ENTRY(item, fh_otg_qh_t, qh_list_entry);
+		fh_otg_hcd_qh_remove_and_free(hcd, qh);
+	}
+}
+
+/**
+ * Exit from Hibernation if Host did not detect SRP from connected SRP capable
+ * Device during SRP time by host power up.
+ *
+ * Walks the GPWRDN power-down register through the documented wake-up
+ * sequence (voltage switch, core reset, clamp release, PMU disable), then
+ * re-initializes the core in host mode and restarts the HCD.
+ *
+ * @param ptr The fh_otg_core_if_t context (timer callback argument).
+ */
+void fh_otg_hcd_power_up(void *ptr)
+{
+	gpwrdn_data_t gpwrdn = {.d32 = 0 };
+	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;
+
+	FH_PRINTF("%s called\n", __FUNCTION__);
+
+	if (!core_if->hibernation_suspend) {
+		FH_PRINTF("Already exited from Hibernation\n");
+		return;
+	}
+
+	/* Switch on the voltage to the core */
+	gpwrdn.b.pwrdnswtch = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+	fh_udelay(10);
+
+	/* Reset the core */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pwrdnrstn = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+	fh_udelay(10);
+
+	/* Disable power clamps */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pwrdnclmp = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+	/* Remove reset the core signal */
+	/* (set mask is used here, i.e. the bit is set rather than cleared) */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pwrdnrstn = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
+	fh_udelay(10);
+
+	/* Disable PMU interrupt */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pmuintsel = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+	core_if->hibernation_suspend = 0;
+
+	/* Disable PMU */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.pmuactv = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+	fh_udelay(10);
+
+	/* Enable VBUS */
+	gpwrdn.d32 = 0;
+	gpwrdn.b.dis_vbus = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
+
+	/* Come back up in host mode and restart the HCD. */
+	core_if->op_state = A_HOST;
+	fh_otg_core_init(core_if);
+	fh_otg_enable_global_interrupts(core_if);
+	cil_hcd_start(core_if);
+}
+
+/**
+ * Frees secondary storage associated with the fh_otg_hcd structure contained
+ * in the struct usb_hcd field.
+ *
+ * Releases, in order: pending timers, all QH/QTD schedule lists, the host
+ * channel descriptors, the status bit-bucket buffer, the HCD spinlock, the
+ * connection timer and reset tasklet, and finally the HCD structure itself.
+ */
+static void fh_otg_hcd_free(fh_otg_hcd_t * fh_otg_hcd)
+{
+	int i;
+
+	FH_DEBUGPL(DBG_HCD, "FH OTG HCD FREE\n");
+
+	del_timers(fh_otg_hcd);
+
+	/* Free memory for QH/QTD lists */
+	qh_list_free(fh_otg_hcd, &fh_otg_hcd->non_periodic_sched_inactive);
+	qh_list_free(fh_otg_hcd, &fh_otg_hcd->non_periodic_sched_active);
+	qh_list_free(fh_otg_hcd, &fh_otg_hcd->periodic_sched_inactive);
+	qh_list_free(fh_otg_hcd, &fh_otg_hcd->periodic_sched_ready);
+	qh_list_free(fh_otg_hcd, &fh_otg_hcd->periodic_sched_assigned);
+	qh_list_free(fh_otg_hcd, &fh_otg_hcd->periodic_sched_queued);
+
+	/* Free memory for the host channels. */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		fh_hc_t *hc = fh_otg_hcd->hc_ptr_array[i];
+
+#ifdef DEBUG
+		if (fh_otg_hcd->core_if->hc_xfer_timer[i]) {
+			FH_TIMER_FREE(fh_otg_hcd->core_if->hc_xfer_timer[i]);
+		}
+#endif
+		if (hc != NULL) {
+			FH_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n",
+				   i, hc);
+			FH_FREE(hc);
+		}
+	}
+
+	/* status_buf came from the DMA pool only when DMA is enabled. */
+	if (fh_otg_hcd->core_if->dma_enable) {
+		if (fh_otg_hcd->status_buf_dma) {
+			FH_DMA_FREE(FH_OTG_HCD_STATUS_BUF_SIZE,
+				    fh_otg_hcd->status_buf,
+				    fh_otg_hcd->status_buf_dma);
+		}
+	} else if (fh_otg_hcd->status_buf != NULL) {
+		FH_FREE(fh_otg_hcd->status_buf);
+	}
+	FH_SPINLOCK_FREE(fh_otg_hcd->lock);
+	/* Set core_if's lock pointer to NULL */
+	fh_otg_hcd->core_if->lock = NULL;
+
+	FH_TIMER_FREE(fh_otg_hcd->conn_timer);
+	FH_TASK_FREE(fh_otg_hcd->reset_tasklet);
+
+#ifdef FH_DEV_SRPCAP
+	if (fh_otg_hcd->core_if->power_down == 2 &&
+	    fh_otg_hcd->core_if->pwron_timer) {
+		FH_TIMER_FREE(fh_otg_hcd->core_if->pwron_timer);
+	}
+#endif
+	FH_FREE(fh_otg_hcd);
+}
+
+/**
+ * Initializes the HCD state: allocates the lock, registers the CIL
+ * callbacks, sets up the schedule lists, allocates the host channel
+ * descriptors, timers, tasklet, and the status bit-bucket buffer.
+ *
+ * @param hcd     The HCD state structure to initialize. Note: on lock
+ *                allocation failure @p hcd itself is freed here.
+ * @param core_if The core interface this HCD drives.
+ *
+ * @return 0 on success, -FH_E_NO_MEMORY on any allocation failure.
+ */
+int fh_otg_hcd_init(fh_otg_hcd_t * hcd, fh_otg_core_if_t * core_if)
+{
+	int retval = 0;
+	int num_channels;
+	int i;
+	fh_hc_t *channel;
+
+	hcd->lock = FH_SPINLOCK_ALLOC();
+	if (!hcd->lock) {
+		FH_ERROR("Could not allocate lock for pcd");
+		FH_FREE(hcd);
+		retval = -FH_E_NO_MEMORY;
+		goto out;
+	}
+	hcd->core_if = core_if;
+
+	/* Register the HCD CIL Callbacks */
+	fh_otg_cil_register_hcd_callbacks(hcd->core_if,
+					  &hcd_cil_callbacks, hcd);
+
+	/* Initialize the non-periodic schedule. */
+	FH_LIST_INIT(&hcd->non_periodic_sched_inactive);
+	FH_LIST_INIT(&hcd->non_periodic_sched_active);
+
+	/* Initialize the periodic schedule. */
+	FH_LIST_INIT(&hcd->periodic_sched_inactive);
+	FH_LIST_INIT(&hcd->periodic_sched_ready);
+	FH_LIST_INIT(&hcd->periodic_sched_assigned);
+	FH_LIST_INIT(&hcd->periodic_sched_queued);
+
+	/*
+	 * Create a host channel descriptor for each host channel implemented
+	 * in the controller. Initialize the channel descriptor array.
+	 */
+	FH_CIRCLEQ_INIT(&hcd->free_hc_list);
+	num_channels = hcd->core_if->core_params->host_channels;
+	FH_MEMSET(hcd->hc_ptr_array, 0, sizeof(hcd->hc_ptr_array));
+	for (i = 0; i < num_channels; i++) {
+		channel = FH_ALLOC(sizeof(fh_hc_t));
+		if (channel == NULL) {
+			retval = -FH_E_NO_MEMORY;
+			FH_ERROR("%s: host channel allocation failed\n",
+				 __func__);
+			fh_otg_hcd_free(hcd);
+			goto out;
+		}
+		channel->hc_num = i;
+		hcd->hc_ptr_array[i] = channel;
+#ifdef DEBUG
+		hcd->core_if->hc_xfer_timer[i] =
+		    FH_TIMER_ALLOC("hc timer", hc_xfer_timeout,
+				   &hcd->core_if->hc_xfer_info[i]);
+#endif
+		FH_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i,
+			   channel);
+	}
+
+	/* Initialize the Connection timeout timer. */
+	/* NOTE(review): the results of the timer/tasklet allocations below
+	 * are not checked -- confirm the FH_* allocators cannot fail here. */
+	hcd->conn_timer = FH_TIMER_ALLOC("Connection timer",
+					 fh_otg_hcd_connect_timeout, hcd);
+
+	/* Initialize reset tasklet. */
+	hcd->reset_tasklet = FH_TASK_ALLOC("reset_tasklet", reset_tasklet_func, hcd);
+#ifdef FH_DEV_SRPCAP
+	if (hcd->core_if->power_down == 2) {
+		/* Initialize Power on timer for Host power up in case hibernation */
+		hcd->core_if->pwron_timer = FH_TIMER_ALLOC("PWRON TIMER",
+					fh_otg_hcd_power_up, core_if);
+	}
+#endif
+
+	/*
+	 * Allocate space for storing data on status transactions. Normally no
+	 * data is sent, but this space acts as a bit bucket. This must be
+	 * done after usb_add_hcd since that function allocates the DMA buffer
+	 * pool.
+	 */
+	if (hcd->core_if->dma_enable) {
+		hcd->status_buf =
+		    FH_DMA_ALLOC(FH_OTG_HCD_STATUS_BUF_SIZE,
+				 &hcd->status_buf_dma);
+	} else {
+		hcd->status_buf = FH_ALLOC(FH_OTG_HCD_STATUS_BUF_SIZE);
+	}
+	if (!hcd->status_buf) {
+		retval = -FH_E_NO_MEMORY;
+		FH_ERROR("%s: status_buf allocation failed\n", __func__);
+		fh_otg_hcd_free(hcd);
+		goto out;
+	}
+
+	hcd->otg_port = 1;
+	hcd->frame_list = NULL;
+	hcd->frame_list_dma = 0;
+	hcd->periodic_qh_count = 0;
+out:
+	return retval;
+}
+
+void fh_otg_hcd_remove(fh_otg_hcd_t * hcd)
+{
+	/* Mask every host-specific interrupt, then release all HCD state. */
+	fh_otg_disable_host_interrupts(hcd->core_if);
+	fh_otg_hcd_free(hcd);
+}
+
+/**
+ * Initializes dynamic portions of the FH_otg HCD state.
+ *
+ * Clears the HCD flags and scheduling counters, rebuilds the free host
+ * channel list from the hc_ptr_array, and re-initializes the core for host
+ * mode operation.
+ */
+static void fh_otg_hcd_reinit(fh_otg_hcd_t * hcd)
+{
+	int num_channels;
+	int i;
+	fh_hc_t *channel;
+	fh_hc_t *channel_tmp;
+
+	hcd->flags.d32 = 0;
+
+	hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
+	hcd->non_periodic_channels = 0;
+	hcd->periodic_channels = 0;
+
+	/*
+	 * Put all channels in the free channel list and clean up channel
+	 * states.
+	 */
+	/* First empty the list (safe iterator: entries are removed). */
+	FH_CIRCLEQ_FOREACH_SAFE(channel, channel_tmp,
+				&hcd->free_hc_list, hc_list_entry) {
+		FH_CIRCLEQ_REMOVE(&hcd->free_hc_list, channel, hc_list_entry);
+	}
+
+	num_channels = hcd->core_if->core_params->host_channels;
+	for (i = 0; i < num_channels; i++) {
+		channel = hcd->hc_ptr_array[i];
+		FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, channel,
+				       hc_list_entry);
+		fh_otg_hc_cleanup(hcd->core_if, channel);
+	}
+
+	/* Initialize the FH core for host mode operation. */
+	fh_otg_core_host_init(hcd->core_if);
+
+	/* Set core_if's lock pointer to the hcd->lock */
+	hcd->core_if->lock = hcd->lock;
+}
+
+/**
+ * Assigns transactions from a QTD to a free host channel and initializes the
+ * host channel to perform the transactions. The host channel is removed from
+ * the free list.
+ *
+ * @param hcd The HCD state structure.
+ * @param qh Transactions from the first QTD for this QH are selected and
+ * assigned to a free host channel.
+ *
+ * @return 0 on success; -ENOMEM if the QH has no QTD/URB, no usable free
+ * channel exists, or the dword-alignment bounce buffer cannot be allocated.
+ */
+static int assign_and_init_hc(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	fh_hc_t *hc = NULL;
+	fh_otg_qtd_t *qtd;
+	fh_otg_hcd_urb_t *urb;
+	void* ptr = NULL;	/* CPU-side buffer for non-dword-aligned DMA */
+	hcchar_data_t hcchar;
+	int num_channels;
+	int i;
+
+	FH_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, hcd, qh);
+
+	qtd = FH_CIRCLEQ_FIRST(&qh->qtd_list);
+	if (!qtd) {
+		FH_ERROR("qh qtd is none\n");
+		return -ENOMEM;
+	}
+	urb = qtd->urb;
+	if (!urb) {
+		FH_ERROR("hcd urb is none\n");
+		return -ENOMEM;
+	}
+	num_channels = hcd->core_if->core_params->host_channels;
+
+	/* WA to not select channel with chdis bit set, this was
+	 * observed after role switch as part of OTG 2.0 HNP
+	 */
+	for (i = 0; i < num_channels; i++) {
+		hc = FH_CIRCLEQ_FIRST(&hcd->free_hc_list);
+		hcchar.d32 = FH_READ_REG32(&hcd->core_if->host_if->hc_regs[hc->hc_num]->hcchar);
+		FH_DEBUGPL(DBG_HCDV, "HC num = %d HCCHAR %08x\n", hc->hc_num, hcchar.d32);
+		if(!hcchar.b.chdis && !hcchar.b.chen)
+			break;
+		/* Rotate the busy channel to the list tail and try the next. */
+		FH_CIRCLEQ_REMOVE_INIT(&hcd->free_hc_list, hc, hc_list_entry);
+		FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
+		hc = NULL;
+	}
+	if (!hc) {
+		FH_ERROR("No free channel with en and dis bits 0\n");
+		return -ENOMEM;
+	}
+
+
+
+	/* Remove the host channel from the free list. */
+	FH_CIRCLEQ_REMOVE_INIT(&hcd->free_hc_list, hc, hc_list_entry);
+
+	qh->channel = hc;
+
+	qtd->in_process = 1;
+
+	/*
+	 * Use usb_pipedevice to determine device address. This address is
+	 * 0 before the SET_ADDRESS command and the correct address afterward.
+	 */
+	hc->dev_addr = fh_otg_hcd_get_dev_addr(&urb->pipe_info);
+	hc->ep_num = fh_otg_hcd_get_ep_num(&urb->pipe_info);
+	hc->speed = qh->dev_speed;
+	hc->max_packet = fh_max_packet(qh->maxp);
+
+	hc->xfer_started = 0;
+	hc->halt_status = FH_OTG_HC_XFER_NO_HALT_STATUS;
+	hc->error_state = (qtd->error_count > 0);
+	hc->halt_on_queue = 0;
+	hc->halt_pending = 0;
+	hc->requests = 0;
+
+	/*
+	 * The following values may be modified in the transfer type section
+	 * below. The xfer_len value may be reduced when the transfer is
+	 * started to accommodate the max widths of the XferSize and PktCnt
+	 * fields in the HCTSIZn register.
+	 */
+
+	hc->ep_is_in = (fh_otg_hcd_is_pipe_in(&urb->pipe_info) != 0);
+	if (hc->ep_is_in) {
+		hc->do_ping = 0;
+	} else {
+		hc->do_ping = qh->ping_state;
+	}
+
+	hc->data_pid_start = qh->data_toggle;
+	hc->multi_count = 1;
+
+	/* Clamp actual_length for OUT pipes before computing xfer_len. */
+	if (urb->actual_length > urb->length &&
+	    !fh_otg_hcd_is_pipe_in(&urb->pipe_info)) {
+		urb->actual_length = urb->length;
+	}
+
+	if (hcd->core_if->dma_enable) {
+		hc->xfer_buff = (uint8_t *) urb->dma + urb->actual_length;
+
+		/* For non-dword aligned case */
+		if (((unsigned long)hc->xfer_buff & 0x3)
+		    && !hcd->core_if->dma_desc_enable) {
+			ptr = (uint8_t *) urb->buf + urb->actual_length;
+		}
+	} else {
+		hc->xfer_buff = (uint8_t *) urb->buf + urb->actual_length;
+	}
+	hc->xfer_len = urb->length - urb->actual_length;
+	hc->xfer_count = 0;
+
+	/*
+	 * Set the split attributes
+	 */
+	hc->do_split = 0;
+	if (qh->do_split) {
+		uint32_t hub_addr, port_addr;
+		hc->do_split = 1;
+		hc->xact_pos = qtd->isoc_split_pos;
+		hc->complete_split = qtd->complete_split;
+		hcd->fops->hub_info(hcd, urb->priv, &hub_addr, &port_addr);
+		hc->hub_addr = (uint8_t) hub_addr;
+		hc->port_addr = (uint8_t) port_addr;
+	}
+
+	switch (fh_otg_hcd_get_pipe_type(&urb->pipe_info)) {
+	case UE_CONTROL:
+		hc->ep_type = FH_OTG_EP_TYPE_CONTROL;
+		switch (qtd->control_phase) {
+		case FH_OTG_CONTROL_SETUP:
+			FH_DEBUGPL(DBG_HCDV, "  Control setup transaction\n");
+			hc->do_ping = 0;
+			hc->ep_is_in = 0;
+			hc->data_pid_start = FH_OTG_HC_PID_SETUP;
+			if (hcd->core_if->dma_enable) {
+				hc->xfer_buff = (uint8_t *) urb->setup_dma;
+			} else {
+				hc->xfer_buff = (uint8_t *) urb->setup_packet;
+			}
+			/* Setup packets are always exactly 8 bytes. */
+			hc->xfer_len = 8;
+			ptr = NULL;
+			break;
+		case FH_OTG_CONTROL_DATA:
+			FH_DEBUGPL(DBG_HCDV, "  Control data transaction\n");
+			hc->data_pid_start = qtd->data_toggle;
+			break;
+		case FH_OTG_CONTROL_STATUS:
+			/*
+			 * Direction is opposite of data direction or IN if no
+			 * data.
+			 */
+			FH_DEBUGPL(DBG_HCDV, "  Control status transaction\n");
+			if (urb->length == 0) {
+				hc->ep_is_in = 1;
+			} else {
+				hc->ep_is_in =
+				    fh_otg_hcd_is_pipe_out(&urb->pipe_info);
+			}
+			if (hc->ep_is_in) {
+				hc->do_ping = 0;
+			}
+
+			hc->data_pid_start = FH_OTG_HC_PID_DATA1;
+
+			hc->xfer_len = 0;
+			/* Status lands in the HCD's shared bit-bucket buffer. */
+			if (hcd->core_if->dma_enable) {
+				hc->xfer_buff = (uint8_t *) hcd->status_buf_dma;
+			} else {
+				hc->xfer_buff = (uint8_t *) hcd->status_buf;
+			}
+			ptr = NULL;
+			break;
+		}
+		break;
+	case UE_BULK:
+		hc->ep_type = FH_OTG_EP_TYPE_BULK;
+		break;
+	case UE_INTERRUPT:
+		hc->ep_type = FH_OTG_EP_TYPE_INTR;
+		break;
+	case UE_ISOCHRONOUS:
+		{
+			struct fh_otg_hcd_iso_packet_desc *frame_desc;
+
+			hc->ep_type = FH_OTG_EP_TYPE_ISOC;
+
+			/* Descriptor DMA builds its own frame list. */
+			if (hcd->core_if->dma_desc_enable)
+				break;
+
+			frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
+
+			frame_desc->status = 0;
+
+			if (hcd->core_if->dma_enable) {
+				hc->xfer_buff = (uint8_t *) urb->dma;
+			} else {
+				hc->xfer_buff = (uint8_t *) urb->buf;
+			}
+			hc->xfer_buff +=
+			    frame_desc->offset + qtd->isoc_split_offset;
+			hc->xfer_len =
+			    frame_desc->length - qtd->isoc_split_offset;
+
+			/* For non-dword aligned buffers */
+			if (((unsigned long)hc->xfer_buff & 0x3)
+			    && hcd->core_if->dma_enable) {
+				ptr =
+				    (uint8_t *) urb->buf + frame_desc->offset +
+				    qtd->isoc_split_offset;
+			} else
+				ptr = NULL;
+
+			if (hc->xact_pos == FH_HCSPLIT_XACTPOS_ALL) {
+				if (hc->xfer_len <= 188) {
+					hc->xact_pos = FH_HCSPLIT_XACTPOS_ALL;
+				} else {
+					hc->xact_pos =
+					    FH_HCSPLIT_XACTPOS_BEGIN;
+				}
+			}
+		}
+		break;
+	}
+	/* non DWORD-aligned buffer case */
+	if (ptr) {
+		uint32_t buf_size;
+		if (hc->ep_type != FH_OTG_EP_TYPE_ISOC) {
+			buf_size = hcd->core_if->core_params->max_transfer_size;
+		} else {
+			buf_size = 4096;
+		}
+		/* Bounce buffer is allocated once per QH and reused. */
+		if (!qh->dw_align_buf) {
+			qh->dw_align_buf = FH_DMA_ALLOC_ATOMIC(buf_size,
+					 &qh->dw_align_buf_dma);
+			printk(KERN_ERR "FH_DMA_ALLOC_ATOMIC (%p)\n", qh->dw_align_buf);
+			if (!qh->dw_align_buf) {
+				FH_ERROR
+				    ("%s: Failed to allocate memory to handle "
+				     "non-dword aligned buffer case\n",
+				     __func__);
+				return -ENOMEM;
+			}
+		}
+		if (!hc->ep_is_in) {
+			fh_memcpy(qh->dw_align_buf, ptr, hc->xfer_len);
+		}
+		hc->align_buff = qh->dw_align_buf_dma;
+	} else {
+		hc->align_buff = 0;
+	}
+
+	if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
+	    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
+		/*
+		 * This value may be modified when the transfer is started to
+		 * reflect the actual transfer length.
+		 */
+		hc->multi_count = fh_hb_mult(qh->maxp);
+	}
+
+	if (hcd->core_if->dma_desc_enable)
+		hc->desc_list_addr = qh->desc_list_dma;
+
+	fh_otg_hc_init(hcd->core_if, hc);
+	hc->qh = qh;
+
+	return 0;
+}
+
+/**
+ * This function selects transactions from the HCD transfer schedule and
+ * assigns them to available host channels. It is called from HCD interrupt
+ * handler functions.
+ *
+ * Periodic QHs in the "ready" list get channels first; remaining free
+ * channels (less those reserved for periodic use) are given to inactive
+ * non-periodic QHs.
+ *
+ * @param hcd The HCD state structure.
+ *
+ * @return The types of new transactions that were assigned to host channels.
+ */
+fh_otg_transaction_type_e fh_otg_hcd_select_transactions(fh_otg_hcd_t * hcd)
+{
+	fh_list_link_t *qh_ptr;
+	fh_otg_qh_t *qh;
+	int num_channels;
+	fh_otg_transaction_type_e ret_val = FH_OTG_TRANSACTION_NONE;
+
+#ifdef DEBUG_SOF
+	FH_DEBUGPL(DBG_HCD, "  Select Transactions\n");
+#endif
+
+	/* Process entries in the periodic ready list. */
+	qh_ptr = FH_LIST_FIRST(&hcd->periodic_sched_ready);
+
+	while (qh_ptr != &hcd->periodic_sched_ready &&
+	       !FH_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
+
+		qh = FH_LIST_ENTRY(qh_ptr, fh_otg_qh_t, qh_list_entry);
+		if (assign_and_init_hc(hcd, qh))
+			break;
+
+		/*
+		 * Move the QH from the periodic ready schedule to the
+		 * periodic assigned schedule.
+		 */
+		qh_ptr = FH_LIST_NEXT(qh_ptr);
+		FH_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
+				  &qh->qh_list_entry);
+
+		ret_val = FH_OTG_TRANSACTION_PERIODIC;
+	}
+
+	/*
+	 * Process entries in the inactive portion of the non-periodic
+	 * schedule. Some free host channels may not be used if they are
+	 * reserved for periodic transfers.
+	 */
+	qh_ptr = hcd->non_periodic_sched_inactive.next;
+	num_channels = hcd->core_if->core_params->host_channels;
+	while (qh_ptr != &hcd->non_periodic_sched_inactive &&
+	       (hcd->non_periodic_channels <
+		num_channels - hcd->periodic_channels) &&
+	       !FH_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
+		qh = FH_LIST_ENTRY(qh_ptr, fh_otg_qh_t, qh_list_entry);
+
+		if (FH_CIRCLEQ_EMPTY(&qh->qtd_list))
+			break;
+		if (assign_and_init_hc(hcd, qh))
+			break;
+
+		/*
+		 * Move the QH from the non-periodic inactive schedule to the
+		 * non-periodic active schedule.
+		 */
+		qh_ptr = FH_LIST_NEXT(qh_ptr);
+		FH_LIST_MOVE_HEAD(&hcd->non_periodic_sched_active,
+				  &qh->qh_list_entry);
+
+		if (ret_val == FH_OTG_TRANSACTION_NONE) {
+			ret_val = FH_OTG_TRANSACTION_NON_PERIODIC;
+		} else {
+			ret_val = FH_OTG_TRANSACTION_ALL;
+		}
+
+		hcd->non_periodic_channels++;
+	}
+
+	return ret_val;
+}
+
+/**
+ * Attempts to queue a single transaction request for a host channel
+ * associated with either a periodic or non-periodic transfer. This function
+ * assumes that there is space available in the appropriate request queue. For
+ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
+ * is available in the appropriate Tx FIFO.
+ *
+ * @param hcd The HCD state structure.
+ * @param hc Host channel descriptor associated with either a periodic or
+ * non-periodic transfer.
+ * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx
+ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
+ * transfers.
+ *
+ * @return 1 if a request is queued and more requests may be needed to
+ * complete the transfer, 0 if no more requests are required for this
+ * transfer, -1 if there is insufficient space in the Tx FIFO.
+ */
+static int queue_transaction(fh_otg_hcd_t * hcd,
+			     fh_hc_t * hc, uint16_t fifo_dwords_avail)
+{
+	int retval;
+
+	/* Branch priority matters: DMA first, then halt conditions, ping,
+	 * and finally the slave-mode FIFO-space checks. */
+	if (hcd->core_if->dma_enable) {
+		if (hcd->core_if->dma_desc_enable) {
+			if (!hc->xfer_started
+			    || (hc->ep_type == FH_OTG_EP_TYPE_ISOC)) {
+				fh_otg_hcd_start_xfer_ddma(hcd, hc->qh);
+				hc->qh->ping_state = 0;
+			}
+		} else if (!hc->xfer_started) {
+			fh_otg_hc_start_transfer(hcd->core_if, hc);
+			hc->qh->ping_state = 0;
+		}
+		retval = 0;
+	} else if (hc->halt_pending) {
+		/* Don't queue a request if the channel has been halted. */
+		retval = 0;
+	} else if (hc->halt_on_queue) {
+		fh_otg_hc_halt(hcd->core_if, hc, hc->halt_status);
+		retval = 0;
+	} else if (hc->do_ping) {
+		if (!hc->xfer_started) {
+			fh_otg_hc_start_transfer(hcd->core_if, hc);
+		}
+		retval = 0;
+	} else if (!hc->ep_is_in || hc->data_pid_start == FH_OTG_HC_PID_SETUP) {
+		/* OUT/SETUP in slave mode: need FIFO room for a full packet. */
+		if ((fifo_dwords_avail * 4) >= hc->max_packet) {
+			if (!hc->xfer_started) {
+				fh_otg_hc_start_transfer(hcd->core_if, hc);
+				retval = 1;
+			} else {
+				retval =
+				    fh_otg_hc_continue_transfer(hcd->core_if,
+								hc);
+			}
+		} else {
+			retval = -1;
+		}
+	} else {
+		if (!hc->xfer_started) {
+			fh_otg_hc_start_transfer(hcd->core_if, hc);
+			retval = 1;
+		} else {
+			retval = fh_otg_hc_continue_transfer(hcd->core_if, hc);
+		}
+	}
+
+	return retval;
+}
+
+/**
+ * Processes periodic channels for the next frame and queues transactions for
+ * these channels to the FH_otg controller. After queueing transactions, the
+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
+ * to queue as Periodic Tx FIFO or request queue space becomes available.
+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
+ */
+static void process_periodic_channels(fh_otg_hcd_t * hcd)
+{
+	hptxsts_data_t tx_status;
+	fh_list_link_t *qh_ptr;
+	fh_otg_qh_t *qh;
+	int status;
+	int no_queue_space = 0;
+	int no_fifo_space = 0;
+
+	fh_otg_host_global_regs_t *host_regs;
+	host_regs = hcd->core_if->host_if->host_global_regs;
+
+	FH_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n");
+#ifdef DEBUG
+	tx_status.d32 = FH_READ_REG32(&host_regs->hptxsts);
+	FH_DEBUGPL(DBG_HCDV,
+		   "  P Tx Req Queue Space Avail (before queue): %d\n",
+		   tx_status.b.ptxqspcavail);
+	FH_DEBUGPL(DBG_HCDV, "  P Tx FIFO Space Avail (before queue): %d\n",
+		   tx_status.b.ptxfspcavail);
+#endif
+
+	qh_ptr = hcd->periodic_sched_assigned.next;
+	while (qh_ptr != &hcd->periodic_sched_assigned) {
+		/* Re-read the FIFO status each iteration; it changes as
+		 * requests are queued. */
+		tx_status.d32 = FH_READ_REG32(&host_regs->hptxsts);
+		if (tx_status.b.ptxqspcavail == 0) {
+			no_queue_space = 1;
+			break;
+		}
+
+		qh = FH_LIST_ENTRY(qh_ptr, fh_otg_qh_t, qh_list_entry);
+
+		/*
+		 * Set a flag if we're queuing high-bandwidth in slave mode.
+		 * The flag prevents any halts to get into the request queue in
+		 * the middle of multiple high-bandwidth packets getting queued.
+		 */
+		if (!hcd->core_if->dma_enable && qh->channel->multi_count > 1) {
+			hcd->core_if->queuing_high_bandwidth = 1;
+		}
+		status =
+		    queue_transaction(hcd, qh->channel,
+				      tx_status.b.ptxfspcavail);
+		if (status < 0) {
+			no_fifo_space = 1;
+			break;
+		}
+
+		/*
+		 * In Slave mode, stay on the current transfer until there is
+		 * nothing more to do or the high-bandwidth request count is
+		 * reached. In DMA mode, only need to queue one request. The
+		 * controller automatically handles multiple packets for
+		 * high-bandwidth transfers.
+		 */
+		if (hcd->core_if->dma_enable || status == 0 ||
+		    qh->channel->requests == qh->channel->multi_count) {
+			qh_ptr = qh_ptr->next;
+			/*
+			 * Move the QH from the periodic assigned schedule to
+			 * the periodic queued schedule.
+			 */
+			FH_LIST_MOVE_HEAD(&hcd->periodic_sched_queued,
+					  &qh->qh_list_entry);
+
+			/* done queuing high bandwidth */
+			hcd->core_if->queuing_high_bandwidth = 0;
+		}
+	}
+
+	/* Slave mode only: manage the periodic Tx FIFO empty interrupt. */
+	if (!hcd->core_if->dma_enable) {
+		fh_otg_core_global_regs_t *global_regs;
+		gintmsk_data_t intr_mask = {.d32 = 0 };
+
+		global_regs = hcd->core_if->core_global_regs;
+		intr_mask.b.ptxfempty = 1;
+#ifdef DEBUG
+		tx_status.d32 = FH_READ_REG32(&host_regs->hptxsts);
+		FH_DEBUGPL(DBG_HCDV,
+			   "  P Tx Req Queue Space Avail (after queue): %d\n",
+			   tx_status.b.ptxqspcavail);
+		FH_DEBUGPL(DBG_HCDV,
+			   "  P Tx FIFO Space Avail (after queue): %d\n",
+			   tx_status.b.ptxfspcavail);
+#endif
+		if (!FH_LIST_EMPTY(&hcd->periodic_sched_assigned) ||
+		    no_queue_space || no_fifo_space) {
+			/*
+			 * May need to queue more transactions as the request
+			 * queue or Tx FIFO empties. Enable the periodic Tx
+			 * FIFO empty interrupt. (Always use the half-empty
+			 * level to ensure that new requests are loaded as
+			 * soon as possible.)
+			 */
+			FH_MODIFY_REG32(&global_regs->gintmsk, 0,
+					intr_mask.d32);
+		} else {
+			/*
+			 * Disable the Tx FIFO empty interrupt since there are
+			 * no more transactions that need to be queued right
+			 * now. This function is called from interrupt
+			 * handlers to queue more transactions as transfer
+			 * states change.
+			 */
+			FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32,
+					0);
+		}
+	}
+}
+
+/**
+ * Processes active non-periodic channels and queues transactions for these
+ * channels to the FH_otg controller. After queueing transactions, the NP Tx
+ * FIFO Empty interrupt is enabled if there are more transactions to queue as
+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
+ * FIFO Empty interrupt is disabled.
+ *
+ * Scheduling is round-robin: iteration resumes from hcd->non_periodic_qh_ptr
+ * (persisted across calls) and wraps once around the active list.
+ */
+static void process_non_periodic_channels(fh_otg_hcd_t * hcd)
+{
+	gnptxsts_data_t tx_status;
+	fh_list_link_t *orig_qh_ptr;
+	fh_otg_qh_t *qh;
+	int status;
+	int no_queue_space = 0;
+	int no_fifo_space = 0;
+	int more_to_do = 0;
+
+	fh_otg_core_global_regs_t *global_regs =
+	    hcd->core_if->core_global_regs;
+
+	FH_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
+#ifdef DEBUG
+	tx_status.d32 = FH_READ_REG32(&global_regs->gnptxsts);
+	FH_DEBUGPL(DBG_HCDV,
+		   "  NP Tx Req Queue Space Avail (before queue): %d\n",
+		   tx_status.b.nptxqspcavail);
+	FH_DEBUGPL(DBG_HCDV, "  NP Tx FIFO Space Avail (before queue): %d\n",
+		   tx_status.b.nptxfspcavail);
+#endif
+	/*
+	 * Keep track of the starting point. Skip over the start-of-list
+	 * entry.
+	 */
+	if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
+		hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
+	}
+	orig_qh_ptr = hcd->non_periodic_qh_ptr;
+
+	/*
+	 * Process once through the active list or until no more space is
+	 * available in the request queue or the Tx FIFO.
+	 */
+	do {
+		tx_status.d32 = FH_READ_REG32(&global_regs->gnptxsts);
+		/* Request queue space is only a constraint in slave mode. */
+		if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) {
+			no_queue_space = 1;
+			break;
+		}
+
+		qh = FH_LIST_ENTRY(hcd->non_periodic_qh_ptr, fh_otg_qh_t,
+				   qh_list_entry);
+		status =
+		    queue_transaction(hcd, qh->channel,
+				      tx_status.b.nptxfspcavail);
+
+		if (status > 0) {
+			more_to_do = 1;
+		} else if (status < 0) {
+			no_fifo_space = 1;
+			break;
+		}
+
+		/* Advance to next QH, skipping start-of-list entry. */
+		hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
+		if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
+			hcd->non_periodic_qh_ptr =
+			    hcd->non_periodic_qh_ptr->next;
+		}
+
+	} while (hcd->non_periodic_qh_ptr != orig_qh_ptr);
+
+	/* Slave mode only: manage the non-periodic Tx FIFO empty interrupt. */
+	if (!hcd->core_if->dma_enable) {
+		gintmsk_data_t intr_mask = {.d32 = 0 };
+		intr_mask.b.nptxfempty = 1;
+
+#ifdef DEBUG
+		tx_status.d32 = FH_READ_REG32(&global_regs->gnptxsts);
+		FH_DEBUGPL(DBG_HCDV,
+			   "  NP Tx Req Queue Space Avail (after queue): %d\n",
+			   tx_status.b.nptxqspcavail);
+		FH_DEBUGPL(DBG_HCDV,
+			   "  NP Tx FIFO Space Avail (after queue): %d\n",
+			   tx_status.b.nptxfspcavail);
+#endif
+		if (more_to_do || no_queue_space || no_fifo_space) {
+			/*
+			 * May need to queue more transactions as the request
+			 * queue or Tx FIFO empties. Enable the non-periodic
+			 * Tx FIFO empty interrupt. (Always use the half-empty
+			 * level to ensure that new requests are loaded as
+			 * soon as possible.)
+			 */
+			FH_MODIFY_REG32(&global_regs->gintmsk, 0,
+					intr_mask.d32);
+		} else {
+			/*
+			 * Disable the Tx FIFO empty interrupt since there are
+			 * no more transactions that need to be queued right
+			 * now. This function is called from interrupt
+			 * handlers to queue more transactions as transfer
+			 * states change.
+			 */
+			FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32,
+					0);
+		}
+	}
+}
+
+/**
+ * This function processes the currently active host channels and queues
+ * transactions for these channels to the FH_otg controller. It is called
+ * from HCD interrupt handler functions.
+ *
+ * @param hcd The HCD state structure.
+ * @param tr_type The type(s) of transactions to queue (non-periodic,
+ * periodic, or both).
+ */
+void fh_otg_hcd_queue_transactions(fh_otg_hcd_t * hcd,
+				   fh_otg_transaction_type_e tr_type)
+{
+#ifdef DEBUG_SOF
+	FH_DEBUGPL(DBG_HCD, "Queue Transactions\n");
+#endif
+	/* Process host channels associated with periodic transfers, but
+	 * only if some QHs have actually been assigned a channel. */
+	if ((tr_type == FH_OTG_TRANSACTION_PERIODIC ||
+	     tr_type == FH_OTG_TRANSACTION_ALL) &&
+	    !FH_LIST_EMPTY(&hcd->periodic_sched_assigned)) {
+
+		process_periodic_channels(hcd);
+	}
+
+	/* Process host channels associated with non-periodic transfers. */
+	if (tr_type == FH_OTG_TRANSACTION_NON_PERIODIC ||
+	    tr_type == FH_OTG_TRANSACTION_ALL) {
+		if (!FH_LIST_EMPTY(&hcd->non_periodic_sched_active)) {
+			process_non_periodic_channels(hcd);
+		} else {
+			/*
+			 * Ensure NP Tx FIFO empty interrupt is disabled when
+			 * there are no non-periodic transfers to process.
+			 */
+			gintmsk_data_t gintmsk = {.d32 = 0 };
+			gintmsk.b.nptxfempty = 1;
+			FH_MODIFY_REG32(&hcd->core_if->
+					core_global_regs->gintmsk, gintmsk.d32,
+					0);
+		}
+	}
+}
+
+#ifdef FH_HS_ELECT_TST
+/*
+ * Quick and dirty hack to implement the HS Electrical Test
+ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
+ *
+ * This code was copied from our userspace app "hset". It sends a
+ * Get Device Descriptor control sequence in two parts, first the
+ * Setup packet by itself, followed some time later by the In and
+ * Ack packets. Rather than trying to figure out how to add this
+ * functionality to the normal driver code, we just hijack the
+ * hardware, using these two function to drive the hardware
+ * directly.
+ */
+
+/*
+ * Register/FIFO shortcuts used only by the HS Electrical Test hack below.
+ * They are initialized in the UHF_PORT_TEST handling of
+ * fh_otg_hcd_hub_control() before do_setup()/do_in_ack() are invoked.
+ */
+static fh_otg_core_global_regs_t *global_regs;
+static fh_otg_host_global_regs_t *hc_global_regs;
+static fh_otg_hc_regs_t *hc_regs;
+static uint32_t *data_fifo;
+
+/**
+ * Sends the Setup stage of a Get Device Descriptor control transfer by
+ * programming host channel 0 registers directly, for the HS Electrical
+ * Test SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.  The caller (the
+ * UHF_PORT_TEST handling in fh_otg_hcd_hub_control()) masks all core
+ * interrupts in GINTMSK first, so completion is detected here by
+ * busy-polling GINTSTS.
+ */
+static void do_setup(void)
+{
+	gintsts_data_t gintsts;
+	hctsiz_data_t hctsiz;
+	hcchar_data_t hcchar;
+	haint_data_t haint;
+	hcint_data_t hcint;
+
+	/* Enable HAINTs */
+	FH_WRITE_REG32(&hc_global_regs->haintmsk, 0x0001);
+
+	/* Enable HCINTs */
+	FH_WRITE_REG32(&hc_regs->hcintmsk, 0x04a3);
+
+	/* Read GINTSTS */
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/* Read HAINT */
+	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+	/* Clear HCINT (write-1-to-clear with the value just read) */
+	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/*
+	 * Send Setup packet (Get Device Descriptor)
+	 */
+
+	/* Make sure channel is disabled */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	if (hcchar.b.chen) {
+		hcchar.b.chdis = 1;
+//		hcchar.b.chen = 1;
+		FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+		//sleep(1);
+		fh_mdelay(1000);
+
+		/* Read GINTSTS */
+		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+		/* Read HAINT */
+		haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
+
+		/* Read HCINT */
+		hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+
+		/* Read HCCHAR */
+		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+		/* Clear HCINT */
+		FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
+
+		/* Clear HAINT */
+		FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
+
+		/* Clear GINTSTS */
+		FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	}
+
+	/* Set HCTSIZ: one 8-byte SETUP packet */
+	hctsiz.d32 = 0;
+	hctsiz.b.xfersize = 8;
+	hctsiz.b.pktcnt = 1;
+	hctsiz.b.pid = FH_OTG_HC_PID_SETUP;
+	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Set HCCHAR: control OUT on EP0, MPS 8, enable channel */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	hcchar.b.eptype = FH_OTG_EP_TYPE_CONTROL;
+	hcchar.b.epdir = 0;
+	hcchar.b.epnum = 0;
+	hcchar.b.mps = 8;
+	hcchar.b.chen = 1;
+	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+
+	/* Fill FIFO with Setup data for Get Device Descriptor:
+	 * bytes 80 06 00 01 00 00 08 00 = GET_DESCRIPTOR(Device), wLength 8 */
+	data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
+	FH_WRITE_REG32(data_fifo++, 0x01000680);
+	FH_WRITE_REG32(data_fifo++, 0x00080000);
+
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/* Wait for host channel interrupt (busy-poll; GINTMSK is masked) */
+	do {
+		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+	} while (gintsts.b.hcintr == 0);
+
+	/* Disable HCINTs */
+	FH_WRITE_REG32(&hc_regs->hcintmsk, 0x0000);
+
+	/* Disable HAINTs */
+	FH_WRITE_REG32(&hc_global_regs->haintmsk, 0x0000);
+
+	/* Read HAINT */
+	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+}
+
+/**
+ * Completes the control transfer started by do_setup(): receives the
+ * Data (IN) stage on host channel 0, then sends the zero-length OUT
+ * status (handshake) packet.  Like do_setup() this drives the host
+ * channel registers directly and busy-polls GINTSTS, so it is only
+ * usable from the HS Electrical Test hack where all core interrupts
+ * are masked by the caller.
+ */
+static void do_in_ack(void)
+{
+	gintsts_data_t gintsts;
+	hctsiz_data_t hctsiz;
+	hcchar_data_t hcchar;
+	haint_data_t haint;
+	hcint_data_t hcint;
+	host_grxsts_data_t grxsts;
+
+	/* Enable HAINTs */
+	FH_WRITE_REG32(&hc_global_regs->haintmsk, 0x0001);
+
+	/* Enable HCINTs */
+	FH_WRITE_REG32(&hc_regs->hcintmsk, 0x04a3);
+
+	/* Read GINTSTS */
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/* Read HAINT */
+	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/*
+	 * Receive Control In packet
+	 */
+
+	/* Make sure channel is disabled */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	if (hcchar.b.chen) {
+		/* NOTE(review): chen is set together with chdis here, while
+		 * do_setup() leaves chen untouched in the same situation --
+		 * confirm this matches the core's channel-disable procedure. */
+		hcchar.b.chdis = 1;
+		hcchar.b.chen = 1;
+		FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+		//sleep(1);
+		fh_mdelay(1000);
+
+		/* Read GINTSTS */
+		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+		/* Read HAINT */
+		haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
+
+		/* Read HCINT */
+		hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+
+		/* Read HCCHAR */
+		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+		/* Clear HCINT */
+		FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
+
+		/* Clear HAINT */
+		FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
+
+		/* Clear GINTSTS */
+		FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	}
+
+	/* Set HCTSIZ: expect one 8-byte DATA1 packet */
+	hctsiz.d32 = 0;
+	hctsiz.b.xfersize = 8;
+	hctsiz.b.pktcnt = 1;
+	hctsiz.b.pid = FH_OTG_HC_PID_DATA1;
+	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Set HCCHAR: control IN on EP0, MPS 8, enable channel */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	hcchar.b.eptype = FH_OTG_EP_TYPE_CONTROL;
+	hcchar.b.epdir = 1;
+	hcchar.b.epnum = 0;
+	hcchar.b.mps = 8;
+	hcchar.b.chen = 1;
+	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/* Wait for receive status queue interrupt (IN data received) */
+	do {
+		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+	} while (gintsts.b.rxstsqlvl == 0);
+
+	/* Read RXSTS (pop the receive status queue) */
+	grxsts.d32 = FH_READ_REG32(&global_regs->grxstsp);
+
+	/* Clear RXSTSQLVL in GINTSTS */
+	gintsts.d32 = 0;
+	gintsts.b.rxstsqlvl = 1;
+	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+	switch (grxsts.b.pktsts) {
+	case FH_GRXSTS_PKTSTS_IN:
+		/* Read the data into the host buffer (drained and discarded;
+		 * round byte count up to whole 32-bit FIFO words) */
+		if (grxsts.b.bcnt > 0) {
+			int i;
+			int word_count = (grxsts.b.bcnt + 3) / 4;
+
+			data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
+
+			for (i = 0; i < word_count; i++) {
+				(void)FH_READ_REG32(data_fifo++);
+			}
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/* Wait for the second receive status queue entry (transfer complete) */
+	do {
+		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+	} while (gintsts.b.rxstsqlvl == 0);
+
+	/* Read RXSTS */
+	grxsts.d32 = FH_READ_REG32(&global_regs->grxstsp);
+
+	/* Clear RXSTSQLVL in GINTSTS */
+	gintsts.d32 = 0;
+	gintsts.b.rxstsqlvl = 1;
+	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+	switch (grxsts.b.pktsts) {
+	case FH_GRXSTS_PKTSTS_IN_XFER_COMP:
+		break;
+
+	default:
+		break;
+	}
+
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/* Wait for host channel interrupt */
+	do {
+		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+	} while (gintsts.b.hcintr == 0);
+
+	/* Read HAINT */
+	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+// 	usleep(100000);
+// 	mdelay(100);
+	fh_mdelay(1);
+
+	/*
+	 * Send handshake packet
+	 */
+
+	/* Read HAINT */
+	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/* Make sure channel is disabled */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	if (hcchar.b.chen) {
+		hcchar.b.chdis = 1;
+		hcchar.b.chen = 1;
+		FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+		//sleep(1);
+		fh_mdelay(1000);
+
+		/* Read GINTSTS */
+		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+		/* Read HAINT */
+		haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
+
+		/* Read HCINT */
+		hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+
+		/* Read HCCHAR */
+		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+		/* Clear HCINT */
+		FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
+
+		/* Clear HAINT */
+		FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
+
+		/* Clear GINTSTS */
+		FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	}
+
+	/* Set HCTSIZ: zero-length status packet, DATA1 PID */
+	hctsiz.d32 = 0;
+	hctsiz.b.xfersize = 0;
+	hctsiz.b.pktcnt = 1;
+	hctsiz.b.pid = FH_OTG_HC_PID_DATA1;
+	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Set HCCHAR: control OUT on EP0, MPS 8, enable channel */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+	hcchar.b.eptype = FH_OTG_EP_TYPE_CONTROL;
+	hcchar.b.epdir = 0;
+	hcchar.b.epnum = 0;
+	hcchar.b.mps = 8;
+	hcchar.b.chen = 1;
+	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
+
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+
+	/* Wait for host channel interrupt */
+	do {
+		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+	} while (gintsts.b.hcintr == 0);
+
+	/* Disable HCINTs */
+	FH_WRITE_REG32(&hc_regs->hcintmsk, 0x0000);
+
+	/* Disable HAINTs */
+	FH_WRITE_REG32(&hc_global_regs->haintmsk, 0x0000);
+
+	/* Read HAINT */
+	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
+
+	/* Read HCINT */
+	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+
+	/* Read HCCHAR */
+	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+
+	/* Clear HCINT */
+	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
+}
+#endif
+
+/** Handles hub class-specific requests. */
+int fh_otg_hcd_hub_control(fh_otg_hcd_t * fh_otg_hcd,
+ uint16_t typeReq,
+ uint16_t wValue,
+ uint16_t wIndex, uint8_t * buf, uint16_t wLength)
+{
+ int retval = 0;
+
+ fh_otg_core_if_t *core_if = fh_otg_hcd->core_if;
+ usb_hub_descriptor_t *hub_desc;
+ hprt0_data_t hprt0 = {.d32 = 0 };
+
+ uint32_t port_status;
+
+ switch (typeReq) {
+ case UCR_CLEAR_HUB_FEATURE:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "ClearHubFeature 0x%x\n", wValue);
+ switch (wValue) {
+ case UHF_C_HUB_LOCAL_POWER:
+ case UHF_C_HUB_OVER_CURRENT:
+ /* Nothing required here */
+ break;
+ default:
+ retval = -FH_E_INVALID;
+ FH_ERROR("FH OTG HCD - "
+ "ClearHubFeature request %xh unknown\n",
+ wValue);
+ }
+ break;
+ case UCR_CLEAR_PORT_FEATURE:
+#ifdef CONFIG_USB_FH_OTG_LPM
+ if (wValue != UHF_PORT_L1)
+#endif
+ if (!wIndex || wIndex > 1)
+ goto error;
+
+ switch (wValue) {
+ case UHF_PORT_ENABLE:
+ FH_DEBUGPL(DBG_ANY, "FH OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtena = 1;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case UHF_PORT_SUSPEND:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+
+ if (core_if->power_down == 2) {
+ fh_otg_host_hibernation_restore(core_if, 0, 0);
+ } else {
+ FH_WRITE_REG32(core_if->pcgcctl, 0);
+ fh_mdelay(5);
+
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtres = 1;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ hprt0.b.prtsusp = 0;
+ /* Clear Resume bit */
+ fh_mdelay(100);
+ hprt0.b.prtres = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ }
+ break;
+#ifdef CONFIG_USB_FH_OTG_LPM
+ case UHF_PORT_L1:
+ {
+ pcgcctl_data_t pcgcctl = {.d32 = 0 };
+ glpmcfg_data_t lpmcfg = {.d32 = 0 };
+
+ lpmcfg.d32 =
+ FH_READ_REG32(&core_if->
+ core_global_regs->glpmcfg);
+ lpmcfg.b.en_utmi_sleep = 0;
+ lpmcfg.b.hird_thres &= (~(1 << 4));
+ lpmcfg.b.prt_sleep_sts = 1;
+ FH_WRITE_REG32(&core_if->
+ core_global_regs->glpmcfg,
+ lpmcfg.d32);
+
+ /* Clear Enbl_L1Gating bit. */
+ pcgcctl.b.enbl_sleep_gating = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32,
+ 0);
+
+ fh_mdelay(5);
+
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtres = 1;
+ FH_WRITE_REG32(core_if->host_if->hprt0,
+ hprt0.d32);
+ /* This bit will be cleared in wakeup interrupt handle */
+ break;
+ }
+#endif
+ case UHF_PORT_POWER:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_POWER\n");
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtpwr = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case UHF_PORT_INDICATOR:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
+ /* Port inidicator not supported */
+ break;
+ case UHF_C_PORT_CONNECTION:
+ /* Clears drivers internal connect status change
+ * flag */
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
+ fh_otg_hcd->flags.b.port_connect_status_change = 0;
+ break;
+ case UHF_C_PORT_RESET:
+ /* Clears the driver's internal Port Reset Change
+ * flag */
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
+ fh_otg_hcd->flags.b.port_reset_change = 0;
+ break;
+ case UHF_C_PORT_ENABLE:
+ /* Clears the driver's internal Port
+ * Enable/Disable Change flag */
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
+ fh_otg_hcd->flags.b.port_enable_change = 0;
+ break;
+ case UHF_C_PORT_SUSPEND:
+ /* Clears the driver's internal Port Suspend
+ * Change flag, which is set when resume signaling on
+ * the host port is complete */
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
+ fh_otg_hcd->flags.b.port_suspend_change = 0;
+ break;
+#ifdef CONFIG_USB_FH_OTG_LPM
+ case UHF_C_PORT_L1:
+ fh_otg_hcd->flags.b.port_l1_change = 0;
+ break;
+#endif
+ case UHF_C_PORT_OVER_CURRENT:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
+ fh_otg_hcd->flags.b.port_over_current_change = 0;
+ break;
+ default:
+ retval = -FH_E_INVALID;
+ FH_ERROR("FH OTG HCD - "
+ "ClearPortFeature request %xh "
+ "unknown or unsupported\n", wValue);
+ }
+ break;
+ case UCR_GET_HUB_DESCRIPTOR:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "GetHubDescriptor\n");
+ hub_desc = (usb_hub_descriptor_t *) buf;
+ hub_desc->bDescLength = 9;
+ hub_desc->bDescriptorType = 0x29;
+ hub_desc->bNbrPorts = 1;
+ USETW(hub_desc->wHubCharacteristics, 0x08);
+ hub_desc->bPwrOn2PwrGood = 1;
+ hub_desc->bHubContrCurrent = 0;
+ hub_desc->DeviceRemovable[0] = 0;
+ hub_desc->DeviceRemovable[1] = 0xff;
+ break;
+ case UCR_GET_HUB_STATUS:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "GetHubStatus\n");
+ FH_MEMSET(buf, 0, 4);
+ break;
+ case UCR_GET_PORT_STATUS:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "GetPortStatus wIndex = 0x%04x FLAGS=0x%08x\n",
+ wIndex, fh_otg_hcd->flags.d32);
+ if (!wIndex || wIndex > 1)
+ goto error;
+
+ port_status = 0;
+
+ if (fh_otg_hcd->flags.b.port_connect_status_change)
+ port_status |= (1 << UHF_C_PORT_CONNECTION);
+
+ if (fh_otg_hcd->flags.b.port_enable_change)
+ port_status |= (1 << UHF_C_PORT_ENABLE);
+
+ if (fh_otg_hcd->flags.b.port_suspend_change)
+ port_status |= (1 << UHF_C_PORT_SUSPEND);
+
+ if (fh_otg_hcd->flags.b.port_l1_change)
+ port_status |= (1 << UHF_C_PORT_L1);
+
+ if (fh_otg_hcd->flags.b.port_reset_change) {
+ port_status |= (1 << UHF_C_PORT_RESET);
+ }
+
+ if (fh_otg_hcd->flags.b.port_over_current_change) {
+ FH_WARN("Overcurrent change detected\n");
+ port_status |= (1 << UHF_C_PORT_OVER_CURRENT);
+ }
+
+ if (!fh_otg_hcd->flags.b.port_connect_status) {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return 0's for the remainder of the port status
+ * since the port register can't be read if the core
+ * is in device mode.
+ */
+ *((__le32 *) buf) = fh_cpu_to_le32(&port_status);
+ break;
+ }
+
+ hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
+ FH_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
+
+ if (hprt0.b.prtconnsts)
+ port_status |= (1 << UHF_PORT_CONNECTION);
+
+ if (hprt0.b.prtena)
+ port_status |= (1 << UHF_PORT_ENABLE);
+
+ if (hprt0.b.prtsusp)
+ port_status |= (1 << UHF_PORT_SUSPEND);
+
+ if (hprt0.b.prtovrcurract)
+ port_status |= (1 << UHF_PORT_OVER_CURRENT);
+
+ if (hprt0.b.prtrst)
+ port_status |= (1 << UHF_PORT_RESET);
+
+ if (hprt0.b.prtpwr)
+ port_status |= (1 << UHF_PORT_POWER);
+
+ if (hprt0.b.prtspd == FH_HPRT0_PRTSPD_HIGH_SPEED)
+ port_status |= (1 << UHF_PORT_HIGH_SPEED);
+ else if (hprt0.b.prtspd == FH_HPRT0_PRTSPD_LOW_SPEED)
+ port_status |= (1 << UHF_PORT_LOW_SPEED);
+
+ if (hprt0.b.prttstctl)
+ port_status |= (1 << UHF_PORT_TEST);
+ if (fh_otg_get_lpm_portsleepstatus(fh_otg_hcd->core_if)) {
+ port_status |= (1 << UHF_PORT_L1);
+ }
+		/*
+		 * For the Synopsys HW emulation of Power Down, wkup_control
+		 * asserts hreset_n and prst_n on suspend, which causes HPRT0
+		 * to read as zero. We intentionally report the port as being
+		 * in the L2 (Suspend) state. Only for STE.
+		 */
+ if ((core_if->power_down == 2)
+ && (core_if->hibernation_suspend == 1)) {
+ port_status |= (1 << UHF_PORT_SUSPEND);
+ }
+ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */
+
+ *((__le32 *) buf) = fh_cpu_to_le32(&port_status);
+
+ break;
+ case UCR_SET_HUB_FEATURE:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "SetHubFeature\n");
+ /* No HUB features supported */
+ break;
+ case UCR_SET_PORT_FEATURE:
+ if (wValue != UHF_PORT_TEST && (!wIndex || wIndex > 1))
+ goto error;
+
+ if (!fh_otg_hcd->flags.b.port_connect_status) {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return without doing anything since the port
+ * register can't be written if the core is in device
+ * mode.
+ */
+ break;
+ }
+
+ switch (wValue) {
+ case UHF_PORT_SUSPEND:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
+ if (fh_otg_hcd_otg_port(fh_otg_hcd) != wIndex) {
+ goto error;
+ }
+ if (core_if->power_down == 2) {
+ int timeout = 300;
+ fh_irqflags_t flags;
+ pcgcctl_data_t pcgcctl = {.d32 = 0 };
+ gpwrdn_data_t gpwrdn = {.d32 = 0 };
+ gusbcfg_data_t gusbcfg = {.d32 = 0 };
+#ifdef FH_DEV_SRPCAP
+ int32_t otg_cap_param = core_if->core_params->otg_cap;
+#endif
+ FH_PRINTF("Preparing for complete power-off\n");
+
+ /* Save registers before hibernation */
+ fh_otg_save_global_regs(core_if);
+ fh_otg_save_host_regs(core_if);
+
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 1;
+ hprt0.b.prtena = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+				/* Spin until hprt0.b.prtsusp becomes 1 */
+ do {
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ if (hprt0.b.prtsusp) {
+ break;
+ }
+ fh_mdelay(1);
+ } while (--timeout);
+ if (!timeout) {
+ FH_WARN("Suspend wasn't genereted\n");
+ }
+ fh_udelay(10);
+
+ /*
+ * We need to disable interrupts to prevent servicing of any IRQ
+ * during going to hibernation
+ */
+ FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);
+ core_if->lx_state = FH_OTG_L2;
+#ifdef FH_DEV_SRPCAP
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtpwr = 0;
+ hprt0.b.prtena = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0,
+ hprt0.d32);
+#endif
+ gusbcfg.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->
+ gusbcfg);
+ if (gusbcfg.b.ulpi_utmi_sel == 1) {
+ /* ULPI interface */
+ /* Suspend the Phy Clock */
+ pcgcctl.d32 = 0;
+ pcgcctl.b.stoppclk = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, 0,
+ pcgcctl.d32);
+ fh_udelay(10);
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->
+ core_global_regs->
+ gpwrdn, 0, gpwrdn.d32);
+ } else {
+ /* UTMI+ Interface */
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->
+ core_global_regs->
+ gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+ pcgcctl.b.stoppclk = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
+ fh_udelay(10);
+ }
+#ifdef FH_DEV_SRPCAP
+ gpwrdn.d32 = 0;
+ gpwrdn.b.dis_vbus = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, 0, gpwrdn.d32);
+#endif
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuintsel = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+
+ gpwrdn.d32 = 0;
+#ifdef FH_DEV_SRPCAP
+ gpwrdn.b.srp_det_msk = 1;
+#endif
+ gpwrdn.b.disconn_det_msk = 1;
+ gpwrdn.b.lnstchng_msk = 1;
+ gpwrdn.b.sts_chngint_msk = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+
+ /* Enable Power Down Clamp and all interrupts in GPWRDN */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnclmp = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, 0, gpwrdn.d32);
+ fh_udelay(10);
+
+ /* Switch off VDD */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pwrdnswtch = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, 0, gpwrdn.d32);
+
+#ifdef FH_DEV_SRPCAP
+ if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE)
+ {
+ core_if->pwron_timer_started = 1;
+ FH_TIMER_SCHEDULE(core_if->pwron_timer, 6000 /* 6 secs */ );
+ }
+#endif
+ /* Save gpwrdn register for further usage if stschng interrupt */
+ core_if->gr_backup->gpwrdn_local =
+ FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
+
+ /* Set flag to indicate that we are in hibernation */
+ core_if->hibernation_suspend = 1;
+ FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock,flags);
+
+ FH_PRINTF("Host hibernation completed\n");
+ // Exit from case statement
+ break;
+
+ }
+ if (fh_otg_hcd_otg_port(fh_otg_hcd) == wIndex &&
+ fh_otg_hcd->fops->get_b_hnp_enable(fh_otg_hcd)) {
+ gotgctl_data_t gotgctl = {.d32 = 0 };
+ gotgctl.b.hstsethnpen = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->
+ gotgctl, 0, gotgctl.d32);
+ core_if->op_state = A_SUSPEND;
+ }
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 1;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ {
+ fh_irqflags_t flags;
+ /* Update lx_state */
+ FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);
+ core_if->lx_state = FH_OTG_L2;
+ FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, flags);
+ }
+ /* Suspend the Phy Clock */
+ if (core_if->otg_ver == 0) {
+ pcgcctl_data_t pcgcctl = {.d32 = 0 };
+ pcgcctl.b.stoppclk = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, 0,
+ pcgcctl.d32);
+ fh_udelay(10);
+ }
+
+ /* For HNP the bus must be suspended for at least 200ms. */
+ if (fh_otg_hcd->fops->get_b_hnp_enable(fh_otg_hcd)) {
+ if (core_if->otg_ver) {
+ pcgcctl_data_t pcgcctl = {.d32 = 0 };
+ pcgcctl.b.stoppclk = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+ }
+ fh_mdelay(200);
+ }
+
+ /** @todo - check how sw can wait for 1 sec to check asesvld??? */
+#if 0 //vahrama !!!!!!!!!!!!!!!!!!
+ if (core_if->adp_enable) {
+ gotgctl_data_t gotgctl = {.d32 = 0 };
+ gpwrdn_data_t gpwrdn;
+
+ while (gotgctl.b.asesvld == 1) {
+ gotgctl.d32 =
+ FH_READ_REG32(&core_if->
+ core_global_regs->
+ gotgctl);
+ fh_mdelay(100);
+ }
+
+ /* Enable Power Down Logic */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, 0, gpwrdn.d32);
+
+ /* Unmask SRP detected interrupt from Power Down Logic */
+ gpwrdn.d32 = 0;
+ gpwrdn.b.srp_det_msk = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->
+ gpwrdn, 0, gpwrdn.d32);
+
+ fh_otg_adp_probe_start(core_if);
+ }
+#endif
+ break;
+ case UHF_PORT_POWER:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_POWER\n");
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prtpwr = 1;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case UHF_PORT_RESET:
+ if ((core_if->power_down == 2)
+ && (core_if->hibernation_suspend == 1)) {
+ /* If we are going to exit from Hibernated
+ * state via USB RESET.
+ */
+ fh_otg_host_hibernation_restore(core_if, 0, 1);
+ } else {
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+
+ FH_DEBUGPL(DBG_HCD,
+ "FH OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_RESET\n");
+ {
+ pcgcctl_data_t pcgcctl = {.d32 = 0 };
+ pcgcctl.b.enbl_sleep_gating = 1;
+ pcgcctl.b.stoppclk = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
+ FH_WRITE_REG32(core_if->pcgcctl, 0);
+ }
+#ifdef CONFIG_USB_FH_OTG_LPM
+ {
+ glpmcfg_data_t lpmcfg;
+ lpmcfg.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ if (lpmcfg.b.prt_sleep_sts) {
+ lpmcfg.b.en_utmi_sleep = 0;
+ lpmcfg.b.hird_thres &= (~(1 << 4));
+ FH_WRITE_REG32
+ (&core_if->core_global_regs->glpmcfg,
+ lpmcfg.d32);
+ fh_mdelay(1);
+ }
+ }
+#endif
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ /* Clear suspend bit if resetting from suspended state. */
+ hprt0.b.prtsusp = 0;
+ /* When B-Host the Port reset bit is set in
+ * the Start HCD Callback function, so that
+ * the reset is started within 1ms of the HNP
+ * success interrupt. */
+ if (!fh_otg_hcd_is_b_host(fh_otg_hcd)) {
+ hprt0.b.prtpwr = 1;
+ hprt0.b.prtrst = 1;
+ FH_PRINTF("Indeed it is in host mode hprt0 = %08x\n",hprt0.d32);
+ FH_WRITE_REG32(core_if->host_if->hprt0,
+ hprt0.d32);
+ }
+ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
+ fh_mdelay(60);
+ hprt0.b.prtrst = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ core_if->lx_state = FH_OTG_L0; /* Now back to the on state */
+ }
+ break;
+#ifdef FH_HS_ELECT_TST
+ case UHF_PORT_TEST:
+ {
+ uint32_t t;
+ gintmsk_data_t gintmsk;
+
+ t = (wIndex >> 8); /* MSB wIndex USB */
+ FH_DEBUGPL(DBG_HCD,
+ "FH OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_TEST %d\n",
+ t);
+ FH_WARN("USB_PORT_FEAT_TEST %d\n", t);
+ if (t < 6) {
+ hprt0.d32 = fh_otg_read_hprt0(core_if);
+ hprt0.b.prttstctl = t;
+ FH_WRITE_REG32(core_if->host_if->hprt0,
+ hprt0.d32);
+ } else {
+ /* Setup global vars with reg addresses (quick and
+ * dirty hack, should be cleaned up)
+ */
+ global_regs = core_if->core_global_regs;
+ hc_global_regs =
+ core_if->host_if->host_global_regs;
+ hc_regs =
+ (fh_otg_hc_regs_t *) ((char *)
+ global_regs +
+ 0x500);
+ data_fifo =
+ (uint32_t *) ((char *)global_regs +
+ 0x1000);
+
+ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */
+ /* Save current interrupt mask */
+ gintmsk.d32 =
+ FH_READ_REG32
+ (&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ FH_WRITE_REG32(&global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ fh_mdelay(15000);
+
+ /* Drive suspend on the root port */
+ hprt0.d32 =
+ fh_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 1;
+ hprt0.b.prtres = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+
+ /* 15 second delay per the test spec */
+ fh_mdelay(15000);
+
+ /* Drive resume on the root port */
+ hprt0.d32 =
+ fh_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 0;
+ hprt0.b.prtres = 1;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+ fh_mdelay(100);
+
+ /* Clear the resume bit */
+ hprt0.b.prtres = 0;
+ FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
+
+ /* Restore interrupts */
+ FH_WRITE_REG32(&global_regs->gintmsk, gintmsk.d32);
+ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
+ /* Save current interrupt mask */
+ gintmsk.d32 =
+ FH_READ_REG32
+ (&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ FH_WRITE_REG32(&global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ fh_mdelay(15000);
+
+ /* Send the Setup packet */
+ do_setup();
+
+ /* 15 second delay so nothing else happens for awhile */
+ fh_mdelay(15000);
+
+ /* Restore interrupts */
+ FH_WRITE_REG32(&global_regs->gintmsk, gintmsk.d32);
+ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
+ /* Save current interrupt mask */
+ gintmsk.d32 =
+ FH_READ_REG32
+ (&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ FH_WRITE_REG32(&global_regs->gintmsk, 0);
+
+ /* Send the Setup packet */
+ do_setup();
+
+ /* 15 second delay so nothing else happens for awhile */
+ fh_mdelay(15000);
+
+ /* Send the In and Ack packets */
+ do_in_ack();
+
+ /* 15 second delay so nothing else happens for awhile */
+ fh_mdelay(15000);
+
+ /* Restore interrupts */
+ FH_WRITE_REG32(&global_regs->gintmsk, gintmsk.d32);
+ }
+ }
+ break;
+ }
+#endif /* FH_HS_ELECT_TST */
+
+ case UHF_PORT_INDICATOR:
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
+ /* Not supported */
+ break;
+ default:
+ retval = -FH_E_INVALID;
+ FH_ERROR("FH OTG HCD - "
+ "SetPortFeature request %xh "
+ "unknown or unsupported\n", wValue);
+ break;
+ }
+ break;
+#ifdef CONFIG_USB_FH_OTG_LPM
+ case UCR_SET_AND_TEST_PORT_FEATURE:
+ if (wValue != UHF_PORT_L1) {
+ goto error;
+ }
+ {
+ int portnum, hird, devaddr, remwake;
+ glpmcfg_data_t lpmcfg;
+ uint32_t time_usecs;
+ gintsts_data_t gintsts;
+ gintmsk_data_t gintmsk;
+
+ if (!fh_otg_get_param_lpm_enable(core_if)) {
+ goto error;
+ }
+ if (wValue != UHF_PORT_L1 || wLength != 1) {
+ goto error;
+ }
+ /* Check if the port currently is in SLEEP state */
+ lpmcfg.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ if (lpmcfg.b.prt_sleep_sts) {
+ FH_INFO("Port is already in sleep mode\n");
+ buf[0] = 0; /* Return success */
+ break;
+ }
+
+ portnum = wIndex & 0xf;
+ hird = (wIndex >> 4) & 0xf;
+ devaddr = (wIndex >> 8) & 0x7f;
+ remwake = (wIndex >> 15);
+
+ if (portnum != 1) {
+ retval = -FH_E_INVALID;
+ FH_WARN
+ ("Wrong port number(%d) in SetandTestPortFeature request\n",
+ portnum);
+ break;
+ }
+
+ FH_PRINTF
+ ("SetandTestPortFeature request: portnum = %d, hird = %d, devaddr = %d, rewake = %d\n",
+ portnum, hird, devaddr, remwake);
+ /* Disable LPM interrupt */
+ gintmsk.d32 = 0;
+ gintmsk.b.lpmtranrcvd = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
+ gintmsk.d32, 0);
+
+ if (fh_otg_hcd_send_lpm
+ (fh_otg_hcd, devaddr, hird, remwake)) {
+ retval = -FH_E_INVALID;
+ break;
+ }
+
+ time_usecs = 10 * (lpmcfg.b.retry_count + 1);
+ /* We will consider timeout if time_usecs microseconds pass,
+ * and we don't receive LPM transaction status.
+			 * After receiving non-error response (ACK/NYET/STALL) from device,
+ * core will set lpmtranrcvd bit.
+ */
+ do {
+ gintsts.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->gintsts);
+ if (gintsts.b.lpmtranrcvd) {
+ break;
+ }
+ fh_udelay(1);
+ } while (--time_usecs);
+ /* lpm_int bit will be cleared in LPM interrupt handler */
+
+			/* Now fill status (completion code written to buf[0]):
+			 * 0x0 - Success (ACK)
+			 * 0x2 - NYET
+			 * 0x3 - Timeout
+			 */
+ if (!gintsts.b.lpmtranrcvd) {
+ buf[0] = 0x3; /* Completion code is Timeout */
+ fh_otg_hcd_free_hc_from_lpm(fh_otg_hcd);
+ } else {
+ lpmcfg.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+ if (lpmcfg.b.lpm_resp == 0x3) {
+					/* ACK response from the device */
+ buf[0] = 0x00; /* Success */
+ } else if (lpmcfg.b.lpm_resp == 0x2) {
+					/* NYET response from the device */
+ buf[0] = 0x2;
+ } else {
+					/* Otherwise respond with Timeout */
+ buf[0] = 0x3;
+ }
+ }
+ FH_PRINTF("Device responce to LPM trans is %x\n",
+ lpmcfg.b.lpm_resp);
+ FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0,
+ gintmsk.d32);
+
+ break;
+ }
+#endif /* CONFIG_USB_FH_OTG_LPM */
+ default:
+error:
+ retval = -FH_E_INVALID;
+ FH_WARN("FH OTG HCD - "
+ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
+ typeReq, wIndex, wValue);
+ break;
+ }
+
+ return retval;
+}
+
+#ifdef CONFIG_USB_FH_OTG_LPM
+/** Returns index of host channel to perform LPM transaction. */
+int fh_otg_hcd_get_hc_for_lpm_tran(fh_otg_hcd_t * hcd, uint8_t devaddr)
+{
+	fh_otg_core_if_t *core_if = hcd->core_if;
+	fh_hc_t *hc;
+	hcchar_data_t hcchar;
+	gintmsk_data_t gintmsk = {.d32 = 0 };
+
+	if (FH_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
+		FH_PRINTF("No free channel to select for LPM transaction\n");
+		return -1;	/* no channel available; caller must bail out */
+	}
+
+	hc = FH_CIRCLEQ_FIRST(&hcd->free_hc_list);	/* peek first free channel; unlinked below */
+
+	/* Mask host channel interrupts. */
+	gintmsk.b.hcintr = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
+
+	/* Fill fields that core needs for LPM transaction */
+	hcchar.b.devaddr = devaddr;	/* NOTE(review): hcchar.d32 is never zero-initialized, so bits not assigned below carry stack garbage into the HCCHAR write -- confirm intended */
+	hcchar.b.epnum = 0;	/* control endpoint 0 */
+	hcchar.b.eptype = FH_OTG_EP_TYPE_CONTROL;
+	hcchar.b.mps = 64;	/* EP0 max packet size used here */
+	hcchar.b.lspddev = (hc->speed == FH_OTG_EP_SPEED_LOW);
+	hcchar.b.epdir = 0;	/* OUT */
+	FH_WRITE_REG32(&core_if->host_if->hc_regs[hc->hc_num]->hcchar,
+		       hcchar.d32);
+
+	/* Remove the host channel from the free list. */
+	FH_CIRCLEQ_REMOVE_INIT(&hcd->free_hc_list, hc, hc_list_entry);
+
+	FH_PRINTF("hcnum = %d devaddr = %d\n", hc->hc_num, devaddr);
+
+	return hc->hc_num;	/* channel index; caller programs it into GLPMCFG */
+}
+
+/** Release hc after performing LPM transaction */
+void fh_otg_hcd_free_hc_from_lpm(fh_otg_hcd_t * hcd)
+{
+	fh_hc_t *hc;
+	glpmcfg_data_t lpmcfg;
+	uint8_t hc_num;
+
+	lpmcfg.d32 = FH_READ_REG32(&hcd->core_if->core_global_regs->glpmcfg);
+	hc_num = lpmcfg.b.lpm_chan_index;	/* channel that carried the LPM transaction */
+
+	hc = hcd->hc_ptr_array[hc_num];	/* look up the channel object by index */
+
+	FH_PRINTF("Freeing channel %d after LPM\n", hc_num);
+	/* Return host channel to free list */
+	FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
+}
+
+int fh_otg_hcd_send_lpm(fh_otg_hcd_t * hcd, uint8_t devaddr, uint8_t hird,
+			uint8_t bRemoteWake)
+{
+	glpmcfg_data_t lpmcfg;
+	pcgcctl_data_t pcgcctl = {.d32 = 0 };
+	int channel;
+
+	channel = fh_otg_hcd_get_hc_for_lpm_tran(hcd, devaddr);	/* claim a free host channel */
+	if (channel < 0) {
+		return channel;	/* no free channel; propagate error */
+	}
+
+	pcgcctl.b.enbl_sleep_gating = 1;	/* allow clock gating while port sleeps */
+	FH_MODIFY_REG32(hcd->core_if->pcgcctl, 0, pcgcctl.d32);
+
+	/* Read LPM config register */
+	lpmcfg.d32 = FH_READ_REG32(&hcd->core_if->core_global_regs->glpmcfg);
+
+	/* Program LPM transaction fields */
+	lpmcfg.b.rem_wkup_en = bRemoteWake;
+	lpmcfg.b.hird = hird;	/* HIRD value from the hub request -- units per LPM spec, TODO confirm */
+
+	if(fh_otg_get_param_besl_enable(hcd->core_if)) {
+		lpmcfg.b.hird_thres = 0x16;
+		lpmcfg.b.en_besl = 1;	/* BESL mode enabled by core parameter */
+	} else {
+		lpmcfg.b.hird_thres = 0x1c;
+	}
+
+	lpmcfg.b.lpm_chan_index = channel;	/* channel prepared above */
+	lpmcfg.b.en_utmi_sleep = 1;
+	/* Program LPM config register */
+	FH_WRITE_REG32(&hcd->core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+
+	/* Send LPM transaction */
+	lpmcfg.b.send_lpm = 1;	/* second write triggers the token */
+	FH_WRITE_REG32(&hcd->core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+
+	return 0;
+}
+
+#endif /* CONFIG_USB_FH_OTG_LPM */
+
+int fh_otg_hcd_is_status_changed(fh_otg_hcd_t * hcd, int port)
+{
+	int retval;
+
+	if (port != 1) {
+		return -FH_E_INVALID;	/* single root port; only port 1 is valid */
+	}
+
+	retval = (hcd->flags.b.port_connect_status_change ||	/* nonzero if any root-port change flag set */
+		  hcd->flags.b.port_reset_change ||
+		  hcd->flags.b.port_enable_change ||
+		  hcd->flags.b.port_suspend_change ||
+		  hcd->flags.b.port_over_current_change);
+#ifdef DEBUG
+	if (retval) {
+		FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB STATUS DATA:"
+			   " Root port status changed\n");
+		FH_DEBUGPL(DBG_HCDV, "  port_connect_status_change: %d\n",
+			   hcd->flags.b.port_connect_status_change);
+		FH_DEBUGPL(DBG_HCDV, "  port_reset_change: %d\n",
+			   hcd->flags.b.port_reset_change);
+		FH_DEBUGPL(DBG_HCDV, "  port_enable_change: %d\n",
+			   hcd->flags.b.port_enable_change);
+		FH_DEBUGPL(DBG_HCDV, "  port_suspend_change: %d\n",
+			   hcd->flags.b.port_suspend_change);
+		FH_DEBUGPL(DBG_HCDV, "  port_over_current_change: %d\n",
+			   hcd->flags.b.port_over_current_change);
+	}
+#endif
+	return retval;
+}
+
+int fh_otg_hcd_get_frame_number(fh_otg_hcd_t * fh_otg_hcd)
+{
+	hfnum_data_t hfnum;
+	hfnum.d32 =
+	    FH_READ_REG32(&fh_otg_hcd->core_if->host_if->host_global_regs->
+			  hfnum);	/* HFNUM holds the current (micro)frame number */
+
+#ifdef DEBUG_SOF
+	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD GET FRAME NUMBER %d\n",
+		   hfnum.b.frnum);
+#endif
+	return hfnum.b.frnum;	/* frame counter field only */
+}
+
+int fh_otg_hcd_start(fh_otg_hcd_t * hcd,
+		     struct fh_otg_hcd_function_ops *fops)
+{
+	int retval = 0;
+	hprt0_data_t hprt0;
+
+	hcd->fops = fops;	/* install driver callbacks before any HCD activity */
+	if (!fh_otg_is_device_mode(hcd->core_if) &&
+	    (!hcd->core_if->adp_enable || hcd->core_if->adp.adp_started)) {
+		fh_otg_hcd_reinit(hcd);	/* host mode, and ADP is off or already running */
+	} else {
+		if (hcd->core_if->adp_enable) {
+			/* Clear any interrupt pending in the HPRT, sometimes
+			 * Port Connect Detected is not being cleared*/
+			hprt0.d32 = FH_READ_REG32(hcd->core_if->host_if->hprt0);
+			FH_WRITE_REG32(hcd->core_if->host_if->hprt0, hprt0.d32);	/* write back read value to clear latched bits (see comment) */
+		}
+		retval = -FH_E_NO_DEVICE;	/* device mode or ADP not started: nothing to host */
+	}
+
+	return retval;
+}
+
+void *fh_otg_hcd_get_priv_data(fh_otg_hcd_t * hcd)
+{
+	return hcd->priv;	/* opaque pointer stored via fh_otg_hcd_set_priv_data() */
+}
+
+void fh_otg_hcd_set_priv_data(fh_otg_hcd_t * hcd, void *priv_data)
+{
+	hcd->priv = priv_data;	/* HCD does not own or interpret this pointer */
+}
+
+uint32_t fh_otg_hcd_otg_port(fh_otg_hcd_t * hcd)
+{
+	return hcd->otg_port;	/* root port number used for OTG */
+}
+
+uint32_t fh_otg_hcd_is_b_host(fh_otg_hcd_t * hcd)
+{
+	uint32_t is_b_host;
+	if (hcd->core_if->op_state == B_HOST) {	/* 1 only when operating as OTG B-host */
+		is_b_host = 1;
+	} else {
+		is_b_host = 0;
+	}
+
+	return is_b_host;
+}
+
+fh_otg_hcd_urb_t *fh_otg_hcd_urb_alloc(fh_otg_hcd_t * hcd,
+				       int iso_desc_count, int atomic_alloc)
+{
+	fh_otg_hcd_urb_t *fh_otg_urb;
+	uint32_t size;
+	/* URB struct plus one trailing iso descriptor per packet */
+	size =
+	    sizeof(*fh_otg_urb) +
+	    iso_desc_count * sizeof(struct fh_otg_hcd_iso_packet_desc);
+	if (atomic_alloc)
+		fh_otg_urb = FH_ALLOC_ATOMIC(size);
+	else
+		fh_otg_urb = FH_ALLOC(size);
+	if (fh_otg_urb)	/* allocation can fail; don't dereference NULL */
+		fh_otg_urb->packet_count = iso_desc_count;
+
+	return fh_otg_urb;	/* NULL on allocation failure */
+}
+
+void fh_otg_hcd_urb_set_pipeinfo(fh_otg_hcd_urb_t * fh_otg_urb,
+				 uint8_t dev_addr, uint8_t ep_num,
+				 uint8_t ep_type, uint8_t ep_dir, uint16_t mps)
+{
+	fh_otg_hcd_fill_pipe(&fh_otg_urb->pipe_info, dev_addr, ep_num,
+			     ep_type, ep_dir, mps);	/* plain copy into urb->pipe_info */
+#if 0
+	FH_PRINTF
+	    ("addr = %d, ep_num = %d, ep_dir = 0x%x, ep_type = 0x%x, mps = %d\n",
+	     dev_addr, ep_num, ep_dir, ep_type, mps);
+#endif
+}
+
+void fh_otg_hcd_urb_set_params(fh_otg_hcd_urb_t * fh_otg_urb,
+			       void *urb_handle, void *buf, fh_dma_t dma,
+			       uint32_t buflen, void *setup_packet,
+			       fh_dma_t setup_dma, uint32_t flags,
+			       uint16_t interval)
+{
+	fh_otg_urb->priv = urb_handle;	/* caller's opaque handle */
+	fh_otg_urb->buf = buf;	/* CPU address of data buffer */
+	fh_otg_urb->dma = dma;	/* DMA address of data buffer */
+	fh_otg_urb->length = buflen;
+	fh_otg_urb->setup_packet = setup_packet;	/* control transfers only */
+	fh_otg_urb->setup_dma = setup_dma;
+	fh_otg_urb->flags = flags;
+	fh_otg_urb->interval = interval;
+	fh_otg_urb->status = -FH_E_IN_PROGRESS;	/* in-progress until completion updates it */
+}
+
+uint32_t fh_otg_hcd_urb_get_status(fh_otg_hcd_urb_t * fh_otg_urb)
+{
+	return fh_otg_urb->status;	/* completion status of the URB */
+}
+
+uint32_t fh_otg_hcd_urb_get_actual_length(fh_otg_hcd_urb_t * fh_otg_urb)
+{
+	return fh_otg_urb->actual_length;	/* bytes actually transferred */
+}
+
+uint32_t fh_otg_hcd_urb_get_error_count(fh_otg_hcd_urb_t * fh_otg_urb)
+{
+	return fh_otg_urb->error_count;	/* accumulated bus errors for this URB */
+}
+
+void fh_otg_hcd_urb_set_iso_desc_params(fh_otg_hcd_urb_t * fh_otg_urb,
+					int desc_num, uint32_t offset,
+					uint32_t length)
+{
+	fh_otg_urb->iso_descs[desc_num].offset = offset;	/* no bounds check: desc_num must be < packet_count */
+	fh_otg_urb->iso_descs[desc_num].length = length;
+}
+
+uint32_t fh_otg_hcd_urb_get_iso_desc_status(fh_otg_hcd_urb_t * fh_otg_urb,
+					    int desc_num)
+{
+	return fh_otg_urb->iso_descs[desc_num].status;	/* per-packet completion status */
+}
+
+uint32_t fh_otg_hcd_urb_get_iso_desc_actual_length(fh_otg_hcd_urb_t *
+						   fh_otg_urb, int desc_num)
+{
+	return fh_otg_urb->iso_descs[desc_num].actual_length;
+}
+
+int fh_otg_hcd_is_bandwidth_allocated(fh_otg_hcd_t * hcd, void *ep_handle)
+{
+	int allocated = 0;
+	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;	/* ep_handle is really a QH */
+
+	if (qh) {
+		if (!FH_LIST_EMPTY(&qh->qh_list_entry)) {	/* QH linked into a schedule list */
+			allocated = 1;
+		}
+	}
+	return allocated;
+}
+
+int fh_otg_hcd_is_bandwidth_freed(fh_otg_hcd_t * hcd, void *ep_handle)
+{
+	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;
+	int freed = 0;
+	FH_ASSERT(qh, "qh is not allocated\n");
+
+	if (FH_LIST_EMPTY(&qh->qh_list_entry)) {	/* not in any schedule list */
+		freed = 1;
+	}
+
+	return freed;
+}
+
+uint8_t fh_otg_hcd_get_ep_bandwidth(fh_otg_hcd_t * hcd, void *ep_handle)
+{
+	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;
+	FH_ASSERT(qh, "qh is not allocated\n");
+	return qh->usecs;	/* bandwidth in microseconds per (micro)frame */
+}
+
+void fh_otg_hcd_dump_state(fh_otg_hcd_t * hcd)
+{
+#ifdef DEBUG
+	int num_channels;
+	int i;
+	gnptxsts_data_t np_tx_status;
+	hptxsts_data_t p_tx_status;
+
+	num_channels = hcd->core_if->core_params->host_channels;	/* per-core channel count */
+	FH_PRINTF("\n");
+	FH_PRINTF
+	    ("************************************************************\n");
+	FH_PRINTF("HCD State:\n");
+	FH_PRINTF("  Num channels: %d\n", num_channels);
+	for (i = 0; i < num_channels; i++) {
+		fh_hc_t *hc = hcd->hc_ptr_array[i];
+		FH_PRINTF("  Channel %d:\n", i);
+		FH_PRINTF("    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+			  hc->dev_addr, hc->ep_num, hc->ep_is_in);
+		FH_PRINTF("    speed: %d\n", hc->speed);
+		FH_PRINTF("    ep_type: %d\n", hc->ep_type);
+		FH_PRINTF("    max_packet: %d\n", hc->max_packet);
+		FH_PRINTF("    data_pid_start: %d\n", hc->data_pid_start);
+		FH_PRINTF("    multi_count: %d\n", hc->multi_count);
+		FH_PRINTF("    xfer_started: %d\n", hc->xfer_started);
+		FH_PRINTF("    xfer_buff: %p\n", hc->xfer_buff);
+		FH_PRINTF("    xfer_len: %d\n", hc->xfer_len);
+		FH_PRINTF("    xfer_count: %d\n", hc->xfer_count);
+		FH_PRINTF("    halt_on_queue: %d\n", hc->halt_on_queue);
+		FH_PRINTF("    halt_pending: %d\n", hc->halt_pending);
+		FH_PRINTF("    halt_status: %d\n", hc->halt_status);
+		FH_PRINTF("    do_split: %d\n", hc->do_split);
+		FH_PRINTF("    complete_split: %d\n", hc->complete_split);
+		FH_PRINTF("    hub_addr: %d\n", hc->hub_addr);
+		FH_PRINTF("    port_addr: %d\n", hc->port_addr);
+		FH_PRINTF("    xact_pos: %d\n", hc->xact_pos);
+		FH_PRINTF("    requests: %d\n", hc->requests);
+		FH_PRINTF("    qh: %p\n", hc->qh);
+		if (hc->xfer_started) {	/* dump live register state for active channels */
+			hfnum_data_t hfnum;
+			hcchar_data_t hcchar;
+			hctsiz_data_t hctsiz;
+			hcint_data_t hcint;
+			hcintmsk_data_t hcintmsk;
+			hfnum.d32 =
+			    FH_READ_REG32(&hcd->core_if->
+					  host_if->host_global_regs->hfnum);
+			hcchar.d32 =
+			    FH_READ_REG32(&hcd->core_if->host_if->
+					  hc_regs[i]->hcchar);
+			hctsiz.d32 =
+			    FH_READ_REG32(&hcd->core_if->host_if->
+					  hc_regs[i]->hctsiz);
+			hcint.d32 =
+			    FH_READ_REG32(&hcd->core_if->host_if->
+					  hc_regs[i]->hcint);
+			hcintmsk.d32 =
+			    FH_READ_REG32(&hcd->core_if->host_if->
+					  hc_regs[i]->hcintmsk);
+			FH_PRINTF("    hfnum: 0x%08x\n", hfnum.d32);
+			FH_PRINTF("    hcchar: 0x%08x\n", hcchar.d32);
+			FH_PRINTF("    hctsiz: 0x%08x\n", hctsiz.d32);
+			FH_PRINTF("    hcint: 0x%08x\n", hcint.d32);
+			FH_PRINTF("    hcintmsk: 0x%08x\n", hcintmsk.d32);
+		}
+		if (hc->xfer_started && hc->qh) {	/* dump URBs queued on this channel */
+			fh_otg_qtd_t *qtd;
+			fh_otg_hcd_urb_t *urb;
+
+			FH_CIRCLEQ_FOREACH(qtd, &hc->qh->qtd_list, qtd_list_entry) {
+				if (!qtd->in_process)
+					break;
+
+				urb = qtd->urb;
+				FH_PRINTF("    URB Info:\n");
+				FH_PRINTF("      qtd: %p, urb: %p\n", qtd, urb);
+				if (urb) {
+					FH_PRINTF("      Dev: %d, EP: %d %s\n",
+						  fh_otg_hcd_get_dev_addr(&urb->
+									  pipe_info),
+						  fh_otg_hcd_get_ep_num(&urb->
+									pipe_info),
+						  fh_otg_hcd_is_pipe_in(&urb->
+									pipe_info) ?
+						  "IN" : "OUT");
+					FH_PRINTF("      Max packet size: %d\n",
+						  fh_otg_hcd_get_mps(&urb->
+								     pipe_info));
+					FH_PRINTF("      transfer_buffer: %p\n",
+						  urb->buf);
+					FH_PRINTF("      transfer_dma: %p\n",
+						  (void *)urb->dma);
+					FH_PRINTF("      transfer_buffer_length: %d\n",
+						  urb->length);
+					FH_PRINTF("      actual_length: %d\n",
+						  urb->actual_length);
+				}
+			}
+		}
+	}
+	FH_PRINTF("  non_periodic_channels: %d\n", hcd->non_periodic_channels);
+	FH_PRINTF("  periodic_channels: %d\n", hcd->periodic_channels);
+	FH_PRINTF("  periodic_usecs: %d\n", hcd->periodic_usecs);
+	np_tx_status.d32 =
+	    FH_READ_REG32(&hcd->core_if->core_global_regs->gnptxsts);
+	FH_PRINTF("  NP Tx Req Queue Space Avail: %d\n",
+		  np_tx_status.b.nptxqspcavail);
+	FH_PRINTF("  NP Tx FIFO Space Avail: %d\n",
+		  np_tx_status.b.nptxfspcavail);
+	p_tx_status.d32 =
+	    FH_READ_REG32(&hcd->core_if->host_if->host_global_regs->hptxsts);
+	FH_PRINTF("  P Tx Req Queue Space Avail: %d\n",
+		  p_tx_status.b.ptxqspcavail);
+	FH_PRINTF("  P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
+	fh_otg_hcd_dump_frrem(hcd);	/* frame-remaining stats (currently compiled out) */
+	fh_otg_dump_global_registers(hcd->core_if);
+	fh_otg_dump_host_registers(hcd->core_if);
+	FH_PRINTF
+	    ("************************************************************\n");
+	FH_PRINTF("\n");
+#endif
+}
+
+#ifdef DEBUG
+void fh_print_setup_data(uint8_t * setup)
+{
+	int i;
+	if (CHK_DEBUG_LEVEL(DBG_HCD)) {
+		FH_PRINTF("Setup Data = MSB ");
+		for (i = 7; i >= 0; i--)	/* print the 8 setup bytes MSB-first */
+			FH_PRINTF("%02x ", setup[i]);
+		FH_PRINTF("\n");
+		FH_PRINTF("  bmRequestType Tranfer = %s\n",
+			  (setup[0] & 0x80) ? "Device-to-Host" :
+			  "Host-to-Device");
+		FH_PRINTF("  bmRequestType Type = ");
+		switch ((setup[0] & 0x60) >> 5) {	/* bits 6:5 = request type */
+		case 0:
+			FH_PRINTF("Standard\n");
+			break;
+		case 1:
+			FH_PRINTF("Class\n");
+			break;
+		case 2:
+			FH_PRINTF("Vendor\n");
+			break;
+		case 3:
+			FH_PRINTF("Reserved\n");
+			break;
+		}
+		FH_PRINTF("  bmRequestType Recipient = ");
+		switch (setup[0] & 0x1f) {	/* bits 4:0 = recipient */
+		case 0:
+			FH_PRINTF("Device\n");
+			break;
+		case 1:
+			FH_PRINTF("Interface\n");
+			break;
+		case 2:
+			FH_PRINTF("Endpoint\n");
+			break;
+		case 3:
+			FH_PRINTF("Other\n");
+			break;
+		default:
+			FH_PRINTF("Reserved\n");
+			break;
+		}
+		FH_PRINTF("  bRequest = 0x%0x\n", setup[1]);
+		FH_PRINTF("  wValue = 0x%0x\n", *((uint16_t *) & setup[2]));	/* NOTE(review): unaligned, endian-dependent 16-bit loads -- confirm ok for this platform */
+		FH_PRINTF("  wIndex = 0x%0x\n", *((uint16_t *) & setup[4]));
+		FH_PRINTF("  wLength = 0x%0x\n\n", *((uint16_t *) & setup[6]));
+	}
+}
+#endif
+
+void fh_otg_hcd_dump_frrem(fh_otg_hcd_t * hcd)
+{
+#if 0				/* entire body compiled out; function is a no-op as shipped */
+	FH_PRINTF("Frame remaining at SOF:\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->frrem_samples, hcd->frrem_accum,
+		  (hcd->frrem_samples > 0) ?
+		  hcd->frrem_accum / hcd->frrem_samples : 0);
+
+	FH_PRINTF("\n");
+	FH_PRINTF("Frame remaining at start_transfer (uframe 7):\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->core_if->hfnum_7_samples,
+		  hcd->core_if->hfnum_7_frrem_accum,
+		  (hcd->core_if->hfnum_7_samples >
+		   0) ? hcd->core_if->hfnum_7_frrem_accum /
+		  hcd->core_if->hfnum_7_samples : 0);
+	FH_PRINTF("Frame remaining at start_transfer (uframe 0):\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->core_if->hfnum_0_samples,
+		  hcd->core_if->hfnum_0_frrem_accum,
+		  (hcd->core_if->hfnum_0_samples >
+		   0) ? hcd->core_if->hfnum_0_frrem_accum /
+		  hcd->core_if->hfnum_0_samples : 0);
+	FH_PRINTF("Frame remaining at start_transfer (uframe 1-6):\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->core_if->hfnum_other_samples,
+		  hcd->core_if->hfnum_other_frrem_accum,
+		  (hcd->core_if->hfnum_other_samples >
+		   0) ? hcd->core_if->hfnum_other_frrem_accum /
+		  hcd->core_if->hfnum_other_samples : 0);
+
+	FH_PRINTF("\n");
+	FH_PRINTF("Frame remaining at sample point A (uframe 7):\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->hfnum_7_samples_a, hcd->hfnum_7_frrem_accum_a,
+		  (hcd->hfnum_7_samples_a > 0) ?
+		  hcd->hfnum_7_frrem_accum_a / hcd->hfnum_7_samples_a : 0);
+	FH_PRINTF("Frame remaining at sample point A (uframe 0):\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->hfnum_0_samples_a, hcd->hfnum_0_frrem_accum_a,
+		  (hcd->hfnum_0_samples_a > 0) ?
+		  hcd->hfnum_0_frrem_accum_a / hcd->hfnum_0_samples_a : 0);
+	FH_PRINTF("Frame remaining at sample point A (uframe 1-6):\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->hfnum_other_samples_a, hcd->hfnum_other_frrem_accum_a,
+		  (hcd->hfnum_other_samples_a > 0) ?
+		  hcd->hfnum_other_frrem_accum_a /
+		  hcd->hfnum_other_samples_a : 0);
+
+	FH_PRINTF("\n");
+	FH_PRINTF("Frame remaining at sample point B (uframe 7):\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->hfnum_7_samples_b, hcd->hfnum_7_frrem_accum_b,
+		  (hcd->hfnum_7_samples_b > 0) ?
+		  hcd->hfnum_7_frrem_accum_b / hcd->hfnum_7_samples_b : 0);
+	FH_PRINTF("Frame remaining at sample point B (uframe 0):\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->hfnum_0_samples_b, hcd->hfnum_0_frrem_accum_b,
+		  (hcd->hfnum_0_samples_b > 0) ?
+		  hcd->hfnum_0_frrem_accum_b / hcd->hfnum_0_samples_b : 0);
+	FH_PRINTF("Frame remaining at sample point B (uframe 1-6):\n");
+	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
+		  hcd->hfnum_other_samples_b, hcd->hfnum_other_frrem_accum_b,
+		  (hcd->hfnum_other_samples_b > 0) ?
+		  hcd->hfnum_other_frrem_accum_b /
+		  hcd->hfnum_other_samples_b : 0);
+#endif
+}
+
+#endif /* FH_DEVICE_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.h
new file mode 100644
index 00000000..bfc3944b
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.h
@@ -0,0 +1,803 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd.h $
+ * $Revision: #58 $
+ * $Date: 2011/09/15 $
+ * $Change: 1846647 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_DEVICE_ONLY
+#ifndef __FH_HCD_H__
+#define __FH_HCD_H__
+
+#include "fh_otg_os_dep.h"
+#include "../fh_common_port/usb.h"
+#include "../fh_common_port/fh_list.h"
+#include "fh_otg_hcd_if.h"
+#include "fh_otg_core_if.h"
+#include "fh_otg_cil.h"
+
+/**
+ * @file
+ *
+ * This file contains the structures, constants, and interfaces for
+ * the Host Controller Driver (HCD).
+ *
+ * The Host Controller Driver (HCD) is responsible for translating requests
+ * from the USB Driver into the appropriate actions on the FH_otg controller.
+ * It isolates the USBD from the specifics of the controller by providing an
+ * API to the USBD.
+ */
+
+struct fh_otg_hcd_pipe_info {
+	uint8_t dev_addr;	/* USB device address */
+	uint8_t ep_num;		/* endpoint number */
+	uint8_t pipe_type;	/* UE_CONTROL/UE_BULK/UE_INTERRUPT/UE_ISOCHRONOUS */
+	uint8_t pipe_dir;	/* UE_DIR_IN, otherwise treated as OUT */
+	uint16_t mps;		/* max packet size */
+};
+
+struct fh_otg_hcd_iso_packet_desc {
+	uint32_t offset;	/* byte offset into the URB buffer */
+	uint32_t length;	/* requested length */
+	uint32_t actual_length;	/* bytes actually transferred */
+	uint32_t status;	/* per-packet completion status */
+};
+
+struct fh_otg_qtd;
+
+struct fh_otg_hcd_urb {
+	void *priv;		/* caller's opaque handle */
+	struct fh_otg_qtd *qtd;
+	void *buf;		/* data buffer, CPU address */
+	fh_dma_t dma;		/* data buffer, DMA address */
+	void *setup_packet;	/* control transfers only */
+	fh_dma_t setup_dma;
+	uint32_t length;	/* requested transfer length */
+	uint32_t actual_length;
+	uint32_t status;
+	uint32_t error_count;
+	uint32_t packet_count;	/* number of iso_descs[] entries (set at alloc) */
+	uint32_t flags;
+	uint16_t interval;
+	struct fh_otg_hcd_pipe_info pipe_info;
+	struct fh_otg_hcd_iso_packet_desc iso_descs[0];	/* trailing variable-length array */
+};
+
+static inline uint8_t fh_otg_hcd_get_ep_num(struct fh_otg_hcd_pipe_info *pipe)
+{
+	return pipe->ep_num;	/* endpoint number */
+}
+
+static inline uint8_t fh_otg_hcd_get_pipe_type(struct fh_otg_hcd_pipe_info
+					       *pipe)
+{
+	return pipe->pipe_type;	/* UE_* endpoint type */
+}
+
+static inline uint16_t fh_otg_hcd_get_mps(struct fh_otg_hcd_pipe_info *pipe)
+{
+	return pipe->mps;	/* max packet size */
+}
+
+static inline uint8_t fh_otg_hcd_get_dev_addr(struct fh_otg_hcd_pipe_info
+					      *pipe)
+{
+	return pipe->dev_addr;	/* USB device address */
+}
+
+static inline uint8_t fh_otg_hcd_is_pipe_isoc(struct fh_otg_hcd_pipe_info
+					      *pipe)
+{
+	return (pipe->pipe_type == UE_ISOCHRONOUS);
+}
+
+static inline uint8_t fh_otg_hcd_is_pipe_int(struct fh_otg_hcd_pipe_info
+					     *pipe)
+{
+	return (pipe->pipe_type == UE_INTERRUPT);
+}
+
+static inline uint8_t fh_otg_hcd_is_pipe_bulk(struct fh_otg_hcd_pipe_info
+					      *pipe)
+{
+	return (pipe->pipe_type == UE_BULK);
+}
+
+static inline uint8_t fh_otg_hcd_is_pipe_control(struct fh_otg_hcd_pipe_info
+						 *pipe)
+{
+	return (pipe->pipe_type == UE_CONTROL);
+}
+
+static inline uint8_t fh_otg_hcd_is_pipe_in(struct fh_otg_hcd_pipe_info *pipe)
+{
+	return (pipe->pipe_dir == UE_DIR_IN);
+}
+
+static inline uint8_t fh_otg_hcd_is_pipe_out(struct fh_otg_hcd_pipe_info
+					     *pipe)
+{
+	return (!fh_otg_hcd_is_pipe_in(pipe));	/* OUT is defined as "not IN" */
+}
+
+static inline void fh_otg_hcd_fill_pipe(struct fh_otg_hcd_pipe_info *pipe,
+					uint8_t devaddr, uint8_t ep_num,
+					uint8_t pipe_type, uint8_t pipe_dir,
+					uint16_t mps)
+{
+	pipe->dev_addr = devaddr;	/* plain field copies; no validation performed */
+	pipe->ep_num = ep_num;
+	pipe->pipe_type = pipe_type;
+	pipe->pipe_dir = pipe_dir;
+	pipe->mps = mps;
+}
+
+/**
+ * Phases for control transfers.
+ */
+typedef enum fh_otg_control_phase {
+	FH_OTG_CONTROL_SETUP,	/* SETUP stage */
+	FH_OTG_CONTROL_DATA,	/* DATA stage */
+	FH_OTG_CONTROL_STATUS	/* STATUS stage */
+} fh_otg_control_phase_e;
+
+/** Transaction types. */
+typedef enum fh_otg_transaction_type {
+	FH_OTG_TRANSACTION_NONE,
+	FH_OTG_TRANSACTION_PERIODIC,
+	FH_OTG_TRANSACTION_NON_PERIODIC,
+	FH_OTG_TRANSACTION_ALL	/* both periodic and non-periodic */
+} fh_otg_transaction_type_e;
+
+struct fh_otg_qh;
+
+/**
+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
+ * interrupt, or isochronous transfer. A single QTD is created for each URB
+ * (of one of these types) submitted to the HCD. The transfer associated with
+ * a QTD may require one or multiple transactions.
+ *
+ * A QTD is linked to a Queue Head, which is entered in either the
+ * non-periodic or periodic schedule for execution. When a QTD is chosen for
+ * execution, some or all of its transactions may be executed. After
+ * execution, the state of the QTD is updated. The QTD may be retired if all
+ * its transactions are complete or if an error occurred. Otherwise, it
+ * remains in the schedule so more transactions can be executed later.
+ */
+typedef struct fh_otg_qtd {
+	/**
+	 * Determines the PID of the next data packet for the data phase of
+	 * control transfers. Ignored for other transfer types.<br>
+	 * One of the following values:
+	 *	- FH_OTG_HC_PID_DATA0
+	 *	- FH_OTG_HC_PID_DATA1
+	 */
+	uint8_t data_toggle;
+
+	/** Current phase for control transfers (Setup, Data, or Status). */
+	fh_otg_control_phase_e control_phase;
+
+	/** Keep track of the current split type
+	 * for FS/LS endpoints on a HS Hub */
+	uint8_t complete_split;
+
+	/** How many bytes transferred during SSPLIT OUT */
+	uint32_t ssplit_out_xfer_count;
+
+	/**
+	 * Holds the number of bus errors that have occurred for a transaction
+	 * within this transfer.
+	 */
+	uint8_t error_count;
+
+	/**
+	 * Index of the next frame descriptor for an isochronous transfer. A
+	 * frame descriptor describes the buffer position and length of the
+	 * data to be transferred in the next scheduled (micro)frame of an
+	 * isochronous transfer. It also holds status for that transaction.
+	 * The frame index starts at 0.
+	 */
+	uint16_t isoc_frame_index;
+
+	/** Position of the ISOC split on full/low speed */
+	uint8_t isoc_split_pos;
+
+	/** Position of the ISOC split in the buffer for the current frame */
+	uint16_t isoc_split_offset;
+
+	/** URB for this transfer */
+	struct fh_otg_hcd_urb *urb;
+
+	struct fh_otg_qh *qh;	/* presumably the QH this QTD is queued on -- TODO confirm against queue code */
+
+	/** This list of QTDs */
+	FH_CIRCLEQ_ENTRY(fh_otg_qtd) qtd_list_entry;
+
+	/** Indicates if this QTD is currently processed by HW. */
+	uint8_t in_process;
+
+	/** Number of DMA descriptors for this QTD */
+	uint8_t n_desc;
+
+	/**
+	 * Last activated frame(packet) index.
+	 * Used in Descriptor DMA mode only.
+	 */
+	uint16_t isoc_frame_index_last;
+
+} fh_otg_qtd_t;
+
+FH_CIRCLEQ_HEAD(fh_otg_qtd_list, fh_otg_qtd);	/* declares struct fh_otg_qtd_list used by fh_otg_qh */
+
+/**
+ * A Queue Head (QH) holds the static characteristics of an endpoint and
+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
+ * be entered in either the non-periodic or periodic schedule.
+ */
+typedef struct fh_otg_qh {
+	/**
+	 * Endpoint type.
+	 * One of the following values:
+	 *	- UE_CONTROL
+	 *	- UE_BULK
+	 *	- UE_INTERRUPT
+	 *	- UE_ISOCHRONOUS
+	 */
+	uint8_t ep_type;
+	uint8_t ep_is_in;	/* endpoint direction flag; nonzero appears to mean IN -- TODO confirm */
+
+	/** wMaxPacketSize Field of Endpoint Descriptor. */
+	uint16_t maxp;
+
+	/**
+	 * Device speed.
+	 * One of the following values:
+	 *	- FH_OTG_EP_SPEED_LOW
+	 *	- FH_OTG_EP_SPEED_FULL
+	 *	- FH_OTG_EP_SPEED_HIGH
+	 */
+	uint8_t dev_speed;
+
+	/**
+	 * Determines the PID of the next data packet for non-control
+	 * transfers. Ignored for control transfers.<br>
+	 * One of the following values:
+	 *	- FH_OTG_HC_PID_DATA0
+	 *	- FH_OTG_HC_PID_DATA1
+	 */
+	uint8_t data_toggle;
+
+	/** Ping state if 1. */
+	uint8_t ping_state;
+
+	/**
+	 * List of QTDs for this QH.
+	 */
+	struct fh_otg_qtd_list qtd_list;
+
+	/** Host channel currently processing transfers for this QH. */
+	struct fh_hc *channel;
+
+	/** Full/low speed endpoint on high-speed hub requires split. */
+	uint8_t do_split;
+
+	/** @name Periodic schedule information */
+	/** @{ */
+
+	/** Bandwidth in microseconds per (micro)frame. */
+	uint16_t usecs;
+
+	/** Interval between transfers in (micro)frames. */
+	uint16_t interval;
+
+	/**
+	 * (micro)frame to initialize a periodic transfer. The transfer
+	 * executes in the following (micro)frame.
+	 */
+	uint16_t sched_frame;
+
+	/** (micro)frame at which last start split was initialized. */
+	uint16_t start_split_frame;
+
+	/** @} */
+
+	/**
+	 * Used instead of original buffer if
+	 * it(physical address) is not dword-aligned.
+	 */
+	uint8_t *dw_align_buf;
+	fh_dma_t dw_align_buf_dma;	/* DMA address of dw_align_buf */
+
+	/** Entry for QH in either the periodic or non-periodic schedule. */
+	fh_list_link_t qh_list_entry;
+
+	/** @name Descriptor DMA support */
+	/** @{ */
+
+	/** Descriptor List. */
+	fh_otg_host_dma_desc_t *desc_list;
+
+	/** Descriptor List physical address. */
+	fh_dma_t desc_list_dma;
+
+	/**
+	 * Xfer Bytes array.
+	 * Each element corresponds to a descriptor and indicates
+	 * original XferSize size value for the descriptor.
+	 */
+	uint32_t *n_bytes;
+
+	/** Actual number of transfer descriptors in a list. */
+	uint16_t ntd;
+
+	/** First activated isochronous transfer descriptor index. */
+	uint8_t td_first;
+	/** Last activated isochronous transfer descriptor index. */
+	uint8_t td_last;
+
+	/** @} */
+
+} fh_otg_qh_t;
+
+FH_CIRCLEQ_HEAD(hc_list, fh_hc);	/* declares struct hc_list (used for the free channel list) */
+
+/**
+ * This structure holds the state of the HCD, including the non-periodic and
+ * periodic schedules.
+ */
+struct fh_otg_hcd {
+ /** The FH otg device pointer */
+ struct fh_otg_device *otg_dev;
+ /** FH OTG Core Interface Layer */
+ fh_otg_core_if_t *core_if;
+
+ /** Function HCD driver callbacks */
+ struct fh_otg_hcd_function_ops *fops;
+
+ /** Internal FH HCD Flags */
+ volatile union fh_otg_hcd_internal_flags {
+ uint32_t d32;
+ struct {
+ unsigned port_connect_status_change:1;
+ unsigned port_connect_status:1;
+ unsigned port_reset_change:1;
+ unsigned port_enable_change:1;
+ unsigned port_suspend_change:1;
+ unsigned port_over_current_change:1;
+ unsigned port_l1_change:1;
+ unsigned reserved:26;
+ } b;
+ } flags;
+
+ /**
+ * Inactive items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are not
+ * currently assigned to a host channel.
+ */
+ fh_list_link_t non_periodic_sched_inactive;
+
+ /**
+ * Active items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are
+ * currently assigned to a host channel.
+ */
+ fh_list_link_t non_periodic_sched_active;
+
+ /**
+ * Pointer to the next Queue Head to process in the active
+ * non-periodic schedule.
+ */
+ fh_list_link_t *non_periodic_qh_ptr;
+
+ /**
+ * Inactive items in the periodic schedule. This is a list of QHs for
+ * periodic transfers that are _not_ scheduled for the next frame.
+ * Each QH in the list has an interval counter that determines when it
+ * needs to be scheduled for execution. This scheduling mechanism
+ * allows only a simple calculation for periodic bandwidth used (i.e.
+ * must assume that all periodic transfers may need to execute in the
+ * same frame). However, it greatly simplifies scheduling and should
+ * be sufficient for the vast majority of OTG hosts, which need to
+ * connect to a small number of peripherals at one time.
+ *
+ * Items move from this list to periodic_sched_ready when the QH
+ * interval counter is 0 at SOF.
+ */
+ fh_list_link_t periodic_sched_inactive;
+
+ /**
+ * List of periodic QHs that are ready for execution in the next
+ * frame, but have not yet been assigned to host channels.
+ *
+ * Items move from this list to periodic_sched_assigned as host
+ * channels become available during the current frame.
+ */
+ fh_list_link_t periodic_sched_ready;
+
+ /**
+ * List of periodic QHs to be executed in the next frame that are
+ * assigned to host channels.
+ *
+ * Items move from this list to periodic_sched_queued as the
+ * transactions for the QH are queued to the FH_otg controller.
+ */
+ fh_list_link_t periodic_sched_assigned;
+
+ /**
+ * List of periodic QHs that have been queued for execution.
+ *
+ * Items move from this list to either periodic_sched_inactive or
+ * periodic_sched_ready when the channel associated with the transfer
+ * is released. If the interval for the QH is 1, the item moves to
+ * periodic_sched_ready because it must be rescheduled for the next
+ * frame. Otherwise, the item moves to periodic_sched_inactive.
+ */
+ fh_list_link_t periodic_sched_queued;
+
+ /**
+ * Total bandwidth claimed so far for periodic transfers. This value
+ * is in microseconds per (micro)frame. The assumption is that all
+ * periodic transfers may occur in the same (micro)frame.
+ */
+ uint16_t periodic_usecs;
+
+ /**
+ * Frame number read from the core at SOF. The value ranges from 0 to
+ * FH_HFNUM_MAX_FRNUM.
+ */
+ uint16_t frame_number;
+
+ /**
+ * Count of periodic QHs, if using several eps. For SOF enable/disable.
+ */
+ uint16_t periodic_qh_count;
+
+ /**
+ * Free host channels in the controller. This is a list of
+ * fh_hc_t items.
+ */
+ struct hc_list free_hc_list;
+ /**
+ * Number of host channels assigned to periodic transfers. Currently
+ * assuming that there is a dedicated host channel for each periodic
+ * transaction and at least one host channel available for
+ * non-periodic transactions.
+ */
+ int periodic_channels;
+
+ /**
+ * Number of host channels assigned to non-periodic transfers.
+ */
+ int non_periodic_channels;
+
+ /**
+ * Array of pointers to the host channel descriptors. Allows accessing
+ * a host channel descriptor given the host channel number. This is
+ * useful in interrupt handlers.
+ */
+ struct fh_hc *hc_ptr_array[MAX_EPS_CHANNELS];
+
+ /**
+ * Buffer to use for any data received during the status phase of a
+ * control transfer. Normally no data is transferred during the status
+ * phase. This buffer is used as a bit bucket.
+ */
+ uint8_t *status_buf;
+
+ /**
+ * DMA address for status_buf.
+ */
+ dma_addr_t status_buf_dma;
+#define FH_OTG_HCD_STATUS_BUF_SIZE 64
+
+ /**
+ * Connection timer. An OTG host must display a message if the device
+ * does not connect. Started when the VBus power is turned on via
+ * sysfs attribute "buspower".
+ */
+ fh_timer_t *conn_timer;
+
+ /* Tasket to do a reset */
+ fh_tasklet_t *reset_tasklet;
+
+ /* */
+ fh_spinlock_t *lock;
+
+ /**
+ * Private data that could be used by OS wrapper.
+ */
+ void *priv;
+
+ uint8_t otg_port;
+
+ /** Frame List */
+ uint32_t *frame_list;
+
+ /** Frame List DMA address */
+ dma_addr_t frame_list_dma;
+
+#ifdef DEBUG
+ uint32_t frrem_samples;
+ uint64_t frrem_accum;
+
+ uint32_t hfnum_7_samples_a;
+ uint64_t hfnum_7_frrem_accum_a;
+ uint32_t hfnum_0_samples_a;
+ uint64_t hfnum_0_frrem_accum_a;
+ uint32_t hfnum_other_samples_a;
+ uint64_t hfnum_other_frrem_accum_a;
+
+ uint32_t hfnum_7_samples_b;
+ uint64_t hfnum_7_frrem_accum_b;
+ uint32_t hfnum_0_samples_b;
+ uint64_t hfnum_0_frrem_accum_b;
+ uint32_t hfnum_other_samples_b;
+ uint64_t hfnum_other_frrem_accum_b;
+#endif
+};
+
+/** @name Transaction Execution Functions */
+/** @{ */
+extern fh_otg_transaction_type_e fh_otg_hcd_select_transactions(fh_otg_hcd_t
+ * hcd);
+extern void fh_otg_hcd_queue_transactions(fh_otg_hcd_t * hcd,
+ fh_otg_transaction_type_e tr_type);
+
+/** @} */
+
+/** @name Interrupt Handler Functions */
+/** @{ */
+extern int32_t fh_otg_hcd_handle_intr(fh_otg_hcd_t * fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_sof_intr(fh_otg_hcd_t * fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_rx_status_q_level_intr(fh_otg_hcd_t *
+ fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_np_tx_fifo_empty_intr(fh_otg_hcd_t *
+ fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_perio_tx_fifo_empty_intr(fh_otg_hcd_t *
+ fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_incomplete_periodic_intr(fh_otg_hcd_t *
+ fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_port_intr(fh_otg_hcd_t * fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_conn_id_status_change_intr(fh_otg_hcd_t *
+ fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_disconnect_intr(fh_otg_hcd_t * fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_hc_intr(fh_otg_hcd_t * fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_hc_n_intr(fh_otg_hcd_t * fh_otg_hcd,
+ uint32_t num);
+extern int32_t fh_otg_hcd_handle_session_req_intr(fh_otg_hcd_t * fh_otg_hcd);
+extern int32_t fh_otg_hcd_handle_wakeup_detected_intr(fh_otg_hcd_t *
+ fh_otg_hcd);
+/** @} */
+
+/** @name Schedule Queue Functions */
+/** @{ */
+
+/* Implemented in fh_otg_hcd_queue.c */
+extern fh_otg_qh_t *fh_otg_hcd_qh_create(fh_otg_hcd_t * hcd,
+ fh_otg_hcd_urb_t * urb, int atomic_alloc);
+extern void fh_otg_hcd_qh_free(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
+extern int fh_otg_hcd_qh_add(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
+extern void fh_otg_hcd_qh_remove(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
+extern void fh_otg_hcd_qh_deactivate(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh,
+ int sched_csplit);
+
+/** Removes a QH from the schedule under the HCD lock, then frees it
+ * outside the critical section. IRQ flags are saved/restored around the
+ * remove step, so this is intended for non-interrupt context. */
+static inline void fh_otg_hcd_qh_remove_and_free(fh_otg_hcd_t * hcd,
+						 fh_otg_qh_t * qh)
+{
+	fh_irqflags_t flags;
+	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
+	fh_otg_hcd_qh_remove(hcd, qh);
+	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
+	fh_otg_hcd_qh_free(hcd, qh);
+}
+
+/** Allocates memory for a QH structure.
+ * @param atomic_alloc Non-zero to use the non-blocking (atomic) allocator.
+ * @return Pointer to the allocated QH, or NULL on allocation failure. */
+static inline fh_otg_qh_t *fh_otg_hcd_qh_alloc(int atomic_alloc)
+{
+	void *mem;
+
+	mem = atomic_alloc ? FH_ALLOC_ATOMIC(sizeof(fh_otg_qh_t))
+			   : FH_ALLOC(sizeof(fh_otg_qh_t));
+	return (fh_otg_qh_t *) mem;
+}
+
+extern fh_otg_qtd_t *fh_otg_hcd_qtd_create(fh_otg_hcd_urb_t * urb,
+ int atomic_alloc);
+extern void fh_otg_hcd_qtd_init(fh_otg_qtd_t * qtd, fh_otg_hcd_urb_t * urb);
+extern int fh_otg_hcd_qtd_add(fh_otg_qtd_t * qtd, fh_otg_hcd_t * fh_otg_hcd,
+ fh_otg_qh_t ** qh, int atomic_alloc);
+
+/** Allocates memory for a QTD structure.
+ * @param atomic_alloc Non-zero to use the non-blocking (atomic) allocator.
+ * @return Pointer to the allocated QTD, or NULL on allocation failure. */
+static inline fh_otg_qtd_t *fh_otg_hcd_qtd_alloc(int atomic_alloc)
+{
+	void *mem;
+
+	mem = atomic_alloc ? FH_ALLOC_ATOMIC(sizeof(fh_otg_qtd_t))
+			   : FH_ALLOC(sizeof(fh_otg_qtd_t));
+	return (fh_otg_qtd_t *) mem;
+}
+
+/** Frees the memory for a QTD structure. The QTD must already have been
+ * removed from its QH's list (see fh_otg_hcd_qtd_remove()).
+ * @param qtd QTD to free.*/
+static inline void fh_otg_hcd_qtd_free(fh_otg_qtd_t * qtd)
+{
+	FH_FREE(qtd);
+}
+
+/** Removes a QTD from its QH's qtd_list.
+ * @param hcd HCD instance (not used by this helper; kept so the call
+ *        signature matches the other QTD helpers).
+ * @param qtd QTD to remove from list.
+ * @param qh QH the QTD belongs to.
+ */
+static inline void fh_otg_hcd_qtd_remove(fh_otg_hcd_t * hcd,
+					 fh_otg_qtd_t * qtd,
+					 fh_otg_qh_t * qh)
+{
+	FH_CIRCLEQ_REMOVE(&qh->qtd_list, qtd, qtd_list_entry);
+}
+
+/** Removes a QTD from its QH's list, then frees it.
+ * When called outside the interrupt-servicing chain, the caller must
+ * disable IRQs and hold the HCD lock (these helpers do no locking). */
+static inline void fh_otg_hcd_qtd_remove_and_free(fh_otg_hcd_t * hcd,
+						  fh_otg_qtd_t * qtd,
+						  fh_otg_qh_t * qh)
+{
+	fh_otg_hcd_qtd_remove(hcd, qtd, qh);
+	fh_otg_hcd_qtd_free(qtd);
+}
+
+/** @} */
+
+/** @name Descriptor DMA Supporting Functions */
+/** @{ */
+
+extern void fh_otg_hcd_start_xfer_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
+extern void fh_otg_hcd_complete_xfer_ddma(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_halt_status_e halt_status);
+
+extern int fh_otg_hcd_qh_init_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
+extern void fh_otg_hcd_qh_free_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
+
+/** @} */
+
+/** @name Internal Functions */
+/** @{ */
+fh_otg_qh_t *fh_urb_to_qh(fh_otg_hcd_urb_t * urb);
+/** @} */
+
+#ifdef CONFIG_USB_FH_OTG_LPM
+extern int fh_otg_hcd_get_hc_for_lpm_tran(fh_otg_hcd_t * hcd,
+ uint8_t devaddr);
+extern void fh_otg_hcd_free_hc_from_lpm(fh_otg_hcd_t * hcd);
+#endif
+
+/** Gets the QH that contains the list_head */
+#define fh_list_to_qh(_list_head_ptr_) container_of(_list_head_ptr_, fh_otg_qh_t, qh_list_entry)
+
+/** Gets the QTD that contains the list_head */
+#define fh_list_to_qtd(_list_head_ptr_) container_of(_list_head_ptr_, fh_otg_qtd_t, qtd_list_entry)
+
+/** Check if QH is non-periodic */
+#define fh_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == UE_BULK) || \
+ (_qh_ptr_->ep_type == UE_CONTROL))
+
+/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */
+#define fh_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+
+/** Packet size for any kind of endpoint descriptor */
+#define fh_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+
+/**
+ * Modulo-FH_HFNUM_MAX_FRNUM comparison: returns nonzero when frame1 is
+ * less than or equal to frame2, accounting for rollover of the frame
+ * number at the maximum frame number.
+ */
+static inline int fh_frame_num_le(uint16_t frame1, uint16_t frame2)
+{
+	uint16_t dist = (frame2 - frame1) & FH_HFNUM_MAX_FRNUM;
+
+	return dist <= (FH_HFNUM_MAX_FRNUM >> 1);
+}
+
+/**
+ * Modulo-FH_HFNUM_MAX_FRNUM comparison: returns nonzero when frame1 is
+ * strictly greater than frame2, accounting for rollover of the frame
+ * number at the maximum frame number.
+ */
+static inline int fh_frame_num_gt(uint16_t frame1, uint16_t frame2)
+{
+	uint16_t dist = (frame1 - frame2) & FH_HFNUM_MAX_FRNUM;
+
+	if (frame1 == frame2)
+		return 0;
+	return dist < (FH_HFNUM_MAX_FRNUM >> 1);
+}
+
+/**
+ * Adds inc to frame modulo FH_HFNUM_MAX_FRNUM (i.e. with frame-number
+ * rollover) and returns the result.
+ */
+static inline uint16_t fh_frame_num_inc(uint16_t frame, uint16_t inc)
+{
+	uint16_t sum = frame + inc;
+
+	return sum & FH_HFNUM_MAX_FRNUM;
+}
+
+/* Converts a microframe number to a full-frame number: masks to the
+ * valid range, then drops the 3 microframe bits. */
+static inline uint16_t fh_full_frame_num(uint16_t frame)
+{
+	return (frame & FH_HFNUM_MAX_FRNUM) >> 3;
+}
+
+/* Extracts the microframe part (0..7) of a microframe number. */
+static inline uint16_t fh_micro_frame_num(uint16_t frame)
+{
+	return frame & 0x7;
+}
+
+void fh_otg_hcd_save_data_toggle(fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd);
+
+#ifdef DEBUG
+/**
+ * Macro to sample the remaining PHY clocks left in the current frame. This
+ * may be used during debugging to determine the average time it takes to
+ * execute sections of code. There are two possible sample points, "a" and
+ * "b", so the _letter argument must be one of these values.
+ *
+ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For
+ * example, "cat /sys/devices/lm0/hcd_frrem".
+ */
+#define fh_sample_frrem(_hcd, _qh, _letter) \
+{ \
+ hfnum_data_t hfnum; \
+ fh_otg_qtd_t *qtd; \
+ qtd = list_entry(_qh->qtd_list.next, fh_otg_qtd_t, qtd_list_entry); \
+ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \
+ hfnum.d32 = FH_READ_REG32(&_hcd->core_if->host_if->host_global_regs->hfnum); \
+ switch (hfnum.b.frnum & 0x7) { \
+ case 7: \
+ _hcd->hfnum_7_samples_##_letter++; \
+ _hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \
+ break; \
+ case 0: \
+ _hcd->hfnum_0_samples_##_letter++; \
+ _hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \
+ break; \
+ default: \
+ _hcd->hfnum_other_samples_##_letter++; \
+ _hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \
+ break; \
+ } \
+ } \
+}
+#else
+#define fh_sample_frrem(_hcd, _qh, _letter)
+#endif
+#endif
+#endif /* FH_DEVICE_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_ddma.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_ddma.c
new file mode 100644
index 00000000..921c55d5
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_ddma.c
@@ -0,0 +1,1128 @@
+/*==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_ddma.c $
+ * $Revision: #11 $
+ * $Date: 2013/01/24 $
+ * $Change: 2150761 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_DEVICE_ONLY
+
+/** @file
+ * This file contains Descriptor DMA support implementation for host mode.
+ */
+
+#include "fh_otg_hcd.h"
+#include "fh_otg_regs.h"
+
+/* Maps a frame number to its FrameList slot. MAX_FRLIST_EN_NUM must be
+ * a power of two for this mask to be a valid modulo. */
+static inline uint8_t frame_list_idx(uint16_t frame)
+{
+	return (frame & (MAX_FRLIST_EN_NUM - 1));
+}
+
+/* Advances a descriptor-list index by inc, wrapping at the list size:
+ * MAX_DMA_DESC_NUM_HS_ISOC for high-speed endpoints, otherwise
+ * MAX_DMA_DESC_NUM_GENERIC (both must be powers of two). */
+static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
+{
+	return (idx + inc) &
+	    (((speed ==
+	       FH_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
+	      MAX_DMA_DESC_NUM_GENERIC) - 1);
+}
+
+/* Steps a descriptor-list index back by inc, wrapping at the list size
+ * (see desclist_idx_inc for the size selection). */
+static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
+{
+	return (idx - inc) &
+	    (((speed ==
+	       FH_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
+	      MAX_DMA_DESC_NUM_GENERIC) - 1);
+}
+
+/* Number of DMA descriptors in this QH's list: the larger HS-ISOC size
+ * for high-speed isochronous endpoints, the generic size otherwise. */
+static inline uint16_t max_desc_num(fh_otg_qh_t * qh)
+{
+	return (((qh->ep_type == UE_ISOCHRONOUS)
+		 && (qh->dev_speed == FH_OTG_EP_SPEED_HIGH))
+		? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
+}
+/* FrameList stride for the QH: in HS the interval is in microframes, so
+ * convert to frames with a round-up divide by 8; FS/LS intervals are
+ * already in frames. */
+static inline uint16_t frame_incr_val(fh_otg_qh_t * qh)
+{
+	return ((qh->dev_speed == FH_OTG_EP_SPEED_HIGH)
+		? ((qh->interval + 8 - 1) / 8)
+		: qh->interval);
+}
+
+/**
+ * Allocates the QH's DMA descriptor list plus the parallel n_bytes array
+ * that records each descriptor's programmed transfer size.
+ *
+ * @param qh QH to set up (fills desc_list, desc_list_dma, n_bytes).
+ * @return 0 on success, -FH_E_NO_MEMORY on allocation failure.
+ */
+static int desc_list_alloc(fh_otg_qh_t * qh)
+{
+	qh->desc_list = (fh_otg_host_dma_desc_t *)
+	    FH_DMA_ALLOC(sizeof(fh_otg_host_dma_desc_t) * max_desc_num(qh),
+			 &qh->desc_list_dma);
+
+	if (!qh->desc_list) {
+		FH_ERROR("%s: DMA descriptor list allocation failed\n", __func__);
+		/* Return before the memset below dereferences NULL (the
+		 * previous code fell through on failure). */
+		return -FH_E_NO_MEMORY;
+	}
+
+	fh_memset(qh->desc_list, 0x00,
+		  sizeof(fh_otg_host_dma_desc_t) * max_desc_num(qh));
+
+	qh->n_bytes =
+	    (uint32_t *) FH_ALLOC(sizeof(uint32_t) * max_desc_num(qh));
+
+	if (!qh->n_bytes) {
+		FH_ERROR
+		    ("%s: Failed to allocate array for descriptors' size actual values\n",
+		     __func__);
+		/* Undo the first allocation so the QH is left fully
+		 * unallocated (consistent) on error. */
+		FH_DMA_FREE(max_desc_num(qh), qh->desc_list,
+			    qh->desc_list_dma);
+		qh->desc_list = NULL;
+		return -FH_E_NO_MEMORY;
+	}
+
+	return 0;
+}
+
+/* Releases the QH's descriptor list and n_bytes array. Idempotent:
+ * both pointers are reset to NULL, so repeated calls are harmless. */
+static void desc_list_free(fh_otg_qh_t * qh)
+{
+	if (qh->desc_list) {
+		FH_DMA_FREE(max_desc_num(qh), qh->desc_list,
+			    qh->desc_list_dma);
+		qh->desc_list = NULL;
+	}
+
+	if (qh->n_bytes) {
+		FH_FREE(qh->n_bytes);
+		qh->n_bytes = NULL;
+	}
+}
+
+/**
+ * Allocates and zeroes the per-HCD periodic FrameList (one 32-bit entry
+ * per FrameList slot). No-op if the list is already allocated.
+ *
+ * @param hcd The HCD state structure.
+ * @return 0 on success, -FH_E_NO_MEMORY on allocation failure.
+ */
+static int frame_list_alloc(fh_otg_hcd_t * hcd)
+{
+	if (hcd->frame_list)
+		return 0;
+
+	hcd->frame_list = FH_DMA_ALLOC(4 * MAX_FRLIST_EN_NUM,
+				       &hcd->frame_list_dma);
+	if (!hcd->frame_list) {
+		FH_ERROR("%s: Frame List allocation failed\n", __func__);
+		/* Return before the memset below dereferences NULL (the
+		 * previous code fell through on failure). */
+		return -FH_E_NO_MEMORY;
+	}
+
+	fh_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);
+
+	return 0;
+}
+
+/* Frees the shared periodic FrameList, if allocated; NULLs the pointer
+ * so the free is idempotent. */
+static void frame_list_free(fh_otg_hcd_t * hcd)
+{
+	if (!hcd->frame_list)
+		return;
+
+	FH_DMA_FREE(4 * MAX_FRLIST_EN_NUM, hcd->frame_list, hcd->frame_list_dma);
+	hcd->frame_list = NULL;
+}
+
+/* Programs the FrameList base address (HFLBADDR) and FrameList size
+ * into HCFG, then sets the periodic-schedule-enable bit. No-op when the
+ * schedule is already enabled. Note: for an fr_list_en value other than
+ * 8/16/32/64 the frlisten field is left at its previous value (the
+ * switch default does nothing). */
+static void per_sched_enable(fh_otg_hcd_t * hcd, uint16_t fr_list_en)
+{
+
+	hcfg_data_t hcfg;
+
+	hcfg.d32 = FH_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
+
+	if (hcfg.b.perschedena) {
+		/* already enabled */
+		return;
+	}
+
+	FH_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
+		       hcd->frame_list_dma);
+
+	switch (fr_list_en) {
+	case 64:
+		hcfg.b.frlisten = 3;
+		break;
+	case 32:
+		hcfg.b.frlisten = 2;
+		break;
+	case 16:
+		hcfg.b.frlisten = 1;
+		break;
+	case 8:
+		hcfg.b.frlisten = 0;
+		break;
+	default:
+		break;
+	}
+
+	hcfg.b.perschedena = 1;
+
+	FH_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
+	FH_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+
+}
+
+/* Clears HCFG.perschedena to stop the core's periodic scheduling;
+ * no-op when the schedule is already disabled. */
+static void per_sched_disable(fh_otg_hcd_t * hcd)
+{
+	hcfg_data_t hcfg;
+
+	hcfg.d32 = FH_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
+
+	if (!hcfg.b.perschedena) {
+		/* already disabled */
+		return;
+	}
+	hcfg.b.perschedena = 0;
+
+	FH_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
+	FH_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+}
+
+/*
+ * Activates/Deactivates FrameList entries for the channel
+ * based on endpoint servicing period.
+ *
+ * @param hcd    The HCD state structure.
+ * @param qh     QH whose channel's bit is set/cleared in the FrameList.
+ * @param enable Non-zero to mark the slots active, zero to clear them.
+ */
+void update_frame_list(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh, uint8_t enable)
+{
+	uint16_t i, j, inc;
+	fh_hc_t *hc = NULL;
+
+	if (!qh->channel) {
+		FH_ERROR("qh->channel = %p", qh->channel);
+		return;
+	}
+
+	if (!hcd) {
+		FH_ERROR("------hcd = %p", hcd);
+		return;
+	}
+
+	if (!hcd->frame_list) {
+		FH_ERROR("-------hcd->frame_list = %p", hcd->frame_list);
+		return;
+	}
+
+	hc = qh->channel;
+	inc = frame_incr_val(qh);
+	/* ISOC starts from its scheduled slot; INTR marks from slot 0. */
+	if (qh->ep_type == UE_ISOCHRONOUS)
+		i = frame_list_idx(qh->sched_frame);
+	else
+		i = 0;
+
+	/* Walk the FrameList with the servicing stride, toggling this
+	 * channel's bit in every visited slot until we wrap to start. */
+	j = i;
+	do {
+		if (enable)
+			hcd->frame_list[j] |= (1 << hc->hc_num);
+		else
+			hcd->frame_list[j] &= ~(1 << hc->hc_num);
+		j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
+	}
+	while (j != i);
+	if (!enable)
+		return;
+	/* Build the channel's per-microframe schedule mask; NOTE(review):
+	 * exact schinfo semantics assumed from the core programming
+	 * model — confirm against the controller databook. */
+	hc->schinfo = 0;
+	if (qh->channel->speed == FH_OTG_EP_SPEED_HIGH) {
+		j = 1;
+		/* TODO - check this */
+		inc = (8 + qh->interval - 1) / qh->interval;
+		for (i = 0; i < inc; i++) {
+			hc->schinfo |= j;
+			j = j << qh->interval;
+		}
+	} else {
+		hc->schinfo = 0xff;
+	}
+}
+
+#if 1
+/* Debug helper: dumps the FrameList contents in hex, 8 entries per row. */
+void dump_frame_list(fh_otg_hcd_t * hcd)
+{
+	int i = 0;
+	FH_PRINTF("--FRAME LIST (hex) --\n");
+	for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
+		FH_PRINTF("%x\t", hcd->frame_list[i]);
+		/* Break after every 8th entry; the old test
+		 * "!(i % 8) && i" broke the line one entry too late,
+		 * so the first row held 9 values. */
+		if ((i % 8) == 7)
+			FH_PRINTF("\n");
+	}
+	FH_PRINTF("\n----\n");
+
+}
+#endif
+
+/* Detaches the host channel from the QH: adjusts the non-periodic
+ * channel count or clears the QH's FrameList bits, returns the channel
+ * to the free list, and wipes the QH's descriptor list. */
+static void release_channel_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	fh_hc_t *hc = qh->channel;
+	if (fh_qh_is_non_per(qh))
+		hcd->non_periodic_channels--;
+	else
+		update_frame_list(hcd, qh, 0);
+
+	/*
+	 * The condition is added to prevent double cleanup try in case of device
+	 * disconnect. See channel cleanup in fh_otg_hcd_disconnect_cb().
+	 */
+	if (hc->qh) {
+		fh_otg_hc_cleanup(hcd->core_if, hc);
+		FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
+		hc->qh = NULL;
+	}
+
+	qh->channel = NULL;
+	qh->ntd = 0;
+
+	if (qh->desc_list) {
+		fh_memset(qh->desc_list, 0x00,
+			  sizeof(fh_otg_host_dma_desc_t) * max_desc_num(qh));
+	}
+}
+
+/**
+ * Initializes a QH structure's Descriptor DMA related members.
+ * Allocates memory for descriptor list.
+ * On first periodic QH, allocates memory for FrameList
+ * and enables periodic scheduling.
+ *
+ * Split transactions are rejected: Descriptor DMA does not support them.
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param qh The QH to init.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int fh_otg_hcd_qh_init_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	int retval = 0;
+
+	if (qh->do_split) {
+		FH_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
+		return -1;
+	}
+
+	retval = desc_list_alloc(qh);
+
+	if ((retval == 0)
+	    && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
+		/* The FrameList is shared by all periodic QHs; it is
+		 * created once, together with the schedule enable. */
+		if (!hcd->frame_list) {
+			retval = frame_list_alloc(hcd);
+			/* Enable periodic schedule on first periodic QH */
+			if (retval == 0)
+				per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
+		}
+	}
+
+	qh->ntd = 0;
+
+	return retval;
+}
+
+/**
+ * Frees descriptor list memory associated with the QH.
+ * If QH is periodic and the last, frees FrameList memory
+ * and disables periodic scheduling.
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param qh The QH to free.
+ */
+void fh_otg_hcd_qh_free_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	desc_list_free(qh);
+
+	/*
+	 * Channel still assigned due to some reasons.
+	 * Seen on Isoc URB dequeue. Channel halted but no subsequent
+	 * ChHalted interrupt to release the channel. Afterwards
+	 * when it comes here from endpoint disable routine
+	 * channel remains assigned.
+	 */
+	if (qh->channel)
+		release_channel_ddma(hcd, qh);
+
+	if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
+	    && !hcd->periodic_channels && hcd->frame_list) {
+
+		per_sched_disable(hcd);
+		frame_list_free(hcd);
+	}
+}
+
+/* Maps a FrameList index to the QH's starting descriptor index. For HS
+ * each FrameList slot owns a set of 8 descriptors (8-aligned index);
+ * for FS/LS the mapping is a one-to-one wrap over the list size. */
+static uint8_t frame_to_desc_idx(fh_otg_qh_t * qh, uint16_t frame_idx)
+{
+	if (qh->dev_speed == FH_OTG_EP_SPEED_HIGH) {
+		/*
+		 * Descriptor set(8 descriptors) index
+		 * which is 8-aligned.
+		 */
+		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
+	} else {
+		return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
+	}
+}
+
+/*
+ * Determine starting frame for Isochronous transfer.
+ * Few frames skipped to prevent race condition with HC.
+ *
+ * Returns the full frame number. The return type is uint16_t because
+ * frame numbers range up to FH_HFNUM_MAX_FRNUM (14 bits); the previous
+ * uint8_t return silently truncated qh->sched_frame in the caller.
+ *
+ * @param skip_frames Out: number of (micro)frames skipped.
+ */
+static uint16_t calc_starting_frame(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh,
+				    uint8_t * skip_frames)
+{
+	uint16_t frame = 0;
+	hcd->frame_number = fh_otg_hcd_get_frame_number(hcd);
+
+	/* sched_frame is always frame number(not uFrame) both in FS and HS !! */
+
+	/*
+	 * skip_frames is used to limit activated descriptors number
+	 * to avoid the situation when HC services the last activated
+	 * descriptor firstly.
+	 * Example for FS:
+	 * Current frame is 1, scheduled frame is 3. Since HC always fetches the descriptor
+	 * corresponding to curr_frame+1, the descriptor corresponding to frame 2
+	 * will be fetched. If the number of descriptors is max=64 (or greather) the
+	 * list will be fully programmed with Active descriptors and it is possible
+	 * case(rare) that the latest descriptor(considering rollback) corresponding
+	 * to frame 2 will be serviced first. HS case is more probable because, in fact,
+	 * up to 11 uframes(16 in the code) may be skipped.
+	 */
+	if (qh->dev_speed == FH_OTG_EP_SPEED_HIGH) {
+		/*
+		 * Consider uframe counter also, to start xfer asap.
+		 * If half of the frame elapsed skip 2 frames otherwise
+		 * just 1 frame.
+		 * Starting descriptor index must be 8-aligned, so
+		 * if the current frame is near to complete the next one
+		 * is skipped as well.
+		 */
+
+		if (fh_micro_frame_num(hcd->frame_number) >= 5) {
+			*skip_frames = 2 * 8;
+			frame = fh_frame_num_inc(hcd->frame_number, *skip_frames);
+		} else {
+			*skip_frames = 1 * 8;
+			frame = fh_frame_num_inc(hcd->frame_number, *skip_frames);
+		}
+
+		frame = fh_full_frame_num(frame);
+	} else {
+		/*
+		 * Two frames are skipped for FS - the current and the next.
+		 * But for descriptor programming, 1 frame(descriptor) is enough,
+		 * see example above.
+		 */
+		*skip_frames = 1;
+		frame = fh_frame_num_inc(hcd->frame_number, 2);
+	}
+
+	return frame;
+}
+
+/*
+ * Calculate initial descriptor index for isochronous transfer
+ * based on scheduled frame. Sets qh->td_first/td_last accordingly.
+ *
+ * @return Number of (micro)frames skipped (fits uint8_t; at most 16).
+ */
+static uint8_t recalc_initial_desc_idx(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	uint16_t frame = 0, fr_idx, fr_idx_tmp;
+	uint8_t skip_frames = 0;
+	/*
+	 * With current ISOC processing algorithm the channel is being
+	 * released when no more QTDs in the list(qh->ntd == 0).
+	 * Thus this function is called only when qh->ntd == 0 and qh->channel == 0.
+	 *
+	 * So qh->channel != NULL branch is not used and just not removed from the
+	 * source file. It is required for another possible approach which is,
+	 * do not disable and release the channel when ISOC session completed,
+	 * just move QH to inactive schedule until new QTD arrives.
+	 * On new QTD, the QH moved back to 'ready' schedule,
+	 * starting frame and therefore starting desc_index are recalculated.
+	 * In this case channel is released only on ep_disable.
+	 */
+
+	/* Calculate starting descriptor index. For INTERRUPT endpoint it is always 0. */
+	if (qh->channel) {
+		frame = calc_starting_frame(hcd, qh, &skip_frames);
+		/*
+		 * Calculate initial descriptor index based on FrameList current bitmap
+		 * and servicing period.
+		 */
+		fr_idx_tmp = frame_list_idx(frame);
+		fr_idx =
+		    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
+		     fr_idx_tmp)
+		    % frame_incr_val(qh);
+		fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
+	} else {
+		qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
+		fr_idx = frame_list_idx(qh->sched_frame);
+	}
+
+	qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);
+
+	return skip_frames;
+}
+
+#define ISOC_URB_GIVEBACK_ASAP
+
+#define MAX_ISOC_XFER_SIZE_FS 1023
+#define MAX_ISOC_XFER_SIZE_HS 3072
+#define DESCNUM_THRESHOLD 4
+
+/**
+ * Fills ISOC DMA descriptors for all queued-but-unprogrammed frames of
+ * the QH's QTDs, up to the per-interval descriptor budget (ntd_max),
+ * and sets IOC bits so the core interrupts at the right points.
+ *
+ * @param hcd         The HCD state structure.
+ * @param qh          ISOC QH whose descriptor list is programmed.
+ * @param skip_frames Frames skipped by calc_starting_frame() (used to
+ *                    shrink the budget on a fresh session).
+ */
+static void init_isoc_dma_desc(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh,
+			       uint8_t skip_frames)
+{
+	struct fh_otg_hcd_iso_packet_desc *frame_desc;
+	fh_otg_qtd_t *qtd;
+	fh_otg_host_dma_desc_t *dma_desc;
+	uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;
+
+	idx = qh->td_last;
+	inc = qh->interval;
+	n_desc = 0;
+
+	ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
+	if (skip_frames && !qh->channel)
+		ntd_max = ntd_max - skip_frames / qh->interval;
+
+	max_xfer_size =
+	    (qh->dev_speed ==
+	     FH_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
+	    MAX_ISOC_XFER_SIZE_FS;
+
+	FH_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
+		while ((qh->ntd < ntd_max)
+		       && (qtd->isoc_frame_index_last <
+			   qtd->urb->packet_count)) {
+
+			dma_desc = &qh->desc_list[idx];
+			fh_memset(dma_desc, 0x00, sizeof(fh_otg_host_dma_desc_t));
+
+			frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
+
+			/* Clamp to the endpoint-speed transfer maximum. */
+			if (frame_desc->length > max_xfer_size)
+				qh->n_bytes[idx] = max_xfer_size;
+			else
+				qh->n_bytes[idx] = frame_desc->length;
+			dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
+			dma_desc->status.b_isoc.a = 1;
+			dma_desc->status.b_isoc.sts = 0;
+
+			dma_desc->buf = qtd->urb->dma + frame_desc->offset;
+
+			qh->ntd++;
+
+			qtd->isoc_frame_index_last++;
+
+#ifdef ISOC_URB_GIVEBACK_ASAP
+			/*
+			 * Set IOC for each descriptor corresponding to the
+			 * last frame of the URB.
+			 */
+			if (qtd->isoc_frame_index_last ==
+			    qtd->urb->packet_count)
+				dma_desc->status.b_isoc.ioc = 1;
+
+#endif
+			idx = desclist_idx_inc(idx, inc, qh->dev_speed);
+			n_desc++;
+
+		}
+		qtd->in_process = 1;
+	}
+
+	qh->td_last = idx;
+
+#ifdef ISOC_URB_GIVEBACK_ASAP
+	/* Set IOC for the last descriptor if descriptor list is full */
+	if (qh->ntd == ntd_max) {
+		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
+		qh->desc_list[idx].status.b_isoc.ioc = 1;
+	}
+#else
+	/*
+	 * Set IOC bit only for one descriptor.
+	 * Always try to be ahead of HW processing,
+	 * i.e. on IOC generation driver activates next descriptors but
+	 * core continues to process descriptors followed the one with IOC set.
+	 */
+
+	if (n_desc > DESCNUM_THRESHOLD) {
+		/*
+		 * Move IOC "up". Required even if there is only one QTD
+		 * in the list, cause QTDs migth continue to be queued,
+		 * but during the activation it was only one queued.
+		 * Actually more than one QTD might be in the list if this function called
+		 * from XferCompletion - QTDs was queued during HW processing of the previous
+		 * descriptor chunk.
+		 */
+		/* Fixed: this branch called fh_desclist_idx_dec(), which
+		 * does not exist; the local helper is desclist_idx_dec(). */
+		idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
+	} else {
+		/*
+		 * Set the IOC for the latest descriptor
+		 * if either number of descriptor is not greather than threshold
+		 * or no more new descriptors activated.
+		 */
+		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
+	}
+
+	qh->desc_list[idx].status.b_isoc.ioc = 1;
+#endif
+}
+
+/*
+ * Builds the descriptor chain for a Control/Bulk/Interrupt transfer:
+ * walks the QH's QTD list, splitting each QTD's buffer into
+ * MAX_DMA_DESC_SIZE-bounded chunks (IN lengths rounded up to a whole
+ * number of max-packets), then marks the final descriptor with IOC/EOL.
+ */
+static void init_non_isoc_dma_desc(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+
+	fh_hc_t *hc;
+	fh_otg_host_dma_desc_t *dma_desc;
+	fh_otg_qtd_t *qtd;
+	int num_packets, len, n_desc = 0;
+
+	hc = qh->channel;
+
+	/*
+	 * Start with hc->xfer_buff initialized in
+	 * assign_and_init_hc(), then if SG transfer consists of multiple URBs,
+	 * this pointer re-assigned to the buffer of the currently processed QTD.
+	 * For non-SG request there is always one QTD active.
+	 */
+
+	FH_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
+
+		if (n_desc) {
+			/* SG request - more than 1 QTDs */
+			hc->xfer_buff = (uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
+			hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
+		}
+
+		qtd->n_desc = 0;
+
+		do {
+			dma_desc = &qh->desc_list[n_desc];
+			len = hc->xfer_len;
+
+			/* Clamp so the chunk stays a multiple of max_packet. */
+			if (len > MAX_DMA_DESC_SIZE)
+				len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;
+
+			if (hc->ep_is_in) {
+				if (len > 0) {
+					num_packets = (len + hc->max_packet - 1) / hc->max_packet;
+				} else {
+					/* Need 1 packet for transfer length of 0. */
+					num_packets = 1;
+				}
+				/* Always program an integral # of max packets for IN transfers. */
+				len = num_packets * hc->max_packet;
+			}
+
+			dma_desc->status.b.n_bytes = len;
+
+			qh->n_bytes[n_desc] = len;
+
+			if ((qh->ep_type == UE_CONTROL)
+			    && (qtd->control_phase == FH_OTG_CONTROL_SETUP))
+				dma_desc->status.b.sup = 1;	/* Setup Packet */
+
+			dma_desc->status.b.a = 1;	/* Active descriptor */
+			dma_desc->status.b.sts = 0;
+
+			dma_desc->buf =
+			    ((unsigned long)hc->xfer_buff & 0xffffffff);
+
+			/*
+			 * Last descriptor(or single) of IN transfer
+			 * with actual size less than MaxPacket.
+			 */
+			if (len > hc->xfer_len) {
+				hc->xfer_len = 0;
+			} else {
+				hc->xfer_buff += len;
+				hc->xfer_len -= len;
+			}
+
+			qtd->n_desc++;
+			n_desc++;
+		}
+		while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));
+
+
+		qtd->in_process = 1;
+
+		/* Control transfers program one QTD (one phase) at a time. */
+		if (qh->ep_type == UE_CONTROL)
+			break;
+
+		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
+			break;
+
+		if (qh->ep_is_in && (qh->ep_type == UE_BULK))
+			break;
+	}
+
+	if (n_desc) {
+		/* Request Transfer Complete interrupt for the last descriptor */
+		qh->desc_list[n_desc - 1].status.b.ioc = 1;
+		/* End of List indicator */
+		qh->desc_list[n_desc - 1].status.b.eol = 1;
+
+		hc->ntd = n_desc;
+	}
+}
+
+/**
+ * For Control and Bulk endpoints initializes descriptor list
+ * and starts the transfer.
+ *
+ * For Interrupt and Isochronous endpoints initializes descriptor list
+ * then updates FrameList, marking appropriate entries as active.
+ * In case of Isochronous, the starting descriptor index is calculated based
+ * on the scheduled frame, but only on the first transfer descriptor within a session.
+ * Then starts the transfer via enabling the channel.
+ * For Isochronous endpoint the channel is not halted on XferComplete
+ * interrupt so remains assigned to the endpoint(QH) until session is done.
+ *
+ * A host channel must already be assigned to the QH (qh->channel).
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param qh The QH to start.
+ */
+void fh_otg_hcd_start_xfer_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	/* Channel is already assigned */
+	fh_hc_t *hc = qh->channel;
+	uint8_t skip_frames = 0;
+
+	switch (hc->ep_type) {
+	case FH_OTG_EP_TYPE_CONTROL:
+	case FH_OTG_EP_TYPE_BULK:
+		init_non_isoc_dma_desc(hcd, qh);
+
+		fh_otg_hc_start_transfer_ddma(hcd->core_if, hc);
+		break;
+	case FH_OTG_EP_TYPE_INTR:
+		init_non_isoc_dma_desc(hcd, qh);
+
+		update_frame_list(hcd, qh, 1);
+
+		fh_otg_hc_start_transfer_ddma(hcd->core_if, hc);
+		break;
+	case FH_OTG_EP_TYPE_ISOC:
+
+		/* Only a fresh session (no descriptors queued yet) needs
+		 * the starting descriptor index recalculated. */
+		if (!qh->ntd)
+			skip_frames = recalc_initial_desc_idx(hcd, qh);
+
+		init_isoc_dma_desc(hcd, qh, skip_frames);
+
+		if (!hc->xfer_started) {
+
+			update_frame_list(hcd, qh, 1);
+
+			/*
+			 * Always set to max, instead of actual size.
+			 * Otherwise ntd will be changed with
+			 * channel being enabled. Not recommended.
+			 *
+			 */
+			hc->ntd = max_desc_num(qh);
+			/* Enable channel only once for ISOC */
+			fh_otg_hc_start_transfer_ddma(hcd->core_if, hc);
+		}
+
+		break;
+	default:
+
+		break;
+	}
+}
+
+static void complete_isoc_xfer_ddma(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_halt_status_e halt_status)
+{
+ struct fh_otg_hcd_iso_packet_desc *frame_desc;
+ fh_otg_qtd_t *qtd, *qtd_tmp;
+ fh_otg_qh_t *qh;
+ fh_otg_host_dma_desc_t *dma_desc;
+ uint16_t idx, remain;
+ uint8_t urb_compl;
+
+ qh = hc->qh;
+ idx = qh->td_first;
+
+ if (hc->halt_status == FH_OTG_HC_XFER_URB_DEQUEUE) {
+ FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
+ qtd->in_process = 0;
+ return;
+ } else if ((halt_status == FH_OTG_HC_XFER_AHB_ERR) ||
+ (halt_status == FH_OTG_HC_XFER_BABBLE_ERR)) {
+ /*
+ * Channel is halted in these error cases.
+ * Considered as serious issues.
+ * Complete all URBs marking all frames as failed,
+		 * irrespective of whether some of the descriptors (frames) succeeded or not.
+ * Pass error code to completion routine as well, to
+ * update urb->status, some of class drivers might use it to stop
+		 * queueing transfer requests.
+ */
+ int err = (halt_status == FH_OTG_HC_XFER_AHB_ERR)
+ ? (-FH_E_IO)
+ : (-FH_E_OVERFLOW);
+
+ FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
+ for (idx = 0; idx < qtd->urb->packet_count; idx++) {
+ frame_desc = &qtd->urb->iso_descs[idx];
+ frame_desc->status = err;
+ }
+ hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
+ fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
+ }
+ return;
+ }
+
+ FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
+
+ if (!qtd->in_process)
+ break;
+
+ urb_compl = 0;
+
+ do {
+
+ dma_desc = &qh->desc_list[idx];
+
+ frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
+ remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;
+
+ if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
+ /*
+ * XactError or, unable to complete all the transactions
+ * in the scheduled micro-frame/frame,
+ * both indicated by DMA_DESC_STS_PKTERR.
+ */
+ qtd->urb->error_count++;
+ frame_desc->actual_length = qh->n_bytes[idx] - remain;
+ frame_desc->status = -FH_E_PROTOCOL;
+ } else {
+ /* Success */
+
+ frame_desc->actual_length = qh->n_bytes[idx] - remain;
+ frame_desc->status = 0;
+ }
+
+ if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
+ /*
+ * urb->status is not used for isoc transfers here.
+ * The individual frame_desc status are used instead.
+ */
+
+ hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
+ fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
+
+ /*
+ * This check is necessary because urb_dequeue can be called
+ * from urb complete callback(sound driver example).
+ * All pending URBs are dequeued there, so no need for
+ * further processing.
+ */
+ if (hc->halt_status == FH_OTG_HC_XFER_URB_DEQUEUE) {
+ return;
+ }
+
+ urb_compl = 1;
+
+ }
+
+ qh->ntd--;
+
+ /* Stop if IOC requested descriptor reached */
+ if (dma_desc->status.b_isoc.ioc) {
+ idx = desclist_idx_inc(idx, qh->interval, hc->speed);
+ goto stop_scan;
+ }
+
+ idx = desclist_idx_inc(idx, qh->interval, hc->speed);
+
+ if (urb_compl)
+ break;
+ }
+ while (idx != qh->td_first);
+ }
+stop_scan:
+ qh->td_first = idx;
+}
+
+uint8_t update_non_isoc_urb_state_ddma(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_qtd_t * qtd,
+ fh_otg_host_dma_desc_t * dma_desc,
+ fh_otg_halt_status_e halt_status,
+ uint32_t n_bytes, uint8_t * xfer_done)
+{
+
+ uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
+ fh_otg_hcd_urb_t *urb = qtd->urb;
+
+ if (halt_status == FH_OTG_HC_XFER_AHB_ERR) {
+ urb->status = -FH_E_IO;
+ return 1;
+ }
+ if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
+ switch (halt_status) {
+ case FH_OTG_HC_XFER_STALL:
+ urb->status = -FH_E_PIPE;
+ break;
+ case FH_OTG_HC_XFER_BABBLE_ERR:
+ urb->status = -FH_E_OVERFLOW;
+ break;
+ case FH_OTG_HC_XFER_XACT_ERR:
+ urb->status = -FH_E_PROTOCOL;
+ break;
+ default:
+ FH_ERROR("%s: Unhandled descriptor error status (%d)\n", __func__,
+ halt_status);
+ break;
+ }
+ return 1;
+ }
+
+ if (dma_desc->status.b.a == 1) {
+ FH_DEBUGPL(DBG_HCDV,
+ "Active descriptor encountered on channel %d\n",
+ hc->hc_num);
+ return 0;
+ }
+
+ if (hc->ep_type == FH_OTG_EP_TYPE_CONTROL) {
+ if (qtd->control_phase == FH_OTG_CONTROL_DATA) {
+ urb->actual_length += n_bytes - remain;
+ if (remain || urb->actual_length >= urb->length) {
+ /*
+ * For Control Data stage do not set urb->status=0 to prevent
+ * URB callback. Set it when Status phase done. See below.
+ */
+ *xfer_done = 1;
+ }
+
+ } else if (qtd->control_phase == FH_OTG_CONTROL_STATUS) {
+ urb->status = 0;
+ *xfer_done = 1;
+ }
+ /* No handling for SETUP stage */
+ } else {
+ /* BULK and INTR */
+ urb->actual_length += n_bytes - remain;
+ if (remain || urb->actual_length >= urb->length) {
+ urb->status = 0;
+ *xfer_done = 1;
+ }
+ }
+
+ return 0;
+}
+
+static void complete_non_isoc_xfer_ddma(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_halt_status_e halt_status)
+{
+ fh_otg_hcd_urb_t *urb = NULL;
+ fh_otg_qtd_t *qtd, *qtd_tmp;
+ fh_otg_qh_t *qh;
+ fh_otg_host_dma_desc_t *dma_desc;
+ uint32_t n_bytes, n_desc, i, tmp_desc;
+ uint8_t failed = 0, xfer_done;
+
+ n_desc = 0;
+
+ qh = hc->qh;
+
+ if (hc->halt_status == FH_OTG_HC_XFER_URB_DEQUEUE) {
+ FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
+ qtd->in_process = 0;
+ }
+ return;
+ }
+
+ FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
+
+ urb = qtd->urb;
+
+ n_bytes = 0;
+ xfer_done = 0;
+ tmp_desc = qtd->n_desc;
+
+ for (i = 0; i < tmp_desc; i++) {
+ dma_desc = &qh->desc_list[n_desc];
+
+ n_bytes = qh->n_bytes[n_desc];
+
+ failed =
+ update_non_isoc_urb_state_ddma(hcd, hc, qtd,
+ dma_desc,
+ halt_status, n_bytes,
+ &xfer_done);
+
+ if (failed
+ || (xfer_done
+ && (urb->status != -FH_E_IN_PROGRESS))) {
+
+ hcd->fops->complete(hcd, urb->priv, urb,
+ urb->status);
+ fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
+
+ if (failed)
+ goto stop_scan;
+ } else if (qh->ep_type == UE_CONTROL) {
+ if (qtd->control_phase == FH_OTG_CONTROL_SETUP) {
+ if (urb->length > 0) {
+ qtd->control_phase = FH_OTG_CONTROL_DATA;
+ } else {
+ qtd->control_phase = FH_OTG_CONTROL_STATUS;
+ }
+ FH_DEBUGPL(DBG_HCDV, " Control setup transaction done\n");
+ } else if (qtd->control_phase == FH_OTG_CONTROL_DATA) {
+ if (xfer_done) {
+ qtd->control_phase = FH_OTG_CONTROL_STATUS;
+ FH_DEBUGPL(DBG_HCDV, " Control data transfer done\n");
+ } else if (i + 1 == tmp_desc) {
+ /*
+ * Last descriptor for Control data stage which is
+ * not completed yet.
+ */
+ fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+ }
+ }
+ }
+
+ n_desc++;
+ if (n_desc >= tmp_desc)
+ break;
+ }
+
+ }
+
+stop_scan:
+
+ if (qh->ep_type != UE_CONTROL) {
+ /*
+ * Resetting the data toggle for bulk
+ * and interrupt endpoints in case of stall. See handle_hc_stall_intr()
+ */
+ if (halt_status == FH_OTG_HC_XFER_STALL)
+ qh->data_toggle = FH_OTG_HC_PID_DATA0;
+ else
+ fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+ }
+
+ if (halt_status == FH_OTG_HC_XFER_COMPLETE) {
+ hcint_data_t hcint;
+ hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+ if (hcint.b.nyet) {
+ /*
+ * Got a NYET on the last transaction of the transfer. It
+ * means that the endpoint should be in the PING state at the
+ * beginning of the next transfer.
+ */
+ qh->ping_state = 1;
+ clear_hc_int(hc_regs, nyet);
+ }
+
+ }
+
+}
+
+/**
+ * This function is called from interrupt handlers.
+ * Scans the descriptor list, updates URB's status and
+ * calls completion routine for the URB if it's done.
+ * Releases the channel to be used by other transfers.
+ * In case of Isochronous endpoint the channel is not halted until
+ * the end of the session, i.e. QTD list is empty.
+ * If periodic channel released the FrameList is updated accordingly.
+ *
+ * Calls transaction selection routines to activate pending transfers.
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param hc Host channel, the transfer is completed on.
+ * @param hc_regs Host channel registers.
+ * @param halt_status Reason the channel is being halted,
+ * or just XferComplete for isochronous transfer
+ */
+void fh_otg_hcd_complete_xfer_ddma(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_halt_status_e halt_status)
+{
+ uint8_t continue_isoc_xfer = 0;
+ fh_otg_transaction_type_e tr_type;
+ fh_otg_qh_t *qh = hc->qh;
+
+ if (hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
+
+ complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);
+
+ /* Release the channel if halted or session completed */
+ if (halt_status != FH_OTG_HC_XFER_COMPLETE ||
+ FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
+
+ /* Halt the channel if session completed */
+ if (halt_status == FH_OTG_HC_XFER_COMPLETE) {
+ fh_otg_hc_halt(hcd->core_if, hc, halt_status);
+ }
+
+ release_channel_ddma(hcd, qh);
+ fh_otg_hcd_qh_remove(hcd, qh);
+ } else {
+ /* Keep in assigned schedule to continue transfer */
+ FH_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
+ &qh->qh_list_entry);
+ continue_isoc_xfer = 1;
+
+ }
+ /** @todo Consider the case when period exceeds FrameList size.
+ * Frame Rollover interrupt should be used.
+ */
+ } else {
+ /* Scan descriptor list to complete the URB(s), then release the channel */
+ complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);
+
+ release_channel_ddma(hcd, qh);
+ fh_otg_hcd_qh_remove(hcd, qh);
+
+ if (!FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
+ /* Add back to inactive non-periodic schedule on normal completion */
+ fh_otg_hcd_qh_add(hcd, qh);
+ }
+
+ }
+ tr_type = fh_otg_hcd_select_transactions(hcd);
+ if (tr_type != FH_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
+ if (continue_isoc_xfer) {
+ if (tr_type == FH_OTG_TRANSACTION_NONE) {
+ tr_type = FH_OTG_TRANSACTION_PERIODIC;
+ } else if (tr_type == FH_OTG_TRANSACTION_NON_PERIODIC) {
+ tr_type = FH_OTG_TRANSACTION_ALL;
+ }
+ }
+ fh_otg_hcd_queue_transactions(hcd, tr_type);
+ }
+}
+
+#endif /* FH_DEVICE_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_if.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_if.h
new file mode 100644
index 00000000..55a0d658
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_if.h
@@ -0,0 +1,412 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_if.h $
+ * $Revision: #12 $
+ * $Date: 2011/10/26 $
+ * $Change: 1873028 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_DEVICE_ONLY
+#ifndef __FH_HCD_IF_H__
+#define __FH_HCD_IF_H__
+
+#include "fh_otg_core_if.h"
+
+/** @file
+ * This file defines FH_OTG HCD Core API.
+ */
+
+struct fh_otg_hcd;
+typedef struct fh_otg_hcd fh_otg_hcd_t;
+
+struct fh_otg_hcd_urb;
+typedef struct fh_otg_hcd_urb fh_otg_hcd_urb_t;
+
+/** @name HCD Function Driver Callbacks */
+/** @{ */
+
+/** This function is called whenever core switches to host mode. */
+typedef int (*fh_otg_hcd_start_cb_t) (fh_otg_hcd_t * hcd);
+
+/** This function is called when device has been disconnected */
+typedef int (*fh_otg_hcd_disconnect_cb_t) (fh_otg_hcd_t * hcd);
+
+/** Wrapper provides this function to HCD to core, so it can get hub information to which device is connected */
+typedef int (*fh_otg_hcd_hub_info_from_urb_cb_t) (fh_otg_hcd_t * hcd,
+ void *urb_handle,
+ uint32_t * hub_addr,
+ uint32_t * port_addr);
+/** Via this function HCD core gets device speed */
+typedef int (*fh_otg_hcd_speed_from_urb_cb_t) (fh_otg_hcd_t * hcd,
+ void *urb_handle);
+
+/** This function is called when urb is completed */
+typedef int (*fh_otg_hcd_complete_urb_cb_t) (fh_otg_hcd_t * hcd,
+ void *urb_handle,
+ fh_otg_hcd_urb_t * fh_otg_urb,
+ int32_t status);
+
+/** Via this function HCD core gets b_hnp_enable parameter */
+typedef int (*fh_otg_hcd_get_b_hnp_enable) (fh_otg_hcd_t * hcd);
+
+struct fh_otg_hcd_function_ops {
+ fh_otg_hcd_start_cb_t start;
+ fh_otg_hcd_disconnect_cb_t disconnect;
+ fh_otg_hcd_hub_info_from_urb_cb_t hub_info;
+ fh_otg_hcd_speed_from_urb_cb_t speed;
+ fh_otg_hcd_complete_urb_cb_t complete;
+ fh_otg_hcd_get_b_hnp_enable get_b_hnp_enable;
+};
+/** @} */
+
+/** @name HCD Core API */
+/** @{ */
+/** This function allocates fh_otg_hcd structure and returns pointer on it. */
+extern fh_otg_hcd_t *fh_otg_hcd_alloc_hcd(void);
+
+/** This function should be called to initiate HCD Core.
+ *
+ * @param hcd The HCD
+ * @param core_if The FH_OTG Core
+ *
+ * Returns -FH_E_NO_MEMORY if there is not enough memory.
+ * Returns 0 on success
+ */
+extern int fh_otg_hcd_init(fh_otg_hcd_t * hcd, fh_otg_core_if_t * core_if);
+
+/** Frees HCD
+ *
+ * @param hcd The HCD
+ */
+extern void fh_otg_hcd_remove(fh_otg_hcd_t * hcd);
+
+/** This function should be called on every hardware interrupt.
+ *
+ * @param fh_otg_hcd The HCD
+ *
+ * Returns non zero if interrupt is handled
+ * Return 0 if interrupt is not handled
+ */
+extern int32_t fh_otg_hcd_handle_intr(fh_otg_hcd_t * fh_otg_hcd);
+
+/**
+ * Returns private data set by
+ * fh_otg_hcd_set_priv_data function.
+ *
+ * @param hcd The HCD
+ */
+extern void *fh_otg_hcd_get_priv_data(fh_otg_hcd_t * hcd);
+
+/**
+ * Set private data.
+ *
+ * @param hcd The HCD
+ * @param priv_data pointer to be stored in private data
+ */
+extern void fh_otg_hcd_set_priv_data(fh_otg_hcd_t * hcd, void *priv_data);
+
+/**
+ * This function initializes the HCD Core.
+ *
+ * @param hcd The HCD
+ * @param fops The Function Driver Operations data structure containing pointers to all callbacks.
+ *
+ * Returns -FH_E_NO_DEVICE if Core is currently is in device mode.
+ * Returns 0 on success
+ */
+extern int fh_otg_hcd_start(fh_otg_hcd_t * hcd,
+ struct fh_otg_hcd_function_ops *fops);
+
+/**
+ * Halts the FH_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ *
+ * @param hcd The HCD
+ */
+extern void fh_otg_hcd_stop(fh_otg_hcd_t * hcd);
+
+/**
+ * Handles hub class-specific requests.
+ *
+ * @param fh_otg_hcd The HCD
+ * @param typeReq Request Type
+ * @param wValue wValue from control request
+ * @param wIndex wIndex from control request
+ * @param buf data buffer
+ * @param wLength data buffer length
+ *
+ * Returns -FH_E_INVALID if invalid argument is passed
+ * Returns 0 on success
+ */
+extern int fh_otg_hcd_hub_control(fh_otg_hcd_t * fh_otg_hcd,
+ uint16_t typeReq, uint16_t wValue,
+ uint16_t wIndex, uint8_t * buf,
+ uint16_t wLength);
+
+/**
+ * Returns otg port number.
+ *
+ * @param hcd The HCD
+ */
+extern uint32_t fh_otg_hcd_otg_port(fh_otg_hcd_t * hcd);
+
+/**
+ * Returns OTG version - either 1.3 or 2.0.
+ *
+ * @param core_if The core_if structure pointer
+ */
+extern uint16_t fh_otg_get_otg_version(fh_otg_core_if_t * core_if);
+
+/**
+ * Returns 1 if currently core is acting as B host, and 0 otherwise.
+ *
+ * @param hcd The HCD
+ */
+extern uint32_t fh_otg_hcd_is_b_host(fh_otg_hcd_t * hcd);
+
+/**
+ * Returns current frame number.
+ *
+ * @param hcd The HCD
+ */
+extern int fh_otg_hcd_get_frame_number(fh_otg_hcd_t * hcd);
+
+/**
+ * Dumps hcd state.
+ *
+ * @param hcd The HCD
+ */
+extern void fh_otg_hcd_dump_state(fh_otg_hcd_t * hcd);
+
+/**
+ * Dump the average frame remaining at SOF. This can be used to
+ * determine average interrupt latency. Frame remaining is also shown for
+ * start transfer and two additional sample points.
+ * Currently this function is not implemented.
+ *
+ * @param hcd The HCD
+ */
+extern void fh_otg_hcd_dump_frrem(fh_otg_hcd_t * hcd);
+
+/**
+ * Sends LPM transaction to the local device.
+ *
+ * @param hcd The HCD
+ * @param devaddr Device Address
+ * @param hird Host initiated resume duration
+ * @param bRemoteWake Value of bRemoteWake field in LPM transaction
+ *
+ * Returns negative value if sending LPM transaction was not succeeded.
+ * Returns 0 on success.
+ */
+extern int fh_otg_hcd_send_lpm(fh_otg_hcd_t * hcd, uint8_t devaddr,
+ uint8_t hird, uint8_t bRemoteWake);
+
+/* URB interface */
+
+/**
+ * Allocates memory for fh_otg_hcd_urb structure.
+ * Allocated memory should be freed by call of FH_FREE.
+ *
+ * @param hcd The HCD
+ * @param iso_desc_count Count of ISOC descriptors
+ * @param atomic_alloc Specifies whether to perform atomic allocation.
+ */
+extern fh_otg_hcd_urb_t *fh_otg_hcd_urb_alloc(fh_otg_hcd_t * hcd,
+ int iso_desc_count,
+ int atomic_alloc);
+
+/**
+ * Set pipe information in URB.
+ *
+ * @param hcd_urb FH_OTG URB
+ * @param devaddr Device Address
+ * @param ep_num Endpoint Number
+ * @param ep_type Endpoint Type
+ * @param ep_dir Endpoint Direction
+ * @param mps Max Packet Size
+ */
+extern void fh_otg_hcd_urb_set_pipeinfo(fh_otg_hcd_urb_t * hcd_urb,
+ uint8_t devaddr, uint8_t ep_num,
+ uint8_t ep_type, uint8_t ep_dir,
+ uint16_t mps);
+
+/* Transfer flags */
+#define URB_GIVEBACK_ASAP 0x1
+#define URB_SEND_ZERO_PACKET 0x2
+
+/**
+ * Sets fh_otg_hcd_urb parameters.
+ *
+ * @param urb FH_OTG URB allocated by fh_otg_hcd_urb_alloc function.
+ * @param urb_handle Unique handle for request, this will be passed back
+ * to function driver in completion callback.
+ * @param buf The buffer for the data
+ * @param dma The DMA buffer for the data
+ * @param buflen Transfer length
+ * @param sp Buffer for setup data
+ * @param sp_dma DMA address of setup data buffer
+ * @param flags Transfer flags
+ * @param interval Polling interval for interrupt or isochronous transfers.
+ */
+extern void fh_otg_hcd_urb_set_params(fh_otg_hcd_urb_t * urb,
+ void *urb_handle, void *buf,
+ fh_dma_t dma, uint32_t buflen, void *sp,
+ fh_dma_t sp_dma, uint32_t flags,
+ uint16_t interval);
+
+/** Gets status from fh_otg_hcd_urb
+ *
+ * @param fh_otg_urb FH_OTG URB
+ */
+extern uint32_t fh_otg_hcd_urb_get_status(fh_otg_hcd_urb_t * fh_otg_urb);
+
+/** Gets actual length from fh_otg_hcd_urb
+ *
+ * @param fh_otg_urb FH_OTG URB
+ */
+extern uint32_t fh_otg_hcd_urb_get_actual_length(fh_otg_hcd_urb_t *
+ fh_otg_urb);
+
+/** Gets error count from fh_otg_hcd_urb. Only for ISOC URBs
+ *
+ * @param fh_otg_urb FH_OTG URB
+ */
+extern uint32_t fh_otg_hcd_urb_get_error_count(fh_otg_hcd_urb_t *
+ fh_otg_urb);
+
+/** Set ISOC descriptor offset and length
+ *
+ * @param fh_otg_urb FH_OTG URB
+ * @param desc_num ISOC descriptor number
+ * @param offset Offset from beginning of buffer.
+ * @param length Transaction length
+ */
+extern void fh_otg_hcd_urb_set_iso_desc_params(fh_otg_hcd_urb_t * fh_otg_urb,
+ int desc_num, uint32_t offset,
+ uint32_t length);
+
+/** Get status of ISOC descriptor, specified by desc_num
+ *
+ * @param fh_otg_urb FH_OTG URB
+ * @param desc_num ISOC descriptor number
+ */
+extern uint32_t fh_otg_hcd_urb_get_iso_desc_status(fh_otg_hcd_urb_t *
+ fh_otg_urb, int desc_num);
+
+/** Get actual length of ISOC descriptor, specified by desc_num
+ *
+ * @param fh_otg_urb FH_OTG URB
+ * @param desc_num ISOC descriptor number
+ */
+extern uint32_t fh_otg_hcd_urb_get_iso_desc_actual_length(fh_otg_hcd_urb_t *
+ fh_otg_urb,
+ int desc_num);
+
+/** Queue URB. After the transfer completes, the complete callback will be called with the URB status
+ *
+ * @param fh_otg_hcd The HCD
+ * @param fh_otg_urb FH_OTG URB
+ * @param ep_handle Out parameter for returning endpoint handle
+ * @param atomic_alloc Flag to do atomic allocation if needed
+ *
+ * Returns -FH_E_NO_DEVICE if no device is connected.
+ * Returns -FH_E_NO_MEMORY if there is not enough memory.
+ * Returns 0 on success.
+ */
+extern int fh_otg_hcd_urb_enqueue(fh_otg_hcd_t * fh_otg_hcd,
+ fh_otg_hcd_urb_t * fh_otg_urb,
+ void **ep_handle, int atomic_alloc);
+
+/** De-queue the specified URB
+ *
+ * @param fh_otg_hcd The HCD
+ * @param fh_otg_urb FH_OTG URB
+ */
+extern int fh_otg_hcd_urb_dequeue(fh_otg_hcd_t * fh_otg_hcd,
+ fh_otg_hcd_urb_t * fh_otg_urb);
+
+/** Frees resources in the FH_otg controller related to a given endpoint.
+ * Any URBs for the endpoint must already be dequeued.
+ *
+ * @param hcd The HCD
+ * @param ep_handle Endpoint handle, returned by fh_otg_hcd_urb_enqueue function
+ * @param retry Number of retries if there are queued transfers.
+ *
+ * Returns -FH_E_INVALID if invalid arguments are passed.
+ * Returns 0 on success
+ */
+extern int fh_otg_hcd_endpoint_disable(fh_otg_hcd_t * hcd, void *ep_handle,
+ int retry);
+
+/* Resets the data toggle in qh structure. This function can be called from
+ * usb_clear_halt routine.
+ *
+ * @param hcd The HCD
+ * @param ep_handle Endpoint handle, returned by fh_otg_hcd_urb_enqueue function
+ *
+ * Returns -FH_E_INVALID if invalid arguments are passed.
+ * Returns 0 on success
+ */
+extern int fh_otg_hcd_endpoint_reset(fh_otg_hcd_t * hcd, void *ep_handle);
+
+/** Returns 1 if status of specified port is changed and 0 otherwise.
+ *
+ * @param hcd The HCD
+ * @param port Port number
+ */
+extern int fh_otg_hcd_is_status_changed(fh_otg_hcd_t * hcd, int port);
+
+/** Call this function to check if bandwidth was allocated for specified endpoint.
+ * Only for ISOC and INTERRUPT endpoints.
+ *
+ * @param hcd The HCD
+ * @param ep_handle Endpoint handle
+ */
+extern int fh_otg_hcd_is_bandwidth_allocated(fh_otg_hcd_t * hcd,
+ void *ep_handle);
+
+/** Call this function to check if bandwidth was freed for specified endpoint.
+ *
+ * @param hcd The HCD
+ * @param ep_handle Endpoint handle
+ */
+extern int fh_otg_hcd_is_bandwidth_freed(fh_otg_hcd_t * hcd, void *ep_handle);
+
+/** Returns bandwidth allocated for specified endpoint in microseconds.
+ * Only for ISOC and INTERRUPT endpoints.
+ *
+ * @param hcd The HCD
+ * @param ep_handle Endpoint handle
+ */
+extern uint8_t fh_otg_hcd_get_ep_bandwidth(fh_otg_hcd_t * hcd,
+ void *ep_handle);
+
+/** @} */
+
+#endif /* __FH_HCD_IF_H__ */
+#endif /* FH_DEVICE_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_intr.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_intr.c
new file mode 100644
index 00000000..e891950f
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_intr.c
@@ -0,0 +1,2107 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_intr.c $
+ * $Revision: #94 $
+ * $Date: 2013/01/31 $
+ * $Change: 2155605 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_DEVICE_ONLY
+
+#include "fh_otg_hcd.h"
+#include "fh_otg_regs.h"
+
+/** @file
+ * This file contains the implementation of the HCD Interrupt handlers.
+ */
+
+/** This function handles interrupts for the HCD. */
+int32_t fh_otg_hcd_handle_intr(fh_otg_hcd_t * fh_otg_hcd)
+{
+ int retval = 0;
+
+ fh_otg_core_if_t *core_if = fh_otg_hcd->core_if;
+ gintsts_data_t gintsts;
+#ifdef DEBUG
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+#endif
+
+ if (fh_otg_check_haps_status(core_if) == -1 ) {
+ FH_WARN("HAPS is disconnected");
+ return retval;
+ }
+
+ /* Exit from ISR if core is hibernated */
+ if (core_if->hibernation_suspend == 1) {
+ return retval;
+ }
+ FH_SPINLOCK(fh_otg_hcd->lock);
+ /* Check if HOST Mode */
+ if (fh_otg_is_host_mode(core_if)) {
+ gintsts.d32 = fh_otg_read_core_intr(core_if);
+ if (!gintsts.d32) {
+ FH_SPINUNLOCK(fh_otg_hcd->lock);
+ return 0;
+ }
+#ifdef DEBUG
+ /* Don't print debug message in the interrupt handler on SOF */
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != FH_SOF_INTR_MASK)
+#endif
+ FH_DEBUGPL(DBG_HCD, "\n");
+#endif
+
+#ifdef DEBUG
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != FH_SOF_INTR_MASK)
+#endif
+ FH_DEBUGPL(DBG_HCD,
+ "FH OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
+ gintsts.d32);
+#endif
+
+ if (gintsts.b.sofintr) {
+ retval |= fh_otg_hcd_handle_sof_intr(fh_otg_hcd);
+ }
+ if (gintsts.b.rxstsqlvl) {
+ retval |=
+ fh_otg_hcd_handle_rx_status_q_level_intr
+ (fh_otg_hcd);
+ }
+ if (gintsts.b.nptxfempty) {
+ retval |=
+ fh_otg_hcd_handle_np_tx_fifo_empty_intr
+ (fh_otg_hcd);
+ }
+ if (gintsts.b.i2cintr) {
+ /** @todo Implement i2cintr handler. */
+ }
+ if (gintsts.b.portintr) {
+ retval |= fh_otg_hcd_handle_port_intr(fh_otg_hcd);
+ }
+ if (gintsts.b.hcintr) {
+ retval |= fh_otg_hcd_handle_hc_intr(fh_otg_hcd);
+ }
+ if (gintsts.b.ptxfempty) {
+ retval |=
+ fh_otg_hcd_handle_perio_tx_fifo_empty_intr
+ (fh_otg_hcd);
+ }
+#ifdef DEBUG
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != FH_SOF_INTR_MASK)
+#endif
+ {
+ FH_DEBUGPL(DBG_HCD,
+ "FH OTG HCD Finished Servicing Interrupts\n");
+ FH_DEBUGPL(DBG_HCDV, "FH OTG HCD gintsts=0x%08x\n",
+ FH_READ_REG32(&global_regs->gintsts));
+ FH_DEBUGPL(DBG_HCDV, "FH OTG HCD gintmsk=0x%08x\n",
+ FH_READ_REG32(&global_regs->gintmsk));
+ }
+#endif
+
+#ifdef DEBUG
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != FH_SOF_INTR_MASK)
+#endif
+ FH_DEBUGPL(DBG_HCD, "\n");
+#endif
+
+ }
+ FH_SPINUNLOCK(fh_otg_hcd->lock);
+ return retval;
+}
+
+#ifdef FH_TRACK_MISSED_SOFS
+#warning Compiling code to track missed SOFs
+#define FRAME_NUM_ARRAY_SIZE 1000
+/**
+ * This function is for debug only.
+ */
+static inline void track_missed_sofs(uint16_t curr_frame_number)
+{
+ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
+ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
+ static int frame_num_idx = 0;
+ static uint16_t last_frame_num = FH_HFNUM_MAX_FRNUM;
+ static int dumped_frame_num_array = 0;
+
+ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
+ if (((last_frame_num + 1) & FH_HFNUM_MAX_FRNUM) !=
+ curr_frame_number) {
+ frame_num_array[frame_num_idx] = curr_frame_number;
+ last_frame_num_array[frame_num_idx++] = last_frame_num;
+ }
+ } else if (!dumped_frame_num_array) {
+ int i;
+ FH_PRINTF("Frame Last Frame\n");
+ FH_PRINTF("----- ----------\n");
+ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
+ FH_PRINTF("0x%04x 0x%04x\n",
+ frame_num_array[i], last_frame_num_array[i]);
+ }
+ dumped_frame_num_array = 1;
+ }
+ last_frame_num = curr_frame_number;
+}
+#endif
+
+/**
+ * Handles the start-of-frame interrupt in host mode. Non-periodic
+ * transactions may be queued to the FH_otg controller for the current
+ * (micro)frame. Periodic transactions may be queued to the controller for the
+ * next (micro)frame.
+ */
+int32_t fh_otg_hcd_handle_sof_intr(fh_otg_hcd_t * hcd)
+{
+ hfnum_data_t hfnum;
+ fh_list_link_t *qh_entry;
+ fh_otg_qh_t *qh;
+ fh_otg_transaction_type_e tr_type;
+ gintsts_data_t gintsts = {.d32 = 0 };
+
+ hfnum.d32 =
+ FH_READ_REG32(&hcd->core_if->host_if->host_global_regs->hfnum);
+
+#ifdef DEBUG_SOF
+ FH_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
+#endif
+ hcd->frame_number = hfnum.b.frnum;
+
+#ifdef DEBUG
+ hcd->frrem_accum += hfnum.b.frrem;
+ hcd->frrem_samples++;
+#endif
+
+#ifdef FH_TRACK_MISSED_SOFS
+ track_missed_sofs(hcd->frame_number);
+#endif
+ /* Determine whether any periodic QHs should be executed. */
+ qh_entry = FH_LIST_FIRST(&hcd->periodic_sched_inactive);
+ while (qh_entry != &hcd->periodic_sched_inactive) {
+ qh = FH_LIST_ENTRY(qh_entry, fh_otg_qh_t, qh_list_entry);
+ qh_entry = qh_entry->next;
+ if (fh_frame_num_le(qh->sched_frame, hcd->frame_number)) {
+ /*
+ * Move QH to the ready list to be executed next
+ * (micro)frame.
+ */
+ FH_LIST_MOVE_HEAD(&hcd->periodic_sched_ready,
+ &qh->qh_list_entry);
+ }
+ }
+ tr_type = fh_otg_hcd_select_transactions(hcd);
+ if (tr_type != FH_OTG_TRANSACTION_NONE) {
+ fh_otg_hcd_queue_transactions(hcd, tr_type);
+ }
+
+ /* Clear interrupt */
+ gintsts.b.sofintr = 1;
+ FH_WRITE_REG32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
+ * least one packet in the Rx FIFO. The packets are moved from the FIFO to
+ * memory if the FH_otg controller is operating in Slave mode. */
+int32_t fh_otg_hcd_handle_rx_status_q_level_intr(fh_otg_hcd_t * fh_otg_hcd)
+{
+ host_grxsts_data_t grxsts;
+ fh_hc_t *hc = NULL;
+
+ FH_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
+
+ grxsts.d32 =
+ FH_READ_REG32(&fh_otg_hcd->core_if->core_global_regs->grxstsp);
+
+ hc = fh_otg_hcd->hc_ptr_array[grxsts.b.chnum];
+ if (!hc) {
+ FH_ERROR("Unable to get corresponding channel\n");
+ return 0;
+ }
+
+ /* Packet Status */
+ FH_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum);
+ FH_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt);
+ FH_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid,
+ hc->data_pid_start);
+ FH_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts);
+
+ switch (grxsts.b.pktsts) {
+ case FH_GRXSTS_PKTSTS_IN:
+ /* Read the data into the host buffer. */
+ if (grxsts.b.bcnt > 0) {
+ fh_otg_read_packet(fh_otg_hcd->core_if,
+ hc->xfer_buff, grxsts.b.bcnt);
+
+ /* Update the HC fields for the next packet received. */
+ hc->xfer_count += grxsts.b.bcnt;
+ hc->xfer_buff += grxsts.b.bcnt;
+ }
+
+ case FH_GRXSTS_PKTSTS_IN_XFER_COMP:
+ case FH_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
+ case FH_GRXSTS_PKTSTS_CH_HALTED:
+ /* Handled in interrupt, just ignore data */
+ break;
+ default:
+ FH_ERROR("RX_STS_Q Interrupt: Unknown status %d\n",
+ grxsts.b.pktsts);
+ break;
+ }
+
+ return 1;
+}
+
+/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
+ * data packets may be written to the FIFO for OUT transfers. More requests
+ * may be written to the non-periodic request queue for IN transfers. This
+ * interrupt is enabled only in Slave mode. */
+int32_t fh_otg_hcd_handle_np_tx_fifo_empty_intr(fh_otg_hcd_t * fh_otg_hcd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
+ fh_otg_hcd_queue_transactions(fh_otg_hcd,
+ FH_OTG_TRANSACTION_NON_PERIODIC);
+ return 1;
+}
+
+/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
+ * packets may be written to the FIFO for OUT transfers. More requests may be
+ * written to the periodic request queue for IN transfers. This interrupt is
+ * enabled only in Slave mode. */
+int32_t fh_otg_hcd_handle_perio_tx_fifo_empty_intr(fh_otg_hcd_t * fh_otg_hcd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
+ fh_otg_hcd_queue_transactions(fh_otg_hcd,
+ FH_OTG_TRANSACTION_PERIODIC);
+ return 1;
+}
+
+/** There are multiple conditions that can cause a port interrupt. This function
+ * determines which interrupt conditions have occurred and handles them
+ * appropriately. */
+int32_t fh_otg_hcd_handle_port_intr(fh_otg_hcd_t * fh_otg_hcd)
+{
+ int retval = 0;
+ hprt0_data_t hprt0;
+ hprt0_data_t hprt0_modify;
+
+ hprt0.d32 = FH_READ_REG32(fh_otg_hcd->core_if->host_if->hprt0);
+ hprt0_modify.d32 = FH_READ_REG32(fh_otg_hcd->core_if->host_if->hprt0);
+
+ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
+ * GINTSTS */
+
+ hprt0_modify.b.prtena = 0;
+ hprt0_modify.b.prtconndet = 0;
+ hprt0_modify.b.prtenchng = 0;
+ hprt0_modify.b.prtovrcurrchng = 0;
+
+ /* Port Connect Detected
+ * Set flag and clear if detected */
+ if (fh_otg_hcd->core_if->hibernation_suspend == 1) {
+		// Don't modify port status if we are in hibernation state
+ hprt0_modify.b.prtconndet = 1;
+ hprt0_modify.b.prtenchng = 1;
+ FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
+ hprt0.d32 = FH_READ_REG32(fh_otg_hcd->core_if->host_if->hprt0);
+ return retval;
+ }
+
+ if (hprt0.b.prtconndet) {
+		/** @todo - check if steps performed in 'else' block should be performed regardless of adp */
+ if (fh_otg_hcd->core_if->adp_enable &&
+ fh_otg_hcd->core_if->adp.vbuson_timer_started == 1) {
+ FH_PRINTF("PORT CONNECT DETECTED ----------------\n");
+ FH_TIMER_CANCEL(fh_otg_hcd->core_if->adp.vbuson_timer);
+ fh_otg_hcd->core_if->adp.vbuson_timer_started = 0;
+ /* TODO - check if this is required, as
+ * host initialization was already performed
+ * after initial ADP probing
+ */
+ /*fh_otg_hcd->core_if->adp.vbuson_timer_started = 0;
+ fh_otg_core_init(fh_otg_hcd->core_if);
+ fh_otg_enable_global_interrupts(fh_otg_hcd->core_if);
+ cil_hcd_start(fh_otg_hcd->core_if);*/
+ } else {
+ hprt0_data_t hprt0_local;
+ FH_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
+ "Port Connect Detected--\n", hprt0.d32);
+ fh_otg_hcd->flags.b.port_connect_status_change = 1;
+ fh_otg_hcd->flags.b.port_connect_status = 1;
+ hprt0_modify.b.prtconndet = 1;
+ /* PET testing */
+ if (fh_otg_hcd->core_if->otg_ver && (fh_otg_hcd->core_if->test_mode == 7)) {
+ hprt0_local.d32 = fh_otg_read_hprt0(fh_otg_hcd->core_if);
+ hprt0_local.b.prtrst = 1;
+ FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0, hprt0_local.d32);
+ fh_mdelay(60);
+ hprt0.d32 = fh_otg_read_hprt0(fh_otg_hcd->core_if);
+ hprt0.b.prtrst = 0;
+ FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
+ }
+
+ /* B-Device has connected, Delete the connection timer. */
+ FH_TIMER_CANCEL(fh_otg_hcd->conn_timer);
+ }
+ /* The Hub driver asserts a reset when it sees port connect
+ * status change flag */
+ retval |= 1;
+ }
+
+ /* Port Enable Changed
+ * Clear if detected - Set internal flag if disabled */
+ if (hprt0.b.prtenchng) {
+ FH_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Enable Changed--\n", hprt0.d32);
+ hprt0_modify.b.prtenchng = 1;
+ if (hprt0.b.prtena == 1) {
+ hfir_data_t hfir;
+ int do_reset = 0;
+ fh_otg_core_params_t *params =
+ fh_otg_hcd->core_if->core_params;
+ fh_otg_core_global_regs_t *global_regs =
+ fh_otg_hcd->core_if->core_global_regs;
+ fh_otg_host_if_t *host_if =
+ fh_otg_hcd->core_if->host_if;
+
+ /* Every time when port enables calculate
+ * HFIR.FrInterval
+ */
+ hfir.d32 = FH_READ_REG32(&host_if->host_global_regs->hfir);
+ hfir.b.frint = calc_frame_interval(fh_otg_hcd->core_if);
+ FH_WRITE_REG32(&host_if->host_global_regs->hfir, hfir.d32);
+
+ /* Check if we need to adjust the PHY clock speed for
+ * low power and adjust it */
+ if (params->host_support_fs_ls_low_power) {
+ gusbcfg_data_t usbcfg;
+
+ usbcfg.d32 =
+ FH_READ_REG32(&global_regs->gusbcfg);
+
+ if (hprt0.b.prtspd == FH_HPRT0_PRTSPD_LOW_SPEED
+ || hprt0.b.prtspd ==
+ FH_HPRT0_PRTSPD_FULL_SPEED) {
+ /*
+ * Low power
+ */
+ hcfg_data_t hcfg;
+ if (usbcfg.b.phylpwrclksel == 0) {
+ /* Set PHY low power clock select for FS/LS devices */
+ usbcfg.b.phylpwrclksel = 1;
+ FH_WRITE_REG32
+ (&global_regs->gusbcfg,
+ usbcfg.d32);
+ do_reset = 1;
+ }
+
+ hcfg.d32 =
+ FH_READ_REG32
+ (&host_if->host_global_regs->hcfg);
+
+ if (hprt0.b.prtspd ==
+ FH_HPRT0_PRTSPD_LOW_SPEED
+ && params->host_ls_low_power_phy_clk
+ ==
+ FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)
+ {
+ /* 6 MHZ */
+ FH_DEBUGPL(DBG_CIL,
+ "FS_PHY programming HCFG to 6 MHz (Low Power)\n");
+ if (hcfg.b.fslspclksel !=
+ FH_HCFG_6_MHZ) {
+ hcfg.b.fslspclksel =
+ FH_HCFG_6_MHZ;
+ FH_WRITE_REG32
+ (&host_if->host_global_regs->hcfg,
+ hcfg.d32);
+ do_reset = 1;
+ }
+ } else {
+ /* 48 MHZ */
+ FH_DEBUGPL(DBG_CIL,
+ "FS_PHY programming HCFG to 48 MHz ()\n");
+ if (hcfg.b.fslspclksel !=
+ FH_HCFG_48_MHZ) {
+ hcfg.b.fslspclksel =
+ FH_HCFG_48_MHZ;
+ FH_WRITE_REG32
+ (&host_if->host_global_regs->hcfg,
+ hcfg.d32);
+ do_reset = 1;
+ }
+ }
+ } else {
+ /*
+ * Not low power
+ */
+ if (usbcfg.b.phylpwrclksel == 1) {
+ usbcfg.b.phylpwrclksel = 0;
+ FH_WRITE_REG32
+ (&global_regs->gusbcfg,
+ usbcfg.d32);
+ do_reset = 1;
+ }
+ }
+
+ if (do_reset) {
+ FH_TASK_SCHEDULE(fh_otg_hcd->reset_tasklet);
+ }
+ }
+
+ if (!do_reset) {
+ /* Port has been enabled set the reset change flag */
+ fh_otg_hcd->flags.b.port_reset_change = 1;
+ }
+ } else {
+ fh_otg_hcd->flags.b.port_enable_change = 1;
+ }
+ retval |= 1;
+ }
+
+ /** Overcurrent Change Interrupt */
+ if (hprt0.b.prtovrcurrchng) {
+ FH_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Overcurrent Changed--\n", hprt0.d32);
+ fh_otg_hcd->flags.b.port_over_current_change = 1;
+ hprt0_modify.b.prtovrcurrchng = 1;
+ retval |= 1;
+ }
+
+ /* Clear Port Interrupts */
+ FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
+
+ return retval;
+}
+
+/** This interrupt indicates that one or more host channels has a pending
+ * interrupt. There are multiple conditions that can cause each host channel
+ * interrupt. This function determines which conditions have occurred for each
+ * host channel interrupt and handles them appropriately. */
+int32_t fh_otg_hcd_handle_hc_intr(fh_otg_hcd_t * fh_otg_hcd)
+{
+ int i;
+ int retval = 0;
+ haint_data_t haint;
+
+ /* Clear appropriate bits in HCINTn to clear the interrupt bit in
+ * GINTSTS */
+
+ haint.d32 = fh_otg_read_host_all_channels_intr(fh_otg_hcd->core_if);
+
+ for (i = 0; i < fh_otg_hcd->core_if->core_params->host_channels; i++) {
+ if (haint.b2.chint & (1 << i)) {
+ retval |= fh_otg_hcd_handle_hc_n_intr(fh_otg_hcd, i);
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * Gets the actual length of a transfer after the transfer halts. _halt_status
+ * holds the reason for the halt.
+ *
+ * For IN transfers where halt_status is FH_OTG_HC_XFER_COMPLETE,
+ * *short_read is set to 1 upon return if less than the requested
+ * number of bytes were transferred. Otherwise, *short_read is set to 0 upon
+ * return. short_read may also be NULL on entry, in which case it remains
+ * unchanged.
+ */
+static uint32_t get_actual_xfer_length(fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd,
+ fh_otg_halt_status_e halt_status,
+ int *short_read)
+{
+ hctsiz_data_t hctsiz;
+ uint32_t length;
+
+ if (short_read != NULL) {
+ *short_read = 0;
+ }
+ hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
+
+ if (halt_status == FH_OTG_HC_XFER_COMPLETE) {
+ if (hc->ep_is_in) {
+ length = hc->xfer_len - hctsiz.b.xfersize;
+ if (short_read != NULL) {
+ *short_read = (hctsiz.b.xfersize != 0);
+ }
+ } else if (hc->qh->do_split) {
+ length = qtd->ssplit_out_xfer_count;
+ } else {
+ length = hc->xfer_len;
+ }
+ } else {
+ /*
+ * Must use the hctsiz.pktcnt field to determine how much data
+ * has been transferred. This field reflects the number of
+ * packets that have been transferred via the USB. This is
+ * always an integral number of packets if the transfer was
+ * halted before its normal completion. (Can't use the
+ * hctsiz.xfersize field because that reflects the number of
+ * bytes transferred via the AHB, not the USB).
+ */
+ length =
+ (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet;
+ }
+
+ return length;
+}
+
+/**
+ * Updates the state of the URB after a Transfer Complete interrupt on the
+ * host channel. Updates the actual_length field of the URB based on the
+ * number of bytes transferred via the host channel. Sets the URB status
+ * if the data transfer is finished.
+ *
+ * @return 1 if the data transfer specified by the URB is completely finished,
+ * 0 otherwise.
+ */
+static int update_urb_state_xfer_comp(fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_hcd_urb_t * urb,
+ fh_otg_qtd_t * qtd)
+{
+ int xfer_done = 0;
+ int short_read = 0;
+
+ int xfer_length;
+
+ xfer_length = get_actual_xfer_length(hc, hc_regs, qtd,
+ FH_OTG_HC_XFER_COMPLETE,
+ &short_read);
+
+
+ /* non DWORD-aligned buffer case handling. */
+ if (hc->align_buff && xfer_length && hc->ep_is_in) {
+ fh_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
+ xfer_length);
+ }
+
+ urb->actual_length += xfer_length;
+
+ if (xfer_length && (hc->ep_type == FH_OTG_EP_TYPE_BULK) &&
+ (urb->flags & URB_SEND_ZERO_PACKET)
+ && (urb->actual_length >= urb->length)
+ && !(urb->length % hc->max_packet)) {
+ xfer_done = 0;
+ } else if (short_read || urb->actual_length >= urb->length) {
+ xfer_done = 1;
+ urb->status = 0;
+ }
+
+#ifdef DEBUG
+ {
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
+ FH_DEBUGPL(DBG_HCDV, "FH_otg: %s: %s, channel %d\n",
+ __func__, (hc->ep_is_in ? "IN" : "OUT"),
+ hc->hc_num);
+ FH_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", hc->xfer_len);
+ FH_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n",
+ hctsiz.b.xfersize);
+ FH_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
+ urb->length);
+ FH_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n",
+ urb->actual_length);
+ FH_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n",
+ short_read, xfer_done);
+ }
+#endif
+
+ return xfer_done;
+}
+
+/*
+ * Save the starting data toggle for the next transfer. The data toggle is
+ * saved in the QH for non-control transfers and it's saved in the QTD for
+ * control transfers.
+ */
+void fh_otg_hcd_save_data_toggle(fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs, fh_otg_qtd_t * qtd)
+{
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
+
+ if (hc->ep_type != FH_OTG_EP_TYPE_CONTROL) {
+ fh_otg_qh_t *qh = hc->qh;
+ if (hctsiz.b.pid == FH_HCTSIZ_DATA0) {
+ qh->data_toggle = FH_OTG_HC_PID_DATA0;
+ } else {
+ qh->data_toggle = FH_OTG_HC_PID_DATA1;
+ }
+ } else {
+ if (hctsiz.b.pid == FH_HCTSIZ_DATA0) {
+ qtd->data_toggle = FH_OTG_HC_PID_DATA0;
+ } else {
+ qtd->data_toggle = FH_OTG_HC_PID_DATA1;
+ }
+ }
+}
+
+/**
+ * Updates the state of an Isochronous URB when the transfer is stopped for
+ * any reason. The fields of the current entry in the frame descriptor array
+ * are set based on the transfer state and the input _halt_status. Completes
+ * the Isochronous URB if all the URB frames have been completed.
+ *
+ * @return FH_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
+ * transferred in the URB. Otherwise return FH_OTG_HC_XFER_URB_COMPLETE.
+ */
+static fh_otg_halt_status_e
+update_isoc_urb_state(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd, fh_otg_halt_status_e halt_status)
+{
+ fh_otg_hcd_urb_t *urb = qtd->urb;
+ fh_otg_halt_status_e ret_val = halt_status;
+ struct fh_otg_hcd_iso_packet_desc *frame_desc;
+
+ frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
+ switch (halt_status) {
+ case FH_OTG_HC_XFER_COMPLETE:
+ frame_desc->status = 0;
+ frame_desc->actual_length =
+ get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
+
+ /* non DWORD-aligned buffer case handling. */
+ if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
+ fh_memcpy(urb->buf + frame_desc->offset + qtd->isoc_split_offset,
+ hc->qh->dw_align_buf, frame_desc->actual_length);
+ }
+
+ break;
+ case FH_OTG_HC_XFER_FRAME_OVERRUN:
+ urb->error_count++;
+ if (hc->ep_is_in) {
+ frame_desc->status = -FH_E_NO_STREAM_RES;
+ } else {
+ frame_desc->status = -FH_E_COMMUNICATION;
+ }
+ frame_desc->actual_length = 0;
+ break;
+ case FH_OTG_HC_XFER_BABBLE_ERR:
+ urb->error_count++;
+ frame_desc->status = -FH_E_OVERFLOW;
+ /* Don't need to update actual_length in this case. */
+ break;
+ case FH_OTG_HC_XFER_XACT_ERR:
+ urb->error_count++;
+ frame_desc->status = -FH_E_PROTOCOL;
+ frame_desc->actual_length =
+ get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
+
+ /* non DWORD-aligned buffer case handling. */
+ if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
+ fh_memcpy(urb->buf + frame_desc->offset + qtd->isoc_split_offset,
+ hc->qh->dw_align_buf, frame_desc->actual_length);
+ }
+ /* Skip whole frame */
+ if (hc->qh->do_split && (hc->ep_type == FH_OTG_EP_TYPE_ISOC) &&
+ hc->ep_is_in && hcd->core_if->dma_enable) {
+ qtd->complete_split = 0;
+ qtd->isoc_split_offset = 0;
+ }
+
+ break;
+ default:
+ FH_ASSERT(1, "Unhandled _halt_status (%d)\n", halt_status);
+ break;
+ }
+ if (++qtd->isoc_frame_index == urb->packet_count) {
+ /*
+ * urb->status is not used for isoc transfers.
+ * The individual frame_desc statuses are used instead.
+ */
+ hcd->fops->complete(hcd, urb->priv, urb, 0);
+ ret_val = FH_OTG_HC_XFER_URB_COMPLETE;
+ } else {
+ ret_val = FH_OTG_HC_XFER_COMPLETE;
+ }
+ return ret_val;
+}
+
+/**
+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
+ * still linked to the QH, the QH is added to the end of the inactive
+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic
+ * schedule if no more QTDs are linked to the QH.
+ */
+static void deactivate_qh(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh, int free_qtd)
+{
+ int continue_split = 0;
+ fh_otg_qtd_t *qtd;
+
+ FH_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd);
+
+ qtd = FH_CIRCLEQ_FIRST(&qh->qtd_list);
+
+ if (qtd->complete_split) {
+ continue_split = 1;
+ } else if (qtd->isoc_split_pos == FH_HCSPLIT_XACTPOS_MID ||
+ qtd->isoc_split_pos == FH_HCSPLIT_XACTPOS_END) {
+ continue_split = 1;
+ }
+
+ if (free_qtd) {
+ fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
+ continue_split = 0;
+ }
+
+ qh->channel = NULL;
+ fh_otg_hcd_qh_deactivate(hcd, qh, continue_split);
+}
+
+/**
+ * Releases a host channel for use by other transfers. Attempts to select and
+ * queue more transactions since at least one host channel is available.
+ *
+ * @param hcd The HCD state structure.
+ * @param hc The host channel to release.
+ * @param qtd The QTD associated with the host channel. This QTD may be freed
+ * if the transfer is complete or an error has occurred.
+ * @param halt_status Reason the channel is being released. This status
+ * determines the actions taken by this function.
+ */
+static void release_channel(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_qtd_t * qtd,
+ fh_otg_halt_status_e halt_status)
+{
+ fh_otg_transaction_type_e tr_type;
+ int free_qtd;
+
+ FH_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n",
+ __func__, hc->hc_num, halt_status);
+
+ switch (halt_status) {
+ case FH_OTG_HC_XFER_URB_COMPLETE:
+ free_qtd = 1;
+ break;
+ case FH_OTG_HC_XFER_AHB_ERR:
+ case FH_OTG_HC_XFER_STALL:
+ case FH_OTG_HC_XFER_BABBLE_ERR:
+ free_qtd = 1;
+ break;
+ case FH_OTG_HC_XFER_XACT_ERR:
+ if (qtd->error_count >= 3) {
+ FH_DEBUGPL(DBG_HCDV,
+ " Complete URB with transaction error\n");
+ free_qtd = 1;
+ qtd->urb->status = -FH_E_PROTOCOL;
+ hcd->fops->complete(hcd, qtd->urb->priv,
+ qtd->urb, -FH_E_PROTOCOL);
+ } else {
+ free_qtd = 0;
+ }
+ break;
+ case FH_OTG_HC_XFER_URB_DEQUEUE:
+ /*
+ * The QTD has already been removed and the QH has been
+ * deactivated. Don't want to do anything except release the
+ * host channel and try to queue more transfers.
+ */
+ goto cleanup;
+ case FH_OTG_HC_XFER_NO_HALT_STATUS:
+ free_qtd = 0;
+ break;
+ case FH_OTG_HC_XFER_PERIODIC_INCOMPLETE:
+ FH_DEBUGPL(DBG_HCDV,
+ " Complete URB with I/O error\n");
+ free_qtd = 1;
+ qtd->urb->status = -FH_E_IO;
+ hcd->fops->complete(hcd, qtd->urb->priv,
+ qtd->urb, -FH_E_IO);
+ break;
+ default:
+ free_qtd = 0;
+ break;
+ }
+
+ deactivate_qh(hcd, hc->qh, free_qtd);
+
+cleanup:
+ /*
+ * Release the host channel for use by other transfers. The cleanup
+ * function clears the channel interrupt enables and conditions, so
+ * there's no need to clear the Channel Halted interrupt separately.
+ */
+ fh_otg_hc_cleanup(hcd->core_if, hc);
+ FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
+
+ switch (hc->ep_type) {
+ case FH_OTG_EP_TYPE_CONTROL:
+ case FH_OTG_EP_TYPE_BULK:
+ hcd->non_periodic_channels--;
+ break;
+
+ default:
+ /*
+ * Don't release reservations for periodic channels here.
+ * That's done when a periodic transfer is descheduled (i.e.
+ * when the QH is removed from the periodic schedule).
+ */
+ break;
+ }
+
+ /* Try to queue more transfers now that there's a free channel. */
+ tr_type = fh_otg_hcd_select_transactions(hcd);
+ if (tr_type != FH_OTG_TRANSACTION_NONE) {
+ fh_otg_hcd_queue_transactions(hcd, tr_type);
+ }
+}
+
+/**
+ * Halts a host channel. If the channel cannot be halted immediately because
+ * the request queue is full, this function ensures that the FIFO empty
+ * interrupt for the appropriate queue is enabled so that the halt request can
+ * be queued when there is space in the request queue.
+ *
+ * This function may also be called in DMA mode. In that case, the channel is
+ * simply released since the core always halts the channel automatically in
+ * DMA mode.
+ */
+static void halt_channel(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_qtd_t * qtd, fh_otg_halt_status_e halt_status)
+{
+ if (hcd->core_if->dma_enable) {
+ release_channel(hcd, hc, qtd, halt_status);
+ return;
+ }
+
+ /* Slave mode processing... */
+ fh_otg_hc_halt(hcd->core_if, hc, halt_status);
+
+ if (hc->halt_on_queue) {
+ gintmsk_data_t gintmsk = {.d32 = 0 };
+ fh_otg_core_global_regs_t *global_regs;
+ global_regs = hcd->core_if->core_global_regs;
+
+ if (hc->ep_type == FH_OTG_EP_TYPE_CONTROL ||
+ hc->ep_type == FH_OTG_EP_TYPE_BULK) {
+ /*
+ * Make sure the Non-periodic Tx FIFO empty interrupt
+ * is enabled so that the non-periodic schedule will
+ * be processed.
+ */
+ gintmsk.b.nptxfempty = 1;
+ FH_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
+ } else {
+ /*
+ * Move the QH from the periodic queued schedule to
+ * the periodic assigned schedule. This allows the
+ * halt to be queued when the periodic schedule is
+ * processed.
+ */
+ FH_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
+ &hc->qh->qh_list_entry);
+
+ /*
+ * Make sure the Periodic Tx FIFO Empty interrupt is
+ * enabled so that the periodic schedule will be
+ * processed.
+ */
+ gintmsk.b.ptxfempty = 1;
+ FH_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
+ }
+ }
+}
+
+/**
+ * Performs common cleanup for non-periodic transfers after a Transfer
+ * Complete interrupt. This function should be called after any endpoint type
+ * specific handling is finished to release the host channel.
+ */
+static void complete_non_periodic_xfer(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd,
+ fh_otg_halt_status_e halt_status)
+{
+ hcint_data_t hcint;
+
+ qtd->error_count = 0;
+
+ hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+ if (hcint.b.nyet) {
+ /*
+ * Got a NYET on the last transaction of the transfer. This
+ * means that the endpoint should be in the PING state at the
+ * beginning of the next transfer.
+ */
+ hc->qh->ping_state = 1;
+ clear_hc_int(hc_regs, nyet);
+ }
+
+ /*
+ * Always halt and release the host channel to make it available for
+ * more transfers. There may still be more phases for a control
+ * transfer or more data packets for a bulk transfer at this point,
+ * but the host channel is still halted. A channel will be reassigned
+ * to the transfer when the non-periodic schedule is processed after
+ * the channel is released. This allows transactions to be queued
+ * properly via fh_otg_hcd_queue_transactions, which also enables the
+ * Tx FIFO Empty interrupt if necessary.
+ */
+ if (hc->ep_is_in) {
+ /*
+ * IN transfers in Slave mode require an explicit disable to
+ * halt the channel. (In DMA mode, this call simply releases
+ * the channel.)
+ */
+ halt_channel(hcd, hc, qtd, halt_status);
+ } else {
+ /*
+ * The channel is automatically disabled by the core for OUT
+ * transfers in Slave mode.
+ */
+ release_channel(hcd, hc, qtd, halt_status);
+ }
+}
+
+/**
+ * Performs common cleanup for periodic transfers after a Transfer Complete
+ * interrupt. This function should be called after any endpoint type specific
+ * handling is finished to release the host channel.
+ */
+static void complete_periodic_xfer(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd,
+ fh_otg_halt_status_e halt_status)
+{
+ hctsiz_data_t hctsiz;
+ qtd->error_count = 0;
+
+ hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
+ if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) {
+ /* Core halts channel in these cases. */
+ release_channel(hcd, hc, qtd, halt_status);
+ } else {
+ /* Flush any outstanding requests from the Tx queue. */
+ halt_channel(hcd, hc, qtd, halt_status);
+ }
+}
+
+static int32_t handle_xfercomp_isoc_split_in(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ uint32_t len;
+ struct fh_otg_hcd_iso_packet_desc *frame_desc;
+ frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
+
+ len = get_actual_xfer_length(hc, hc_regs, qtd,
+ FH_OTG_HC_XFER_COMPLETE, NULL);
+
+ if (!len) {
+ qtd->complete_split = 0;
+ qtd->isoc_split_offset = 0;
+ return 0;
+ }
+ frame_desc->actual_length += len;
+
+ if (hc->align_buff && len)
+ fh_memcpy(qtd->urb->buf + frame_desc->offset +
+ qtd->isoc_split_offset, hc->qh->dw_align_buf, len);
+ qtd->isoc_split_offset += len;
+
+ if (frame_desc->length == frame_desc->actual_length) {
+ frame_desc->status = 0;
+ qtd->isoc_frame_index++;
+ qtd->complete_split = 0;
+ qtd->isoc_split_offset = 0;
+ }
+
+ if (qtd->isoc_frame_index == qtd->urb->packet_count) {
+ hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
+ release_channel(hcd, hc, qtd, FH_OTG_HC_XFER_URB_COMPLETE);
+ } else {
+ release_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NO_HALT_STATUS);
+ }
+
+ return 1; /* Indicates that channel released */
+}
+
+/**
+ * Handles a host channel Transfer Complete interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_xfercomp_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ int urb_xfer_done;
+ fh_otg_halt_status_e halt_status = FH_OTG_HC_XFER_COMPLETE;
+ fh_otg_hcd_urb_t *urb = qtd->urb;
+ int pipe_type = fh_otg_hcd_get_pipe_type(&urb->pipe_info);
+
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Transfer Complete--\n", hc->hc_num);
+
+ if (hcd->core_if->dma_desc_enable) {
+ fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, halt_status);
+ if (pipe_type == UE_ISOCHRONOUS) {
+ /* Do not disable the interrupt, just clear it */
+ clear_hc_int(hc_regs, xfercomp);
+ return 1;
+ }
+ goto handle_xfercomp_done;
+ }
+
+ /*
+ * Handle xfer complete on CSPLIT.
+ */
+
+ if (hc->qh->do_split) {
+ if ((hc->ep_type == FH_OTG_EP_TYPE_ISOC) && hc->ep_is_in
+ && hcd->core_if->dma_enable) {
+ if (qtd->complete_split
+ && handle_xfercomp_isoc_split_in(hcd, hc, hc_regs,
+ qtd))
+ goto handle_xfercomp_done;
+ } else {
+ qtd->complete_split = 0;
+ }
+ }
+
+ /* Update the QTD and URB states. */
+ switch (pipe_type) {
+ case UE_CONTROL:
+ switch (qtd->control_phase) {
+ case FH_OTG_CONTROL_SETUP:
+ if (urb->length > 0) {
+ qtd->control_phase = FH_OTG_CONTROL_DATA;
+ } else {
+ qtd->control_phase = FH_OTG_CONTROL_STATUS;
+ }
+ FH_DEBUGPL(DBG_HCDV,
+ " Control setup transaction done\n");
+ halt_status = FH_OTG_HC_XFER_COMPLETE;
+ break;
+ case FH_OTG_CONTROL_DATA:{
+ urb_xfer_done =
+ update_urb_state_xfer_comp(hc, hc_regs, urb,
+ qtd);
+ if (urb_xfer_done) {
+ qtd->control_phase =
+ FH_OTG_CONTROL_STATUS;
+ FH_DEBUGPL(DBG_HCDV,
+ " Control data transfer done\n");
+ } else {
+ fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+ }
+ halt_status = FH_OTG_HC_XFER_COMPLETE;
+ break;
+ }
+ case FH_OTG_CONTROL_STATUS:
+ FH_DEBUGPL(DBG_HCDV, " Control transfer complete\n");
+ if (urb->status == -FH_E_IN_PROGRESS) {
+ urb->status = 0;
+ }
+ hcd->fops->complete(hcd, urb->priv, urb, urb->status);
+ halt_status = FH_OTG_HC_XFER_URB_COMPLETE;
+ if (!hcd->core_if->dma_enable && hcd->core_if->otg_ver == 1)
+ qtd->urb = NULL;
+ break;
+ }
+
+ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
+ break;
+ case UE_BULK:
+ FH_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n");
+ urb_xfer_done =
+ update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
+ if (urb_xfer_done) {
+ hcd->fops->complete(hcd, urb->priv, urb, urb->status);
+ halt_status = FH_OTG_HC_XFER_URB_COMPLETE;
+ } else {
+ halt_status = FH_OTG_HC_XFER_COMPLETE;
+ }
+
+ fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
+ break;
+ case UE_INTERRUPT:
+ FH_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n");
+ urb_xfer_done =
+ update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
+
+ /*
+ * Interrupt URB is done on the first transfer complete
+ * interrupt.
+ */
+ if (urb_xfer_done) {
+ hcd->fops->complete(hcd, urb->priv, urb, urb->status);
+ halt_status = FH_OTG_HC_XFER_URB_COMPLETE;
+ } else {
+ halt_status = FH_OTG_HC_XFER_COMPLETE;
+ }
+
+ fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+ complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
+ break;
+ case UE_ISOCHRONOUS:
+ FH_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n");
+ if (qtd->isoc_split_pos == FH_HCSPLIT_XACTPOS_ALL) {
+ halt_status =
+ update_isoc_urb_state(hcd, hc, hc_regs, qtd,
+ FH_OTG_HC_XFER_COMPLETE);
+ }
+ complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
+ break;
+ }
+
+handle_xfercomp_done:
+ disable_hc_int(hc_regs, xfercompl);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel STALL interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_stall_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ fh_otg_hcd_urb_t *urb = qtd->urb;
+ int pipe_type = fh_otg_hcd_get_pipe_type(&urb->pipe_info);
+
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "STALL Received--\n", hc->hc_num);
+
+ if (hcd->core_if->dma_desc_enable) {
+ fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, FH_OTG_HC_XFER_STALL);
+ goto handle_stall_done;
+ }
+
+ if (pipe_type == UE_CONTROL) {
+ hcd->fops->complete(hcd, urb->priv, urb, -FH_E_PIPE);
+ }
+
+ if (pipe_type == UE_BULK || pipe_type == UE_INTERRUPT) {
+ hcd->fops->complete(hcd, urb->priv, urb, -FH_E_PIPE);
+ /*
+ * USB protocol requires resetting the data toggle for bulk
+ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
+ * setup command is issued to the endpoint. Anticipate the
+ * CLEAR_FEATURE command since a STALL has occurred and reset
+ * the data toggle now.
+ */
+ hc->qh->data_toggle = 0;
+ }
+
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_STALL);
+
+handle_stall_done:
+ disable_hc_int(hc_regs, stall);
+
+ return 1;
+}
+
+/*
+ * Updates the state of the URB when a transfer has been stopped due to an
+ * abnormal condition before the transfer completes. Modifies the
+ * actual_length field of the URB to reflect the number of bytes that have
+ * actually been transferred via the host channel.
+ */
+static void update_urb_state_xfer_intr(fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_hcd_urb_t * urb,
+ fh_otg_qtd_t * qtd,
+ fh_otg_halt_status_e halt_status)
+{
+ uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd,
+ halt_status, NULL);
+ /* non DWORD-aligned buffer case handling. */
+ if (hc->align_buff && bytes_transferred && hc->ep_is_in) {
+ fh_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
+ bytes_transferred);
+ }
+
+ urb->actual_length += bytes_transferred;
+
+#ifdef DEBUG
+ {
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
+ FH_DEBUGPL(DBG_HCDV, "FH_otg: %s: %s, channel %d\n",
+ __func__, (hc->ep_is_in ? "IN" : "OUT"),
+ hc->hc_num);
+ FH_DEBUGPL(DBG_HCDV, " hc->start_pkt_count %d\n",
+ hc->start_pkt_count);
+ FH_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
+ FH_DEBUGPL(DBG_HCDV, " hc->max_packet %d\n", hc->max_packet);
+ FH_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n",
+ bytes_transferred);
+ FH_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n",
+ urb->actual_length);
+ FH_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
+ urb->length);
+ }
+#endif
+}
+
+/**
+ * Handles a host channel NAK interrupt. This handler may be called in either
+ * DMA mode or Slave mode.
+ */
+static int32_t handle_hc_nak_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "NAK Received--\n", hc->hc_num);
+
+ /*
+ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
+ * interrupt. Re-start the SSPLIT transfer.
+ */
+ if (hc->do_split) {
+ if (hc->complete_split) {
+ qtd->error_count = 0;
+ }
+ qtd->complete_split = 0;
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NAK);
+ goto handle_nak_done;
+ }
+
+ switch (fh_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+ case UE_CONTROL:
+ case UE_BULK:
+ if (hcd->core_if->dma_enable && hc->ep_is_in) {
+ /*
+ * NAK interrupts are enabled on bulk/control IN
+ * transfers in DMA mode for the sole purpose of
+ * resetting the error count after a transaction error
+ * occurs. The core will continue transferring data.
+ */
+ qtd->error_count = 0;
+ goto handle_nak_done;
+ }
+
+ /*
+ * NAK interrupts normally occur during OUT transfers in DMA
+ * or Slave mode. For IN transfers, more requests will be
+ * queued as request queue space is available.
+ */
+ qtd->error_count = 0;
+
+ if (!hc->qh->ping_state) {
+ update_urb_state_xfer_intr(hc, hc_regs,
+ qtd->urb, qtd,
+ FH_OTG_HC_XFER_NAK);
+ fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+
+ if (hc->speed == FH_OTG_EP_SPEED_HIGH)
+ hc->qh->ping_state = 1;
+ }
+
+ /*
+ * Halt the channel so the transfer can be re-started from
+ * the appropriate point or the PING protocol will
+ * start/continue.
+ */
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NAK);
+ break;
+ case UE_INTERRUPT:
+ qtd->error_count = 0;
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NAK);
+ break;
+ case UE_ISOCHRONOUS:
+ /* Should never get called for isochronous transfers. */
+ FH_ASSERT(1, "NACK interrupt for ISOC transfer\n");
+ break;
+ }
+
+handle_nak_done:
+ disable_hc_int(hc_regs, nak);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel ACK interrupt. This interrupt is enabled when
+ * performing the PING protocol in Slave mode, when errors occur during
+ * either Slave mode or DMA mode, and during Start Split transactions.
+ */
+static int32_t handle_hc_ack_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "ACK Received--\n", hc->hc_num);
+
+ if (hc->do_split) {
+ /*
+ * Handle ACK on SSPLIT.
+ * ACK should not occur in CSPLIT.
+ */
+ if (!hc->ep_is_in && hc->data_pid_start != FH_OTG_HC_PID_SETUP) {
+ qtd->ssplit_out_xfer_count = hc->xfer_len;
+ }
+ if (!(hc->ep_type == FH_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) {
+ /* Don't need complete for isochronous out transfers. */
+ qtd->complete_split = 1;
+ }
+
+ /* ISOC OUT */
+ if (hc->ep_type == FH_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
+ switch (hc->xact_pos) {
+ case FH_HCSPLIT_XACTPOS_ALL:
+ break;
+ case FH_HCSPLIT_XACTPOS_END:
+ qtd->isoc_split_pos = FH_HCSPLIT_XACTPOS_ALL;
+ qtd->isoc_split_offset = 0;
+ break;
+ case FH_HCSPLIT_XACTPOS_BEGIN:
+ case FH_HCSPLIT_XACTPOS_MID:
+ /*
+ * For BEGIN or MID, calculate the length for
+ * the next microframe to determine the correct
+ * SSPLIT token, either MID or END.
+ */
+ {
+ struct fh_otg_hcd_iso_packet_desc
+ *frame_desc;
+
+ frame_desc =
+ &qtd->urb->
+ iso_descs[qtd->isoc_frame_index];
+ qtd->isoc_split_offset += 188;
+
+ if ((frame_desc->length -
+ qtd->isoc_split_offset) <= 188) {
+ qtd->isoc_split_pos =
+ FH_HCSPLIT_XACTPOS_END;
+ } else {
+ qtd->isoc_split_pos =
+ FH_HCSPLIT_XACTPOS_MID;
+ }
+
+ }
+ break;
+ }
+ } else {
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_ACK);
+ }
+ } else {
+ qtd->error_count = 0;
+
+ if (hc->qh->ping_state) {
+ hc->qh->ping_state = 0;
+ /*
+ * Halt the channel so the transfer can be re-started
+ * from the appropriate point. This only happens in
+ * Slave mode. In DMA mode, the ping_state is cleared
+ * when the transfer is started because the core
+ * automatically executes the PING, then the transfer.
+ */
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_ACK);
+ }
+ }
+
+ /*
+ * If the ACK occurred when _not_ in the PING state, let the channel
+ * continue transferring data after clearing the error count.
+ */
+
+ disable_hc_int(hc_regs, ack);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel NYET interrupt. This interrupt should only occur on
+ * Bulk and Control OUT endpoints and for complete split transactions. If a
+ * NYET occurs at the same time as a Transfer Complete interrupt, it is
+ * handled in the xfercomp interrupt handler, not here. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_nyet_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "NYET Received--\n", hc->hc_num);
+
+ /*
+ * NYET on CSPLIT
+ * re-do the CSPLIT immediately on non-periodic
+ */
+ if (hc->do_split && hc->complete_split) {
+ if (hc->ep_is_in && (hc->ep_type == FH_OTG_EP_TYPE_ISOC)
+ && hcd->core_if->dma_enable) {
+ qtd->complete_split = 0;
+ qtd->isoc_split_offset = 0;
+ if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
+ hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
+ release_channel(hcd, hc, qtd, FH_OTG_HC_XFER_URB_COMPLETE);
+ }
+ else
+ release_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NO_HALT_STATUS);
+ goto handle_nyet_done;
+ }
+
+ if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
+ hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
+ int frnum = fh_otg_hcd_get_frame_number(hcd);
+
+ if (fh_full_frame_num(frnum) !=
+ fh_full_frame_num(hc->qh->sched_frame)) {
+ /*
+ * No longer in the same full speed frame.
+ * Treat this as a transaction error.
+ */
+#if 0
+ /** @todo Fix system performance so this can
+ * be treated as an error. Right now complete
+ * splits cannot be scheduled precisely enough
+ * due to other system activity, so this error
+ * occurs regularly in Slave mode.
+ */
+ qtd->error_count++;
+#endif
+ qtd->complete_split = 0;
+ halt_channel(hcd, hc, qtd,
+ FH_OTG_HC_XFER_XACT_ERR);
+ /** @todo add support for isoc release */
+ goto handle_nyet_done;
+ }
+ }
+
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NYET);
+ goto handle_nyet_done;
+ }
+
+ hc->qh->ping_state = 1;
+ qtd->error_count = 0;
+
+ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd,
+ FH_OTG_HC_XFER_NYET);
+ fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+
+ /*
+ * Halt the channel and re-start the transfer so the PING
+ * protocol will start.
+ */
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NYET);
+
+handle_nyet_done:
+ disable_hc_int(hc_regs, nyet);
+ return 1;
+}
+
+/**
+ * Handles a host channel babble interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_babble_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Babble Error--\n", hc->hc_num);
+
+ if (hcd->core_if->dma_desc_enable) {
+ fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
+ FH_OTG_HC_XFER_BABBLE_ERR);
+ goto handle_babble_done;
+ }
+
+ if (hc->ep_type != FH_OTG_EP_TYPE_ISOC) {
+ hcd->fops->complete(hcd, qtd->urb->priv,
+ qtd->urb, -FH_E_OVERFLOW);
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_BABBLE_ERR);
+ } else {
+ fh_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
+ FH_OTG_HC_XFER_BABBLE_ERR);
+ halt_channel(hcd, hc, qtd, halt_status);
+ }
+
+handle_babble_done:
+ disable_hc_int(hc_regs, bblerr);
+ return 1;
+}
+
+/**
+ * Handles a host channel AHB error interrupt. This handler is only called in
+ * DMA mode.
+ */
+static int32_t handle_hc_ahberr_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+ hctsiz_data_t hctsiz;
+ uint32_t hcdma;
+ char *pipetype, *speed;
+
+ fh_otg_hcd_urb_t *urb = qtd->urb;
+
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "AHB Error--\n", hc->hc_num);
+
+ hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+ hcsplt.d32 = FH_READ_REG32(&hc_regs->hcsplt);
+ hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
+ hcdma = FH_READ_REG32(&hc_regs->hcdma);
+
+ FH_ERROR("AHB ERROR, Channel %d\n", hc->hc_num);
+ FH_ERROR("AHB ERROR, Xfer size %d\n", hc->xfer_len);
+ FH_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+ FH_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
+
+ FH_DEBUGPL(DBG_HCD, "FH OTG HCD URB Enqueue\n");
+ FH_ERROR(" Device address: %d\n",
+ fh_otg_hcd_get_dev_addr(&urb->pipe_info));
+ FH_ERROR(" Endpoint: %d, %s\n",
+ fh_otg_hcd_get_ep_num(&urb->pipe_info),
+ (fh_otg_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT"));
+
+ switch (fh_otg_hcd_get_pipe_type(&urb->pipe_info)) {
+ case UE_CONTROL:
+ pipetype = "CONTROL";
+ break;
+ case UE_BULK:
+ pipetype = "BULK";
+ break;
+ case UE_INTERRUPT:
+ pipetype = "INTERRUPT";
+ break;
+ case UE_ISOCHRONOUS:
+ pipetype = "ISOCHRONOUS";
+ break;
+ default:
+ pipetype = "UNKNOWN";
+ break;
+ }
+
+ FH_ERROR(" Endpoint type: %s\n", pipetype);
+
+ switch (hc->speed) {
+ case FH_OTG_EP_SPEED_HIGH:
+ speed = "HIGH";
+ break;
+ case FH_OTG_EP_SPEED_FULL:
+ speed = "FULL";
+ break;
+ case FH_OTG_EP_SPEED_LOW:
+ speed = "LOW";
+ break;
+ default:
+ speed = "UNKNOWN";
+ break;
+ };
+
+ FH_ERROR(" Speed: %s\n", speed);
+
+ FH_ERROR(" Max packet size: %d\n",
+ fh_otg_hcd_get_mps(&urb->pipe_info));
+ FH_ERROR(" Data buffer length: %d\n", urb->length);
+ FH_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n",
+ urb->buf, (void *)urb->dma);
+ FH_ERROR(" Setup buffer: %p, Setup DMA: %p\n",
+ urb->setup_packet, (void *)urb->setup_dma);
+ FH_ERROR(" Interval: %d\n", urb->interval);
+
+	/* Core halts the channel for Descriptor DMA mode */
+ if (hcd->core_if->dma_desc_enable) {
+ fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
+ FH_OTG_HC_XFER_AHB_ERR);
+ goto handle_ahberr_done;
+ }
+
+ hcd->fops->complete(hcd, urb->priv, urb, -FH_E_IO);
+
+ /*
+ * Force a channel halt. Don't call halt_channel because that won't
+ * write to the HCCHARn register in DMA mode to force the halt.
+ */
+ fh_otg_hc_halt(hcd->core_if, hc, FH_OTG_HC_XFER_AHB_ERR);
+handle_ahberr_done:
+ disable_hc_int(hc_regs, ahberr);
+ return 1;
+}
+
+/**
+ * Handles a host channel transaction error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_xacterr_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Transaction Error--\n", hc->hc_num);
+
+ if (hcd->core_if->dma_desc_enable) {
+ fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
+ FH_OTG_HC_XFER_XACT_ERR);
+ goto handle_xacterr_done;
+ }
+
+ switch (fh_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+ case UE_CONTROL:
+ case UE_BULK:
+ qtd->error_count++;
+ if (!hc->qh->ping_state) {
+
+ update_urb_state_xfer_intr(hc, hc_regs,
+ qtd->urb, qtd,
+ FH_OTG_HC_XFER_XACT_ERR);
+ fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
+ if (!hc->ep_is_in && hc->speed == FH_OTG_EP_SPEED_HIGH) {
+ hc->qh->ping_state = 1;
+ }
+ }
+
+ /*
+ * Halt the channel so the transfer can be re-started from
+ * the appropriate point or the PING protocol will start.
+ */
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_XACT_ERR);
+ break;
+ case UE_INTERRUPT:
+ qtd->error_count++;
+ if (hc->do_split && hc->complete_split) {
+ qtd->complete_split = 0;
+ }
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_XACT_ERR);
+ break;
+ case UE_ISOCHRONOUS:
+ {
+ fh_otg_halt_status_e halt_status;
+ halt_status =
+ update_isoc_urb_state(hcd, hc, hc_regs, qtd,
+ FH_OTG_HC_XFER_XACT_ERR);
+
+ halt_channel(hcd, hc, qtd, halt_status);
+ }
+ break;
+ }
+handle_xacterr_done:
+ disable_hc_int(hc_regs, xacterr);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel frame overrun interrupt. This handler may be called
+ * in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_frmovrun_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Frame Overrun--\n", hc->hc_num);
+
+ switch (fh_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+ case UE_CONTROL:
+ case UE_BULK:
+ break;
+ case UE_INTERRUPT:
+ halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_FRAME_OVERRUN);
+ break;
+ case UE_ISOCHRONOUS:
+ {
+ fh_otg_halt_status_e halt_status;
+ halt_status =
+ update_isoc_urb_state(hcd, hc, hc_regs, qtd,
+ FH_OTG_HC_XFER_FRAME_OVERRUN);
+
+ halt_channel(hcd, hc, qtd, halt_status);
+ }
+ break;
+ }
+
+ disable_hc_int(hc_regs, frmovrun);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel data toggle error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_datatglerr_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Data Toggle Error--\n", hc->hc_num);
+
+ if (hc->ep_is_in) {
+ qtd->error_count = 0;
+ } else {
+ FH_ERROR("Data Toggle Error on OUT transfer,"
+ "channel %d\n", hc->hc_num);
+ }
+
+ disable_hc_int(hc_regs, datatglerr);
+
+ return 1;
+}
+
+#ifdef DEBUG
+/**
+ * This function is for debug only. It checks that a valid halt status is set
+ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
+ * taken and a warning is issued.
+ * @return 1 if halt status is ok, 0 otherwise.
+ */
+static inline int halt_status_ok(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ hcsplt_data_t hcsplt;
+
+ if (hc->halt_status == FH_OTG_HC_XFER_NO_HALT_STATUS) {
+ /*
+ * This code is here only as a check. This condition should
+ * never happen. Ignore the halt if it does occur.
+ */
+ hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+ hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
+ hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+ hcintmsk.d32 = FH_READ_REG32(&hc_regs->hcintmsk);
+ hcsplt.d32 = FH_READ_REG32(&hc_regs->hcsplt);
+ FH_WARN
+ ("%s: hc->halt_status == FH_OTG_HC_XFER_NO_HALT_STATUS, "
+ "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
+ "hcint 0x%08x, hcintmsk 0x%08x, "
+ "hcsplt 0x%08x, qtd->complete_split %d\n", __func__,
+ hc->hc_num, hcchar.d32, hctsiz.d32, hcint.d32,
+ hcintmsk.d32, hcsplt.d32, qtd->complete_split);
+
+ FH_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
+ __func__, hc->hc_num);
+ FH_WARN("\n");
+ clear_hc_int(hc_regs, chhltd);
+ return 0;
+ }
+
+ /*
+ * This code is here only as a check. hcchar.chdis should
+ * never be set when the halt interrupt occurs. Halt the
+ * channel again if it does occur.
+ */
+ hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
+ if (hcchar.b.chdis) {
+ FH_WARN("%s: hcchar.chdis set unexpectedly, "
+ "hcchar 0x%08x, trying to halt again\n",
+ __func__, hcchar.d32);
+ clear_hc_int(hc_regs, chhltd);
+ hc->halt_pending = 0;
+ halt_channel(hcd, hc, qtd, hc->halt_status);
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
+/**
+ * Handles a host Channel Halted interrupt in DMA mode. This handler
+ * determines the reason the channel halted and proceeds accordingly.
+ */
+static void handle_hc_chhltd_intr_dma(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ int out_nak_enh = 0;
+
+ /* For core with OUT NAK enhancement, the flow for high-
+ * speed CONTROL/BULK OUT is handled a little differently.
+ */
+ if (hcd->core_if->snpsid >= OTG_CORE_REV_2_71a) {
+ if (hc->speed == FH_OTG_EP_SPEED_HIGH && !hc->ep_is_in &&
+ (hc->ep_type == FH_OTG_EP_TYPE_CONTROL ||
+ hc->ep_type == FH_OTG_EP_TYPE_BULK)) {
+ out_nak_enh = 1;
+ }
+ }
+
+ if (hc->halt_status == FH_OTG_HC_XFER_URB_DEQUEUE ||
+ (hc->halt_status == FH_OTG_HC_XFER_AHB_ERR
+ && !hcd->core_if->dma_desc_enable)) {
+ /*
+ * Just release the channel. A dequeue can happen on a
+ * transfer timeout. In the case of an AHB Error, the channel
+ * was forced to halt because there's no way to gracefully
+ * recover.
+ */
+ if (hcd->core_if->dma_desc_enable)
+ fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
+ hc->halt_status);
+ else
+ release_channel(hcd, hc, qtd, hc->halt_status);
+ return;
+ }
+
+ /* Read the HCINTn register to determine the cause for the halt. */
+ hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+ hcintmsk.d32 = FH_READ_REG32(&hc_regs->hcintmsk);
+
+ if (hcint.b.xfercomp) {
+ /** @todo This is here because of a possible hardware bug. Spec
+ * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
+ * interrupt w/ACK bit set should occur, but I only see the
+ * XFERCOMP bit, even with it masked out. This is a workaround
+ * for that behavior. Should fix this when hardware is fixed.
+ */
+ if (hc->ep_type == FH_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
+ handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
+ }
+ handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd);
+ } else if (hcint.b.stall) {
+ handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
+ } else if (hcint.b.xacterr && !hcd->core_if->dma_desc_enable) {
+ if (out_nak_enh) {
+ if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) {
+ FH_DEBUG("XactErr with NYET/NAK/ACK\n");
+ qtd->error_count = 0;
+ } else {
+ FH_DEBUG("XactErr without NYET/NAK/ACK\n");
+ }
+ }
+
+ /*
+ * Must handle xacterr before nak or ack. Could get a xacterr
+ * at the same time as either of these on a BULK/CONTROL OUT
+ * that started with a PING. The xacterr takes precedence.
+ */
+ handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
+ } else if (hcint.b.xcs_xact && hcd->core_if->dma_desc_enable) {
+ handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
+ } else if (hcint.b.ahberr && hcd->core_if->dma_desc_enable) {
+ handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
+ } else if (hcint.b.bblerr) {
+ handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
+ } else if (hcint.b.frmovrun) {
+ handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd);
+ } else if (!out_nak_enh) {
+ if (hcint.b.nyet) {
+ /*
+ * Must handle nyet before nak or ack. Could get a nyet at the
+ * same time as either of those on a BULK/CONTROL OUT that
+ * started with a PING. The nyet takes precedence.
+ */
+ handle_hc_nyet_intr(hcd, hc, hc_regs, qtd);
+ } else if (hcint.b.nak && !hcintmsk.b.nak) {
+ /*
+ * If nak is not masked, it's because a non-split IN transfer
+ * is in an error state. In that case, the nak is handled by
+ * the nak interrupt handler, not here. Handle nak here for
+ * BULK/CONTROL OUT transfers, which halt on a NAK to allow
+ * rewinding the buffer pointer.
+ */
+ handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
+ } else if (hcint.b.ack && !hcintmsk.b.ack) {
+ /*
+ * If ack is not masked, it's because a non-split IN transfer
+ * is in an error state. In that case, the ack is handled by
+ * the ack interrupt handler, not here. Handle ack here for
+ * split transfers. Start splits halt on ACK.
+ */
+ handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
+ } else {
+ if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
+ hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
+ /*
+ * A periodic transfer halted with no other channel
+ * interrupts set. Assume it was halted by the core
+ * because it could not be completed in its scheduled
+ * (micro)frame.
+ */
+#ifdef DEBUG
+ FH_PRINTF
+ ("%s: Halt channel %d (assume incomplete periodic transfer)\n",
+ __func__, hc->hc_num);
+#endif
+ halt_channel(hcd, hc, qtd,
+ FH_OTG_HC_XFER_PERIODIC_INCOMPLETE);
+ } else {
+ FH_ERROR
+ ("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
+ "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n",
+ __func__, hc->hc_num, hcint.d32,
+ FH_READ_REG32(&hcd->
+ core_if->core_global_regs->
+ gintsts));
+ disable_hc_int(hc_regs, chhltd);
+ }
+
+ }
+ } else {
+ FH_PRINTF("NYET/NAK/ACK/other in non-error case, 0x%08x\n",
+ hcint.d32);
+ disable_hc_int(hc_regs, chhltd);
+ }
+}
+
+/**
+ * Handles a host channel Channel Halted interrupt.
+ *
+ * In slave mode, this handler is called only when the driver specifically
+ * requests a halt. This occurs during handling other host channel interrupts
+ * (e.g. nak, xacterr, stall, nyet, etc.).
+ *
+ * In DMA mode, this is the interrupt that occurs when the core has finished
+ * processing a transfer on a channel. Other host channel interrupts (except
+ * ahberr) are disabled in DMA mode.
+ */
+static int32_t handle_hc_chhltd_intr(fh_otg_hcd_t * hcd,
+ fh_hc_t * hc,
+ fh_otg_hc_regs_t * hc_regs,
+ fh_otg_qtd_t * qtd)
+{
+ FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Channel Halted--\n", hc->hc_num);
+
+ if (hcd->core_if->dma_enable) {
+ handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd);
+ } else {
+#ifdef DEBUG
+ if (!halt_status_ok(hcd, hc, hc_regs, qtd)) {
+ return 1;
+ }
+#endif
+ release_channel(hcd, hc, qtd, hc->halt_status);
+ }
+
+ return 1;
+}
+
+/** Handles interrupt for a specific Host Channel */
+int32_t fh_otg_hcd_handle_hc_n_intr(fh_otg_hcd_t * fh_otg_hcd, uint32_t num)
+{
+ int retval = 0;
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ fh_hc_t *hc;
+ fh_otg_hc_regs_t *hc_regs;
+ fh_otg_qtd_t *qtd;
+
+ FH_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num);
+
+ hc = fh_otg_hcd->hc_ptr_array[num];
+ hc_regs = fh_otg_hcd->core_if->host_if->hc_regs[num];
+ qtd = FH_CIRCLEQ_FIRST(&hc->qh->qtd_list);
+
+ hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
+ hcintmsk.d32 = FH_READ_REG32(&hc_regs->hcintmsk);
+ FH_DEBUGPL(DBG_HCDV,
+ " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+ hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
+ hcint.d32 = hcint.d32 & hcintmsk.d32;
+
+ if (!fh_otg_hcd->core_if->dma_enable) {
+ if (hcint.b.chhltd && hcint.d32 != 0x2) {
+ hcint.b.chhltd = 0;
+ }
+ }
+
+ if (hcint.b.xfercomp) {
+ retval |=
+ handle_hc_xfercomp_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ /*
+ * If NYET occurred at same time as Xfer Complete, the NYET is
+ * handled by the Xfer Complete interrupt handler. Don't want
+ * to call the NYET interrupt handler in this case.
+ */
+ hcint.b.nyet = 0;
+ }
+ if (hcint.b.chhltd) {
+ retval |= handle_hc_chhltd_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.ahberr) {
+ retval |= handle_hc_ahberr_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.stall) {
+ retval |= handle_hc_stall_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.nak) {
+ retval |= handle_hc_nak_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.ack) {
+ retval |= handle_hc_ack_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.nyet) {
+ retval |= handle_hc_nyet_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.xacterr) {
+ retval |= handle_hc_xacterr_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.bblerr) {
+ retval |= handle_hc_babble_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.frmovrun) {
+ retval |=
+ handle_hc_frmovrun_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.datatglerr) {
+ retval |=
+ handle_hc_datatglerr_intr(fh_otg_hcd, hc, hc_regs, qtd);
+ }
+
+ return retval;
+}
+
+#endif /* FH_DEVICE_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_linux.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_linux.c
new file mode 100755
index 00000000..01ffd025
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_linux.c
@@ -0,0 +1,873 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_linux.c $
+ * $Revision: #25 $
+ * $Date: 2015/09/08 $
+ * $Change: 2943025 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the implementation of the HCD. In Linux, the HCD
+ * implements the hc_driver API.
+ */
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <asm/io.h>
+#include <linux/usb.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#include <../drivers/usb/core/hcd.h>
+#else
+#include <linux/usb/hcd.h>
+#endif
+
+#include "fh_otg_hcd_if.h"
+#include "fh_otg_dbg.h"
+#include "fh_otg_driver.h"
+#include "fh_otg_hcd.h"
+/**
+ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is
+ * qualified with its direction (possible 32 endpoints per device).
+ */
+#define fh_ep_addr_to_endpoint(_bEndpointAddress_) ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \
+ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4)
+
+static const char fh_otg_hcd_name[] = "fh_otg_hcd";
+
+/** @name Linux HC Driver API Functions */
+/** @{ */
+static int urb_enqueue(struct usb_hcd *hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ struct usb_host_endpoint *ep,
+#endif
+ struct urb *urb, gfp_t mem_flags);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
+#else
+static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+#endif
+
+static void endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+static void endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+#endif
+static irqreturn_t fh_otg_hcd_irq(struct usb_hcd *hcd);
+extern int hcd_start(struct usb_hcd *hcd);
+extern void hcd_stop(struct usb_hcd *hcd);
+static int get_frame_number(struct usb_hcd *hcd);
+extern int hub_status_data(struct usb_hcd *hcd, char *buf);
+extern int hub_control(struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength);
+
+struct wrapper_priv_data {
+ fh_otg_hcd_t *fh_otg_hcd;
+};
+
+/** @} */
+
+static struct hc_driver fh_otg_hc_driver = {
+
+ .description = fh_otg_hcd_name,
+ .product_desc = "FH OTG Controller",
+ .hcd_priv_size = sizeof(struct wrapper_priv_data),
+
+ .irq = fh_otg_hcd_irq,
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3,12,0)
+ .flags = HCD_MEMORY | HCD_USB2,
+#else
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+#endif
+
+
+ .start = hcd_start,
+ .stop = hcd_stop,
+
+ .urb_enqueue = urb_enqueue,
+ .urb_dequeue = urb_dequeue,
+ .endpoint_disable = endpoint_disable,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+ .endpoint_reset = endpoint_reset,
+#endif
+ .get_frame_number = get_frame_number,
+
+ .hub_status_data = hub_status_data,
+ .hub_control = hub_control,
+ //.bus_suspend =
+ //.bus_resume =
+};
+
+/** Gets the fh_otg_hcd from a struct usb_hcd */
+static inline fh_otg_hcd_t *hcd_to_fh_otg_hcd(struct usb_hcd *hcd)
+{
+ struct wrapper_priv_data *p;
+ p = (struct wrapper_priv_data *)(hcd->hcd_priv);
+ return p->fh_otg_hcd;
+}
+
+/** Gets the struct usb_hcd that contains a fh_otg_hcd_t. */
+static inline struct usb_hcd *fh_otg_hcd_to_hcd(fh_otg_hcd_t * fh_otg_hcd)
+{
+ return fh_otg_hcd_get_priv_data(fh_otg_hcd);
+}
+
+/** Gets the usb_host_endpoint associated with an URB. */
+inline struct usb_host_endpoint *fh_urb_to_endpoint(struct urb *urb)
+{
+ struct usb_device *dev = urb->dev;
+ int ep_num = usb_pipeendpoint(urb->pipe);
+
+ if (usb_pipein(urb->pipe))
+ return dev->ep_in[ep_num];
+ else
+ return dev->ep_out[ep_num];
+}
+
+static int _disconnect(fh_otg_hcd_t * hcd)
+{
+ struct usb_hcd *usb_hcd = fh_otg_hcd_to_hcd(hcd);
+
+ usb_hcd->self.is_b_host = 0;
+ return 0;
+}
+
+static int _start(fh_otg_hcd_t * hcd)
+{
+ struct usb_hcd *usb_hcd = fh_otg_hcd_to_hcd(hcd);
+
+ usb_hcd->self.is_b_host = fh_otg_hcd_is_b_host(hcd);
+ hcd_start(usb_hcd);
+
+ return 0;
+}
+
+static int _hub_info(fh_otg_hcd_t * hcd, void *urb_handle, uint32_t * hub_addr,
+ uint32_t * port_addr)
+{
+ struct urb *urb = (struct urb *)urb_handle;
+ if (urb->dev->tt) {
+ *hub_addr = urb->dev->tt->hub->devnum;
+ } else {
+ *hub_addr = 0;
+ }
+ *port_addr = urb->dev->ttport;
+ return 0;
+}
+
+static int _speed(fh_otg_hcd_t * hcd, void *urb_handle)
+{
+ struct urb *urb = (struct urb *)urb_handle;
+ return urb->dev->speed;
+}
+
+static int _get_b_hnp_enable(fh_otg_hcd_t * hcd)
+{
+ struct usb_hcd *usb_hcd = fh_otg_hcd_to_hcd(hcd);
+ return usb_hcd->self.b_hnp_enable;
+}
+
+static void allocate_bus_bandwidth(struct usb_hcd *hcd, uint32_t bw,
+ struct urb *urb)
+{
+ hcd_to_bus(hcd)->bandwidth_allocated += bw / urb->interval;
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ hcd_to_bus(hcd)->bandwidth_isoc_reqs++;
+ } else {
+ hcd_to_bus(hcd)->bandwidth_int_reqs++;
+ }
+}
+
+static void free_bus_bandwidth(struct usb_hcd *hcd, uint32_t bw,
+ struct urb *urb)
+{
+ hcd_to_bus(hcd)->bandwidth_allocated -= bw / urb->interval;
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ hcd_to_bus(hcd)->bandwidth_isoc_reqs--;
+ } else {
+ hcd_to_bus(hcd)->bandwidth_int_reqs--;
+ }
+}
+
+/**
+ * Sets the final status of an URB and returns it to the device driver. Any
+ * required cleanup of the URB is performed.
+ */
+static int _complete(fh_otg_hcd_t * hcd, void *urb_handle,
+ fh_otg_hcd_urb_t * fh_otg_urb, int32_t status)
+{
+ struct urb *urb = (struct urb *)urb_handle;
+#ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ FH_PRINTF("%s: urb %p, device %d, ep %d %s, status=%d\n",
+ __func__, urb, usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "IN" : "OUT", status);
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+ for (i = 0; i < urb->number_of_packets; i++) {
+ FH_PRINTF(" ISO Desc %d status: %d\n",
+ i, urb->iso_frame_desc[i].status);
+ }
+ }
+ }
+#endif
+
+ urb->actual_length = fh_otg_hcd_urb_get_actual_length(fh_otg_urb);
+ /* Convert status value. */
+ switch (status) {
+ case -FH_E_PROTOCOL:
+ status = -EPROTO;
+ break;
+ case -FH_E_IN_PROGRESS:
+ status = -EINPROGRESS;
+ break;
+ case -FH_E_PIPE:
+ status = -EPIPE;
+ break;
+ case -FH_E_IO:
+ status = -EIO;
+ break;
+ case -FH_E_TIMEOUT:
+ status = -ETIMEDOUT;
+ break;
+ case -FH_E_OVERFLOW:
+ status = -EOVERFLOW;
+ break;
+ default:
+ if (status) {
+			FH_PRINTF("Unknown urb status %d\n", status);
+
+ }
+ }
+
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+
+ urb->error_count = fh_otg_hcd_urb_get_error_count(fh_otg_urb);
+ for (i = 0; i < urb->number_of_packets; ++i) {
+ urb->iso_frame_desc[i].actual_length =
+ fh_otg_hcd_urb_get_iso_desc_actual_length
+ (fh_otg_urb, i);
+ urb->iso_frame_desc[i].status =
+ fh_otg_hcd_urb_get_iso_desc_status(fh_otg_urb, i);
+ }
+ }
+
+ urb->status = status;
+ if (!status) {
+ if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+ (urb->actual_length < urb->transfer_buffer_length)) {
+ urb->status = -EREMOTEIO;
+ }
+ }
+
+ if ((usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) ||
+ (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
+ struct usb_host_endpoint *ep = fh_urb_to_endpoint(urb);
+ if (ep) {
+ free_bus_bandwidth(fh_otg_hcd_to_hcd(hcd),
+ fh_otg_hcd_get_ep_bandwidth(hcd,
+ ep->hcpriv),
+ urb);
+ }
+ }
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
+ usb_hcd_unlink_urb_from_ep(fh_otg_hcd_to_hcd(hcd), urb);
+#endif
+
+ urb->hcpriv = NULL;
+ FH_FREE(fh_otg_urb);
+
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ FH_SPINUNLOCK(hcd->lock);
+ usb_hcd_giveback_urb(fh_otg_hcd_to_hcd(hcd), urb);
+ FH_SPINLOCK(hcd->lock);
+#elif LINUX_VERSION_CODE <= KERNEL_VERSION(3,7,7)
+ FH_SPINUNLOCK(hcd->lock);
+ usb_hcd_giveback_urb(fh_otg_hcd_to_hcd(hcd), urb, status);
+ FH_SPINLOCK(hcd->lock);
+#else
+ usb_hcd_giveback_urb(fh_otg_hcd_to_hcd(hcd), urb, status);
+#endif
+
+ return 0;
+}
+
+static struct fh_otg_hcd_function_ops hcd_fops = {
+ .start = _start,
+ .disconnect = _disconnect,
+ .hub_info = _hub_info,
+ .speed = _speed,
+ .complete = _complete,
+ .get_b_hnp_enable = _get_b_hnp_enable,
+};
+
+/**
+ * Initializes the HCD. This function allocates memory for and initializes the
+ * static parts of the usb_hcd and fh_otg_hcd structures. It also registers the
+ * USB bus with the core and calls the hc_driver->start() function. It returns
+ * a negative error on failure.
+ */
+int hcd_init(struct platform_device *dev, int irq)
+{
+	struct usb_hcd *hcd = NULL;
+	fh_otg_hcd_t *fh_otg_hcd = NULL;
+	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
+
+	int retval = 0;
+
+	printk(KERN_ERR "FH OTG HCD INIT (%p)\n", otg_dev);
+
+	/* Set device flags indicating whether the HCD supports DMA */
+	if (otg_dev->core_if->dma_enable > 0) {
+		if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32)) < 0)
+			printk(KERN_ERR "can't set DMA mask\n");
+		if (dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32)) < 0)
+			printk(KERN_ERR "can't set coherent DMA mask\n");
+	}
+
+	/*
+	 * Allocate memory for the base HCD plus the FH OTG HCD.
+	 * Initialize the base HCD.
+	 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
+	hcd = usb_create_hcd(&fh_otg_hc_driver, &dev->dev, dev->dev.bus_id);
+#else
+	hcd = usb_create_hcd(&fh_otg_hc_driver, &dev->dev, dev_name(&dev->dev));
+
+	hcd->has_tt = 1;
+//	hcd->uses_new_polling = 1;
+//	hcd->poll_rh = 0;
+#endif
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto error1;
+	}
+
+	printk(KERN_ERR "hcd regs before base(%p)\n", otg_dev->os_dep.base);
+	hcd->regs = otg_dev->os_dep.base;
+
+	/* Initialize the FH OTG HCD. */
+	fh_otg_hcd = fh_otg_hcd_alloc_hcd();
+	if (!fh_otg_hcd) {
+		/* BUG FIX: previously fell through with retval == 0, so the
+		 * caller saw a failed init as success. */
+		retval = -ENOMEM;
+		goto error2;
+	}
+	((struct wrapper_priv_data *)(hcd->hcd_priv))->fh_otg_hcd =
+	    fh_otg_hcd;
+	otg_dev->hcd = fh_otg_hcd;
+
+	if (fh_otg_hcd_init(fh_otg_hcd, otg_dev->core_if)) {
+		/* BUG FIX: was returning 0 (success) on this error path too. */
+		retval = -ENOMEM;
+		goto error2;
+	}
+
+	otg_dev->hcd->otg_dev = otg_dev;
+	hcd->self.otg_port = fh_otg_hcd_otg_port(fh_otg_hcd);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)	//don't support for LM(with 2.6.20.1 kernel)
+	//hcd->self.otg_version = fh_otg_get_otg_version(otg_dev->core_if);
+	/* Don't support SG list at this point */
+	hcd->self.sg_tablesize = 0;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
+	/* Do not to do HNP polling if not capable */
+	if (otg_dev->core_if->otg_ver)
+		hcd->self.is_hnp_cap = fh_otg_get_hnpcapable(otg_dev->core_if);
+#endif
+	/*
+	 * Finish generic HCD initialization and start the HCD. This function
+	 * allocates the DMA buffer pool, registers the USB bus, requests the
+	 * IRQ line, and calls hcd_start method.
+	 */
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+	if (retval < 0) {
+		goto error2;
+	}
+
+	fh_otg_hcd_set_priv_data(fh_otg_hcd, hcd);
+	platform_set_drvdata(dev, otg_dev);
+	return 0;
+
+error2:
+	usb_put_hcd(hcd);
+error1:
+	return retval;
+}
+
+/**
+ * Removes the HCD.
+ * Frees memory and resources associated with the HCD and deregisters the bus.
+ * Each pointer in the otg_dev -> hcd -> usb_hcd chain is validated before use
+ * so that a partially-initialized device can be torn down safely.
+ */
+void hcd_remove(struct platform_device *dev)
+{
+	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
+
+
+	fh_otg_hcd_t *fh_otg_hcd;
+	struct usb_hcd *hcd;
+
+	FH_DEBUGPL(DBG_HCD, "FH OTG HCD REMOVE\n");
+
+	if (!otg_dev) {
+		FH_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__);
+		return;
+	}
+
+	fh_otg_hcd = otg_dev->hcd;
+
+	if (!fh_otg_hcd) {
+		FH_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__);
+		return;
+	}
+
+	hcd = fh_otg_hcd_to_hcd(fh_otg_hcd);
+
+	if (!hcd) {
+		FH_DEBUGPL(DBG_ANY,
+			   "%s: fh_otg_hcd_to_hcd(fh_otg_hcd) NULL!\n",
+			   __func__);
+		return;
+	}
+	/* Detach from the USB core first, then release driver-private state
+	 * and finally drop the usb_hcd reference taken at creation time. */
+	usb_remove_hcd(hcd);
+	fh_otg_hcd_set_priv_data(fh_otg_hcd, NULL);
+	fh_otg_hcd_remove(fh_otg_hcd);
+	usb_put_hcd(hcd);
+}
+
+/* =========================================================================
+ * Linux HC Driver Functions
+ * ========================================================================= */
+
+/** Initializes the FH_otg controller and its root hub and prepares it for host
+ * mode operation. Activates the root port. Returns 0 on success and a negative
+ * error code on failure. */
+int hcd_start(struct usb_hcd *hcd)
+{
+	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
+	struct usb_bus *bus;
+
+	FH_DEBUGPL(DBG_HCD, "FH OTG HCD START\n");
+	bus = hcd_to_bus(hcd);
+
+	hcd->state = HC_STATE_RUNNING;
+	if (fh_otg_hcd_start(fh_otg_hcd, &hcd_fops)) {
+		/* Core-level start declined (non-zero return); when OTG 2.0 is
+		 * in use and the core is in device mode, record that state.
+		 * NOTE(review): 0 is still returned here - looks intentional
+		 * (start is deferred until host mode), but confirm. */
+		if (fh_otg_hcd->core_if->otg_ver && fh_otg_is_device_mode(fh_otg_hcd->core_if))
+			fh_otg_hcd->core_if->op_state = B_PERIPHERAL;
+		return 0;
+	}
+
+	/* Initialize and connect root hub if one is not already attached */
+	if (bus->root_hub) {
+		FH_DEBUGPL(DBG_HCD, "FH OTG HCD Has Root Hub\n");
+		/* Inform the HUB driver to resume. */
+		usb_hcd_resume_root_hub(hcd);
+	}
+
+	return 0;
+}
+
+/**
+ * Halts the FH_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ */
+void hcd_stop(struct usb_hcd *hcd)
+{
+	fh_otg_hcd_stop(hcd_to_fh_otg_hcd(hcd));
+}
+
+/** Returns the current (micro)frame number as reported by the core. */
+static int get_frame_number(struct usb_hcd *hcd)
+{
+	return fh_otg_hcd_get_frame_number(hcd_to_fh_otg_hcd(hcd));
+}
+
+#ifdef DEBUG
+/** Dumps the interesting fields of a URB for debug tracing. Output is
+ * identical to the original statement-expression version; the type/speed
+ * strings are simply resolved up front for readability. */
+static void dump_urb_info(struct urb *urb, char *fn_name)
+{
+	char *pipetype;
+	char *speed;
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		pipetype = "CONTROL";
+		break;
+	case PIPE_BULK:
+		pipetype = "BULK";
+		break;
+	case PIPE_INTERRUPT:
+		pipetype = "INTERRUPT";
+		break;
+	case PIPE_ISOCHRONOUS:
+		pipetype = "ISOCHRONOUS";
+		break;
+	default:
+		pipetype = "UNKNOWN";
+		break;
+	}
+
+	switch (urb->dev->speed) {
+	case USB_SPEED_HIGH:
+		speed = "HIGH";
+		break;
+	case USB_SPEED_FULL:
+		speed = "FULL";
+		break;
+	case USB_SPEED_LOW:
+		speed = "LOW";
+		break;
+	default:
+		speed = "UNKNOWN";
+		break;
+	}
+
+	printk("%s, urb %p\n", fn_name, urb);
+	printk("  Device address: %d\n", usb_pipedevice(urb->pipe));
+	printk("  Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
+	       (usb_pipein(urb->pipe) ? "IN" : "OUT"));
+	printk("  Endpoint type: %s\n", pipetype);
+	printk("  Speed: %s\n", speed);
+	printk("  Max packet size: %d\n",
+	       usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+	printk("  Data buffer length: %d\n", urb->transfer_buffer_length);
+	printk("  Transfer buffer: %p, Transfer DMA: %p\n",
+	       urb->transfer_buffer, (void *)urb->transfer_dma);
+	printk("  Setup buffer: %p, Setup DMA: %p\n",
+	       urb->setup_packet, (void *)urb->setup_dma);
+	printk("  Interval: %d\n", urb->interval);
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+		int i;
+		for (i = 0; i < urb->number_of_packets; i++) {
+			printk("  ISO Desc %d:\n", i);
+			printk("    offset: %d, length %d\n",
+			       urb->iso_frame_desc[i].offset,
+			       urb->iso_frame_desc[i].length);
+		}
+	}
+}
+#endif
+
+/** Starts processing a USB transfer request specified by a USB Request Block
+ * (URB). mem_flags indicates the type of memory allocation to use while
+ * processing this URB. Returns 0 on success or a negative error code. */
+static int ___urb_enqueue(struct usb_hcd *hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+			  struct usb_host_endpoint *ep,
+#endif
+			  struct urb *urb, gfp_t mem_flags)
+{
+	int retval = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
+	struct usb_host_endpoint *ep = urb->ep;
+#endif
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
+	/* BUG FIX: irqflags was used below without ever being declared. */
+	fh_irqflags_t irqflags;
+#endif
+	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
+	fh_otg_hcd_urb_t *fh_otg_urb;
+	int i;
+	int alloc_bandwidth = 0;
+	uint8_t ep_type = 0;
+	uint32_t flags = 0;
+	void *buf;
+
+#ifdef DEBUG
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		dump_urb_info(urb, "urb_enqueue");
+	}
+#endif
+
+	/* Periodic endpoints need a bus-bandwidth reservation the first time
+	 * a transfer is queued on them. */
+	if ((usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+	    || (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
+		if (!fh_otg_hcd_is_bandwidth_allocated
+		    (fh_otg_hcd, &ep->hcpriv)) {
+			alloc_bandwidth = 1;
+		}
+	}
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		ep_type = USB_ENDPOINT_XFER_CONTROL;
+		break;
+	case PIPE_ISOCHRONOUS:
+		ep_type = USB_ENDPOINT_XFER_ISOC;
+		break;
+	case PIPE_BULK:
+		ep_type = USB_ENDPOINT_XFER_BULK;
+		break;
+	case PIPE_INTERRUPT:
+		ep_type = USB_ENDPOINT_XFER_INT;
+		break;
+	default:
+		FH_WARN("Wrong ep type\n");
+	}
+
+	fh_otg_urb = fh_otg_hcd_urb_alloc(fh_otg_hcd,
+					  urb->number_of_packets,
+					  mem_flags == GFP_ATOMIC ? 1 : 0);
+	/* BUG FIX: allocation failure was previously unchecked and the NULL
+	 * pointer was dereferenced immediately below. */
+	if (!fh_otg_urb)
+		return -ENOMEM;
+
+	fh_otg_hcd_urb_set_pipeinfo(fh_otg_urb, usb_pipedevice(urb->pipe),
+				    usb_pipeendpoint(urb->pipe), ep_type,
+				    usb_pipein(urb->pipe),
+				    usb_maxpacket(urb->dev, urb->pipe,
+						  !(usb_pipein(urb->pipe))));
+
+	buf = urb->transfer_buffer;
+	if (hcd->self.uses_dma) {
+		/*
+		 * Calculate virtual address from physical address,
+		 * because some class driver may not fill transfer_buffer.
+		 * In Buffer DMA mode virual address is used,
+		 * when handling non DWORD aligned buffers.
+		 */
+		buf = phys_to_virt(urb->transfer_dma);
+	}
+
+	if (!(urb->transfer_flags & URB_NO_INTERRUPT))
+		flags |= URB_GIVEBACK_ASAP;
+	if (urb->transfer_flags & URB_ZERO_PACKET)
+		flags |= URB_SEND_ZERO_PACKET;
+
+	fh_otg_hcd_urb_set_params(fh_otg_urb, urb, buf,
+				  urb->transfer_dma,
+				  urb->transfer_buffer_length,
+				  urb->setup_packet,
+				  urb->setup_dma, flags, urb->interval);
+
+	for (i = 0; i < urb->number_of_packets; ++i) {
+		fh_otg_hcd_urb_set_iso_desc_params(fh_otg_urb, i,
+						   urb->
+						   iso_frame_desc[i].offset,
+						   urb->
+						   iso_frame_desc[i].length);
+	}
+
+	urb->hcpriv = fh_otg_urb;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
+	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &irqflags);
+	retval = usb_hcd_link_urb_to_ep(hcd, urb);
+	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, irqflags);
+	if (retval){
+		goto fail1;
+	}
+#endif
+
+	retval = fh_otg_hcd_urb_enqueue(fh_otg_hcd, fh_otg_urb, &ep->hcpriv,
+					mem_flags == GFP_ATOMIC ? 1 : 0);
+	if (retval){
+		goto fail2;
+	}
+
+	if (alloc_bandwidth) {
+		allocate_bus_bandwidth(hcd,
+				       fh_otg_hcd_get_ep_bandwidth
+				       (fh_otg_hcd, ep->hcpriv), urb);
+	}
+
+	return 0;
+
+fail2:
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
+	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &irqflags);
+	fh_otg_urb->priv = NULL;
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, irqflags);
+fail1:
+	/* BUG FIX: fail1 was wrapped in "#ifdef DEBUG", but the goto that
+	 * targets it is guarded by the kernel-version check above - a
+	 * non-DEBUG build on kernels > 3.7.7 would not compile. Guard the
+	 * label with the same condition as its goto. */
+#endif
+	urb->hcpriv = NULL;
+	FH_FREE(fh_otg_urb);
+
+	return retval;
+}
+
+/* Public enqueue entry point registered with the USB core. It wraps the real
+ * worker with interrupts disabled; because of that, GFP_ATOMIC is forced
+ * regardless of the caller's mem_flags (sleeping allocations are not allowed
+ * with IRQs off). */
+static int urb_enqueue(struct usb_hcd *hcd,
+		       struct urb *urb, gfp_t mem_flags)
+{
+	int ret;
+	unsigned long flagxx;
+
+	local_irq_save(flagxx);
+	ret = ___urb_enqueue(hcd, urb, GFP_ATOMIC);
+	local_irq_restore(flagxx);
+
+	return ret;
+}
+
+/** Aborts/cancels a USB transfer request. Returns 0 on success, or a negative
+ * error code if the USB core vetoes the unlink (kernels > 3.7.7). */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+#else
+static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+#endif
+{
+	fh_irqflags_t flags;
+	fh_otg_hcd_t *fh_otg_hcd;
+	int rc = 0;
+	FH_DEBUGPL(DBG_HCD, "FH OTG HCD URB Dequeue\n");
+
+	fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
+
+#ifdef DEBUG
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		dump_urb_info(urb, "urb_dequeue");
+	}
+#endif
+
+	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
+	/* Let the USB core reject the unlink (e.g. URB already given back). */
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (rc)
+		goto out;
+#endif
+
+	/* hcpriv is NULL when the URB was never queued or already completed. */
+	if (!urb->hcpriv) {
+		FH_DEBUGPL(DBG_HCD, "urb->hcpriv is NULL\n");
+		goto out;
+	}
+
+	rc = fh_otg_hcd_urb_dequeue(fh_otg_hcd, urb->hcpriv);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+#endif
+
+	FH_FREE(urb->hcpriv);
+	urb->hcpriv = NULL;
+
+	/* Higher layer software sets URB status. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+	usb_hcd_giveback_urb(hcd, urb);
+#else
+	/* Drop the lock across giveback: the completion callback may re-enter
+	 * this driver and try to take the same lock. */
+	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, flags);	//mvardan
+	usb_hcd_giveback_urb(hcd, urb, status);
+	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);	//mvardan
+#endif
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		FH_PRINTF("Called usb_hcd_giveback_urb()\n");
+		FH_PRINTF("  urb->status = %d\n", urb->status);
+	}
+out:
+	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, flags);
+
+	return rc;
+}
+
+/* Frees resources in the FH_otg controller related to a given endpoint. Also
+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint
+ * must already be dequeued. */
+static void endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
+
+	FH_DEBUGPL(DBG_HCD,
+		   "FH OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, "
+		   "endpoint=%d\n", ep->desc.bEndpointAddress,
+		   fh_ep_addr_to_endpoint(ep->desc.bEndpointAddress));
+	/* 250: retry budget passed to the core - presumably iterations to
+	 * wait for the QH to empty; TODO confirm units in the core code. */
+	fh_otg_hcd_endpoint_disable(fh_otg_hcd, ep->hcpriv, 250);
+	ep->hcpriv = NULL;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+/* Resets endpoint specific parameter values, in current version used to reset
+ * the data toggle(as a WA). This function can be called from usb_clear_halt routine */
+static void endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	fh_irqflags_t flags;
+	struct usb_device *udev = NULL;
+	int epnum = usb_endpoint_num(&ep->desc);
+	int is_out = usb_endpoint_dir_out(&ep->desc);
+	int is_control = usb_endpoint_xfer_control(&ep->desc);
+	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
+	struct platform_device *_dev = fh_otg_hcd->otg_dev->os_dep.pdev;
+
+	/* NOTE(review): to_usb_device() on a platform_device's struct device
+	 * looks type-unsafe (container_of against the wrong parent type) -
+	 * verify which device this is meant to resolve. */
+	if (_dev)
+		udev = to_usb_device(&_dev->dev);
+	else
+		return;
+
+	FH_DEBUGPL(DBG_HCD, "FH OTG HCD EP RESET: Endpoint Num=0x%02d\n", epnum);
+
+	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);
+	/* Clear the data toggle; for control endpoints both directions. */
+	usb_settoggle(udev, epnum, is_out, 0);
+	if (is_control)
+		usb_settoggle(udev, epnum, !is_out, 0);
+
+	if (ep->hcpriv) {
+		fh_otg_hcd_endpoint_reset(fh_otg_hcd, ep->hcpriv);
+	}
+	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, flags);
+}
+#endif
+
+/** Handles host mode interrupts for the FH_otg controller. Returns IRQ_NONE if
+ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
+ * interrupt.
+ *
+ * This function is called by the USB core when an interrupt occurs */
+static irqreturn_t fh_otg_hcd_irq(struct usb_hcd *hcd)
+{
+	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
+	int32_t retval = fh_otg_hcd_handle_intr(fh_otg_hcd);
+	if (retval != 0) {
+		/* Platform hook - a no-op macro on non-S3C2410 builds,
+		 * presumably; confirm its definition for this SoC. */
+		S3C2410X_CLEAR_EINTPEND();
+	}
+	return IRQ_RETVAL(retval);
+}
+
+/** Creates Status Change bitmap for the root hub and root port. The bitmap is
+ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
+ * is the status change indicator for the single root port. Returns 1 if either
+ * change indicator is 1, otherwise returns 0. */
+int hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
+
+	/* Bit 0 (root-hub change) is never set here; only port 1's change
+	 * flag is reported, shifted into bit 1. */
+	buf[0] = 0;
+	buf[0] |= (fh_otg_hcd_is_status_changed(fh_otg_hcd, 1)) << 1;
+
+	return (buf[0] != 0);
+}
+
+/** Handles hub class-specific requests by forwarding them to the common HCD
+ * core and translating its error codes to Linux errno values. */
+int hub_control(struct usb_hcd *hcd,
+		u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+	int retval = fh_otg_hcd_hub_control(hcd_to_fh_otg_hcd(hcd),
+					    typeReq, wValue, wIndex, buf,
+					    wLength);
+
+	if (retval == -FH_E_INVALID)
+		retval = -EINVAL;
+
+	return retval;
+}
+
+#endif /* FH_DEVICE_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_queue.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_queue.c
new file mode 100644
index 00000000..37e085db
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_queue.c
@@ -0,0 +1,731 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_queue.c $
+ * $Revision: #45 $
+ * $Date: 2013/01/24 $
+ * $Change: 2150293 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the functions to manage Queue Heads and Queue
+ * Transfer Descriptors.
+ */
+
+#include "fh_otg_hcd.h"
+#include "fh_otg_regs.h"
+
+/**
+ * Free each QTD in the QH's QTD-list then free the QH. QH should already be
+ * removed from a list. QTD list should already be empty if called from URB
+ * Dequeue.
+ *
+ * The HCD lock is dropped around fh_otg_hcd_qh_free_ddma() / FH_DMA_FREE()
+ * because those may sleep; it is re-taken before the final FH_FREE.
+ *
+ * @param hcd HCD instance.
+ * @param qh The QH to free.
+ */
+void fh_otg_hcd_qh_free(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	fh_otg_qtd_t *qtd, *qtd_tmp;
+	fh_irqflags_t flags;
+
+	/* Free each QTD in the QTD list */
+	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
+	FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
+		FH_CIRCLEQ_REMOVE(&qh->qtd_list, qtd, qtd_list_entry);
+		fh_otg_hcd_qtd_free(qtd);
+	}
+
+	if (hcd->core_if->dma_desc_enable) {
+		FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
+		fh_otg_hcd_qh_free_ddma(hcd, qh);
+		FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
+	} else if (qh->dw_align_buf) {
+		uint32_t buf_size;
+		/* Buffer size must match the one used at allocation time. */
+		if (qh->ep_type == UE_ISOCHRONOUS) {
+			buf_size = 4096;
+		} else {
+			buf_size = hcd->core_if->core_params->max_transfer_size;
+		}
+		FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
+		FH_DMA_FREE(buf_size, qh->dw_align_buf, qh->dw_align_buf_dma);
+		FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
+	}
+
+	FH_FREE(qh);
+	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
+	return;
+}
+
+#define BitStuffTime(bytecount) ((8 * 7* bytecount) / 6)
+#define HS_HOST_DELAY 5 /* nanoseconds */
+#define FS_LS_HOST_DELAY 1000 /* nanoseconds */
+#define HUB_LS_SETUP 333 /* nanoseconds */
+#define NS_TO_US(ns) ((ns + 500) / 1000)
+ /* convert & round nanoseconds to microseconds */
+
+/* Estimates the bus time, in microseconds, consumed by one transaction of
+ * @bytecount bytes at the given speed/direction/type. Constants follow the
+ * USB 2.0 bandwidth formulas (presumably section 5.11.3 - confirm).
+ *
+ * NOTE(review): on an unknown speed, retval is set to -1 in an *unsigned
+ * long*, so NS_TO_US() of the wrapped value is returned rather than an error
+ * indicator - callers cannot detect the failure. */
+static uint32_t calc_bus_time(int speed, int is_in, int is_isoc, int bytecount)
+{
+	unsigned long retval;
+
+	switch (speed) {
+	case USB_SPEED_HIGH:
+		if (is_isoc) {
+			retval =
+			    ((38 * 8 * 2083) +
+			     (2083 * (3 + BitStuffTime(bytecount)))) / 1000 +
+			    HS_HOST_DELAY;
+		} else {
+			retval =
+			    ((55 * 8 * 2083) +
+			     (2083 * (3 + BitStuffTime(bytecount)))) / 1000 +
+			    HS_HOST_DELAY;
+		}
+		break;
+	case USB_SPEED_FULL:
+		if (is_isoc) {
+			retval =
+			    (8354 * (31 + 10 * BitStuffTime(bytecount))) / 1000;
+			if (is_in) {
+				retval = 7268 + FS_LS_HOST_DELAY + retval;
+			} else {
+				retval = 6265 + FS_LS_HOST_DELAY + retval;
+			}
+		} else {
+			retval =
+			    (8354 * (31 + 10 * BitStuffTime(bytecount))) / 1000;
+			retval = 9107 + FS_LS_HOST_DELAY + retval;
+		}
+		break;
+	case USB_SPEED_LOW:
+		if (is_in) {
+			retval =
+			    (67667 * (31 + 10 * BitStuffTime(bytecount))) /
+			    1000;
+			retval =
+			    64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
+			    retval;
+		} else {
+			retval =
+			    (66700 * (31 + 10 * BitStuffTime(bytecount))) /
+			    1000;
+			retval =
+			    64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
+			    retval;
+		}
+		break;
+	default:
+		FH_WARN("Unknown device speed\n");
+		retval = -1;
+	}
+
+	return NS_TO_US(retval);
+}
+
+/**
+ * Initializes a QH structure.
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param qh The QH to init.
+ * @param urb Holds the information about the device/endpoint that we need
+ * to initialize the QH.
+ */
+#define SCHEDULE_SLOP 10
+void qh_init(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh, fh_otg_hcd_urb_t * urb)
+{
+	char *speed, *type;
+	int dev_speed;
+	uint32_t hub_addr, hub_port;
+
+	fh_memset(qh, 0, sizeof(fh_otg_qh_t));
+
+	/* Initialize QH */
+	qh->ep_type = fh_otg_hcd_get_pipe_type(&urb->pipe_info);
+	qh->ep_is_in = fh_otg_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;
+
+	qh->data_toggle = FH_OTG_HC_PID_DATA0;
+	qh->maxp = fh_otg_hcd_get_mps(&urb->pipe_info);
+	FH_CIRCLEQ_INIT(&qh->qtd_list);
+	FH_LIST_INIT(&qh->qh_list_entry);
+	qh->channel = NULL;
+
+	/* FS/LS Enpoint on HS Hub
+	 * NOT virtual root hub */
+	dev_speed = hcd->fops->speed(hcd, urb->priv);
+
+	hcd->fops->hub_info(hcd, urb->priv, &hub_addr, &hub_port);
+	qh->do_split = 0;
+
+	/* A low/full-speed device behind a real hub (address not 0/1) needs
+	 * split transactions through that hub's TT. */
+	if (((dev_speed == USB_SPEED_LOW) ||
+	     (dev_speed == USB_SPEED_FULL)) &&
+	    (hub_addr != 0 && hub_addr != 1)) {
+		FH_DEBUGPL(DBG_HCD,
+			   "QH init: EP %d: TT found at hub addr %d, for port %d\n",
+			   fh_otg_hcd_get_ep_num(&urb->pipe_info), hub_addr,
+			   hub_port);
+		qh->do_split = 1;
+	}
+
+	if (qh->ep_type == UE_INTERRUPT || qh->ep_type == UE_ISOCHRONOUS) {
+		/* Compute scheduling parameters once and save them. */
+		hprt0_data_t hprt;
+
+		/** @todo Account for split transfers in the bus time. */
+		int bytecount =
+		    fh_hb_mult(qh->maxp) * fh_max_packet(qh->maxp);
+
+		qh->usecs =
+		    calc_bus_time((qh->do_split ? USB_SPEED_HIGH : dev_speed),
+				  qh->ep_is_in, (qh->ep_type == UE_ISOCHRONOUS),
+				  bytecount);
+		/* Start in a slightly future (micro)frame. */
+		qh->sched_frame = fh_frame_num_inc(hcd->frame_number,
+						   SCHEDULE_SLOP);
+		qh->interval = urb->interval;
+
+#if 0
+		/* Increase interrupt polling rate for debugging. */
+		if (qh->ep_type == UE_INTERRUPT) {
+			qh->interval = 8;
+		}
+#endif
+		hprt.d32 = FH_READ_REG32(hcd->core_if->host_if->hprt0);
+		/* When the port runs high-speed but the device is LS/FS, the
+		 * interval is in frames, not microframes: scale by 8 and pin
+		 * the schedule to a frame boundary (|= 0x7). */
+		if ((hprt.b.prtspd == FH_HPRT0_PRTSPD_HIGH_SPEED) &&
+		    ((dev_speed == USB_SPEED_LOW) ||
+		     (dev_speed == USB_SPEED_FULL))) {
+			qh->interval *= 8;
+			qh->sched_frame |= 0x7;
+			qh->start_split_frame = qh->sched_frame;
+		}
+
+	}
+
+	FH_DEBUGPL(DBG_HCD, "FH OTG HCD QH Initialized\n");
+	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH - qh = %p\n", qh);
+	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH - Device Address = %d\n",
+		   fh_otg_hcd_get_dev_addr(&urb->pipe_info));
+	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH - Endpoint %d, %s\n",
+		   fh_otg_hcd_get_ep_num(&urb->pipe_info),
+		   fh_otg_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
+	switch (dev_speed) {
+	case USB_SPEED_LOW:
+		qh->dev_speed = FH_OTG_EP_SPEED_LOW;
+		speed = "low";
+		break;
+	case USB_SPEED_FULL:
+		qh->dev_speed = FH_OTG_EP_SPEED_FULL;
+		speed = "full";
+		break;
+	case USB_SPEED_HIGH:
+		qh->dev_speed = FH_OTG_EP_SPEED_HIGH;
+		speed = "high";
+		break;
+	default:
+		speed = "?";
+		break;
+	}
+	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH - Speed = %s\n", speed);
+
+	switch (qh->ep_type) {
+	case UE_ISOCHRONOUS:
+		type = "isochronous";
+		break;
+	case UE_INTERRUPT:
+		type = "interrupt";
+		break;
+	case UE_CONTROL:
+		type = "control";
+		break;
+	case UE_BULK:
+		type = "bulk";
+		break;
+	default:
+		type = "?";
+		break;
+	}
+
+	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH - Type = %s\n", type);
+
+#ifdef DEBUG
+	if (qh->ep_type == UE_INTERRUPT) {
+		FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH - usecs = %d\n",
+			   qh->usecs);
+		FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH - interval = %d\n",
+			   qh->interval);
+	}
+#endif
+
+}
+
+/**
+ * This function allocates and initializes a QH.
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param urb Holds the information about the device/endpoint that we need
+ * to initialize the QH.
+ * @param atomic_alloc Flag to do atomic allocation if needed
+ *
+ * @return Returns pointer to the newly allocated QH, or NULL on error. */
+fh_otg_qh_t *fh_otg_hcd_qh_create(fh_otg_hcd_t * hcd,
+				  fh_otg_hcd_urb_t * urb, int atomic_alloc)
+{
+	fh_otg_qh_t *qh;
+
+	/* Allocate memory */
+	/** @todo add memflags argument */
+	qh = fh_otg_hcd_qh_alloc(atomic_alloc);
+	if (qh == NULL) {
+		FH_ERROR("qh allocation failed");
+		return NULL;
+	}
+
+	qh_init(hcd, qh, urb);
+
+	/* In descriptor-DMA mode the QH also needs its descriptor list; if
+	 * that fails, release the half-built QH before reporting failure. */
+	if (hcd->core_if->dma_desc_enable
+	    && (fh_otg_hcd_qh_init_ddma(hcd, qh) < 0)) {
+		fh_otg_hcd_qh_free(hcd, qh);
+		return NULL;
+	}
+
+	return qh;
+}
+
+/**
+ * Checks that a host channel is free for a periodic transfer.
+ *
+ * Assumes one dedicated host channel per periodic transaction plus at least
+ * one channel reserved for non-periodic traffic.
+ *
+ * @return 0 if a channel is available, negative error code otherwise.
+ */
+static int periodic_channel_available(fh_otg_hcd_t * hcd)
+{
+	int num_channels = hcd->core_if->core_params->host_channels;
+	int in_use = hcd->periodic_channels + hcd->non_periodic_channels;
+
+	if (in_use < num_channels &&
+	    hcd->periodic_channels < num_channels - 1)
+		return 0;
+
+	FH_INFO("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
+		__func__, num_channels, hcd->periodic_channels, hcd->non_periodic_channels);	//NOTICE
+	return -FH_E_NO_SPACE;
+}
+
+/**
+ * Checks that there is sufficient bandwidth for the specified QH in the
+ * periodic schedule. For simplicity, this calculation assumes that all the
+ * transfers in the periodic schedule may occur in the same (micro)frame.
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param qh QH containing periodic bandwidth required.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int check_periodic_bandwidth(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	int16_t max_claimed_usecs;
+
+	/*
+	 * High speed (or split): max periodic usecs is 80% x 125 usec = 100.
+	 * Full speed: max periodic usecs is 90% x 1000 usec = 900.
+	 */
+	if ((qh->dev_speed == FH_OTG_EP_SPEED_HIGH) || qh->do_split)
+		max_claimed_usecs = 100 - qh->usecs;
+	else
+		max_claimed_usecs = 900 - qh->usecs;
+
+	if (hcd->periodic_usecs <= max_claimed_usecs)
+		return 0;
+
+	FH_INFO("%s: already claimed usecs %d, required usecs %d\n", __func__, hcd->periodic_usecs, qh->usecs);	//NOTICE
+	return -FH_E_NO_SPACE;
+}
+
+/**
+ * Checks that the max transfer size allowed in a host channel is large enough
+ * to handle the maximum data transfer in a single (micro)frame for a periodic
+ * transfer.
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param qh QH for a periodic endpoint.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int check_max_xfer_size(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	uint32_t max_xfer_size =
+	    fh_max_packet(qh->maxp) * fh_hb_mult(qh->maxp);
+	uint32_t max_channel_xfer_size =
+	    hcd->core_if->core_params->max_transfer_size;
+
+	if (max_xfer_size <= max_channel_xfer_size)
+		return 0;
+
+	FH_INFO("%s: Periodic xfer length %d > " "max xfer length for channel %d\n",
+		__func__, max_xfer_size, max_channel_xfer_size);	//NOTICE
+	return -FH_E_NO_SPACE;
+}
+
+/**
+ * Schedules an interrupt or isochronous transfer in the periodic schedule.
+ * All three admission checks (free channel, bandwidth, max transfer size)
+ * must pass before the QH is inserted and the resources are claimed.
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param qh QH for the periodic transfer. The QH should already contain the
+ * scheduling information.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int schedule_periodic(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	int status = 0;
+
+	status = periodic_channel_available(hcd);
+	if (status) {
+		FH_INFO("%s: No host channel available for periodic " "transfer.\n", __func__);	//NOTICE
+		return status;
+	}
+
+	status = check_periodic_bandwidth(hcd, qh);
+	if (status) {
+		FH_INFO("%s: Insufficient periodic bandwidth for " "periodic transfer.\n", __func__);	//NOTICE
+		return status;
+	}
+
+	status = check_max_xfer_size(hcd, qh);
+	if (status) {
+		FH_INFO("%s: Channel max transfer size too small " "for periodic transfer.\n", __func__);	//NOTICE
+		return status;
+	}
+
+	if (hcd->core_if->dma_desc_enable) {
+		/* Don't rely on SOF and start in ready schedule */
+		FH_LIST_INSERT_TAIL(&hcd->periodic_sched_ready, &qh->qh_list_entry);
+	}
+	else {
+		/* Always start in the inactive schedule. */
+		FH_LIST_INSERT_TAIL(&hcd->periodic_sched_inactive, &qh->qh_list_entry);
+	}
+
+	/* Reserve the periodic channel. */
+	hcd->periodic_channels++;
+
+	/* Update claimed usecs per (micro)frame. */
+	hcd->periodic_usecs += qh->usecs;
+
+	return status;
+}
+
+/**
+ * This function adds a QH to either the non periodic or periodic schedule if
+ * it is not already in the schedule. If the QH is already in the schedule, no
+ * action is taken. When the first periodic QH is added, the SOF interrupt is
+ * unmasked so the periodic scheduler starts running.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int fh_otg_hcd_qh_add(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	int status = 0;
+	gintmsk_data_t intr_mask = {.d32 = 0 };
+
+	if (!FH_LIST_EMPTY(&qh->qh_list_entry)) {
+		/* QH already in a schedule. */
+		return status;
+	}
+
+	/* Add the new QH to the appropriate schedule */
+	if (fh_qh_is_non_per(qh)) {
+		/* Always start in the inactive schedule. */
+		FH_LIST_INSERT_TAIL(&hcd->non_periodic_sched_inactive,
+				    &qh->qh_list_entry);
+	} else {
+		status = schedule_periodic(hcd, qh);
+		/* First periodic QH: enable the SOF interrupt. */
+		if ( !hcd->periodic_qh_count ) {
+			intr_mask.b.sofintr = 1;
+			FH_MODIFY_REG32(&hcd->core_if->core_global_regs->gintmsk,
+					intr_mask.d32, intr_mask.d32);
+		}
+		hcd->periodic_qh_count++;
+	}
+
+	return status;
+}
+
+/**
+ * Removes an interrupt or isochronous transfer from the periodic schedule and
+ * returns its channel and bandwidth reservations to the pool.
+ *
+ * @param hcd The HCD state structure for the FH OTG controller.
+ * @param qh QH for the periodic transfer.
+ */
+static void deschedule_periodic(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	FH_LIST_REMOVE_INIT(&qh->qh_list_entry);
+
+	/* Release the periodic channel reservation. */
+	hcd->periodic_channels--;
+
+	/* Update claimed usecs per (micro)frame. */
+	hcd->periodic_usecs -= qh->usecs;
+}
+
+/**
+ * Removes a QH from either the non-periodic or periodic schedule. Memory is
+ * not freed. When the last periodic QH goes away, the SOF interrupt is masked
+ * again (mirror of fh_otg_hcd_qh_add).
+ *
+ * @param hcd The HCD state structure.
+ * @param qh QH to remove from schedule. */
+void fh_otg_hcd_qh_remove(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
+{
+	gintmsk_data_t intr_mask = {.d32 = 0 };
+
+	if (FH_LIST_EMPTY(&qh->qh_list_entry)) {
+		/* QH is not in a schedule. */
+		return;
+	}
+
+	if (fh_qh_is_non_per(qh)) {
+		/* Keep the round-robin pointer valid if it points here. */
+		if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry) {
+			hcd->non_periodic_qh_ptr =
+			    hcd->non_periodic_qh_ptr->next;
+		}
+		FH_LIST_REMOVE_INIT(&qh->qh_list_entry);
+	} else {
+		deschedule_periodic(hcd, qh);
+		hcd->periodic_qh_count--;
+		if( !hcd->periodic_qh_count ) {
+			intr_mask.b.sofintr = 1;
+			FH_MODIFY_REG32(&hcd->core_if->core_global_regs->gintmsk,
+					intr_mask.d32, 0);
+		}
+	}
+}
+
+/**
+ * Deactivates a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the inactive non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ *
+ * For periodic QHs, the QH is removed from the periodic queued schedule. If
+ * there are any QTDs still attached to the QH, the QH is added to either the
+ * periodic inactive schedule or the periodic ready schedule and its next
+ * scheduled frame is calculated. The QH is placed in the ready schedule if
+ * the scheduled frame has been reached already. Otherwise it's placed in the
+ * inactive schedule. If there are no QTDs attached to the QH, the QH is
+ * completely removed from the periodic schedule.
+ */
+void fh_otg_hcd_qh_deactivate(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh,
+			      int sched_next_periodic_split)
+{
+	if (fh_qh_is_non_per(qh)) {
+		fh_otg_hcd_qh_remove(hcd, qh);
+		if (!FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
+			/* Add back to inactive non-periodic schedule. */
+			fh_otg_hcd_qh_add(hcd, qh);
+		}
+	} else {
+		uint16_t frame_number = fh_otg_hcd_get_frame_number(hcd);
+
+		if (qh->do_split) {
+			/* Schedule the next continuing periodic split transfer */
+			if (sched_next_periodic_split) {
+
+				qh->sched_frame = frame_number;
+				if (fh_frame_num_le(frame_number,
+						    fh_frame_num_inc
+						    (qh->start_split_frame,
+						     1))) {
+					/*
+					 * Allow one frame to elapse after start
+					 * split microframe before scheduling
+					 * complete split, but DONT if we are
+					 * doing the next start split in the
+					 * same frame for an ISOC out.
+					 */
+					if ((qh->ep_type != UE_ISOCHRONOUS) ||
+					    (qh->ep_is_in != 0)) {
+						qh->sched_frame =
+						    fh_frame_num_inc(qh->sched_frame, 1);
+					}
+				}
+			} else {
+				/* Next start split: advance by the interval,
+				 * clamp to "now", and pin to a frame boundary. */
+				qh->sched_frame =
+				    fh_frame_num_inc(qh->start_split_frame,
+						     qh->interval);
+				if (fh_frame_num_le
+				    (qh->sched_frame, frame_number)) {
+					qh->sched_frame = frame_number;
+				}
+				qh->sched_frame |= 0x7;
+				qh->start_split_frame = qh->sched_frame;
+			}
+		} else {
+			qh->sched_frame =
+			    fh_frame_num_inc(qh->sched_frame, qh->interval);
+			if (fh_frame_num_le(qh->sched_frame, frame_number)) {
+				qh->sched_frame = frame_number;
+			}
+		}
+
+		if (FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
+			fh_otg_hcd_qh_remove(hcd, qh);
+		} else {
+			/*
+			 * Remove from periodic_sched_queued and move to
+			 * appropriate queue.
+			 */
+			if (qh->sched_frame == frame_number) {
+				FH_LIST_MOVE_HEAD(&hcd->periodic_sched_ready,
+						  &qh->qh_list_entry);
+			} else {
+				FH_LIST_MOVE_HEAD
+				    (&hcd->periodic_sched_inactive,
+				     &qh->qh_list_entry);
+			}
+		}
+	}
+}
+
+/**
+ * Allocates a QTD and initializes it from the given URB.
+ *
+ * @param urb The URB to create a QTD from. Each URB-QTD pair will end up
+ * pointing to each other so each pair should have a unique correlation.
+ * @param atomic_alloc Flag to do atomic alloc if needed
+ *
+ * @return Pointer to the newly allocated QTD, or NULL on allocation failure. */
+fh_otg_qtd_t *fh_otg_hcd_qtd_create(fh_otg_hcd_urb_t * urb, int atomic_alloc)
+{
+	fh_otg_qtd_t *new_qtd = fh_otg_hcd_qtd_alloc(atomic_alloc);
+
+	if (!new_qtd)
+		return NULL;
+
+	fh_otg_hcd_qtd_init(new_qtd, urb);
+	return new_qtd;
+}
+
+/**
+ * Initializes a QTD structure and cross-links it with its URB.
+ *
+ * @param qtd The QTD to initialize.
+ * @param urb The URB to use for initialization. */
+void fh_otg_hcd_qtd_init(fh_otg_qtd_t * qtd, fh_otg_hcd_urb_t * urb)
+{
+	fh_memset(qtd, 0, sizeof(fh_otg_qtd_t));
+	qtd->urb = urb;
+	if (fh_otg_hcd_get_pipe_type(&urb->pipe_info) == UE_CONTROL) {
+		/*
+		 * The only time the QTD data toggle is used is on the data
+		 * phase of control transfers. This phase always starts with
+		 * DATA1.
+		 */
+		qtd->data_toggle = FH_OTG_HC_PID_DATA1;
+		qtd->control_phase = FH_OTG_CONTROL_SETUP;
+	}
+
+	/* start split */
+	qtd->complete_split = 0;
+	qtd->isoc_split_pos = FH_HCSPLIT_XACTPOS_ALL;
+	qtd->isoc_split_offset = 0;
+	qtd->in_process = 0;
+
+	/* Store the qtd ptr in the urb to reference what QTD. */
+	urb->qtd = qtd;
+	return;
+}
+
+/**
+ * This function adds a QTD to the QTD-list of a QH. It will find the correct
+ * QH to place the QTD into. If it does not find a QH, then it will create a
+ * new QH. If the QH to which the QTD is added is not currently scheduled, it
+ * is placed into the proper schedule based on its EP type.
+ *
+ * @param[in] qtd The QTD to add
+ * @param[in] hcd The FH HCD structure
+ * @param[out] qh out parameter to return queue head
+ * @param atomic_alloc Flag to do atomic alloc if needed
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int fh_otg_hcd_qtd_add(fh_otg_qtd_t * qtd,
+		       fh_otg_hcd_t * hcd, fh_otg_qh_t ** qh, int atomic_alloc)
+{
+	int retval = 0;
+	fh_irqflags_t flags;
+
+	fh_otg_hcd_urb_t *urb = qtd->urb;
+
+	/*
+	 * Get the QH which holds the QTD-list to insert to. Create QH if it
+	 * doesn't exist.
+	 */
+	if (*qh == NULL) {
+		*qh = fh_otg_hcd_qh_create(hcd, urb, atomic_alloc);
+		if (*qh == NULL) {
+			retval = -1;
+			goto done;
+		}
+	}
+	/* Schedule the QH (no-op if already scheduled) and append the QTD,
+	 * both under the HCD lock. */
+	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
+	retval = fh_otg_hcd_qh_add(hcd, *qh);
+	if (retval == 0) {
+		FH_CIRCLEQ_INSERT_TAIL(&((*qh)->qtd_list), qtd,
+				       qtd_list_entry);
+	}
+	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
+
+done:
+
+	return retval;
+}
+
+#endif /* FH_DEVICE_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_os_dep.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_os_dep.h
new file mode 100644
index 00000000..cf5bf274
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_os_dep.h
@@ -0,0 +1,99 @@
+#ifndef _FH_OS_DEP_H_
+#define _FH_OS_DEP_H_
+
+/**
+ * @file
+ *
+ * This file contains OS dependent structures.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+# include <linux/irq.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+# include <linux/usb/ch9.h>
+#else
+# include <linux/usb_ch9.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+# include <linux/usb/gadget.h>
+#else
+# include <linux/usb_gadget.h>
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+# include <asm/irq.h>
+#endif
+
+#ifdef PCI_INTERFACE
+# include <asm/io.h>
+#endif
+
+#ifdef LM_INTERFACE
+# include <asm/unaligned.h>
+# include <asm/sizes.h>
+# include <asm/param.h>
+# include <asm/io.h>
+# include <asm/arch/lm.h>
+# include <asm/arch/irqs.h>
+# include <asm/arch/regs-irq.h>
+#endif
+
+/** The OS page size */
+#define FH_OS_PAGE_SIZE PAGE_SIZE
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
+typedef int gfp_t;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+# define IRQF_SHARED SA_SHIRQ
+#endif
+
+typedef struct os_dependent {
+	/** Base address returned from ioremap() */
+	void *base;
+
+	/** Register offset for Diagnostic API */
+	uint32_t reg_offset;
+
+	struct platform_device *pdev;
+
+	/** Start address of a PCI region */
+	resource_size_t rsrc_start;
+
+	/** Length of a PCI region */
+	resource_size_t rsrc_len;
+
+} os_dependent_t;
+
+#ifdef __cplusplus
+/* NOTE(review): a stray '}' stood here with no matching extern "C" {
+ * anywhere in this header; it broke C++ inclusion and was removed. */
+#endif
+
+#endif /* _FH_OS_DEP_H_ */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.c
new file mode 100644
index 00000000..134e91c0
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.c
@@ -0,0 +1,2989 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd.c $
+ * $Revision: #105 $
+ * $Date: 2013/05/16 $
+ * $Change: 2231774 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_HOST_ONLY
+
+/** @file
+ * This file implements PCD Core. All code in this file is portable and doesn't
+ * use any OS specific functions.
+ * PCD Core provides Interface, defined in <code><fh_otg_pcd_if.h></code>
+ * header file, which can be used to implement OS specific PCD interface.
+ *
+ * An important function of the PCD is managing interrupts generated
+ * by the FH_otg controller. The implementation of the FH_otg device
+ * mode interrupt service routines is in fh_otg_pcd_intr.c.
+ *
+ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
+ * @todo Does it work when the request size is greater than DEPTSIZ
+ * transfer size
+ *
+ */
+
+#include "fh_otg_pcd.h"
+
+#ifdef FH_UTE_CFI
+#include "fh_otg_cfi.h"
+
+extern int init_cfi(cfiobject_t * cfiobj);
+#endif
+
+/**
+ * Choose endpoint from ep arrays using usb_ep structure.
+ */
+static fh_otg_pcd_ep_t *get_ep_from_handle(fh_otg_pcd_t * pcd, void *handle)
+{
+ int i;
+ if (pcd->ep0.priv == handle) {
+ return &pcd->ep0;
+ }
+ for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
+ if (pcd->in_ep[i].priv == handle)
+ return &pcd->in_ep[i];
+ if (pcd->out_ep[i].priv == handle)
+ return &pcd->out_ep[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * This function completes a request. It call's the request call back.
+ */
+void fh_otg_request_done(fh_otg_pcd_ep_t * ep, fh_otg_pcd_request_t * req,
+ int32_t status)
+{
+ unsigned stopped = ep->stopped;
+
+ FH_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
+ FH_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+ /* spin_unlock/spin_lock now done in fops->complete() */
+ ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
+ req->actual);
+
+ if (ep->pcd->request_pending > 0)
+ --ep->pcd->request_pending;
+
+ ep->stopped = stopped;
+ FH_FREE(req);
+}
+
+/**
+ * This function terminates all the requests in the EP request queue.
+ */
+void fh_otg_request_nuke(fh_otg_pcd_ep_t * ep)
+{
+ fh_otg_pcd_request_t *req;
+
+ ep->stopped = 1;
+
+ /* called with irqs blocked?? */
+ while (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+ fh_otg_request_done(ep, req, -FH_E_SHUTDOWN);
+ }
+}
+
+void fh_otg_pcd_start(fh_otg_pcd_t * pcd,
+ const struct fh_otg_pcd_function_ops *fops)
+{
+ pcd->fops = fops;
+}
+
+/**
+ * PCD Callback function for initializing the PCD when switching to
+ * device mode.
+ *
+ * @param p void pointer to the <code>fh_otg_pcd_t</code>
+ */
+static int32_t fh_otg_pcd_start_cb(void *p)
+{
+ fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) p;
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+
+ /*
+ * Initialized the Core for Device mode.
+ */
+ if (fh_otg_is_device_mode(core_if)) {
+ fh_otg_core_dev_init(core_if);
+ /* Set core_if's lock pointer to the pcd->lock */
+ core_if->lock = pcd->lock;
+ }
+ return 1;
+}
+
+/** CFI-specific buffer allocation function for EP */
+#ifdef FH_UTE_CFI
+uint8_t *cfiw_ep_alloc_buffer(fh_otg_pcd_t * pcd, void *pep, fh_dma_t * addr,
+ size_t buflen, int flags)
+{
+ fh_otg_pcd_ep_t *ep;
+ ep = get_ep_from_handle(pcd, pep);
+ if (!ep) {
+ FH_WARN("bad ep\n");
+ return -FH_E_INVALID;
+ }
+
+ return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
+ flags);
+}
+#else
+uint8_t *cfiw_ep_alloc_buffer(fh_otg_pcd_t * pcd, void *pep, fh_dma_t * addr,
+ size_t buflen, int flags);
+#endif
+
+/**
+ * PCD Callback function for notifying the PCD when resuming from
+ * suspend.
+ *
+ * @param p void pointer to the <code>fh_otg_pcd_t</code>
+ */
+static int32_t fh_otg_pcd_resume_cb(void *p)
+{
+ fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) p;
+
+ if (pcd->fops->resume)
+ pcd->fops->resume(pcd);
+
+ /* Stop the SRP timeout timer. */
+ if ((GET_CORE_IF(pcd)->core_params->phy_type != FH_PHY_TYPE_PARAM_FS)
+ || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
+ if (GET_CORE_IF(pcd)->srp_timer_started) {
+ GET_CORE_IF(pcd)->srp_timer_started = 0;
+ FH_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
+ }
+ }
+ return 1;
+}
+
+/**
+ * PCD Callback function for notifying the PCD device is suspended.
+ *
+ * @param p void pointer to the <code>fh_otg_pcd_t</code>
+ */
+static int32_t fh_otg_pcd_suspend_cb(void *p)
+{
+ fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) p;
+
+ if (pcd->fops->suspend) {
+ FH_SPINUNLOCK(pcd->lock);
+ pcd->fops->suspend(pcd);
+ FH_SPINLOCK(pcd->lock);
+ }
+
+ return 1;
+}
+
+/**
+ * PCD Callback function for stopping the PCD when switching to Host
+ * mode.
+ *
+ * @param p void pointer to the <code>fh_otg_pcd_t</code>
+ */
+static int32_t fh_otg_pcd_stop_cb(void *p)
+{
+ fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) p;
+ extern void fh_otg_pcd_stop(fh_otg_pcd_t * _pcd);
+
+ fh_otg_pcd_stop(pcd);
+ return 1;
+}
+
+/**
+ * PCD Callback structure for handling mode switching.
+ */
+static fh_otg_cil_callbacks_t pcd_callbacks = {
+ .start = fh_otg_pcd_start_cb,
+ .stop = fh_otg_pcd_stop_cb,
+ .suspend = fh_otg_pcd_suspend_cb,
+ .resume_wakeup = fh_otg_pcd_resume_cb,
+ .p = 0, /* Set at registration */
+};
+
+/**
+ * This function allocates a DMA Descriptor chain for the Endpoint
+ * buffer to be used for a transfer to/from the specified endpoint.
+ */
+fh_otg_dev_dma_desc_t *fh_otg_ep_alloc_desc_chain(fh_dma_t * dma_desc_addr,
+ uint32_t count)
+{
+ return FH_DMA_ALLOC_ATOMIC(count * sizeof(fh_otg_dev_dma_desc_t),
+ dma_desc_addr);
+}
+
+struct fh_otg_dma_free {
+ fh_tasklet_t *tsklt;
+ uint32_t size;
+ void *virt_addr;
+ fh_dma_t dma_addr;
+};
+
+static void fh_otg_dma_free_func(void *data)
+{
+ struct fh_otg_dma_free *pDmafreeMgr =
+ (struct fh_otg_dma_free *)data;
+
+ FH_DMA_FREE(pDmafreeMgr->size,
+ pDmafreeMgr->virt_addr,
+ pDmafreeMgr->dma_addr);
+ FH_TASK_FREE(pDmafreeMgr->tsklt);
+ FH_FREE(pDmafreeMgr);
+}
+/**
+ * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc.
+ */
+void fh_otg_ep_free_desc_chain(fh_otg_dev_dma_desc_t * desc_addr,
+ uint32_t dma_desc_addr, uint32_t count)
+{
+ struct fh_otg_dma_free *pDmafreeMgr;
+ fh_tasklet_t *pDmafreeTsklt;
+
+ pDmafreeMgr = FH_ALLOC(sizeof(struct fh_otg_dma_free));
+ if (pDmafreeMgr == NULL)
+ goto alloc_err1;
+
+ pDmafreeTsklt = FH_TASK_ALLOC("dmafree_tasklet",
+ fh_otg_dma_free_func,
+ pDmafreeMgr);
+ if (pDmafreeTsklt == NULL)
+ goto alloc_err2;
+
+ pDmafreeMgr->tsklt = pDmafreeTsklt;
+ pDmafreeMgr->size = count * sizeof(fh_otg_dev_dma_desc_t);
+ pDmafreeMgr->virt_addr = desc_addr;
+ pDmafreeMgr->dma_addr = dma_desc_addr;
+
+ FH_TASK_SCHEDULE(pDmafreeTsklt);
+
+ return;
+
+alloc_err2:
+ FH_FREE(pDmafreeMgr);
+alloc_err1:
+ WARN_ON(true);
+}
+
+#ifdef FH_EN_ISOC
+
+/**
+ * This function initializes a descriptor chain for Isochronous transfer
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param fh_ep The EP to start the transfer on.
+ *
+ */
+void fh_otg_iso_ep_start_ddma_transfer(fh_otg_core_if_t * core_if,
+ fh_ep_t * fh_ep)
+{
+
+ dsts_data_t dsts = {.d32 = 0 };
+ depctl_data_t depctl = {.d32 = 0 };
+ volatile uint32_t *addr;
+ int i, j;
+ uint32_t len;
+
+ if (fh_ep->is_in)
+ fh_ep->desc_cnt = fh_ep->buf_proc_intrvl / fh_ep->bInterval;
+ else
+ fh_ep->desc_cnt =
+ fh_ep->buf_proc_intrvl * fh_ep->pkt_per_frm /
+ fh_ep->bInterval;
+
+	/** Allocate descriptors for double buffering */
+	fh_ep->iso_desc_addr =
+	    fh_otg_ep_alloc_desc_chain(&fh_ep->iso_dma_desc_addr,
+				       fh_ep->desc_cnt * 2);
+	/* Bail out on allocation FAILURE of the field just assigned
+	 * (was: "if (fh_ep->desc_addr)" - wrong field, inverted test). */
+	if (!fh_ep->iso_desc_addr) {
+		FH_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
+
+ dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+
+ /** ISO OUT EP */
+ if (fh_ep->is_in == 0) {
+ dev_dma_desc_sts_t sts = {.d32 = 0 };
+ fh_otg_dev_dma_desc_t *dma_desc = fh_ep->iso_desc_addr;
+ dma_addr_t dma_ad;
+ uint32_t data_per_desc;
+ fh_otg_dev_out_ep_regs_t *out_regs =
+ core_if->dev_if->out_ep_regs[fh_ep->num];
+ int offset;
+
+ addr = &core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl;
+ dma_ad = (dma_addr_t) FH_READ_REG32(&(out_regs->doepdma));
+
+ /** Buffer 0 descriptors setup */
+ dma_ad = fh_ep->dma_addr0;
+
+ sts.b_iso_out.bs = BS_HOST_READY;
+ sts.b_iso_out.rxsts = 0;
+ sts.b_iso_out.l = 0;
+ sts.b_iso_out.sp = 0;
+ sts.b_iso_out.ioc = 0;
+ sts.b_iso_out.pid = 0;
+ sts.b_iso_out.framenum = 0;
+
+ offset = 0;
+ for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
+ i += fh_ep->pkt_per_frm) {
+
+ for (j = 0; j < fh_ep->pkt_per_frm; ++j) {
+ uint32_t len = (j + 1) * fh_ep->maxpacket;
+ if (len > fh_ep->data_per_frame)
+ data_per_desc =
+ fh_ep->data_per_frame -
+ j * fh_ep->maxpacket;
+ else
+ data_per_desc = fh_ep->maxpacket;
+ len = data_per_desc % 4;
+ if (len)
+ data_per_desc += 4 - len;
+
+ sts.b_iso_out.rxbytes = data_per_desc;
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+
+ offset += data_per_desc;
+ dma_desc++;
+ dma_ad += data_per_desc;
+ }
+ }
+
+ for (j = 0; j < fh_ep->pkt_per_frm - 1; ++j) {
+ uint32_t len = (j + 1) * fh_ep->maxpacket;
+ if (len > fh_ep->data_per_frame)
+ data_per_desc =
+ fh_ep->data_per_frame -
+ j * fh_ep->maxpacket;
+ else
+ data_per_desc = fh_ep->maxpacket;
+ len = data_per_desc % 4;
+ if (len)
+ data_per_desc += 4 - len;
+ sts.b_iso_out.rxbytes = data_per_desc;
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+
+ offset += data_per_desc;
+ dma_desc++;
+ dma_ad += data_per_desc;
+ }
+
+ sts.b_iso_out.ioc = 1;
+ len = (j + 1) * fh_ep->maxpacket;
+ if (len > fh_ep->data_per_frame)
+ data_per_desc =
+ fh_ep->data_per_frame - j * fh_ep->maxpacket;
+ else
+ data_per_desc = fh_ep->maxpacket;
+ len = data_per_desc % 4;
+ if (len)
+ data_per_desc += 4 - len;
+ sts.b_iso_out.rxbytes = data_per_desc;
+
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+ dma_desc++;
+
+ /** Buffer 1 descriptors setup */
+ sts.b_iso_out.ioc = 0;
+ dma_ad = fh_ep->dma_addr1;
+
+ offset = 0;
+ for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
+ i += fh_ep->pkt_per_frm) {
+ for (j = 0; j < fh_ep->pkt_per_frm; ++j) {
+ uint32_t len = (j + 1) * fh_ep->maxpacket;
+ if (len > fh_ep->data_per_frame)
+ data_per_desc =
+ fh_ep->data_per_frame -
+ j * fh_ep->maxpacket;
+ else
+ data_per_desc = fh_ep->maxpacket;
+ len = data_per_desc % 4;
+ if (len)
+ data_per_desc += 4 - len;
+
+ data_per_desc =
+ sts.b_iso_out.rxbytes = data_per_desc;
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+
+ offset += data_per_desc;
+ dma_desc++;
+ dma_ad += data_per_desc;
+ }
+ }
+ for (j = 0; j < fh_ep->pkt_per_frm - 1; ++j) {
+ data_per_desc =
+ ((j + 1) * fh_ep->maxpacket >
+ fh_ep->data_per_frame) ? fh_ep->data_per_frame -
+ j * fh_ep->maxpacket : fh_ep->maxpacket;
+ data_per_desc +=
+ (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
+ sts.b_iso_out.rxbytes = data_per_desc;
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+
+ offset += data_per_desc;
+ dma_desc++;
+ dma_ad += data_per_desc;
+ }
+
+ sts.b_iso_out.ioc = 1;
+ sts.b_iso_out.l = 1;
+ data_per_desc =
+ ((j + 1) * fh_ep->maxpacket >
+ fh_ep->data_per_frame) ? fh_ep->data_per_frame -
+ j * fh_ep->maxpacket : fh_ep->maxpacket;
+ data_per_desc +=
+ (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
+ sts.b_iso_out.rxbytes = data_per_desc;
+
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+
+ fh_ep->next_frame = 0;
+
+ /** Write dma_ad into DOEPDMA register */
+ FH_WRITE_REG32(&(out_regs->doepdma),
+ (uint32_t) fh_ep->iso_dma_desc_addr);
+
+ }
+ /** ISO IN EP */
+ else {
+ dev_dma_desc_sts_t sts = {.d32 = 0 };
+ fh_otg_dev_dma_desc_t *dma_desc = fh_ep->iso_desc_addr;
+ dma_addr_t dma_ad;
+ fh_otg_dev_in_ep_regs_t *in_regs =
+ core_if->dev_if->in_ep_regs[fh_ep->num];
+ unsigned int frmnumber;
+ fifosize_data_t txfifosize, rxfifosize;
+
+ txfifosize.d32 =
+ FH_READ_REG32(&core_if->dev_if->in_ep_regs[fh_ep->num]->
+ dtxfsts);
+ rxfifosize.d32 =
+ FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
+
+ addr = &core_if->dev_if->in_ep_regs[fh_ep->num]->diepctl;
+
+ dma_ad = fh_ep->dma_addr0;
+
+ dsts.d32 =
+ FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+
+ sts.b_iso_in.bs = BS_HOST_READY;
+ sts.b_iso_in.txsts = 0;
+ sts.b_iso_in.sp =
+ (fh_ep->data_per_frame % fh_ep->maxpacket) ? 1 : 0;
+ sts.b_iso_in.ioc = 0;
+ sts.b_iso_in.pid = fh_ep->pkt_per_frm;
+
+ frmnumber = fh_ep->next_frame;
+
+ sts.b_iso_in.framenum = frmnumber;
+ sts.b_iso_in.txbytes = fh_ep->data_per_frame;
+ sts.b_iso_in.l = 0;
+
+ /** Buffer 0 descriptors setup */
+ for (i = 0; i < fh_ep->desc_cnt - 1; i++) {
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+ dma_desc++;
+
+ dma_ad += fh_ep->data_per_frame;
+ sts.b_iso_in.framenum += fh_ep->bInterval;
+ }
+
+ sts.b_iso_in.ioc = 1;
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+ ++dma_desc;
+
+ /** Buffer 1 descriptors setup */
+ sts.b_iso_in.ioc = 0;
+ dma_ad = fh_ep->dma_addr1;
+
+ for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
+ i += fh_ep->pkt_per_frm) {
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+ dma_desc++;
+
+ dma_ad += fh_ep->data_per_frame;
+ sts.b_iso_in.framenum += fh_ep->bInterval;
+
+ sts.b_iso_in.ioc = 0;
+ }
+ sts.b_iso_in.ioc = 1;
+ sts.b_iso_in.l = 1;
+
+ dma_desc->buf = dma_ad;
+ dma_desc->status.d32 = sts.d32;
+
+ fh_ep->next_frame = sts.b_iso_in.framenum + fh_ep->bInterval;
+
+ /** Write dma_ad into diepdma register */
+ FH_WRITE_REG32(&(in_regs->diepdma),
+ (uint32_t) fh_ep->iso_dma_desc_addr);
+ }
+ /** Enable endpoint, clear nak */
+ depctl.d32 = 0;
+ depctl.b.epena = 1;
+ depctl.b.usbactep = 1;
+ depctl.b.cnak = 1;
+
+ FH_MODIFY_REG32(addr, depctl.d32, depctl.d32);
+ depctl.d32 = FH_READ_REG32(addr);
+}
+
+/**
+ * This function initializes a descriptor chain for Isochronous transfer
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ *
+ */
+void fh_otg_iso_ep_start_buf_transfer(fh_otg_core_if_t * core_if,
+ fh_ep_t * ep)
+{
+ depctl_data_t depctl = {.d32 = 0 };
+ volatile uint32_t *addr;
+
+ if (ep->is_in)
+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
+ else
+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
+
+ if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
+ return;
+ } else {
+ deptsiz_data_t deptsiz = {.d32 = 0 };
+
+ ep->xfer_len =
+ ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
+ ep->pkt_cnt =
+ (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
+ ep->xfer_count = 0;
+ ep->xfer_buff =
+ (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
+ ep->dma_addr =
+ (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
+
+ if (ep->is_in) {
+ /* Program the transfer size and packet count
+ * as follows: xfersize = N * maxpacket +
+ * short_packet pktcnt = N + (short_packet
+ * exist ? 1 : 0)
+ */
+ deptsiz.b.mc = ep->pkt_per_frm;
+ deptsiz.b.xfersize = ep->xfer_len;
+ deptsiz.b.pktcnt =
+ (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
+ FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
+ dieptsiz, deptsiz.d32);
+
+ /* Write the DMA register */
+ FH_WRITE_REG32(&
+ (core_if->dev_if->in_ep_regs[ep->num]->
+ diepdma), (uint32_t) ep->dma_addr);
+
+ } else {
+ deptsiz.b.pktcnt =
+ (ep->xfer_len + (ep->maxpacket - 1)) /
+ ep->maxpacket;
+ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
+
+ FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
+ doeptsiz, deptsiz.d32);
+
+ /* Write the DMA register */
+ FH_WRITE_REG32(&
+ (core_if->dev_if->out_ep_regs[ep->num]->
+ doepdma), (uint32_t) ep->dma_addr);
+
+ }
+ /** Enable endpoint, clear nak */
+ depctl.d32 = 0;
+ depctl.b.epena = 1;
+ depctl.b.cnak = 1;
+
+ FH_MODIFY_REG32(addr, depctl.d32, depctl.d32);
+ }
+}
+
+/**
+ * This function does the setup for a data transfer for an EP and
+ * starts the transfer. For an IN transfer, the packets will be
+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
+ * the packets are unloaded from the Rx FIFO in the ISR.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ */
+
+static void fh_otg_iso_ep_start_transfer(fh_otg_core_if_t * core_if,
+ fh_ep_t * ep)
+{
+ if (core_if->dma_enable) {
+ if (core_if->dma_desc_enable) {
+ if (ep->is_in) {
+ ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
+ } else {
+ ep->desc_cnt = ep->pkt_cnt;
+ }
+ fh_otg_iso_ep_start_ddma_transfer(core_if, ep);
+ } else {
+ if (core_if->pti_enh_enable) {
+ fh_otg_iso_ep_start_buf_transfer(core_if, ep);
+ } else {
+ ep->cur_pkt_addr =
+ (ep->proc_buf_num) ? ep->xfer_buff1 : ep->
+ xfer_buff0;
+ ep->cur_pkt_dma_addr =
+ (ep->proc_buf_num) ? ep->dma_addr1 : ep->
+ dma_addr0;
+ fh_otg_iso_ep_start_frm_transfer(core_if, ep);
+ }
+ }
+ } else {
+ ep->cur_pkt_addr =
+ (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
+ ep->cur_pkt_dma_addr =
+ (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
+ fh_otg_iso_ep_start_frm_transfer(core_if, ep);
+ }
+}
+
+/**
+ * This function stops transfer for an EP and
+ * resets the ep's variables.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ */
+
+void fh_otg_iso_ep_stop_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ depctl_data_t depctl = {.d32 = 0 };
+ volatile uint32_t *addr;
+
+ if (ep->is_in == 1) {
+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
+ } else {
+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
+ }
+
+ /* disable the ep */
+ depctl.d32 = FH_READ_REG32(addr);
+
+ depctl.b.epdis = 1;
+ depctl.b.snak = 1;
+
+ FH_WRITE_REG32(addr, depctl.d32);
+
+ if (core_if->dma_desc_enable &&
+ ep->iso_desc_addr && ep->iso_dma_desc_addr) {
+ fh_otg_ep_free_desc_chain(ep->iso_desc_addr,
+ ep->iso_dma_desc_addr,
+ ep->desc_cnt * 2);
+ }
+
+	/* reset variables */
+ ep->dma_addr0 = 0;
+ ep->dma_addr1 = 0;
+ ep->xfer_buff0 = 0;
+ ep->xfer_buff1 = 0;
+ ep->data_per_frame = 0;
+ ep->data_pattern_frame = 0;
+ ep->sync_frame = 0;
+ ep->buf_proc_intrvl = 0;
+ ep->bInterval = 0;
+ ep->proc_buf_num = 0;
+ ep->pkt_per_frm = 0;
+ ep->pkt_per_frm = 0;
+ ep->desc_cnt = 0;
+ ep->iso_desc_addr = 0;
+ ep->iso_dma_desc_addr = 0;
+}
+
+int fh_otg_pcd_iso_ep_start(fh_otg_pcd_t * pcd, void *ep_handle,
+ uint8_t * buf0, uint8_t * buf1, fh_dma_t dma0,
+ fh_dma_t dma1, int sync_frame, int dp_frame,
+ int data_per_frame, int start_frame,
+ int buf_proc_intrvl, void *req_handle,
+ int atomic_alloc)
+{
+ fh_otg_pcd_ep_t *ep;
+ fh_irqflags_t flags = 0;
+ fh_ep_t *fh_ep;
+ int32_t frm_data;
+ dsts_data_t dsts;
+ fh_otg_core_if_t *core_if;
+
+ ep = get_ep_from_handle(pcd, ep_handle);
+
+ if (!ep || !ep->desc || ep->fh_ep.num == 0) {
+ FH_WARN("bad ep\n");
+ return -FH_E_INVALID;
+ }
+
+ FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+ core_if = GET_CORE_IF(pcd);
+ fh_ep = &ep->fh_ep;
+
+ if (ep->iso_req_handle) {
+ FH_WARN("ISO request in progress\n");
+ }
+
+ fh_ep->dma_addr0 = dma0;
+ fh_ep->dma_addr1 = dma1;
+
+ fh_ep->xfer_buff0 = buf0;
+ fh_ep->xfer_buff1 = buf1;
+
+ fh_ep->data_per_frame = data_per_frame;
+
+ /** @todo - pattern data support is to be implemented in the future */
+ fh_ep->data_pattern_frame = dp_frame;
+ fh_ep->sync_frame = sync_frame;
+
+ fh_ep->buf_proc_intrvl = buf_proc_intrvl;
+
+ fh_ep->bInterval = 1 << (ep->desc->bInterval - 1);
+
+ fh_ep->proc_buf_num = 0;
+
+ fh_ep->pkt_per_frm = 0;
+ frm_data = ep->fh_ep.data_per_frame;
+ while (frm_data > 0) {
+ fh_ep->pkt_per_frm++;
+ frm_data -= ep->fh_ep.maxpacket;
+ }
+
+ dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+
+ if (start_frame == -1) {
+ fh_ep->next_frame = dsts.b.soffn + 1;
+ if (fh_ep->bInterval != 1) {
+ fh_ep->next_frame =
+ fh_ep->next_frame + (fh_ep->bInterval - 1 -
+ fh_ep->next_frame %
+ fh_ep->bInterval);
+ }
+ } else {
+ fh_ep->next_frame = start_frame;
+ }
+
+ if (!core_if->pti_enh_enable) {
+ fh_ep->pkt_cnt =
+ fh_ep->buf_proc_intrvl * fh_ep->pkt_per_frm /
+ fh_ep->bInterval;
+ } else {
+ fh_ep->pkt_cnt =
+ (fh_ep->data_per_frame *
+ (fh_ep->buf_proc_intrvl / fh_ep->bInterval)
+ - 1 + fh_ep->maxpacket) / fh_ep->maxpacket;
+ }
+
+ if (core_if->dma_desc_enable) {
+ fh_ep->desc_cnt =
+ fh_ep->buf_proc_intrvl * fh_ep->pkt_per_frm /
+ fh_ep->bInterval;
+ }
+
+ if (atomic_alloc) {
+ fh_ep->pkt_info =
+ FH_ALLOC_ATOMIC(sizeof(iso_pkt_info_t) * fh_ep->pkt_cnt);
+ } else {
+ fh_ep->pkt_info =
+ FH_ALLOC(sizeof(iso_pkt_info_t) * fh_ep->pkt_cnt);
+ }
+ if (!fh_ep->pkt_info) {
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+ return -FH_E_NO_MEMORY;
+ }
+ if (core_if->pti_enh_enable) {
+ fh_memset(fh_ep->pkt_info, 0,
+ sizeof(iso_pkt_info_t) * fh_ep->pkt_cnt);
+ }
+
+ fh_ep->cur_pkt = 0;
+ ep->iso_req_handle = req_handle;
+
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+ fh_otg_iso_ep_start_transfer(core_if, fh_ep);
+ return 0;
+}
+
+int fh_otg_pcd_iso_ep_stop(fh_otg_pcd_t * pcd, void *ep_handle,
+ void *req_handle)
+{
+ fh_irqflags_t flags = 0;
+ fh_otg_pcd_ep_t *ep;
+ fh_ep_t *fh_ep;
+
+ ep = get_ep_from_handle(pcd, ep_handle);
+ if (!ep || !ep->desc || ep->fh_ep.num == 0) {
+ FH_WARN("bad ep\n");
+ return -FH_E_INVALID;
+ }
+ fh_ep = &ep->fh_ep;
+
+ fh_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), fh_ep);
+
+ FH_FREE(fh_ep->pkt_info);
+ FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+ if (ep->iso_req_handle != req_handle) {
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+ return -FH_E_INVALID;
+ }
+
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+
+ ep->iso_req_handle = 0;
+ return 0;
+}
+
+/**
+ * This function is used for periodic data exchange between PCD and gadget drivers.
+ * for Isochronous EPs
+ *
+ * - Every time a sync period completes this function is called to
+ * perform data exchange between PCD and gadget
+ */
+void fh_otg_iso_buffer_done(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * ep,
+ void *req_handle)
+{
+ int i;
+ fh_ep_t *fh_ep;
+
+ fh_ep = &ep->fh_ep;
+
+ FH_SPINUNLOCK(ep->pcd->lock);
+ pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
+ fh_ep->proc_buf_num ^ 0x1);
+ FH_SPINLOCK(ep->pcd->lock);
+
+ for (i = 0; i < fh_ep->pkt_cnt; ++i) {
+ fh_ep->pkt_info[i].status = 0;
+ fh_ep->pkt_info[i].offset = 0;
+ fh_ep->pkt_info[i].length = 0;
+ }
+}
+
+int fh_otg_pcd_get_iso_packet_count(fh_otg_pcd_t * pcd, void *ep_handle,
+				    void *iso_req_handle)
+{
+	fh_otg_pcd_ep_t *ep;
+	fh_ep_t *fh_ep;
+
+	ep = get_ep_from_handle(pcd, ep_handle);
+	/* check !ep first (matches fh_otg_pcd_iso_ep_start/stop) */
+	if (!ep || !ep->desc || ep->fh_ep.num == 0) {
+		FH_WARN("bad ep\n");
+		return -FH_E_INVALID;
+	}
+	fh_ep = &ep->fh_ep;
+	return fh_ep->pkt_cnt;
+}
+
+void fh_otg_pcd_get_iso_packet_params(fh_otg_pcd_t * pcd, void *ep_handle,
+				       void *iso_req_handle, int packet,
+				       int *status, int *actual, int *offset)
+{
+	fh_otg_pcd_ep_t *ep;
+	fh_ep_t *fh_ep;
+
+	ep = get_ep_from_handle(pcd, ep_handle);
+	if (!ep) {
+		FH_WARN("bad ep\n");
+		return;	/* was falling through to a NULL dereference */
+	}
+	fh_ep = &ep->fh_ep;
+	*status = fh_ep->pkt_info[packet].status;
+	*actual = fh_ep->pkt_info[packet].length;
+	*offset = fh_ep->pkt_info[packet].offset;
+}
+
+#endif /* FH_EN_ISOC */
+
+static void fh_otg_pcd_init_ep(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * pcd_ep,
+ uint32_t is_in, uint32_t ep_num)
+{
+ /* Init EP structure */
+ pcd_ep->desc = 0;
+ pcd_ep->pcd = pcd;
+ pcd_ep->stopped = 1;
+ pcd_ep->queue_sof = 0;
+
+ /* Init FH ep structure */
+ pcd_ep->fh_ep.is_in = is_in;
+ pcd_ep->fh_ep.num = ep_num;
+ pcd_ep->fh_ep.active = 0;
+ pcd_ep->fh_ep.tx_fifo_num = 0;
+ /* Control until ep is actvated */
+ pcd_ep->fh_ep.type = FH_OTG_EP_TYPE_CONTROL;
+ pcd_ep->fh_ep.maxpacket = MAX_PACKET_SIZE;
+ pcd_ep->fh_ep.dma_addr = 0;
+ pcd_ep->fh_ep.start_xfer_buff = 0;
+ pcd_ep->fh_ep.xfer_buff = 0;
+ pcd_ep->fh_ep.xfer_len = 0;
+ pcd_ep->fh_ep.xfer_count = 0;
+ pcd_ep->fh_ep.sent_zlp = 0;
+ pcd_ep->fh_ep.total_len = 0;
+ pcd_ep->fh_ep.desc_addr = 0;
+ pcd_ep->fh_ep.dma_desc_addr = 0;
+ FH_CIRCLEQ_INIT(&pcd_ep->queue);
+}
+
+/**
+ * Initialize ep's
+ */
+static void fh_otg_pcd_reinit(fh_otg_pcd_t * pcd)
+{
+ int i;
+ uint32_t hwcfg1;
+ fh_otg_pcd_ep_t *ep;
+ int in_ep_cntr, out_ep_cntr;
+ uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
+ uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
+
+ /**
+ * Initialize the EP0 structure.
+ */
+ ep = &pcd->ep0;
+ fh_otg_pcd_init_ep(pcd, ep, 0, 0);
+
+ in_ep_cntr = 0;
+ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
+ for (i = 1; in_ep_cntr < num_in_eps; i++) {
+ if ((hwcfg1 & 0x1) == 0) {
+ fh_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
+ in_ep_cntr++;
+ /**
+ * @todo NGS: Add direction to EP, based on contents
+ * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
+ * sprintf(";r
+ */
+ fh_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
+
+ FH_CIRCLEQ_INIT(&ep->queue);
+ }
+ hwcfg1 >>= 2;
+ }
+
+ out_ep_cntr = 0;
+ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
+ for (i = 1; out_ep_cntr < num_out_eps; i++) {
+ if ((hwcfg1 & 0x1) == 0) {
+ fh_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
+ out_ep_cntr++;
+ /**
+ * @todo NGS: Add direction to EP, based on contents
+ * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
+ * sprintf(";r
+ */
+ fh_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
+ FH_CIRCLEQ_INIT(&ep->queue);
+ }
+ hwcfg1 >>= 2;
+ }
+
+ pcd->ep0state = EP0_DISCONNECT;
+ pcd->ep0.fh_ep.maxpacket = MAX_EP0_SIZE;
+ pcd->ep0.fh_ep.type = FH_OTG_EP_TYPE_CONTROL;
+}
+
+/**
+ * This function is called when the SRP timer expires. The SRP should
+ * complete within 6 seconds.
+ */
+static void srp_timeout(void *ptr)
+{
+ gotgctl_data_t gotgctl;
+ fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;
+ volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
+
+ gotgctl.d32 = FH_READ_REG32(addr);
+
+ core_if->srp_timer_started = 0;
+
+ if (core_if->adp_enable) {
+ if (gotgctl.b.bsesvld == 0) {
+ gpwrdn_data_t gpwrdn = {.d32 = 0 };
+ FH_PRINTF("SRP Timeout BSESSVLD = 0\n");
+ /* Power off the core */
+ if (core_if->power_down == 2) {
+ gpwrdn.b.pwrdnswtch = 1;
+ FH_MODIFY_REG32(&core_if->
+ core_global_regs->gpwrdn,
+ gpwrdn.d32, 0);
+ }
+
+ gpwrdn.d32 = 0;
+ gpwrdn.b.pmuintsel = 1;
+ gpwrdn.b.pmuactv = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
+ gpwrdn.d32);
+ fh_otg_adp_probe_start(core_if);
+ } else {
+ FH_PRINTF("SRP Timeout BSESSVLD = 1\n");
+ core_if->op_state = B_PERIPHERAL;
+ fh_otg_core_init(core_if);
+ fh_otg_enable_global_interrupts(core_if);
+ cil_pcd_start(core_if);
+ }
+ }
+
+ if ((core_if->core_params->phy_type == FH_PHY_TYPE_PARAM_FS) &&
+ (core_if->core_params->i2c_enable)) {
+ FH_PRINTF("SRP Timeout\n");
+
+ if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
+ }
+
+ /* Clear Session Request */
+ gotgctl.d32 = 0;
+ gotgctl.b.sesreq = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
+ gotgctl.d32, 0);
+
+ core_if->srp_success = 0;
+ } else {
+ __FH_ERROR("Device not connected/responding\n");
+ gotgctl.b.sesreq = 0;
+ FH_WRITE_REG32(addr, gotgctl.d32);
+ }
+ } else if (gotgctl.b.sesreq) {
+ FH_PRINTF("SRP Timeout\n");
+
+ __FH_ERROR("Device not connected/responding\n");
+ gotgctl.b.sesreq = 0;
+ FH_WRITE_REG32(addr, gotgctl.d32);
+ } else {
+ FH_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
+ }
+}
+
+/**
+ * Tasklet
+ *
+ */
+extern void start_next_request(fh_otg_pcd_ep_t * ep);
+
+static void start_xfer_tasklet_func(void *data)
+{
+ fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) data;
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+
+ int i;
+ depctl_data_t diepctl;
+
+ FH_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
+
+ diepctl.d32 = FH_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
+
+ if (pcd->ep0.queue_sof) {
+ pcd->ep0.queue_sof = 0;
+ start_next_request(&pcd->ep0);
+ // break;
+ }
+
+ for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
+ depctl_data_t diepctl;
+ diepctl.d32 =
+ FH_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
+
+ if (pcd->in_ep[i].queue_sof) {
+ pcd->in_ep[i].queue_sof = 0;
+ start_next_request(&pcd->in_ep[i]);
+ // break;
+ }
+ }
+
+ return;
+}
+
+/**
+ * This function initialized the PCD portion of the driver.
+ *
+ */
+fh_otg_pcd_t *fh_otg_pcd_init(fh_otg_core_if_t * core_if)
+{
+ fh_otg_pcd_t *pcd = NULL;
+ fh_otg_dev_if_t *dev_if;
+ int i;
+
+ /*
+ * Allocate PCD structure
+ */
+ pcd = FH_ALLOC(sizeof(fh_otg_pcd_t));
+
+ if (pcd == NULL) {
+ return NULL;
+ }
+
+ pcd->lock = FH_SPINLOCK_ALLOC();
+ if (!pcd->lock) {
+ FH_ERROR("Could not allocate lock for pcd");
+ FH_FREE(pcd);
+ return NULL;
+ }
+ /* Set core_if's lock pointer to hcd->lock */
+ core_if->lock = pcd->lock;
+ pcd->core_if = core_if;
+
+ dev_if = core_if->dev_if;
+ dev_if->isoc_ep = NULL;
+
+ if (core_if->hwcfg4.b.ded_fifo_en) {
+ FH_PRINTF("Dedicated Tx FIFOs mode\n");
+ } else {
+ FH_PRINTF("Shared Tx FIFO mode\n");
+ }
+
+ /*
+ * Initialized the Core for Device mode here if there is nod ADP support.
+ * Otherwise it will be done later in fh_otg_adp_start routine.
+ */
+ if (fh_otg_is_device_mode(core_if) /*&& !core_if->adp_enable */ ) {
+ fh_otg_core_dev_init(core_if);
+ }
+
+ /*
+ * Register the PCD Callbacks.
+ */
+ fh_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
+
+ /*
+ * Initialize the DMA buffer for SETUP packets
+ */
+ if (GET_CORE_IF(pcd)->dma_enable) {
+ pcd->setup_pkt =
+ FH_DMA_ALLOC(sizeof(*pcd->setup_pkt) * 5,
+ &pcd->setup_pkt_dma_handle);
+ if (pcd->setup_pkt == NULL) {
+ FH_FREE(pcd);
+ return NULL;
+ }
+
+ pcd->status_buf =
+ FH_DMA_ALLOC(sizeof(uint16_t),
+ &pcd->status_buf_dma_handle);
+ if (pcd->status_buf == NULL) {
+ FH_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
+ pcd->setup_pkt, pcd->setup_pkt_dma_handle);
+ FH_FREE(pcd);
+ return NULL;
+ }
+
+ if (GET_CORE_IF(pcd)->dma_desc_enable) {
+ dev_if->setup_desc_addr[0] =
+ fh_otg_ep_alloc_desc_chain
+ (&dev_if->dma_setup_desc_addr[0], 1);
+ dev_if->setup_desc_addr[1] =
+ fh_otg_ep_alloc_desc_chain
+ (&dev_if->dma_setup_desc_addr[1], 1);
+ dev_if->in_desc_addr =
+ fh_otg_ep_alloc_desc_chain
+ (&dev_if->dma_in_desc_addr, 1);
+ dev_if->out_desc_addr =
+ fh_otg_ep_alloc_desc_chain
+ (&dev_if->dma_out_desc_addr, 1);
+ pcd->data_terminated = 0;
+
+ if (dev_if->setup_desc_addr[0] == 0
+ || dev_if->setup_desc_addr[1] == 0
+ || dev_if->in_desc_addr == 0
+ || dev_if->out_desc_addr == 0) {
+
+ if (dev_if->out_desc_addr)
+ fh_otg_ep_free_desc_chain
+ (dev_if->out_desc_addr,
+ dev_if->dma_out_desc_addr, 1);
+ if (dev_if->in_desc_addr)
+ fh_otg_ep_free_desc_chain
+ (dev_if->in_desc_addr,
+ dev_if->dma_in_desc_addr, 1);
+ if (dev_if->setup_desc_addr[1])
+ fh_otg_ep_free_desc_chain
+ (dev_if->setup_desc_addr[1],
+ dev_if->dma_setup_desc_addr[1], 1);
+ if (dev_if->setup_desc_addr[0])
+ fh_otg_ep_free_desc_chain
+ (dev_if->setup_desc_addr[0],
+ dev_if->dma_setup_desc_addr[0], 1);
+
+ FH_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
+ pcd->setup_pkt,
+ pcd->setup_pkt_dma_handle);
+ FH_DMA_FREE(sizeof(*pcd->status_buf),
+ pcd->status_buf,
+ pcd->status_buf_dma_handle);
+
+ FH_FREE(pcd);
+
+ return NULL;
+ }
+ }
+ } else {
+ pcd->setup_pkt = FH_ALLOC(sizeof(*pcd->setup_pkt) * 5);
+ if (pcd->setup_pkt == NULL) {
+ FH_FREE(pcd);
+ return NULL;
+ }
+
+ pcd->status_buf = FH_ALLOC(sizeof(uint16_t));
+ if (pcd->status_buf == NULL) {
+ FH_FREE(pcd->setup_pkt);
+ FH_FREE(pcd);
+ return NULL;
+ }
+ }
+
+ fh_otg_pcd_reinit(pcd);
+
+ /* Allocate the cfi object for the PCD */
+#ifdef FH_UTE_CFI
+ pcd->cfi = FH_ALLOC(sizeof(cfiobject_t));
+ if (NULL == pcd->cfi)
+ goto fail;
+ if (init_cfi(pcd->cfi)) {
+ CFI_INFO("%s: Failed to init the CFI object\n", __func__);
+ goto fail;
+ }
+#endif
+
+ /* Initialize tasklets */
+ pcd->start_xfer_tasklet = FH_TASK_ALLOC("xfer_tasklet",
+ start_xfer_tasklet_func, pcd);
+ pcd->test_mode_tasklet = FH_TASK_ALLOC("test_mode_tasklet",
+ do_test_mode, pcd);
+
+ /* Initialize SRP timer */
+ core_if->srp_timer = FH_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
+
+ if (core_if->core_params->dev_out_nak) {
+ /**
+ * Initialize xfer timeout timer. Implemented for
+ * 2.93a feature "Device DDMA OUT NAK Enhancement"
+ */
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ pcd->core_if->ep_xfer_timer[i] =
+ FH_TIMER_ALLOC("ep timer", ep_xfer_timeout,
+ &pcd->core_if->ep_xfer_info[i]);
+ }
+ }
+
+ return pcd;
+#ifdef FH_UTE_CFI
+fail:
+#endif
+ if (pcd->setup_pkt)
+ FH_FREE(pcd->setup_pkt);
+ if (pcd->status_buf)
+ FH_FREE(pcd->status_buf);
+#ifdef FH_UTE_CFI
+ if (pcd->cfi)
+ FH_FREE(pcd->cfi);
+#endif
+ if (pcd)
+ FH_FREE(pcd);
+ return NULL;
+
+}
+
+/**
+ * Remove PCD specific data
+ */
+void fh_otg_pcd_remove(fh_otg_pcd_t * pcd)
+{
+ fh_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
+ int i;
+ if (pcd->core_if->core_params->dev_out_nak) {
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ FH_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
+ pcd->core_if->ep_xfer_info[i].state = 0;
+ }
+ }
+
+ if (GET_CORE_IF(pcd)->dma_enable) {
+ FH_DMA_FREE(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
+ pcd->setup_pkt_dma_handle);
+ FH_DMA_FREE(sizeof(uint16_t), pcd->status_buf,
+ pcd->status_buf_dma_handle);
+ if (GET_CORE_IF(pcd)->dma_desc_enable) {
+ fh_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
+ dev_if->dma_setup_desc_addr
+ [0], 1);
+ fh_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
+ dev_if->dma_setup_desc_addr
+ [1], 1);
+ fh_otg_ep_free_desc_chain(dev_if->in_desc_addr,
+ dev_if->dma_in_desc_addr, 1);
+ fh_otg_ep_free_desc_chain(dev_if->out_desc_addr,
+ dev_if->dma_out_desc_addr,
+ 1);
+ }
+ } else {
+ FH_FREE(pcd->setup_pkt);
+ FH_FREE(pcd->status_buf);
+ }
+ FH_SPINLOCK_FREE(pcd->lock);
+ /* Set core_if's lock pointer to NULL */
+ pcd->core_if->lock = NULL;
+
+ FH_TASK_FREE(pcd->start_xfer_tasklet);
+ FH_TASK_FREE(pcd->test_mode_tasklet);
+ if (pcd->core_if->core_params->dev_out_nak) {
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ if (pcd->core_if->ep_xfer_timer[i]) {
+ FH_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
+ }
+ }
+ }
+
+/* Release the CFI object's dynamic memory */
+#ifdef FH_UTE_CFI
+ if (pcd->cfi->ops.release) {
+ pcd->cfi->ops.release(pcd->cfi);
+ }
+#endif
+
+ FH_FREE(pcd);
+}
+
+/**
+ * Returns whether registered pcd is dual speed or not
+ */
+uint32_t fh_otg_pcd_is_dualspeed(fh_otg_pcd_t * pcd)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+
+ if ((core_if->core_params->speed == FH_SPEED_PARAM_FULL) ||
+ ((core_if->hwcfg2.b.hs_phy_type == 2) &&
+ (core_if->hwcfg2.b.fs_phy_type == 1) &&
+ (core_if->core_params->ulpi_fs_ls))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * Returns whether registered pcd is OTG capable or not
+ */
+uint32_t fh_otg_pcd_is_otg(fh_otg_pcd_t * pcd)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ gusbcfg_data_t usbcfg = {.d32 = 0 };
+ uint32_t retval = 0;
+
+ usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
+ if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
+ return 0;
+ else
+ return 1;
+# else
+ if (!usbcfg.b.srpcap)
+ return 0;
+ else
+ retval |= 1;
+
+ if (usbcfg.b.hnpcap)
+ retval |= 2;
+
+ if (core_if->adp_enable)
+ retval |= 4;
+#endif
+
+ return retval;
+}
+
+/**
+ * This function assigns periodic Tx FIFO to an periodic EP
+ * in shared Tx FIFO mode
+ */
+static uint32_t assign_tx_fifo(fh_otg_core_if_t * core_if, int size)
+{
+ uint32_t val, TxMsk = 1;
+ int i;
+
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
+ val = (FH_READ_REG32(&core_if->core_global_regs->dtxfsiz[i]) >> 16) * 4;
+ if (((TxMsk & core_if->tx_msk) == 0) && (val >= size)) {
+ core_if->tx_msk |= TxMsk;
+ return i + 1;
+ }
+ TxMsk <<= 1;
+ }
+ FH_ERROR("No suitable TxFIFO\n");
+ return 0;
+}
+
+/**
+ * This function assigns periodic Tx FIFO to an periodic EP
+ * in shared Tx FIFO mode
+ */
+static uint32_t assign_perio_tx_fifo(fh_otg_core_if_t * core_if, int size)
+{
+ uint32_t val, PerTxMsk = 1;
+ int i;
+
+ for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
+ val = (FH_READ_REG32(&core_if->core_global_regs->dtxfsiz[i]) >> 16) * 4;
+ if (((PerTxMsk & core_if->p_tx_msk) == 0) && (val >= size)) {
+ core_if->p_tx_msk |= PerTxMsk;
+ return i + 1;
+ }
+ PerTxMsk <<= 1;
+ }
+ FH_ERROR("No suitable Periodic TxFIFO\n");
+ return 0;
+}
+
+/**
+ * This function releases periodic Tx FIFO
+ * in shared Tx FIFO mode
+ */
+static void release_perio_tx_fifo(fh_otg_core_if_t * core_if,
+ uint32_t fifo_num)
+{
+ core_if->p_tx_msk =
+ (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
+}
+
+/**
+ * This function releases periodic Tx FIFO
+ * in shared Tx FIFO mode
+ */
+static void release_tx_fifo(fh_otg_core_if_t * core_if, uint32_t fifo_num)
+{
+ core_if->tx_msk =
+ (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
+}
+
+/**
+ * This function is being called from gadget
+ * to enable PCD endpoint.
+ */
+int fh_otg_pcd_ep_enable(fh_otg_pcd_t * pcd,
+ const uint8_t * ep_desc, void *usb_ep)
+{
+ int num, dir, fifo_size, nat;
+ fh_otg_pcd_ep_t *ep = NULL;
+ const usb_endpoint_descriptor_t *desc;
+ fh_irqflags_t flags;
+ fifosize_data_t dptxfsiz = {.d32 = 0 };
+ gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
+ gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
+ int retval = 0;
+ int i, epcount;
+
+ desc = (const usb_endpoint_descriptor_t *)ep_desc;
+
+ if (!desc) {
+ pcd->ep0.priv = usb_ep;
+ ep = &pcd->ep0;
+ retval = -FH_E_INVALID;
+ goto out;
+ }
+
+ num = UE_GET_ADDR(desc->bEndpointAddress);
+ dir = UE_GET_DIR(desc->bEndpointAddress);
+
+ if (!desc->wMaxPacketSize) {
+ FH_WARN("bad maxpacketsize\n");
+ retval = -FH_E_INVALID;
+ goto out;
+ }
+
+ if (dir == UE_DIR_IN) {
+ epcount = pcd->core_if->dev_if->num_in_eps;
+ for (i = 0; i < epcount; i++) {
+ if (num == pcd->in_ep[i].fh_ep.num) {
+ ep = &pcd->in_ep[i];
+ break;
+ }
+ }
+ } else {
+ epcount = pcd->core_if->dev_if->num_out_eps;
+ for (i = 0; i < epcount; i++) {
+ if (num == pcd->out_ep[i].fh_ep.num) {
+ ep = &pcd->out_ep[i];
+ break;
+ }
+ }
+ }
+
+ if (!ep) {
+ FH_WARN("bad address\n");
+ retval = -FH_E_INVALID;
+ goto out;
+ }
+
+ FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+
+ ep->desc = desc;
+ ep->priv = usb_ep;
+
+ /*
+ * Activate the EP
+ */
+ ep->stopped = 0;
+
+ ep->fh_ep.is_in = (dir == UE_DIR_IN);
+ ep->fh_ep.maxpacket = UGETW(desc->wMaxPacketSize) & 0x7ff;
+ nat = UGETW(ep->desc->wMaxPacketSize);
+ nat = (nat >> 11) & 0x03;
+
+ ep->fh_ep.type = desc->bmAttributes & UE_XFERTYPE;
+
+ if (ep->fh_ep.is_in) {
+ if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
+ ep->fh_ep.tx_fifo_num = 0;
+
+ if (ep->fh_ep.type == UE_ISOCHRONOUS) {
+ /*
+ * if ISOC EP then assign a Periodic Tx FIFO.
+ */
+ fifo_size = ep->fh_ep.maxpacket * (nat + 1);
+ ep->fh_ep.tx_fifo_num =
+ assign_perio_tx_fifo(GET_CORE_IF(pcd), fifo_size);
+ }
+ } else {
+ /*
+ * if Dedicated FIFOs mode is on then assign a Tx FIFO.
+ */
+ fifo_size = ep->fh_ep.maxpacket;
+ ep->fh_ep.tx_fifo_num =
+ assign_tx_fifo(GET_CORE_IF(pcd), fifo_size);
+ }
+
+ /* Calculating EP info controller base address */
+ if (ep->fh_ep.tx_fifo_num
+ && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
+ gdfifocfg.d32 =
+ FH_READ_REG32(&GET_CORE_IF(pcd)->
+ core_global_regs->gdfifocfg);
+ gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
+ dptxfsiz.d32 =
+ (FH_READ_REG32
+ (&GET_CORE_IF(pcd)->core_global_regs->
+ dtxfsiz[ep->fh_ep.tx_fifo_num - 1]) >> 16);
+ gdfifocfg.b.epinfobase =
+ gdfifocfgbase.d32 + dptxfsiz.d32;
+ if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->
+ core_global_regs->gdfifocfg,
+ gdfifocfg.d32);
+ }
+ }
+ }
+ /* Set initial data PID. */
+ if (ep->fh_ep.type == UE_BULK) {
+ ep->fh_ep.data_pid_start = 0;
+ }
+
+ /* Alloc DMA Descriptors */
+ if (GET_CORE_IF(pcd)->dma_desc_enable) {
+#ifndef FH_UTE_PER_IO
+ if (ep->fh_ep.type != UE_ISOCHRONOUS) {
+#endif
+ ep->fh_ep.desc_addr =
+ fh_otg_ep_alloc_desc_chain(&ep->
+ fh_ep.dma_desc_addr,
+ MAX_DMA_DESC_CNT);
+ if (!ep->fh_ep.desc_addr) {
+ FH_WARN("%s, can't allocate DMA descriptor\n",
+ __func__);
+ retval = -FH_E_SHUTDOWN;
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+ goto out;
+ }
+#ifndef FH_UTE_PER_IO
+ } else {
+ ep->fh_ep.desc_addr =
+ fh_otg_ep_alloc_desc_chain(&ep->
+ fh_ep.dma_desc_addr,
+ MAX_DMA_DESC_CNT/2);
+ ep->fh_ep.desc_addr1 =
+ fh_otg_ep_alloc_desc_chain(&ep->
+ fh_ep.dma_desc_addr1,
+ MAX_DMA_DESC_CNT/2);
+ if (!ep->fh_ep.desc_addr || !ep->fh_ep.desc_addr1) {
+ FH_WARN("%s, can't allocate DMA descriptor\n",
+ __func__);
+ retval = -FH_E_SHUTDOWN;
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+ goto out;
+ }
+ /* Set initial data PID. */
+ if (ep->fh_ep.type == UE_ISOCHRONOUS) {
+ ep->fh_ep.iso_desc_first = 0;
+ ep->fh_ep.iso_desc_second = 0;
+ ep->fh_ep.iso_transfer_started = 0;
+ }
+ }
+#endif
+ }
+
+ FH_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
+ (ep->fh_ep.is_in ? "IN" : "OUT"),
+ ep->fh_ep.type, ep->fh_ep.maxpacket, ep->desc);
+#ifdef FH_UTE_PER_IO
+ ep->fh_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
+#endif
+ if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
+ ep->fh_ep.bInterval = 1 << (ep->desc->bInterval - 1);
+ ep->fh_ep.frame_num = 0xFFFFFFFF;
+ }
+
+ fh_otg_ep_activate(GET_CORE_IF(pcd), &ep->fh_ep);
+
+#ifdef FH_UTE_CFI
+ if (pcd->cfi->ops.ep_enable) {
+ pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
+ }
+#endif
+
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+
+out:
+ return retval;
+}
+
+/**
+ * This function is being called from gadget
+ * to disable PCD endpoint.
+ */
+int fh_otg_pcd_ep_disable(fh_otg_pcd_t * pcd, void *ep_handle)
+{
+ fh_otg_pcd_ep_t *ep;
+ fh_irqflags_t flags;
+ fh_otg_dev_dma_desc_t *desc_addr;
+ fh_dma_t dma_desc_addr;
+ gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
+ gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
+ fifosize_data_t dptxfsiz = {.d32 = 0 };
+
+ ep = get_ep_from_handle(pcd, ep_handle);
+
+ if (!ep || !ep->desc) {
+ FH_DEBUGPL(DBG_PCD, "bad ep address\n");
+ return -FH_E_INVALID;
+ }
+
+ FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+
+ fh_otg_request_nuke(ep);
+
+ fh_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->fh_ep);
+ if (pcd->core_if->core_params->dev_out_nak) {
+ FH_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->fh_ep.num]);
+ pcd->core_if->ep_xfer_info[ep->fh_ep.num].state = 0;
+ }
+ ep->desc = NULL;
+ ep->stopped = 1;
+
+ gdfifocfg.d32 =
+ FH_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
+ gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
+
+ if (ep->fh_ep.is_in) {
+ if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
+ /* Flush the Tx FIFO */
+ fh_otg_flush_tx_fifo(GET_CORE_IF(pcd),
+ ep->fh_ep.tx_fifo_num);
+ }
+ release_perio_tx_fifo(GET_CORE_IF(pcd), ep->fh_ep.tx_fifo_num);
+ release_tx_fifo(GET_CORE_IF(pcd), ep->fh_ep.tx_fifo_num);
+ if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
+ /* Decreasing EPinfo Base Addr */
+ dptxfsiz.d32 =
+ (FH_READ_REG32
+ (&GET_CORE_IF(pcd)->
+ core_global_regs->dtxfsiz[ep->fh_ep.tx_fifo_num-1]) >> 16);
+ gdfifocfg.b.epinfobase = gdfifocfgbase.d32 - dptxfsiz.d32;
+ if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg,
+ gdfifocfg.d32);
+ }
+ }
+ }
+
+ /* Free DMA Descriptors */
+ if (GET_CORE_IF(pcd)->dma_desc_enable) {
+ if (ep->fh_ep.type != UE_ISOCHRONOUS) {
+ desc_addr = ep->fh_ep.desc_addr;
+ dma_desc_addr = ep->fh_ep.dma_desc_addr;
+
+ /* Cannot call dma_free_coherent() with IRQs disabled */
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+ fh_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
+ MAX_DMA_DESC_CNT);
+
+ } else {
+ desc_addr = ep->fh_ep.desc_addr;
+ dma_desc_addr = ep->fh_ep.dma_desc_addr;
+
+ /* Cannot call dma_free_coherent() with IRQs disabled */
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+ fh_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
+ MAX_DMA_DESC_CNT/2);
+ desc_addr = ep->fh_ep.desc_addr1;
+ dma_desc_addr = ep->fh_ep.dma_desc_addr1;
+ fh_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
+ MAX_DMA_DESC_CNT/2);
+ }
+ goto out_unlocked;
+ }
+ FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+
+out_unlocked:
+ FH_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->fh_ep.num,
+ ep->fh_ep.is_in ? "IN" : "OUT");
+ return 0;
+
+}
+
+/**
+ * This function initializes dma descriptor chain for ISOC transfers.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ */
+void fh_otg_pcd_start_iso_ddma(fh_otg_core_if_t * core_if, fh_otg_pcd_ep_t * ep)
+{
+ fh_otg_dev_dma_desc_t *dma_desc;
+ fh_otg_pcd_request_t *req = NULL;
+ fh_ep_t *fhep = NULL;
+ uint32_t frame_num = 0;
+ int i = 0;
+ int j;
+ int sync_request = 4;
+ uint16_t nat;
+ depctl_data_t depctl;
+
+ fhep = &ep->fh_ep;
+ dma_desc = fhep->desc_addr;
+
+ nat = UGETW(ep->desc->wMaxPacketSize);
+ nat = (nat >> 11) & 0x03;
+ FH_DEBUGPL(DBG_PCD, "nat=%u binterval =%02x\n",nat, fhep->bInterval);
+ FH_DEBUGPL(DBG_PCD, "frame_num = %d\n", fhep->frame_num);
+
+ /* Complete first three IN EP requests for the synchronization */
+ if (fhep->is_in) {
+ if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ for (j = 0; j < sync_request; j++) {
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+ if (!req) {
+ FH_PRINTF("ISOC 0x%p, req = NULL!\n", ep);
+ return;
+ } else {
+ /* Complete first request */
+ req->actual = 0;
+ fh_otg_request_done(ep, req, 0);
+ }
+ }
+ } else {
+ FH_PRINTF("ISOC ep 0x%p, ep->queue empty!\n", ep);
+ return;
+ }
+
+ frame_num = fhep->frame_num + (sync_request -1)*fhep->bInterval;
+
+ FH_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
+ i = i+1;
+ frame_num = (frame_num + fhep->bInterval) & 0x3FFF;
+ /** DMA Descriptor Setup */
+ dma_desc->status.b_iso_in.bs = BS_HOST_BUSY;
+ dma_desc->buf = req->dma;
+ dma_desc->status.b_iso_in.txbytes = req->length;
+ dma_desc->status.b_iso_in.framenum = frame_num;
+ dma_desc->status.b_iso_in.txsts = 0;
+ dma_desc->status.b_iso_in.sp = (req->length % fhep->maxpacket) ? 1 : 0;
+ dma_desc->status.b_iso_in.ioc = 1;
+ dma_desc->status.b_iso_in.pid = nat + 1;
+ dma_desc->status.b_iso_in.l = 0;
+
+ if (req == FH_CIRCLEQ_LAST(&ep->queue)) {
+ dma_desc->status.b_iso_in.l = 1;
+ }
+ dma_desc->status.b_iso_in.bs = BS_HOST_READY;
+ FH_DEBUGPL(DBG_PCD, "ISO_DESC #%d %p status = %08x\n", i, dma_desc, dma_desc->status.d32);
+ if (i == MAX_DMA_DESC_CNT/2 - 1) {
+ dma_desc->status.b_iso_in.l = 1;
+ break;
+ }
+ dma_desc++;
+ }
+ FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[fhep->num]->diepdma, fhep->dma_desc_addr);
+ FH_DEBUGPL(DBG_PCD, "%d ISOC IN descs were programmed\n", i-1);
+ depctl.d32 = 0;
+ depctl.b.epena = 1;
+ depctl.b.cnak = 1;
+ FH_MODIFY_REG32(&core_if->dev_if->in_ep_regs[fhep->num]->diepctl, 0, depctl.d32);
+ } else {
+ FH_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
+ i = i+1;
+ frame_num = (frame_num + fhep->bInterval) & 0x3FFF;
+ /** DMA Descriptor Setup */
+ dma_desc->status.b_iso_out.bs = BS_HOST_BUSY;
+ dma_desc->buf = req->dma;
+ dma_desc->status.b_iso_out.rxbytes = req->length;
+ dma_desc->status.b_iso_out.rxsts = 0;
+ dma_desc->status.b_iso_out.sp = (req->length % fhep->maxpacket) ? 1 : 0;
+ dma_desc->status.b_iso_out.ioc = 1;
+ dma_desc->status.b_iso_out.pid = nat + 1;
+ dma_desc->status.b_iso_out.l = 0;
+
+ if (req == FH_CIRCLEQ_LAST(&ep->queue)) {
+ dma_desc->status.b_iso_out.l = 1;
+ }
+ dma_desc->status.b_iso_in.bs = BS_HOST_READY;
+ FH_DEBUGPL(DBG_PCD, "ISO_DESC #%d %p status = %08x\n", i, dma_desc, dma_desc->status.d32);
+ if (i == MAX_DMA_DESC_CNT/2 - 1) {
+ dma_desc->status.b_iso_out.l = 1;
+ break;
+ }
+ dma_desc++;
+ }
+ FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[fhep->num]->doepdma, fhep->dma_desc_addr);
+ FH_DEBUGPL(DBG_PCD, "%d ISOC OUT descs were programmed\n", i-1);
+ depctl.d32 = 0;
+ depctl.b.epena = 1;
+ depctl.b.cnak = 1;
+ FH_MODIFY_REG32(&core_if->dev_if->out_ep_regs[fhep->num]->doepctl, 0, depctl.d32);
+ }
+ fhep->iso_desc_first = i; //vahrama - pay attention previous one was i-1
+ fhep->iso_transfer_started = 1;
+ fhep->frame_num = frame_num;
+ fhep->use_add_buf = 1;
+}
+/**
+ * Program next ISO request to the DMA chain
+ *
+ */
+static void program_next_iso_request_ddma (fh_otg_pcd_ep_t * ep, fh_otg_pcd_request_t * req)
+{
+ fh_otg_dev_dma_desc_t *dma_desc;
+ fh_dma_t dma_desc_addr;
+ uint32_t frame_num = 0;
+ uint32_t nat;
+ uint32_t index;
+
+ FH_DEBUGPL(DBG_PCD, "%s", __FUNCTION__);
+
+ if (ep->fh_ep.use_add_buf) {
+ index = ep->fh_ep.iso_desc_second + 1;
+ } else {
+ index = ep->fh_ep.iso_desc_first + 1;
+ }
+
+ if (index > MAX_DMA_DESC_CNT/2) {
+ FH_PRINTF("There are no free descs in the chain!\n");
+ return;
+ }
+
+ if (ep->fh_ep.use_add_buf) {
+ dma_desc = &ep->fh_ep.desc_addr1[ep->fh_ep.iso_desc_second];
+ dma_desc_addr = ep->fh_ep.dma_desc_addr1;
+ ep->fh_ep.iso_desc_second += 1;
+ } else {
+ dma_desc = &ep->fh_ep.desc_addr[ep->fh_ep.iso_desc_first];
+ dma_desc_addr = ep->fh_ep.dma_desc_addr;
+ ep->fh_ep.iso_desc_first += 1;
+ }
+ nat = UGETW(ep->desc->wMaxPacketSize);
+ nat = (nat >> 11) & 0x03;
+ frame_num = (ep->fh_ep.frame_num + ep->fh_ep.bInterval) & 0x3FFF;
+ if (ep->fh_ep.is_in) {
+ /** DMA Descriptor Setup */
+ dma_desc->status.b_iso_in.bs = BS_HOST_BUSY;
+ dma_desc->buf = req->dma;
+ dma_desc->status.b_iso_in.txbytes = req->length;
+ dma_desc->status.b_iso_in.framenum = frame_num;
+ dma_desc->status.b_iso_in.txsts = 0;
+ dma_desc->status.b_iso_in.sp = (req->length % ep->fh_ep.maxpacket) ? 1 : 0;
+ dma_desc->status.b_iso_in.ioc = 1;
+ dma_desc->status.b_iso_in.pid = nat + 1;
+ dma_desc->status.b_iso_in.l = 1;
+
+ dma_desc->status.b_iso_in.bs = BS_HOST_READY;
+
+ /* Clear L bit on the previous desc of the chain */
+ if (index > 1) {
+ dma_desc--;
+ dma_desc->status.b_iso_in.l = 0;
+ }
+ } else {
+ /** DMA Descriptor Setup */
+ dma_desc->status.b_iso_out.bs = BS_HOST_BUSY;
+ dma_desc->buf = req->dma;
+ dma_desc->status.b_iso_out.rxbytes = req->length;
+ dma_desc->status.b_iso_out.rxsts = 0;
+ dma_desc->status.b_iso_out.sp = (req->length % ep->fh_ep.maxpacket) ? 1 : 0;
+ dma_desc->status.b_iso_out.ioc = 1;
+ dma_desc->status.b_iso_out.pid = nat + 1;
+ dma_desc->status.b_iso_out.l = 1;
+
+ dma_desc->status.b_iso_out.bs = BS_HOST_READY;
+
+ /* Clear L bit on the previous desc of the chain */
+ if (index > 1) {
+ dma_desc--;
+ dma_desc->status.b_iso_out.l = 0;
+ }
+ }
+ ep->fh_ep.frame_num = frame_num;
+
+}
+
+/******************************************************************************/
+#ifdef FH_UTE_PER_IO
+
+/**
+ * Free the request and its extended parts
+ *
+ */
+void fh_pcd_xiso_ereq_free(fh_otg_pcd_ep_t * ep, fh_otg_pcd_request_t * req)
+{
+ FH_FREE(req->ext_req.per_io_frame_descs);
+ FH_FREE(req);
+}
+
+/**
+ * Start the next request in the endpoint's queue.
+ *
+ */
+int fh_otg_pcd_xiso_start_next_request(fh_otg_pcd_t * pcd,
+ fh_otg_pcd_ep_t * ep)
+{
+ int i;
+ fh_otg_pcd_request_t *req = NULL;
+ fh_ep_t *fhep = NULL;
+ struct fh_iso_xreq_port *ereq = NULL;
+ struct fh_iso_pkt_desc_port *ddesc_iso;
+ uint16_t nat;
+ depctl_data_t diepctl;
+
+ fhep = &ep->fh_ep;
+
+ if (fhep->xiso_active_xfers > 0) {
+#if 0 //Disable this to decrease s/w overhead that is crucial for Isoc transfers
+ FH_WARN("There are currently active transfers for EP%d \
+ (active=%d; queued=%d)", fhep->num, fhep->xiso_active_xfers,
+ fhep->xiso_queued_xfers);
+#endif
+ return 0;
+ }
+
+ nat = UGETW(ep->desc->wMaxPacketSize);
+ nat = (nat >> 11) & 0x03;
+
+ if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+ ereq = &req->ext_req;
+ ep->stopped = 0;
+
+ /* Get the frame number */
+ fhep->xiso_frame_num =
+ fh_otg_get_frame_number(GET_CORE_IF(pcd));
+ FH_DEBUG("FRM_NUM=%d", fhep->xiso_frame_num);
+
+ ddesc_iso = ereq->per_io_frame_descs;
+
+ if (fhep->is_in) {
+ /* Setup DMA Descriptor chain for IN Isoc request */
+ for (i = 0; i < ereq->pio_pkt_count; i++) {
+ //if ((i % (nat + 1)) == 0)
+ if (i > 0)
+ fhep->xiso_frame_num =
+ (fhep->xiso_bInterval +
+ fhep->xiso_frame_num) & 0x3FFF;
+ fhep->desc_addr[i].buf =
+ req->dma + ddesc_iso[i].offset;
+ fhep->desc_addr[i].status.b_iso_in.txbytes =
+ ddesc_iso[i].length;
+ fhep->desc_addr[i].status.b_iso_in.framenum =
+ fhep->xiso_frame_num;
+ fhep->desc_addr[i].status.b_iso_in.bs =
+ BS_HOST_READY;
+ fhep->desc_addr[i].status.b_iso_in.txsts = 0;
+ fhep->desc_addr[i].status.b_iso_in.sp =
+ (ddesc_iso[i].length %
+ fhep->maxpacket) ? 1 : 0;
+ fhep->desc_addr[i].status.b_iso_in.ioc = 0;
+ fhep->desc_addr[i].status.b_iso_in.pid = nat + 1;
+ fhep->desc_addr[i].status.b_iso_in.l = 0;
+
+ /* Process the last descriptor */
+ if (i == ereq->pio_pkt_count - 1) {
+ fhep->desc_addr[i].status.b_iso_in.ioc = 1;
+ fhep->desc_addr[i].status.b_iso_in.l = 1;
+ }
+ }
+
+ /* Setup and start the transfer for this endpoint */
+ fhep->xiso_active_xfers++;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
+ in_ep_regs[fhep->num]->diepdma,
+ fhep->dma_desc_addr);
+ diepctl.d32 = 0;
+ diepctl.b.epena = 1;
+ diepctl.b.cnak = 1;
+ FH_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
+ in_ep_regs[fhep->num]->diepctl, 0,
+ diepctl.d32);
+ } else {
+ /* Setup DMA Descriptor chain for OUT Isoc request */
+ for (i = 0; i < ereq->pio_pkt_count; i++) {
+ //if ((i % (nat + 1)) == 0)
+ fhep->xiso_frame_num = (fhep->xiso_bInterval +
+ fhep->xiso_frame_num) & 0x3FFF;
+ fhep->desc_addr[i].buf =
+ req->dma + ddesc_iso[i].offset;
+ fhep->desc_addr[i].status.b_iso_out.rxbytes =
+ ddesc_iso[i].length;
+ fhep->desc_addr[i].status.b_iso_out.framenum =
+ fhep->xiso_frame_num;
+ fhep->desc_addr[i].status.b_iso_out.bs =
+ BS_HOST_READY;
+ fhep->desc_addr[i].status.b_iso_out.rxsts = 0;
+ fhep->desc_addr[i].status.b_iso_out.sp =
+ (ddesc_iso[i].length %
+ fhep->maxpacket) ? 1 : 0;
+ fhep->desc_addr[i].status.b_iso_out.ioc = 0;
+ fhep->desc_addr[i].status.b_iso_out.pid = nat + 1;
+ fhep->desc_addr[i].status.b_iso_out.l = 0;
+
+ /* Process the last descriptor */
+ if (i == ereq->pio_pkt_count - 1) {
+ fhep->desc_addr[i].status.b_iso_out.ioc = 1;
+ fhep->desc_addr[i].status.b_iso_out.l = 1;
+ }
+ }
+
+ /* Setup and start the transfer for this endpoint */
+ fhep->xiso_active_xfers++;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->
+ dev_if->out_ep_regs[fhep->num]->
+ doepdma, fhep->dma_desc_addr);
+ diepctl.d32 = 0;
+ diepctl.b.epena = 1;
+ diepctl.b.cnak = 1;
+ FH_MODIFY_REG32(&GET_CORE_IF(pcd)->
+ dev_if->out_ep_regs[fhep->num]->
+ doepctl, 0, diepctl.d32);
+ }
+
+ } else {
+ ep->stopped = 1;
+ }
+
+ return 0;
+}
+
+/**
+ * - Remove the request from the queue
+ */
+void complete_xiso_ep(fh_otg_pcd_ep_t * ep)
+{
+ fh_otg_pcd_request_t *req = NULL;
+ struct fh_iso_xreq_port *ereq = NULL;
+ struct fh_iso_pkt_desc_port *ddesc_iso = NULL;
+ fh_ep_t *fhep = NULL;
+ int i;
+
+ //FH_DEBUG();
+ fhep = &ep->fh_ep;
+
+ /* Get the first pending request from the queue */
+ if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+ if (!req) {
+ FH_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
+ return;
+ }
+ fhep->xiso_active_xfers--;
+ fhep->xiso_queued_xfers--;
+ /* Remove this request from the queue */
+ FH_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
+ } else {
+ FH_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
+ return;
+ }
+
+ ep->stopped = 1;
+ ereq = &req->ext_req;
+ ddesc_iso = ereq->per_io_frame_descs;
+
+ if (fhep->xiso_active_xfers < 0) {
+ FH_WARN("EP#%d (xiso_active_xfers=%d)", fhep->num,
+ fhep->xiso_active_xfers);
+ }
+
+ /* Fill the Isoc descs of portable extended req from dma descriptors */
+ for (i = 0; i < ereq->pio_pkt_count; i++) {
+ if (fhep->is_in) { /* IN endpoints */
+ ddesc_iso[i].actual_length = ddesc_iso[i].length -
+ fhep->desc_addr[i].status.b_iso_in.txbytes;
+ ddesc_iso[i].status =
+ fhep->desc_addr[i].status.b_iso_in.txsts;
+ } else { /* OUT endpoints */
+ ddesc_iso[i].actual_length = ddesc_iso[i].length -
+ fhep->desc_addr[i].status.b_iso_out.rxbytes;
+ ddesc_iso[i].status =
+ fhep->desc_addr[i].status.b_iso_out.rxsts;
+ }
+ }
+
+ FH_SPINUNLOCK(ep->pcd->lock);
+
+ /* Call the completion function in the non-portable logic */
+ ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
+ &req->ext_req);
+
+ FH_SPINLOCK(ep->pcd->lock);
+
+ /* Free the request - specific freeing needed for extended request object */
+ fh_pcd_xiso_ereq_free(ep, req);
+
+ /* Start the next request */
+ fh_otg_pcd_xiso_start_next_request(ep->pcd, ep);
+
+ return;
+}
+
+/**
+ * Create and initialize the Isoc pkt descriptors of the extended request.
+ *
+ */
+static int fh_otg_pcd_xiso_create_pkt_descs(fh_otg_pcd_request_t * req,
+ void *ereq_nonport,
+ int atomic_alloc)
+{
+ struct fh_iso_xreq_port *ereq = NULL;
+ struct fh_iso_xreq_port *req_mapped = NULL;
+ struct fh_iso_pkt_desc_port *ipds = NULL; /* To be created in this function */
+ uint32_t pkt_count;
+ int i;
+
+ ereq = &req->ext_req;
+ req_mapped = (struct fh_iso_xreq_port *)ereq_nonport;
+ pkt_count = req_mapped->pio_pkt_count;
+
+ /* Create the isoc descs */
+ if (atomic_alloc) {
+ ipds = FH_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
+ } else {
+ ipds = FH_ALLOC(sizeof(*ipds) * pkt_count);
+ }
+
+ if (!ipds) {
+ FH_ERROR("Failed to allocate isoc descriptors");
+ return -FH_E_NO_MEMORY;
+ }
+
+ /* Initialize the extended request fields */
+ ereq->per_io_frame_descs = ipds;
+ ereq->error_count = 0;
+ ereq->pio_alloc_pkt_count = pkt_count;
+ ereq->pio_pkt_count = pkt_count;
+ ereq->tr_sub_flags = req_mapped->tr_sub_flags;
+
+ /* Init the Isoc descriptors */
+ for (i = 0; i < pkt_count; i++) {
+ ipds[i].length = req_mapped->per_io_frame_descs[i].length;
+ ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
+ ipds[i].status = req_mapped->per_io_frame_descs[i].status; /* 0 */
+ ipds[i].actual_length =
+ req_mapped->per_io_frame_descs[i].actual_length;
+ }
+
+ return 0;
+}
+
+static void prn_ext_request(struct fh_iso_xreq_port *ereq)
+{
+ struct fh_iso_pkt_desc_port *xfd = NULL;
+ int i;
+
+ FH_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
+ FH_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
+ FH_DEBUG("error_count=%d", ereq->error_count);
+ FH_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
+ FH_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
+ FH_DEBUG("res=%d", ereq->res);
+
+ for (i = 0; i < ereq->pio_pkt_count; i++) {
+ xfd = &ereq->per_io_frame_descs[0];
+ FH_DEBUG("FD #%d", i);
+
+ FH_DEBUG("xfd->actual_length=%d", xfd->actual_length);
+ FH_DEBUG("xfd->length=%d", xfd->length);
+ FH_DEBUG("xfd->offset=%d", xfd->offset);
+ FH_DEBUG("xfd->status=%d", xfd->status);
+ }
+}
+
+/**
+ * Queues an extended (per-packet descriptor) isochronous request on the
+ * endpoint. This extension is supported only in descriptor-DMA mode;
+ * the per-packet descriptors are copied from the non-portable request
+ * @ereq_nonport. When the request carries the ASAP flag and no transfer
+ * is active, the next transfer is started immediately.
+ *
+ * @return 0 on success, negative FH error code on failure.
+ */
+int fh_otg_pcd_xiso_ep_queue(fh_otg_pcd_t * pcd, void *ep_handle,
+			     uint8_t * buf, fh_dma_t dma_buf, uint32_t buflen,
+			     int zero, void *req_handle, int atomic_alloc,
+			     void *ereq_nonport)
+{
+	fh_otg_pcd_request_t *req = NULL;
+	fh_otg_pcd_ep_t *ep;
+	fh_irqflags_t flags;
+	int res;
+
+	ep = get_ep_from_handle(pcd, ep_handle);
+	if (!ep) {
+		FH_WARN("bad ep\n");
+		return -FH_E_INVALID;
+	}
+
+	/* We support this extension only for DDMA mode */
+	if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC)
+		if (!GET_CORE_IF(pcd)->dma_desc_enable)
+			return -FH_E_INVALID;
+
+	/* Create a fh_otg_pcd_request_t object */
+	if (atomic_alloc) {
+		req = FH_ALLOC_ATOMIC(sizeof(*req));
+	} else {
+		req = FH_ALLOC(sizeof(*req));
+	}
+
+	if (!req) {
+		return -FH_E_NO_MEMORY;
+	}
+
+	/* Create the Isoc descs for this request which shall be the exact match
+	 * of the structure sent to us from the non-portable logic */
+	res =
+	    fh_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
+	if (res) {
+		FH_WARN("Failed to init the Isoc descriptors");
+		FH_FREE(req);
+		return res;
+	}
+
+	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+
+	FH_CIRCLEQ_INIT_ENTRY(req, queue_entry);
+	req->buf = buf;
+	req->dma = dma_buf;
+	req->length = buflen;
+	req->sent_zlp = zero;
+	req->priv = req_handle;
+
+	ep->fh_ep.dma_addr = dma_buf;
+	ep->fh_ep.start_xfer_buff = buf;
+	ep->fh_ep.xfer_buff = buf;
+	ep->fh_ep.xfer_len = 0;
+	ep->fh_ep.xfer_count = 0;
+	ep->fh_ep.sent_zlp = 0;
+	ep->fh_ep.total_len = buflen;
+
+	/* Add this request to the tail */
+	FH_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
+	ep->fh_ep.xiso_queued_xfers++;
+
+	/* If the req->status == ASAP then check if there is any active transfer
+	 * for this endpoint. If no active transfers, then get the first entry
+	 * from the queue and start that transfer
+	 */
+	if (req->ext_req.tr_sub_flags == FH_EREQ_TF_ASAP) {
+		res = fh_otg_pcd_xiso_start_next_request(pcd, ep);
+		if (res) {
+			FH_WARN("Failed to start the next Isoc transfer");
+			/* BUGFIX: the request was already linked on the EP
+			 * queue above; unlink it and undo the queued-xfer
+			 * count before freeing, otherwise the queue keeps a
+			 * dangling pointer (use-after-free on the next
+			 * queue walk). */
+			FH_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
+			ep->fh_ep.xiso_queued_xfers--;
+			FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+			FH_FREE(req);
+			return res;
+		}
+	}
+
+	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+	return 0;
+}
+
+#endif
+/* END ifdef FH_UTE_PER_IO ***************************************************/
+/**
+ * Queues a transfer request on an endpoint. If the endpoint queue is
+ * empty and the EP is not stopped, the transfer is started right away
+ * (EP0 via fh_otg_ep0_start_transfer(), other EPs via
+ * fh_otg_ep_start_transfer()); otherwise the request just waits on the
+ * EP queue.
+ *
+ * @param pcd the PCD instance.
+ * @param ep_handle endpoint handle resolved via get_ep_from_handle().
+ * @param buf CPU address of the data buffer.
+ * @param dma_buf DMA address of the data buffer.
+ * @param buflen length of the transfer in bytes.
+ * @param zero if set, a ZLP terminates a max-packet-multiple transfer.
+ * @param req_handle opaque cookie handed back on completion.
+ * @param atomic_alloc if set, use atomic allocation (caller context
+ *        cannot sleep).
+ *
+ * @return 0 on success, negative FH error code on failure.
+ */
+int fh_otg_pcd_ep_queue(fh_otg_pcd_t * pcd, void *ep_handle,
+			uint8_t * buf, fh_dma_t dma_buf, uint32_t buflen,
+			int zero, void *req_handle, int atomic_alloc)
+{
+	fh_irqflags_t flags;
+	fh_otg_pcd_request_t *req;
+	fh_otg_pcd_ep_t *ep;
+	uint32_t max_transfer;
+
+	/* Only EP0 may be queued without an endpoint descriptor. */
+	ep = get_ep_from_handle(pcd, ep_handle);
+	if (!ep || (!ep->desc && ep->fh_ep.num != 0)) {
+		FH_WARN("bad ep\n");
+		return -FH_E_INVALID;
+	}
+
+	if (atomic_alloc) {
+		req = FH_ALLOC_ATOMIC(sizeof(*req));
+	} else {
+		req = FH_ALLOC(sizeof(*req));
+	}
+
+	if (!req) {
+		return -FH_E_NO_MEMORY;
+	}
+	FH_CIRCLEQ_INIT_ENTRY(req, queue_entry);
+	if (!GET_CORE_IF(pcd)->core_params->opt) {
+		if (ep->fh_ep.num != 0) {
+			FH_ERROR("queue req %p, len %d buf %p\n",
+				 req_handle, buflen, buf);
+		}
+	}
+
+	req->buf = buf;
+	req->dma = dma_buf;
+	req->length = buflen;
+	req->sent_zlp = zero;
+	req->priv = req_handle;
+	req->dw_align_buf = NULL;
+	/* Buffer-DMA mode cannot handle a non-dword-aligned DMA address:
+	 * bounce through an aligned copy buffer.
+	 * NOTE(review): an allocation failure is not reported here; the
+	 * code below silently falls back to the unaligned caller buffer --
+	 * confirm this degraded path is intended. */
+	if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
+	    && !GET_CORE_IF(pcd)->dma_desc_enable)
+		req->dw_align_buf = FH_DMA_ALLOC(buflen,
+						 &req->dw_align_buf_dma);
+	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+#if 0
+	/*
+	 * After adding request to the queue for IN ISOC wait for In Token Received
+	 * when TX FIFO is empty interrupt and for OUT ISOC wait for OUT Token
+	 * Received when EP is disabled interrupt to obtain starting microframe
+	 * (odd/even) start transfer
+	 */
+	if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
+		if (req != 0) {
+			depctl_data_t depctl = {.d32 =
+			    FH_READ_REG32(&pcd->core_if->dev_if->
+					  in_ep_regs[ep->fh_ep.num]->
+					  diepctl) };
+			++pcd->request_pending;
+
+			FH_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
+			if (ep->fh_ep.is_in) {
+				depctl.b.cnak = 1;
+				FH_WRITE_REG32(&pcd->core_if->dev_if->
+					       in_ep_regs[ep->fh_ep.num]->
+					       diepctl, depctl.d32);
+			}
+			if (GET_CORE_IF(pcd)->dma_desc_enable) {
+				if (ep->fh_ep.iso_transfer_started) {
+					/*
+					 * Add next request to the descriptor chain
+					 * currently not in use by HW
+					 */
+					program_next_iso_request_ddma(ep, req);
+				} else if (!ep->fh_ep.is_in)
+					/* For OUT start first request immediately after queue */
+					fh_otg_pcd_start_iso_ddma(GET_CORE_IF(pcd), ep);
+			}
+
+			FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+		}
+		return 0;
+	}
+#endif
+	/*
+	 * For EP0 IN without premature status, zlp is required?
+	 */
+	if (ep->fh_ep.num == 0 && ep->fh_ep.is_in) {
+		FH_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->fh_ep.num);
+		//_req->zero = 1;
+	}
+
+	/* Start the transfer */
+	if (FH_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
+		/* EP0 Transfer? */
+		if (ep->fh_ep.num == 0) {
+			/* EP0 is driven by the control state machine;
+			 * continue from the current ep0state. */
+			switch (pcd->ep0state) {
+			case EP0_IN_DATA_PHASE:
+				FH_DEBUGPL(DBG_PCD,
+					   "%s ep0: EP0_IN_DATA_PHASE\n",
+					   __func__);
+				break;
+
+			case EP0_OUT_DATA_PHASE:
+				FH_DEBUGPL(DBG_PCD,
+					   "%s ep0: EP0_OUT_DATA_PHASE\n",
+					   __func__);
+				if (pcd->request_config) {
+					/* Complete STATUS PHASE */
+					ep->fh_ep.is_in = 1;
+					pcd->ep0state = EP0_IN_STATUS_PHASE;
+				}
+				break;
+
+			case EP0_IN_STATUS_PHASE:
+				FH_DEBUGPL(DBG_PCD,
+					   "%s ep0: EP0_IN_STATUS_PHASE\n",
+					   __func__);
+				break;
+
+			default:
+				FH_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
+					   pcd->ep0state);
+				FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+				return -FH_E_SHUTDOWN;
+			}
+
+			ep->fh_ep.dma_addr = dma_buf;
+			ep->fh_ep.start_xfer_buff = buf;
+			ep->fh_ep.xfer_buff = buf;
+			ep->fh_ep.xfer_len = buflen;
+			ep->fh_ep.xfer_count = 0;
+			ep->fh_ep.sent_zlp = 0;
+			ep->fh_ep.total_len = ep->fh_ep.xfer_len;
+
+			/* ZLP needed only when length is an exact multiple
+			 * of maxpacket (and non-zero). */
+			if (zero) {
+				if ((ep->fh_ep.xfer_len %
+				     ep->fh_ep.maxpacket == 0)
+				    && (ep->fh_ep.xfer_len != 0)) {
+					ep->fh_ep.sent_zlp = 1;
+				}
+
+			}
+
+			fh_otg_ep0_start_transfer(GET_CORE_IF(pcd),
+						  &ep->fh_ep);
+		}		// non-ep0 endpoints
+		else {
+#ifdef FH_UTE_CFI
+			if (ep->fh_ep.buff_mode != BM_STANDARD) {
+				/* store the request length */
+				ep->fh_ep.cfi_req_len = buflen;
+				pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
+								ep, req);
+			} else {
+#endif
+				max_transfer =
+				    GET_CORE_IF(ep->pcd)->core_params->
+				    max_transfer_size;
+
+				/* Setup and start the Transfer */
+				if (req->dw_align_buf) {
+					/* IN: stage caller data into the
+					 * aligned bounce buffer first. */
+					if (ep->fh_ep.is_in)
+						fh_memcpy(req->dw_align_buf,
+							  buf, buflen);
+					ep->fh_ep.dma_addr =
+					    req->dw_align_buf_dma;
+					ep->fh_ep.start_xfer_buff =
+					    req->dw_align_buf;
+					ep->fh_ep.xfer_buff =
+					    req->dw_align_buf;
+				} else {
+					ep->fh_ep.dma_addr = dma_buf;
+					ep->fh_ep.start_xfer_buff = buf;
+					ep->fh_ep.xfer_buff = buf;
+				}
+				ep->fh_ep.xfer_len = 0;
+				ep->fh_ep.xfer_count = 0;
+				ep->fh_ep.sent_zlp = 0;
+				ep->fh_ep.total_len = buflen;
+
+				/* Clamp the per-transfer maximum for DDMA
+				 * (OUT additionally rounded to 4 bytes). */
+				ep->fh_ep.maxxfer = max_transfer;
+				if (GET_CORE_IF(pcd)->dma_desc_enable) {
+					uint32_t out_max_xfer =
+					    DDMA_MAX_TRANSFER_SIZE -
+					    (DDMA_MAX_TRANSFER_SIZE % 4);
+					if (ep->fh_ep.is_in) {
+						if (ep->fh_ep.maxxfer >
+						    DDMA_MAX_TRANSFER_SIZE) {
+							ep->fh_ep.maxxfer =
+							    DDMA_MAX_TRANSFER_SIZE;
+						}
+					} else {
+						if (ep->fh_ep.maxxfer >
+						    out_max_xfer) {
+							ep->fh_ep.maxxfer =
+							    out_max_xfer;
+						}
+					}
+				}
+				/* For multi-part transfers keep each part a
+				 * whole number of max packets. */
+				if (ep->fh_ep.maxxfer < ep->fh_ep.total_len) {
+					ep->fh_ep.maxxfer -=
+					    (ep->fh_ep.maxxfer %
+					     ep->fh_ep.maxpacket);
+				}
+
+				if (zero) {
+					if ((ep->fh_ep.total_len %
+					     ep->fh_ep.maxpacket == 0)
+					    && (ep->fh_ep.total_len != 0)) {
+						ep->fh_ep.sent_zlp = 1;
+					}
+				}
+#ifdef FH_UTE_CFI
+			}
+#endif
+			fh_otg_ep_start_transfer(GET_CORE_IF(pcd),
+						 &ep->fh_ep);
+		}
+	}
+
+	if (req != 0) {
+		++pcd->request_pending;
+		FH_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
+		/* Slave (non-DMA) mode with a stopped IN EP: enable the
+		 * "IN token received with TxFIFO empty" interrupt so the
+		 * transfer can be restarted from the ISR. */
+		if (ep->fh_ep.is_in && ep->stopped
+		    && !(GET_CORE_IF(pcd)->dma_enable)) {
+			/** @todo NGS Create a function for this. */
+			diepmsk_data_t diepmsk = {.d32 = 0 };
+			diepmsk.b.intktxfemp = 1;
+			if (GET_CORE_IF(pcd)->multiproc_int_enable) {
+				FH_MODIFY_REG32(&GET_CORE_IF(pcd)->
+						dev_if->dev_global_regs->diepeachintmsk
+						[ep->fh_ep.num], 0,
+						diepmsk.d32);
+			} else {
+				FH_MODIFY_REG32(&GET_CORE_IF(pcd)->
+						dev_if->dev_global_regs->
+						diepmsk, 0, diepmsk.d32);
+			}
+
+		}
+	}
+	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+
+	return 0;
+}
+
+/**
+ * Cancels a previously queued request identified by @req_handle.
+ *
+ * @return 0 if the request was found and completed with -FH_E_RESTART,
+ *         -FH_E_INVALID if the endpoint or request is not valid,
+ *         -FH_E_SHUTDOWN if the matched entry was not linked.
+ */
+int fh_otg_pcd_ep_dequeue(fh_otg_pcd_t * pcd, void *ep_handle,
+			  void *req_handle)
+{
+	fh_irqflags_t flags;
+	fh_otg_pcd_request_t *req = NULL;
+	fh_otg_pcd_ep_t *ep;
+	int found = 0;
+
+	ep = get_ep_from_handle(pcd, ep_handle);
+	if (!ep || (!ep->desc && ep->fh_ep.num != 0)) {
+		FH_WARN("bad argument\n");
+		return -FH_E_INVALID;
+	}
+
+	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+
+	/* make sure it's actually queued on this endpoint */
+	FH_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
+		if (req->priv == (void *)req_handle) {
+			found = 1;
+			break;
+		}
+	}
+
+	/* BUGFIX: the original re-tested req->priv after an exhausted
+	 * FOREACH, dereferencing the list-head sentinel; use an explicit
+	 * found flag instead. */
+	if (!found) {
+		FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+		return -FH_E_INVALID;
+	}
+
+	if (!FH_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
+		fh_otg_request_done(ep, req, -FH_E_RESTART);
+	} else {
+		req = NULL;
+	}
+
+	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+
+	return req ? 0 : -FH_E_SHUTDOWN;
+
+}
+
+/**
+ * Sets or clears the halt (STALL) condition on an endpoint.
+ *
+ * @param value 0 = clear stall; 1 = set stall;
+ *              2 = clear the stall_clear_flag only;
+ *              3 = set stall_clear_flag, then stall the EP.
+ *
+ * @return 0 on success, -FH_E_AGAIN if a transfer is in progress (or an
+ *         IN TxFIFO still holds data), -FH_E_INVALID for a bad EP.
+ *         ISOC endpoints cannot be halted.
+ */
+int fh_otg_pcd_ep_halt(fh_otg_pcd_t * pcd, void *ep_handle, int value)
+{
+	fh_otg_pcd_ep_t *ep;
+	fh_irqflags_t flags;
+	int retval = 0;
+
+	ep = get_ep_from_handle(pcd, ep_handle);
+
+	if (!ep || (!ep->desc && ep != &pcd->ep0) ||
+	    (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
+		FH_WARN("%s, bad ep\n", __func__);
+		return -FH_E_INVALID;
+	}
+
+	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+	if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
+		FH_WARN("%d %s XFer In process\n", ep->fh_ep.num,
+			ep->fh_ep.is_in ? "IN" : "OUT");
+		retval = -FH_E_AGAIN;
+	} else if (value == 0) {
+		ep->fh_ep.stall_clear_flag = 0;
+		fh_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->fh_ep);
+	} else if (value == 1) {
+		/* value == 3 jumps back here after setting
+		 * stall_clear_flag. */
+	      stall:
+		if (ep->fh_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
+			dtxfsts_data_t txstatus;
+			fifosize_data_t txfifosize;
+
+			txfifosize.d32 =
+			    FH_READ_REG32(&GET_CORE_IF(pcd)->
+					  core_global_regs->dtxfsiz[ep->fh_ep.
+								    tx_fifo_num]);
+			txstatus.d32 =
+			    FH_READ_REG32(&GET_CORE_IF(pcd)->
+					  dev_if->in_ep_regs[ep->fh_ep.num]->
+					  dtxfsts);
+
+			/* Refuse to stall an IN EP whose TxFIFO still
+			 * contains data. */
+			if (txstatus.b.txfspcavail < txfifosize.b.depth) {
+				FH_WARN("%s() Data In Tx Fifo\n", __func__);
+				retval = -FH_E_AGAIN;
+			} else {
+				if (ep->fh_ep.num == 0) {
+					pcd->ep0state = EP0_STALL;
+				}
+
+				ep->stopped = 1;
+				fh_otg_ep_set_stall(GET_CORE_IF(pcd),
+						    &ep->fh_ep);
+			}
+		} else {
+			if (ep->fh_ep.num == 0) {
+				pcd->ep0state = EP0_STALL;
+			}
+
+			ep->stopped = 1;
+			fh_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->fh_ep);
+		}
+	} else if (value == 2) {
+		ep->fh_ep.stall_clear_flag = 0;
+	} else if (value == 3) {
+		ep->fh_ep.stall_clear_flag = 1;
+		goto stall;
+	}
+
+	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+
+	return retval;
+}
+
+/**
+ * This function initiates remote wakeup of the host from suspend state.
+ *
+ * @param pcd the PCD instance.
+ * @param set when non-zero, the RmtWkUpSig bit is pulsed (set, held for
+ *        2 ms, cleared); when zero, nothing is signalled.
+ */
+void fh_otg_pcd_rem_wkup_from_suspend(fh_otg_pcd_t * pcd, int set)
+{
+	dctl_data_t dctl = { 0 };
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	dsts_data_t dsts;
+
+	/* Warn (but proceed) if the device is not actually suspended. */
+	dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+	if (!dsts.b.suspsts) {
+		FH_WARN("Remote wakeup while is not in suspend state\n");
+	}
+	/* Check if DEVICE_REMOTE_WAKEUP feature enabled */
+	if (pcd->remote_wakeup_enable) {
+		if (set) {
+
+			/* With ADP active the core was handed to the Power
+			 * Down Logic: stop the probe, re-enable the core
+			 * and restart the PCD before signalling wakeup. */
+			if (core_if->adp_enable) {
+				gpwrdn_data_t gpwrdn;
+
+				fh_otg_adp_probe_stop(core_if);
+
+				/* Mask SRP detected interrupt from Power Down Logic */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.srp_det_msk = 1;
+				FH_MODIFY_REG32(&core_if->
+						core_global_regs->gpwrdn,
+						gpwrdn.d32, 0);
+
+				/* Disable Power Down Logic */
+				gpwrdn.d32 = 0;
+				gpwrdn.b.pmuactv = 1;
+				FH_MODIFY_REG32(&core_if->
+						core_global_regs->gpwrdn,
+						gpwrdn.d32, 0);
+
+				/*
+				 * Initialize the Core for Device mode.
+				 */
+				core_if->op_state = B_PERIPHERAL;
+				fh_otg_core_init(core_if);
+				fh_otg_enable_global_interrupts(core_if);
+				cil_pcd_start(core_if);
+
+				fh_otg_initiate_srp(core_if);
+			}
+
+			/* Pulse RmtWkUpSig: set, hold 2 ms, clear. */
+			dctl.b.rmtwkupsig = 1;
+			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+					dctl, 0, dctl.d32);
+			FH_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
+
+			fh_mdelay(2);
+			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+					dctl, dctl.d32, 0);
+			FH_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
+		}
+	} else {
+		FH_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
+	}
+}
+
+#ifdef CONFIG_USB_FH_OTG_LPM
+/**
+ * This function initiates remote wakeup of the host from L1 sleep state.
+ *
+ * Bails out early unless the port is actually in L1, the host has
+ * enabled remote wakeup, and the core reports resume-OK.
+ *
+ * @param pcd the PCD instance.
+ * @param set when non-zero, RmtWkUpSig is set (hardware self-clears it).
+ */
+void fh_otg_pcd_rem_wkup_from_sleep(fh_otg_pcd_t * pcd, int set)
+{
+	glpmcfg_data_t lpmcfg;
+	pcgcctl_data_t pcgcctl = {.d32 = 0 };
+
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+
+	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+
+	/* Check if we are in L1 state */
+	if (!lpmcfg.b.prt_sleep_sts) {
+		FH_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
+		return;
+	}
+
+	/* Check if host allows remote wakeup */
+	if (!lpmcfg.b.rem_wkup_en) {
+		FH_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
+		return;
+	}
+
+	/* Check if Resume OK */
+	if (!lpmcfg.b.sleep_state_resumeok) {
+		FH_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
+		return;
+	}
+
+	/* Re-read glpmcfg and disable UTMI sleep before waking up.
+	 * NOTE(review): only bit 4 of hird_thres is cleared here --
+	 * confirm against the core programming guide. */
+	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
+	lpmcfg.b.en_utmi_sleep = 0;
+	lpmcfg.b.hird_thres &= (~(1 << 4));
+
+	/* Clear Enbl_L1Gating bit. */
+	pcgcctl.b.enbl_sleep_gating = 1;
+	FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32,0);
+
+	FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
+
+	if (set) {
+		dctl_data_t dctl = {.d32 = 0 };
+		dctl.b.rmtwkupsig = 1;
+		/* Set RmtWkUpSig bit to start remote wakup signaling.
+		 * Hardware will automatically clear this bit.
+		 */
+		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
+				0, dctl.d32);
+		FH_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
+	}
+
+}
+#endif
+
+/**
+ * Performs remote wakeup.
+ *
+ * In device mode, dispatches under the PCD lock to the L1-sleep variant
+ * when the core is in L1 (LPM builds only), otherwise to the suspend
+ * variant. No-op in host mode.
+ */
+void fh_otg_pcd_remote_wakeup(fh_otg_pcd_t * pcd, int set)
+{
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	fh_irqflags_t flags;
+	if (fh_otg_is_device_mode(core_if)) {
+		FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+#ifdef CONFIG_USB_FH_OTG_LPM
+		if (core_if->lx_state == FH_OTG_L1) {
+			fh_otg_pcd_rem_wkup_from_sleep(pcd, set);
+		} else {
+#endif
+			/* Note: brace pairing spans the #ifdef above --
+			 * without LPM this is the only path. */
+			fh_otg_pcd_rem_wkup_from_suspend(pcd, set);
+#ifdef CONFIG_USB_FH_OTG_LPM
+		}
+#endif
+		FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+	}
+	return;
+}
+
+/**
+ * Drives a soft disconnect for @no_of_usecs microseconds: the
+ * SftDiscon bit is set, held for the requested time, then cleared.
+ * Only valid in device mode.
+ */
+void fh_otg_pcd_disconnect_us(fh_otg_pcd_t * pcd, int no_of_usecs)
+{
+	fh_otg_core_if_t *cif = GET_CORE_IF(pcd);
+	dctl_data_t disconnect_ctl = { 0 };
+
+	if (!fh_otg_is_device_mode(cif)) {
+		FH_PRINTF("NOT SUPPORTED IN HOST MODE\n");
+		return;
+	}
+
+	disconnect_ctl.b.sftdiscon = 1;
+	FH_PRINTF("Soft disconnect for %d useconds\n", no_of_usecs);
+	FH_MODIFY_REG32(&cif->dev_if->dev_global_regs->dctl, 0,
+			disconnect_ctl.d32);
+	fh_udelay(no_of_usecs);
+	FH_MODIFY_REG32(&cif->dev_if->dev_global_regs->dctl,
+			disconnect_ctl.d32, 0);
+}
+
+/**
+ * Sets (@ctrl_flag non-zero) or clears (@ctrl_flag zero) the soft
+ * disconnect bit in DCTL. Only valid in device mode.
+ */
+void fh_otg_pcd_disconnect_soft(fh_otg_pcd_t *pcd, int ctrl_flag)
+{
+	fh_otg_core_if_t *cif = GET_CORE_IF(pcd);
+	dctl_data_t dctl_bits = {0};
+
+	if (!fh_otg_is_device_mode(cif)) {
+		FH_PRINTF("NOT SUPPORTED IN HOST MODE\n");
+		return;
+	}
+
+	dctl_bits.b.sftdiscon = 1;
+	if (ctrl_flag)
+		FH_MODIFY_REG32(&cif->dev_if->dev_global_regs->dctl,
+				0, dctl_bits.d32);
+	else
+		FH_MODIFY_REG32(&cif->dev_if->dev_global_regs->dctl,
+				dctl_bits.d32, 0);
+}
+
+/**
+ * Starts the protocol if no session is in progress; if a session is
+ * already valid and the device is suspended, starts remote wakeup
+ * signalling instead.
+ *
+ * @return always 0.
+ */
+int fh_otg_pcd_wakeup(fh_otg_pcd_t * pcd)
+{
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	gotgctl_data_t otg_ctl;
+	dsts_data_t dev_status;
+
+	/* Check if valid session */
+	otg_ctl.d32 =
+	    FH_READ_REG32(&core_if->core_global_regs->gotgctl);
+	if (!otg_ctl.b.bsesvld) {
+		/* No valid session: initiate the Session Request Protocol. */
+		fh_otg_pcd_initiate_srp(pcd);
+		return 0;
+	}
+
+	/* Session valid: signal remote wakeup only when suspended. */
+	dev_status.d32 =
+	    FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+	if (dev_status.b.suspsts)
+		fh_otg_pcd_remote_wakeup(pcd, 1);
+
+	return 0;
+
+}
+
+/**
+ * Initiates the Session Request Protocol while holding the PCD lock.
+ *
+ * NOTE(review): the original comment claimed this starts a 6-second SRP
+ * timeout timer; no timer is armed here -- only fh_otg_initiate_srp()
+ * is called. Confirm whether the timeout lives in the core layer.
+ *
+ * @param pcd the pcd structure.
+ */
+void fh_otg_pcd_initiate_srp(fh_otg_pcd_t * pcd)
+{
+	fh_irqflags_t flags;
+	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
+	fh_otg_initiate_srp(GET_CORE_IF(pcd));
+	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
+}
+
+/** Returns the current USB frame number reported by the core. */
+int fh_otg_pcd_get_frame_number(fh_otg_pcd_t * pcd)
+{
+	return fh_otg_get_frame_number(GET_CORE_IF(pcd));
+}
+
+/** Returns non-zero when LPM is enabled in the core parameters. */
+int fh_otg_pcd_is_lpm_enabled(fh_otg_pcd_t * pcd)
+{
+	return GET_CORE_IF(pcd)->core_params->lpm_enable;
+}
+
+/** Returns non-zero when BESL is enabled in the core parameters. */
+int fh_otg_pcd_is_besl_enabled(fh_otg_pcd_t * pcd)
+{
+	return GET_CORE_IF(pcd)->core_params->besl_enable;
+}
+
+/** Returns the configured baseline BESL core parameter. */
+int fh_otg_pcd_get_param_baseline_besl(fh_otg_pcd_t * pcd)
+{
+	return GET_CORE_IF(pcd)->core_params->baseline_besl;
+}
+
+/** Returns the configured deep BESL core parameter. */
+int fh_otg_pcd_get_param_deep_besl(fh_otg_pcd_t * pcd)
+{
+	return GET_CORE_IF(pcd)->core_params->deep_besl;
+}
+
+/** Returns the state of the B-Device HNP Enable flag. */
+uint32_t get_b_hnp_enable(fh_otg_pcd_t * pcd)
+{
+	return pcd->b_hnp_enable;
+}
+
+/** Returns the state of the A-Device HNP Support flag. */
+uint32_t get_a_hnp_support(fh_otg_pcd_t * pcd)
+{
+	return pcd->a_hnp_support;
+}
+
+/** Returns the state of the A-Device Alt HNP Support flag. */
+uint32_t get_a_alt_hnp_support(fh_otg_pcd_t * pcd)
+{
+	return pcd->a_alt_hnp_support;
+}
+
+/** Returns the state of the Remote Wakeup Enable flag. */
+int fh_otg_pcd_get_rmwkup_enable(fh_otg_pcd_t * pcd)
+{
+	return pcd->remote_wakeup_enable;
+}
+
+#endif /* FH_HOST_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.h
new file mode 100644
index 00000000..1d2a66bd
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.h
@@ -0,0 +1,268 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd.h $
+ * $Revision: #49 $
+ * $Date: 2013/05/16 $
+ * $Change: 2231774 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_HOST_ONLY
+#if !defined(__FH_PCD_H__)
+#define __FH_PCD_H__
+
+#include "fh_otg_os_dep.h"
+#include "../fh_common_port/usb.h"
+#include "fh_otg_cil.h"
+#include "fh_otg_pcd_if.h"
+struct cfiobject;
+
+/**
+ * @file
+ *
+ * This file contains the structures, constants, and interfaces for
+ * the Peripheral Controller Driver (PCD).
+ *
+ * The Peripheral Controller Driver (PCD) for Linux will implement the
+ * Gadget API, so that the existing Gadget drivers can be used. For
+ * the Mass Storage Function driver the File-backed USB Storage Gadget
+ * (FBS) driver will be used. The FBS driver supports the
+ * Control-Bulk (CB), Control-Bulk-Interrupt (CBI), and Bulk-Only
+ * transports.
+ *
+ */
+
+/** Invalid DMA Address */
+#define FH_DMA_ADDR_INVALID (~(fh_dma_t)0)
+
+/** Max Transfer size for any EP */
+#define DDMA_MAX_TRANSFER_SIZE 65535
+
+/**
+ * Get the pointer to the core_if from the pcd pointer.
+ */
+#define GET_CORE_IF( _pcd ) (_pcd->core_if)
+
+/**
+ * States of the EP0 control-transfer state machine.
+ */
+typedef enum ep0_state {
+	EP0_DISCONNECT,		/* no host */
+	EP0_IDLE,		/* idle, between control transfers */
+	EP0_IN_DATA_PHASE,	/* IN data stage */
+	EP0_OUT_DATA_PHASE,	/* OUT data stage */
+	EP0_IN_STATUS_PHASE,	/* IN (zero-length) status stage */
+	EP0_OUT_STATUS_PHASE,	/* OUT (zero-length) status stage */
+	EP0_STALL,		/* EP0 stalled */
+} ep0state_e;
+
+/** Forward declaration. */
+struct fh_otg_pcd;
+
+/** FH_otg iso request structure.
+ *
+ */
+typedef struct usb_iso_request fh_otg_pcd_iso_request_t;
+
+#ifdef FH_UTE_PER_IO
+
+/**
+ * This shall be the exact analogy of the same type structure defined in the
+ * usb_gadget.h. Each descriptor describes one ISO packet of an extended
+ * (per-packet) request.
+ */
+struct fh_iso_pkt_desc_port {
+	uint32_t offset;	/* byte offset of this packet in the buffer */
+	uint32_t length;	/* expected length */
+	uint32_t actual_length;	/* bytes actually transferred */
+	uint32_t status;	/* per-packet completion status */
+};
+
+/** Extended ISO request carrying per-packet descriptors (UTE per-IO). */
+struct fh_iso_xreq_port {
+	/** transfer/submission flag (FH_EREQ_TF_*) */
+	uint32_t tr_sub_flags;
+	/** Start the request ASAP */
+#define FH_EREQ_TF_ASAP		0x00000002
+	/** Just enqueue the request w/o initiating a transfer */
+#define FH_EREQ_TF_ENQUEUE		0x00000004
+
+	/**
+	 * count of ISO packets attached to this request - shall
+	 * not exceed the pio_alloc_pkt_count
+	 */
+	uint32_t pio_pkt_count;
+	/** count of ISO packets allocated for this request */
+	uint32_t pio_alloc_pkt_count;
+	/** number of ISO packet errors */
+	uint32_t error_count;
+	/** reserved for future extension */
+	uint32_t res;
+	/** Will be allocated and freed in the UTE gadget and based on the CFC value */
+	struct fh_iso_pkt_desc_port *per_io_frame_descs;
+};
+#endif
+/** FH_otg request structure.
+ * This structure is a list of requests.
+ */
+typedef struct fh_otg_pcd_request {
+	void *priv;		/* opaque handle supplied by the caller */
+	void *buf;		/* CPU address of the transfer buffer */
+	fh_dma_t dma;		/* DMA address of the transfer buffer */
+	uint32_t length;	/* requested transfer length in bytes */
+	uint32_t actual;	/* bytes transferred so far */
+	unsigned sent_zlp:1;	/* send a ZLP after a maxpacket-multiple transfer */
+	/**
+	 * Used instead of original buffer if
+	 * it(physical address) is not dword-aligned.
+	 **/
+	uint8_t *dw_align_buf;
+	fh_dma_t dw_align_buf_dma;	/* DMA address of the bounce buffer */
+
+	FH_CIRCLEQ_ENTRY(fh_otg_pcd_request) queue_entry;
+#ifdef FH_UTE_PER_IO
+	/* extended per-packet ISO request data */
+	struct fh_iso_xreq_port ext_req;
+	//void *priv_ereq_nport; /* */
+#endif
+} fh_otg_pcd_request_t;
+
+FH_CIRCLEQ_HEAD(req_list, fh_otg_pcd_request);
+
+/** PCD EP structure.
+ * This structure describes an EP, there is an array of EPs in the PCD
+ * structure.
+ */
+typedef struct fh_otg_pcd_ep {
+	/** USB EP Descriptor */
+	const usb_endpoint_descriptor_t *desc;
+
+	/** queue of fh_otg_pcd_requests. */
+	struct req_list queue;
+	/** EP is not accepting/starting new transfers */
+	unsigned stopped:1;
+	/** EP disable in progress */
+	unsigned disabling:1;
+	/** DMA used for this EP -- TODO confirm against callers */
+	unsigned dma:1;
+	/** NOTE(review): semantics not evident from this file */
+	unsigned queue_sof:1;
+
+#ifdef FH_EN_ISOC
+	/** ISOC req handle passed */
+	void *iso_req_handle;
+#endif				//_EN_ISOC_
+
+	/** FH_otg ep data. */
+	fh_ep_t fh_ep;
+
+	/** Pointer to PCD */
+	struct fh_otg_pcd *pcd;
+
+	/** opaque per-EP data for upper layers -- TODO confirm */
+	void *priv;
+} fh_otg_pcd_ep_t;
+
+/** FH_otg PCD Structure.
+ * This structure encapsulates the data for the fh_otg PCD.
+ */
+struct fh_otg_pcd {
+	/** Function-ops callbacks into the gadget glue layer */
+	const struct fh_otg_pcd_function_ops *fops;
+	/** The FH otg device pointer */
+	struct fh_otg_device *otg_dev;
+	/** Core Interface */
+	fh_otg_core_if_t *core_if;
+	/** State of EP0 */
+	ep0state_e ep0state;
+	/** EP0 Request is pending */
+	unsigned ep0_pending:1;
+	/** Indicates when SET CONFIGURATION Request is in process */
+	unsigned request_config:1;
+	/** The state of the Remote Wakeup Enable. */
+	unsigned remote_wakeup_enable:1;
+	/** The state of the B-Device HNP Enable. */
+	unsigned b_hnp_enable:1;
+	/** The state of A-Device HNP Support. */
+	unsigned a_hnp_support:1;
+	/** The state of the A-Device Alt HNP support. */
+	unsigned a_alt_hnp_support:1;
+	/** Count of pending Requests */
+	unsigned request_pending;
+
+	/** SETUP packet for EP0
+	 * This structure is allocated as a DMA buffer on PCD initialization
+	 * with enough space for up to 3 setup packets.
+	 */
+	union {
+		usb_device_request_t req;
+		uint32_t d32[2];
+	} *setup_pkt;
+
+	/** DMA address of setup_pkt */
+	fh_dma_t setup_pkt_dma_handle;
+
+	/* Additional buffer and flag for CTRL_WR premature case */
+	uint8_t *backup_buf;
+	/* NOTE(review): semantics of this flag are defined by the EP0
+	 * interrupt handlers, not visible in this header */
+	unsigned data_terminated;
+
+	/** 2-byte dma buffer used to return status from GET_STATUS */
+	uint16_t *status_buf;
+	fh_dma_t status_buf_dma_handle;
+
+	/** EP0 */
+	fh_otg_pcd_ep_t ep0;
+
+	/** Array of IN EPs. */
+	fh_otg_pcd_ep_t in_ep[MAX_EPS_CHANNELS - 1];
+	/** Array of OUT EPs. */
+	fh_otg_pcd_ep_t out_ep[MAX_EPS_CHANNELS - 1];
+	/** number of valid EPs in the above array. */
+// unsigned num_eps : 4;
+	/** Spinlock serialising PCD/EP queue and state updates */
+	fh_spinlock_t *lock;
+
+	/** Tasklet to defer starting of TEST mode transmissions until
+	 * Status Phase has been completed.
+	 */
+	fh_tasklet_t *test_mode_tasklet;
+
+	/** Tasklet to delay starting of xfer in DMA mode */
+	fh_tasklet_t *start_xfer_tasklet;
+
+	/** The test mode to enter when the tasklet is executed. */
+	unsigned test_mode;
+	/** The cfi_api structure that implements most of the CFI API
+	 * and OTG specific core configuration functionality
+	 */
+#ifdef FH_UTE_CFI
+	struct cfiobject *cfi;
+#endif
+
+};
+
+//FIXME this functions should be static, and this prototypes should be removed
+extern void fh_otg_request_nuke(fh_otg_pcd_ep_t * ep);
+extern void fh_otg_request_done(fh_otg_pcd_ep_t * ep,
+ fh_otg_pcd_request_t * req, int32_t status);
+
+void fh_otg_iso_buffer_done(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * ep,
+ void *req_handle);
+extern void fh_otg_pcd_start_iso_ddma(fh_otg_core_if_t * core_if,
+ fh_otg_pcd_ep_t * ep);
+
+extern void do_test_mode(void *data);
+#endif
+#endif /* FH_HOST_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_if.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_if.h
new file mode 100644
index 00000000..2bfa526f
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_if.h
@@ -0,0 +1,368 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd_if.h $
+ * $Revision: #13 $
+ * $Date: 2012/12/12 $
+ * $Change: 2125019 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_HOST_ONLY
+
+#if !defined(__FH_PCD_IF_H__)
+#define __FH_PCD_IF_H__
+
+//#include "fh_os.h"
+#include "fh_otg_core_if.h"
+
+/** @file
+ * This file defines FH_OTG PCD Core API.
+ */
+
+struct fh_otg_pcd;
+typedef struct fh_otg_pcd fh_otg_pcd_t;
+
+/** Maxpacket size for EP0 */
+#define MAX_EP0_SIZE 64
+/** Maxpacket size for any EP */
+#define MAX_PACKET_SIZE 1024
+
+/** @name Function Driver Callbacks */
+/** @{ */
+
+/** This function will be called whenever a previously queued request has
+ * completed. The status value will be set to -FH_E_SHUTDOWN to indicate a
+ * failed or aborted transfer, or -FH_E_RESTART to indicate the device was reset,
+ * or -FH_E_TIMEOUT to indicate it timed out, or -FH_E_INVALID to indicate invalid
+ * parameters. */
+typedef int (*fh_completion_cb_t) (fh_otg_pcd_t * pcd, void *ep_handle,
+ void *req_handle, int32_t status,
+ uint32_t actual);
+/**
+ * This function will be called whenever a previously queued ISOC request has
+ * completed. Count of ISOC packets could be read using fh_otg_pcd_get_iso_packet_count
+ * function.
+ * The status of each ISOC packet could be read using fh_otg_pcd_get_iso_packet_*
+ * functions.
+ */
+typedef int (*fh_isoc_completion_cb_t) (fh_otg_pcd_t * pcd, void *ep_handle,
+ void *req_handle, int proc_buf_num);
+/** This function should handle any SETUP request that cannot be handled by the
+ * PCD Core. This includes most GET_DESCRIPTORs, SET_CONFIGS, Any
+ * class-specific requests, etc. The function must be non-blocking.
+ *
+ * Returns 0 on success.
+ * Returns -FH_E_NOT_SUPPORTED if the request is not supported.
+ * Returns -FH_E_INVALID if the setup request had invalid parameters or bytes.
+ * Returns -FH_E_SHUTDOWN on any other error. */
+typedef int (*fh_setup_cb_t) (fh_otg_pcd_t * pcd, uint8_t * bytes);
+/** This is called whenever the device has been disconnected. The function
+ * driver should take appropriate action to clean up all pending requests in the
+ * PCD Core, remove all endpoints (except ep0), and initialize back to reset
+ * state. */
+typedef int (*fh_disconnect_cb_t) (fh_otg_pcd_t * pcd);
+/** This function is called when device has been connected. */
+typedef int (*fh_connect_cb_t) (fh_otg_pcd_t * pcd, int speed);
+/** This function is called when device has been suspended */
+typedef int (*fh_suspend_cb_t) (fh_otg_pcd_t * pcd);
+/** This function is called when device has received LPM tokens, i.e.
+ * device has been sent to sleep state. */
+typedef int (*fh_sleep_cb_t) (fh_otg_pcd_t * pcd);
+/** This function is called when device has been resumed
+ * from suspend(L2) or L1 sleep state. */
+typedef int (*fh_resume_cb_t) (fh_otg_pcd_t * pcd);
+/** This function is called whenever hnp params has been changed.
+ * User can call get_b_hnp_enable, get_a_hnp_support, get_a_alt_hnp_support functions
+ * to get hnp parameters. */
+typedef int (*fh_hnp_params_changed_cb_t) (fh_otg_pcd_t * pcd);
+/** This function is called whenever USB RESET is detected. */
+typedef int (*fh_reset_cb_t) (fh_otg_pcd_t * pcd);
+
+typedef int (*cfi_setup_cb_t) (fh_otg_pcd_t * pcd, void *ctrl_req_bytes);
+
+/**
+ *
+ * @param ep_handle Void pointer to the usb_ep structure
+ * @param ereq_port Pointer to the extended request structure created in the
+ * portable part.
+ */
+typedef int (*xiso_completion_cb_t) (fh_otg_pcd_t * pcd, void *ep_handle,
+ void *req_handle, int32_t status,
+ void *ereq_port);
+/** Function Driver Ops Data Structure */
+struct fh_otg_pcd_function_ops {
+ fh_connect_cb_t connect;
+ fh_disconnect_cb_t disconnect;
+ fh_setup_cb_t setup;
+ fh_completion_cb_t complete;
+ fh_isoc_completion_cb_t isoc_complete;
+ fh_suspend_cb_t suspend;
+ fh_sleep_cb_t sleep;
+ fh_resume_cb_t resume;
+ fh_reset_cb_t reset;
+ fh_hnp_params_changed_cb_t hnp_changed;
+ cfi_setup_cb_t cfi_setup;
+#ifdef FH_UTE_PER_IO
+ xiso_completion_cb_t xisoc_complete;
+#endif
+};
+/** @} */
+
+/** @name Function Driver Functions */
+/** @{ */
+
+/** Call this function to get pointer on fh_otg_pcd_t,
+ * this pointer will be used for all PCD API functions.
+ *
+ * @param core_if The FH_OTG Core
+ */
+extern fh_otg_pcd_t *fh_otg_pcd_init(fh_otg_core_if_t * core_if);
+
+/** Frees PCD allocated by fh_otg_pcd_init
+ *
+ * @param pcd The PCD
+ */
+extern void fh_otg_pcd_remove(fh_otg_pcd_t * pcd);
+
+/** Call this to bind the function driver to the PCD Core.
+ *
+ * @param pcd Pointer on fh_otg_pcd_t returned by fh_otg_pcd_init function.
+ * @param fops The Function Driver Ops data structure containing pointers to all callbacks.
+ */
+extern void fh_otg_pcd_start(fh_otg_pcd_t * pcd,
+ const struct fh_otg_pcd_function_ops *fops);
+
+/** Enables an endpoint for use. This function enables an endpoint in
+ * the PCD. The endpoint is described by the ep_desc which has the
+ * same format as a USB ep descriptor. The ep_handle parameter is used to refer
+ * to the endpoint from other API functions and in callbacks. Normally this
+ * should be called after a SET_CONFIGURATION/SET_INTERFACE to configure the
+ * core for that interface.
+ *
+ * Returns -FH_E_INVALID if invalid parameters were passed.
+ * Returns -FH_E_SHUTDOWN if any other error occurred.
+ * Returns 0 on success.
+ *
+ * @param pcd The PCD
+ * @param ep_desc Endpoint descriptor
+ * @param ep_handle Handle on endpoint, that will be used to identify endpoint.
+ */
+extern int fh_otg_pcd_ep_enable(fh_otg_pcd_t * pcd,
+ const uint8_t * ep_desc, void *ep_handle);
+
+/** Disable the endpoint referenced by ep_handle.
+ *
+ * Returns -FH_E_INVALID if invalid parameters were passed.
+ * Returns -FH_E_SHUTDOWN if any other error occurred.
+ * Returns 0 on success. */
+extern int fh_otg_pcd_ep_disable(fh_otg_pcd_t * pcd, void *ep_handle);
+
+/** Queue a data transfer request on the endpoint referenced by ep_handle.
+ * After the transfer completes, the complete callback will be called with
+ * the request status.
+ *
+ * @param pcd The PCD
+ * @param ep_handle The handle of the endpoint
+ * @param buf The buffer for the data
+ * @param dma_buf The DMA buffer for the data
+ * @param buflen The length of the data transfer
+ * @param zero Specifies whether to send zero length last packet.
+ * @param req_handle Set this handle to any value to use to reference this
+ * request in the ep_dequeue function or from the complete callback
+ * @param atomic_alloc If driver need to perform atomic allocations
+ * for internal data structures.
+ *
+ * Returns -FH_E_INVALID if invalid parameters were passed.
+ * Returns -FH_E_SHUTDOWN if any other error occurred.
+ * Returns 0 on success. */
+extern int fh_otg_pcd_ep_queue(fh_otg_pcd_t * pcd, void *ep_handle,
+ uint8_t * buf, fh_dma_t dma_buf,
+ uint32_t buflen, int zero, void *req_handle,
+ int atomic_alloc);
+#ifdef FH_UTE_PER_IO
+/**
+ *
+ * @param ereq_nonport Pointer to the extended request part of the
+ * usb_request structure defined in usb_gadget.h file.
+ */
+extern int fh_otg_pcd_xiso_ep_queue(fh_otg_pcd_t * pcd, void *ep_handle,
+ uint8_t * buf, fh_dma_t dma_buf,
+ uint32_t buflen, int zero,
+ void *req_handle, int atomic_alloc,
+ void *ereq_nonport);
+
+#endif
+
+/** De-queue the specified data transfer that has not yet completed.
+ *
+ * Returns -FH_E_INVALID if invalid parameters were passed.
+ * Returns -FH_E_SHUTDOWN if any other error occurred.
+ * Returns 0 on success. */
+extern int fh_otg_pcd_ep_dequeue(fh_otg_pcd_t * pcd, void *ep_handle,
+ void *req_handle);
+
+/** Halt (STALL) an endpoint or clear it.
+ *
+ * Returns -FH_E_INVALID if invalid parameters were passed.
+ * Returns -FH_E_SHUTDOWN if any other error occurred.
+ * Returns -FH_E_AGAIN if the STALL cannot be sent and must be tried again later
+ * Returns 0 on success. */
+extern int fh_otg_pcd_ep_halt(fh_otg_pcd_t * pcd, void *ep_handle, int value);
+
+/** This function should be called on every hardware interrupt */
+extern int32_t fh_otg_pcd_handle_intr(fh_otg_pcd_t * pcd);
+
+/** This function returns current frame number */
+extern int fh_otg_pcd_get_frame_number(fh_otg_pcd_t * pcd);
+
+/**
+ * Start isochronous transfers on the endpoint referenced by ep_handle.
+ * For isochronous transfers double buffering is used.
+ * After processing each of the buffers the complete callback will be called with
+ * status for each transaction.
+ *
+ * @param pcd The PCD
+ * @param ep_handle The handle of the endpoint
+ * @param buf0 The virtual address of first data buffer
+ * @param buf1 The virtual address of second data buffer
+ * @param dma0 The DMA address of first data buffer
+ * @param dma1 The DMA address of second data buffer
+ * @param sync_frame Data pattern frame number
+ * @param dp_frame Data size for pattern frame
+ * @param data_per_frame Data size for regular frame
+ * @param start_frame Frame number to start transfers, if -1 then start transfers ASAP.
+ * @param buf_proc_intrvl Interval of ISOC Buffer processing
+ * @param req_handle Handle of ISOC request
+ * @param atomic_alloc Specifies whether to perform atomic allocation for
+ * internal data structures.
+ *
+ * Returns -FH_E_NO_MEMORY if there is not enough memory.
+ * Returns -FH_E_INVALID if incorrect arguments are passed to the function.
+ * Returns -DW_E_SHUTDOWN for any other error.
+ * Returns 0 on success
+ */
+//extern int fh_otg_pcd_iso_ep_start(fh_otg_pcd_t * pcd, void *ep_handle,
+// uint8_t * buf0, uint8_t * buf1,
+// fh_dma_t dma0, fh_dma_t dma1,
+// int sync_frame, int dp_frame,
+// int data_per_frame, int start_frame,
+// int buf_proc_intrvl, void *req_handle,
+// int atomic_alloc);
+
+/** Stop ISOC transfers on endpoint referenced by ep_handle.
+ *
+ * @param pcd The PCD
+ * @param ep_handle The handle of the endpoint
+ * @param req_handle Handle of ISOC request
+ *
+ * Returns -FH_E_INVALID if incorrect arguments are passed to the function
+ * Returns 0 on success
+ */
+int fh_otg_pcd_iso_ep_stop(fh_otg_pcd_t * pcd, void *ep_handle,
+ void *req_handle);
+
+/** Get ISOC packet status.
+ *
+ * @param pcd The PCD
+ * @param ep_handle The handle of the endpoint
+ * @param iso_req_handle Isochronous request handle
+ * @param packet Number of packet
+ * @param status Out parameter for returning status
+ * @param actual Out parameter for returning actual length
+ * @param offset Out parameter for returning offset
+ *
+ */
+extern void fh_otg_pcd_get_iso_packet_params(fh_otg_pcd_t * pcd,
+ void *ep_handle,
+ void *iso_req_handle, int packet,
+ int *status, int *actual,
+ int *offset);
+
+/** Get ISOC packet count.
+ *
+ * @param pcd The PCD
+ * @param ep_handle The handle of the endpoint
+ * @param iso_req_handle
+ */
+extern int fh_otg_pcd_get_iso_packet_count(fh_otg_pcd_t * pcd,
+ void *ep_handle,
+ void *iso_req_handle);
+
+/** This function starts the SRP Protocol if no session is in progress. If
+ * a session is already in progress, but the device is suspended,
+ * remote wakeup signaling is started.
+ */
+extern int fh_otg_pcd_wakeup(fh_otg_pcd_t * pcd);
+
+/** This function returns 1 if LPM support is enabled, and 0 otherwise. */
+extern int fh_otg_pcd_is_lpm_enabled(fh_otg_pcd_t * pcd);
+
+/** This function returns 1 if LPM Errata support is enabled, and 0 otherwise. */
+extern int fh_otg_pcd_is_besl_enabled(fh_otg_pcd_t * pcd);
+
+/** This function returns the baseline_besl module parameter. */
+extern int fh_otg_pcd_get_param_baseline_besl(fh_otg_pcd_t * pcd);
+
+/** This function returns the deep_besl module parameter. */
+extern int fh_otg_pcd_get_param_deep_besl(fh_otg_pcd_t * pcd);
+
+/** This function returns 1 if remote wakeup is allowed and 0, otherwise. */
+extern int fh_otg_pcd_get_rmwkup_enable(fh_otg_pcd_t * pcd);
+
+/** Initiate SRP */
+extern void fh_otg_pcd_initiate_srp(fh_otg_pcd_t * pcd);
+
+/** Starts remote wakeup signaling. */
+extern void fh_otg_pcd_remote_wakeup(fh_otg_pcd_t * pcd, int set);
+
+/** Starts microsecond soft disconnect. */
+extern void fh_otg_pcd_disconnect_us(fh_otg_pcd_t * pcd, int no_of_usecs);
+/** soft ctrl disconnect. */
+extern void fh_otg_pcd_disconnect_soft(fh_otg_pcd_t *pcd, int ctrl_flag);
+/** This function returns whether device is dualspeed.*/
+extern uint32_t fh_otg_pcd_is_dualspeed(fh_otg_pcd_t * pcd);
+
+/** This function returns whether device is otg. */
+extern uint32_t fh_otg_pcd_is_otg(fh_otg_pcd_t * pcd);
+
+/** These functions allow to get hnp parameters */
+extern uint32_t get_b_hnp_enable(fh_otg_pcd_t * pcd);
+extern uint32_t get_a_hnp_support(fh_otg_pcd_t * pcd);
+extern uint32_t get_a_alt_hnp_support(fh_otg_pcd_t * pcd);
+
+/** CFI specific Interface functions */
+/** Allocate a cfi buffer */
+//extern uint8_t *cfiw_ep_alloc_buffer(fh_otg_pcd_t * pcd, void *pep,
+// fh_dma_t * addr, size_t buflen,
+// int flags);
+
+/******************************************************************************/
+
+/** @} */
+
+#endif /* __FH_PCD_IF_H__ */
+
+#endif /* FH_HOST_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_intr.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_intr.c
new file mode 100644
index 00000000..9b51a3cd
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_intr.c
@@ -0,0 +1,5430 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd_intr.c $
+ * $Revision: #126 $
+ * $Date: 2014/08/25 $
+ * $Change: 2595073 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_HOST_ONLY
+
+#include "fh_otg_pcd.h"
+
+#ifdef FH_UTE_CFI
+#include "fh_otg_cfi.h"
+#endif
+
+#ifdef FH_UTE_PER_IO
+extern void complete_xiso_ep(fh_otg_pcd_ep_t * ep);
+#endif
+//#define PRINT_CFI_DMA_DESCS
+
+#define DEBUG_EP0
+
+/**
+ * This function updates OTG.
+ */
+static void fh_otg_pcd_update_otg(fh_otg_pcd_t * pcd, const unsigned reset)
+{
+
+ if (reset) {
+ pcd->b_hnp_enable = 0;
+ pcd->a_hnp_support = 0;
+ pcd->a_alt_hnp_support = 0;
+ }
+
+ if (pcd->fops->hnp_changed) {
+ pcd->fops->hnp_changed(pcd);
+ }
+}
+
+/** @file
+ * This file contains the implementation of the PCD Interrupt handlers.
+ *
+ * The PCD handles the device interrupts. Many conditions can cause a
+ * device interrupt. When an interrupt occurs, the device interrupt
+ * service routine determines the cause of the interrupt and
+ * dispatches handling to the appropriate function. These interrupt
+ * handling functions are described below.
+ * All interrupt registers are processed from LSB to MSB.
+ */
+
+/**
+ * This function prints the ep0 state for debug purposes.
+ */
+static inline void print_ep0_state(fh_otg_pcd_t * pcd)
+{
+#ifdef DEBUG
+ char str[40];
+
+ switch (pcd->ep0state) {
+ case EP0_DISCONNECT:
+ fh_strcpy(str, "EP0_DISCONNECT");
+ break;
+ case EP0_IDLE:
+ fh_strcpy(str, "EP0_IDLE");
+ break;
+ case EP0_IN_DATA_PHASE:
+ fh_strcpy(str, "EP0_IN_DATA_PHASE");
+ break;
+ case EP0_OUT_DATA_PHASE:
+ fh_strcpy(str, "EP0_OUT_DATA_PHASE");
+ break;
+ case EP0_IN_STATUS_PHASE:
+ fh_strcpy(str, "EP0_IN_STATUS_PHASE");
+ break;
+ case EP0_OUT_STATUS_PHASE:
+ fh_strcpy(str, "EP0_OUT_STATUS_PHASE");
+ break;
+ case EP0_STALL:
+ fh_strcpy(str, "EP0_STALL");
+ break;
+ default:
+ fh_strcpy(str, "EP0_INVALID");
+ }
+
+ FH_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state);
+#endif
+}
+
+/**
+ * This function calculate the size of the payload in the memory
+ * for out endpoints and prints size for debug purposes(used in
+ * 2.93a DevOutNak feature).
+ */
+static inline void print_memory_payload(fh_otg_pcd_t * pcd, fh_ep_t * ep)
+{
+#ifdef DEBUG
+ deptsiz_data_t deptsiz_init = {.d32 = 0 };
+ deptsiz_data_t deptsiz_updt = {.d32 = 0 };
+ int pack_num;
+ unsigned payload;
+
+ deptsiz_init.d32 = pcd->core_if->start_doeptsiz_val[ep->num];
+ deptsiz_updt.d32 =
+ FH_READ_REG32(&pcd->core_if->dev_if->
+ out_ep_regs[ep->num]->doeptsiz);
+ /* Payload will be */
+ payload = deptsiz_init.b.xfersize - deptsiz_updt.b.xfersize;
+ /* Packet count is decremented every time a packet
+ * is written to the RxFIFO not in to the external memory
+ * So, if payload == 0, then it means no packet was sent to ext memory*/
+ pack_num = (!payload) ? 0 : (deptsiz_init.b.pktcnt - deptsiz_updt.b.pktcnt);
+ FH_DEBUGPL(DBG_PCDV,
+ "Payload for EP%d-%s\n",
+ ep->num, (ep->is_in ? "IN" : "OUT"));
+ FH_DEBUGPL(DBG_PCDV,
+ "Number of transfered bytes = 0x%08x\n", payload);
+ FH_DEBUGPL(DBG_PCDV,
+ "Number of transfered packets = %d\n", pack_num);
+#endif
+}
+
+
+#ifdef FH_UTE_CFI
+static inline void print_desc(struct fh_otg_dma_desc *ddesc,
+ const uint8_t * epname, int descnum)
+{
+ CFI_INFO
+ ("%s DMA_DESC(%d) buf=0x%08x bytes=0x%04x; sp=0x%x; l=0x%x; sts=0x%02x; bs=0x%02x\n",
+ epname, descnum, ddesc->buf, ddesc->status.b.bytes,
+ ddesc->status.b.sp, ddesc->status.b.l, ddesc->status.b.sts,
+ ddesc->status.b.bs);
+}
+#endif
+
+/**
+ * This function returns pointer to in ep struct with number ep_num
+ */
+static inline fh_otg_pcd_ep_t *get_in_ep(fh_otg_pcd_t * pcd, uint32_t ep_num)
+{
+ int i;
+ int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
+ if (ep_num == 0) {
+ return &pcd->ep0;
+ } else {
+ for (i = 0; i < num_in_eps; ++i) {
+ if (pcd->in_ep[i].fh_ep.num == ep_num)
+ return &pcd->in_ep[i];
+ }
+ return 0;
+ }
+}
+
+/**
+ * This function returns pointer to out ep struct with number ep_num
+ */
+static inline fh_otg_pcd_ep_t *get_out_ep(fh_otg_pcd_t * pcd, uint32_t ep_num)
+{
+ int i;
+ int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
+ if (ep_num == 0) {
+ return &pcd->ep0;
+ } else {
+ for (i = 0; i < num_out_eps; ++i) {
+ if (pcd->out_ep[i].fh_ep.num == ep_num)
+ return &pcd->out_ep[i];
+ }
+ return 0;
+ }
+}
+
+/**
+ * This functions gets a pointer to an EP from the wIndex address
+ * value of the control request.
+ */
+fh_otg_pcd_ep_t *get_ep_by_addr(fh_otg_pcd_t * pcd, u16 wIndex)
+{
+ fh_otg_pcd_ep_t *ep;
+ uint32_t ep_num = UE_GET_ADDR(wIndex);
+
+ if (ep_num == 0) {
+ ep = &pcd->ep0;
+ } else if (UE_GET_DIR(wIndex) == UE_DIR_IN) { /* in ep */
+ ep = &pcd->in_ep[ep_num - 1];
+ } else {
+ ep = &pcd->out_ep[ep_num - 1];
+ }
+
+ return ep;
+}
+
+/**
+ * This function checks the EP request queue, if the queue is not
+ * empty the next request is started.
+ */
+void start_next_request(fh_otg_pcd_ep_t * ep)
+{
+ fh_otg_pcd_request_t *req = 0;
+ uint32_t max_transfer =
+ GET_CORE_IF(ep->pcd)->core_params->max_transfer_size;
+
+#ifdef FH_UTE_CFI
+ struct fh_otg_pcd *pcd;
+ pcd = ep->pcd;
+#endif
+
+ if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+
+#ifdef FH_UTE_CFI
+ if (ep->fh_ep.buff_mode != BM_STANDARD) {
+ ep->fh_ep.cfi_req_len = req->length;
+ pcd->cfi->ops.build_descriptors(pcd->cfi, pcd, ep, req);
+ } else {
+#endif
+ /* Setup and start the Transfer */
+ if (req->dw_align_buf) {
+ ep->fh_ep.dma_addr = req->dw_align_buf_dma;
+ ep->fh_ep.start_xfer_buff = req->dw_align_buf;
+ ep->fh_ep.xfer_buff = req->dw_align_buf;
+ } else {
+ ep->fh_ep.dma_addr = req->dma;
+ ep->fh_ep.start_xfer_buff = req->buf;
+ ep->fh_ep.xfer_buff = req->buf;
+ }
+ ep->fh_ep.sent_zlp = 0;
+ ep->fh_ep.total_len = req->length;
+ ep->fh_ep.xfer_len = 0;
+ ep->fh_ep.xfer_count = 0;
+
+ ep->fh_ep.maxxfer = max_transfer;
+ if (GET_CORE_IF(ep->pcd)->dma_desc_enable) {
+ uint32_t out_max_xfer = DDMA_MAX_TRANSFER_SIZE
+ - (DDMA_MAX_TRANSFER_SIZE % 4);
+ if (ep->fh_ep.is_in) {
+ if (ep->fh_ep.maxxfer >
+ DDMA_MAX_TRANSFER_SIZE) {
+ ep->fh_ep.maxxfer =
+ DDMA_MAX_TRANSFER_SIZE;
+ }
+ } else {
+ if (ep->fh_ep.maxxfer > out_max_xfer) {
+ ep->fh_ep.maxxfer =
+ out_max_xfer;
+ }
+ }
+ }
+ if (ep->fh_ep.maxxfer < ep->fh_ep.total_len) {
+ ep->fh_ep.maxxfer -=
+ (ep->fh_ep.maxxfer % ep->fh_ep.maxpacket);
+ }
+ if (req->sent_zlp) {
+ if ((ep->fh_ep.total_len %
+ ep->fh_ep.maxpacket == 0)
+ && (ep->fh_ep.total_len != 0)) {
+ ep->fh_ep.sent_zlp = 1;
+ }
+
+ }
+#ifdef FH_UTE_CFI
+ }
+#endif
+ fh_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->fh_ep);
+ } else if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
+ diepmsk_data_t intr_mask = {.d32 = 0 };
+
+ intr_mask.b.nak = 1;
+
+ if (GET_CORE_IF(ep->pcd)->multiproc_int_enable) {
+ FH_MODIFY_REG32(&GET_CORE_IF(ep->pcd)->dev_if->dev_global_regs->
+ diepeachintmsk[ep->fh_ep.num], intr_mask.d32, 0);
+ } else {
+ FH_MODIFY_REG32(&GET_CORE_IF(ep->pcd)->dev_if->dev_global_regs->diepmsk,
+ intr_mask.d32, 0);
+ }
+ FH_PRINTF("There are no more ISOC requests \n");
+ ep->fh_ep.frame_num = 0xFFFFFFFF;
+ }
+}
+
+/**
+ * This function handles the SOF Interrupts. At this time the SOF
+ * Interrupt is disabled.
+ */
+int32_t fh_otg_pcd_handle_sof_intr(fh_otg_pcd_t * pcd)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+
+ gintsts_data_t gintsts;
+
+ FH_DEBUGPL(DBG_PCD, "SOF\n");
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.sofintr = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This function handles the Rx Status Queue Level Interrupt, which
+ * indicates that there is a least one packet in the Rx FIFO. The
+ * packets are moved from the FIFO to memory, where they will be
+ * processed when the Endpoint Interrupt Register indicates Transfer
+ * Complete or SETUP Phase Done.
+ *
+ * Repeat the following until the Rx Status Queue is empty:
+ * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet
+ * info
+ * -# If Receive FIFO is empty then skip to step Clear the interrupt
+ * and exit
+ * -# If SETUP Packet call fh_otg_read_setup_packet to copy the
+ * SETUP data to the buffer
+ * -# If OUT Data Packet call fh_otg_read_packet to copy the data
+ * to the destination buffer
+ */
+int32_t fh_otg_pcd_handle_rx_status_q_level_intr(fh_otg_pcd_t * pcd)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+ gintmsk_data_t gintmask = {.d32 = 0 };
+ device_grxsts_data_t status;
+ fh_otg_pcd_ep_t *ep;
+ gintsts_data_t gintsts;
+#ifdef DEBUG
+ static char *dpid_str[] = { "D0", "D2", "D1", "MDATA" };
+#endif
+
+ //FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd);
+ /* Disable the Rx Status Queue Level interrupt */
+ gintmask.b.rxstsqlvl = 1;
+ FH_MODIFY_REG32(&global_regs->gintmsk, gintmask.d32, 0);
+
+ /* Get the Status from the top of the FIFO */
+ status.d32 = FH_READ_REG32(&global_regs->grxstsp);
+
+ FH_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s "
+ "pktsts:%x Frame:%d(0x%0x)\n",
+ status.b.epnum, status.b.bcnt,
+ dpid_str[status.b.dpid],
+ status.b.pktsts, status.b.fn, status.b.fn);
+ /* Get pointer to EP structure */
+ ep = get_out_ep(pcd, status.b.epnum);
+
+ switch (status.b.pktsts) {
+ case FH_DSTS_GOUT_NAK:
+ FH_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
+ break;
+ case FH_STS_DATA_UPDT:
+ FH_DEBUGPL(DBG_PCDV, "OUT Data Packet\n");
+ if (status.b.bcnt && ep->fh_ep.xfer_buff) {
+ /** @todo NGS Check for buffer overflow? */
+ fh_otg_read_packet(core_if,
+ ep->fh_ep.xfer_buff,
+ status.b.bcnt);
+ ep->fh_ep.xfer_count += status.b.bcnt;
+ ep->fh_ep.xfer_buff += status.b.bcnt;
+ }
+ break;
+ case FH_STS_XFER_COMP:
+ FH_DEBUGPL(DBG_PCDV, "OUT Complete\n");
+ break;
+ case FH_DSTS_SETUP_COMP:
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCDV, "Setup Complete\n");
+#endif
+ break;
+ case FH_DSTS_SETUP_UPDT:
+ fh_otg_read_setup_packet(core_if, pcd->setup_pkt->d32);
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCD,
+ "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n",
+ pcd->setup_pkt->req.bmRequestType,
+ pcd->setup_pkt->req.bRequest,
+ UGETW(pcd->setup_pkt->req.wValue),
+ UGETW(pcd->setup_pkt->req.wIndex),
+ UGETW(pcd->setup_pkt->req.wLength));
+#endif
+ ep->fh_ep.xfer_count += status.b.bcnt;
+ break;
+ default:
+ FH_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n",
+ status.b.pktsts);
+ break;
+ }
+
+ /* Enable the Rx Status Queue Level interrupt */
+ FH_MODIFY_REG32(&global_regs->gintmsk, 0, gintmask.d32);
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.rxstsqlvl = 1;
+ FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+ //FH_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__);
+ return 1;
+}
+
+/**
+ * This function examines the Device IN Token Learning Queue to
+ * determine the EP number of the last IN token received. This
+ * implementation is for the Mass Storage device where there are only
+ * 2 IN EPs (Control-IN and BULK-IN).
+ *
+ * The EP numbers for the first six IN Tokens are in DTKNQR1 and there
+ * are 8 EP Numbers in each of the other possible DTKNQ Registers.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ *
+ */
+static inline int get_ep_of_last_in_token(fh_otg_core_if_t * core_if)
+{
+ fh_otg_device_global_regs_t *dev_global_regs =
+ core_if->dev_if->dev_global_regs;
+ const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
+ /* Number of Token Queue Registers */
+ const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
+ dtknq1_data_t dtknqr1;
+ uint32_t in_tkn_epnums[4];
+ int ndx = 0;
+ int i = 0;
+ volatile uint32_t *addr = &dev_global_regs->dtknqr1;
+ int epnum = 0;
+
+ //FH_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH);
+
+ /* Read the DTKNQ Registers */
+ for (i = 0; i < DTKNQ_REG_CNT; i++) {
+ in_tkn_epnums[i] = FH_READ_REG32(addr);
+ FH_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
+ in_tkn_epnums[i]);
+ if (addr == &dev_global_regs->dvbusdis) {
+ addr = &dev_global_regs->dtknqr3_dthrctl;
+ } else {
+ ++addr;
+ }
+
+ }
+
+ /* Copy the DTKNQR1 data to the bit field. */
+ dtknqr1.d32 = in_tkn_epnums[0];
+ /* Get the EP numbers */
+ in_tkn_epnums[0] = dtknqr1.b.epnums0_5;
+ ndx = dtknqr1.b.intknwptr - 1;
+
+ //FH_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx);
+ if (ndx == -1) {
+ /** @todo Find a simpler way to calculate the max
+ * queue position.*/
+ int cnt = TOKEN_Q_DEPTH;
+ if (TOKEN_Q_DEPTH <= 6) {
+ cnt = TOKEN_Q_DEPTH - 1;
+ } else if (TOKEN_Q_DEPTH <= 14) {
+ cnt = TOKEN_Q_DEPTH - 7;
+ } else if (TOKEN_Q_DEPTH <= 22) {
+ cnt = TOKEN_Q_DEPTH - 15;
+ } else {
+ cnt = TOKEN_Q_DEPTH - 23;
+ }
+ epnum = (in_tkn_epnums[DTKNQ_REG_CNT - 1] >> (cnt * 4)) & 0xF;
+ } else {
+ if (ndx <= 5) {
+ epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF;
+ } else if (ndx <= 13) {
+ ndx -= 6;
+ epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF;
+ } else if (ndx <= 21) {
+ ndx -= 14;
+ epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF;
+ } else if (ndx <= 29) {
+ ndx -= 22;
+ epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF;
+ }
+ }
+ //FH_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum);
+ return epnum;
+}
+
+/**
+ * This interrupt occurs when the non-periodic Tx FIFO is half-empty.
+ * The active request is checked for the next packet to be loaded into
+ * the non-periodic Tx FIFO.
+ */
+int32_t fh_otg_pcd_handle_np_tx_fifo_empty_intr(fh_otg_pcd_t * pcd)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+ fh_otg_dev_in_ep_regs_t *ep_regs;
+ gnptxsts_data_t txstatus = {.d32 = 0 };
+ gintsts_data_t gintsts;
+
+ int epnum = 0;
+ fh_otg_pcd_ep_t *ep = 0;
+ uint32_t len = 0;
+ int dwords;
+
+ /* Get the epnum from the IN Token Learning Queue. */
+ epnum = get_ep_of_last_in_token(core_if);
+ ep = get_in_ep(pcd, epnum);
+
+ FH_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %d \n", epnum);
+
+ ep_regs = core_if->dev_if->in_ep_regs[epnum];
+
+ len = ep->fh_ep.xfer_len - ep->fh_ep.xfer_count;
+ if (len > ep->fh_ep.maxpacket) {
+ len = ep->fh_ep.maxpacket;
+ }
+ dwords = (len + 3) / 4;
+
+ /* While there is space in the queue and space in the FIFO and
+ * more data to transfer, write packets to the Tx FIFO */
+ txstatus.d32 = FH_READ_REG32(&global_regs->gnptxsts);
+ FH_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n", txstatus.d32);
+
+ while (txstatus.b.nptxqspcavail > 0 &&
+ txstatus.b.nptxfspcavail > dwords &&
+ ep->fh_ep.xfer_count < ep->fh_ep.xfer_len) {
+ /* Write the FIFO */
+ fh_otg_ep_write_packet(core_if, &ep->fh_ep, 0);
+ len = ep->fh_ep.xfer_len - ep->fh_ep.xfer_count;
+
+ if (len > ep->fh_ep.maxpacket) {
+ len = ep->fh_ep.maxpacket;
+ }
+
+ dwords = (len + 3) / 4;
+ txstatus.d32 = FH_READ_REG32(&global_regs->gnptxsts);
+ FH_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", txstatus.d32);
+ }
+
+ FH_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n",
+ FH_READ_REG32(&global_regs->gnptxsts));
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.nptxfempty = 1;
+ FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This function is called when dedicated Tx FIFO Empty interrupt occurs.
+ * The active request is checked for the next packet to be loaded into
+ * the appropriate Tx FIFO.
+ */
+static int32_t write_empty_tx_fifo(fh_otg_pcd_t * pcd, uint32_t epnum)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ fh_otg_dev_in_ep_regs_t *ep_regs;
+ dtxfsts_data_t txstatus = {.d32 = 0 };
+ fh_otg_pcd_ep_t *ep = 0;
+ uint32_t len = 0;
+ int dwords;
+
+ ep = get_in_ep(pcd, epnum);
+
+ FH_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %d \n", epnum);
+
+ ep_regs = core_if->dev_if->in_ep_regs[epnum];
+
+ len = ep->fh_ep.xfer_len - ep->fh_ep.xfer_count;
+
+ if (len > ep->fh_ep.maxpacket) {
+ len = ep->fh_ep.maxpacket;
+ }
+
+ dwords = (len + 3) / 4; /* bytes rounded up to 32-bit FIFO words */
+
+ /* While there is space in the queue and space in the FIFO and
+ * More data to transfer, Write packets to the Tx FIFO */
+ txstatus.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
+ FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
+
+ while (txstatus.b.txfspcavail >= dwords &&
+ ep->fh_ep.xfer_count < ep->fh_ep.xfer_len &&
+ ep->fh_ep.xfer_len != 0) {
+ /* Write the FIFO */
+ fh_otg_ep_write_packet(core_if, &ep->fh_ep, 0);
+
+ len = ep->fh_ep.xfer_len - ep->fh_ep.xfer_count;
+ if (len > ep->fh_ep.maxpacket) {
+ len = ep->fh_ep.maxpacket;
+ }
+
+ dwords = (len + 3) / 4; /* recompute word count for the next packet */
+ txstatus.d32 =
+ FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
+ FH_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
+ txstatus.d32);
+ }
+
+ FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum,
+ FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts)); /* NOTE(review): "b4" label is stale - this prints after the writes */
+
+ return 1;
+}
+
+/**
+ * This function is called when the Device is disconnected. It stops
+ * any active requests and informs the Gadget driver of the
+ * disconnect.
+ */
+void fh_otg_pcd_stop(fh_otg_pcd_t * pcd)
+{
+ int i, num_in_eps, num_out_eps;
+ fh_otg_pcd_ep_t *ep;
+
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+
+ FH_SPINLOCK(pcd->lock);
+
+ num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
+ num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
+
+ FH_DEBUGPL(DBG_PCDV, "%s() \n", __func__);
+ /* don't disconnect drivers more than once */
+ if (pcd->ep0state == EP0_DISCONNECT) {
+ FH_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__);
+ FH_SPINUNLOCK(pcd->lock);
+ return;
+ }
+ pcd->ep0state = EP0_DISCONNECT;
+
+ /* Reset the OTG state. */
+ fh_otg_pcd_update_otg(pcd, 1);
+
+ /* Disable the NP Tx Fifo Empty Interrupt. */
+ intr_mask.b.nptxfempty = 1;
+ FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+ intr_mask.d32, 0);
+
+ /* Flush the FIFOs */
+ /**@todo NGS Flush Periodic FIFOs */
+ fh_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10); /* 0x10: flush all Tx FIFOs (GRSTCTL.TxFNum) - confirm against databook */
+ fh_otg_flush_rx_fifo(GET_CORE_IF(pcd));
+
+ /* prevent new request submissions, kill any outstanding requests on EP0 */
+ ep = &pcd->ep0;
+ fh_otg_request_nuke(ep);
+ /* likewise for every IN endpoint */
+ for (i = 0; i < num_in_eps; i++) {
+ fh_otg_pcd_ep_t *ep = &pcd->in_ep[i];
+ fh_otg_request_nuke(ep);
+ }
+ /* likewise for every OUT endpoint */
+ for (i = 0; i < num_out_eps; i++) {
+ fh_otg_pcd_ep_t *ep = &pcd->out_ep[i];
+ fh_otg_request_nuke(ep);
+ }
+
+ /* report disconnect; the driver is already quiesced */
+ if (pcd->fops->disconnect) {
+ FH_SPINUNLOCK(pcd->lock);
+ pcd->fops->disconnect(pcd);
+ FH_SPINLOCK(pcd->lock);
+ }
+ FH_SPINUNLOCK(pcd->lock);
+}
+
+/**
+ * This interrupt indicates an I2C interface event. Not implemented:
+ * the handler only masks and acknowledges the interrupt.
+ */
+int32_t fh_otg_pcd_handle_i2c_intr(fh_otg_pcd_t * pcd)
+{
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+ gintsts_data_t gintsts;
+
+ FH_PRINTF("INTERRUPT Handler not implemented for %s\n", "i2cintr");
+ intr_mask.b.i2cintr = 1;
+ FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+ intr_mask.d32, 0); /* mask further I2C interrupts */
+
+ /* Acknowledge (clear) the I2C interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.i2cintr = 1;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+ return 1;
+}
+
+/**
+ * This interrupt indicates an early suspend condition on the bus. The
+ * handler only acknowledges the interrupt.
+ */
+int32_t fh_otg_pcd_handle_early_suspend_intr(fh_otg_pcd_t * pcd)
+{
+ gintsts_data_t gintsts;
+#if defined(VERBOSE)
+ FH_PRINTF("Early Suspend Detected\n");
+#endif
+
+ /* Acknowledge (clear) the early suspend interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.erlysuspend = 1;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+ return 1;
+}
+
+/**
+ * This function configures EP0 to receive SETUP packets.
+ *
+ * @todo NGS: Update the comments from the HW FS.
+ *
+ * -# Program the following fields in the endpoint specific registers
+ * for Control OUT EP 0, in order to receive a setup packet
+ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
+ * setup packets)
+ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
+ * to back setup packets)
+ * - In DMA mode, DOEPDMA0 Register with a memory address to
+ * store any setup packets received
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param pcd Programming view of the PCD.
+ */
+static inline void ep0_out_start(fh_otg_core_if_t * core_if,
+ fh_otg_pcd_t * pcd)
+{
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ deptsiz0_data_t doeptsize0 = {.d32 = 0 };
+ fh_otg_dev_dma_desc_t *dma_desc;
+ depctl_data_t doepctl = {.d32 = 0 };
+
+#ifdef VERBOSE
+ FH_DEBUGPL(DBG_PCDV, "%s() doepctl0=%0x\n", __func__,
+ FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
+#endif
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
+ doepctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl);
+ if (doepctl.b.epena) {
+ return; /* EP0 OUT already armed - nothing to do */
+ }
+ }
+
+ doeptsize0.b.supcnt = 3;
+ doeptsize0.b.pktcnt = 1;
+ doeptsize0.b.xfersize = 8 * 3; /* room for 3 back-to-back 8-byte SETUPs */
+
+ if (core_if->dma_enable) {
+ if (!core_if->dma_desc_enable) {
+ /** put here as for Hermes mode the deptsiz register should not be written */
+ FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doeptsiz,
+ doeptsize0.d32);
+
+ /** @todo dma needs to handle multiple setup packets (up to 3) */
+ FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doepdma,
+ pcd->setup_pkt_dma_handle);
+ } else {
+ dev_if->setup_desc_index =
+ (dev_if->setup_desc_index + 1) & 1; /* ping-pong between the two setup descriptors */
+ dma_desc =
+ dev_if->setup_desc_addr[dev_if->setup_desc_index];
+
+ /** DMA Descriptor Setup */
+ dma_desc->status.b.bs = BS_HOST_BUSY;
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
+ dma_desc->status.b.sr = 0;
+ dma_desc->status.b.mtrf = 0;
+ }
+ dma_desc->status.b.l = 1;
+ dma_desc->status.b.ioc = 1;
+ dma_desc->status.b.bytes = pcd->ep0.fh_ep.maxpacket;
+ dma_desc->buf = pcd->setup_pkt_dma_handle;
+ dma_desc->status.b.sts = 0;
+ dma_desc->status.b.bs = BS_HOST_READY; /* hand descriptor to the core last */
+
+ /** DOEPDMA0 Register write */
+ FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doepdma,
+ dev_if->dma_setup_desc_addr
+ [dev_if->setup_desc_index]);
+ }
+
+ } else {
+ /** put here as for Hermes mode the deptsiz register should not be written */
+ FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doeptsiz,
+ doeptsize0.d32);
+ }
+
+ /** DOEPCTL0 Register write cnak will be set after setup interrupt */
+ doepctl.d32 = 0;
+ doepctl.b.epena = 1;
+ if (core_if->snpsid <= OTG_CORE_REV_2_94a) {
+ doepctl.b.cnak = 1;
+ FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
+ } else {
+ FH_MODIFY_REG32(&dev_if->out_ep_regs[0]->doepctl, 0, doepctl.d32);
+ }
+
+#ifdef VERBOSE
+ FH_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
+ FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
+ FH_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
+ FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl));
+#endif
+}
+
+/**
+ * This interrupt occurs when a USB Reset is detected. When the USB
+ * Reset Interrupt occurs the device state is set to DEFAULT and the
+ * EP0 state is set to IDLE.
+ * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1)
+ * -# Unmask the following interrupt bits
+ * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint)
+ * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint)
+ * - DOEPMSK.SETUP = 1
+ * - DOEPMSK.XferCompl = 1
+ * - DIEPMSK.XferCompl = 1
+ * - DIEPMSK.TimeOut = 1
+ * -# Program the following fields in the endpoint specific registers
+ * for Control OUT EP 0, in order to receive a setup packet
+ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
+ * setup packets)
+ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
+ * to back setup packets)
+ * - In DMA mode, DOEPDMA0 Register with a memory address to
+ * store any setup packets received
+ * At this point, all the required initialization, except for enabling
+ * the control 0 OUT endpoint is done, for receiving SETUP packets.
+ */
+int32_t fh_otg_pcd_handle_usb_reset_intr(fh_otg_pcd_t * pcd)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ depctl_data_t doepctl = {.d32 = 0 };
+ depctl_data_t diepctl = {.d32 = 0 };
+ daint_data_t daintmsk = {.d32 = 0 };
+ doepmsk_data_t doepmsk = {.d32 = 0 };
+ diepmsk_data_t diepmsk = {.d32 = 0 };
+ dcfg_data_t dcfg = {.d32 = 0 };
+ grstctl_t resetctl = {.d32 = 0 };
+ dctl_data_t dctl = {.d32 = 0 };
+ int i = 0;
+ gintsts_data_t gintsts;
+ pcgcctl_data_t power = {.d32 = 0 };
+
+ /* If the core was power-gated, ungate clocks and remove the power clamps */
+ power.d32 = FH_READ_REG32(core_if->pcgcctl);
+ if (power.b.stoppclk) {
+ power.d32 = 0;
+ power.b.stoppclk = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
+
+ power.b.pwrclmp = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
+
+ power.b.rstpdwnmodule = 1;
+ FH_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
+ }
+
+ core_if->lx_state = FH_OTG_L0;
+ core_if->otg_sts = 0;
+
+ FH_PRINTF("USB RESET\n");
+#ifdef FH_EN_ISOC
+ for (i = 1; i < 16; ++i) {
+ fh_otg_pcd_ep_t *ep;
+ fh_ep_t *fh_ep;
+ ep = get_in_ep(pcd, i);
+ if (ep != 0) {
+ fh_ep = &ep->fh_ep;
+ fh_ep->next_frame = 0xffffffff;
+ }
+ }
+#endif /* FH_EN_ISOC */
+
+ /* reset the HNP settings */
+ fh_otg_pcd_update_otg(pcd, 1);
+
+ /* Clear the Remote Wakeup Signalling */
+ dctl.b.rmtwkupsig = 1;
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
+
+ /* Set NAK for all OUT EPs */
+ doepctl.b.snak = 1;
+ for (i = 0; i <= dev_if->num_out_eps; i++) { /* <= : includes EP0 */
+ FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
+ }
+
+ /* Flush the NP Tx FIFO */
+ fh_otg_flush_tx_fifo(core_if, 0x10); /* 0x10: flush all Tx FIFOs - confirm against databook */
+ /* Flush the Learning Queue */
+ resetctl.b.intknqflsh = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
+
+ if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable) {
+ core_if->start_predict = 0;
+ for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
+ core_if->nextep_seq[i] = 0xff; // 0xff - EP not active
+ }
+ core_if->nextep_seq[0] = 0;
+ core_if->first_in_nextep_seq = 0;
+ diepctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl);
+ diepctl.b.nextep = 0;
+ FH_WRITE_REG32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
+
+ /* Update IN Endpoint Mismatch Count by active IN NP EP count + 1 */
+ dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
+ dcfg.b.epmscnt = 2;
+ FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+ FH_DEBUGPL(DBG_PCDV,
+ "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
+ __func__, core_if->first_in_nextep_seq);
+ for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+ FH_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
+ }
+ }
+
+ if (core_if->multiproc_int_enable) {
+ daintmsk.b.inep0 = 1;
+ daintmsk.b.outep0 = 1;
+ FH_WRITE_REG32(&dev_if->dev_global_regs->deachintmsk,
+ daintmsk.d32);
+
+ doepmsk.b.setup = 1;
+ doepmsk.b.xfercompl = 1;
+ doepmsk.b.ahberr = 1;
+ doepmsk.b.epdisabled = 1;
+
+ if ((core_if->dma_desc_enable) ||
+ (core_if->dma_enable
+ && core_if->snpsid >= OTG_CORE_REV_3_00a)) {
+ doepmsk.b.stsphsercvd = 1;
+ }
+ if (core_if->dma_desc_enable)
+ doepmsk.b.bna = 1;
+/*
+ doepmsk.b.babble = 1;
+ doepmsk.b.nyet = 1;
+
+ if (core_if->dma_enable) {
+ doepmsk.b.nak = 1;
+ }
+*/
+ FH_WRITE_REG32(&dev_if->dev_global_regs->doepeachintmsk[0],
+ doepmsk.d32);
+
+ diepmsk.b.xfercompl = 1;
+ diepmsk.b.timeout = 1;
+ diepmsk.b.epdisabled = 1;
+ diepmsk.b.ahberr = 1;
+ diepmsk.b.intknepmis = 1;
+ if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
+ diepmsk.b.intknepmis = 0; /* mismatch handled via EPMismatch intr instead */
+
+/* if (core_if->dma_desc_enable) {
+ diepmsk.b.bna = 1;
+ }
+*/
+/*
+ if (core_if->dma_enable) {
+ diepmsk.b.nak = 1;
+ }
+*/
+ FH_WRITE_REG32(&dev_if->dev_global_regs->diepeachintmsk[0],
+ diepmsk.d32);
+ } else {
+ daintmsk.b.inep0 = 1;
+ daintmsk.b.outep0 = 1;
+ FH_WRITE_REG32(&dev_if->dev_global_regs->daintmsk,
+ daintmsk.d32);
+
+ doepmsk.b.setup = 1;
+ doepmsk.b.xfercompl = 1;
+ doepmsk.b.ahberr = 1;
+ doepmsk.b.epdisabled = 1;
+
+ if ((core_if->dma_desc_enable) ||
+ (core_if->dma_enable
+ && core_if->snpsid >= OTG_CORE_REV_3_00a)) {
+ doepmsk.b.stsphsercvd = 1;
+ }
+ if (core_if->dma_desc_enable)
+ doepmsk.b.bna = 1;
+ FH_WRITE_REG32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32);
+
+ diepmsk.b.xfercompl = 1;
+ diepmsk.b.timeout = 1;
+ diepmsk.b.epdisabled = 1;
+ diepmsk.b.ahberr = 1;
+ if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
+ diepmsk.b.intknepmis = 0;
+/*
+ if (core_if->dma_desc_enable) {
+ diepmsk.b.bna = 1;
+ }
+*/
+
+ FH_WRITE_REG32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32);
+ }
+
+ /* Reset Device Address */
+ dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
+ dcfg.b.devaddr = 0;
+ FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+ /* setup EP0 to receive SETUP packets */
+ if (core_if->snpsid <= OTG_CORE_REV_2_94a) /* 3.00a+ arms EP0 OUT in the enum-done handler */
+ ep0_out_start(core_if, pcd);
+
+ /* Acknowledge (clear) the USB reset interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.usbreset = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * Get the device speed from the device status register and convert it
+ * to USB speed constant.
+ *
+ * @param core_if Programming view of FH_otg controller.
+ */
+static int get_device_speed(fh_otg_core_if_t * core_if)
+{
+ dsts_data_t dsts;
+ int speed = 0;
+ dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
+
+ /* Map DSTS.EnumSpd onto the generic USB_SPEED_* constants */
+ switch (dsts.b.enumspd) {
+ case FH_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
+ speed = USB_SPEED_HIGH;
+ break;
+ case FH_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
+ case FH_DSTS_ENUMSPD_FS_PHY_48MHZ:
+ speed = USB_SPEED_FULL;
+ break;
+
+ case FH_DSTS_ENUMSPD_LS_PHY_6MHZ:
+ speed = USB_SPEED_LOW;
+ break;
+ }
+
+ return speed; /* 0 if the field held an unknown encoding */
+}
+
+/**
+ * Read the device status register and set the device speed in the
+ * data structure.
+ * Set up EP0 to receive SETUP packets by calling fh_ep0_activate.
+ */
+int32_t fh_otg_pcd_handle_enum_done_intr(fh_otg_pcd_t * pcd)
+{
+ fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
+ gintsts_data_t gintsts;
+ gusbcfg_data_t gusbcfg;
+ fh_otg_core_global_regs_t *global_regs =
+ GET_CORE_IF(pcd)->core_global_regs;
+ uint8_t utmi16b, utmi8b;
+ int speed;
+ dcfg_data_t dcfg;
+
+ FH_DEBUGPL(DBG_PCD, "SPEED ENUM\n");
+
+ /* WA for the case when SW gets SPEED ENUM without first USB RESET case
+ * due to USB RESET issued by the host earlier. Anyways USB Reset routine
+ * needs to be called to at least program EP 0 OUT - vahrama
+ */
+ dcfg.d32 = FH_READ_REG32(&pcd->core_if->dev_if->dev_global_regs->dcfg);
+ if (pcd->core_if->otg_ver && dcfg.b.devaddr)
+ fh_otg_pcd_handle_usb_reset_intr(pcd);
+
+
+ /* Turnaround-time constants depend on core revision (see usbtrdtim below) */
+ if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_2_60a) {
+ utmi16b = 6; //vahrama old value was 6;
+ utmi8b = 9;
+ } else {
+ utmi16b = 4;
+ utmi8b = 8;
+ }
+ fh_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->fh_ep);
+ if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_3_00a) {
+ ep0_out_start(GET_CORE_IF(pcd), pcd);
+ }
+
+#ifdef DEBUG_EP0
+ print_ep0_state(pcd);
+#endif
+
+ if (pcd->ep0state == EP0_DISCONNECT) {
+ pcd->ep0state = EP0_IDLE;
+ } else if (pcd->ep0state == EP0_STALL) {
+ pcd->ep0state = EP0_IDLE;
+ }
+
+ pcd->ep0state = EP0_IDLE; /* NOTE(review): unconditional - makes the conditional above redundant */
+
+ ep0->stopped = 0;
+
+ speed = get_device_speed(GET_CORE_IF(pcd));
+ pcd->fops->connect(pcd, speed);
+
+ /* Set USB turnaround time based on device speed and PHY interface. */
+ gusbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
+ if (speed == USB_SPEED_HIGH) {
+ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
+ FH_HWCFG2_HS_PHY_TYPE_ULPI) {
+ /* ULPI interface */
+ gusbcfg.b.usbtrdtim = 9;
+ }
+ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
+ FH_HWCFG2_HS_PHY_TYPE_UTMI) {
+ /* UTMI+ interface */
+ if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) {
+ gusbcfg.b.usbtrdtim = utmi8b;
+ } else if (GET_CORE_IF(pcd)->hwcfg4.
+ b.utmi_phy_data_width == 1) {
+ gusbcfg.b.usbtrdtim = utmi16b;
+ } else if (GET_CORE_IF(pcd)->
+ core_params->phy_utmi_width == 8) {
+ gusbcfg.b.usbtrdtim = utmi8b;
+ } else {
+ gusbcfg.b.usbtrdtim = utmi16b;
+ }
+ }
+ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
+ FH_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) {
+ /* UTMI+ OR ULPI interface */
+ if (gusbcfg.b.ulpi_utmi_sel == 1) {
+ /* ULPI interface */
+ gusbcfg.b.usbtrdtim = 9;
+ } else {
+ /* UTMI+ interface */
+ if (GET_CORE_IF(pcd)->
+ core_params->phy_utmi_width == 16) {
+ gusbcfg.b.usbtrdtim = utmi16b;
+ } else {
+ gusbcfg.b.usbtrdtim = utmi8b;
+ }
+ }
+ }
+ } else {
+ /* Full or low speed */
+ gusbcfg.b.usbtrdtim = 9;
+ }
+ FH_WRITE_REG32(&global_regs->gusbcfg, gusbcfg.d32);
+
+ /* Acknowledge (clear) the enumeration-done interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.enumdone = 1;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+ return 1;
+}
+
+/**
+ * This interrupt indicates that the ISO OUT Packet was dropped due to
+ * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs
+ * read all the data from the Rx FIFO.
+ */
+int32_t fh_otg_pcd_handle_isoc_out_packet_dropped_intr(fh_otg_pcd_t * pcd)
+{
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+ gintsts_data_t gintsts;
+
+ FH_WARN("INTERRUPT Handler not implemented for %s\n",
+ "ISOC Out Dropped");
+
+ intr_mask.b.isooutdrop = 1;
+ FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+ intr_mask.d32, 0); /* mask further ISOC-out-dropped interrupts */
+
+ /* Acknowledge (clear) the interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.isooutdrop = 1;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This interrupt indicates the end of the portion of the micro-frame
+ * for periodic transactions. If there is a periodic transaction for
+ * the next frame, load the packets into the EP periodic Tx FIFO.
+ */
+int32_t fh_otg_pcd_handle_end_periodic_frame_intr(fh_otg_pcd_t * pcd)
+{
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+ gintsts_data_t gintsts;
+ FH_PRINTF("INTERRUPT Handler not implemented for %s\n", "EOP");
+
+ intr_mask.b.eopframe = 1;
+ FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+ intr_mask.d32, 0); /* mask further end-of-periodic-frame interrupts */
+
+ /* Acknowledge (clear) the interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.eopframe = 1;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This interrupt indicates that EP of the packet on the top of the
+ * non-periodic Tx FIFO does not match EP of the IN Token received.
+ *
+ * The "Device IN Token Queue" Registers are read to determine the
+ * order the IN Tokens have been received. The non-periodic Tx FIFO
+ * is flushed, so it can be reloaded in the order seen in the IN Token
+ * Queue.
+ */
+int32_t fh_otg_pcd_handle_ep_mismatch_intr(fh_otg_pcd_t * pcd)
+{
+ gintsts_data_t gintsts;
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ dctl_data_t dctl;
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+
+ if (!core_if->en_multiple_tx_fifo && core_if->dma_enable) {
+ core_if->start_predict = 1;
+
+ FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
+
+ gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+ if (!gintsts.b.ginnakeff) { /* only act if Global IN NAK is not already effective */
+ /* Disable EP Mismatch interrupt */
+ intr_mask.d32 = 0;
+ intr_mask.b.epmismatch = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0);
+ /* Enable the Global IN NAK Effective Interrupt */
+ intr_mask.d32 = 0;
+ intr_mask.b.ginnakeff = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, intr_mask.d32);
+ /* Set the global non-periodic IN NAK handshake */
+ dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
+ dctl.b.sgnpinnak = 1;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
+ } else {
+ FH_PRINTF("gintsts.b.ginnakeff = 1! dctl.b.sgnpinnak not set\n");
+ }
+ /* Disabling of all EP's will be done in fh_otg_pcd_handle_in_nak_effective()
+ * handler after Global IN NAK Effective interrupt will be asserted */
+ }
+ /* Acknowledge (clear) the EP mismatch interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.epmismatch = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This interrupt is valid only in DMA mode. This interrupt indicates that the
+ * core has stopped fetching data for IN endpoints due to the unavailability of
+ * TxFIFO space or Request Queue space. This interrupt is used by the
+ * application for an endpoint mismatch algorithm.
+ *
+ * @param pcd The PCD
+ */
+int32_t fh_otg_pcd_handle_ep_fetsusp_intr(fh_otg_pcd_t * pcd)
+{
+ gintsts_data_t gintsts;
+ gintmsk_data_t gintmsk_data;
+ dctl_data_t dctl;
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
+
+ /* Clear the global non-periodic IN NAK handshake */
+ dctl.d32 = 0;
+ dctl.b.cgnpinnak = 1; /* CGNPInNak: releases the global IN NAK set earlier */
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+
+ /* Mask GINTSTS.FETSUSP interrupt */
+ gintmsk_data.d32 = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
+ gintmsk_data.b.fetsusp = 0;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk_data.d32);
+
+ /* Acknowledge (clear) the fetch-suspended interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.fetsusp = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This function stalls EP0.
+ */
+static inline void ep0_do_stall(fh_otg_pcd_t * pcd, const int err_val)
+{
+ fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
+ usb_device_request_t *ctrl = &pcd->setup_pkt->req;
+ FH_WARN("req %02x.%02x protocol STALL; err %d\n",
+ ctrl->bmRequestType, ctrl->bRequest, err_val);
+
+ ep0->fh_ep.is_in = 1; /* stall the IN direction first... */
+ fh_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->fh_ep);
+ ep0->fh_ep.is_in = 0; /* ...then the OUT direction */
+ fh_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->fh_ep);
+ pcd->ep0.stopped = 1;
+ pcd->ep0state = EP0_IDLE;
+ ep0_out_start(GET_CORE_IF(pcd), pcd); /* re-arm EP0 OUT for the next SETUP */
+}
+
+/**
+ * This functions delegates the setup command to the gadget driver.
+ */
+static inline void do_gadget_setup(fh_otg_pcd_t * pcd,
+ usb_device_request_t * ctrl)
+{
+ int ret = 0;
+ FH_SPINUNLOCK(pcd->lock); /* drop the lock across the gadget callback */
+ ret = pcd->fops->setup(pcd, (uint8_t *) ctrl);
+ FH_SPINLOCK(pcd->lock);
+ if (ret < 0) {
+ ep0_do_stall(pcd, ret);
+ }
+
+ /** @todo This is a g_file_storage gadget driver specific
+ * workaround: a DELAYED_STATUS result from the fsg_setup
+ * routine will result in the gadget queueing a EP0 IN status
+ * phase for a two-stage control transfer. Exactly the same as
+ * a SET_CONFIGURATION/SET_INTERFACE except that this is a class
+ * specific request. Need a generic way to know when the gadget
+ * driver will queue the status phase. Can we assume when we
+ * call the gadget driver setup() function that it will always
+ * queue and require the following flag? Need to look into
+ * this.
+ */
+
+ if (ret == 256 + 999) { /* DELAYED_STATUS sentinel from g_file_storage */
+ pcd->request_config = 1;
+ }
+}
+
+#ifdef FH_UTE_CFI
+/**
+ * This functions delegates the CFI setup commands to the gadget driver.
+ * This function will return a negative value to indicate a failure.
+ */
+static inline int cfi_gadget_setup(fh_otg_pcd_t * pcd,
+ struct cfi_usb_ctrlrequest *ctrl_req)
+{
+ int ret = 0;
+
+ if (pcd->fops && pcd->fops->cfi_setup) {
+ FH_SPINUNLOCK(pcd->lock); /* drop the lock across the gadget callback */
+ ret = pcd->fops->cfi_setup(pcd, ctrl_req);
+ FH_SPINLOCK(pcd->lock);
+ if (ret < 0) {
+ ep0_do_stall(pcd, ret);
+ return ret;
+ }
+ }
+
+ return ret; /* 0 when no cfi_setup hook is installed */
+}
+
+/**
+ * This function starts the Zero-Length Packet for the IN status phase
+ * of a 2 stage control transfer.
+ */
+static inline void do_setup_in_status_phase(fh_otg_pcd_t * pcd)
+{
+ fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
+ if (pcd->ep0state == EP0_STALL) {
+ return;
+ }
+
+ pcd->ep0state = EP0_IN_STATUS_PHASE;
+
+ /* Start the EP0 IN status phase (zero-length packet) */
+ FH_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n");
+ if ((GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_3_00a)
+ && (pcd->core_if->dma_desc_enable)
+ && (ep0->fh_ep.xfer_count < ep0->fh_ep.total_len)) {
+ FH_DEBUGPL(DBG_PCDV,
+ "Data terminated wait next packet in out_desc_addr\n");
+ pcd->backup_buf = phys_to_virt(ep0->fh_ep.dma_addr);
+ pcd->data_terminated = 1;
+ }
+ ep0->fh_ep.xfer_len = 0;
+ ep0->fh_ep.xfer_count = 0;
+ ep0->fh_ep.is_in = 1;
+ ep0->fh_ep.dma_addr = pcd->setup_pkt_dma_handle; /* ZLP reuses the setup packet buffer */
+ fh_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->fh_ep);
+
+ /* Prepare for more SETUP Packets */
+ //ep0_out_start(GET_CORE_IF(pcd), pcd);
+}
+
+/**
+ * This function starts the Zero-Length Packet for the OUT status phase
+ * of a 2 stage control transfer.
+ */
+static inline void do_setup_out_status_phase(fh_otg_pcd_t * pcd)
+{
+ fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
+ doepint_data_t doepint;
+ doepint.d32 = FH_READ_REG32(&pcd->core_if->dev_if->out_ep_regs[0]->doepint);
+ if (pcd->ep0state == EP0_STALL) {
+ FH_DEBUGPL(DBG_PCD, "EP0 STALLED\n");
+ return;
+ }
+ pcd->ep0state = EP0_OUT_STATUS_PHASE;
+
+ FH_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n");
+ ep0->fh_ep.xfer_len = 0;
+ ep0->fh_ep.xfer_count = 0;
+ ep0->fh_ep.is_in = 0;
+ ep0->fh_ep.dma_addr = pcd->setup_pkt_dma_handle; /* ZLP reuses the setup packet buffer */
+ /* If there is xfercomplete on EP0 OUT do not start OUT Status stage.
+ * xfercomplete means that ZLP was already received as EP0 OUT is enabled
+ * during IN Data stage
+ */
+ if ((doepint.b.xfercompl == 1) && (pcd->core_if->snpsid >= OTG_CORE_REV_3_00a)
+ && (pcd->core_if->dma_enable == 1) && (pcd->core_if->dma_desc_enable == 0)) {
+ FH_DEBUGPL(DBG_PCD, "Status stage already completed\n");
+ return;
+ }
+
+ fh_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->fh_ep);
+
+ /* Prepare for more SETUP Packets */
+ if (GET_CORE_IF(pcd)->dma_enable == 0) {
+ ep0_out_start(GET_CORE_IF(pcd), pcd);
+ }
+}
+
+/**
+ * Clear the EP halt (STALL) and if pending requests start the
+ * transfer.
+ */
+static inline void pcd_clear_halt(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * ep)
+{
+ if (ep->fh_ep.stall_clear_flag) {
+ /* Start Control Status Phase */
+ do_setup_in_status_phase(pcd);
+ return;
+ }
+
+ fh_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->fh_ep);
+
+ /* Reactivate the EP */
+ fh_otg_ep_activate(GET_CORE_IF(pcd), &ep->fh_ep);
+ if (ep->stopped) {
+ ep->stopped = 0;
+ /* If there is a request in the EP queue start it */
+
+ /** @todo FIXME: this causes an EP mismatch in DMA mode.
+ * epmismatch not yet implemented. */
+
+ /*
+ * Above fixme is solved by implementing a tasklet to call the
+ * start_next_request(), outside of interrupt context at some
+ * time after the current time, after a clear-halt setup packet.
+ * Still need to implement ep mismatch in the future if a gadget
+ * ever uses more than one endpoint at once
+ */
+ ep->queue_sof = 1;
+ FH_TASK_SCHEDULE(pcd->start_xfer_tasklet);
+ }
+ /* Start Control Status Phase */
+ do_setup_in_status_phase(pcd);
+}
+
+/**
+ * This function is called when the SET_FEATURE TEST_MODE Setup packet
+ * is sent from the host. The Device Control register is written with
+ * the Test Mode bits set to the specified Test Mode. This is done as
+ * a tasklet so that the "Status" phase of the control transfer
+ * completes before transmitting the TEST packets.
+ *
+ * @todo This has not been tested since the tasklet struct was put
+ * into the PCD struct!
+ *
+ */
+void do_test_mode(void *data)
+{
+ dctl_data_t dctl;
+ fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) data;
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ int test_mode = pcd->test_mode;
+
+// FH_WARN("%s() has not been tested since being rewritten!\n", __func__);
+
+ dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
+ switch (test_mode) {
+ case 1: // TEST_J
+ dctl.b.tstctl = 1;
+ break;
+
+ case 2: // TEST_K
+ dctl.b.tstctl = 2;
+ break;
+
+ case 3: // TEST_SE0_NAK
+ dctl.b.tstctl = 3;
+ break;
+
+ case 4: // TEST_PACKET
+ dctl.b.tstctl = 4;
+ break;
+
+ case 5: // TEST_FORCE_ENABLE
+ dctl.b.tstctl = 5;
+ break;
+ case 7:
+ fh_otg_set_hnpreq(core_if, 1); /* OTG HNP request; leaves DCTL.TstCtl unchanged */
+ }
+ FH_PRINTF("test mode = %d\n",test_mode);
+ core_if->test_mode = test_mode;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
+}
+
+/**
+ * This function processes the GET_STATUS Setup Commands.
+ */
+static inline void do_get_status(fh_otg_pcd_t * pcd)
+{
+ usb_device_request_t ctrl = pcd->setup_pkt->req;
+ fh_otg_pcd_ep_t *ep;
+ fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
+ uint16_t *status = pcd->status_buf;
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCD,
+ "GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
+ ctrl.bmRequestType, ctrl.bRequest,
+ UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
+ UGETW(ctrl.wLength));
+#endif
+
+ switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
+ case UT_DEVICE:
+ if (UGETW(ctrl.wIndex) == 0xF000) { /* OTG Status selector */
+ FH_PRINTF("wIndex - %d\n", UGETW(ctrl.wIndex));
+ FH_PRINTF("OTG VERSION - %d\n", core_if->otg_ver);
+ FH_PRINTF("OTG CAP - %d, %d\n",
+ core_if->core_params->otg_cap,
+ FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
+ if (core_if->otg_ver == 1
+ && core_if->core_params->otg_cap ==
+ FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+ uint8_t *otgsts = (uint8_t *) pcd->status_buf;
+ *otgsts = (core_if->otg_sts & 0x1);
+ pcd->ep0_pending = 1;
+ ep0->fh_ep.start_xfer_buff =
+ (uint8_t *) otgsts;
+ ep0->fh_ep.xfer_buff = (uint8_t *) otgsts;
+ ep0->fh_ep.dma_addr =
+ pcd->status_buf_dma_handle;
+ ep0->fh_ep.xfer_len = 1; /* OTG status is a single byte */
+ ep0->fh_ep.xfer_count = 0;
+ ep0->fh_ep.total_len = ep0->fh_ep.xfer_len;
+ fh_otg_ep0_start_transfer(GET_CORE_IF(pcd),
+ &ep0->fh_ep);
+ return;
+ } else {
+ ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
+ return;
+ }
+ break; /* unreachable: both branches above return */
+ } else {
+ *status = 0x1; /* Self powered */
+ *status |= pcd->remote_wakeup_enable << 1;
+ break;
+ }
+ case UT_INTERFACE:
+ *status = 0;
+ break;
+
+ case UT_ENDPOINT:
+ ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
+ if (ep == 0 || UGETW(ctrl.wLength) > 2) {
+ ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
+ return;
+ }
+ /** @todo check for EP stall */
+ *status = ep->stopped;
+ break;
+ }
+ pcd->ep0_pending = 1;
+ ep0->fh_ep.start_xfer_buff = (uint8_t *) status;
+ ep0->fh_ep.xfer_buff = (uint8_t *) status;
+ ep0->fh_ep.dma_addr = pcd->status_buf_dma_handle;
+ ep0->fh_ep.xfer_len = 2; /* standard GET_STATUS replies are 2 bytes */
+ ep0->fh_ep.xfer_count = 0;
+ ep0->fh_ep.total_len = ep0->fh_ep.xfer_len;
+ fh_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->fh_ep);
+}
+
+/**
+ * This function process the SET_FEATURE Setup Commands.
+ */
+static inline void do_set_feature(fh_otg_pcd_t * pcd)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+ usb_device_request_t ctrl = pcd->setup_pkt->req;
+ fh_otg_pcd_ep_t *ep = 0;
+ int32_t otg_cap_param = core_if->core_params->otg_cap;
+ gotgctl_data_t gotgctl = {.d32 = 0 };
+ gintmsk_data_t gintmsk = {.d32 = 0 };
+
+ FH_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
+ ctrl.bmRequestType, ctrl.bRequest,
+ UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
+ UGETW(ctrl.wLength));
+ FH_DEBUGPL(DBG_PCD, "otg_cap=%d\n", otg_cap_param);
+
+ switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
+ case UT_DEVICE:
+ switch (UGETW(ctrl.wValue)) {
+ case UF_DEVICE_REMOTE_WAKEUP:
+ pcd->remote_wakeup_enable = 1;
+ break;
+
+ case UF_TEST_MODE:
+ /* Setup the Test Mode tasklet to do the Test
+ * Packet generation after the SETUP Status
+ * phase has completed. */
+
+ /** @todo This has not been tested since the
+ * tasklet struct was put into the PCD
+ * struct! */
+ pcd->test_mode = UGETW(ctrl.wIndex) >> 8; /* test selector is in the high byte of wIndex */
+ FH_TASK_SCHEDULE(pcd->test_mode_tasklet);
+ break;
+
+ case UF_DEVICE_B_HNP_ENABLE:
+ FH_DEBUGPL(DBG_PCDV,
+ "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
+
+ /* dev may initiate HNP */
+ if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+ gotgctl.b.devhnpen = 1;
+ if (core_if->otg_ver) {
+ FH_MODIFY_REG32(&global_regs->gotgctl, 0, gotgctl.d32);
+ /* Ensure that USB Suspend interrupt is unmasked */
+ gintmsk.b.usbsuspend = 1;
+ FH_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
+ }
+ else {
+ pcd->b_hnp_enable = 1;
+ fh_otg_pcd_update_otg(pcd, 0);
+ FH_DEBUGPL(DBG_PCD, "Request B HNP\n");
+ /**@todo Is the gotgctl.devhnpen cleared
+ * by a USB Reset? */
+ gotgctl.b.hnpreq = 1;
+ FH_WRITE_REG32(&global_regs->gotgctl, gotgctl.d32);
+ }
+ } else {
+ ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
+ return;
+ }
+ break;
+
+ case UF_DEVICE_A_HNP_SUPPORT:
+ /* RH port supports HNP */
+ FH_DEBUGPL(DBG_PCDV,
+ "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
+ if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+ pcd->a_hnp_support = 1;
+ fh_otg_pcd_update_otg(pcd, 0);
+ } else {
+ ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
+ return;
+ }
+ break;
+
+ case UF_DEVICE_A_ALT_HNP_SUPPORT:
+ /* other RH port does */
+ FH_DEBUGPL(DBG_PCDV,
+ "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
+ if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+ pcd->a_alt_hnp_support = 1;
+ fh_otg_pcd_update_otg(pcd, 0);
+ } else {
+ ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
+ return;
+ }
+ break;
+
+ default:
+ ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
+ return;
+
+ }
+ do_setup_in_status_phase(pcd);
+ break;
+
+ case UT_INTERFACE:
+ do_gadget_setup(pcd, &ctrl);
+ break;
+
+ case UT_ENDPOINT:
+ if (UGETW(ctrl.wValue) == UF_ENDPOINT_HALT) {
+ ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
+ if (ep == 0) {
+ ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
+ return;
+ }
+ ep->stopped = 1;
+ fh_otg_ep_set_stall(core_if, &ep->fh_ep);
+ }
+ do_setup_in_status_phase(pcd);
+ break;
+ }
+}
+
+/**
+ * This function processes the CLEAR_FEATURE setup command (USB 2.0 ch.9.4.1).
+ */
+static inline void do_clear_feature(fh_otg_pcd_t * pcd)
+{
+ usb_device_request_t ctrl = pcd->setup_pkt->req;
+ fh_otg_pcd_ep_t *ep = 0;
+
+ FH_DEBUGPL(DBG_PCD,
+ "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
+ ctrl.bmRequestType, ctrl.bRequest,
+ UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
+ UGETW(ctrl.wLength));
+
+ switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
+ case UT_DEVICE:
+ switch (UGETW(ctrl.wValue)) {
+ case UF_DEVICE_REMOTE_WAKEUP:
+ pcd->remote_wakeup_enable = 0;
+ break;
+
+ case UF_TEST_MODE:
+ /** @todo Add CLEAR_FEATURE handling for TEST modes. */
+ break;
+
+ default:
+ ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
+ return;
+ }
+ do_setup_in_status_phase(pcd);
+ break;
+
+ case UT_ENDPOINT:
+ ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
+ if (ep == 0) {
+ ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
+ return;
+ }
+
+ pcd_clear_halt(pcd, ep);
+
+ break;
+ }
+}
+
+/**
+ * This function processes the SET_ADDRESS setup command: the received device address is programmed into DCFG and the IN status phase is started. Requests whose bmRequestType is not UT_DEVICE are silently ignored.
+ */
+static inline void do_set_address(fh_otg_pcd_t * pcd)
+{
+ fh_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
+ usb_device_request_t ctrl = pcd->setup_pkt->req;
+
+ if (ctrl.bmRequestType == UT_DEVICE) {
+ dcfg_data_t dcfg = {.d32 = 0 };
+
+#ifdef DEBUG_EP0
+// FH_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue);
+#endif
+ dcfg.b.devaddr = UGETW(ctrl.wValue);
+ FH_MODIFY_REG32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32);
+ do_setup_in_status_phase(pcd);
+ }
+}
+
+/**
+ * This function processes SETUP commands. In Linux, the USB Command
+ * processing is done in two places - the first being the PCD and the
+ * second in the Gadget Driver (for example, the File-Backed Storage
+ * Gadget Driver).
+ *
+ * <table>
+ * <tr><td>Command </td><td>Driver </td><td>Description</td></tr>
+ *
+ * <tr><td>GET_STATUS </td><td>PCD </td><td>Command is processed as
+ * defined in chapter 9 of the USB 2.0 Specification chapter 9
+ * </td></tr>
+ *
+ * <tr><td>CLEAR_FEATURE </td><td>PCD </td><td>The Device and Endpoint
+ * requests are the ENDPOINT_HALT feature is processed, all others the
+ * interface requests are ignored.</td></tr>
+ *
+ * <tr><td>SET_FEATURE </td><td>PCD </td><td>The Device and Endpoint
+ * requests are processed by the PCD. Interface requests are passed
+ * to the Gadget Driver.</td></tr>
+ *
+ * <tr><td>SET_ADDRESS </td><td>PCD </td><td>Program the DCFG reg,
+ * with device address received </td></tr>
+ *
+ * <tr><td>GET_DESCRIPTOR </td><td>Gadget Driver </td><td>Return the
+ * requested descriptor</td></tr>
+ *
+ * <tr><td>SET_DESCRIPTOR </td><td>Gadget Driver </td><td>Optional -
+ * not implemented by any of the existing Gadget Drivers.</td></tr>
+ *
+ * <tr><td>SET_CONFIGURATION </td><td>Gadget Driver </td><td>Disable
+ * all EPs and enable EPs for new configuration.</td></tr>
+ *
+ * <tr><td>GET_CONFIGURATION </td><td>Gadget Driver </td><td>Return
+ * the current configuration</td></tr>
+ *
+ * <tr><td>SET_INTERFACE </td><td>Gadget Driver </td><td>Disable all
+ * EPs and enable EPs for new configuration.</td></tr>
+ *
+ * <tr><td>GET_INTERFACE </td><td>Gadget Driver </td><td>Return the
+ * current interface.</td></tr>
+ *
+ * <tr><td>SYNC_FRAME </td><td>PCD </td><td>Display debug
+ * message.</td></tr>
+ * </table>
+ *
+ * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are
+ * processed by pcd_setup. Calling the Function Driver's setup function from
+ * pcd_setup processes the gadget SETUP commands.
+ */
+static inline void pcd_setup(fh_otg_pcd_t * pcd)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ usb_device_request_t ctrl = pcd->setup_pkt->req;
+ fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
+
+ deptsiz0_data_t doeptsize0 = {.d32 = 0 };
+
+#ifdef FH_UTE_CFI
+ int retval = 0;
+ struct cfi_usb_ctrlrequest cfi_req;
+#endif
+
+ doeptsize0.d32 = FH_READ_REG32(&dev_if->out_ep_regs[0]->doeptsiz);
+
+ /** In BDMA mode more than 1 setup packet is not supported till 3.00a */
+ if (core_if->dma_enable && core_if->dma_desc_enable == 0
+ && (doeptsize0.b.supcnt < 2)
+ && (core_if->snpsid < OTG_CORE_REV_2_94a)) {
+ FH_ERROR
+ ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n");
+ }
+ if ((core_if->snpsid >= OTG_CORE_REV_3_00a)
+ && (core_if->dma_enable == 1) && (core_if->dma_desc_enable == 0)) {
+ if (doeptsize0.b.supcnt == 3 && ep0->fh_ep.stp_rollover == 0) {
+ FH_ERROR(" !!! Setup packet count was not updated by the core\n");
+ return;
+ }
+ ctrl =
+ (pcd->setup_pkt +
+ (3 - doeptsize0.b.supcnt - 1 +
+ ep0->fh_ep.stp_rollover))->req;
+ }
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+ ctrl.bmRequestType, ctrl.bRequest,
+ UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
+ UGETW(ctrl.wLength));
+#endif
+
+ /* Clean up the request queue */
+ fh_otg_request_nuke(ep0);
+ ep0->stopped = 0;
+
+ if (ctrl.bmRequestType & UE_DIR_IN) {
+ ep0->fh_ep.is_in = 1;
+ pcd->ep0state = EP0_IN_DATA_PHASE;
+ } else {
+ ep0->fh_ep.is_in = 0;
+ pcd->ep0state = EP0_OUT_DATA_PHASE;
+ }
+
+ if (UGETW(ctrl.wLength) == 0) {
+ ep0->fh_ep.is_in = 1;
+ pcd->ep0state = EP0_IN_STATUS_PHASE;
+ }
+
+ if (UT_GET_TYPE(ctrl.bmRequestType) != UT_STANDARD) {
+
+#ifdef FH_UTE_CFI
+ FH_MEMCPY(&cfi_req, &ctrl, sizeof(usb_device_request_t));
+
+ //printk(KERN_ALERT "CFI: req_type=0x%02x; req=0x%02x\n",
+ //     ctrl.bRequestType, ctrl.bRequest);
+ if (UT_GET_TYPE(cfi_req.bRequestType) == UT_VENDOR) {
+ if (cfi_req.bRequest > 0xB0 && cfi_req.bRequest < 0xBF) {
+ retval = cfi_setup(pcd, &cfi_req);
+ if (retval < 0) {
+ ep0_do_stall(pcd, retval);
+ pcd->ep0_pending = 0;
+ return;
+ }
+
+ /* if need gadget setup then call it and check the retval */
+ if (pcd->cfi->need_gadget_att) {
+ retval =
+ cfi_gadget_setup(pcd,
+ &pcd->
+ cfi->ctrl_req);
+ if (retval < 0) {
+ pcd->ep0_pending = 0;
+ return;
+ }
+ }
+
+ if (pcd->cfi->need_status_in_complete) {
+ do_setup_in_status_phase(pcd);
+ }
+ return;
+ }
+ }
+#endif
+
+ /* handle non-standard (class/vendor) requests in the gadget driver */
+ do_gadget_setup(pcd, &ctrl);
+ return;
+ }
+
+ /** @todo NGS: Handle bad setup packet? */
+
+///////////////////////////////////////////
+//// --- Standard Request handling --- ////
+
+ switch (ctrl.bRequest) {
+ case UR_GET_STATUS:
+ do_get_status(pcd);
+ break;
+
+ case UR_CLEAR_FEATURE:
+ do_clear_feature(pcd);
+ break;
+
+ case UR_SET_FEATURE:
+ do_set_feature(pcd);
+ break;
+
+ case UR_SET_ADDRESS:
+ do_set_address(pcd);
+ break;
+
+ case UR_SET_INTERFACE:
+ case UR_SET_CONFIG:
+// _pcd->request_config = 1; /* Configuration changed */
+ do_gadget_setup(pcd, &ctrl);
+ break;
+
+ case UR_SYNCH_FRAME:
+ do_gadget_setup(pcd, &ctrl);
+ break;
+
+ default:
+ /* Call the Gadget Driver's setup functions */
+ do_gadget_setup(pcd, &ctrl);
+ break;
+ }
+}
+
+/**
+ * This function completes the current ep0 control transfer and advances the
+ * ep0 state machine to the appropriate status phase.
+ */
+static int32_t ep0_complete_request(fh_otg_pcd_ep_t * ep)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ fh_otg_dev_in_ep_regs_t *in_ep_regs =
+ dev_if->in_ep_regs[ep->fh_ep.num];
+#ifdef DEBUG_EP0
+ fh_otg_dev_out_ep_regs_t *out_ep_regs =
+ dev_if->out_ep_regs[ep->fh_ep.num];
+#endif
+ deptsiz0_data_t deptsiz;
+ dev_dma_desc_sts_t desc_sts = {.d32 = 0 };
+ fh_otg_pcd_request_t *req;
+ int is_last = 0;
+ fh_otg_pcd_t *pcd = ep->pcd;
+
+#ifdef FH_UTE_CFI
+ struct cfi_usb_ctrlrequest *ctrlreq;
+ int retval = -FH_E_NOT_SUPPORTED;
+#endif
+
+ if (pcd->ep0_pending && FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ if (ep->fh_ep.is_in) {
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n");
+#endif
+ do_setup_out_status_phase(pcd);
+ } else {
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n");
+#endif
+
+#ifdef FH_UTE_CFI
+ ctrlreq = &pcd->cfi->ctrl_req;
+
+ if (UT_GET_TYPE(ctrlreq->bRequestType) == UT_VENDOR) {
+ if (ctrlreq->bRequest > 0xB0
+ && ctrlreq->bRequest < 0xBF) {
+
+ /* Return if the PCD failed to handle the request */
+ if ((retval =
+ pcd->cfi->ops.
+ ctrl_write_complete(pcd->cfi,
+ pcd)) < 0) {
+ CFI_INFO
+ ("ERROR setting a new value in the PCD(%d)\n",
+ retval);
+ ep0_do_stall(pcd, retval);
+ pcd->ep0_pending = 0;
+ return 0;
+ }
+
+ /* If the gadget needs to be notified on the request */
+ if (pcd->cfi->need_gadget_att == 1) {
+ //retval = do_gadget_setup(pcd, &pcd->cfi->ctrl_req);
+ retval =
+ cfi_gadget_setup(pcd,
+ &pcd->cfi->
+ ctrl_req);
+
+ /* Return from the function if the gadget failed to process
+ * the request properly - this should never happen !!!
+ */
+ if (retval < 0) {
+ CFI_INFO
+ ("ERROR setting a new value in the gadget(%d)\n",
+ retval);
+ pcd->ep0_pending = 0;
+ return 0;
+ }
+ }
+
+ CFI_INFO("%s: RETVAL=%d\n", __func__,
+ retval);
+ /* If we hit here then the PCD and the gadget has properly
+ * handled the request - so send the ZLP IN to the host.
+ */
+ /* @todo: MAS - decide whether we need to start the setup
+ * stage based on the need_setup value of the cfi object
+ */
+ do_setup_in_status_phase(pcd);
+ pcd->ep0_pending = 0;
+ return 1;
+ }
+ }
+#endif
+
+ do_setup_in_status_phase(pcd);
+ }
+ pcd->ep0_pending = 0;
+ return 1;
+ }
+
+ if (FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ return 0;
+ }
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+
+ if (pcd->ep0state == EP0_OUT_STATUS_PHASE
+ || pcd->ep0state == EP0_IN_STATUS_PHASE) {
+ is_last = 1;
+ } else if (ep->fh_ep.is_in) {
+ deptsiz.d32 = FH_READ_REG32(&in_ep_regs->dieptsiz);
+ if (core_if->dma_desc_enable != 0)
+ desc_sts = dev_if->in_desc_addr->status;
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCDV, "%d len=%d xfersize=%d pktcnt=%d\n",
+ ep->fh_ep.num, ep->fh_ep.xfer_len,
+ deptsiz.b.xfersize, deptsiz.b.pktcnt);
+#endif
+
+ if (((core_if->dma_desc_enable == 0)
+ && (deptsiz.b.xfersize == 0))
+ || ((core_if->dma_desc_enable != 0)
+ && (desc_sts.b.bytes == 0))) {
+ req->actual = ep->fh_ep.xfer_count;
+ /* Is a Zero Len Packet needed? */
+ if (req->sent_zlp) {
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n");
+#endif
+ req->sent_zlp = 0;
+ }
+ do_setup_out_status_phase(pcd);
+ }
+ } else {
+ /* ep0-OUT data phase completed */
+#ifdef DEBUG_EP0
+ deptsiz.d32 = FH_READ_REG32(&out_ep_regs->doeptsiz);
+ FH_DEBUGPL(DBG_PCDV, "%d len=%d xsize=%d pktcnt=%d\n",
+ ep->fh_ep.num, ep->fh_ep.xfer_len,
+ deptsiz.b.xfersize, deptsiz.b.pktcnt);
+#endif
+ req->actual = ep->fh_ep.xfer_count;
+
+ /* Is a Zero Len Packet needed? */
+ if (req->sent_zlp) {
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n");
+#endif
+ req->sent_zlp = 0;
+ }
+ /* For older cores do setup in status phase in Slave/BDMA modes,
+ * starting from 3.00 do that only in slave, and for DMA modes
+ * just re-enable ep 0 OUT here*/
+ if (core_if->dma_enable == 0
+ || (core_if->dma_desc_enable == 0
+ && core_if->snpsid <= OTG_CORE_REV_2_94a)) {
+ do_setup_in_status_phase(pcd);
+ } else if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
+ FH_DEBUGPL(DBG_PCDV,
+ "Enable out ep before in status phase\n");
+ ep0_out_start(core_if, pcd);
+ }
+ }
+
+ /* Complete the request (status phase done) */
+ if (is_last) {
+ fh_otg_request_done(ep, req, 0);
+ ep->fh_ep.start_xfer_buff = 0;
+ ep->fh_ep.xfer_buff = 0;
+ ep->fh_ep.xfer_len = 0;
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef FH_UTE_CFI
+/**
+ * This function traverses all the CFI DMA descriptors and
+ * accumulates the bytes that are left to be transferred.
+ *
+ * @return The total bytes left to be transferred, or a negative value on failure
+ */
+static inline int cfi_calc_desc_residue(fh_otg_pcd_ep_t * ep)
+{
+ int32_t ret = 0;
+ int i;
+ struct fh_otg_dma_desc *ddesc = NULL;
+ struct cfi_ep *cfiep;
+
+ /* See if the pcd_ep has its respective cfi_ep mapped */
+ cfiep = get_cfi_ep_by_pcd_ep(ep->pcd->cfi, ep);
+ if (!cfiep) {
+ CFI_INFO("%s: Failed to find ep\n", __func__);
+ return -1;
+ }
+
+ ddesc = ep->fh_ep.descs;
+
+ for (i = 0; (i < cfiep->desc_count) && (i < MAX_DMA_DESCS_PER_EP); i++) {
+
+#if defined(PRINT_CFI_DMA_DESCS)
+ print_desc(ddesc, ep->ep.name, i);
+#endif
+ ret += ddesc->status.b.bytes;
+ ddesc++;
+ }
+
+ if (ret)
+ CFI_INFO("!!!!!!!!!! WARNING (%s) - residue=%d\n", __func__,
+ ret);
+
+ return ret;
+}
+#endif
+
+/**
+ * This function completes the request for the EP. If there are
+ * additional requests for the EP in the queue they will be started.
+ */
+static void complete_ep(fh_otg_pcd_ep_t * ep)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ fh_otg_dev_in_ep_regs_t *in_ep_regs =
+ dev_if->in_ep_regs[ep->fh_ep.num];
+ deptsiz_data_t deptsiz;
+ dev_dma_desc_sts_t desc_sts;
+ fh_otg_pcd_request_t *req = 0;
+ fh_otg_dev_dma_desc_t *dma_desc;
+ uint32_t byte_count = 0;
+ int is_last = 0;
+ int i;
+
+ FH_DEBUGPL(DBG_PCDV, "%s() %d-%s\n", __func__, ep->fh_ep.num,
+ (ep->fh_ep.is_in ? "IN" : "OUT"));
+
+ /* Get any pending requests */
+ if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+ if (!req) {
+ FH_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
+ return;
+ }
+ } else {
+ FH_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
+ return;
+ }
+
+ FH_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending);
+
+ if (ep->fh_ep.is_in) {
+ deptsiz.d32 = FH_READ_REG32(&in_ep_regs->dieptsiz);
+
+ if (core_if->dma_enable) {
+ if (core_if->dma_desc_enable == 0) {
+ if (deptsiz.b.xfersize == 0
+ && deptsiz.b.pktcnt == 0) {
+ byte_count =
+ ep->fh_ep.xfer_len -
+ ep->fh_ep.xfer_count;
+
+ ep->fh_ep.xfer_buff += byte_count;
+ ep->fh_ep.dma_addr += byte_count;
+ ep->fh_ep.xfer_count += byte_count;
+
+ FH_DEBUGPL(DBG_PCDV,
+ "%d-%s len=%d xfersize=%d pktcnt=%d\n",
+ ep->fh_ep.num,
+ (ep->fh_ep.
+ is_in ? "IN" : "OUT"),
+ ep->fh_ep.xfer_len,
+ deptsiz.b.xfersize,
+ deptsiz.b.pktcnt);
+
+ if (ep->fh_ep.xfer_len <
+ ep->fh_ep.total_len) {
+ fh_otg_ep_start_transfer
+ (core_if, &ep->fh_ep);
+ } else if (ep->fh_ep.sent_zlp) {
+ /*
+ * This fragment of code should initiate 0
+ * length transfer in case if it is queued
+ * a transfer with size divisible to EPs max
+ * packet size and with usb_request zero field
+ * is set, which means that after data is transfered,
+ * it is also should be transfered
+ * a 0 length packet at the end. For Slave and
+ * Buffer DMA modes in this case SW has
+ * to initiate 2 transfers one with transfer size,
+ * and the second with 0 size. For Descriptor
+ * DMA mode SW is able to initiate a transfer,
+ * which will handle all the packets including
+ * the last 0 length.
+ */
+ ep->fh_ep.sent_zlp = 0;
+ fh_otg_ep_start_zl_transfer
+ (core_if, &ep->fh_ep);
+ } else {
+ is_last = 1;
+ }
+ } else {
+ if (ep->fh_ep.type ==
+ FH_OTG_EP_TYPE_ISOC) {
+ req->actual = 0;
+ fh_otg_request_done(ep, req, 0);
+
+ ep->fh_ep.start_xfer_buff = 0;
+ ep->fh_ep.xfer_buff = 0;
+ ep->fh_ep.xfer_len = 0;
+
+ /* If there is a request in the queue start it. */
+ start_next_request(ep);
+ } else
+ FH_WARN
+ ("Incomplete transfer (%d - %s [siz=%d pkt=%d])\n",
+ ep->fh_ep.num,
+ (ep->fh_ep.is_in ? "IN" : "OUT"),
+ deptsiz.b.xfersize,
+ deptsiz.b.pktcnt);
+ }
+ } else {
+ dma_desc = ep->fh_ep.desc_addr;
+ byte_count = 0;
+ ep->fh_ep.sent_zlp = 0;
+
+#ifdef FH_UTE_CFI
+ CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
+ ep->fh_ep.buff_mode);
+ if (ep->fh_ep.buff_mode != BM_STANDARD) {
+ int residue;
+
+ residue = cfi_calc_desc_residue(ep);
+ if (residue < 0)
+ return;
+
+ byte_count = residue;
+ } else {
+#endif
+ for (i = 0; i < ep->fh_ep.desc_cnt;
+ ++i) {
+ desc_sts = dma_desc->status;
+ if (ep->fh_ep.type ==
+ FH_OTG_EP_TYPE_ISOC) {
+ byte_count +=
+ desc_sts.b_iso_in.txbytes;
+ } else {
+ byte_count +=
+ desc_sts.b.bytes;
+ }
+ dma_desc++;
+ }
+#ifdef FH_UTE_CFI
+ }
+#endif
+ if (byte_count == 0) {
+ ep->fh_ep.xfer_count =
+ ep->fh_ep.total_len;
+ is_last = 1;
+ } else {
+ FH_WARN("Incomplete transfer\n");
+ }
+ }
+ } else {
+ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) {
+ FH_DEBUGPL(DBG_PCDV,
+ "%d-%s len=%d xfersize=%d pktcnt=%d\n",
+ ep->fh_ep.num,
+ ep->fh_ep.is_in ? "IN" : "OUT",
+ ep->fh_ep.xfer_len,
+ deptsiz.b.xfersize,
+ deptsiz.b.pktcnt);
+
+ /* Check if the whole transfer was completed,
+ * if no, setup transfer for next portion of data
+ */
+ if (ep->fh_ep.xfer_len < ep->fh_ep.total_len) {
+ fh_otg_ep_start_transfer(core_if,
+ &ep->fh_ep);
+ } else if (ep->fh_ep.sent_zlp) {
+ /*
+ * This fragment of code should initiate 0
+ * length transfer in case if it is queued
+ * a transfer with size divisible to EPs max
+ * packet size and with usb_request zero field
+ * is set, which means that after data is transfered,
+ * it is also should be transfered
+ * a 0 length packet at the end. For Slave and
+ * Buffer DMA modes in this case SW has
+ * to initiate 2 transfers one with transfer size,
+ * and the second with 0 size. For Descriptor
+ * DMA mode SW is able to initiate a transfer,
+ * which will handle all the packets including
+ * the last 0 length.
+ */
+ ep->fh_ep.sent_zlp = 0;
+ fh_otg_ep_start_zl_transfer(core_if,
+ &ep->fh_ep);
+ } else {
+ is_last = 1;
+ }
+ } else {
+ FH_WARN
+ ("Incomplete transfer (%d-%s [siz=%d pkt=%d])\n",
+ ep->fh_ep.num,
+ (ep->fh_ep.is_in ? "IN" : "OUT"),
+ deptsiz.b.xfersize, deptsiz.b.pktcnt);
+ }
+ }
+ } else {
+ fh_otg_dev_out_ep_regs_t *out_ep_regs =
+ dev_if->out_ep_regs[ep->fh_ep.num];
+ desc_sts.d32 = 0;
+ if (core_if->dma_enable) {
+ if (core_if->dma_desc_enable) {
+ dma_desc = ep->fh_ep.desc_addr;
+ byte_count = 0;
+ ep->fh_ep.sent_zlp = 0;
+
+#ifdef FH_UTE_CFI
+ CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
+ ep->fh_ep.buff_mode);
+ if (ep->fh_ep.buff_mode != BM_STANDARD) {
+ int residue;
+ residue = cfi_calc_desc_residue(ep);
+ if (residue < 0)
+ return;
+ byte_count = residue;
+ } else {
+#endif
+
+ for (i = 0; i < ep->fh_ep.desc_cnt;
+ ++i) {
+ desc_sts = dma_desc->status;
+ if (ep->fh_ep.type ==
+ FH_OTG_EP_TYPE_ISOC) {
+ byte_count +=
+ desc_sts.b_iso_out.rxbytes;
+ } else {
+ byte_count +=
+ desc_sts.b.bytes;
+ }
+ dma_desc++;
+ }
+
+#ifdef FH_UTE_CFI
+ }
+#endif
+ /* Checking for interrupt Out transfers with not
+ * dword aligned mps sizes
+ */
+ if (ep->fh_ep.type == FH_OTG_EP_TYPE_INTR &&
+ (ep->fh_ep.maxpacket % 4)) {
+ ep->fh_ep.xfer_count =
+ ep->fh_ep.total_len - byte_count;
+ if ((ep->fh_ep.xfer_len %
+ ep->fh_ep.maxpacket)
+ && (ep->fh_ep.xfer_len /
+ ep->fh_ep.maxpacket <
+ MAX_DMA_DESC_CNT))
+ ep->fh_ep.xfer_len -=
+ (ep->fh_ep.desc_cnt -
+ 1) * ep->fh_ep.maxpacket +
+ ep->fh_ep.xfer_len %
+ ep->fh_ep.maxpacket;
+ else
+ ep->fh_ep.xfer_len -=
+ ep->fh_ep.desc_cnt *
+ ep->fh_ep.maxpacket;
+ if (ep->fh_ep.xfer_len > 0) {
+ fh_otg_ep_start_transfer
+ (core_if, &ep->fh_ep);
+ } else {
+ is_last = 1;
+ }
+ } else {
+ ep->fh_ep.xfer_count =
+ ep->fh_ep.total_len - byte_count +
+ ((4 -
+ (ep->fh_ep.
+ total_len & 0x3)) & 0x3);
+ is_last = 1;
+ }
+ } else {
+ deptsiz.d32 = 0;
+ deptsiz.d32 =
+ FH_READ_REG32(&out_ep_regs->doeptsiz);
+
+ byte_count = (ep->fh_ep.xfer_len -
+ ep->fh_ep.xfer_count -
+ deptsiz.b.xfersize);
+ ep->fh_ep.xfer_buff += byte_count;
+ ep->fh_ep.dma_addr += byte_count;
+ ep->fh_ep.xfer_count += byte_count;
+
+ /* Check if the whole transfer was completed,
+ * if no, setup transfer for next portion of data
+ */
+ if (ep->fh_ep.xfer_len < ep->fh_ep.total_len) {
+ fh_otg_ep_start_transfer(core_if,
+ &ep->fh_ep);
+ } else if (ep->fh_ep.sent_zlp) {
+ /*
+ * This fragment of code should initiate 0
+ * length transfer in case if it is queued
+ * a transfer with size divisible to EPs max
+ * packet size and with usb_request zero field
+ * is set, which means that after data is transfered,
+ * it is also should be transfered
+ * a 0 length packet at the end. For Slave and
+ * Buffer DMA modes in this case SW has
+ * to initiate 2 transfers one with transfer size,
+ * and the second with 0 size. For Descriptor
+ * DMA mode SW is able to initiate a transfer,
+ * which will handle all the packets including
+ * the last 0 length.
+ */
+ ep->fh_ep.sent_zlp = 0;
+ fh_otg_ep_start_zl_transfer(core_if,
+ &ep->fh_ep);
+ } else {
+ is_last = 1;
+ }
+ }
+ } else {
+ /* Check if the whole transfer was completed,
+ * if no, setup transfer for next portion of data
+ */
+ if (ep->fh_ep.xfer_len < ep->fh_ep.total_len) {
+ fh_otg_ep_start_transfer(core_if, &ep->fh_ep);
+ } else if (ep->fh_ep.sent_zlp) {
+ /*
+ * This fragment of code should initiate 0
+ * length transfer in case if it is queued
+ * a transfer with size divisible to EPs max
+ * packet size and with usb_request zero field
+ * is set, which means that after data is transfered,
+ * it is also should be transfered
+ * a 0 length packet at the end. For Slave and
+ * Buffer DMA modes in this case SW has
+ * to initiate 2 transfers one with transfer size,
+ * and the second with 0 size. For Descriptor
+ * DMA mode SW is able to initiate a transfer,
+ * which will handle all the packets including
+ * the last 0 length.
+ */
+ ep->fh_ep.sent_zlp = 0;
+ fh_otg_ep_start_zl_transfer(core_if,
+ &ep->fh_ep);
+ } else {
+ is_last = 1;
+ }
+ }
+
+ FH_DEBUGPL(DBG_PCDV,
+ "addr %p, %d-%s len=%d cnt=%d xsize=%d pktcnt=%d\n",
+ &out_ep_regs->doeptsiz, ep->fh_ep.num,
+ ep->fh_ep.is_in ? "IN" : "OUT",
+ ep->fh_ep.xfer_len, ep->fh_ep.xfer_count,
+ deptsiz.b.xfersize, deptsiz.b.pktcnt);
+ }
+
+ /* Complete the request */
+ if (is_last) {
+#ifdef FH_UTE_CFI
+ if (ep->fh_ep.buff_mode != BM_STANDARD) {
+ req->actual = ep->fh_ep.cfi_req_len - byte_count;
+ } else {
+#endif
+ req->actual = ep->fh_ep.xfer_count;
+#ifdef FH_UTE_CFI
+ }
+#endif
+ if (req->dw_align_buf) {
+ if (!ep->fh_ep.is_in) {
+ fh_memcpy(req->buf, req->dw_align_buf, req->length);
+ }
+ FH_DMA_FREE(req->length, req->dw_align_buf,
+ req->dw_align_buf_dma);
+ }
+
+ fh_otg_request_done(ep, req, 0);
+
+ ep->fh_ep.start_xfer_buff = 0;
+ ep->fh_ep.xfer_buff = 0;
+ ep->fh_ep.xfer_len = 0;
+
+ /* If there is a request in the queue start it. */
+ start_next_request(ep);
+ }
+}
+/**
+ * This function completes the request for an ISO EP in DDMA mode. If this is
+ * the last descriptor and the EP was disabled, the descriptor chain already
+ * prepared during ep_queue is programmed when more requests are pending.
+ */
+static void complete_ddma_iso_ep(fh_otg_pcd_ep_t * ep)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
+ dev_dma_desc_sts_t desc_sts;
+ fh_otg_pcd_request_t *req = 0;
+ fh_otg_dev_dma_desc_t *dma_desc;
+ fh_dma_t dma_desc_addr;
+ fh_ep_t *fh_ep;
+ uint32_t depdma;
+ uint32_t index;
+
+ FH_DEBUGPL(DBG_PCDV, "%s() %d-%s\n", __func__, ep->fh_ep.num,
+ (ep->fh_ep.is_in ? "IN" : "OUT"));
+ fh_ep = &ep->fh_ep;
+ if (fh_ep->use_add_buf) {
+ dma_desc_addr = fh_ep->dma_desc_addr;
+ dma_desc = fh_ep->desc_addr;
+ } else {
+ dma_desc_addr = fh_ep->dma_desc_addr1;
+ dma_desc = fh_ep->desc_addr1;
+ }
+ /* Get any pending requests */
+ if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+ if (!req) {
+ FH_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
+ return;
+ }
+ } else {
+ FH_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
+ return;
+ }
+
+ if (fh_ep->is_in) {
+ depdma = FH_READ_REG32(&core_if->dev_if->in_ep_regs[fh_ep->num]->diepdma);
+ index = (depdma - dma_desc_addr)/sizeof(fh_otg_dev_dma_desc_t) - 1;
+ desc_sts = dma_desc[index].status;
+ req->actual = req->length - desc_sts.b_iso_in.txbytes;
+ } else {
+ depdma = FH_READ_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doepdma);
+ index = (depdma - dma_desc_addr)/sizeof(fh_otg_dev_dma_desc_t) - 1;
+ desc_sts = dma_desc[index].status;
+ if (req->length%4)
+ req->actual = req->length - desc_sts.b_iso_out.rxbytes + (4 - req->length%4);
+ else
+ req->actual = req->length - desc_sts.b_iso_out.rxbytes;
+ }
+
+ /* Complete the request */
+ fh_otg_request_done(ep, req, 0);
+}
+
+#ifdef FH_EN_ISOC
+
+/**
+ * This function handles the BNA (Buffer Not Available) interrupt for
+ * isochronous EPs: all descriptors are set back to BS_HOST_READY and the
+ * endpoint is re-enabled. */
+static void fh_otg_pcd_handle_iso_bna(fh_otg_pcd_ep_t * ep)
+{
+ fh_ep_t *fh_ep = &ep->fh_ep;
+ volatile uint32_t *addr;
+ depctl_data_t depctl = {.d32 = 0 };
+ fh_otg_pcd_t *pcd = ep->pcd;
+ fh_otg_dev_dma_desc_t *dma_desc;
+ int i;
+
+ dma_desc =
+ fh_ep->iso_desc_addr + fh_ep->desc_cnt * (fh_ep->proc_buf_num);
+
+ if (fh_ep->is_in) {
+ dev_dma_desc_sts_t sts = {.d32 = 0 };
+ for (i = 0; i < fh_ep->desc_cnt; ++i, ++dma_desc) {
+ sts.d32 = dma_desc->status.d32;
+ sts.b_iso_in.bs = BS_HOST_READY;
+ dma_desc->status.d32 = sts.d32;
+ }
+ } else {
+ dev_dma_desc_sts_t sts = {.d32 = 0 };
+ for (i = 0; i < fh_ep->desc_cnt; ++i, ++dma_desc) {
+ sts.d32 = dma_desc->status.d32;
+ sts.b_iso_out.bs = BS_HOST_READY;
+ dma_desc->status.d32 = sts.d32;
+ }
+ }
+
+ if (fh_ep->is_in == 0) {
+ addr =
+ &GET_CORE_IF(pcd)->dev_if->out_ep_regs[fh_ep->
+ num]->doepctl;
+ } else {
+ addr =
+ &GET_CORE_IF(pcd)->dev_if->in_ep_regs[fh_ep->num]->diepctl;
+ }
+ depctl.b.epena = 1;
+ FH_MODIFY_REG32(addr, depctl.d32, depctl.d32);
+}
+
+/**
+ * This function sets the latest iso packet information (non-PTI mode).
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ *
+ */
+void set_current_pkt_info(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+ deptsiz_data_t deptsiz = {.d32 = 0 };
+ dma_addr_t dma_addr;
+ uint32_t offset;
+
+ if (ep->proc_buf_num)
+ dma_addr = ep->dma_addr1;
+ else
+ dma_addr = ep->dma_addr0;
+
+ if (ep->is_in) {
+ deptsiz.d32 =
+ FH_READ_REG32(&core_if->dev_if->
+ in_ep_regs[ep->num]->dieptsiz);
+ offset = ep->data_per_frame;
+ } else {
+ deptsiz.d32 =
+ FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[ep->num]->doeptsiz);
+ offset =
+ ep->data_per_frame +
+ (0x4 & (0x4 - (ep->data_per_frame & 0x3)));
+ }
+
+ if (!deptsiz.b.xfersize) {
+ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
+ ep->pkt_info[ep->cur_pkt].offset =
+ ep->cur_pkt_dma_addr - dma_addr;
+ ep->pkt_info[ep->cur_pkt].status = 0;
+ } else {
+ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
+ ep->pkt_info[ep->cur_pkt].offset =
+ ep->cur_pkt_dma_addr - dma_addr;
+ ep->pkt_info[ep->cur_pkt].status = -FH_E_NO_DATA;
+ }
+ ep->cur_pkt_addr += offset;
+ ep->cur_pkt_dma_addr += offset;
+ ep->cur_pkt++;
+}
+
+/**
+ * This function sets the latest iso packet information (DDMA mode).
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param fh_ep The EP to start the transfer on.
+ *
+ */
+static void set_ddma_iso_pkts_info(fh_otg_core_if_t * core_if,
+ fh_ep_t * fh_ep)
+{
+ fh_otg_dev_dma_desc_t *dma_desc;
+ dev_dma_desc_sts_t sts = {.d32 = 0 };
+ iso_pkt_info_t *iso_packet;
+ uint32_t data_per_desc;
+ uint32_t offset;
+ int i, j;
+
+ iso_packet = fh_ep->pkt_info;
+
+ /** Reinit closed DMA Descriptors*/
+ /** ISO OUT EP */
+ if (fh_ep->is_in == 0) {
+ dma_desc =
+ fh_ep->iso_desc_addr +
+ fh_ep->desc_cnt * fh_ep->proc_buf_num;
+ offset = 0;
+
+ for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
+ i += fh_ep->pkt_per_frm) {
+ for (j = 0; j < fh_ep->pkt_per_frm; ++j) {
+ data_per_desc =
+ ((j + 1) * fh_ep->maxpacket >
+ fh_ep->
+ data_per_frame) ? fh_ep->data_per_frame -
+ j * fh_ep->maxpacket : fh_ep->maxpacket;
+ data_per_desc +=
+ (data_per_desc % 4) ? (4 -
+ data_per_desc %
+ 4) : 0;
+
+ sts.d32 = dma_desc->status.d32;
+
+ /* Write status in iso_packet_descriptor */
+ iso_packet->status =
+ sts.b_iso_out.rxsts +
+ (sts.b_iso_out.bs ^ BS_DMA_DONE);
+ if (iso_packet->status) {
+ iso_packet->status = -FH_E_NO_DATA;
+ }
+
+ /* Received data length */
+ if (!sts.b_iso_out.rxbytes) {
+ iso_packet->length =
+ data_per_desc -
+ sts.b_iso_out.rxbytes;
+ } else {
+ iso_packet->length =
+ data_per_desc -
+ sts.b_iso_out.rxbytes + (4 -
+ fh_ep->data_per_frame
+ % 4);
+ }
+
+ iso_packet->offset = offset;
+
+ offset += data_per_desc;
+ dma_desc++;
+ iso_packet++;
+ }
+ }
+
+ for (j = 0; j < fh_ep->pkt_per_frm - 1; ++j) {
+ data_per_desc =
+ ((j + 1) * fh_ep->maxpacket >
+ fh_ep->data_per_frame) ? fh_ep->data_per_frame -
+ j * fh_ep->maxpacket : fh_ep->maxpacket;
+ data_per_desc +=
+ (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
+
+ sts.d32 = dma_desc->status.d32;
+
+ /* Write status in iso_packet_descriptor */
+ iso_packet->status =
+ sts.b_iso_out.rxsts +
+ (sts.b_iso_out.bs ^ BS_DMA_DONE);
+ if (iso_packet->status) {
+ iso_packet->status = -FH_E_NO_DATA;
+ }
+
+ /* Received data length */
+ iso_packet->length =
+ fh_ep->data_per_frame - sts.b_iso_out.rxbytes;
+
+ iso_packet->offset = offset;
+
+ offset += data_per_desc;
+ iso_packet++;
+ dma_desc++;
+ }
+
+ sts.d32 = dma_desc->status.d32;
+
+ /* Write status in iso_packet_descriptor */
+ iso_packet->status =
+ sts.b_iso_out.rxsts + (sts.b_iso_out.bs ^ BS_DMA_DONE);
+ if (iso_packet->status) {
+ iso_packet->status = -FH_E_NO_DATA;
+ }
+ /* Received data length */
+ if (!sts.b_iso_out.rxbytes) {
+ iso_packet->length =
+ fh_ep->data_per_frame - sts.b_iso_out.rxbytes;
+ } else {
+ iso_packet->length =
+ fh_ep->data_per_frame - sts.b_iso_out.rxbytes +
+ (4 - fh_ep->data_per_frame % 4);
+ }
+
+ iso_packet->offset = offset;
+ } else {
+/** ISO IN EP */
+
+ dma_desc =
+ fh_ep->iso_desc_addr +
+ fh_ep->desc_cnt * fh_ep->proc_buf_num;
+
+ for (i = 0; i < fh_ep->desc_cnt - 1; i++) {
+ sts.d32 = dma_desc->status.d32;
+
+ /* Write status in iso packet descriptor */
+ iso_packet->status =
+ sts.b_iso_in.txsts +
+ (sts.b_iso_in.bs ^ BS_DMA_DONE);
+ if (iso_packet->status != 0) {
+ iso_packet->status = -FH_E_NO_DATA;
+
+ }
+ /* Number of bytes that have been transferred */
+ iso_packet->length =
+ fh_ep->data_per_frame - sts.b_iso_in.txbytes;
+
+ dma_desc++;
+ iso_packet++;
+ }
+
+ sts.d32 = dma_desc->status.d32;
+ while (sts.b_iso_in.bs == BS_DMA_BUSY) {
+ sts.d32 = dma_desc->status.d32;
+ }
+
+ /* Write status in iso packet descriptor ??? to be done with ERROR codes */
+ iso_packet->status =
+ sts.b_iso_in.txsts + (sts.b_iso_in.bs ^ BS_DMA_DONE);
+ if (iso_packet->status != 0) {
+ iso_packet->status = -FH_E_NO_DATA;
+ }
+
+ /* Number of bytes that have been transferred */
+ iso_packet->length =
+ fh_ep->data_per_frame - sts.b_iso_in.txbytes;
+ }
+}
+
+/**
+ * This function reinitialize DMA Descriptors for Isochronous transfer
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param fh_ep The EP to start the transfer on.
+ *
+ */
+static void reinit_ddma_iso_xfer(fh_otg_core_if_t * core_if, fh_ep_t * fh_ep)
+{
+	int i, j;
+	fh_otg_dev_dma_desc_t *dma_desc;
+	dma_addr_t dma_ad;
+	volatile uint32_t *addr;
+	dev_dma_desc_sts_t sts = {.d32 = 0 };
+	uint32_t data_per_desc;
+
+	/* Select the EP control register for the transfer direction.
+	 * NOTE(review): 'addr' is assigned here but never read below --
+	 * looks like leftover code; confirm before removing. */
+	if (fh_ep->is_in == 0) {
+		addr = &core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl;
+	} else {
+		addr = &core_if->dev_if->in_ep_regs[fh_ep->num]->diepctl;
+	}
+
+	/* Double-buffered ISO: pick the data buffer being re-armed. */
+	if (fh_ep->proc_buf_num == 0) {
+		/** Buffer 0 descriptors setup */
+		dma_ad = fh_ep->dma_addr0;
+	} else {
+		/** Buffer 1 descriptors setup */
+		dma_ad = fh_ep->dma_addr1;
+	}
+
+	/** Reinit closed DMA Descriptors*/
+	/** ISO OUT EP */
+	if (fh_ep->is_in == 0) {
+		/* Descriptor chain for this buffer: buffer 1's chain starts
+		 * desc_cnt entries after buffer 0's. */
+		dma_desc =
+		    fh_ep->iso_desc_addr +
+		    fh_ep->desc_cnt * fh_ep->proc_buf_num;
+
+		/* Common template: host-ready, no status, no IOC/last. */
+		sts.b_iso_out.bs = BS_HOST_READY;
+		sts.b_iso_out.rxsts = 0;
+		sts.b_iso_out.l = 0;
+		sts.b_iso_out.sp = 0;
+		sts.b_iso_out.ioc = 0;
+		sts.b_iso_out.pid = 0;
+		sts.b_iso_out.framenum = 0;
+
+		/* All frames except the last: one descriptor per packet.
+		 * Each packet is maxpacket bytes except the frame tail,
+		 * and rxbytes is rounded up to a 4-byte multiple. */
+		for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
+		     i += fh_ep->pkt_per_frm) {
+			for (j = 0; j < fh_ep->pkt_per_frm; ++j) {
+				data_per_desc =
+				    ((j + 1) * fh_ep->maxpacket >
+				     fh_ep->
+				     data_per_frame) ? fh_ep->data_per_frame -
+				    j * fh_ep->maxpacket : fh_ep->maxpacket;
+				data_per_desc +=
+				    (data_per_desc % 4) ? (4 -
+							   data_per_desc %
+							   4) : 0;
+				sts.b_iso_out.rxbytes = data_per_desc;
+				dma_desc->buf = dma_ad;
+				dma_desc->status.d32 = sts.d32;
+
+				dma_ad += data_per_desc;
+				dma_desc++;
+			}
+		}
+
+		/* Last frame: all packets except its final one. */
+		for (j = 0; j < fh_ep->pkt_per_frm - 1; ++j) {
+
+			data_per_desc =
+			    ((j + 1) * fh_ep->maxpacket >
+			     fh_ep->data_per_frame) ? fh_ep->data_per_frame -
+			    j * fh_ep->maxpacket : fh_ep->maxpacket;
+			data_per_desc +=
+			    (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
+			sts.b_iso_out.rxbytes = data_per_desc;
+
+			dma_desc->buf = dma_ad;
+			dma_desc->status.d32 = sts.d32;
+
+			dma_desc++;
+			dma_ad += data_per_desc;
+		}
+
+		/* Final descriptor of the chain: raise IOC; 'l' (last) is
+		 * set from proc_buf_num (set only for buffer 1). */
+		sts.b_iso_out.ioc = 1;
+		sts.b_iso_out.l = fh_ep->proc_buf_num;
+
+		data_per_desc =
+		    ((j + 1) * fh_ep->maxpacket >
+		     fh_ep->data_per_frame) ? fh_ep->data_per_frame -
+		    j * fh_ep->maxpacket : fh_ep->maxpacket;
+		data_per_desc +=
+		    (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
+		sts.b_iso_out.rxbytes = data_per_desc;
+
+		dma_desc->buf = dma_ad;
+		dma_desc->status.d32 = sts.d32;
+	} else {
+/** ISO IN EP */
+
+		/* Same chain selection as the OUT case. */
+		dma_desc =
+		    fh_ep->iso_desc_addr +
+		    fh_ep->desc_cnt * fh_ep->proc_buf_num;
+
+		/* One descriptor per frame: pid carries packets-per-frame,
+		 * framenum schedules the (micro)frame for each descriptor. */
+		sts.b_iso_in.bs = BS_HOST_READY;
+		sts.b_iso_in.txsts = 0;
+		sts.b_iso_in.sp = 0;
+		sts.b_iso_in.ioc = 0;
+		sts.b_iso_in.pid = fh_ep->pkt_per_frm;
+		sts.b_iso_in.framenum = fh_ep->next_frame;
+		sts.b_iso_in.txbytes = fh_ep->data_per_frame;
+		sts.b_iso_in.l = 0;
+
+		for (i = 0; i < fh_ep->desc_cnt - 1; i++) {
+			dma_desc->buf = dma_ad;
+			dma_desc->status.d32 = sts.d32;
+
+			/* Next descriptor services a frame bInterval later. */
+			sts.b_iso_in.framenum += fh_ep->bInterval;
+			dma_ad += fh_ep->data_per_frame;
+			dma_desc++;
+		}
+
+		/* Last descriptor: interrupt on completion. */
+		sts.b_iso_in.ioc = 1;
+		sts.b_iso_in.l = fh_ep->proc_buf_num;
+
+		dma_desc->buf = dma_ad;
+		dma_desc->status.d32 = sts.d32;
+
+		/* Remember where the next buffer's schedule must start. */
+		fh_ep->next_frame =
+		    sts.b_iso_in.framenum + fh_ep->bInterval * 1;
+	}
+	/* Flip the ping-pong buffer index for the next completion. */
+	fh_ep->proc_buf_num = (fh_ep->proc_buf_num ^ 1) & 0x1;
+}
+
+/**
+ * This function is to handle Iso EP transfer complete interrupt
+ * in case Iso out packet was dropped
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param fh_ep The EP for which transfer complete was asserted
+ *
+ */
+static uint32_t handle_iso_out_pkt_dropped(fh_otg_core_if_t * core_if,
+					   fh_ep_t * fh_ep)
+{
+	uint32_t dma_addr;
+	uint32_t drp_pkt;
+	uint32_t drp_pkt_cnt;
+	deptsiz_data_t deptsiz = {.d32 = 0 };
+	depctl_data_t depctl = {.d32 = 0 };
+	int i;
+
+	deptsiz.d32 =
+	    FH_READ_REG32(&core_if->dev_if->
+			  out_ep_regs[fh_ep->num]->doeptsiz);
+
+	/* Index of the first dropped packet, and the number of packets
+	 * dropped through the end of that frame. */
+	drp_pkt = fh_ep->pkt_cnt - deptsiz.b.pktcnt;
+	drp_pkt_cnt = fh_ep->pkt_per_frm - (drp_pkt % fh_ep->pkt_per_frm);
+
+	/* Setting dropped packets status */
+	for (i = 0; i < drp_pkt_cnt; ++i) {
+		fh_ep->pkt_info[drp_pkt].status = -FH_E_NO_DATA;
+		drp_pkt++;
+		deptsiz.b.pktcnt--;
+	}
+
+	/* Remaining transfer size after discounting the packets already
+	 * accounted for; zero when nothing is left to receive. */
+	if (deptsiz.b.pktcnt > 0) {
+		deptsiz.b.xfersize =
+		    fh_ep->xfer_len - (fh_ep->pkt_cnt -
+				       deptsiz.b.pktcnt) * fh_ep->maxpacket;
+	} else {
+		deptsiz.b.xfersize = 0;
+		deptsiz.b.pktcnt = 0;
+	}
+
+	FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doeptsiz,
+		       deptsiz.d32);
+
+	if (deptsiz.b.pktcnt > 0) {
+		/* Resume DMA in the active ping-pong buffer just past the
+		 * data already received. (Fixed stray ";;" here.) */
+		if (fh_ep->proc_buf_num) {
+			dma_addr =
+			    fh_ep->dma_addr1 + fh_ep->xfer_len -
+			    deptsiz.b.xfersize;
+		} else {
+			dma_addr =
+			    fh_ep->dma_addr0 + fh_ep->xfer_len -
+			    deptsiz.b.xfersize;
+		}
+
+		FH_WRITE_REG32(&core_if->dev_if->
+			       out_ep_regs[fh_ep->num]->doepdma, dma_addr);
+
+		/** Re-enable endpoint, clear nak */
+		depctl.d32 = 0;
+		depctl.b.epena = 1;
+		depctl.b.cnak = 1;
+
+		FH_MODIFY_REG32(&core_if->dev_if->
+				out_ep_regs[fh_ep->num]->doepctl, depctl.d32,
+				depctl.d32);
+		/* 0: transfer restarted; buffer is not complete yet. */
+		return 0;
+	} else {
+		/* 1: buffer fully processed, caller completes it. */
+		return 1;
+	}
+}
+
+/**
+ * This function sets iso packets information(PTI mode)
+ *
+ * @param core_if Programming view of FH_otg controller.
+ * @param ep The EP to start the transfer on.
+ *
+ */
+static uint32_t set_iso_pkts_info(fh_otg_core_if_t * core_if, fh_ep_t * ep)
+{
+	int i, j;
+	dma_addr_t dma_ad;
+	iso_pkt_info_t *packet_info = ep->pkt_info;
+	uint32_t offset;
+	uint32_t frame_data;
+	deptsiz_data_t deptsiz;
+
+	/* NOTE(review): dma_ad is selected below but not read afterwards
+	 * in this function -- confirm before removing. */
+	if (ep->proc_buf_num == 0) {
+		/** Buffer 0 descriptors setup */
+		dma_ad = ep->dma_addr0;
+	} else {
+		/** Buffer 1 descriptors setup */
+		dma_ad = ep->dma_addr1;
+	}
+
+	/* Read the residual transfer size for the proper direction. */
+	if (ep->is_in) {
+		deptsiz.d32 =
+		    FH_READ_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
+				  dieptsiz);
+	} else {
+		deptsiz.d32 =
+		    FH_READ_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
+				  doeptsiz);
+	}
+
+	/* xfersize == 0 means the whole buffer completed normally:
+	 * fill in length/offset for every packet. */
+	if (!deptsiz.b.xfersize) {
+		offset = 0;
+		for (i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) {
+			frame_data = ep->data_per_frame;
+			for (j = 0; j < ep->pkt_per_frm; ++j) {
+
+				/* Packet status - is not set as initially
+				 * it is set to 0 and if packet was sent
+				 successfully, status field will remain 0*/
+
+				/* Bytes has been transfered */
+				packet_info->length =
+				    (ep->maxpacket <
+				     frame_data) ? ep->maxpacket : frame_data;
+
+				/* Received packet offset */
+				packet_info->offset = offset;
+				offset += packet_info->length;
+				frame_data -= packet_info->length;
+
+				packet_info++;
+			}
+		}
+		return 1;
+	} else {
+		/* This is a workaround for in case of Transfer Complete with
+		 * PktDrpSts interrupts merging - in this case Transfer complete
+		 * interrupt for Isoc Out Endpoint is asserted without PktDrpSts
+		 * set and with DOEPTSIZ register non zero. Investigations showed,
+		 * that this happens when Out packet is dropped, but because of
+		 * interrupts merging during first interrupt handling PktDrpSts
+		 * bit is cleared and for next merged interrupts it is not reset.
+		 * In this case SW hadles the interrupt as if PktDrpSts bit is set.
+		 */
+		if (ep->is_in) {
+			return 1;
+		} else {
+			return handle_iso_out_pkt_dropped(core_if, ep);
+		}
+	}
+}
+
+/**
+ * This function is to handle Iso EP transfer complete interrupt
+ *
+ * @param pcd The PCD
+ * @param ep The EP for which transfer complete was asserted
+ *
+ */
+static void complete_iso_ep(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * ep)
+{
+	fh_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
+	fh_ep_t *fh_ep = &ep->fh_ep;
+	uint8_t is_last = 0;
+
+	/* A frame schedule must have been established first. */
+	if (ep->fh_ep.next_frame == 0xffffffff) {
+		FH_WARN("Next frame is not set!\n");
+		return;
+	}
+
+	/* Dispatch on DMA mode:
+	 *  - descriptor DMA: collect packet statuses, re-arm descriptors;
+	 *  - buffer DMA + PTI: per-buffer bookkeeping, restart buffer;
+	 *  - buffer DMA / slave: per-packet bookkeeping, restart frame. */
+	if (core_if->dma_enable) {
+		if (core_if->dma_desc_enable) {
+			set_ddma_iso_pkts_info(core_if, fh_ep);
+			reinit_ddma_iso_xfer(core_if, fh_ep);
+			is_last = 1;
+		} else {
+			if (core_if->pti_enh_enable) {
+				if (set_iso_pkts_info(core_if, fh_ep)) {
+					/* Buffer done: flip ping-pong buffer
+					 * and start the next one. */
+					fh_ep->proc_buf_num =
+					    (fh_ep->proc_buf_num ^ 1) & 0x1;
+					fh_otg_iso_ep_start_buf_transfer
+					    (core_if, fh_ep);
+					is_last = 1;
+				}
+			} else {
+				set_current_pkt_info(core_if, fh_ep);
+				if (fh_ep->cur_pkt >= fh_ep->pkt_cnt) {
+					/* All packets of this buffer done:
+					 * switch to the other buffer. */
+					is_last = 1;
+					fh_ep->cur_pkt = 0;
+					fh_ep->proc_buf_num =
+					    (fh_ep->proc_buf_num ^ 1) & 0x1;
+					if (fh_ep->proc_buf_num) {
+						fh_ep->cur_pkt_addr =
+						    fh_ep->xfer_buff1;
+						fh_ep->cur_pkt_dma_addr =
+						    fh_ep->dma_addr1;
+					} else {
+						fh_ep->cur_pkt_addr =
+						    fh_ep->xfer_buff0;
+						fh_ep->cur_pkt_dma_addr =
+						    fh_ep->dma_addr0;
+					}
+
+				}
+				fh_otg_iso_ep_start_frm_transfer(core_if,
+								 fh_ep);
+			}
+		}
+	} else {
+		/* Slave (PIO) mode: same per-packet flow as non-PTI DMA. */
+		set_current_pkt_info(core_if, fh_ep);
+		if (fh_ep->cur_pkt >= fh_ep->pkt_cnt) {
+			is_last = 1;
+			fh_ep->cur_pkt = 0;
+			fh_ep->proc_buf_num = (fh_ep->proc_buf_num ^ 1) & 0x1;
+			if (fh_ep->proc_buf_num) {
+				fh_ep->cur_pkt_addr = fh_ep->xfer_buff1;
+				fh_ep->cur_pkt_dma_addr = fh_ep->dma_addr1;
+			} else {
+				fh_ep->cur_pkt_addr = fh_ep->xfer_buff0;
+				fh_ep->cur_pkt_dma_addr = fh_ep->dma_addr0;
+			}
+
+		}
+		fh_otg_iso_ep_start_frm_transfer(core_if, fh_ep);
+	}
+	/* Hand the finished buffer back to the gadget layer. */
+	if (is_last)
+		fh_otg_iso_buffer_done(pcd, ep, ep->iso_req_handle);
+}
+#endif /* FH_EN_ISOC */
+
+/**
+ * This function handle BNA interrupt for Non Isochronous EPs
+ *
+ */
+static void fh_otg_pcd_handle_noniso_bna(fh_otg_pcd_ep_t * ep)
+{
+	fh_ep_t *fh_ep = &ep->fh_ep;
+	volatile uint32_t *addr;
+	depctl_data_t depctl = {.d32 = 0 };
+	fh_otg_pcd_t *pcd = ep->pcd;
+	fh_otg_dev_dma_desc_t *dma_desc;
+	dev_dma_desc_sts_t sts = {.d32 = 0 };
+	fh_otg_core_if_t *core_if = ep->pcd->core_if;
+	int i, start;
+
+	/* BNA with no descriptors programmed is unexpected; warn only. */
+	if (!fh_ep->desc_cnt)
+		FH_WARN("Ep%d %s Descriptor count = %d \n", fh_ep->num,
+			(fh_ep->is_in ? "IN" : "OUT"), fh_ep->desc_cnt);
+
+	/* With cont_on_bna on a non-control OUT EP, resume from the
+	 * descriptor DOEPDMA currently points at instead of descriptor 0. */
+	if (core_if->core_params->cont_on_bna && !fh_ep->is_in
+	    && fh_ep->type != FH_OTG_EP_TYPE_CONTROL) {
+		uint32_t doepdma;
+		fh_otg_dev_out_ep_regs_t *out_regs =
+		    core_if->dev_if->out_ep_regs[fh_ep->num];
+		doepdma = FH_READ_REG32(&(out_regs->doepdma));
+		start = (doepdma - fh_ep->dma_desc_addr)/sizeof(fh_otg_dev_dma_desc_t);
+		dma_desc = &(fh_ep->desc_addr[start]);
+	} else {
+		start = 0;
+		dma_desc = fh_ep->desc_addr;
+	}
+
+
+	/* Mark the remaining descriptors host-ready again. */
+	for (i = start; i < fh_ep->desc_cnt; ++i, ++dma_desc) {
+		sts.d32 = dma_desc->status.d32;
+		sts.b.bs = BS_HOST_READY;
+		dma_desc->status.d32 = sts.d32;
+	}
+
+	/* Re-enable the endpoint and clear NAK to restart the transfer. */
+	if (fh_ep->is_in == 0) {
+		addr =
+		    &GET_CORE_IF(pcd)->dev_if->out_ep_regs[fh_ep->num]->
+		    doepctl;
+	} else {
+		addr =
+		    &GET_CORE_IF(pcd)->dev_if->in_ep_regs[fh_ep->num]->diepctl;
+	}
+	depctl.b.epena = 1;
+	depctl.b.cnak = 1;
+	FH_MODIFY_REG32(addr, 0, depctl.d32);
+}
+
+/**
+ * This function handles EP0 Control transfers.
+ *
+ * The state of the control transfers are tracked in
+ * <code>ep0state</code>.
+ */
+static void handle_ep0(fh_otg_pcd_t * pcd)
+{
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
+	dev_dma_desc_sts_t desc_sts;
+	deptsiz0_data_t deptsiz;
+	uint32_t byte_count;
+
+#ifdef DEBUG_EP0
+	FH_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
+	print_ep0_state(pcd);
+#endif
+
+	/* Drive the EP0 control-transfer state machine (pcd->ep0state). */
+	switch (pcd->ep0state) {
+	case EP0_DISCONNECT:
+		break;
+
+	case EP0_IDLE:
+		/* A new SETUP packet arrived: decode it. */
+		pcd->request_config = 0;
+
+		pcd_setup(pcd);
+		break;
+
+	case EP0_IN_DATA_PHASE:
+#ifdef DEBUG_EP0
+		FH_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
+			   ep0->fh_ep.num, (ep0->fh_ep.is_in ? "IN" : "OUT"),
+			   ep0->fh_ep.type, ep0->fh_ep.maxpacket);
+#endif
+
+		if (core_if->dma_enable != 0) {
+			/*
+			 * For EP0 we can only program 1 packet at a time so we
+			 * need to do the make calculations after each complete.
+			 * Call write_packet to make the calculations, as in
+			 * slave mode, and use those values to determine if we
+			 * can complete.
+			 */
+			/* Bytes actually sent = programmed - residual,
+			 * read from DIEPTSIZ (buffer DMA) or the IN
+			 * descriptor status (descriptor DMA). */
+			if (core_if->dma_desc_enable == 0) {
+				deptsiz.d32 =
+				    FH_READ_REG32(&core_if->
+						  dev_if->in_ep_regs[0]->
+						  dieptsiz);
+				byte_count =
+				    ep0->fh_ep.xfer_len - deptsiz.b.xfersize;
+			} else {
+				desc_sts =
+				    core_if->dev_if->in_desc_addr->status;
+				byte_count =
+				    ep0->fh_ep.xfer_len - desc_sts.b.bytes;
+			}
+			ep0->fh_ep.xfer_count += byte_count;
+			ep0->fh_ep.xfer_buff += byte_count;
+			ep0->fh_ep.dma_addr += byte_count;
+		}
+		/* More data, a pending ZLP, or the request is done. */
+		if (ep0->fh_ep.xfer_count < ep0->fh_ep.total_len) {
+			fh_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
+						     &ep0->fh_ep);
+			FH_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
+		} else if (ep0->fh_ep.sent_zlp) {
+			fh_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
+						     &ep0->fh_ep);
+			ep0->fh_ep.sent_zlp = 0;
+			FH_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER sent zlp\n");
+		} else {
+			ep0_complete_request(ep0);
+			FH_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
+		}
+		break;
+	case EP0_OUT_DATA_PHASE:
+#ifdef DEBUG_EP0
+		FH_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n",
+			   ep0->fh_ep.num, (ep0->fh_ep.is_in ? "IN" : "OUT"),
+			   ep0->fh_ep.type, ep0->fh_ep.maxpacket);
+#endif
+		if (core_if->dma_enable != 0) {
+			/* Same residual math as the IN phase, but relative
+			 * to one maxpacket-sized OUT buffer. */
+			if (core_if->dma_desc_enable == 0) {
+				deptsiz.d32 =
+				    FH_READ_REG32(&core_if->
+						  dev_if->out_ep_regs[0]->
+						  doeptsiz);
+				byte_count =
+				    ep0->fh_ep.maxpacket - deptsiz.b.xfersize;
+			} else {
+				desc_sts =
+				    core_if->dev_if->out_desc_addr->status;
+				byte_count =
+				    ep0->fh_ep.maxpacket - desc_sts.b.bytes;
+			}
+			ep0->fh_ep.xfer_count += byte_count;
+			ep0->fh_ep.xfer_buff += byte_count;
+			ep0->fh_ep.dma_addr += byte_count;
+		}
+		if (ep0->fh_ep.xfer_count < ep0->fh_ep.total_len) {
+			fh_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
+						     &ep0->fh_ep);
+			FH_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
+		} else if (ep0->fh_ep.sent_zlp) {
+			fh_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
+						     &ep0->fh_ep);
+			ep0->fh_ep.sent_zlp = 0;
+			FH_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER sent zlp\n");
+		} else {
+			ep0_complete_request(ep0);
+			FH_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
+		}
+		break;
+
+	case EP0_IN_STATUS_PHASE:
+	case EP0_OUT_STATUS_PHASE:
+		/* Status stage done: return to IDLE and re-arm for SETUP. */
+		FH_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n");
+		ep0_complete_request(ep0);
+		pcd->ep0state = EP0_IDLE;
+		ep0->stopped = 1;
+		ep0->fh_ep.is_in = 0;	/* OUT for next SETUP */
+
+		/* Prepare for more SETUP Packets */
+		if (core_if->dma_enable) {
+			ep0_out_start(core_if, pcd);
+		}
+		break;
+
+	case EP0_STALL:
+		FH_ERROR("EP0 STALLed, should not get here pcd_setup()\n");
+		break;
+	}
+#ifdef DEBUG_EP0
+	print_ep0_state(pcd);
+#endif
+}
+
+/**
+ * Restart transfer
+ */
+static void restart_transfer(fh_otg_pcd_t * pcd, const uint32_t epnum)
+{
+	fh_otg_core_if_t *core_if;
+	fh_otg_dev_if_t *dev_if;
+	deptsiz_data_t dieptsiz = {.d32 = 0 };
+	fh_otg_pcd_ep_t *ep;
+
+	ep = get_in_ep(pcd, epnum);
+
+#ifdef FH_EN_ISOC
+	/* ISO endpoints are restarted by the ISO completion path. */
+	if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
+		return;
+	}
+#endif /* FH_EN_ISOC */
+
+	core_if = GET_CORE_IF(pcd);
+	dev_if = core_if->dev_if;
+
+	dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dieptsiz);
+
+	FH_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x xfer_len=%0x"
+		   " stopped=%d\n", ep->fh_ep.xfer_buff,
+		   ep->fh_ep.xfer_count, ep->fh_ep.xfer_len, ep->stopped);
+	/*
+	 * If xfersize is 0 and pktcnt in not 0, resend the last packet.
+	 */
+	if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 &&
+	    ep->fh_ep.start_xfer_buff != 0) {
+		/* Rewind the software transfer state by one packet.
+		 * NOTE(review): the single-packet branch sets xfer_len to
+		 * the just-zeroed xfer_count (i.e. 0) -- confirm this is
+		 * the intended restart length. */
+		if (ep->fh_ep.total_len <= ep->fh_ep.maxpacket) {
+			ep->fh_ep.xfer_count = 0;
+			ep->fh_ep.xfer_buff = ep->fh_ep.start_xfer_buff;
+			ep->fh_ep.xfer_len = ep->fh_ep.xfer_count;
+		} else {
+			ep->fh_ep.xfer_count -= ep->fh_ep.maxpacket;
+			/* convert packet size to dwords. */
+			ep->fh_ep.xfer_buff -= ep->fh_ep.maxpacket;
+			ep->fh_ep.xfer_len = ep->fh_ep.xfer_count;
+		}
+		ep->stopped = 0;
+		FH_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x "
+			   "xfer_len=%0x stopped=%d\n",
+			   ep->fh_ep.xfer_buff,
+			   ep->fh_ep.xfer_count, ep->fh_ep.xfer_len,
+			   ep->stopped);
+		/* EP0 uses its dedicated start routine. */
+		if (epnum == 0) {
+			fh_otg_ep0_start_transfer(core_if, &ep->fh_ep);
+		} else {
+			fh_otg_ep_start_transfer(core_if, &ep->fh_ep);
+		}
+	}
+}
+
+/*
+ * This function creates a new nextep sequence based on the Learn Queue.
+ *
+ * @param core_if Programming view of FH_otg controller
+ */
+void predict_nextep_seq( fh_otg_core_if_t * core_if)
+{
+	fh_otg_device_global_regs_t *dev_global_regs =
+	    core_if->dev_if->dev_global_regs;
+	const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
+	/* Number of Token Queue Registers */
+	const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
+	dtknq1_data_t dtknqr1;
+	uint32_t in_tkn_epnums[4];
+	uint8_t seqnum[MAX_EPS_CHANNELS];
+	uint8_t intkn_seq[TOKEN_Q_DEPTH];
+	grstctl_t resetctl = {.d32 = 0 };
+	uint8_t temp;
+	int ndx = 0;
+	int start = 0;
+	int end = 0;
+	int sort_done = 0;
+	int i = 0;
+	volatile uint32_t *addr = &dev_global_regs->dtknqr1;
+
+	FH_DEBUGPL(DBG_PCD, "dev_token_q_depth=%d\n", TOKEN_Q_DEPTH);
+
+	/* Read the DTKNQ Registers */
+	/* The token-queue registers are not all contiguous: skip over
+	 * dvbusdis to dtknqr3 when walking them. */
+	for (i = 0; i < DTKNQ_REG_CNT; i++) {
+		in_tkn_epnums[i] = FH_READ_REG32(addr);
+		FH_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
+			   in_tkn_epnums[i]);
+		if (addr == &dev_global_regs->dvbusdis) {
+			addr = &dev_global_regs->dtknqr3_dthrctl;
+		} else {
+			++addr;
+		}
+
+	}
+
+	/* Copy the DTKNQR1 data to the bit field. */
+	/* Determine the valid window [start..end] of queue entries from
+	 * the write pointer and wrap bit. */
+	dtknqr1.d32 = in_tkn_epnums[0];
+	if (dtknqr1.b.wrap_bit) {
+		ndx = dtknqr1.b.intknwptr;
+		end = ndx - 1;
+		if (end < 0)
+			end = TOKEN_Q_DEPTH - 1;
+	} else {
+		ndx = 0;
+		end = dtknqr1.b.intknwptr - 1;
+		if (end < 0)
+			end = 0;
+	}
+	start = ndx;
+
+	/* Fill seqnum[] by initial values: EP number + 31 */
+	/* (+31 marks "no token seen"; real queue positions are < 31.) */
+	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+		seqnum[i] = i + 31;
+	}
+
+	/* Fill intkn_seq[] from in_tkn_epnums[0] */
+	/* Each register packs up to eight 4-bit EP numbers, MSB first. */
+	for (i = 0; (i < 6) && (i < TOKEN_Q_DEPTH); i++)
+		intkn_seq[i] = (in_tkn_epnums[0] >> ((7 - i) * 4)) & 0xf;
+
+	if (TOKEN_Q_DEPTH > 6) {
+		/* Fill intkn_seq[] from in_tkn_epnums[1] */
+		for (i = 6; (i < 14) && (i < TOKEN_Q_DEPTH); i++)
+			intkn_seq[i] =
+			    (in_tkn_epnums[1] >> ((7 - (i - 6)) * 4)) & 0xf;
+	}
+
+	if (TOKEN_Q_DEPTH > 14) {
+		/* Fill intkn_seq[] from in_tkn_epnums[1] */
+		for (i = 14; (i < 22) && (i < TOKEN_Q_DEPTH); i++)
+			intkn_seq[i] =
+			    (in_tkn_epnums[2] >> ((7 - (i - 14)) * 4)) & 0xf;
+	}
+
+	if (TOKEN_Q_DEPTH > 22) {
+		/* Fill intkn_seq[] from in_tkn_epnums[1] */
+		for (i = 22; (i < 30) && (i < TOKEN_Q_DEPTH); i++)
+			intkn_seq[i] =
+			    (in_tkn_epnums[3] >> ((7 - (i - 22)) * 4)) & 0xf;
+	}
+
+	FH_DEBUGPL(DBG_PCDV, "%s start=%d end=%d intkn_seq[]:\n", __func__,
+		   start, end);
+	for (i = 0; i < TOKEN_Q_DEPTH; i++)
+		FH_DEBUGPL(DBG_PCDV, "%d\n", intkn_seq[i]);
+
+	/* Update seqnum based on intkn_seq[] */
+	/* seqnum[ep] becomes that EP's first position in token order. */
+	i = 0;
+	do {
+		seqnum[intkn_seq[ndx]] = i;
+		ndx++;
+		i++;
+		if (ndx == TOKEN_Q_DEPTH)
+			ndx = 0;
+	} while (i < TOKEN_Q_DEPTH);
+
+	/* Mark non active EP's in seqnum[] by 0xff */
+	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+		if (core_if->nextep_seq[i] == 0xff)
+			seqnum[i] = 0xff;
+	}
+
+	/* Sort seqnum[] */
+	/* Bubble sort: orders EPs by token position; never-seen (ep+31)
+	 * and inactive (0xff) entries sink to the end. */
+	sort_done = 0;
+	while (!sort_done) {
+		sort_done = 1;
+		for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
+			if (seqnum[i] > seqnum[i + 1]) {
+				temp = seqnum[i];
+				seqnum[i] = seqnum[i + 1];
+				seqnum[i + 1] = temp;
+				sort_done = 0;
+			}
+		}
+	}
+
+	/* First EP in the predicted sequence is the earliest token. */
+	ndx = start + seqnum[0];
+	if (ndx >= TOKEN_Q_DEPTH)
+		ndx = ndx % TOKEN_Q_DEPTH;
+	core_if->first_in_nextep_seq = intkn_seq[ndx];
+
+	/* Update seqnum[] by EP numbers */
+	/* NOTE(review): the first 'ndx = start + i;' below is dead -- it
+	 * is overwritten or unused in both branches; confirm and remove. */
+	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+		ndx = start + i;
+		if (seqnum[i] < 31) {
+			ndx = start + seqnum[i];
+			if (ndx >= TOKEN_Q_DEPTH)
+				ndx = ndx % TOKEN_Q_DEPTH;
+			seqnum[i] = intkn_seq[ndx];
+		} else {
+			if (seqnum[i] < 0xff) {
+				seqnum[i] = seqnum[i] - 31;
+			} else {
+				break;
+			}
+		}
+	}
+
+	/* Update nextep_seq[] based on seqnum[] */
+	/* Chain each EP to its successor; the last active EP wraps back
+	 * to first_in_nextep_seq, closing the ring. */
+	for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
+		if (seqnum[i] != 0xff) {
+			if (seqnum[i + 1] != 0xff) {
+				core_if->nextep_seq[seqnum[i]] = seqnum[i + 1];
+			} else {
+				core_if->nextep_seq[seqnum[i]] = core_if->first_in_nextep_seq;
+				break;
+			}
+		} else {
+			break;
+		}
+	}
+
+	FH_DEBUGPL(DBG_PCDV, "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
+		   __func__, core_if->first_in_nextep_seq);
+	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+		FH_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
+	}
+
+	/* Flush the Learning Queue */
+	resetctl.d32 = FH_READ_REG32(&core_if->core_global_regs->grstctl);
+	resetctl.b.intknqflsh = 1;
+	FH_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
+
+
+}
+
+/**
+ * handle the IN EP disable interrupt.
+ */
+static inline void handle_in_ep_disable_intr(fh_otg_pcd_t * pcd,
+					     const uint32_t epnum)
+{
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	fh_otg_dev_if_t *dev_if = core_if->dev_if;
+	deptsiz_data_t dieptsiz = {.d32 = 0 };
+	dctl_data_t dctl = {.d32 = 0 };
+	fh_otg_pcd_ep_t *ep;
+	fh_ep_t *fh_ep;
+	gintmsk_data_t gintmsk_data;
+	depctl_data_t depctl;
+	uint32_t diepdma;
+	uint32_t remain_to_transfer = 0;
+	uint8_t i;
+	uint32_t xfer_size;
+
+	ep = get_in_ep(pcd, epnum);
+	fh_ep = &ep->fh_ep;
+
+	/* ISO EPs: just flush the FIFO and complete; no sequence
+	 * prediction is involved. */
+	if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
+		fh_otg_flush_tx_fifo(core_if, fh_ep->tx_fifo_num);
+		complete_ep(ep);
+		return;
+	}
+
+	FH_DEBUGPL(DBG_PCD, "diepctl%d=%0x\n", epnum,
+		   FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl));
+	dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dieptsiz);
+	depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
+
+	FH_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
+		   dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
+
+	/* No prediction in progress (or periodic EP, eptype bit0 set):
+	 * clear global IN NAK and restart the transfer directly. */
+	if ((core_if->start_predict == 0) || (depctl.b.eptype & 1)) {
+		if (ep->stopped) {
+			if (core_if->en_multiple_tx_fifo)
+				/* Flush the Tx FIFO */
+				fh_otg_flush_tx_fifo(core_if, fh_ep->tx_fifo_num);
+			/* Clear the Global IN NP NAK */
+			dctl.d32 = 0;
+			dctl.b.cgnpinnak = 1;
+			FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+			/* Restart the transaction */
+			if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
+				restart_transfer(pcd, epnum);
+			}
+		} else {
+			/* Restart the transaction */
+			if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
+				restart_transfer(pcd, epnum);
+			}
+			FH_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
+		}
+		return;
+	}
+
+	/* start_predict counts down as each NP IN EP reports disabled. */
+	if (core_if->start_predict > 2) {	// NP IN EP
+		core_if->start_predict--;
+		return;
+	}
+
+	core_if->start_predict--;
+
+	if (core_if->start_predict == 1) {	// All NP IN Ep's disabled now
+
+		/* Rebuild the next-EP sequence from the learn queue. */
+		predict_nextep_seq(core_if);
+
+		/* Update all active IN EP's NextEP field based of nextep_seq[] */
+		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
+			depctl.d32 =
+			    FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
+			if (core_if->nextep_seq[i] != 0xff) {	// Active NP IN EP
+				depctl.b.nextep = core_if->nextep_seq[i];
+				FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
+			}
+		}
+		/* Flush Shared NP TxFIFO */
+		fh_otg_flush_tx_fifo(core_if, 0);
+		/* Rewind buffers */
+		/* Buffer-DMA only: recompute each EP's residual transfer
+		 * size and rewind DIEPDMA to match. */
+		if (!core_if->dma_desc_enable) {
+			i = core_if->first_in_nextep_seq;
+			do {
+				ep = get_in_ep(pcd, i);
+				dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
+				xfer_size = ep->fh_ep.total_len - ep->fh_ep.xfer_count;
+				if (xfer_size > ep->fh_ep.maxxfer)
+					xfer_size = ep->fh_ep.maxxfer;
+				depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
+				if (dieptsiz.b.pktcnt != 0) {
+					if (xfer_size == 0) {
+						remain_to_transfer = 0;
+					} else {
+						/* Residual = whole packets left, plus the
+						 * short tail when xfer_size is not a
+						 * multiple of maxpacket. */
+						if ((xfer_size % ep->fh_ep.maxpacket) == 0) {
+							remain_to_transfer =
+							    dieptsiz.b.pktcnt * ep->fh_ep.maxpacket;
+						} else {
+							remain_to_transfer = ((dieptsiz.b.pktcnt -1) * ep->fh_ep.maxpacket)
+							    + (xfer_size % ep->fh_ep.maxpacket);
+						}
+					}
+					diepdma = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepdma);
+					dieptsiz.b.xfersize = remain_to_transfer;
+					FH_WRITE_REG32(&dev_if->in_ep_regs[i]->dieptsiz, dieptsiz.d32);
+					diepdma = ep->fh_ep.dma_addr + (xfer_size - remain_to_transfer);
+					FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepdma, diepdma);
+				}
+				i = core_if->nextep_seq[i];
+			} while (i != core_if->first_in_nextep_seq);
+		} else {	// dma_desc_enable
+			FH_PRINTF("%s Learning Queue not supported in DDMA\n", __func__);
+		}
+
+		/* Restart transfers in predicted sequences */
+		i = core_if->first_in_nextep_seq;
+		do {
+			dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
+			depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
+			if (dieptsiz.b.pktcnt != 0) {
+				depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
+				depctl.b.epena = 1;
+				depctl.b.cnak = 1;
+				FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
+			}
+			i = core_if->nextep_seq[i];
+		} while (i != core_if->first_in_nextep_seq);
+
+		/* Clear the global non-periodic IN NAK handshake */
+		dctl.d32 = 0;
+		dctl.b.cgnpinnak = 1;
+		FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+
+		/* Unmask EP Mismatch interrupt */
+		gintmsk_data.d32 = 0;
+		gintmsk_data.b.epmismatch = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, gintmsk_data.d32);
+
+		core_if->start_predict = 0;
+
+	}
+}
+
+/**
+ * Handler for the IN EP timeout handshake interrupt.
+ */
+static inline void handle_in_ep_timeout_intr(fh_otg_pcd_t * pcd,
+					     const uint32_t epnum)
+{
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	fh_otg_dev_if_t *dev_if = core_if->dev_if;
+
+#ifdef DEBUG
+	deptsiz_data_t dieptsiz = {.d32 = 0 };
+	uint32_t num = 0;
+#endif
+	dctl_data_t dctl = {.d32 = 0 };
+	fh_otg_pcd_ep_t *ep;
+
+	gintmsk_data_t intr_mask = {.d32 = 0 };
+
+	ep = get_in_ep(pcd, epnum);
+
+	/* Disable the NP Tx Fifo Empty Interrrupt */
+	if (!core_if->dma_enable) {
+		intr_mask.b.nptxfempty = 1;
+		FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
+				intr_mask.d32, 0);
+	}
+	/** @todo NGS Check EP type.
+	 * Implement for Periodic EPs */
+	/*
+	 * Non-periodic EP
+	 */
+	/* Enable the Global IN NAK Effective Interrupt */
+	intr_mask.b.ginnakeff = 1;
+	FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, intr_mask.d32);
+
+	/* Set Global IN NAK */
+	dctl.b.sgnpinnak = 1;
+	FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+
+	/* Mark the EP stopped; the NAK-effective path finishes recovery. */
+	ep->stopped = 1;
+
+#ifdef DEBUG
+	/* NOTE(review): 'num' is always 0 here, so this dumps EP0's
+	 * DIEPTSIZ rather than epnum's -- confirm intent. */
+	dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[num]->dieptsiz);
+	FH_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
+		   dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
+#endif
+
+#ifdef DISABLE_PERIODIC_EP
+	/*
+	 * Set the NAK bit for this EP to
+	 * start the disable process.
+	 */
+	/* NOTE(review): 'diepctl' is not declared in this function; this
+	 * block would not compile if DISABLE_PERIODIC_EP were defined. */
+	diepctl.d32 = 0;
+	diepctl.b.snak = 1;
+	FH_MODIFY_REG32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32,
+			diepctl.d32);
+	ep->disabling = 1;
+	ep->stopped = 1;
+#endif
+}
+
+/**
+ * Handler for the IN EP NAK interrupt.
+ */
+static inline int32_t handle_in_ep_nak_intr(fh_otg_pcd_t * pcd,
+					    const uint32_t epnum)
+{
+	/** @todo implement ISR */
+	/* Placeholder handler: log the event, then mask the IN EP NAK
+	 * interrupt so it does not keep firing. */
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	diepmsk_data_t mask = {.d32 = 0 };
+
+	FH_PRINTF("INTERRUPT Handler not implemented for %s\n", "IN EP NAK");
+	mask.b.nak = 1;
+
+	if (core_if->multiproc_int_enable) {
+		/* Per-EP mask register in multiprocessor interrupt mode. */
+		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+				diepeachintmsk[epnum], mask.d32, 0);
+		return 1;
+	}
+
+	/* Shared device-wide IN EP mask otherwise. */
+	FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->diepmsk,
+			mask.d32, 0);
+	return 1;
+}
+
+/**
+ * Handler for the OUT EP Babble interrupt.
+ */
+static inline int32_t handle_out_ep_babble_intr(fh_otg_pcd_t * pcd,
+						const uint32_t epnum)
+{
+	/** @todo implement ISR */
+	/* Placeholder handler: log the event, then mask the OUT EP Babble
+	 * interrupt so it does not keep firing. */
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	doepmsk_data_t mask = {.d32 = 0 };
+
+	FH_PRINTF("INTERRUPT Handler not implemented for %s\n",
+		  "OUT EP Babble");
+	mask.b.babble = 1;
+
+	if (core_if->multiproc_int_enable) {
+		/* Per-EP mask register in multiprocessor interrupt mode. */
+		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+				doepeachintmsk[epnum], mask.d32, 0);
+		return 1;
+	}
+
+	/* Shared device-wide OUT EP mask otherwise. */
+	FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
+			mask.d32, 0);
+	return 1;
+}
+
+/**
+ * Handler for the OUT EP NAK interrupt.
+ */
+static inline int32_t handle_out_ep_nak_intr(fh_otg_pcd_t * pcd,
+					     const uint32_t epnum)
+{
+	/** @todo implement ISR */
+	/* Placeholder handler: note the event (debug level only), then
+	 * mask the OUT EP NAK interrupt so it does not keep firing. */
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	doepmsk_data_t mask = {.d32 = 0 };
+
+	FH_DEBUGPL(DBG_ANY, "INTERRUPT Handler not implemented for %s\n", "OUT EP NAK");
+	mask.b.nak = 1;
+
+	if (core_if->multiproc_int_enable) {
+		/* Per-EP mask register in multiprocessor interrupt mode. */
+		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+				doepeachintmsk[epnum], mask.d32, 0);
+		return 1;
+	}
+
+	/* Shared device-wide OUT EP mask otherwise. */
+	FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
+			mask.d32, 0);
+	return 1;
+}
+
+/**
+ * Handler for the OUT EP NYET interrupt.
+ */
+static inline int32_t handle_out_ep_nyet_intr(fh_otg_pcd_t * pcd,
+					      const uint32_t epnum)
+{
+	/** @todo implement ISR */
+	/* Placeholder handler: log the event, then mask the OUT EP NYET
+	 * interrupt so it does not keep firing. */
+	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+	doepmsk_data_t mask = {.d32 = 0 };
+
+	FH_PRINTF("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET");
+	mask.b.nyet = 1;
+
+	if (core_if->multiproc_int_enable) {
+		/* Per-EP mask register in multiprocessor interrupt mode. */
+		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
+				doepeachintmsk[epnum], mask.d32, 0);
+		return 1;
+	}
+
+	/* Shared device-wide OUT EP mask otherwise. */
+	FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
+			mask.d32, 0);
+	return 1;
+}
+/* Handle an ISO transfer-complete in descriptor-DMA mode: complete the
+ * finished descriptor chain, then, if the endpoint has shut down, switch
+ * to the other ping-pong descriptor buffer and re-enable the EP. */
+static void handle_xfercompl_iso_ddma (fh_otg_dev_if_t *dev_if, fh_otg_pcd_ep_t *ep)
+{
+	depctl_data_t depctl;
+	fh_ep_t *fh_ep;
+	uint32_t doepdma;
+	fh_dma_t dma_desc_addr;
+	fh_otg_dev_dma_desc_t *dma_desc;
+	int index = 0;
+	uint8_t epnum;
+
+	fh_ep = &ep->fh_ep;
+	epnum = fh_ep->num;
+
+	/* Report completed descriptors up to the gadget layer first. */
+	complete_ddma_iso_ep(ep);
+
+	if (fh_ep->is_in) {
+		/* IN: only switch buffers once the EP has disabled itself. */
+		depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
+		if (!depctl.b.epena) {
+			if (fh_ep->use_add_buf) {
+				FH_DEBUGPL(DBG_PCD, "go to second buffer \n");
+				fh_ep->use_add_buf = 0;
+				fh_ep->iso_desc_first = 0;
+				if (fh_ep->iso_desc_second) {
+					/* Point DMA at chain 1 and restart. */
+					depctl_data_t diepctl;
+					FH_WRITE_REG32(&dev_if->in_ep_regs[epnum]->diepdma,
+						       fh_ep->dma_desc_addr1);
+					diepctl.d32 = 0;
+					diepctl.b.epena = 1;
+					diepctl.b.cnak = 1;
+					FH_MODIFY_REG32(&dev_if->in_ep_regs[epnum]->diepctl,
+							0, diepctl.d32);
+				} else {
+					FH_DEBUGPL(DBG_PCD, "DDMA: No more ISOC requests 1\n");
+				}
+			} else {
+				FH_DEBUGPL(DBG_PCD, "go to first buffer \n");
+				fh_ep->use_add_buf = 1;
+				fh_ep->iso_desc_second = 0;
+				if (fh_ep->iso_desc_first) {
+					/* Point DMA at chain 0 and restart. */
+					depctl_data_t diepctl;
+					FH_WRITE_REG32(&dev_if->in_ep_regs[epnum]->diepdma,
+						       fh_ep->dma_desc_addr);
+					diepctl.d32 = 0;
+					diepctl.b.epena = 1;
+					diepctl.b.cnak = 1;
+					FH_MODIFY_REG32(&dev_if->in_ep_regs[epnum]->diepctl,
+							0, diepctl.d32);
+				} else {
+					FH_DEBUGPL(DBG_PCD, "DDMA: No more ISOC requests 2\n");
+				}
+			}
+		}
+	} else {
+		depctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[epnum]->doepctl);
+		doepdma = FH_READ_REG32(&dev_if->out_ep_regs[epnum]->doepdma);
+
+		/* Number of armed descriptors in the active chain, and the
+		 * chain's base address. */
+		if (fh_ep->use_add_buf) {
+			index = fh_ep->iso_desc_first;
+			dma_desc_addr = fh_ep->dma_desc_addr;
+		} else {
+			index = fh_ep->iso_desc_second;
+			dma_desc_addr = fh_ep->dma_desc_addr1;
+		}
+
+		/* If DOEPDMA has advanced exactly past the armed descriptors,
+		 * the chain is exhausted: disable the EP to switch buffers.
+		 * NOTE(review): depctl is reused as the write value here,
+		 * then tested as b.epena below -- epdis and epena are
+		 * different bits, so the test still sees the old epena. */
+		if (index == (doepdma - dma_desc_addr)/sizeof(fh_otg_dev_dma_desc_t)) {
+			depctl.d32 = 0;
+			depctl.b.epdis = 1;
+			FH_MODIFY_REG32(&dev_if->out_ep_regs[epnum]->doepctl, 0, depctl.d32);
+		}
+		/* NOTE(review): dma_desc is computed but never used below --
+		 * confirm before removing. */
+		dma_desc = fh_ep->desc_addr + fh_ep->iso_desc_first;
+		if (!depctl.b.epena) {
+			if (fh_ep->use_add_buf) {
+				FH_DEBUGPL(DBG_PCD, "go to second buffer \n");
+				fh_ep->use_add_buf = 0;
+				fh_ep->iso_desc_first = 0;
+				if (fh_ep->iso_desc_second) {
+					FH_WRITE_REG32(&dev_if->out_ep_regs[epnum]->doepdma, fh_ep->dma_desc_addr1);
+					depctl.d32 = 0;
+					depctl.b.epena = 1;
+					depctl.b.cnak = 1;
+					FH_MODIFY_REG32(&dev_if->out_ep_regs[epnum]->doepctl, 0, depctl.d32);
+				} else {
+					FH_DEBUGPL(DBG_PCD, "DDMA: There are no more ISOC requests 1!!! \n");
+				}
+			} else {
+				fh_ep->use_add_buf = 1;
+				fh_ep->iso_desc_second = 0;
+				if (fh_ep->iso_desc_first) {
+					FH_DEBUGPL(DBG_PCD, "go to first buffer");
+					FH_WRITE_REG32(&dev_if->out_ep_regs[epnum]->doepdma, fh_ep->dma_desc_addr);
+					depctl.d32 = 0;
+					depctl.b.epena = 1;
+					depctl.b.cnak = 1;
+					FH_MODIFY_REG32(&dev_if->out_ep_regs[epnum]->doepctl, 0, depctl.d32);
+				} else {
+					FH_DEBUGPL(DBG_PCD, "DDMA: There are no more ISOC requests 2!!! \n");
+				}
+			}
+		}
+	}
+}
+/**
+ * This interrupt indicates that an IN EP has a pending Interrupt.
+ * The sequence for handling the IN EP interrupt is shown below:
+ * -# Read the Device All Endpoint Interrupt register
+ * -# Repeat the following for each IN EP interrupt bit set (from
+ * LSB to MSB).
+ * -# Read the Device Endpoint Interrupt (DIEPINTn) register
+ * -# If "Transfer Complete" call the request complete function
+ * -# If "Endpoint Disabled" complete the EP disable procedure.
+ * -# If "AHB Error Interrupt" log error
+ * -# If "Time-out Handshake" log error
+ * -# If "IN Token Received when TxFIFO Empty" write packet to Tx
+ * FIFO.
+ * -# If "IN Token EP Mismatch" (disable, this is handled by EP
+ * Mismatch Interrupt)
+ */
+static int32_t fh_otg_pcd_handle_in_ep_intr(fh_otg_pcd_t * pcd)
+{
+#define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \
+do { \
+ diepint_data_t diepint = {.d32=0}; \
+ diepint.b.__intr = 1; \
+ FH_WRITE_REG32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
+ diepint.d32); \
+} while (0)
+
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ fh_otg_dev_if_t *dev_if = core_if->dev_if;
+ diepint_data_t diepint = {.d32 = 0 };
+ depctl_data_t depctl = {.d32 = 0 };
+ uint32_t ep_intr;
+ uint32_t epnum = 0;
+ fh_otg_pcd_ep_t *ep;
+ fh_ep_t *fh_ep;
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+
+ FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
+
+ /* Read in the device interrupt bits */
+ ep_intr = fh_otg_read_dev_all_in_ep_intr(core_if);
+
+ /* Service the Device IN interrupts for each endpoint */
+ while (ep_intr) {
+ if (ep_intr & 0x1) {
+ uint32_t empty_msk;
+ /* Get EP pointer */
+ ep = get_in_ep(pcd, epnum);
+ fh_ep = &ep->fh_ep;
+
+ depctl.d32 =
+ FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
+ empty_msk =
+ FH_READ_REG32(&dev_if->
+ dev_global_regs->dtknqr4_fifoemptymsk);
+
+ FH_DEBUGPL(DBG_PCDV,
+ "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n",
+ epnum, empty_msk, depctl.d32);
+
+ FH_DEBUGPL(DBG_PCD,
+ "EP%d-%s: type=%d, mps=%d\n",
+ fh_ep->num, (fh_ep->is_in ? "IN" : "OUT"),
+ fh_ep->type, fh_ep->maxpacket);
+
+ diepint.d32 =
+ fh_otg_read_dev_in_ep_intr(core_if, fh_ep);
+
+ FH_DEBUGPL(DBG_PCDV,
+ "EP %d Interrupt Register - 0x%x\n", epnum,
+ diepint.d32);
+ /* Transfer complete */
+ if (diepint.b.xfercompl) {
+ /* Disable the NP Tx FIFO Empty
+ * Interrupt */
+ if (core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ FH_MODIFY_REG32
+ (&core_if->core_global_regs->gintmsk,
+ intr_mask.d32, 0);
+ } else {
+ /* Disable the Tx FIFO Empty Interrupt for this EP */
+ uint32_t fifoemptymsk =
+ 0x1 << fh_ep->num;
+ FH_MODIFY_REG32(&core_if->
+ dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
+ fifoemptymsk, 0);
+ }
+ /* Clear the bit in DIEPINTn for this interrupt */
+ CLEAR_IN_EP_INTR(core_if, epnum, xfercompl);
+
+ /* Complete the transfer */
+ if (epnum == 0) {
+ handle_ep0(pcd);
+ }
+#ifdef FH_EN_ISOC
+ else if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
+ if (!ep->stopped)
+ complete_iso_ep(pcd, ep);
+ }
+#endif /* FH_EN_ISOC */
+#ifdef FH_UTE_PER_IO
+ else if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
+ if (!ep->stopped)
+ complete_xiso_ep(ep);
+ }
+#endif /* FH_UTE_PER_IO */
+ else {
+#if 0
+ if (core_if->dma_desc_enable
+ && fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
+ handle_xfercompl_iso_ddma(dev_if, ep);
+ } else {
+ if (fh_ep->type == FH_OTG_EP_TYPE_ISOC
+ && fh_ep->bInterval > 1) {
+ fh_ep->frame_num += fh_ep->bInterval;
+ if (fh_ep->frame_num > 0x3FFF)
+ {
+ fh_ep->frm_overrun = 1;
+ fh_ep->frame_num &= 0x3FFF;
+ } else
+ fh_ep->frm_overrun = 0;
+ }
+#else
+ {
+#endif
+ complete_ep(ep);
+ if (diepint.b.nak)
+ CLEAR_IN_EP_INTR(core_if, epnum, nak);
+ }
+ }
+ }
+ /* Endpoint disable */
+ if (diepint.b.epdisabled) {
+ FH_DEBUGPL(DBG_ANY, "EP%d IN disabled\n",
+ epnum);
+ handle_in_ep_disable_intr(pcd, epnum);
+
+ /* Clear the bit in DIEPINTn for this interrupt */
+ CLEAR_IN_EP_INTR(core_if, epnum, epdisabled);
+ }
+ /* AHB Error */
+ if (diepint.b.ahberr) {
+ FH_ERROR("EP%d IN AHB Error\n", epnum);
+ /* Clear the bit in DIEPINTn for this interrupt */
+ CLEAR_IN_EP_INTR(core_if, epnum, ahberr);
+ }
+ /* TimeOUT Handshake (non-ISOC IN EPs) */
+ if (diepint.b.timeout) {
+ FH_ERROR("EP%d IN Time-out\n", epnum);
+ handle_in_ep_timeout_intr(pcd, epnum);
+
+ CLEAR_IN_EP_INTR(core_if, epnum, timeout);
+ }
+ /** IN Token received with TxF Empty */
+ if (diepint.b.intktxfemp) {
+ FH_DEBUGPL(DBG_ANY,
+ "EP%d IN TKN TxFifo Empty\n",
+ epnum);
+ if (!ep->stopped && epnum != 0) {
+
+ diepmsk_data_t diepmsk = {.d32 = 0 };
+ diepmsk.b.intktxfemp = 1;
+
+ if (core_if->multiproc_int_enable) {
+ FH_MODIFY_REG32
+ (&dev_if->dev_global_regs->diepeachintmsk
+ [epnum], diepmsk.d32, 0);
+ } else {
+ FH_MODIFY_REG32
+ (&dev_if->dev_global_regs->diepmsk,
+ diepmsk.d32, 0);
+ }
+ } else if (core_if->dma_desc_enable
+ && epnum == 0
+ && pcd->ep0state ==
+ EP0_OUT_STATUS_PHASE) {
+ // EP0 IN set STALL
+ depctl.d32 =
+ FH_READ_REG32(&dev_if->in_ep_regs
+ [epnum]->diepctl);
+
+ /* set the disable and stall bits */
+ if (depctl.b.epena) {
+ depctl.b.epdis = 1;
+ }
+ depctl.b.stall = 1;
+ FH_WRITE_REG32(&dev_if->in_ep_regs
+ [epnum]->diepctl,
+ depctl.d32);
+ }
+ CLEAR_IN_EP_INTR(core_if, epnum, intktxfemp);
+ }
+ /** IN Token Received with EP mismatch */
+ if (diepint.b.intknepmis) {
+ FH_DEBUGPL(DBG_ANY,
+ "EP%d IN TKN EP Mismatch\n", epnum);
+ CLEAR_IN_EP_INTR(core_if, epnum, intknepmis);
+ }
+ /** IN Endpoint NAK Effective */
+ if (diepint.b.inepnakeff) {
+ FH_DEBUGPL(DBG_ANY,
+ "EP%d IN EP NAK Effective\n",
+ epnum);
+ /* Periodic EP */
+ if (ep->disabling) {
+ depctl.d32 = 0;
+ depctl.b.snak = 1;
+ depctl.b.epdis = 1;
+ FH_MODIFY_REG32(&dev_if->in_ep_regs
+ [epnum]->diepctl,
+ depctl.d32,
+ depctl.d32);
+ }
+ CLEAR_IN_EP_INTR(core_if, epnum, inepnakeff);
+
+ }
+
+ /** IN EP Tx FIFO Empty Intr */
+ if (diepint.b.emptyintr) {
+ FH_DEBUGPL(DBG_ANY,
+ "EP%d Tx FIFO Empty Intr \n",
+ epnum);
+ write_empty_tx_fifo(pcd, epnum);
+
+ CLEAR_IN_EP_INTR(core_if, epnum, emptyintr);
+
+ }
+
+ /** IN EP BNA Intr */
+ if (diepint.b.bna) {
+ CLEAR_IN_EP_INTR(core_if, epnum, bna);
+ if (core_if->dma_desc_enable) {
+#ifdef FH_EN_ISOC
+ if (fh_ep->type ==
+ FH_OTG_EP_TYPE_ISOC) {
+						/*
+						 * This checking is performed to prevent first "false" BNA
+						 * handling occurring right after reconnect
+						 */
+ if (fh_ep->next_frame !=
+ 0xffffffff)
+ fh_otg_pcd_handle_iso_bna(ep);
+ } else
+#endif /* FH_EN_ISOC */
+ {
+ fh_otg_pcd_handle_noniso_bna(ep);
+ }
+ }
+ }
+ /* NAK Interrupt */
+ if (diepint.b.nak) {
+ FH_DEBUGPL(DBG_ANY, "EP%d IN NAK Interrupt\n",
+ epnum);
+ if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
+ if (core_if->dma_desc_enable) {
+ if (ep->fh_ep.frame_num == 0xFFFFFFFF) {
+ ep->fh_ep.frame_num = core_if->frame_num;
+ fh_otg_pcd_start_iso_ddma(core_if, ep);
+ } else {
+ CLEAR_IN_EP_INTR(core_if, epnum, nak);
+ }
+ } else {
+ depctl_data_t depctl;
+ if (ep->fh_ep.frame_num == 0xFFFFFFFF) {
+ ep->fh_ep.frame_num = core_if->frame_num;
+ if (ep->fh_ep.bInterval > 1) {
+ depctl.d32 = 0;
+ depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
+ if (ep->fh_ep.frame_num & 0x1) {
+ depctl.b.setd1pid = 1;
+ depctl.b.setd0pid = 0;
+ } else {
+ depctl.b.setd0pid = 1;
+ depctl.b.setd1pid = 0;
+ }
+ FH_WRITE_REG32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32);
+ }
+ start_next_request(ep);
+ }
+ ep->fh_ep.frame_num += ep->fh_ep.bInterval;
+ if (fh_ep->frame_num > 0x3FFF) {
+ fh_ep->frm_overrun = 1;
+ fh_ep->frame_num &= 0x3FFF;
+ } else {
+ fh_ep->frm_overrun = 0;
+ }
+ }
+ }
+
+ CLEAR_IN_EP_INTR(core_if, epnum, nak);
+ }
+ }
+ epnum++;
+ ep_intr >>= 1;
+ }
+
+ return 1;
+#undef CLEAR_IN_EP_INTR
+}
+
+/**
+ * This interrupt indicates that an OUT EP has a pending Interrupt.
+ * The sequence for handling the OUT EP interrupt is shown below:
+ * -# Read the Device All Endpoint Interrupt register
+ * -# Repeat the following for each OUT EP interrupt bit set (from
+ * LSB to MSB).
+ * -# Read the Device Endpoint Interrupt (DOEPINTn) register
+ * -# If "Transfer Complete" call the request complete function
+ * -# If "Endpoint Disabled" complete the EP disable procedure.
+ * -# If "AHB Error Interrupt" log error
+ * -# If "Setup Phase Done" process Setup Packet (See Standard USB
+ * Command Processing)
+ */
+static int32_t fh_otg_pcd_handle_out_ep_intr(fh_otg_pcd_t * pcd)
+{
+#define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \
+do { \
+ doepint_data_t doepint = {.d32=0}; \
+ doepint.b.__intr = 1; \
+ FH_WRITE_REG32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
+ doepint.d32); \
+} while (0)
+
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ uint32_t ep_intr;
+ doepint_data_t doepint = {.d32 = 0 };
+ uint32_t epnum = 0;
+ fh_otg_pcd_ep_t *ep;
+ fh_ep_t *fh_ep;
+ dctl_data_t dctl = {.d32 = 0 };
+ gintmsk_data_t gintmsk = {.d32 = 0 };
+
+
+ FH_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
+
+ /* Read in the device interrupt bits */
+ ep_intr = fh_otg_read_dev_all_out_ep_intr(core_if);
+
+ while (ep_intr) {
+ if (ep_intr & 0x1) {
+ /* Get EP pointer */
+ ep = get_out_ep(pcd, epnum);
+ fh_ep = &ep->fh_ep;
+
+#ifdef VERBOSE
+ FH_DEBUGPL(DBG_PCDV,
+ "EP%d-%s: type=%d, mps=%d\n",
+ fh_ep->num, (fh_ep->is_in ? "IN" : "OUT"),
+ fh_ep->type, fh_ep->maxpacket);
+#endif
+ doepint.d32 =
+ fh_otg_read_dev_out_ep_intr(core_if, fh_ep);
+
+ /* Transfer complete */
+ if (doepint.b.xfercompl) {
+
+ if (epnum == 0) {
+ /* Clear the bit in DOEPINTn for this interrupt */
+ CLEAR_OUT_EP_INTR(core_if, epnum, xfercompl);
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
+ FH_DEBUGPL(DBG_PCDV, "in xfer xomplete DOEPINT=%x doepint=%x\n",
+ FH_READ_REG32(&core_if->dev_if->out_ep_regs[0]->doepint),
+ doepint.d32);
+ FH_DEBUGPL(DBG_PCDV, "DOEPCTL=%x \n",
+ FH_READ_REG32(&core_if->dev_if->out_ep_regs[0]->doepctl));
+
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a
+ && core_if->dma_enable == 0) {
+ doepint_data_t doepint;
+ doepint.d32 = FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[0]->doepint);
+ if (pcd->ep0state == EP0_IDLE && doepint.b.sr) {
+ CLEAR_OUT_EP_INTR(core_if, epnum, sr);
+ if (doepint.b.stsphsercvd)
+ CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
+ goto exit_xfercompl;
+ }
+ }
+ /* In case of DDMA look at SR bit to go to the Data Stage */
+ if (core_if->dma_desc_enable) {
+ dev_dma_desc_sts_t status = {.d32 = 0};
+ if (pcd->ep0state == EP0_IDLE) {
+ status.d32 = core_if->dev_if->setup_desc_addr[core_if->
+ dev_if->setup_desc_index]->status.d32;
+ if(pcd->data_terminated) {
+ pcd->data_terminated = 0;
+ status.d32 = core_if->dev_if->out_desc_addr->status.d32;
+ fh_memcpy(&pcd->setup_pkt->req, pcd->backup_buf, 8);
+ }
+ if (status.b.sr) {
+ if (doepint.b.setup) {
+ FH_DEBUGPL(DBG_PCDV, "DMA DESC EP0_IDLE SR=1 setup=1\n");
+ /* Already started data stage, clear setup */
+ CLEAR_OUT_EP_INTR(core_if, epnum, setup);
+ doepint.b.setup = 0;
+ handle_ep0(pcd);
+ /* Prepare for more setup packets */
+ if (pcd->ep0state == EP0_IN_STATUS_PHASE ||
+ pcd->ep0state == EP0_IN_DATA_PHASE) {
+ ep0_out_start(core_if, pcd);
+ }
+
+ goto exit_xfercompl;
+ } else {
+ /* Prepare for more setup packets */
+ FH_DEBUGPL(DBG_PCDV,
+ "EP0_IDLE SR=1 setup=0 new setup comes\n");
+ ep0_out_start(core_if, pcd);
+ }
+ }
+ } else {
+ fh_otg_pcd_request_t *req;
+ dev_dma_desc_sts_t status = {.d32 = 0};
+ diepint_data_t diepint0;
+ diepint0.d32 = FH_READ_REG32(&core_if->dev_if->
+ in_ep_regs[0]->diepint);
+
+ if (pcd->ep0state == EP0_STALL || pcd->ep0state == EP0_DISCONNECT) {
+ FH_ERROR("EP0 is stalled/disconnected\n");
+ }
+
+ /* Clear IN xfercompl if set */
+ if (diepint0.b.xfercompl && (pcd->ep0state == EP0_IN_STATUS_PHASE
+ || pcd->ep0state == EP0_IN_DATA_PHASE)) {
+ FH_WRITE_REG32(&core_if->dev_if->
+ in_ep_regs[0]->diepint, diepint0.d32);
+ }
+
+ status.d32 = core_if->dev_if->setup_desc_addr[core_if->
+ dev_if->setup_desc_index]->status.d32;
+
+ if ((pcd->ep0state == EP0_OUT_STATUS_PHASE) ||
+ (ep->fh_ep.xfer_count != ep->fh_ep.total_len
+ && pcd->ep0state == EP0_OUT_DATA_PHASE))
+ status.d32 = core_if->dev_if->out_desc_addr->status.d32;
+ if (status.b.sr) {
+ if (FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ FH_DEBUGPL(DBG_PCDV, "Request queue empty!!\n");
+ } else {
+ FH_DEBUGPL(DBG_PCDV, "complete req!!\n");
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+ if (ep->fh_ep.xfer_count != ep->fh_ep.total_len &&
+ pcd->ep0state == EP0_OUT_DATA_PHASE) {
+ /* Read arrived setup packet from req->buf */
+ fh_memcpy(&pcd->setup_pkt->req,
+ req->buf + ep->fh_ep.xfer_count, 8);
+ }
+ req->actual = ep->fh_ep.xfer_count;
+ fh_otg_request_done(ep, req, -ECONNRESET);
+ ep->fh_ep.start_xfer_buff = 0;
+ ep->fh_ep.xfer_buff = 0;
+ ep->fh_ep.xfer_len = 0;
+ }
+ pcd->ep0state = EP0_IDLE;
+ if (doepint.b.setup) {
+ FH_DEBUGPL(DBG_PCDV, "EP0_IDLE SR=1 setup=1\n");
+ /* Data stage started, clear setup */
+ CLEAR_OUT_EP_INTR(core_if, epnum, setup);
+ doepint.b.setup = 0;
+ handle_ep0(pcd);
+ /* Prepare for setup packets if ep0in was enabled*/
+ if (pcd->ep0state == EP0_IN_STATUS_PHASE) {
+ ep0_out_start(core_if, pcd);
+ }
+
+ goto exit_xfercompl;
+ } else {
+ /* Prepare for more setup packets */
+ FH_DEBUGPL(DBG_PCDV,
+ "EP0_IDLE SR=1 setup=0 new setup comes 2\n");
+ ep0_out_start(core_if, pcd);
+ }
+ }
+ }
+ }
+ if (core_if->snpsid >= OTG_CORE_REV_3_00a && core_if->dma_enable
+ && core_if->dma_desc_enable == 0) {
+ doepint_data_t doepint_temp = {.d32 = 0};
+ deptsiz0_data_t doeptsize0 = {.d32 = 0 };
+ doepint_temp.d32 = FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[ep->fh_ep.num]->doepint);
+ doeptsize0.d32 = FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[ep->fh_ep.num]->doeptsiz);
+ if (((ep->fh_ep.xfer_count == ep->fh_ep.total_len || doeptsize0.b.xfersize == 64) &&
+ pcd->ep0state == EP0_OUT_DATA_PHASE && doepint.b.stsphsercvd) ||
+ (doeptsize0.b.xfersize == 24 && pcd->ep0state == EP0_IN_STATUS_PHASE)) {
+ CLEAR_OUT_EP_INTR(core_if, epnum, xfercompl);
+ FH_DEBUGPL(DBG_PCDV, "WA for xfercompl along with stsphs \n");
+ doepint.b.xfercompl = 0;
+ ep0_out_start(core_if, pcd);
+ goto exit_xfercompl;
+ }
+
+ if (pcd->ep0state == EP0_IDLE) {
+ if (doepint_temp.b.sr) {
+ CLEAR_OUT_EP_INTR(core_if, epnum, sr);
+ }
+ /* Delay is needed for core to update setup
+ * packet count from 3 to 2 after receiving
+ * setup packet*/
+ fh_udelay(100);
+ doepint.d32 = FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[0]->doepint);
+ if (doeptsize0.b.supcnt == 3) {
+ FH_DEBUGPL(DBG_ANY, "Rolling over!!!!!!!\n");
+ ep->fh_ep.stp_rollover = 1;
+ }
+ if (doepint.b.setup) {
+retry:
+ /* Already started data stage, clear setup */
+ CLEAR_OUT_EP_INTR(core_if, epnum, setup);
+ doepint.b.setup = 0;
+ handle_ep0(pcd);
+ ep->fh_ep.stp_rollover = 0;
+ /* Prepare for more setup packets */
+ if (pcd->ep0state == EP0_IN_STATUS_PHASE ||
+ pcd->ep0state == EP0_IN_DATA_PHASE) {
+ depctl_data_t depctl = {.d32 = 0};
+ depctl.b.cnak = 1;
+ ep0_out_start(core_if, pcd);
+ /* Core not updating setup packet count
+ * in case of PET testing - @TODO vahrama
+ * to check with HW team further */
+ if (!core_if->otg_ver) {
+ FH_MODIFY_REG32(&core_if->dev_if->
+ out_ep_regs[0]->doepctl, 0, depctl.d32);
+ }
+ }
+ goto exit_xfercompl;
+ } else {
+ /* Prepare for more setup packets */
+ FH_DEBUGPL(DBG_ANY,
+ "EP0_IDLE SR=1 setup=0 new setup comes\n");
+ doepint.d32 = FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[0]->doepint);
+ if(doepint.b.setup)
+ goto retry;
+ ep0_out_start(core_if, pcd);
+ }
+ } else {
+ fh_otg_pcd_request_t *req;
+ diepint_data_t diepint0 = {.d32 = 0};
+ doepint_data_t doepint_temp = {.d32 = 0};
+ depctl_data_t diepctl0;
+ diepint0.d32 = FH_READ_REG32(&core_if->dev_if->
+ in_ep_regs[0]->diepint);
+ diepctl0.d32 = FH_READ_REG32(&core_if->dev_if->
+ in_ep_regs[0]->diepctl);
+
+ if (pcd->ep0state == EP0_IN_DATA_PHASE
+ || pcd->ep0state == EP0_IN_STATUS_PHASE) {
+ if (diepint0.b.xfercompl) {
+ FH_WRITE_REG32(&core_if->dev_if->
+ in_ep_regs[0]->diepint, diepint0.d32);
+ }
+ if (diepctl0.b.epena) {
+ diepint_data_t diepint = {.d32 = 0};
+ diepctl0.b.snak = 1;
+ FH_WRITE_REG32(&core_if->dev_if->
+ in_ep_regs[0]->diepctl, diepctl0.d32);
+ do {
+ fh_udelay(10);
+ diepint.d32 = FH_READ_REG32(&core_if->dev_if->
+ in_ep_regs[0]->diepint);
+ } while (!diepint.b.inepnakeff);
+ diepint.b.inepnakeff = 1;
+ FH_WRITE_REG32(&core_if->dev_if->
+ in_ep_regs[0]->diepint, diepint.d32);
+ diepctl0.d32 = 0;
+ diepctl0.b.epdis = 1;
+ FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl,
+ diepctl0.d32);
+ do {
+ fh_udelay(10);
+ diepint.d32 = FH_READ_REG32(&core_if->dev_if->
+ in_ep_regs[0]->diepint);
+ } while (!diepint.b.epdisabled);
+ diepint.b.epdisabled = 1;
+ FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[0]->diepint,
+ diepint.d32);
+ }
+ }
+ doepint_temp.d32 = FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[ep->fh_ep.num]->doepint);
+ if (doepint_temp.b.sr) {
+ CLEAR_OUT_EP_INTR(core_if, epnum, sr);
+ if (FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ FH_DEBUGPL(DBG_PCDV, "Request queue empty!!\n");
+ } else {
+ FH_DEBUGPL(DBG_PCDV, "complete req!!\n");
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+ if (ep->fh_ep.xfer_count != ep->fh_ep.total_len &&
+ pcd->ep0state == EP0_OUT_DATA_PHASE) {
+ /* Read arrived setup packet from req->buf */
+ fh_memcpy(&pcd->setup_pkt->req,
+ req->buf + ep->fh_ep.xfer_count, 8);
+ }
+ req->actual = ep->fh_ep.xfer_count;
+ fh_otg_request_done(ep, req, -ECONNRESET);
+ ep->fh_ep.start_xfer_buff = 0;
+ ep->fh_ep.xfer_buff = 0;
+ ep->fh_ep.xfer_len = 0;
+ }
+ pcd->ep0state = EP0_IDLE;
+ if (doepint.b.setup) {
+ FH_DEBUGPL(DBG_PCDV, "EP0_IDLE SR=1 setup=1\n");
+ /* Data stage started, clear setup */
+ CLEAR_OUT_EP_INTR(core_if, epnum, setup);
+ doepint.b.setup = 0;
+ handle_ep0(pcd);
+ /* Prepare for setup packets if ep0in was enabled*/
+ if (pcd->ep0state == EP0_IN_STATUS_PHASE) {
+ depctl_data_t depctl = {.d32 = 0};
+ depctl.b.cnak = 1;
+ ep0_out_start(core_if, pcd);
+ /* Core not updating setup packet count
+ * in case of PET testing - @TODO vahrama
+ * to check with HW team further */
+ if (!core_if->otg_ver) {
+ FH_MODIFY_REG32(&core_if->dev_if->
+ out_ep_regs[0]->doepctl, 0, depctl.d32);
+ }
+ }
+ goto exit_xfercompl;
+ } else {
+ /* Prepare for more setup packets */
+ FH_DEBUGPL(DBG_PCDV,
+ "EP0_IDLE SR=1 setup=0 new setup comes 2\n");
+ ep0_out_start(core_if, pcd);
+ }
+ }
+ }
+ }
+ if (core_if->dma_enable == 0 || pcd->ep0state != EP0_IDLE)
+ handle_ep0(pcd);
+exit_xfercompl:
+ FH_DEBUGPL(DBG_PCDV, "after DOEPINT=%x doepint=%x\n",
+ fh_otg_read_dev_out_ep_intr(core_if, fh_ep), doepint.d32);
+ } else {
+ if (core_if->dma_desc_enable == 0
+ || pcd->ep0state != EP0_IDLE)
+ handle_ep0(pcd);
+ }
+#ifdef FH_EN_ISOC
+ } else if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
+ if (doepint.b.pktdrpsts == 0) {
+ /* Clear the bit in DOEPINTn for this interrupt */
+ CLEAR_OUT_EP_INTR(core_if,
+ epnum,
+ xfercompl);
+ complete_iso_ep(pcd, ep);
+ } else {
+
+ doepint_data_t doepint = {.d32 = 0 };
+ doepint.b.xfercompl = 1;
+ doepint.b.pktdrpsts = 1;
+ FH_WRITE_REG32
+ (&core_if->dev_if->out_ep_regs
+ [epnum]->doepint,
+ doepint.d32);
+ if (handle_iso_out_pkt_dropped
+ (core_if, fh_ep)) {
+ complete_iso_ep(pcd,
+ ep);
+ }
+ }
+#endif /* FH_EN_ISOC */
+#ifdef FH_UTE_PER_IO
+ } else if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
+ CLEAR_OUT_EP_INTR(core_if, epnum, xfercompl);
+ if (!ep->stopped)
+ complete_xiso_ep(ep);
+#endif /* FH_UTE_PER_IO */
+ } else {
+ /* Clear the bit in DOEPINTn for this interrupt */
+ CLEAR_OUT_EP_INTR(core_if, epnum,
+ xfercompl);
+
+ if (core_if->core_params->dev_out_nak) {
+ FH_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[epnum]);
+ pcd->core_if->ep_xfer_info[epnum].state = 0;
+#ifdef DEBUG
+ print_memory_payload(pcd, fh_ep);
+#endif
+ }
+ if (core_if->dma_desc_enable && fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
+ /*
+ * handle_xfercompl_iso_ddma
+ * (core_if->dev_if, ep);
+ */
+ complete_ep(ep);
+ } else {
+ complete_ep(ep);
+ }
+ }
+
+ }
+ if (doepint.b.stsphsercvd) {
+ deptsiz0_data_t deptsiz;
+ CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
+ deptsiz.d32 =
+ FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[0]->doeptsiz);
+ if ((core_if->dma_desc_enable) || (core_if->dma_enable &&
+ core_if->snpsid >= OTG_CORE_REV_3_00a)) {
+ do_setup_in_status_phase(pcd);
+ }
+ }
+
+ /* Endpoint disable */
+ if (doepint.b.epdisabled) {
+
+ /* Clear the bit in DOEPINTn for this interrupt */
+ CLEAR_OUT_EP_INTR(core_if, epnum, epdisabled);
+ if (core_if->core_params->dev_out_nak) {
+#ifdef DEBUG
+ print_memory_payload(pcd, fh_ep);
+#endif
+ /* In case of timeout condition */
+ if (core_if->ep_xfer_info[epnum].state == 2) {
+ dctl.d32 = FH_READ_REG32(&core_if->dev_if->
+ dev_global_regs->dctl);
+ dctl.b.cgoutnak = 1;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
+ dctl.d32);
+ /* Unmask goutnakeff interrupt which was masked
+ * during handle nak out interrupt */
+ gintmsk.b.goutnakeff = 1;
+ FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
+ 0, gintmsk.d32);
+
+ complete_ep(ep);
+ }
+ }
+ if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC)
+ {
+ dctl_data_t dctl;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ fh_otg_pcd_request_t *req = 0;
+
+ dctl.d32 = FH_READ_REG32(&core_if->dev_if->
+ dev_global_regs->dctl);
+ dctl.b.cgoutnak = 1;
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
+ dctl.d32);
+
+ intr_mask.d32 = 0;
+ intr_mask.b.incomplisoout = 1;
+
+ /* Get any pending requests */
+ if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
+ req = FH_CIRCLEQ_FIRST(&ep->queue);
+ if (!req) {
+ FH_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
+ } else {
+ fh_otg_request_done(ep, req, 0);
+ start_next_request(ep);
+ }
+ } else {
+ FH_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
+ }
+ }
+ }
+ /* AHB Error */
+ if (doepint.b.ahberr) {
+ FH_ERROR("EP%d OUT AHB Error\n", epnum);
+ FH_ERROR("EP%d DEPDMA=0x%08x \n",
+ epnum, core_if->dev_if->out_ep_regs[epnum]->doepdma);
+ CLEAR_OUT_EP_INTR(core_if, epnum, ahberr);
+ }
+			/* Setup Phase Done (control EPs) */
+ if (doepint.b.setup) {
+#ifdef DEBUG_EP0
+ FH_DEBUGPL(DBG_PCD, "EP%d SETUP Done\n", epnum);
+#endif
+ CLEAR_OUT_EP_INTR(core_if, epnum, setup);
+
+ handle_ep0(pcd);
+ }
+
+ /** OUT EP BNA Intr */
+ if (doepint.b.bna) {
+ CLEAR_OUT_EP_INTR(core_if, epnum, bna);
+ if (core_if->dma_desc_enable) {
+#ifdef FH_EN_ISOC
+ if (fh_ep->type ==
+ FH_OTG_EP_TYPE_ISOC) {
+						/*
+						 * This checking is performed to prevent first "false" BNA
+						 * handling occurring right after reconnect
+						 */
+ if (fh_ep->next_frame !=
+ 0xffffffff)
+ fh_otg_pcd_handle_iso_bna(ep);
+ } else
+#endif /* FH_EN_ISOC */
+ if (ep->fh_ep.type != FH_OTG_EP_TYPE_ISOC) {
+ fh_otg_pcd_handle_noniso_bna(ep);
+ }
+ }
+ }
+ /* Babble Interrupt */
+ if (doepint.b.babble) {
+ FH_DEBUGPL(DBG_ANY, "EP%d OUT Babble\n",
+ epnum);
+ handle_out_ep_babble_intr(pcd, epnum);
+
+ CLEAR_OUT_EP_INTR(core_if, epnum, babble);
+ }
+ if (doepint.b.outtknepdis) {
+ FH_DEBUGPL(DBG_ANY, "EP%d OUT Token received when EP is \
+ disabled\n",epnum);
+ if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
+ if (core_if->dma_desc_enable) {
+ if (!ep->fh_ep.iso_transfer_started) {
+ ep->fh_ep.frame_num = core_if->frame_num;
+ fh_otg_pcd_start_iso_ddma(core_if, ep);
+ }
+ } else {
+ doepmsk_data_t doepmsk = {.d32 = 0};
+ ep->fh_ep.frame_num = core_if->frame_num;
+ if (ep->fh_ep.bInterval > 1) {
+ depctl_data_t depctl;
+ depctl.d32 = FH_READ_REG32(&core_if->dev_if->
+ out_ep_regs[epnum]->doepctl);
+ if (ep->fh_ep.frame_num & 0x1) {
+ depctl.b.setd1pid = 1;
+ depctl.b.setd0pid = 0;
+ } else {
+ depctl.b.setd0pid = 1;
+ depctl.b.setd1pid = 0;
+ }
+ FH_WRITE_REG32(&core_if->dev_if->
+ out_ep_regs[epnum]->doepctl, depctl.d32);
+ }
+
+ start_next_request(ep);
+ doepmsk.b.outtknepdis = 1;
+ FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
+ doepmsk.d32, 0);
+ }
+ }
+ CLEAR_OUT_EP_INTR(core_if, epnum, outtknepdis);
+ }
+
+			/* NAK Interrupt */
+ if (doepint.b.nak) {
+ FH_DEBUGPL(DBG_ANY, "EP%d OUT NAK\n", epnum);
+ handle_out_ep_nak_intr(pcd, epnum);
+
+ CLEAR_OUT_EP_INTR(core_if, epnum, nak);
+ }
+			/* NYET Interrupt */
+ if (doepint.b.nyet) {
+ FH_DEBUGPL(DBG_ANY, "EP%d OUT NYET\n", epnum);
+ handle_out_ep_nyet_intr(pcd, epnum);
+
+ CLEAR_OUT_EP_INTR(core_if, epnum, nyet);
+ }
+ }
+
+ epnum++;
+ ep_intr >>= 1;
+ }
+
+ return 1;
+
+#undef CLEAR_OUT_EP_INTR
+}
+static int drop_transfer(uint32_t trgt_fr, uint32_t curr_fr, uint8_t frm_overrun)
+{
+ int retval = 0;
+ if(!frm_overrun && curr_fr >= trgt_fr)
+ retval = 1;
+ else if (frm_overrun
+ && (curr_fr >= trgt_fr && ((curr_fr - trgt_fr) < 0x3FFF / 2)))
+ retval = 1;
+ return retval;
+}
+
+/**
+ * Incomplete ISO IN Transfer Interrupt.
+ * This interrupt indicates one of the following conditions occurred
+ * while transmitting an ISOC transaction.
+ * - Corrupted IN Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ * The follow actions will be taken:
+ * -# Determine the EP
+ * -# Set incomplete flag in fh_ep structure
+ * -# Disable EP; when "Endpoint Disabled" interrupt is received
+ * Flush FIFO
+ */
+int32_t fh_otg_pcd_handle_incomplete_isoc_in_intr(fh_otg_pcd_t * pcd)
+{
+ gintsts_data_t gintsts;
+
+#ifdef FH_EN_ISOC
+ fh_otg_dev_if_t *dev_if;
+ deptsiz_data_t deptsiz = {.d32 = 0 };
+ depctl_data_t depctl = {.d32 = 0 };
+ dsts_data_t dsts = {.d32 = 0 };
+ fh_ep_t *fh_ep;
+ int i;
+
+ dev_if = GET_CORE_IF(pcd)->dev_if;
+
+ for (i = 1; i <= dev_if->num_in_eps; ++i) {
+ fh_ep = &pcd->in_ep[i].fh_ep;
+ if (fh_ep->active && fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
+ deptsiz.d32 =
+ FH_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
+ depctl.d32 =
+ FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
+
+ if (depctl.b.epdis && deptsiz.d32) {
+ set_current_pkt_info(GET_CORE_IF(pcd), fh_ep);
+ if (fh_ep->cur_pkt >= fh_ep->pkt_cnt) {
+ fh_ep->cur_pkt = 0;
+ fh_ep->proc_buf_num =
+ (fh_ep->proc_buf_num ^ 1) & 0x1;
+
+ if (fh_ep->proc_buf_num) {
+ fh_ep->cur_pkt_addr =
+ fh_ep->xfer_buff1;
+ fh_ep->cur_pkt_dma_addr =
+ fh_ep->dma_addr1;
+ } else {
+ fh_ep->cur_pkt_addr =
+ fh_ep->xfer_buff0;
+ fh_ep->cur_pkt_dma_addr =
+ fh_ep->dma_addr0;
+ }
+
+ }
+
+ dsts.d32 =
+ FH_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
+ dev_global_regs->dsts);
+ fh_ep->next_frame = dsts.b.soffn;
+
+ fh_otg_iso_ep_start_frm_transfer(GET_CORE_IF
+ (pcd),
+ fh_ep);
+ }
+ }
+ }
+
+#else
+ depctl_data_t depctl = {.d32 = 0 };
+ fh_ep_t *fh_ep;
+ fh_otg_dev_if_t *dev_if;
+ int i;
+ dev_if = GET_CORE_IF(pcd)->dev_if;
+
+ FH_DEBUGPL(DBG_PCD,"Incomplete ISO IN \n");
+
+ for (i = 1; i <= dev_if->num_in_eps; ++i) {
+ fh_ep = &pcd->in_ep[i-1].fh_ep;
+ depctl.d32 =
+ FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
+ if (depctl.b.epena && fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
+ if (drop_transfer(fh_ep->frame_num, GET_CORE_IF(pcd)->frame_num,
+ fh_ep->frm_overrun))
+ {
+ depctl.d32 =
+ FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
+ depctl.b.snak = 1;
+ depctl.b.epdis = 1;
+ FH_MODIFY_REG32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32, depctl.d32);
+ }
+ }
+ }
+
+ /*intr_mask.b.incomplisoin = 1;
+ FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+ intr_mask.d32, 0); */
+#endif //FH_EN_ISOC
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.incomplisoin = 1;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * Incomplete ISO OUT Transfer Interrupt.
+ *
+ * This interrupt indicates that the core has dropped an ISO OUT
+ * packet. The following conditions can be the cause:
+ * - FIFO Full, the entire packet would not fit in the FIFO.
+ * - CRC Error
+ * - Corrupted Token
+ * The follow actions will be taken:
+ * -# Determine the EP
+ * -# Set incomplete flag in fh_ep structure
+ * -# Read any data from the FIFO
+ * -# Disable EP. When "Endpoint Disabled" interrupt is received
+ * re-enable EP.
+ */
+int32_t fh_otg_pcd_handle_incomplete_isoc_out_intr(fh_otg_pcd_t * pcd)
+{
+
+ gintsts_data_t gintsts;
+
+#ifdef FH_EN_ISOC
+ fh_otg_dev_if_t *dev_if;
+ deptsiz_data_t deptsiz = {.d32 = 0 };
+ depctl_data_t depctl = {.d32 = 0 };
+ dsts_data_t dsts = {.d32 = 0 };
+ fh_ep_t *fh_ep;
+ int i;
+
+ dev_if = GET_CORE_IF(pcd)->dev_if;
+
+ for (i = 1; i <= dev_if->num_out_eps; ++i) {
+		fh_ep = &pcd->out_ep[i].fh_ep;	/* OUT-EP loop: was in_ep[] (copy-paste bug) */
+ if (pcd->out_ep[i].fh_ep.active &&
+ pcd->out_ep[i].fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
+ deptsiz.d32 =
+ FH_READ_REG32(&dev_if->out_ep_regs[i]->doeptsiz);
+ depctl.d32 =
+ FH_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
+
+ if (depctl.b.epdis && deptsiz.d32) {
+ set_current_pkt_info(GET_CORE_IF(pcd),
+ &pcd->out_ep[i].fh_ep);
+ if (fh_ep->cur_pkt >= fh_ep->pkt_cnt) {
+ fh_ep->cur_pkt = 0;
+ fh_ep->proc_buf_num =
+ (fh_ep->proc_buf_num ^ 1) & 0x1;
+
+ if (fh_ep->proc_buf_num) {
+ fh_ep->cur_pkt_addr =
+ fh_ep->xfer_buff1;
+ fh_ep->cur_pkt_dma_addr =
+ fh_ep->dma_addr1;
+ } else {
+ fh_ep->cur_pkt_addr =
+ fh_ep->xfer_buff0;
+ fh_ep->cur_pkt_dma_addr =
+ fh_ep->dma_addr0;
+ }
+
+ }
+
+ dsts.d32 =
+ FH_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
+ dev_global_regs->dsts);
+ fh_ep->next_frame = dsts.b.soffn;
+
+ fh_otg_iso_ep_start_frm_transfer(GET_CORE_IF
+ (pcd),
+ fh_ep);
+ }
+ }
+ }
+#else
+ /** @todo implement ISR */
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+ fh_otg_core_if_t *core_if;
+ deptsiz_data_t deptsiz = {.d32 = 0 };
+ depctl_data_t depctl = {.d32 = 0 };
+ dctl_data_t dctl = {.d32 = 0 };
+ fh_ep_t *fh_ep = NULL;
+ int i;
+ core_if = GET_CORE_IF(pcd);
+
+ for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
+ fh_ep = &pcd->out_ep[i].fh_ep;
+ depctl.d32 =
+ FH_READ_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl);
+ if (depctl.b.epena && depctl.b.dpid == (core_if->frame_num & 0x1)) {
+ core_if->dev_if->isoc_ep = fh_ep;
+ deptsiz.d32 =
+ FH_READ_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doeptsiz);
+ break;
+ }
+ }
+ dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
+ gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
+ intr_mask.d32 = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
+
+ if (!intr_mask.b.goutnakeff) {
+ /* Unmask it */
+ intr_mask.b.goutnakeff = 1;
+ FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, intr_mask.d32);
+ }
+ if (!gintsts.b.goutnakeff) {
+ dctl.b.sgoutnak = 1;
+ }
+ FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
+
+ depctl.d32 = FH_READ_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl);
+ if (depctl.b.epena) {
+ depctl.b.epdis = 1;
+ depctl.b.snak = 1;
+ }
+ FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl, depctl.d32);
+
+ intr_mask.d32 = 0;
+ intr_mask.b.incomplisoout = 1;
+
+#endif /* FH_EN_ISOC */
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.incomplisoout = 1;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This function handles the Global IN NAK Effective interrupt.
+ *
+ */
+int32_t fh_otg_pcd_handle_in_nak_effective(fh_otg_pcd_t * pcd)
+{
+ fh_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
+ depctl_data_t diepctl = {.d32 = 0 };
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+ gintsts_data_t gintsts;
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+ int i;
+
+ FH_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
+
+ /* Disable all active IN EPs */
+ for (i = 0; i <= dev_if->num_in_eps; i++) {
+ diepctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
+ if (!(diepctl.b.eptype & 1) && diepctl.b.epena) {
+ if (core_if->start_predict > 0)
+ core_if->start_predict++;
+ diepctl.b.epdis = 1;
+ diepctl.b.snak = 1;
+ FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl, diepctl.d32);
+ }
+ }
+
+
+ /* Disable the Global IN NAK Effective Interrupt */
+ intr_mask.b.ginnakeff = 1;
+ FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+ intr_mask.d32, 0);
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.ginnakeff = 1;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * OUT NAK Effective.
+ *
+ */
+int32_t fh_otg_pcd_handle_out_nak_effective(fh_otg_pcd_t * pcd)
+{
+ fh_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
+ gintmsk_data_t intr_mask = {.d32 = 0 };
+ gintsts_data_t gintsts;
+ depctl_data_t doepctl;
+ int i;
+
+ /* Disable the Global OUT NAK Effective Interrupt */
+ intr_mask.b.goutnakeff = 1;
+ FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
+ intr_mask.d32, 0);
+
+ /* If DEV OUT NAK enabled */
+ if (pcd->core_if->core_params->dev_out_nak) {
+ /* Run over all out endpoints to determine the ep number on
+ * which the timeout has happened
+ */
+ for (i = 0; i <= dev_if->num_out_eps; i++) {
+ if (pcd->core_if->ep_xfer_info[i].state == 2)
+ break;
+ }
+ if (i > dev_if->num_out_eps) {
+ dctl_data_t dctl;
+ dctl.d32 =
+ FH_READ_REG32(&dev_if->dev_global_regs->dctl);
+ dctl.b.cgoutnak = 1;
+ FH_WRITE_REG32(&dev_if->dev_global_regs->dctl,
+ dctl.d32);
+ goto out;
+ }
+
+ /* Disable the endpoint */
+ doepctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
+ if (doepctl.b.epena) {
+ doepctl.b.epdis = 1;
+ doepctl.b.snak = 1;
+ }
+ FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
+ return 1;
+ }
+ /* We come here from Incomplete ISO OUT handler */
+ if (dev_if->isoc_ep) {
+ fh_ep_t *fh_ep = (fh_ep_t *) dev_if->isoc_ep;
+ uint32_t epnum = fh_ep->num;
+ doepint_data_t doepint;
+ doepint.d32 =
+ FH_READ_REG32(&dev_if->out_ep_regs[fh_ep->num]->doepint);
+ dev_if->isoc_ep = NULL;
+ doepctl.d32 =
+ FH_READ_REG32(&dev_if->out_ep_regs[epnum]->doepctl);
+ FH_PRINTF("Before disable DOEPCTL = %08x\n", doepctl.d32);
+ if (doepctl.b.epena) {
+ doepctl.b.epdis = 1;
+ doepctl.b.snak = 1;
+ }
+ FH_WRITE_REG32(&dev_if->out_ep_regs[epnum]->doepctl,
+ doepctl.d32);
+ return 1;
+ } else
+ FH_PRINTF("INTERRUPT Handler not implemented for %s\n",
+ "Global OUT NAK Effective\n");
+
+out:
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.goutnakeff = 1;
+ FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * PCD interrupt handler.
+ *
+ * The PCD handles the device interrupts. Many conditions can cause a
+ * device interrupt. When an interrupt occurs, the device interrupt
+ * service routine determines the cause of the interrupt and
+ * dispatches handling to the appropriate function. These interrupt
+ * handling functions are described below.
+ *
+ * All interrupt registers are processed from LSB to MSB.
+ *
+ */
+int32_t fh_otg_pcd_handle_intr(fh_otg_pcd_t * pcd)
+{
+ fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
+#ifdef VERBOSE
+ fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+#endif
+ gintsts_data_t gintr_status;
+ int32_t retval = 0;
+
+ if (fh_otg_check_haps_status(core_if) == -1 ) {
+ FH_WARN("HAPS is disconnected");
+ return retval;
+ }
+
+ /* Exit from ISR if core is hibernated */
+ if (core_if->hibernation_suspend == 1) {
+ return retval;
+ }
+#ifdef VERBOSE
+ FH_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n",
+ __func__,
+ FH_READ_REG32(&global_regs->gintsts),
+ FH_READ_REG32(&global_regs->gintmsk));
+#endif
+
+ if (fh_otg_is_device_mode(core_if)) {
+ FH_SPINLOCK(pcd->lock);
+#ifdef VERBOSE
+ FH_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n",
+ __func__,
+ FH_READ_REG32(&global_regs->gintsts),
+ FH_READ_REG32(&global_regs->gintmsk));
+#endif
+
+ gintr_status.d32 = fh_otg_read_core_intr(core_if);
+
+ FH_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n",
+ __func__, gintr_status.d32);
+
+ if (gintr_status.b.sofintr) {
+ retval |= fh_otg_pcd_handle_sof_intr(pcd);
+ }
+ if (gintr_status.b.rxstsqlvl) {
+ retval |=
+ fh_otg_pcd_handle_rx_status_q_level_intr(pcd);
+ }
+ if (gintr_status.b.nptxfempty) {
+ retval |= fh_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
+ }
+ if (gintr_status.b.goutnakeff) {
+ retval |= fh_otg_pcd_handle_out_nak_effective(pcd);
+ }
+ if (gintr_status.b.i2cintr) {
+ retval |= fh_otg_pcd_handle_i2c_intr(pcd);
+ }
+ if (gintr_status.b.erlysuspend) {
+ retval |= fh_otg_pcd_handle_early_suspend_intr(pcd);
+ }
+ if (gintr_status.b.usbreset) {
+ retval |= fh_otg_pcd_handle_usb_reset_intr(pcd);
+ }
+ if (gintr_status.b.enumdone) {
+ retval |= fh_otg_pcd_handle_enum_done_intr(pcd);
+ }
+ if (gintr_status.b.isooutdrop) {
+ retval |=
+ fh_otg_pcd_handle_isoc_out_packet_dropped_intr
+ (pcd);
+ }
+ if (gintr_status.b.eopframe) {
+ retval |=
+ fh_otg_pcd_handle_end_periodic_frame_intr(pcd);
+ }
+ if (gintr_status.b.inepint) {
+ if (!core_if->multiproc_int_enable) {
+ retval |= fh_otg_pcd_handle_in_ep_intr(pcd);
+ }
+ }
+ if (gintr_status.b.outepintr) {
+ if (!core_if->multiproc_int_enable) {
+ retval |= fh_otg_pcd_handle_out_ep_intr(pcd);
+ }
+ }
+ if (gintr_status.b.epmismatch) {
+ retval |= fh_otg_pcd_handle_ep_mismatch_intr(pcd);
+ }
+ if (gintr_status.b.fetsusp) {
+ retval |= fh_otg_pcd_handle_ep_fetsusp_intr(pcd);
+ }
+ if (gintr_status.b.ginnakeff) {
+ retval |= fh_otg_pcd_handle_in_nak_effective(pcd);
+ }
+ if (gintr_status.b.incomplisoin) {
+ retval |=
+ fh_otg_pcd_handle_incomplete_isoc_in_intr(pcd);
+ }
+ if (gintr_status.b.incomplisoout) {
+ retval |=
+ fh_otg_pcd_handle_incomplete_isoc_out_intr(pcd);
+ }
+
+ /* In MPI mode Device Endpoints interrupts are asserted
+ * without setting outepintr and inepint bits set, so these
+ * Interrupt handlers are called without checking these bit-fields
+ */
+ if (core_if->multiproc_int_enable) {
+ retval |= fh_otg_pcd_handle_in_ep_intr(pcd);
+ retval |= fh_otg_pcd_handle_out_ep_intr(pcd);
+ }
+#ifdef VERBOSE
+ FH_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__,
+ FH_READ_REG32(&global_regs->gintsts));
+#endif
+ FH_SPINUNLOCK(pcd->lock);
+ }
+ return retval;
+}
+
+#endif /* FH_HOST_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_linux.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_linux.c
new file mode 100644
index 00000000..ebeed66d
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_linux.c
@@ -0,0 +1,1447 @@
+ /* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd_linux.c $
+ * $Revision: #30 $
+ * $Date: 2015/08/06 $
+ * $Change: 2913039 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef FH_HOST_ONLY
+
+/** @file
+ * This file implements the Peripheral Controller Driver.
+ *
+ * The Peripheral Controller Driver (PCD) is responsible for
+ * translating requests from the Function Driver into the appropriate
+ * actions on the FH_otg controller. It isolates the Function Driver
+ * from the specifics of the controller by providing an API to the
+ * Function Driver.
+ *
+ * The Peripheral Controller Driver for Linux will implement the
+ * Gadget API, so that the existing Gadget drivers can be used.
+ * (Gadget Driver is the Linux terminology for a Function Driver.)
+ *
+ * The Linux Gadget API is defined in the header file
+ * <code><linux/usb_gadget.h></code>. The USB EP operations API is
+ * defined in the structure <code>usb_ep_ops</code> and the USB
+ * Controller API is defined in the structure
+ * <code>usb_gadget_ops</code>.
+ *
+ */
+#include <linux/platform_device.h>
+
+#include "fh_otg_os_dep.h"
+#include "fh_otg_pcd_if.h"
+#include "fh_otg_pcd.h"
+#include "fh_otg_driver.h"
+#include "fh_otg_dbg.h"
+
+static struct gadget_wrapper {
+ fh_otg_pcd_t *pcd;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+ struct usb_ep ep0;
+ struct usb_ep in_ep[16];
+ struct usb_ep out_ep[16];
+
+} *gadget_wrapper;
+
+/* Display the contents of the buffer */
+extern void dump_msg(const u8 * buf, unsigned int length);
+/**
+ * Get the fh_otg_pcd_ep_t* from usb_ep* pointer - NULL in case
+ * if the endpoint is not found
+ */
+static struct fh_otg_pcd_ep *ep_from_handle(fh_otg_pcd_t * pcd, void *handle)
+{
+	int i;
+	/* EP0 is stored separately from the in/out EP arrays. */
+	if (pcd->ep0.priv == handle) {
+		return &pcd->ep0;
+	}
+
+	/* Scan both directions; ->priv holds the usb_ep handle recorded
+	 * when the endpoint was enabled. */
+	for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
+		if (pcd->in_ep[i].priv == handle)
+			return &pcd->in_ep[i];
+		if (pcd->out_ep[i].priv == handle)
+			return &pcd->out_ep[i];
+	}
+
+	return NULL;
+}
+
+/* USB Endpoint Operations */
+/*
+ * The following sections briefly describe the behavior of the Gadget
+ * API endpoint operations implemented in the FH_otg driver
+ * software. Detailed descriptions of the generic behavior of each of
+ * these functions can be found in the Linux header file
+ * include/linux/usb_gadget.h.
+ *
+ * The Gadget API provides wrapper functions for each of the function
+ * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper
+ * function, which then calls the underlying PCD function. The
+ * following sections are named according to the wrapper
+ * functions. Within each section, the corresponding FH_otg PCD
+ * function name is specified.
+ *
+ */
+
+/**
+ * This function is called by the Gadget Driver for each EP to be
+ * configured for the current configuration (SET_CONFIGURATION).
+ *
+ * This function initializes the fh_otg_ep_t data structure, and then
+ * calls fh_otg_ep_activate.
+ */
+static int ep_enable(struct usb_ep *usb_ep,
+		     const struct usb_endpoint_descriptor *ep_desc)
+{
+	int retval;
+
+	FH_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, usb_ep, ep_desc);
+
+	/* Reject a null EP, a null descriptor, or a descriptor that is not
+	 * an endpoint descriptor. */
+	if (!usb_ep || !ep_desc || ep_desc->bDescriptorType != USB_DT_ENDPOINT) {
+		FH_WARN("%s, bad ep or descriptor\n", __func__);
+		return -EINVAL;
+	}
+	/* EP0 is managed by the PCD itself and may not be enabled here. */
+	if (usb_ep == &gadget_wrapper->ep0) {
+		FH_WARN("%s, bad ep(0)\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check FIFO size? */
+	if (!ep_desc->wMaxPacketSize) {
+		FH_WARN("%s, bad %s maxpacket\n", __func__, usb_ep->name);
+		return -ERANGE;
+	}
+
+	/* No gadget driver bound, or bus speed not yet negotiated. */
+	if (!gadget_wrapper->driver ||
+	    gadget_wrapper->gadget.speed == USB_SPEED_UNKNOWN) {
+		FH_WARN("%s, bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	/* Delete after check - MAS */
+#if 0
+	nat = (uint32_t) ep_desc->wMaxPacketSize;
+	printk(KERN_ALERT "%s: nat (before) =%d\n", __func__, nat);
+	nat = (nat >> 11) & 0x03;
+	printk(KERN_ALERT "%s: nat (after) =%d\n", __func__, nat);
+#endif
+	retval = fh_otg_pcd_ep_enable(gadget_wrapper->pcd,
+				      (const uint8_t *)ep_desc,
+				      (void *)usb_ep);
+	if (retval) {
+		FH_WARN("fh_otg_pcd_ep_enable failed\n");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): this stores the raw wMaxPacketSize value; for
+	 * high-speed periodic EPs that also includes the mult bits (12:11).
+	 * Confirm callers expect the unmasked value. */
+	usb_ep->maxpacket = le16_to_cpu(ep_desc->wMaxPacketSize);
+
+	return 0;
+}
+
+/**
+ * This function is called when an EP is disabled due to disconnect or
+ * change in configuration. Any pending requests will terminate with a
+ * status of -ESHUTDOWN.
+ *
+ * This function modifies the fh_otg_ep_t data structure for this EP,
+ * and then calls fh_otg_ep_deactivate.
+ */
+static int ep_disable(struct usb_ep *usb_ep)
+{
+	int retval;
+
+	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, usb_ep);
+	if (!usb_ep) {
+		/* NOTE(review): inside this branch usb_ep is always NULL, so
+		 * the ternary always passes NULL to a %s conversion - relies
+		 * on printk's "(null)" handling. */
+		FH_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__,
+			   usb_ep ? usb_ep->name : NULL);
+		return -EINVAL;
+	}
+
+	/* Any failure from the portable layer is collapsed to -EINVAL. */
+	retval = fh_otg_pcd_ep_disable(gadget_wrapper->pcd, usb_ep);
+	if (retval) {
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+/**
+ * This function allocates a request object to use with the specified
+ * endpoint.
+ *
+ * @param ep The endpoint to be used with with the request
+ * @param gfp_flags the GFP_* flags to use.
+ */
+static struct usb_request *fh_otg_pcd_alloc_request(struct usb_ep *ep,
+						    gfp_t gfp_flags)
+{
+	struct usb_request *usb_req;
+
+	FH_DEBUGPL(DBG_PCDV, "%s(%p,%d)\n", __func__, ep, gfp_flags);
+	if (0 == ep) {
+		FH_WARN("%s() %s\n", __func__, "Invalid EP!\n");
+		return 0;
+	}
+	usb_req = kmalloc(sizeof(*usb_req), gfp_flags);
+	if (0 == usb_req) {
+		FH_WARN("%s() %s\n", __func__, "request allocation failed!\n");
+		return 0;
+	}
+	memset(usb_req, 0, sizeof(*usb_req));
+	/* Mark the DMA address as unmapped; ep_queue maps it on demand. */
+	usb_req->dma = FH_DMA_ADDR_INVALID;
+
+	return usb_req;
+}
+
+/**
+ * This function frees a request object.
+ *
+ * @param ep The endpoint associated with the request
+ * @param req The request being freed
+ */
+static void fh_otg_pcd_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+	FH_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, ep, req);
+
+	/* Silently ignore bogus arguments apart from the warning. */
+	if (0 == ep || 0 == req) {
+		FH_WARN("%s() %s\n", __func__,
+			"Invalid ep or req argument!\n");
+		return;
+	}
+
+	kfree(req);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+/**
+ * This function allocates an I/O buffer to be used for a transfer
+ * to/from the specified endpoint.
+ *
+ * @param usb_ep The endpoint to be used with with the request
+ * @param bytes The desired number of bytes for the buffer
+ * @param dma Pointer to the buffer's DMA address; must be valid
+ * @param gfp_flags the GFP_* flags to use.
+ * @return address of a new buffer or null is buffer could not be allocated.
+ */
+static void *fh_otg_pcd_alloc_buffer(struct usb_ep *usb_ep, unsigned bytes,
+				     dma_addr_t * dma, gfp_t gfp_flags)
+{
+	void *buf;
+	fh_otg_pcd_t *pcd = 0;
+
+	pcd = gadget_wrapper->pcd;
+
+	FH_DEBUGPL(DBG_PCDV, "%s(%p,%d,%p,%0x)\n", __func__, usb_ep, bytes,
+		   dma, gfp_flags);
+
+	/* Check dword alignment */
+	if ((bytes & 0x3UL) != 0) {
+		FH_WARN("%s() Buffer size is not a multiple of"
+			"DWORD size (%d)", __func__, bytes);
+	}
+
+	/* NOTE(review): a NULL device is passed to dma_alloc_coherent;
+	 * acceptable on this 3.0.x platform but would need the real
+	 * platform device on newer kernels. */
+	buf = dma_alloc_coherent(NULL, bytes, dma, gfp_flags);
+
+	/* Check dword alignment */
+	/* NOTE(review): (int)buf truncates on 64-bit; harmless here only
+	 * because this build targets a 32-bit ARM SoC. */
+	if (((int)buf & 0x3UL) != 0) {
+		FH_WARN("%s() Buffer is not DWORD aligned (%p)",
+			__func__, buf);
+	}
+
+	return buf;
+}
+
+/**
+ * This function frees an I/O buffer that was allocated by alloc_buffer.
+ *
+ * @param usb_ep the endpoint associated with the buffer
+ * @param buf address of the buffer
+ * @param dma The buffer's DMA address
+ * @param bytes The number of bytes of the buffer
+ */
+static void fh_otg_pcd_free_buffer(struct usb_ep *usb_ep, void *buf,
+				   dma_addr_t dma, unsigned bytes)
+{
+	fh_otg_pcd_t *pcd = 0;
+
+	pcd = gadget_wrapper->pcd;
+
+	FH_DEBUGPL(DBG_PCDV, "%s(%p,%0x,%d)\n", __func__, buf, dma, bytes);
+
+	/* Mirror of fh_otg_pcd_alloc_buffer: same NULL-device convention. */
+	dma_free_coherent(NULL, bytes, buf, dma);
+}
+#endif
+
+/**
+ * This function is used to submit an I/O Request to an EP.
+ *
+ * - When the request completes the request's completion callback
+ * is called to return the request to the driver.
+ * - An EP, except control EPs, may have multiple requests
+ * pending.
+ * - Once submitted the request cannot be examined or modified.
+ * - Each request is turned into one or more packets.
+ * - A BULK EP can queue any amount of data; the transfer is
+ * packetized.
+ * - Zero length Packets are specified with the request 'zero'
+ * flag.
+ */
+static int ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
+		    gfp_t gfp_flags)
+{
+	fh_otg_pcd_t *pcd;
+	struct fh_otg_pcd_ep *ep;
+	int retval, is_isoc_ep, is_in_ep;
+	dma_addr_t dma_addr;
+
+	FH_DEBUGPL(DBG_PCDV, "%s(%p,%p,%d)\n",
+		   __func__, usb_ep, usb_req, gfp_flags);
+
+	/* A request must carry a completion callback and a buffer. */
+	if (!usb_req || !usb_req->complete || !usb_req->buf) {
+		FH_WARN("bad params\n");
+		return -EINVAL;
+	}
+
+	if (!usb_ep) {
+		FH_WARN("bad ep\n");
+		return -EINVAL;
+	}
+
+	pcd = gadget_wrapper->pcd;
+	if (!gadget_wrapper->driver ||
+	    gadget_wrapper->gadget.speed == USB_SPEED_UNKNOWN) {
+		FH_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n",
+			   gadget_wrapper->gadget.speed);
+		FH_WARN("bogus device state\n");
+		return -ESHUTDOWN;
+	}
+
+	FH_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n",
+		   usb_ep->name, usb_req, usb_req->length, usb_req->buf);
+
+	usb_req->status = -EINPROGRESS;
+	usb_req->actual = 0;
+
+	/* Resolve the internal EP to learn type/direction; an unknown
+	 * handle is treated as a non-isoc OUT endpoint. */
+	ep = ep_from_handle(pcd, usb_ep);
+	if (ep == NULL) {
+		is_isoc_ep = 0;
+		is_in_ep = 0;
+	} else {
+		is_isoc_ep = (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) ? 1 : 0;
+		is_in_ep = ep->fh_ep.is_in;
+	}
+
+	dma_addr = usb_req->dma;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)
+	/* Map the buffer for DMA only if the caller has not already done
+	 * so (dma == FH_DMA_ADDR_INVALID); _complete() unmaps it. */
+	if (GET_CORE_IF(pcd)->dma_enable) {
+		struct platform_device *dev =
+		    gadget_wrapper->pcd->otg_dev->os_dep.pdev;
+		if (dma_addr == FH_DMA_ADDR_INVALID) {
+			if (usb_req->length != 0) {
+				dma_addr = dma_map_single(&dev->dev,
+							  usb_req->buf,
+							  usb_req->length,
+							  is_in_ep ?
+							  DMA_TO_DEVICE : DMA_FROM_DEVICE);
+				usb_req->dma = dma_addr;
+			} else {
+				dma_addr = 0;
+			}
+		}
+	}
+#endif
+
+#ifdef FH_UTE_PER_IO
+	/* Per-IO isoc transfers take the extended-request path. */
+	if (is_isoc_ep == 1) {
+		retval =
+		    fh_otg_pcd_xiso_ep_queue(pcd, usb_ep, usb_req->buf,
+					     dma_addr, usb_req->length,
+					     usb_req->zero, usb_req,
+					     gfp_flags == GFP_ATOMIC ? 1 : 0,
+					     &usb_req->ext_req);
+		if (retval)
+			return -EINVAL;
+
+		return 0;
+	}
+#endif
+	retval = fh_otg_pcd_ep_queue(pcd, usb_ep, usb_req->buf, dma_addr,
+				     usb_req->length, usb_req->zero, usb_req,
+				     gfp_flags == GFP_ATOMIC ? 1 : 0);
+	if (retval) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * This function cancels an I/O request from an EP.
+ */
+static int ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+	FH_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, usb_ep, usb_req);
+
+	if (!usb_ep || !usb_req) {
+		FH_WARN("bad argument\n");
+		return -EINVAL;
+	}
+	if (!gadget_wrapper->driver ||
+	    gadget_wrapper->gadget.speed == USB_SPEED_UNKNOWN) {
+		FH_WARN("bogus device state\n");
+		return -ESHUTDOWN;
+	}
+	/* The portable layer's failure code is collapsed to -EINVAL. */
+	if (fh_otg_pcd_ep_dequeue(gadget_wrapper->pcd, usb_ep, usb_req)) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * usb_ep_set_halt stalls an endpoint.
+ *
+ * usb_ep_clear_halt clears an endpoint halt and resets its data
+ * toggle.
+ *
+ * Both of these functions are implemented with the same underlying
+ * function. The behavior depends on the value argument.
+ *
+ * @param[in] usb_ep the Endpoint to halt or clear halt.
+ * @param[in] value
+ * - 0 means clear_halt.
+ * - 1 means set_halt,
+ * - 2 means clear stall lock flag.
+ * - 3 means set stall lock flag.
+ */
+static int ep_halt(struct usb_ep *usb_ep, int value)
+{
+	int retval = 0;
+
+	/* NOTE(review): usb_ep->name is dereferenced here BEFORE the NULL
+	 * check below - a NULL usb_ep would oops in this debug print. */
+	FH_DEBUGPL(DBG_PCD, "HALT %s %d\n", usb_ep->name, value);
+
+	if (!usb_ep) {
+		FH_WARN("bad ep\n");
+		return -EINVAL;
+	}
+
+	/* -FH_E_AGAIN maps to -EAGAIN (e.g. halt not possible right now);
+	 * any other failure is collapsed to -EINVAL. */
+	retval = fh_otg_pcd_ep_halt(gadget_wrapper->pcd, usb_ep, value);
+	if (retval == -FH_E_AGAIN) {
+		return -EAGAIN;
+	} else if (retval) {
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ep_wedge(struct usb_ep *usb_ep)
+{
+	FH_DEBUGPL(DBG_PCD, "WEDGE %s\n", usb_ep->name);
+
+	/* value 3 = set the stall-lock flag (see ep_halt's contract). */
+	return ep_halt(usb_ep, 3);
+}
+#endif
+
+#ifdef FH_EN_ISOC
+/**
+ * This function is used to submit an ISOC Transfer Request to an EP.
+ *
+ * - Every time a sync period completes the request's completion callback
+ * is called to provide data to the gadget driver.
+ * - Once submitted the request cannot be modified.
+ * - Each request is turned into periodic data packets until the ISO
+ * Transfer is stopped.
+ */
+static int iso_ep_start(struct usb_ep *usb_ep, struct usb_iso_request *req,
+			gfp_t gfp_flags)
+{
+	int retval = 0;
+
+	/* Double-buffered isoc: both buf0 and buf1 plus the per-period
+	 * process_buffer callback are mandatory. */
+	if (!req || !req->process_buffer || !req->buf0 || !req->buf1) {
+		FH_WARN("bad params\n");
+		return -EINVAL;
+	}
+
+	if (!usb_ep) {
+		FH_PRINTF("bad params\n");
+		return -EINVAL;
+	}
+
+	req->status = -EINPROGRESS;
+
+	/* USB_REQ_ISO_ASAP: -1 tells the portable layer to pick the start
+	 * frame itself. */
+	retval =
+	    fh_otg_pcd_iso_ep_start(gadget_wrapper->pcd, usb_ep, req->buf0,
+				    req->buf1, req->dma0, req->dma1,
+				    req->sync_frame, req->data_pattern_frame,
+				    req->data_per_frame,
+				    req->
+				    flags & USB_REQ_ISO_ASAP ? -1 :
+				    req->start_frame, req->buf_proc_intrvl,
+				    req, gfp_flags == GFP_ATOMIC ? 1 : 0);
+
+	if (retval) {
+		return -EINVAL;
+	}
+
+	return retval;
+}
+
+/**
+ * This function stops ISO EP Periodic Data Transfer.
+ */
+static int iso_ep_stop(struct usb_ep *usb_ep, struct usb_iso_request *req)
+{
+	int retval = 0;
+	/* NOTE(review): bad-ep and bogus-state cases only warn; execution
+	 * falls through to the stop call regardless. */
+	if (!usb_ep) {
+		FH_WARN("bad ep\n");
+	}
+
+	if (!gadget_wrapper->driver ||
+	    gadget_wrapper->gadget.speed == USB_SPEED_UNKNOWN) {
+		FH_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n",
+			   gadget_wrapper->gadget.speed);
+		FH_WARN("bogus device state\n");
+	}
+
+	/* NOTE(review): the return value of fh_otg_pcd_iso_ep_stop is
+	 * discarded, so retval is still 0 and the check below is dead -
+	 * this function always returns 0. Likely the call's result was
+	 * meant to be assigned to retval. */
+	fh_otg_pcd_iso_ep_stop(gadget_wrapper->pcd, usb_ep, req);
+	if (retval) {
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+static struct usb_iso_request *alloc_iso_request(struct usb_ep *ep,
+						 int packets, gfp_t gfp_flags)
+{
+	struct usb_iso_request *pReq = NULL;
+	uint32_t req_size;
+
+	/* One allocation holds the request plus two packet-descriptor
+	 * arrays (double-buffered: desc0 and desc1). */
+	req_size = sizeof(struct usb_iso_request);
+	req_size +=
+	    (2 * packets * (sizeof(struct usb_gadget_iso_packet_descriptor)));
+
+	pReq = kmalloc(req_size, gfp_flags);
+	if (!pReq) {
+		FH_WARN("Can't allocate Iso Request\n");
+		return 0;
+	}
+	/* desc0 starts immediately after the request struct... */
+	pReq->iso_packet_desc0 = (void *)(pReq + 1);
+
+	/* ...and desc1 follows desc0's `packets` entries. */
+	pReq->iso_packet_desc1 = pReq->iso_packet_desc0 + packets;
+
+	return pReq;
+}
+
+/* Single kfree suffices: descriptors live in the same allocation. */
+static void free_iso_request(struct usb_ep *ep, struct usb_iso_request *req)
+{
+	kfree(req);
+}
+
+/* EP operations table, ISOC build: wraps the standard usb_ep_ops and adds
+ * the FH-specific periodic-transfer entry points. */
+static struct usb_isoc_ep_ops fh_otg_pcd_ep_ops = {
+	.ep_ops = {
+		   .enable = ep_enable,
+		   .disable = ep_disable,
+
+		   .alloc_request = fh_otg_pcd_alloc_request,
+		   .free_request = fh_otg_pcd_free_request,
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+		   .alloc_buffer = fh_otg_pcd_alloc_buffer,
+		   .free_buffer = fh_otg_pcd_free_buffer,
+#endif
+
+		   .queue = ep_queue,
+		   .dequeue = ep_dequeue,
+
+		   .set_halt = ep_halt,
+
+	#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+		   .set_wedge = ep_wedge,
+	#endif
+		   .fifo_status = 0,
+		   .fifo_flush = 0,
+		   },
+
+	.iso_ep_start = iso_ep_start,
+	.iso_ep_stop = iso_ep_stop,
+	.alloc_iso_request = alloc_iso_request,
+	.free_iso_request = free_iso_request,
+};
+
+#else
+
+/* EP operations table, non-ISOC build: plain gadget usb_ep_ops. */
+static struct usb_ep_ops fh_otg_pcd_ep_ops = {
+	.enable = ep_enable,
+	.disable = ep_disable,
+
+	.alloc_request = fh_otg_pcd_alloc_request,
+	.free_request = fh_otg_pcd_free_request,
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+	.alloc_buffer = fh_otg_pcd_alloc_buffer,
+	.free_buffer = fh_otg_pcd_free_buffer,
+#endif
+
+	.queue = ep_queue,
+	.dequeue = ep_dequeue,
+
+	.set_halt = ep_halt,
+
+	#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	.set_wedge = ep_wedge,
+	#endif
+
+	.fifo_status = 0,
+	.fifo_flush = 0,
+
+};
+
+#endif /* _EN_ISOC_ */
+/* Gadget Operations */
+/**
+ * The following gadget operations will be implemented in the FH_otg
+ * PCD. Functions in the API that are not described below are not
+ * implemented.
+ *
+ * The Gadget API provides wrapper functions for each of the function
+ * pointers defined in usb_gadget_ops. The Gadget Driver calls the
+ * wrapper function, which then calls the underlying PCD function. The
+ * following sections are named according to the wrapper functions
+ * (except for ioctl, which doesn't have a wrapper function). Within
+ * each section, the corresponding FH_otg PCD function name is
+ * specified.
+ *
+ */
+
+/**
+ *Gets the USB Frame number of the last SOF.
+ */
+static int get_frame_number(struct usb_gadget *gadget)
+{
+	struct gadget_wrapper *d;
+
+	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, gadget);
+
+	if (gadget == 0) {
+		return -ENODEV;
+	}
+
+	/* Recover the wrapper from the embedded gadget member. */
+	d = container_of(gadget, struct gadget_wrapper, gadget);
+	return fh_otg_pcd_get_frame_number(d->pcd);
+}
+
+#ifdef CONFIG_USB_FH_OTG_LPM
+/* LPM/BESL capability accessors: each recovers the wrapper from the
+ * embedded gadget and forwards the query to the portable PCD layer. */
+static int test_lpm_enabled(struct usb_gadget *gadget)
+{
+	struct gadget_wrapper *d;
+
+	d = container_of(gadget, struct gadget_wrapper, gadget);
+
+	return fh_otg_pcd_is_lpm_enabled(d->pcd);
+}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
+static int test_besl_enabled(struct usb_gadget *gadget)
+{
+	struct gadget_wrapper *d;
+
+	d = container_of(gadget, struct gadget_wrapper, gadget);
+
+	return fh_otg_pcd_is_besl_enabled(d->pcd);
+}
+static int get_param_baseline_besl(struct usb_gadget *gadget)
+{
+	struct gadget_wrapper *d;
+
+	d = container_of(gadget, struct gadget_wrapper, gadget);
+
+	return fh_otg_pcd_get_param_baseline_besl(d->pcd);
+}
+static int get_param_deep_besl(struct usb_gadget *gadget)
+{
+	struct gadget_wrapper *d;
+
+	d = container_of(gadget, struct gadget_wrapper, gadget);
+
+	return fh_otg_pcd_get_param_deep_besl(d->pcd);
+}
+#endif
+#endif
+#endif
+
+/**
+ * Initiates Session Request Protocol (SRP) to wakeup the host if no
+ * session is in progress. If a session is already in progress, but
+ * the device is suspended, remote wakeup signaling is started.
+ *
+ */
+static int wakeup(struct usb_gadget *gadget)
+{
+	struct gadget_wrapper *d;
+
+	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, gadget);
+
+	if (gadget == 0) {
+		return -ENODEV;
+	} else {
+		d = container_of(gadget, struct gadget_wrapper, gadget);
+	}
+	/* Result of the wakeup attempt is not propagated to the caller. */
+	fh_otg_pcd_wakeup(d->pcd);
+	return 0;
+}
+
+static int d_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct gadget_wrapper *d;
+
+	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, gadget);
+	if (gadget == 0)
+		return -ENODEV;
+
+	d = container_of(gadget, struct gadget_wrapper, gadget);
+	/* pullup(on) == soft-disconnect(off), hence the inverted flag. */
+	if (is_on)
+		fh_otg_pcd_disconnect_soft(d->pcd, 0);
+	else
+		fh_otg_pcd_disconnect_soft(d->pcd, 1);
+
+	return 0;
+}
+
+/* Gadget-level operations exposed to the Linux gadget core. */
+static const struct usb_gadget_ops fh_otg_pcd_ops = {
+	.get_frame = get_frame_number,
+	.wakeup = wakeup,
+	.pullup = d_pullup,
+#ifdef CONFIG_USB_FH_OTG_LPM
+	.lpm_support = test_lpm_enabled,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
+	.besl_support = test_besl_enabled,
+	.get_baseline_besl = get_param_baseline_besl,
+	.get_deep_besl = get_param_deep_besl,
+#endif
+#endif
+	// current versions must always be self-powered
+};
+
+static int _setup(fh_otg_pcd_t * pcd, uint8_t * bytes)
+{
+	int retval = -FH_E_NOT_SUPPORTED;
+	/* Forward the raw 8-byte SETUP packet to the gadget driver. */
+	if (gadget_wrapper->driver && gadget_wrapper->driver->setup) {
+		retval = gadget_wrapper->driver->setup(&gadget_wrapper->gadget,
+						       (struct usb_ctrlrequest
+							*)bytes);
+	}
+
+	/* Translate Linux errno back into the FH portable error space. */
+	if (retval == -ENOTSUPP) {
+		retval = -FH_E_NOT_SUPPORTED;
+	} else if (retval < 0) {
+		retval = -FH_E_INVALID;
+	}
+
+	return retval;
+}
+
+#ifdef FH_EN_ISOC
+static int _isoc_complete(fh_otg_pcd_t * pcd, void *ep_handle,
+			  void *req_handle, int proc_buf_num)
+{
+	int i, packet_count;
+	struct usb_gadget_iso_packet_descriptor *iso_packet = 0;
+	struct usb_iso_request *iso_req = req_handle;
+
+	/* proc_buf_num selects which half of the double buffer finished. */
+	if (proc_buf_num) {
+		iso_packet = iso_req->iso_packet_desc1;
+	} else {
+		iso_packet = iso_req->iso_packet_desc0;
+	}
+	/* Copy per-packet results from the portable layer into the gadget
+	 * descriptor array. */
+	packet_count =
+	    fh_otg_pcd_get_iso_packet_count(pcd, ep_handle, req_handle);
+	for (i = 0; i < packet_count; ++i) {
+		int status;
+		int actual;
+		int offset;
+		fh_otg_pcd_get_iso_packet_params(pcd, ep_handle, req_handle,
+						 i, &status, &actual, &offset);
+		switch (status) {
+		case -FH_E_NO_DATA:
+			status = -ENODATA;
+			break;
+		default:
+			if (status) {
+				FH_PRINTF("unknown status in isoc packet\n");
+			}
+
+		}
+		iso_packet[i].status = status;
+		iso_packet[i].offset = offset;
+		iso_packet[i].actual_length = actual;
+	}
+
+	iso_req->status = 0;
+	/* Hand the finished buffer to the gadget driver's callback. */
+	iso_req->process_buffer(ep_handle, iso_req);
+
+	return 0;
+}
+#endif /* FH_EN_ISOC */
+
+#ifdef FH_UTE_PER_IO
+/**
+ * Copy the contents of the extended request to the Linux usb_request's
+ * extended part and call the gadget's completion.
+ *
+ * @param pcd Pointer to the pcd structure
+ * @param ep_handle Void pointer to the usb_ep structure
+ * @param req_handle Void pointer to the usb_request structure
+ * @param status Request status returned from the portable logic
+ * @param ereq_port Void pointer to the extended request structure
+ * created in the the portable part that contains the
+ * results of the processed iso packets.
+ */
+static int _xisoc_complete(fh_otg_pcd_t * pcd, void *ep_handle,
+			   void *req_handle, int32_t status, void *ereq_port)
+{
+	struct fh_ute_iso_req_ext *ereqorg = NULL;
+	struct fh_iso_xreq_port *ereqport = NULL;
+	struct fh_ute_iso_packet_descriptor *desc_org = NULL;
+	int i;
+	struct usb_request *req;
+	//struct fh_ute_iso_packet_descriptor *
+	//int status = 0;
+
+	req = (struct usb_request *)req_handle;
+	ereqorg = &req->ext_req;
+	ereqport = (struct fh_iso_xreq_port *)ereq_port;
+	desc_org = ereqorg->per_io_frame_descs;
+
+	/* NOTE(review): req was already dereferenced above, so the req
+	 * half of this guard can never be false here. */
+	if (req && req->complete) {
+		/* Copy the request data from the portable logic to our request */
+		for (i = 0; i < ereqport->pio_pkt_count; i++) {
+			desc_org[i].actual_length =
+			    ereqport->per_io_frame_descs[i].actual_length;
+			desc_org[i].status =
+			    ereqport->per_io_frame_descs[i].status;
+		}
+
+		/* Map FH portable status codes onto Linux errno values. */
+		switch (status) {
+		case -FH_E_SHUTDOWN:
+			req->status = -ESHUTDOWN;
+			break;
+		case -FH_E_RESTART:
+			req->status = -ECONNRESET;
+			break;
+		case -FH_E_INVALID:
+			req->status = -EINVAL;
+			break;
+		case -FH_E_TIMEOUT:
+			req->status = -ETIMEDOUT;
+			break;
+		default:
+			req->status = status;
+		}
+
+		/* And call the gadget's completion */
+		req->complete(ep_handle, req);
+	}
+
+	return 0;
+}
+#endif /* FH_UTE_PER_IO */
+
+static int _complete(fh_otg_pcd_t *pcd, void *ep_handle,
+		     void *req_handle, int32_t status, uint32_t actual)
+{
+	struct usb_request *req = (struct usb_request *)req_handle;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)
+	/* Undo the DMA mapping created in ep_queue for this request.
+	 * NOTE(review): req->length is dereferenced here before the
+	 * `if (req && ...)` guard below, so that later NULL check is
+	 * ineffective; also note the stray ';' after the closing brace. */
+	if (GET_CORE_IF(pcd)->dma_enable && req->length != 0) {
+		struct platform_device *dev =
+		    gadget_wrapper->pcd->otg_dev->os_dep.pdev;
+		struct fh_otg_pcd_ep *ep = ep_from_handle(pcd, ep_handle);
+		int is_in_ep = 0;
+
+		if (ep)
+			is_in_ep = ep->fh_ep.is_in;
+
+		if (FH_DMA_ADDR_INVALID != req->dma) {
+			dma_unmap_single(&dev->dev,
+					 req->dma,
+					 req->length,
+					 is_in_ep ?
+					 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+			req->dma = FH_DMA_ADDR_INVALID;
+		}
+	};
+#endif
+
+	if (req && req->complete) {
+		/* Map FH portable status codes onto Linux errno values. */
+		switch (status) {
+		case -FH_E_SHUTDOWN:
+			req->status = -ESHUTDOWN;
+			break;
+		case -FH_E_RESTART:
+			req->status = -ECONNRESET;
+			break;
+		case -FH_E_INVALID:
+			req->status = -EINVAL;
+			break;
+		case -FH_E_TIMEOUT:
+			req->status = -ETIMEDOUT;
+			break;
+		default:
+			req->status = status;
+
+		}
+
+		req->actual = actual;
+		/* Drop the PCD lock around the gadget callback: the
+		 * completion may re-enter the PCD (e.g. requeue). */
+		FH_SPINUNLOCK(pcd->lock);
+		req->complete(ep_handle, req);
+		FH_SPINLOCK(pcd->lock);
+	}
+
+	return 0;
+}
+
+/* Record the negotiated bus speed on the gadget. */
+static int _connect(fh_otg_pcd_t * pcd, int speed)
+{
+	gadget_wrapper->gadget.speed = speed;
+	return 0;
+}
+
+/* Forward a disconnect event to the gadget driver, if bound. */
+static int _disconnect(fh_otg_pcd_t * pcd)
+{
+	if (gadget_wrapper->driver && gadget_wrapper->driver->disconnect) {
+		gadget_wrapper->driver->disconnect(&gadget_wrapper->gadget);
+	}
+	return 0;
+}
+
+/* Forward a resume event to the gadget driver, if bound. */
+static int _resume(fh_otg_pcd_t * pcd)
+{
+	if (gadget_wrapper->driver && gadget_wrapper->driver->resume) {
+		gadget_wrapper->driver->resume(&gadget_wrapper->gadget);
+	}
+
+	return 0;
+}
+
+/* Forward a suspend event to the gadget driver, if bound. */
+static int _suspend(fh_otg_pcd_t * pcd)
+{
+	if (gadget_wrapper->driver && gadget_wrapper->driver->suspend) {
+		gadget_wrapper->driver->suspend(&gadget_wrapper->gadget);
+	}
+	return 0;
+}
+
+/**
+ * This function updates the otg values in the gadget structure.
+ */
+static int _hnp_changed(fh_otg_pcd_t * pcd)
+{
+
+	/* Nothing to mirror on a non-OTG gadget. */
+	if (!gadget_wrapper->gadget.is_otg)
+		return 0;
+
+	gadget_wrapper->gadget.b_hnp_enable = get_b_hnp_enable(pcd);
+	gadget_wrapper->gadget.a_hnp_support = get_a_hnp_support(pcd);
+	gadget_wrapper->gadget.a_alt_hnp_support = get_a_alt_hnp_support(pcd);
+	return 0;
+}
+
+/* USB reset callback: intentionally a no-op at this layer. */
+static int _reset(fh_otg_pcd_t * pcd)
+{
+	return 0;
+}
+
+#ifdef FH_UTE_CFI
+static int _cfi_setup(fh_otg_pcd_t * pcd, void *cfi_req)
+{
+	int retval = -FH_E_INVALID;
+	/* NOTE(review): unlike _setup(), gadget_wrapper->driver is not
+	 * NULL-checked before this dereference. */
+	if (gadget_wrapper->driver->cfi_feature_setup) {
+		retval =
+		    gadget_wrapper->driver->
+		    cfi_feature_setup(&gadget_wrapper->gadget,
+				      (struct cfi_usb_ctrlrequest *)cfi_req);
+	}
+
+	return retval;
+}
+#endif
+
+/* Callback table handed to the portable PCD core (fh_otg_pcd_start);
+ * these adapt portable-layer events to the Linux gadget API. */
+static const struct fh_otg_pcd_function_ops fops = {
+	.complete = _complete,
+#ifdef FH_EN_ISOC
+	.isoc_complete = _isoc_complete,
+#endif
+	.setup = _setup,
+	.disconnect = _disconnect,
+	.connect = _connect,
+	.resume = _resume,
+	.suspend = _suspend,
+	.hnp_changed = _hnp_changed,
+	.reset = _reset,
+#ifdef FH_UTE_CFI
+	.cfi_setup = _cfi_setup,
+#endif
+#ifdef FH_UTE_PER_IO
+	.xisoc_complete = _xisoc_complete,
+#endif
+};
+
+/**
+ * This function is the top level PCD interrupt handler.
+ */
+static irqreturn_t fh_otg_pcd_irq(int irq, void *dev)
+{
+	fh_otg_pcd_t *pcd = dev;
+	int32_t retval = IRQ_NONE;
+
+	retval = fh_otg_pcd_handle_intr(pcd);
+	if (retval != 0) {
+		/* Platform hook; presumably a no-op on non-S3C2410 boards
+		 * - TODO confirm against fh_otg_os_dep.h. */
+		S3C2410X_CLEAR_EINTPEND();
+	}
+	return IRQ_RETVAL(retval);
+}
+
+/**
+ * This function initializes the usb_ep structures to their default
+ * state.
+ *
+ * @param d Pointer on gadget_wrapper.
+ */
+void gadget_add_eps(struct gadget_wrapper *d)
+{
+	/* Fixed name table; index must match the hardware EP number
+	 * (fh_ep.num) looked up below. */
+	static const char *names[] = {
+
+		"ep0",
+		"ep1in",
+		"ep2out",
+		"ep3in",
+		"ep4out",
+		"ep5in",
+		"ep6out",
+		"ep7in",
+		"ep8out",
+		"ep9in",
+		"ep10out",
+		"ep11in",
+		"ep12out",
+	};
+
+	int i;
+	struct usb_ep *ep;
+	int8_t dev_endpoints;
+
+	FH_DEBUGPL(DBG_PCDV, "%s\n", __func__);
+
+	INIT_LIST_HEAD(&d->gadget.ep_list);
+	d->gadget.ep0 = &d->ep0;
+	d->gadget.speed = USB_SPEED_UNKNOWN;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
+	d->gadget.max_speed = USB_SPEED_HIGH;
+#endif
+
+	INIT_LIST_HEAD(&d->gadget.ep0->ep_list);
+
+	/**
+	 * Initialize the EP0 structure.
+	 */
+	ep = &d->ep0;
+
+	/* Init the usb_ep structure. */
+	ep->name = names[0];
+	ep->ops = (struct usb_ep_ops *)&fh_otg_pcd_ep_ops;
+
+	/**
+	 * @todo NGS: What should the max packet size be set to
+	 * here? Before EP type is set?
+	 */
+	ep->maxpacket = MAX_PACKET_SIZE;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+	ep->maxpacket_limit = MAX_PACKET_SIZE;
+#endif
+
+	/* EP0 is enabled immediately (NULL descriptor = control EP). */
+	fh_otg_pcd_ep_enable(d->pcd, NULL, ep);
+
+	list_add_tail(&ep->ep_list, &d->gadget.ep_list);
+
+	/**
+	 * Initialize the EP structures.
+	 */
+	dev_endpoints = d->pcd->core_if->dev_if->num_in_eps;
+
+	/* IN endpoints: name chosen by hardware EP number, not loop index. */
+	for (i = 0; i < dev_endpoints; i++) {
+		ep = &d->in_ep[i];
+
+		/* Init the usb_ep structure. */
+		ep->name = names[d->pcd->in_ep[i].fh_ep.num];
+		ep->ops = (struct usb_ep_ops *)&fh_otg_pcd_ep_ops;
+
+		/**
+		 * @todo NGS: What should the max packet size be set to
+		 * here? Before EP type is set?
+		 */
+		ep->maxpacket = MAX_PACKET_SIZE;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+		ep->maxpacket_limit = MAX_PACKET_SIZE;
+#endif
+
+		list_add_tail(&ep->ep_list, &d->gadget.ep_list);
+	}
+
+	dev_endpoints = d->pcd->core_if->dev_if->num_out_eps;
+
+	/* OUT endpoints, same scheme as IN. */
+	for (i = 0; i < dev_endpoints; i++) {
+		ep = &d->out_ep[i];
+
+		/* Init the usb_ep structure. */
+		ep->name = names[d->pcd->out_ep[i].fh_ep.num];
+		ep->ops = (struct usb_ep_ops *)&fh_otg_pcd_ep_ops;
+
+		/**
+		 * @todo NGS: What should the max packet size be set to
+		 * here? Before EP type is set?
+		 */
+		ep->maxpacket = MAX_PACKET_SIZE;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+		ep->maxpacket_limit = MAX_PACKET_SIZE;
+#endif
+
+		list_add_tail(&ep->ep_list, &d->gadget.ep_list);
+	}
+
+
+	/* remove ep0 from the list. There is a ep0 pointer. */
+	list_del_init(&d->ep0.ep_list);
+
+	d->ep0.maxpacket = MAX_EP0_SIZE;
+}
+
+/**
+ * This function releases the Gadget device.
+ * required by device_unregister().
+ *
+ * @todo Should this do something? Should it free the PCD?
+ */
+/* Intentionally empty: the wrapper is freed in free_wrapper(), not here. */
+static void fh_otg_pcd_gadget_release(struct device *dev)
+{
+	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, dev);
+}
+
+static struct gadget_wrapper *alloc_wrapper(struct platform_device *_dev)
+{
+	static char pcd_name[] = "fh_otg";
+	fh_otg_device_t *otg_dev = platform_get_drvdata(_dev);
+
+	struct gadget_wrapper *d;
+	int retval;
+
+	d = FH_ALLOC(sizeof(*d));
+	if (d == NULL) {
+		return NULL;
+	}
+
+	memset(d, 0, sizeof(*d));
+
+	d->gadget.name = pcd_name;
+	d->pcd = otg_dev->pcd;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
+	strcpy(d->gadget.dev.bus_id, "gadget");
+#else
+	dev_set_name(&d->gadget.dev, "%s", "gadget");
+#endif
+
+	d->gadget.dev.parent = &_dev->dev;
+	d->gadget.dev.release = fh_otg_pcd_gadget_release;
+	d->gadget.ops = &fh_otg_pcd_ops;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
+	d->gadget.is_dualspeed = fh_otg_pcd_is_dualspeed(otg_dev->pcd);
+#endif
+	d->gadget.is_otg = fh_otg_pcd_is_otg(otg_dev->pcd);
+
+	/* No gadget driver bound yet. */
+	d->driver = 0;
+	/* Register the gadget device */
+	retval = device_register(&d->gadget.dev);
+	if (retval != 0) {
+		FH_ERROR("device_register failed\n");
+		FH_FREE(d);
+		return NULL;
+	}
+
+	return d;
+}
+
+static void free_wrapper(struct gadget_wrapper *d)
+{
+	if (d->driver) {
+		/* should have been done already by driver model core */
+		FH_WARN("driver '%s' is still registered\n",
+			d->driver->driver.name);
+		usb_gadget_unregister_driver(d->driver);
+	}
+
+	/* Mirrors device_register() in alloc_wrapper(). */
+	device_unregister(&d->gadget.dev);
+	FH_FREE(d);
+}
+
+/**
+ * This function initializes the PCD portion of the driver.
+ *
+ */
+int pcd_init(struct platform_device *dev, int irq)
+{
+	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
+
+	int retval = 0;
+
+	printk(KERN_ERR "%s(%p)\n", __func__, dev);
+
+	otg_dev->pcd = fh_otg_pcd_init(otg_dev->core_if);
+
+	if (!otg_dev->pcd) {
+		FH_ERROR("fh_otg_pcd_init failed\n");
+		return -ENOMEM;
+	}
+
+	otg_dev->pcd->otg_dev = otg_dev;
+	/* NOTE(review): alloc_wrapper can return NULL (alloc or
+	 * device_register failure) but the result is not checked before
+	 * gadget_add_eps dereferences it. */
+	gadget_wrapper = alloc_wrapper(dev);
+
+	/*
+	 * Initialize EP structures
+	 */
+	gadget_add_eps(gadget_wrapper);
+	/*
+	 * Setup interupt handler
+	 */
+
+	retval = request_irq(irq, fh_otg_pcd_irq,
+			     IRQF_SHARED | IRQF_DISABLED,
+			     gadget_wrapper->gadget.name, otg_dev->pcd);
+	if (retval != 0) {
+		FH_ERROR("request of irq%d failed\n", irq);
+		free_wrapper(gadget_wrapper);
+		return -EBUSY;
+	}
+
+	/* Hand the fops callback table to the portable PCD core. */
+	fh_otg_pcd_start(gadget_wrapper->pcd, &fops);
+	platform_set_drvdata(dev, otg_dev);
+
+	return retval;
+}
+
+/**
+ * Cleanup the PCD: frees the IRQ, the gadget wrapper and the PCD core.
+ */
+void pcd_remove(struct platform_device *dev, int irq)
+{
+
+	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
+	fh_otg_pcd_t *pcd = otg_dev->pcd;
+
+	printk(KERN_ERR "%s(%p)(%p)\n", __func__, dev, otg_dev);
+
+	/*
+	 * Free the IRQ (registered against the PCD in pcd_init())
+	 */
+	printk(KERN_ERR "pcd free irq :%d\n", irq);
+	free_irq(irq, pcd);
+	free_wrapper(gadget_wrapper);
+	fh_otg_pcd_remove(otg_dev->pcd);
+	otg_dev->pcd = 0;
+}
+
+/**
+ * This function registers a gadget driver with the PCD.
+ *
+ * When a driver is successfully registered, it will receive control
+ * requests including set_configuration(), which enables non-control
+ * requests. Then USB traffic follows until a disconnect is reported.
+ * Then a host may connect again, or the driver might get unbound.
+ *
+ * @param driver The driver being registered
+ * @param bind The bind function of gadget driver
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+	int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
+#else
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+			    int (*bind)(struct usb_gadget *))
+#endif
+{
+	int retval;
+
+	/* Validate 'driver' before any dereference of it below */
+	if (!driver ||
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
+	    driver->speed == USB_SPEED_UNKNOWN ||
+#else
+	    driver->max_speed == USB_SPEED_UNKNOWN ||
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+	    !driver->bind ||
+#else
+	    !bind ||
+#endif
+	    !driver->unbind || !driver->disconnect || !driver->setup) {
+		FH_DEBUGPL(DBG_PCDV, "EINVAL\n");
+		return -EINVAL;
+	}
+	FH_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n",
+		   driver->driver.name);
+	if (gadget_wrapper == 0) {
+		FH_DEBUGPL(DBG_PCDV, "ENODEV\n");
+		return -ENODEV;
+	}
+	if (gadget_wrapper->driver != 0) {
+		FH_DEBUGPL(DBG_PCDV, "EBUSY (%p)\n", gadget_wrapper->driver);
+		return -EBUSY;
+	}
+
+	/* hook up the driver */
+	gadget_wrapper->driver = driver;
+	gadget_wrapper->gadget.dev.driver = &driver->driver;
+
+	FH_DEBUGPL(DBG_PCD, "bind to driver %s\n", driver->driver.name);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+	retval = driver->bind(&gadget_wrapper->gadget);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+	retval = driver->bind(&gadget_wrapper->gadget,gadget_wrapper->driver);
+#else
+	retval = bind(&gadget_wrapper->gadget);
+#endif
+	if (retval) {
+		FH_ERROR("bind to driver %s --> error %d\n",
+			 driver->driver.name, retval);
+		gadget_wrapper->driver = 0;
+		gadget_wrapper->gadget.dev.driver = 0;
+		return retval;
+	}
+	FH_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n",
+		   driver->driver.name);
+	return 0;
+}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+EXPORT_SYMBOL(usb_gadget_register_driver);
+#else
+EXPORT_SYMBOL(usb_gadget_probe_driver);
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,1)
+
+/* Attach 'driver' to the FH UDC named "fh_otg"; rejects any other name. */
+int usb_udc_attach_driver(const char *name, struct usb_gadget_driver *driver)
+{
+	int retval;
+
+	if (strcmp(name, "fh_otg")) {
+		FH_ERROR("NO FH DEV FOUND \n");
+		return -ENODEV;
+	}
+	/* Validate 'driver' before dereferencing it for the debug print */
+	if (!driver || driver->max_speed == USB_SPEED_UNKNOWN || !driver->bind ||
+	    !driver->unbind || !driver->disconnect || !driver->setup) {
+		FH_DEBUGPL(DBG_PCDV, "EINVAL\n");
+		return -EINVAL;
+	}
+	FH_DEBUGPL(DBG_PCD, "Registering gadget driver '%s'\n",
+		   driver->driver.name);
+	if (gadget_wrapper == 0) {
+		FH_DEBUGPL(DBG_PCDV, "ENODEV\n");
+		return -ENODEV;
+	}
+	if (gadget_wrapper->driver != 0) {
+		FH_DEBUGPL(DBG_PCDV, "EBUSY (%p)\n", gadget_wrapper->driver);
+		return -EBUSY;
+	}
+
+	/* hook up the driver */
+	gadget_wrapper->driver = driver;
+	gadget_wrapper->gadget.dev.driver = &driver->driver;
+
+	FH_DEBUGPL(DBG_PCD, "bind to driver %s\n", driver->driver.name);
+	retval = driver->bind(&gadget_wrapper->gadget,gadget_wrapper->driver);
+	if (retval) {
+		FH_ERROR("bind to driver %s --> error %d\n",
+			 driver->driver.name, retval);
+		gadget_wrapper->driver = 0;
+		gadget_wrapper->gadget.dev.driver = 0;
+		return retval;
+	}
+	FH_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n",
+		   driver->driver.name);
+	return 0;
+}
+EXPORT_SYMBOL(usb_udc_attach_driver);
+
+/* Record the new USB device state and schedule gadget->work on the
+ * system workqueue. */
+void usb_gadget_set_state(struct usb_gadget *gadget,
+		enum usb_device_state state)
+{
+	gadget->state = state;
+	FH_SCHEDULE_SYSTEM_WORK(&gadget->work);
+}
+
+#endif
+
+
+
+/**
+ * This function unregisters a gadget driver: unbinds it from the gadget
+ * and clears the wrapper's driver slot.
+ *
+ * @param driver The driver being unregistered
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	/* FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _driver); */
+
+	if (gadget_wrapper == 0) {
+		FH_DEBUGPL(DBG_ANY, "%s Return(%d): s_pcd==0\n", __func__,
+			   -ENODEV);
+		return -ENODEV;
+	}
+	if (driver == 0 || driver != gadget_wrapper->driver) {
+		FH_DEBUGPL(DBG_ANY, "%s Return(%d): driver?\n", __func__,
+			   -EINVAL);
+		return -EINVAL;
+	}
+
+	driver->unbind(&gadget_wrapper->gadget);
+	gadget_wrapper->driver = 0;	/* NOTE(review): gadget.dev.driver is left set — confirm intentional */
+
+	FH_DEBUGPL(DBG_ANY, "unregistered driver '%s'\n", driver->driver.name);
+	return 0;
+}
+
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+#endif /* FH_HOST_ONLY */
diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_regs.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_regs.h
new file mode 100644
index 00000000..e1070282
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_regs.h
@@ -0,0 +1,2558 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_regs.h $
+ * $Revision: #105 $
+ * $Date: 2015/10/12 $
+ * $Change: 2972621 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef __FH_OTG_REGS_H__
+#define __FH_OTG_REGS_H__
+
+#include "fh_otg_core_if.h"
+
+/**
+ * @file
+ *
+ * This file contains the data structures for accessing the FH_otg core registers.
+ *
+ * The application interfaces with the HS OTG core by reading from and
+ * writing to the Control and Status Register (CSR) space through the
+ * AHB Slave interface. These registers are 32 bits wide, and the
+ * addresses are 32-bit-block aligned.
+ * CSRs are classified as follows:
+ * - Core Global Registers
+ * - Device Mode Registers
+ * - Device Global Registers
+ * - Device Endpoint Specific Registers
+ * - Host Mode Registers
+ * - Host Global Registers
+ * - Host Port CSRs
+ * - Host Channel Specific Registers
+ *
+ * Only the Core Global registers can be accessed in both Device and
+ * Host modes. When the HS OTG core is operating in one mode, either
+ * Device or Host, the application must not access registers from the
+ * other mode. When the core switches from one mode to another, the
+ * registers in the new mode of operation must be reprogrammed as they
+ * would be after a power-on reset.
+ */
+
+/****************************************************************************/
+/** FH_otg Core registers .
+ * The fh_otg_core_global_regs structure defines the size
+ * and relative field offsets for the Core Global registers.
+ */
+typedef struct fh_otg_core_global_regs {
+ /** OTG Control and Status Register. <i>Offset: 000h</i> */
+ volatile uint32_t gotgctl;
+ /** OTG Interrupt Register. <i>Offset: 004h</i> */
+ volatile uint32_t gotgint;
+ /**Core AHB Configuration Register. <i>Offset: 008h</i> */
+ volatile uint32_t gahbcfg;
+
+#define FH_GLBINTRMASK 0x0001
+#define FH_DMAENABLE 0x0020
+#define FH_NPTXEMPTYLVL_EMPTY 0x0080
+#define FH_NPTXEMPTYLVL_HALFEMPTY 0x0000
+#define FH_PTXEMPTYLVL_EMPTY 0x0100
+#define FH_PTXEMPTYLVL_HALFEMPTY 0x0000
+
+ /**Core USB Configuration Register. <i>Offset: 00Ch</i> */
+ volatile uint32_t gusbcfg;
+ /**Core Reset Register. <i>Offset: 010h</i> */
+ volatile uint32_t grstctl;
+ /**Core Interrupt Register. <i>Offset: 014h</i> */
+ volatile uint32_t gintsts;
+ /**Core Interrupt Mask Register. <i>Offset: 018h</i> */
+ volatile uint32_t gintmsk;
+ /**Receive Status Queue Read Register (Read Only). <i>Offset: 01Ch</i> */
+ volatile uint32_t grxstsr;
+ /**Receive Status Queue Read & POP Register (Read Only). <i>Offset: 020h</i>*/
+ volatile uint32_t grxstsp;
+ /**Receive FIFO Size Register. <i>Offset: 024h</i> */
+ volatile uint32_t grxfsiz;
+ /**Non Periodic Transmit FIFO Size Register. <i>Offset: 028h</i> */
+ volatile uint32_t gnptxfsiz;
+ /**Non Periodic Transmit FIFO/Queue Status Register (Read
+ * Only). <i>Offset: 02Ch</i> */
+ volatile uint32_t gnptxsts;
+ /**I2C Access Register. <i>Offset: 030h</i> */
+ volatile uint32_t gi2cctl;
+ /**PHY Vendor Control Register. <i>Offset: 034h</i> */
+ volatile uint32_t gpvndctl;
+ /**General Purpose Input/Output Register. <i>Offset: 038h</i> */
+ volatile uint32_t ggpio;
+ /**User ID Register. <i>Offset: 03Ch</i> */
+ volatile uint32_t guid;
+ /**Synopsys ID Register (Read Only). <i>Offset: 040h</i> */
+ volatile uint32_t gsnpsid;
+ /**User HW Config1 Register (Read Only). <i>Offset: 044h</i> */
+ volatile uint32_t ghwcfg1;
+ /**User HW Config2 Register (Read Only). <i>Offset: 048h</i> */
+ volatile uint32_t ghwcfg2;
+#define FH_SLAVE_ONLY_ARCH 0
+#define FH_EXT_DMA_ARCH 1
+#define FH_INT_DMA_ARCH 2
+
+#define FH_MODE_HNP_SRP_CAPABLE 0
+#define FH_MODE_SRP_ONLY_CAPABLE 1
+#define FH_MODE_NO_HNP_SRP_CAPABLE 2
+#define FH_MODE_SRP_CAPABLE_DEVICE 3
+#define FH_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define FH_MODE_SRP_CAPABLE_HOST 5
+#define FH_MODE_NO_SRP_CAPABLE_HOST 6
+
+ /**User HW Config3 Register (Read Only). <i>Offset: 04Ch</i> */
+ volatile uint32_t ghwcfg3;
+ /**User HW Config4 Register (Read Only). <i>Offset: 050h</i>*/
+ volatile uint32_t ghwcfg4;
+ /** Core LPM Configuration register <i>Offset: 054h</i>*/
+ volatile uint32_t glpmcfg;
+ /** Global PowerDn Register <i>Offset: 058h</i> */
+ volatile uint32_t gpwrdn;
+ /** Global DFIFO SW Config Register <i>Offset: 05Ch</i> */
+ volatile uint32_t gdfifocfg;
+ /** ADP Control Register <i>Offset: 060h</i> */
+ volatile uint32_t adpctl;
+ /** Reserved <i>Offset: 064h-0FFh</i> */
+ volatile uint32_t reserved39[39];
+ /** Host Periodic Transmit FIFO Size Register. <i>Offset: 100h</i> */
+ volatile uint32_t hptxfsiz;
+ /** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled,
+ otherwise Device Transmit FIFO#n Register.
+ * <i>Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15).</i> */
+ volatile uint32_t dtxfsiz[15];
+
+} fh_otg_core_global_regs_t;
+
+/**
+ * This union represents the bit fields of the Core OTG Control
+ * and Status Register (GOTGCTL). Set the bits using the bit
+ * fields then write the <i>d32</i> value to the register.
+ */
+typedef union gotgctl_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned sesreqscs:1;
+ unsigned sesreq:1;
+ unsigned vbvalidoven:1;
+ unsigned vbvalidovval:1;
+ unsigned avalidoven:1;
+ unsigned avalidovval:1;
+ unsigned bvalidoven:1;
+ unsigned bvalidovval:1;
+ unsigned hstnegscs:1;
+ unsigned hnpreq:1;
+ unsigned hstsethnpen:1;
+ unsigned devhnpen:1;
+ unsigned reserved12_15:4;
+ unsigned conidsts:1;
+ unsigned dbnctime:1;
+ unsigned asesvld:1;
+ unsigned bsesvld:1;
+ unsigned otgver:1;
+ unsigned reserved1:1;
+ unsigned multvalidbc:5;
+ unsigned chirpen:1;
+ unsigned reserved28_31:4;
+ } b;
+} gotgctl_data_t;
+
+/**
+ * This union represents the bit fields of the Core OTG Interrupt Register
+ * (GOTGINT). Set/clear the bits using the bit fields then write the <i>d32</i>
+ * value to the register.
+ */
+typedef union gotgint_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Current Mode */
+ unsigned reserved0_1:2;
+
+ /** Session End Detected */
+ unsigned sesenddet:1;
+
+ unsigned reserved3_7:5;
+
+ /** Session Request Success Status Change */
+ unsigned sesreqsucstschng:1;
+ /** Host Negotiation Success Status Change */
+ unsigned hstnegsucstschng:1;
+
+ unsigned reserved10_16:7;
+
+ /** Host Negotiation Detected */
+ unsigned hstnegdet:1;
+ /** A-Device Timeout Change */
+ unsigned adevtoutchng:1;
+ /** Debounce Done */
+ unsigned debdone:1;
+ /** Multi-Valued input changed */
+ unsigned mvic:1;
+
+ unsigned reserved31_21:11;
+
+ } b;
+} gotgint_data_t;
+
+/**
+ * This union represents the bit fields of the Core AHB Configuration
+ * Register (GAHBCFG). Set/clear the bits using the bit fields then
+ * write the <i>d32</i> value to the register.
+ */
+typedef union gahbcfg_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned glblintrmsk:1;
+#define FH_GAHBCFG_GLBINT_ENABLE 1
+
+ unsigned hburstlen:4;
+#define FH_GAHBCFG_INT_DMA_BURST_SINGLE 0
+#define FH_GAHBCFG_INT_DMA_BURST_INCR 1
+#define FH_GAHBCFG_INT_DMA_BURST_INCR4 3
+#define FH_GAHBCFG_INT_DMA_BURST_INCR8 5
+#define FH_GAHBCFG_INT_DMA_BURST_INCR16 7
+
+ unsigned dmaenable:1;
+#define FH_GAHBCFG_DMAENABLE 1
+ unsigned reserved:1;
+ unsigned nptxfemplvl_txfemplvl:1;
+ unsigned ptxfemplvl:1;
+#define FH_GAHBCFG_TXFEMPTYLVL_EMPTY 1
+#define FH_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0
+ unsigned reserved9_20:12;
+ unsigned remmemsupp:1;
+ unsigned notialldmawrit:1;
+ unsigned ahbsingle:1;
+ unsigned reserved24_31:8;
+ } b;
+} gahbcfg_data_t;
+
+/**
+ * This union represents the bit fields of the Core USB Configuration
+ * Register (GUSBCFG). Set the bits using the bit fields then write
+ * the <i>d32</i> value to the register.
+ */
+typedef union gusbcfg_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned toutcal:3;
+ unsigned phyif:1;
+ unsigned ulpi_utmi_sel:1;
+ unsigned fsintf:1;
+ unsigned physel:1;
+ unsigned ddrsel:1;
+ unsigned srpcap:1;
+ unsigned hnpcap:1;
+ unsigned usbtrdtim:4;
+ unsigned reserved1:1;
+ unsigned phylpwrclksel:1;
+ unsigned otgutmifssel:1;
+ unsigned ulpi_fsls:1;
+ unsigned ulpi_auto_res:1;
+ unsigned ulpi_clk_sus_m:1;
+ unsigned ulpi_ext_vbus_drv:1;
+ unsigned ulpi_int_vbus_indicator:1;
+ unsigned term_sel_dl_pulse:1;
+ unsigned indicator_complement:1;
+ unsigned indicator_pass_through:1;
+ unsigned ulpi_int_prot_dis:1;
+ unsigned ic_usb_cap:1;
+ unsigned ic_traffic_pull_remove:1;
+ unsigned tx_end_delay:1;
+ unsigned force_host_mode:1;
+ unsigned force_dev_mode:1;
+ unsigned reserved31:1;
+ } b;
+} gusbcfg_data_t;
+
+/**
+ * This union represents the bit fields of the Core Reset Register
+ * (GRSTCTL). Set/clear the bits using the bit fields then write the
+ * <i>d32</i> value to the register.
+ */
+typedef union grstctl_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Core Soft Reset (CSftRst) (Device and Host)
+ *
+ * The application can flush the control logic in the
+ * entire core using this bit. This bit resets the
+ * pipelines in the AHB Clock domain as well as the
+ * PHY Clock domain.
+ *
+ * The state machines are reset to an IDLE state, the
+ * control bits in the CSRs are cleared, all the
+ * transmit FIFOs and the receive FIFO are flushed.
+ *
+ * The status mask bits that control the generation of
+ * the interrupt, are cleared, to clear the
+ * interrupt. The interrupt status bits are not
+ * cleared, so the application can get the status of
+ * any events that occurred in the core after it has
+ * set this bit.
+ *
+ * Any transactions on the AHB are terminated as soon
+ * as possible following the protocol. Any
+ * transactions on the USB are terminated immediately.
+ *
+ * The configuration settings in the CSRs are
+ * unchanged, so the software doesn't have to
+ * reprogram these registers (Device
+ * Configuration/Host Configuration/Core System
+ * Configuration/Core PHY Configuration).
+ *
+ * The application can write to this bit, any time it
+ * wants to reset the core. This is a self clearing
+ * bit and the core clears this bit after all the
+ * necessary logic is reset in the core, which may
+ * take several clocks, depending on the current state
+ * of the core.
+ */
+ unsigned csftrst:1;
+ /** Hclk Soft Reset
+ *
+ * The application uses this bit to reset the control logic in
+ * the AHB clock domain. Only AHB clock domain pipelines are
+ * reset.
+ */
+ unsigned hsftrst:1;
+ /** Host Frame Counter Reset (Host Only)<br>
+ *
+ * The application can reset the (micro)frame number
+ * counter inside the core, using this bit. When the
+ * (micro)frame counter is reset, the subsequent SOF
+ * sent out by the core, will have a (micro)frame
+ * number of 0.
+ */
+ unsigned hstfrm:1;
+ /** In Token Sequence Learning Queue Flush
+ * (INTknQFlsh) (Device Only)
+ */
+ unsigned intknqflsh:1;
+ /** RxFIFO Flush (RxFFlsh) (Device and Host)
+ *
+ * The application can flush the entire Receive FIFO
+ * using this bit. The application must first
+ * ensure that the core is not in the middle of a
+ * transaction. The application should write into
+ * this bit, only after making sure that neither the
+ * DMA engine is reading from the RxFIFO nor the MAC
+ * is writing the data in to the FIFO. The
+ * application should wait until the bit is cleared
+		 * before performing any other operations. This bit
+		 * takes 8 clocks (slowest of PHY or AHB clock)
+		 * to clear.
+ */
+ unsigned rxfflsh:1;
+ /** TxFIFO Flush (TxFFlsh) (Device and Host).
+ *
+ * This bit is used to selectively flush a single or
+ * all transmit FIFOs. The application must first
+ * ensure that the core is not in the middle of a
+ * transaction. The application should write into
+ * this bit, only after making sure that neither the
+ * DMA engine is writing into the TxFIFO nor the MAC
+ * is reading the data out of the FIFO. The
+ * application should wait until the core clears this
+		 * bit, before performing any operations. This bit
+		 * takes 8 clocks (slowest of PHY or AHB clock)
+		 * to clear.
+ */
+ unsigned txfflsh:1;
+
+ /** TxFIFO Number (TxFNum) (Device and Host).
+ *
+ * This is the FIFO number which needs to be flushed,
+ * using the TxFIFO Flush bit. This field should not
+ * be changed until the TxFIFO Flush bit is cleared by
+ * the core.
+ * - 0x0 : Non Periodic TxFIFO Flush
+ * - 0x1 : Periodic TxFIFO #1 Flush in device mode
+ * or Periodic TxFIFO in host mode
+ * - 0x2 : Periodic TxFIFO #2 Flush in device mode.
+ * - ...
+ * - 0xF : Periodic TxFIFO #15 Flush in device mode
+ * - 0x10: Flush all the Transmit NonPeriodic and
+ * Transmit Periodic FIFOs in the core
+ */
+ unsigned txfnum:5;
+ /** Reserved */
+ unsigned reserved11_29:19;
+		/** DMA Request Signal. Indicates a DMA request is in
+		 * progress. Used for debug purposes. */
+ unsigned dmareq:1;
+ /** AHB Master Idle. Indicates the AHB Master State
+ * Machine is in IDLE condition. */
+ unsigned ahbidle:1;
+ } b;
+} grstctl_t;
+
+/**
+ * This union represents the bit fields of the Core Interrupt Mask
+ * Register (GINTMSK). Set/clear the bits using the bit fields then
+ * write the <i>d32</i> value to the register.
+ */
+typedef union gintmsk_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved0:1;
+ unsigned modemismatch:1;
+ unsigned otgintr:1;
+ unsigned sofintr:1;
+ unsigned rxstsqlvl:1;
+ unsigned nptxfempty:1;
+ unsigned ginnakeff:1;
+ unsigned goutnakeff:1;
+ unsigned ulpickint:1;
+ unsigned i2cintr:1;
+ unsigned erlysuspend:1;
+ unsigned usbsuspend:1;
+ unsigned usbreset:1;
+ unsigned enumdone:1;
+ unsigned isooutdrop:1;
+ unsigned eopframe:1;
+ unsigned restoredone:1;
+ unsigned epmismatch:1;
+ unsigned inepintr:1;
+ unsigned outepintr:1;
+ unsigned incomplisoin:1;
+ unsigned incomplisoout:1;
+ unsigned fetsusp:1;
+ unsigned resetdet:1;
+ unsigned portintr:1;
+ unsigned hcintr:1;
+ unsigned ptxfempty:1;
+ unsigned lpmtranrcvd:1;
+ unsigned conidstschng:1;
+ unsigned disconnect:1;
+ unsigned sessreqintr:1;
+ unsigned wkupintr:1;
+ } b;
+} gintmsk_data_t;
+/**
+ * This union represents the bit fields of the Core Interrupt Register
+ * (GINTSTS). Set/clear the bits using the bit fields then write the
+ * <i>d32</i> value to the register.
+ */
+typedef union gintsts_data {
+ /** raw register data */
+ uint32_t d32;
+#define FH_SOF_INTR_MASK 0x0008
+ /** register bits */
+ struct {
+#define FH_HOST_MODE 1
+ unsigned curmode:1;
+ unsigned modemismatch:1;
+ unsigned otgintr:1;
+ unsigned sofintr:1;
+ unsigned rxstsqlvl:1;
+ unsigned nptxfempty:1;
+ unsigned ginnakeff:1;
+ unsigned goutnakeff:1;
+ unsigned ulpickint:1;
+ unsigned i2cintr:1;
+ unsigned erlysuspend:1;
+ unsigned usbsuspend:1;
+ unsigned usbreset:1;
+ unsigned enumdone:1;
+ unsigned isooutdrop:1;
+ unsigned eopframe:1;
+ unsigned restoredone:1;
+ unsigned epmismatch:1;
+ unsigned inepint:1;
+ unsigned outepintr:1;
+ unsigned incomplisoin:1;
+ unsigned incomplisoout:1;
+ unsigned fetsusp:1;
+ unsigned resetdet:1;
+ unsigned portintr:1;
+ unsigned hcintr:1;
+ unsigned ptxfempty:1;
+ unsigned lpmtranrcvd:1;
+ unsigned conidstschng:1;
+ unsigned disconnect:1;
+ unsigned sessreqintr:1;
+ unsigned wkupintr:1;
+ } b;
+} gintsts_data_t;
+
+/**
+ * This union represents the bit fields in the Device Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i>
+ * element then read out the bits using the <i>b</i>it elements.
+ */
+typedef union device_grxsts_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned epnum:4;
+ unsigned bcnt:11;
+ unsigned dpid:2;
+
+#define FH_STS_DATA_UPDT 0x2 // OUT Data Packet
+#define FH_STS_XFER_COMP 0x3 // OUT Data Transfer Complete
+
+#define FH_DSTS_GOUT_NAK 0x1 // Global OUT NAK
+#define FH_DSTS_SETUP_COMP 0x4 // Setup Phase Complete
+#define FH_DSTS_SETUP_UPDT 0x6 // SETUP Packet
+ unsigned pktsts:4;
+ unsigned fn:4;
+ unsigned reserved25_31:7;
+ } b;
+} device_grxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Host Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i>
+ * element then read out the bits using the <i>b</i>it elements.
+ */
+typedef union host_grxsts_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned chnum:4;
+ unsigned bcnt:11;
+ unsigned dpid:2;
+
+ unsigned pktsts:4;
+#define FH_GRXSTS_PKTSTS_IN 0x2
+#define FH_GRXSTS_PKTSTS_IN_XFER_COMP 0x3
+#define FH_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5
+#define FH_GRXSTS_PKTSTS_CH_HALTED 0x7
+
+ unsigned reserved21_31:11;
+ } b;
+} host_grxsts_data_t;
+
+/**
+ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ,
+ * GNPTXFSIZ, DPTXFSIZn, DIEPTXFn). Read the register into the <i>d32</i> element
+ * then read out the bits using the <i>b</i>it elements.
+ */
+typedef union fifosize_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned startaddr:16;
+ unsigned depth:16;
+ } b;
+} fifosize_data_t;
+
+/**
+ * This union represents the bit fields in the Non-Periodic Transmit
+ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the
+ * <i>d32</i> element then read out the bits using the <i>b</i>it
+ * elements.
+ */
+typedef union gnptxsts_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned nptxfspcavail:16;
+ unsigned nptxqspcavail:8;
+ /** Top of the Non-Periodic Transmit Request Queue
+ * - bit 24 - Terminate (Last entry for the selected
+ * channel/EP)
+ * - bits 26:25 - Token Type
+ * - 2'b00 - IN/OUT
+ * - 2'b01 - Zero Length OUT
+ * - 2'b10 - PING/Complete Split
+ * - 2'b11 - Channel Halt
+ * - bits 30:27 - Channel/EP Number
+ */
+ unsigned nptxqtop_terminate:1;
+ unsigned nptxqtop_token:2;
+ unsigned nptxqtop_chnep:4;
+ unsigned reserved:1;
+ } b;
+} gnptxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Transmit
+ * FIFO Status Register (DTXFSTS). Read the register into the
+ * <i>d32</i> element then read out the bits using the <i>b</i>it
+ * elements.
+ */
+typedef union dtxfsts_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned txfspcavail:16;
+ unsigned reserved:16;
+ } b;
+} dtxfsts_data_t;
+
+/**
+ * This union represents the bit fields in the I2C Control Register
+ * (I2CCTL). Read the register into the <i>d32</i> element then read out the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union gi2cctl_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned rwdata:8;
+ unsigned regaddr:8;
+ unsigned addr:7;
+ unsigned i2cen:1;
+ unsigned ack:1;
+ unsigned i2csuspctl:1;
+ unsigned i2cdevaddr:2;
+ unsigned i2cdatse0:1;
+ unsigned reserved:1;
+ unsigned rw:1;
+ unsigned bsydne:1;
+ } b;
+} gi2cctl_data_t;
+
+/**
+ * This union represents the bit fields in the PHY Vendor Control Register
+ * (GPVNDCTL). Read the register into the <i>d32</i> element then read out the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union gpvndctl_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned regdata:8;
+ unsigned vctrl:8;
+ unsigned regaddr16_21:6;
+ unsigned regwr:1;
+ unsigned reserved23_24:2;
+ unsigned newregreq:1;
+ unsigned vstsbsy:1;
+ unsigned vstsdone:1;
+ unsigned reserved28_30:3;
+ unsigned disulpidrvr:1;
+ } b;
+} gpvndctl_data_t;
+
+/**
+ * This union represents the bit fields in the General Purpose
+ * Input/Output Register (GGPIO).
+ * Read the register into the <i>d32</i> element then read out the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union ggpio_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned gpi:16;
+ unsigned gpo:16;
+ } b;
+} ggpio_data_t;
+
+/**
+ * This union represents the bit fields in the User ID Register
+ * (GUID). Read the register into the <i>d32</i> element then read out the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union guid_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned rwdata:32;
+ } b;
+} guid_data_t;
+
+/**
+ * This union represents the bit fields in the Synopsys ID Register
+ * (GSNPSID). Read the register into the <i>d32</i> element then read out the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union gsnpsid_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned rwdata:32;
+ } b;
+} gsnpsid_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config1
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg1_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned ep_dir0:2;
+ unsigned ep_dir1:2;
+ unsigned ep_dir2:2;
+ unsigned ep_dir3:2;
+ unsigned ep_dir4:2;
+ unsigned ep_dir5:2;
+ unsigned ep_dir6:2;
+ unsigned ep_dir7:2;
+ unsigned ep_dir8:2;
+ unsigned ep_dir9:2;
+ unsigned ep_dir10:2;
+ unsigned ep_dir11:2;
+ unsigned ep_dir12:2;
+ unsigned ep_dir13:2;
+ unsigned ep_dir14:2;
+ unsigned ep_dir15:2;
+ } b;
+} hwcfg1_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config2
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg2_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /* GHWCFG2 */
+ unsigned op_mode:3;
+#define FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0
+#define FH_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1
+#define FH_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2
+#define FH_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define FH_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define FH_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define FH_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+
+ unsigned architecture:2;
+ unsigned point2point:1;
+ unsigned hs_phy_type:2;
+#define FH_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define FH_HWCFG2_HS_PHY_TYPE_UTMI 1
+#define FH_HWCFG2_HS_PHY_TYPE_ULPI 2
+#define FH_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
+
+ unsigned fs_phy_type:2;
+ unsigned num_dev_ep:4;
+ unsigned num_host_chan:4;
+ unsigned perio_ep_supported:1;
+ unsigned dynamic_fifo:1;
+ unsigned multi_proc_int:1;
+ unsigned reserved21:1;
+ unsigned nonperio_tx_q_depth:2;
+ unsigned host_perio_tx_q_depth:2;
+ unsigned dev_token_q_depth:5;
+ unsigned otg_enable_ic_usb:1;
+ } b;
+} hwcfg2_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config3
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg3_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /* GHWCFG3 */
+ unsigned xfer_size_cntr_width:4;
+ unsigned packet_size_cntr_width:3;
+ unsigned otg_func:1;
+ unsigned i2c:1;
+ unsigned vendor_ctrl_if:1;
+ unsigned optional_features:1;
+ unsigned synch_reset_type:1;
+ unsigned adp_supp:1;
+ unsigned otg_enable_hsic:1;
+ unsigned bc_support:1;
+ unsigned otg_lpm_en:1;
+ unsigned dfifo_depth:16;
+ } b;
+} hwcfg3_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config4
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg4_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned num_dev_perio_in_ep:4;
+ unsigned power_optimiz:1;
+ unsigned min_ahb_freq:1;
+ unsigned hiber:1;
+ unsigned xhiber:1;
+ unsigned reserved:6;
+ unsigned utmi_phy_data_width:2;
+ unsigned num_dev_mode_ctrl_ep:4;
+ unsigned iddig_filt_en:1;
+ unsigned vbus_valid_filt_en:1;
+ unsigned a_valid_filt_en:1;
+ unsigned b_valid_filt_en:1;
+ unsigned session_end_filt_en:1;
+ unsigned ded_fifo_en:1;
+ unsigned num_in_eps:4;
+ unsigned desc_dma:1;
+ unsigned desc_dma_dyn:1;
+ } b;
+} hwcfg4_data_t;
+
+/**
+ * This union represents the bit fields of the Core LPM Configuration
+ * Register (GLPMCFG). Set the bits using bit fields then write
+ * the <i>d32</i> value to the register.
+ */
+typedef union glpmctl_data {
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** LPM-Capable (LPMCap) (Device and Host)
+		 * The application uses this bit to control
+		 * the FH_otg core LPM capabilities.
+		 */
+		unsigned lpm_cap_en:1;
+		/** LPM response programmed by application (AppL1Res) (Device)
+		 * Handshake response to LPM token pre-programmed
+		 * by device application software.
+		 */
+		unsigned appl_resp:1;
+		/** Host Initiated Resume Duration (HIRD) (Device and Host)
+		 * In Host mode this field indicates the value of HIRD
+		 * to be sent in an LPM transaction.
+		 * In Device mode this field is updated with the
+		 * Received LPM Token HIRD bmAttribute
+		 * when an ACK/NYET/STALL response is sent
+		 * to an LPM transaction.
+		 */
+		unsigned hird:4;
+		/** RemoteWakeEnable (bRemoteWake) (Device and Host)
+		 * In Host mode this bit indicates the value of remote
+		 * wake up to be sent in wIndex field of LPM transaction.
+		 * In Device mode this field is updated with the
+		 * Received LPM Token bRemoteWake bmAttribute
+		 * when an ACK/NYET/STALL response is sent
+		 * to an LPM transaction.
+		 */
+		unsigned rem_wkup_en:1;
+		/** Enable utmi_sleep_n (EnblSlpM) (Device and Host)
+		 * The application uses this bit to control
+		 * the utmi_sleep_n assertion to the PHY when in L1 state.
+		 */
+		unsigned en_utmi_sleep:1;
+		/** HIRD Threshold (HIRD_Thres) (Device and Host)
+		 */
+		unsigned hird_thres:5;
+		/** LPM Response (CoreL1Res) (Device and Host)
+		 * In Host mode this bit contains handshake response to
+		 * LPM transaction.
+		 * In Device mode the response of the core to
+		 * LPM transaction received is reflected in these two bits.
+		 - 0x0 : ERROR (No handshake response)
+		 - 0x1 : STALL
+		 - 0x2 : NYET
+		 - 0x3 : ACK
+		 */
+		unsigned lpm_resp:2;
+		/** Port Sleep Status (SlpSts) (Device and Host)
+		 * This bit is set as long as a Sleep condition
+		 * is present on the USB bus.
+		 */
+		unsigned prt_sleep_sts:1;
+		/** Sleep State Resume OK (L1ResumeOK) (Device and Host)
+		 * Indicates that the application or host
+		 * can start resume from Sleep state.
+		 */
+		unsigned sleep_state_resumeok:1;
+		/** LPM channel Index (LPM_Chnl_Indx) (Host)
+		 * The channel number on which the LPM transaction
+		 * has to be applied while sending
+		 * an LPM transaction to the local device.
+		 */
+		unsigned lpm_chan_index:4;
+		/** LPM Retry Count (LPM_Retry_Cnt) (Host)
+		 * Number of host retries that would be performed
+		 * if the device response was not a valid response.
+		 */
+		unsigned retry_count:3;
+		/** Send LPM Transaction (SndLPM) (Host)
+		 * When set by application software,
+		 * an LPM transaction containing two tokens
+		 * is sent.
+		 */
+		unsigned send_lpm:1;
+		/** LPM Retry status (LPM_RetryCnt_Sts) (Host)
+		 * Number of LPM Host Retries still remaining
+		 * to be transmitted for the current LPM sequence
+		 */
+		unsigned retry_count_sts:3;
+		/** Enable Best Effort Service Latency (BESL) (Device and Host)
+		 * This bit enables the BESL features as defined in the LPM errata
+		 */
+		unsigned en_besl:1;
+
+		unsigned reserved29:1;
+		/** In host mode once this bit is set, the host
+		 * configures to drive the HSIC Idle state on the bus.
+		 * It then waits for the device to initiate the Connect sequence.
+		 * In device mode once this bit is set, the device waits for
+		 * the HSIC Idle line state on the bus. Upon receiving the Idle
+		 * line state, it initiates the HSIC Connect sequence.
+		 */
+		unsigned hsic_connect:1;
+		/** This bit overrides and functionally inverts
+		 * the if_select_hsic input port signal.
+		 */
+		unsigned inv_sel_hsic:1;
+	} b;
+} glpmcfg_data_t;
+
+/**
+ * This union represents the bit fields of the Core ADP Timer, Control and
+ * Status Register (ADPTIMCTLSTS). Set the bits using bit fields then write
+ * the <i>d32</i> value to the register.
+ */
+typedef union adpctl_data {
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Probe Discharge (PRB_DSCHG)
+		 * These bits set the times for TADP_DSCHG.
+		 * These bits are defined as follows:
+		 * 2'b00 - 4 msec
+		 * 2'b01 - 8 msec
+		 * 2'b10 - 16 msec
+		 * 2'b11 - 32 msec
+		 */
+		unsigned prb_dschg:2;
+		/** Probe Delta (PRB_DELTA)
+		 * These bits set the resolution for RTIM value.
+		 * The bits are defined in units of 32 kHz clock cycles as follows:
+		 * 2'b00 - 1 cycles
+		 * 2'b01 - 2 cycles
+		 * 2'b10 - 3 cycles
+		 * 2'b11 - 4 cycles
+		 * For example if this value is chosen to 2'b01, it means that RTIM
+		 * increments for every 3(three) 32Khz clock cycles.
+		 */
+		unsigned prb_delta:2;
+		/** Probe Period (PRB_PER)
+		 * These bits set the TADP_PRD as shown in Figure 4 as follows:
+		 * 2'b00 - 0.625 to 0.925 sec (typical 0.775 sec)
+		 * 2'b01 - 1.25 to 1.85 sec (typical 1.55 sec)
+		 * 2'b10 - 1.9 to 2.6 sec (typical 2.275 sec)
+		 * 2'b11 - Reserved
+		 */
+		unsigned prb_per:2;
+		/** These bits capture the latest time it took for VBUS to ramp from
+		 * VADP_SINK to VADP_PRB.
+		 * 0x000 - 1 cycles
+		 * 0x001 - 2 cycles
+		 * 0x002 - 3 cycles
+		 * etc
+		 * 0x7FF - 2048 cycles
+		 * A time of 1024 cycles at 32 kHz corresponds to a time of 32 msec.
+		 */
+		unsigned rtim:11;
+		/** Enable Probe (EnaPrb)
+		 * When programmed to 1'b1, the core performs a probe operation.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned enaprb:1;
+		/** Enable Sense (EnaSns)
+		 * When programmed to 1'b1, the core performs a Sense operation.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned enasns:1;
+		/** ADP Reset (ADPRes)
+		 * When set, ADP controller is reset.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned adpres:1;
+		/** ADP Enable (ADPEn)
+		 * When set, the core performs either ADP probing or sensing
+		 * based on EnaPrb or EnaSns.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned adpen:1;
+		/** ADP Probe Interrupt (ADP_PRB_INT)
+		 * When this bit is set, it means that the VBUS
+		 * voltage is greater than VADP_PRB or VADP_PRB is reached.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned adp_prb_int:1;
+		/**
+		 * ADP Sense Interrupt (ADP_SNS_INT)
+		 * When this bit is set, it means that the VBUS voltage is greater than
+		 * VADP_SNS value or VADP_SNS is reached.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned adp_sns_int:1;
+		/** ADP Timeout Interrupt (ADP_TMOUT_INT)
+		 * This bit is relevant only for an ADP probe.
+		 * When this bit is set, it means that the ramp time has
+		 * completed ie ADPCTL.RTIM has reached its terminal value
+		 * of 0x7FF. This is a debug feature that allows software
+		 * to read the ramp time after each cycle.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned adp_tmout_int:1;
+		/** ADP Probe Interrupt Mask (ADP_PRB_INT_MSK)
+		 * When this bit is set, it unmasks the interrupt due to ADP_PRB_INT.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned adp_prb_int_msk:1;
+		/** ADP Sense Interrupt Mask (ADP_SNS_INT_MSK)
+		 * When this bit is set, it unmasks the interrupt due to ADP_SNS_INT.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned adp_sns_int_msk:1;
+		/** ADP Timeout Interrupt Mask (ADP_TMOUT_MSK)
+		 * When this bit is set, it unmasks the interrupt due to ADP_TMOUT_INT.
+		 * This bit is valid only if OTG_Ver = 1'b1.
+		 */
+		unsigned adp_tmout_int_msk:1;
+		/** Access Request
+		 * 2'b00 - Read/Write Valid (updated by the core)
+		 * 2'b01 - Read
+		 * 2'b10 - Write
+		 * 2'b11 - Reserved
+		 */
+		unsigned ar:2;
+		/** Reserved */
+		unsigned reserved29_31:3;
+	} b;
+} adpctl_data_t;
+
+////////////////////////////////////////////
+// Device Registers
+/**
+ * Device Global Registers. <i>Offsets 800h-BFFh</i>
+ *
+ * The following structures define the size and relative field offsets
+ * for the Device Mode Registers.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct fh_otg_dev_global_regs {
+	/** Device Configuration Register. <i>Offset 800h</i> */
+	volatile uint32_t dcfg;
+	/** Device Control Register. <i>Offset: 804h</i> */
+	volatile uint32_t dctl;
+	/** Device Status Register (Read Only). <i>Offset: 808h</i> */
+	volatile uint32_t dsts;
+	/** Reserved. <i>Offset: 80Ch</i> */
+	uint32_t unused;
+	/** Device IN Endpoint Common Interrupt Mask
+	 * Register. <i>Offset: 810h</i> */
+	volatile uint32_t diepmsk;
+	/** Device OUT Endpoint Common Interrupt Mask
+	 * Register. <i>Offset: 814h</i> */
+	volatile uint32_t doepmsk;
+	/** Device All Endpoints Interrupt Register. <i>Offset: 818h</i> */
+	volatile uint32_t daint;
+	/** Device All Endpoints Interrupt Mask Register. <i>Offset:
+	 * 81Ch</i> */
+	volatile uint32_t daintmsk;
+	/** Device IN Token Queue Read Register-1 (Read Only).
+	 * <i>Offset: 820h</i> */
+	volatile uint32_t dtknqr1;
+	/** Device IN Token Queue Read Register-2 (Read Only).
+	 * <i>Offset: 824h</i> */
+	volatile uint32_t dtknqr2;
+	/** Device VBUS discharge Register. <i>Offset: 828h</i> */
+	volatile uint32_t dvbusdis;
+	/** Device VBUS Pulse Register. <i>Offset: 82Ch</i> */
+	volatile uint32_t dvbuspulse;
+	/** Device IN Token Queue Read Register-3 (Read Only). /
+	 * Device Thresholding control register (Read/Write)
+	 * <i>Offset: 830h</i> */
+	volatile uint32_t dtknqr3_dthrctl;
+	/** Device IN Token Queue Read Register-4 (Read Only). /
+	 * Device IN EPs empty Intr. Mask Register (Read/Write)
+	 * <i>Offset: 834h</i> */
+	volatile uint32_t dtknqr4_fifoemptymsk;
+	/** Device Each Endpoint Interrupt Register (Read Only).
+	 * <i>Offset: 838h</i> */
+	volatile uint32_t deachint;
+	/** Device Each Endpoint Interrupt mask Register (Read/Write).
+	 * <i>Offset: 83Ch</i> */
+	volatile uint32_t deachintmsk;
+	/** Device Each In Endpoint Interrupt mask Register (Read/Write).
+	 * <i>Offset: 840h</i> */
+	volatile uint32_t diepeachintmsk[MAX_EPS_CHANNELS];
+	/** Device Each Out Endpoint Interrupt mask Register (Read/Write).
+	 * <i>Offset: 880h</i> */
+	volatile uint32_t doepeachintmsk[MAX_EPS_CHANNELS];
+} fh_otg_device_global_regs_t;
+
+/**
+ * This union represents the bit fields in the Device Configuration
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements. Write the
+ * <i>d32</i> member to the dcfg register.
+ */
+typedef union dcfg_data {
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Device Speed */
+		unsigned devspd:2;
+		/** Non Zero Length Status OUT Handshake */
+		unsigned nzstsouthshk:1;
+#define FH_DCFG_SEND_STALL 1
+
+		unsigned ena32khzs:1;	/* Enable 32-KHz Suspend mode */
+		/** Device Address */
+		unsigned devaddr:7;
+		/** Periodic Frame Interval */
+		unsigned perfrint:2;
+#define FH_DCFG_FRAME_INTERVAL_80 0
+#define FH_DCFG_FRAME_INTERVAL_85 1
+#define FH_DCFG_FRAME_INTERVAL_90 2
+#define FH_DCFG_FRAME_INTERVAL_95 3
+
+		/** Enable Device OUT NAK for bulk in DDMA mode */
+		unsigned endevoutnak:1;
+
+		unsigned reserved14_17:4;
+		/** In Endpoint Mis-match count */
+		unsigned epmscnt:5;
+		/** Enable Descriptor DMA in Device mode */
+		unsigned descdma:1;
+		unsigned perschintvl:2;	/* Periodic scheduling interval */
+		unsigned resvalid:6;	/* Resume validation period */
+	} b;
+} dcfg_data_t;
+
+/**
+ * This union represents the bit fields in the Device Control
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union dctl_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Remote Wakeup */
+ unsigned rmtwkupsig:1;
+ /** Soft Disconnect */
+ unsigned sftdiscon:1;
+ /** Global Non-Periodic IN NAK Status */
+ unsigned gnpinnaksts:1;
+ /** Global OUT NAK Status */
+ unsigned goutnaksts:1;
+ /** Test Control */
+ unsigned tstctl:3;
+ /** Set Global Non-Periodic IN NAK */
+ unsigned sgnpinnak:1;
+ /** Clear Global Non-Periodic IN NAK */
+ unsigned cgnpinnak:1;
+ /** Set Global OUT NAK */
+ unsigned sgoutnak:1;
+ /** Clear Global OUT NAK */
+ unsigned cgoutnak:1;
+ /** Power-On Programming Done */
+ unsigned pwronprgdone:1;
+ /** Reserved */
+ unsigned reserved:1;
+ /** Global Multi Count */
+ unsigned gmc:2;
+ /** Ignore Frame Number for ISOC EPs */
+ unsigned ifrmnum:1;
+ /** NAK on Babble */
+ unsigned nakonbble:1;
+ /** Enable Continue on BNA */
+ unsigned encontonbna:1;
+ /** Enable deep sleep besl reject feature*/
+ unsigned besl_reject:1;
+
+ unsigned reserved17_31:13;
+ } b;
+} dctl_data_t;
+
+/**
+ * This union represents the bit fields in the Device Status
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union dsts_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Suspend Status */
+ unsigned suspsts:1;
+ /** Enumerated Speed */
+ unsigned enumspd:2;
+#define FH_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0
+#define FH_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1
+#define FH_DSTS_ENUMSPD_LS_PHY_6MHZ 2
+#define FH_DSTS_ENUMSPD_FS_PHY_48MHZ 3
+ /** Erratic Error */
+ unsigned errticerr:1;
+ unsigned reserved4_7:4;
+ /** Frame or Microframe Number of the received SOF */
+ unsigned soffn:14;
+ unsigned reserved22_31:10;
+ } b;
+} dsts_data_t;
+
+/**
+ * This union represents the bit fields in the Device IN EP Interrupt
+ * Register and the Device IN EP Common Mask Register.
+ *
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union diepint_data {
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Transfer complete mask */
+		unsigned xfercompl:1;
+		/** Endpoint disable mask */
+		unsigned epdisabled:1;
+		/** AHB Error mask */
+		unsigned ahberr:1;
+		/** TimeOUT Handshake mask (non-ISOC EPs) */
+		unsigned timeout:1;
+		/** IN Token received with TxF Empty mask */
+		unsigned intktxfemp:1;
+		/** IN Token Received with EP mismatch mask */
+		unsigned intknepmis:1;
+		/** IN Endpoint NAK Effective mask */
+		unsigned inepnakeff:1;
+		/** Transmit FIFO empty mask */
+		unsigned emptyintr:1;
+
+		unsigned txfifoundrn:1;	/* Tx FIFO underrun mask */
+
+		/** BNA Interrupt mask */
+		unsigned bna:1;
+
+		unsigned reserved10_12:3;
+		/** NAK Interrupt mask */
+		unsigned nak:1;
+
+		unsigned reserved14_31:18;
+	} b;
+} diepint_data_t;
+
+/**
+ * This union represents the bit fields in the Device IN EP
+ * Common/Dedicated Interrupt Mask Register.
+ */
+typedef union diepint_data diepmsk_data_t;
+
+/**
+ * This union represents the bit fields in the Device OUT EP Interrupt
+ * Register and Device OUT EP Common Interrupt Mask Register.
+ *
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union doepint_data {
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Transfer complete */
+		unsigned xfercompl:1;
+		/** Endpoint disable */
+		unsigned epdisabled:1;
+		/** AHB Error */
+		unsigned ahberr:1;
+		/** Setup Phase Done (control EPs) */
+		unsigned setup:1;
+		/** OUT Token Received when Endpoint Disabled */
+		unsigned outtknepdis:1;
+
+		unsigned stsphsercvd:1;	/* Status phase received (control EPs) */
+		/** Back-to-Back SETUP Packets Received */
+		unsigned back2backsetup:1;
+
+		unsigned reserved7:1;
+		/** OUT packet Error */
+		unsigned outpkterr:1;
+		/** BNA Interrupt */
+		unsigned bna:1;
+
+		unsigned reserved10:1;
+		/** Packet Drop Status */
+		unsigned pktdrpsts:1;
+		/** Babble Interrupt */
+		unsigned babble:1;
+		/** NAK Interrupt */
+		unsigned nak:1;
+		/** NYET Interrupt */
+		unsigned nyet:1;
+		/** Bit indicating setup packet received */
+		unsigned sr:1;
+
+		unsigned reserved16_31:16;
+	} b;
+} doepint_data_t;
+
+/**
+ * This union represents the bit fields in the Device OUT EP
+ * Common/Dedicated Interrupt Mask Register.
+ */
+typedef union doepint_data doepmsk_data_t;
+
+/**
+ * This union represents the bit fields in the Device All EP Interrupt
+ * and Mask Registers.
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union daint_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** IN Endpoint bits */
+ unsigned in:16;
+ /** OUT Endpoint bits */
+ unsigned out:16;
+ } ep;
+ struct {
+ /** IN Endpoint bits */
+ unsigned inep0:1;
+ unsigned inep1:1;
+ unsigned inep2:1;
+ unsigned inep3:1;
+ unsigned inep4:1;
+ unsigned inep5:1;
+ unsigned inep6:1;
+ unsigned inep7:1;
+ unsigned inep8:1;
+ unsigned inep9:1;
+ unsigned inep10:1;
+ unsigned inep11:1;
+ unsigned inep12:1;
+ unsigned inep13:1;
+ unsigned inep14:1;
+ unsigned inep15:1;
+ /** OUT Endpoint bits */
+ unsigned outep0:1;
+ unsigned outep1:1;
+ unsigned outep2:1;
+ unsigned outep3:1;
+ unsigned outep4:1;
+ unsigned outep5:1;
+ unsigned outep6:1;
+ unsigned outep7:1;
+ unsigned outep8:1;
+ unsigned outep9:1;
+ unsigned outep10:1;
+ unsigned outep11:1;
+ unsigned outep12:1;
+ unsigned outep13:1;
+ unsigned outep14:1;
+ unsigned outep15:1;
+ } b;
+} daint_data_t;
+
+/**
+ * This union represents the bit fields in the Device IN Token Queue
+ * Read Registers.
+ * - Read the register into the <i>d32</i> member.
+ * - READ-ONLY Register
+ */
+typedef union dtknq1_data {
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** In Token Queue Write Pointer */
+		unsigned intknwptr:5;
+		/** Reserved */
+		unsigned reserved05_06:2;
+		/** write pointer has wrapped. */
+		unsigned wrap_bit:1;
+		/** EP Numbers of IN Tokens 0 ... 5 (six 4-bit entries) */
+		unsigned epnums0_5:24;
+	} b;
+} dtknq1_data_t;
+
+/**
+ * This union represents Threshold control Register
+ * - Read and write the register into the <i>d32</i> member.
+ * - READ-WRITABLE Register
+ */
+typedef union dthrctl_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** non ISO Tx Thr. Enable */
+ unsigned non_iso_thr_en:1;
+ /** ISO Tx Thr. Enable */
+ unsigned iso_thr_en:1;
+ /** Tx Thr. Length */
+ unsigned tx_thr_len:9;
+ /** AHB Threshold ratio */
+ unsigned ahb_thr_ratio:2;
+ /** Reserved */
+ unsigned reserved13_15:3;
+ /** Rx Thr. Enable */
+ unsigned rx_thr_en:1;
+ /** Rx Thr. Length */
+ unsigned rx_thr_len:9;
+ unsigned reserved26:1;
+ /** Arbiter Parking Enable*/
+ unsigned arbprken:1;
+ /** Reserved */
+ unsigned reserved28_31:4;
+ } b;
+} dthrctl_data_t;
+
+/**
+ * Device Logical IN Endpoint-Specific Registers. <i>Offsets
+ * 900h-AFCh</i>
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct fh_otg_dev_in_ep_regs {
+ /** Device IN Endpoint Control Register. <i>Offset:900h +
+ * (ep_num * 20h) + 00h</i> */
+ volatile uint32_t diepctl;
+ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 04h</i> */
+ uint32_t reserved04;
+ /** Device IN Endpoint Interrupt Register. <i>Offset:900h +
+ * (ep_num * 20h) + 08h</i> */
+ volatile uint32_t diepint;
+ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 0Ch</i> */
+ uint32_t reserved0C;
+ /** Device IN Endpoint Transfer Size
+ * Register. <i>Offset:900h + (ep_num * 20h) + 10h</i> */
+ volatile uint32_t dieptsiz;
+ /** Device IN Endpoint DMA Address Register. <i>Offset:900h +
+ * (ep_num * 20h) + 14h</i> */
+ volatile uint32_t diepdma;
+ /** Device IN Endpoint Transmit FIFO Status Register. <i>Offset:900h +
+ * (ep_num * 20h) + 18h</i> */
+ volatile uint32_t dtxfsts;
+ /** Device IN Endpoint DMA Buffer Register. <i>Offset:900h +
+ * (ep_num * 20h) + 1Ch</i> */
+ volatile uint32_t diepdmab;
+} fh_otg_dev_in_ep_regs_t;
+
+/**
+ * Device Logical OUT Endpoint-Specific Registers. <i>Offsets:
+ * B00h-CFCh</i>
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct fh_otg_dev_out_ep_regs {
+ /** Device OUT Endpoint Control Register. <i>Offset:B00h +
+ * (ep_num * 20h) + 00h</i> */
+ volatile uint32_t doepctl;
+ /** Reserved. <i>Offset:B00h + (ep_num * 20h) + 04h</i> */
+ uint32_t reserved04;
+ /** Device OUT Endpoint Interrupt Register. <i>Offset:B00h +
+ * (ep_num * 20h) + 08h</i> */
+ volatile uint32_t doepint;
+ /** Reserved. <i>Offset:B00h + (ep_num * 20h) + 0Ch</i> */
+ uint32_t reserved0C;
+ /** Device OUT Endpoint Transfer Size Register. <i>Offset:
+ * B00h + (ep_num * 20h) + 10h</i> */
+ volatile uint32_t doeptsiz;
+ /** Device OUT Endpoint DMA Address Register. <i>Offset:B00h
+ * + (ep_num * 20h) + 14h</i> */
+ volatile uint32_t doepdma;
+ /** Reserved. <i>Offset:B00h + * (ep_num * 20h) + 18h</i> */
+ uint32_t unused;
+ /** Device OUT Endpoint DMA Buffer Register. <i>Offset:B00h
+ * + (ep_num * 20h) + 1Ch</i> */
+ uint32_t doepdmab;
+} fh_otg_dev_out_ep_regs_t;
+
+/**
+ * This union represents the bit fields in the Device EP Control
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union depctl_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Maximum Packet Size
+ * IN/OUT EPn
+ * IN/OUT EP0 - 2 bits
+ * 2'b00: 64 Bytes
+ * 2'b01: 32
+ * 2'b10: 16
+ * 2'b11: 8 */
+ unsigned mps:11;
+#define FH_DEP0CTL_MPS_64 0
+#define FH_DEP0CTL_MPS_32 1
+#define FH_DEP0CTL_MPS_16 2
+#define FH_DEP0CTL_MPS_8 3
+
+ /** Next Endpoint
+ * IN EPn/IN EP0
+ * OUT EPn/OUT EP0 - reserved */
+ unsigned nextep:4;
+
+ /** USB Active Endpoint */
+ unsigned usbactep:1;
+
+ /** Endpoint DPID (INTR/Bulk IN and OUT endpoints)
+ * This field contains the PID of the packet going to
+ * be received or transmitted on this endpoint. The
+ * application should program the PID of the first
+ * packet going to be received or transmitted on this
+ * endpoint , after the endpoint is
+ * activated. Application use the SetD1PID and
+ * SetD0PID fields of this register to program either
+ * D0 or D1 PID.
+ *
+ * The encoding for this field is
+ * - 0: D0
+ * - 1: D1
+ */
+ unsigned dpid:1;
+
+ /** NAK Status */
+ unsigned naksts:1;
+
+ /** Endpoint Type
+ * 2'b00: Control
+ * 2'b01: Isochronous
+ * 2'b10: Bulk
+ * 2'b11: Interrupt */
+ unsigned eptype:2;
+
+ /** Snoop Mode
+ * OUT EPn/OUT EP0
+ * IN EPn/IN EP0 - reserved */
+ unsigned snp:1;
+
+ /** Stall Handshake */
+ unsigned stall:1;
+
+ /** Tx Fifo Number
+ * IN EPn/IN EP0
+ * OUT EPn/OUT EP0 - reserved */
+ unsigned txfnum:4;
+
+ /** Clear NAK */
+ unsigned cnak:1;
+ /** Set NAK */
+ unsigned snak:1;
+ /** Set DATA0 PID (INTR/Bulk IN and OUT endpoints)
+ * Writing to this field sets the Endpoint DPID (DPID)
+ * field in this register to DATA0. Set Even
+ * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints)
+ * Writing to this field sets the Even/Odd
+ * (micro)frame (EO_FrNum) field to even (micro)
+ * frame.
+ */
+ unsigned setd0pid:1;
+ /** Set DATA1 PID (INTR/Bulk IN and OUT endpoints)
+ * Writing to this field sets the Endpoint DPID (DPID)
+ * field in this register to DATA1 Set Odd
+ * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints)
+ * Writing to this field sets the Even/Odd
+ * (micro)frame (EO_FrNum) field to odd (micro) frame.
+ */
+ unsigned setd1pid:1;
+
+ /** Endpoint Disable */
+ unsigned epdis:1;
+ /** Endpoint Enable */
+ unsigned epena:1;
+ } b;
+} depctl_data_t;
+
+/**
+ * This union represents the bit fields in the Device EP Transfer
+ * Size Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union deptsiz_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Transfer size */
+ unsigned xfersize:19;
+/** Max packet count for EP (pow(2,10)-1) */
+#define MAX_PKT_CNT 1023
+ /** Packet Count */
+ unsigned pktcnt:10;
+ /** Multi Count - Periodic IN endpoints */
+ unsigned mc:2;
+ unsigned reserved:1;
+ } b;
+} deptsiz_data_t;
+
+/**
+ * This union represents the bit fields in the Device EP 0 Transfer
+ * Size Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union deptsiz0_data {
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Transfer size */
+		unsigned xfersize:7;
+		/** Reserved */
+		unsigned reserved7_18:12;
+		/** Packet Count */
+		unsigned pktcnt:2;
+		/** Reserved */
+		unsigned reserved21_28:8;
+		/**Setup Packet Count (DOEPTSIZ0 Only) */
+		unsigned supcnt:2;
+		unsigned reserved31:1;	/* ":1" required: an unsized member widens the union to 8 bytes */
+	} b;
+} deptsiz0_data_t;
+
+/////////////////////////////////////////////////
+// DMA Descriptor Specific Structures
+//
+
+/** Buffer status definitions */
+
+#define BS_HOST_READY 0x0
+#define BS_DMA_BUSY 0x1
+#define BS_DMA_DONE 0x2
+#define BS_HOST_BUSY 0x3
+
+/** Receive/Transmit status definitions */
+
+#define RTS_SUCCESS 0x0
+#define RTS_BUFFLUSH 0x1
+#define RTS_RESERVED 0x2
+#define RTS_BUFERR 0x3
+
+/**
+ * This union represents the bit fields in the DMA Descriptor
+ * status quadlet. Read the quadlet into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it, <i>b_iso_out</i> and
+ * <i>b_iso_in</i> elements.
+ */
+typedef union dev_dma_desc_sts {
+	/** raw register data */
+	uint32_t d32;
+	/** quadlet bits */
+	struct {
+		/** Received number of bytes */
+		unsigned bytes:16;
+		/** NAK bit - only for OUT EPs */
+		unsigned nak:1;
+		unsigned reserved17_22:6;
+		/** Multiple Transfer - only for OUT EPs */
+		unsigned mtrf:1;
+		/** Setup Packet received - only for OUT EPs */
+		unsigned sr:1;
+		/** Interrupt On Complete */
+		unsigned ioc:1;
+		/** Short Packet */
+		unsigned sp:1;
+		/** Last */
+		unsigned l:1;
+		/** Receive Status */
+		unsigned sts:2;
+		/** Buffer Status */
+		unsigned bs:2;
+	} b;
+
+//#ifdef FH_EN_ISOC
+	/** iso out quadlet bits */
+	struct {
+		/** Received number of bytes */
+		unsigned rxbytes:11;
+
+		unsigned reserved11:1;
+		/** Frame Number */
+		unsigned framenum:11;
+		/** Received ISO Data PID */
+		unsigned pid:2;
+		/** Interrupt On Complete */
+		unsigned ioc:1;
+		/** Short Packet */
+		unsigned sp:1;
+		/** Last */
+		unsigned l:1;
+		/** Receive Status */
+		unsigned rxsts:2;
+		/** Buffer Status */
+		unsigned bs:2;
+	} b_iso_out;
+
+	/** iso in quadlet bits */
+	struct {
+		/** Transmitted number of bytes */
+		unsigned txbytes:12;
+		/** Frame Number */
+		unsigned framenum:11;
+		/** Transmitted ISO Data PID */
+		unsigned pid:2;
+		/** Interrupt On Complete */
+		unsigned ioc:1;
+		/** Short Packet */
+		unsigned sp:1;
+		/** Last */
+		unsigned l:1;
+		/** Transmit Status */
+		unsigned txsts:2;
+		/** Buffer Status */
+		unsigned bs:2;
+	} b_iso_in;
+//#endif /* FH_EN_ISOC */
+} dev_dma_desc_sts_t;
+
+/**
+ * DMA Descriptor structure
+ *
+ * DMA Descriptor structure contains two quadlets:
+ * Status quadlet and Data buffer pointer.
+ */
+typedef struct fh_otg_dev_dma_desc {
+ /** DMA Descriptor status quadlet */
+ dev_dma_desc_sts_t status;
+ /** DMA Descriptor data buffer pointer */
+ uint32_t buf;
+} fh_otg_dev_dma_desc_t;
+
+/**
+ * The fh_otg_dev_if structure contains information needed to manage
+ * the FH_otg controller acting in device mode. It represents the
+ * programming view of the device-specific aspects of the controller.
+ */
+typedef struct fh_otg_dev_if {
+	/** Pointer to device Global registers.
+	 * Device Global Registers starting at offset 800h
+	 */
+	fh_otg_device_global_regs_t *dev_global_regs;
+#define FH_DEV_GLOBAL_REG_OFFSET 0x800
+
+	/**
+	 * Device Logical IN Endpoint-Specific Registers 900h-AFCh
+	 */
+	fh_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS];
+#define FH_DEV_IN_EP_REG_OFFSET 0x900
+#define FH_EP_REG_OFFSET 0x20
+
+	/** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */
+	fh_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS];
+#define FH_DEV_OUT_EP_REG_OFFSET 0xB00
+
+	/* Device configuration information */
+	uint8_t speed;	/**< Device Speed 0: Unknown, 1: LS, 2:FS, 3: HS */
+	uint8_t num_in_eps;	/**< Number of IN (Tx) EPs in range 0-15, except EP0 */
+	uint8_t num_out_eps;	/**< Number of OUT (Rx) EPs in range 0-15, except EP0 */
+
+	/** Size of periodic FIFOs (Bytes) */
+	uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS];
+
+	/** Size of Tx FIFOs (Bytes) */
+	uint16_t tx_fifo_size[MAX_TX_FIFOS];
+
+	/** Thresholding enable flags and length variables */
+	uint16_t rx_thr_en;
+	uint16_t iso_tx_thr_en;
+	uint16_t non_iso_tx_thr_en;
+
+	uint16_t rx_thr_length;
+	uint16_t tx_thr_length;
+
+	/**
+	 * Pointers to the DMA Descriptors for EP0 Control
+	 * transfers (virtual and physical)
+	 */
+
+	/** 2 descriptors for SETUP packets */
+	fh_dma_t dma_setup_desc_addr[2];
+	fh_otg_dev_dma_desc_t *setup_desc_addr[2];
+
+	/** Pointer to Descriptor with latest SETUP packet */
+	fh_otg_dev_dma_desc_t *psetup;
+
+	/** Index of current SETUP handler descriptor */
+	uint32_t setup_desc_index;
+
+	/** Descriptor for Data In or Status In phases */
+	fh_dma_t dma_in_desc_addr;
+	fh_otg_dev_dma_desc_t *in_desc_addr;
+
+	/** Descriptor for Data Out or Status Out phases */
+	fh_dma_t dma_out_desc_addr;
+	fh_otg_dev_dma_desc_t *out_desc_addr;
+
+	/** Setup Packet Detected - if set clear NAK when queueing */
+	uint32_t spd;
+	/** Isoc ep pointer on which incomplete happens */
+	void *isoc_ep;
+
+} fh_otg_dev_if_t;
+
+/////////////////////////////////////////////////
+// Host Mode Register Structures
+//
+/**
+ * The Host Global Registers structure defines the size and relative
+ * field offsets for the Host Mode Global Registers. Host Global
+ * Registers offsets 400h-7FFh.
+*/
+typedef struct fh_otg_host_global_regs {
+ /** Host Configuration Register. <i>Offset: 400h</i> */
+ volatile uint32_t hcfg;
+ /** Host Frame Interval Register. <i>Offset: 404h</i> */
+ volatile uint32_t hfir;
+ /** Host Frame Number / Frame Remaining Register. <i>Offset: 408h</i> */
+ volatile uint32_t hfnum;
+ /** Reserved. <i>Offset: 40Ch</i> */
+ uint32_t reserved40C;
+ /** Host Periodic Transmit FIFO/ Queue Status Register. <i>Offset: 410h</i> */
+ volatile uint32_t hptxsts;
+ /** Host All Channels Interrupt Register. <i>Offset: 414h</i> */
+ volatile uint32_t haint;
+ /** Host All Channels Interrupt Mask Register. <i>Offset: 418h</i> */
+ volatile uint32_t haintmsk;
+ /** Host Frame List Base Address Register . <i>Offset: 41Ch</i> */
+ volatile uint32_t hflbaddr;
+} fh_otg_host_global_regs_t;
+
+/**
+ * This union represents the bit fields in the Host Configuration Register.
+ * Read the register into the <i>d32</i> member then set/clear the bits using
+ * the <i>b</i>it elements. Write the <i>d32</i> member to the hcfg register.
+ */
+typedef union hcfg_data {
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		/** FS/LS Phy Clock Select */
+		unsigned fslspclksel:2;
+#define FH_HCFG_30_60_MHZ 0
+#define FH_HCFG_48_MHZ 1
+#define FH_HCFG_6_MHZ 2
+
+		/** FS/LS Only Support */
+		unsigned fslssupp:1;
+		unsigned reserved3_6:4;
+		/** Enable 32-KHz Suspend Mode */
+		unsigned ena32khzs:1;
+		/** Resume Validation Period */
+		unsigned resvalid:8;
+		unsigned reserved16_22:7;
+		/** Enable Scatter/gather DMA in Host mode */
+		unsigned descdma:1;
+		/** Frame List Entries */
+		unsigned frlisten:2;
+		/** Enable Periodic Scheduling */
+		unsigned perschedena:1;
+		unsigned reserved27_30:4;
+		unsigned modechtimen:1;
+	} b;
+} hcfg_data_t;
+
+/**
+ * This union represents the bit fields in the Host Frame Interval
+ * Register (HFIR, offset 404h).
+ */
+typedef union hfir_data {
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		unsigned frint:16;	/* Frame interval */
+		unsigned hfirrldctrl:1;	/* HFIR reload control */
+		unsigned reserved:15;
+	} b;
+} hfir_data_t;
+
+/**
+ * This union represents the bit fields in the Host Frame Remaining/Number
+ * Register.
+ */
+typedef union hfnum_data {
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		unsigned frnum:16;	/* Frame number */
+#define FH_HFNUM_MAX_FRNUM 0x3FFF
+		unsigned frrem:16;	/* Frame time remaining */
+	} b;
+} hfnum_data_t;
+
+typedef union hptxsts_data {
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned ptxfspcavail:16;
+ unsigned ptxqspcavail:8;
+ /** Top of the Periodic Transmit Request Queue
+ * - bit 24 - Terminate (last entry for the selected channel)
+ * - bits 26:25 - Token Type
+ * - 2'b00 - Zero length
+ * - 2'b01 - Ping
+ * - 2'b10 - Disable
+ * - bits 30:27 - Channel Number
+ * - bit 31 - Odd/even microframe
+ */
+ unsigned ptxqtop_terminate:1;
+ unsigned ptxqtop_token:2;
+ unsigned ptxqtop_chnum:4;
+ unsigned ptxqtop_odd:1;
+ } b;
+} hptxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Host Port Control and Status
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hprt0 register.
+ */
+typedef union hprt0_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned prtconnsts:1;
+ unsigned prtconndet:1;
+ unsigned prtena:1;
+ unsigned prtenchng:1;
+ unsigned prtovrcurract:1;
+ unsigned prtovrcurrchng:1;
+ unsigned prtres:1;
+ unsigned prtsusp:1;
+ unsigned prtrst:1;
+ unsigned reserved9:1;
+ unsigned prtlnsts:2;
+ unsigned prtpwr:1;
+ unsigned prttstctl:4;
+ unsigned prtspd:2;
+#define FH_HPRT0_PRTSPD_HIGH_SPEED 0
+#define FH_HPRT0_PRTSPD_FULL_SPEED 1
+#define FH_HPRT0_PRTSPD_LOW_SPEED 2
+ unsigned reserved19_31:13;
+ } b;
+} hprt0_data_t;
+
+/**
+ * This union represents the bit fields in the Host All Channels
+ * Interrupt Register.
+ */
+typedef union haint_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned ch0:1;
+ unsigned ch1:1;
+ unsigned ch2:1;
+ unsigned ch3:1;
+ unsigned ch4:1;
+ unsigned ch5:1;
+ unsigned ch6:1;
+ unsigned ch7:1;
+ unsigned ch8:1;
+ unsigned ch9:1;
+ unsigned ch10:1;
+ unsigned ch11:1;
+ unsigned ch12:1;
+ unsigned ch13:1;
+ unsigned ch14:1;
+ unsigned ch15:1;
+ unsigned reserved:16;
+ } b;
+
+ struct {
+ unsigned chint:16;
+ unsigned reserved:16;
+ } b2;
+} haint_data_t;
+
+/**
+ * This union represents the bit fields in the Host All Channels
+ * Interrupt Mask Register.
+ */
+typedef union haintmsk_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned ch0:1;
+ unsigned ch1:1;
+ unsigned ch2:1;
+ unsigned ch3:1;
+ unsigned ch4:1;
+ unsigned ch5:1;
+ unsigned ch6:1;
+ unsigned ch7:1;
+ unsigned ch8:1;
+ unsigned ch9:1;
+ unsigned ch10:1;
+ unsigned ch11:1;
+ unsigned ch12:1;
+ unsigned ch13:1;
+ unsigned ch14:1;
+ unsigned ch15:1;
+ unsigned reserved:16;
+ } b;
+
+ struct {
+ unsigned chint:16;
+ unsigned reserved:16;
+ } b2;
+} haintmsk_data_t;
+
+/**
+ * Host Channel Specific Registers. <i>500h-5FCh</i>
+ */
+typedef struct fh_otg_hc_regs {
+ /** Host Channel 0 Characteristic Register. <i>Offset: 500h + (chan_num * 20h) + 00h</i> */
+ volatile uint32_t hcchar;
+ /** Host Channel 0 Split Control Register. <i>Offset: 500h + (chan_num * 20h) + 04h</i> */
+ volatile uint32_t hcsplt;
+ /** Host Channel 0 Interrupt Register. <i>Offset: 500h + (chan_num * 20h) + 08h</i> */
+ volatile uint32_t hcint;
+ /** Host Channel 0 Interrupt Mask Register. <i>Offset: 500h + (chan_num * 20h) + 0Ch</i> */
+ volatile uint32_t hcintmsk;
+ /** Host Channel 0 Transfer Size Register. <i>Offset: 500h + (chan_num * 20h) + 10h</i> */
+ volatile uint32_t hctsiz;
+ /** Host Channel 0 DMA Address Register. <i>Offset: 500h + (chan_num * 20h) + 14h</i> */
+ volatile uint32_t hcdma;
+ volatile uint32_t reserved;
+ /** Host Channel 0 DMA Buffer Address Register. <i>Offset: 500h + (chan_num * 20h) + 1Ch</i> */
+ volatile uint32_t hcdmab;
+} fh_otg_hc_regs_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Characteristics
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hcchar register.
+ */
+typedef union hcchar_data {
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Maximum packet size in bytes */
+ unsigned mps:11;
+
+ /** Endpoint number */
+ unsigned epnum:4;
+
+ /** 0: OUT, 1: IN */
+ unsigned epdir:1;
+
+ unsigned reserved:1;
+
+ /** 0: Full/high speed device, 1: Low speed device */
+ unsigned lspddev:1;
+
+ /** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */
+ unsigned eptype:2;
+
+ /** Packets per frame for periodic transfers. 0 is reserved. */
+ unsigned multicnt:2;
+
+ /** Device address */
+ unsigned devaddr:7;
+
+ /**
+ * Frame to transmit periodic transaction.
+ * 0: even, 1: odd
+ */
+ unsigned oddfrm:1;
+
+ /** Channel disable */
+ unsigned chdis:1;
+
+ /** Channel enable */
+ unsigned chen:1;
+ } b;
+} hcchar_data_t;
+
+typedef union hcsplt_data {
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Port Address */
+ unsigned prtaddr:7;
+
+ /** Hub Address */
+ unsigned hubaddr:7;
+
+ /** Transaction Position */
+ unsigned xactpos:2;
+#define FH_HCSPLIT_XACTPOS_MID 0
+#define FH_HCSPLIT_XACTPOS_END 1
+#define FH_HCSPLIT_XACTPOS_BEGIN 2
+#define FH_HCSPLIT_XACTPOS_ALL 3
+
+ /** Do Complete Split */
+ unsigned compsplt:1;
+
+ /** Reserved */
+ unsigned reserved:14;
+
+ /** Split Enable */
+ unsigned spltena:1;
+ } b;
+} hcsplt_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Interrupt
+ * Register.
+ */
+typedef union hcint_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Transfer Complete */
+ unsigned xfercomp:1;
+ /** Channel Halted */
+ unsigned chhltd:1;
+ /** AHB Error */
+ unsigned ahberr:1;
+ /** STALL Response Received */
+ unsigned stall:1;
+ /** NAK Response Received */
+ unsigned nak:1;
+ /** ACK Response Received */
+ unsigned ack:1;
+ /** NYET Response Received */
+ unsigned nyet:1;
+ /** Transaction Err */
+ unsigned xacterr:1;
+ /** Babble Error */
+ unsigned bblerr:1;
+ /** Frame Overrun */
+ unsigned frmovrun:1;
+ /** Data Toggle Error */
+ unsigned datatglerr:1;
+ /** Buffer Not Available (only for DDMA mode) */
+ unsigned bna:1;
+ /** Excessive transaction error (only for DDMA mode) */
+ unsigned xcs_xact:1;
+ /** Frame List Rollover interrupt */
+ unsigned frm_list_roll:1;
+ /** Reserved */
+ unsigned reserved14_31:18;
+ } b;
+} hcint_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Interrupt Mask
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hcintmsk register.
+ */
+typedef union hcintmsk_data {
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned xfercompl:1;
+ unsigned chhltd:1;
+ unsigned ahberr:1;
+ unsigned stall:1;
+ unsigned nak:1;
+ unsigned ack:1;
+ unsigned nyet:1;
+ unsigned xacterr:1;
+ unsigned bblerr:1;
+ unsigned frmovrun:1;
+ unsigned datatglerr:1;
+ unsigned bna:1;
+ unsigned xcs_xact:1;
+ unsigned frm_list_roll:1;
+ unsigned reserved14_31:18;
+ } b;
+} hcintmsk_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Transfer Size
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hcchar register.
+ */
+
+typedef union hctsiz_data {
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Total transfer size in bytes */
+ unsigned xfersize:19;
+
+ /** Data packets to transfer */
+ unsigned pktcnt:10;
+
+ /**
+ * Packet ID for next data packet
+ * 0: DATA0
+ * 1: DATA2
+ * 2: DATA1
+ * 3: MDATA (non-Control), SETUP (Control)
+ */
+ unsigned pid:2;
+#define FH_HCTSIZ_DATA0 0
+#define FH_HCTSIZ_DATA1 2
+#define FH_HCTSIZ_DATA2 1
+#define FH_HCTSIZ_MDATA 3
+#define FH_HCTSIZ_SETUP 3
+
+ /** Do PING protocol when 1 */
+ unsigned dopng:1;
+ } b;
+
+ /** register bits */
+ struct {
+ /** Scheduling information */
+ unsigned schinfo:8;
+
+ /** Number of transfer descriptors.
+ * Max value:
+ * 64 in general,
+ * 256 only for HS isochronous endpoint.
+ */
+ unsigned ntd:8;
+
+ /** Data packets to transfer */
+ unsigned reserved16_28:13;
+
+ /**
+ * Packet ID for next data packet
+ * 0: DATA0
+ * 1: DATA2
+ * 2: DATA1
+ * 3: MDATA (non-Control)
+ */
+ unsigned pid:2;
+
+ /** Do PING protocol when 1 */
+ unsigned dopng:1;
+ } b_ddma;
+} hctsiz_data_t;
+
+/**
+ * This union represents the bit fields in the Host DMA Address
+ * Register used in Descriptor DMA mode.
+ */
+typedef union hcdma_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved0_2:3;
+ /** Current Transfer Descriptor. Not used for ISOC */
+ unsigned ctd:8;
+ /** Start Address of Descriptor List */
+ unsigned dma_addr:21;
+ } b;
+} hcdma_data_t;
+
+/**
+ * This union represents the bit fields in the DMA Descriptor
+ * status quadlet for host mode. Read the quadlet into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union host_dma_desc_sts {
+ /** raw register data */
+ uint32_t d32;
+ /** quadlet bits */
+
+ /* for non-isochronous */
+ struct {
+ /** Number of bytes */
+ unsigned n_bytes:17;
+ /** QTD offset to jump when Short Packet received - only for IN EPs */
+ unsigned qtd_offset:6;
+ /**
+ * Set to request the core to jump to alternate QTD if
+ * Short Packet received - only for IN EPs
+ */
+ unsigned a_qtd:1;
+ /**
+ * Setup Packet bit. When set indicates that buffer contains
+ * setup packet.
+ */
+ unsigned sup:1;
+ /** Interrupt On Complete */
+ unsigned ioc:1;
+ /** End of List */
+ unsigned eol:1;
+ unsigned reserved27:1;
+ /** Rx/Tx Status */
+ unsigned sts:2;
+#define DMA_DESC_STS_PKTERR 1
+ unsigned reserved30:1;
+ /** Active Bit */
+ unsigned a:1;
+ } b;
+ /* for isochronous */
+ struct {
+ /** Number of bytes */
+ unsigned n_bytes:12;
+ unsigned reserved12_24:13;
+ /** Interrupt On Complete */
+ unsigned ioc:1;
+ unsigned reserved26_27:2;
+ /** Rx/Tx Status */
+ unsigned sts:2;
+ unsigned reserved30:1;
+ /** Active Bit */
+ unsigned a:1;
+ } b_isoc;
+} host_dma_desc_sts_t;
+
+#define MAX_DMA_DESC_SIZE 131071
+#define MAX_DMA_DESC_NUM_GENERIC 64
+#define MAX_DMA_DESC_NUM_HS_ISOC 256
+#define MAX_FRLIST_EN_NUM 64
+/**
+ * Host-mode DMA Descriptor structure
+ *
+ * DMA Descriptor structure contains two quadlets:
+ * Status quadlet and Data buffer pointer.
+ */
+typedef struct fh_otg_host_dma_desc {
+ /** DMA Descriptor status quadlet */
+ host_dma_desc_sts_t status;
+ /** DMA Descriptor data buffer pointer */
+ uint32_t buf;
+} fh_otg_host_dma_desc_t;
+
+/** OTG Host Interface Structure.
+ *
+ * The OTG Host Interface Structure structure contains information
+ * needed to manage the FH_otg controller acting in host mode. It
+ * represents the programming view of the host-specific aspects of the
+ * controller.
+ */
+typedef struct fh_otg_host_if {
+ /** Host Global Registers starting at offset 400h.*/
+ fh_otg_host_global_regs_t *host_global_regs;
+#define FH_OTG_HOST_GLOBAL_REG_OFFSET 0x400
+
+ /** Host Port 0 Control and Status Register */
+ volatile uint32_t *hprt0;
+#define FH_OTG_HOST_PORT_REGS_OFFSET 0x440
+
+ /** Host Channel Specific Registers at offsets 500h-5FCh. */
+ fh_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS];
+#define FH_OTG_HOST_CHAN_REGS_OFFSET 0x500
+#define FH_OTG_CHAN_REGS_OFFSET 0x20
+
+ /* Host configuration information */
+ /** Number of Host Channels (range: 1-16) */
+ uint8_t num_host_channels;
+ /** Periodic EPs supported (0: no, 1: yes) */
+ uint8_t perio_eps_supported;
+ /** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */
+ uint16_t perio_tx_fifo_size;
+
+} fh_otg_host_if_t;
+
+/**
+ * This union represents the bit fields in the Power and Clock Gating Control
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union pcgcctl_data {
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Stop Pclk */
+ unsigned stoppclk:1;
+ /** Gate Hclk */
+ unsigned gatehclk:1;
+ /** Power Clamp */
+ unsigned pwrclmp:1;
+ /** Reset Power Down Modules */
+ unsigned rstpdwnmodule:1;
+ /** Reserved */
+ unsigned reserved:1;
+ /** Enable Sleep Clock Gating (Enbl_L1Gating) */
+ unsigned enbl_sleep_gating:1;
+ /** PHY In Sleep (PhySleep) */
+ unsigned phy_in_sleep:1;
+ /** Deep Sleep*/
+ unsigned deep_sleep:1;
+ unsigned resetaftsusp:1;
+ unsigned restoremode:1;
+ unsigned enbl_extnd_hiber:1;
+ unsigned extnd_hiber_pwrclmp:1;
+ unsigned extnd_hiber_switch:1;
+ unsigned ess_reg_restored:1;
+ unsigned prt_clk_sel:2;
+ unsigned port_power:1;
+ unsigned max_xcvrselect:2;
+ unsigned max_termsel:1;
+ unsigned mac_dev_addr:7;
+ unsigned p2hd_dev_enum_spd:2;
+ unsigned p2hd_prt_spd:2;
+ unsigned if_dev_mode:1;
+ } b;
+} pcgcctl_data_t;
+
+/**
+ * This union represents the bit fields in the Global Data FIFO Software
+ * Configuration Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union gdfifocfg_data {
+ /* raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** OTG Data FIFO depth */
+ unsigned gdfifocfg:16;
+ /** Start address of EP info controller */
+ unsigned epinfobase:16;
+ } b;
+} gdfifocfg_data_t;
+
+/**
+ * This union represents the bit fields in the Global Power Down Register
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union gpwrdn_data {
+ /* raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** PMU Interrupt Select */
+ unsigned pmuintsel:1;
+ /** PMU Active */
+ unsigned pmuactv:1;
+ /** Restore */
+ unsigned restore:1;
+ /** Power Down Clamp */
+ unsigned pwrdnclmp:1;
+ /** Power Down Reset */
+ unsigned pwrdnrstn:1;
+ /** Power Down Switch */
+ unsigned pwrdnswtch:1;
+ /** Disable VBUS */
+ unsigned dis_vbus:1;
+ /** Line State Change */
+ unsigned lnstschng:1;
+ /** Line state change mask */
+ unsigned lnstchng_msk:1;
+ /** Reset Detected */
+ unsigned rst_det:1;
+ /** Reset Detect mask */
+ unsigned rst_det_msk:1;
+ /** Disconnect Detected */
+ unsigned disconn_det:1;
+ /** Disconnect Detect mask */
+ unsigned disconn_det_msk:1;
+ /** Connect Detected*/
+ unsigned connect_det:1;
+ /** Connect Detected Mask*/
+ unsigned connect_det_msk:1;
+ /** SRP Detected */
+ unsigned srp_det:1;
+ /** SRP Detect mask */
+ unsigned srp_det_msk:1;
+ /** Status Change Interrupt */
+ unsigned sts_chngint:1;
+ /** Status Change Interrupt Mask */
+ unsigned sts_chngint_msk:1;
+ /** Line State */
+ unsigned linestate:2;
+ /** Indicates current mode (status of IDDIG signal) */
+ unsigned idsts:1;
+ /** B Session Valid signal status*/
+ unsigned bsessvld:1;
+ /** ADP Event Detected */
+ unsigned adp_int:1;
+ /** Multi Valued ID pin */
+ unsigned mult_val_id_bc:5;
+ /** Reserved 29_31 */
+ unsigned reserved29_31:3;
+ } b;
+} gpwrdn_data_t;
+
+#endif
diff --git a/drivers/usb/host/fh_otg/fh_otg/test/Makefile b/drivers/usb/host/fh_otg/fh_otg/test/Makefile
new file mode 100755
index 00000000..fc453759
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/test/Makefile
@@ -0,0 +1,16 @@
+
+PERL=/usr/bin/perl
+PL_TESTS=test_sysfs.pl test_mod_param.pl
+
+.PHONY : test
+test : perl_tests
+
+perl_tests :
+ @echo
+ @echo Running perl tests
+ @for test in $(PL_TESTS); do \
+ if $(PERL) ./$$test ; then \
+ echo "=======> $$test, PASSED" ; \
+ else echo "=======> $$test, FAILED" ; \
+ fi \
+ done
diff --git a/drivers/usb/host/fh_otg/fh_otg/test/fh_otg_test.pm b/drivers/usb/host/fh_otg/fh_otg/test/fh_otg_test.pm
new file mode 100755
index 00000000..b4b4c294
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/test/fh_otg_test.pm
@@ -0,0 +1,337 @@
+package fh_otg_test;
+
+use strict;
+use Exporter ();
+
+use vars qw(@ISA @EXPORT
+$sysfsdir $paramdir $errors $params
+);
+
+@ISA = qw(Exporter);
+
+#
+# Globals
+#
+$sysfsdir = "/sys/devices/lm0";
+$paramdir = "/sys/module/fh_otg";
+$errors = 0;
+
+$params = [
+ {
+ NAME => "otg_cap",
+ DEFAULT => 0,
+ ENUM => [],
+ LOW => 0,
+ HIGH => 2
+ },
+ {
+ NAME => "dma_enable",
+ DEFAULT => 0,
+ ENUM => [],
+ LOW => 0,
+ HIGH => 1
+ },
+ {
+ NAME => "dma_burst_size",
+ DEFAULT => 32,
+ ENUM => [1, 4, 8, 16, 32, 64, 128, 256],
+ LOW => 1,
+ HIGH => 256
+ },
+ {
+ NAME => "host_speed",
+ DEFAULT => 0,
+ ENUM => [],
+ LOW => 0,
+ HIGH => 1
+ },
+ {
+ NAME => "host_support_fs_ls_low_power",
+ DEFAULT => 0,
+ ENUM => [],
+ LOW => 0,
+ HIGH => 1
+ },
+ {
+ NAME => "host_ls_low_power_phy_clk",
+ DEFAULT => 0,
+ ENUM => [],
+ LOW => 0,
+ HIGH => 1
+ },
+ {
+ NAME => "dev_speed",
+ DEFAULT => 0,
+ ENUM => [],
+ LOW => 0,
+ HIGH => 1
+ },
+ {
+ NAME => "enable_dynamic_fifo",
+ DEFAULT => 1,
+ ENUM => [],
+ LOW => 0,
+ HIGH => 1
+ },
+ {
+ NAME => "data_fifo_size",
+ DEFAULT => 8192,
+ ENUM => [],
+ LOW => 32,
+ HIGH => 32768
+ },
+ {
+ NAME => "dev_rx_fifo_size",
+ DEFAULT => 1064,
+ ENUM => [],
+ LOW => 16,
+ HIGH => 32768
+ },
+ {
+ NAME => "dev_nperio_tx_fifo_size",
+ DEFAULT => 1024,
+ ENUM => [],
+ LOW => 16,
+ HIGH => 32768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_1",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_2",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_3",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_4",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_5",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_6",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_7",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_8",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_9",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_10",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_11",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_12",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_13",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_14",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "dev_perio_tx_fifo_size_15",
+ DEFAULT => 256,
+ ENUM => [],
+ LOW => 4,
+ HIGH => 768
+ },
+ {
+ NAME => "host_rx_fifo_size",
+ DEFAULT => 1024,
+ ENUM => [],
+ LOW => 16,
+ HIGH => 32768
+ },
+ {
+ NAME => "host_nperio_tx_fifo_size",
+ DEFAULT => 1024,
+ ENUM => [],
+ LOW => 16,
+ HIGH => 32768
+ },
+ {
+ NAME => "host_perio_tx_fifo_size",
+ DEFAULT => 1024,
+ ENUM => [],
+ LOW => 16,
+ HIGH => 32768
+ },
+ {
+ NAME => "max_transfer_size",
+ DEFAULT => 65535,
+ ENUM => [],
+ LOW => 2047,
+ HIGH => 65535
+ },
+ {
+ NAME => "max_packet_count",
+ DEFAULT => 511,
+ ENUM => [],
+ LOW => 15,
+ HIGH => 511
+ },
+ {
+ NAME => "host_channels",
+ DEFAULT => 12,
+ ENUM => [],
+ LOW => 1,
+ HIGH => 16
+ },
+ {
+ NAME => "dev_endpoints",
+ DEFAULT => 6,
+ ENUM => [],
+ LOW => 1,
+ HIGH => 15
+ },
+ {
+ NAME => "phy_type",
+ DEFAULT => 1,
+ ENUM => [],
+ LOW => 0,
+ HIGH => 2
+ },
+ {
+ NAME => "phy_utmi_width",
+ DEFAULT => 16,
+ ENUM => [8, 16],
+ LOW => 8,
+ HIGH => 16
+ },
+ {
+ NAME => "phy_ulpi_ddr",
+ DEFAULT => 0,
+ ENUM => [],
+ LOW => 0,
+ HIGH => 1
+ },
+ ];
+
+
+#
+#
+sub check_arch {
+ $_ = `uname -m`;
+ chomp;
+ unless (m/armv4tl/) {
+ warn "# \n# Can't execute on $_. Run on integrator platform.\n# \n";
+ return 0;
+ }
+ return 1;
+}
+
+#
+#
+sub load_module {
+ my $params = shift;
+ print "\nRemoving Module\n";
+ system "rmmod fh_otg";
+ print "Loading Module\n";
+ if ($params ne "") {
+ print "Module Parameters: $params\n";
+ }
+ if (system("modprobe fh_otg $params")) {
+ warn "Unable to load module\n";
+ return 0;
+ }
+ return 1;
+}
+
+#
+#
+sub test_status {
+ my $arg = shift;
+
+ print "\n";
+
+ if (defined $arg) {
+ warn "WARNING: $arg\n";
+ }
+
+ if ($errors > 0) {
+ warn "TEST FAILED with $errors errors\n";
+ return 0;
+ } else {
+ print "TEST PASSED\n";
+ return 0 if (defined $arg);
+ }
+ return 1;
+}
+
+#
+#
+@EXPORT = qw(
+$sysfsdir
+$paramdir
+$params
+$errors
+check_arch
+load_module
+test_status
+);
+
+1;
diff --git a/drivers/usb/host/fh_otg/fh_otg/test/test_mod_param.pl b/drivers/usb/host/fh_otg/fh_otg/test/test_mod_param.pl
new file mode 100755
index 00000000..f7c6549c
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/test/test_mod_param.pl
@@ -0,0 +1,133 @@
+#!/usr/bin/perl -w
+#
+# Run this program on the integrator.
+#
+# - Tests module parameter default values.
+# - Tests setting of valid module parameter values via modprobe.
+# - Tests invalid module parameter values.
+# -----------------------------------------------------------------------------
+use strict;
+use fh_otg_test;
+
+check_arch() or die;
+
+#
+#
+sub test {
+ my ($param,$expected) = @_;
+ my $value = get($param);
+
+ if ($value == $expected) {
+ print "$param = $value, okay\n";
+ }
+
+ else {
+ warn "ERROR: value of $param != $expected, $value\n";
+ $errors ++;
+ }
+}
+
+#
+#
+sub get {
+ my $param = shift;
+ my $tmp = `cat $paramdir/$param`;
+ chomp $tmp;
+ return $tmp;
+}
+
+#
+#
+sub test_main {
+
+ print "\nTesting Module Parameters\n";
+
+ load_module("") or die;
+
+ # Test initial values
+ print "\nTesting Default Values\n";
+ foreach (@{$params}) {
+ test ($_->{NAME}, $_->{DEFAULT});
+ }
+
+ # Test low value
+ print "\nTesting Low Value\n";
+ my $cmd_params = "";
+ foreach (@{$params}) {
+ $cmd_params = $cmd_params . "$_->{NAME}=$_->{LOW} ";
+ }
+ load_module($cmd_params) or die;
+
+ foreach (@{$params}) {
+ test ($_->{NAME}, $_->{LOW});
+ }
+
+ # Test high value
+ print "\nTesting High Value\n";
+ $cmd_params = "";
+ foreach (@{$params}) {
+ $cmd_params = $cmd_params . "$_->{NAME}=$_->{HIGH} ";
+ }
+ load_module($cmd_params) or die;
+
+ foreach (@{$params}) {
+ test ($_->{NAME}, $_->{HIGH});
+ }
+
+ # Test Enum
+ print "\nTesting Enumerated\n";
+ foreach (@{$params}) {
+ if (defined $_->{ENUM}) {
+ my $value;
+ foreach $value (@{$_->{ENUM}}) {
+ $cmd_params = "$_->{NAME}=$value";
+ load_module($cmd_params) or die;
+ test ($_->{NAME}, $value);
+ }
+ }
+ }
+
+ # Test Invalid Values
+ print "\nTesting Invalid Values\n";
+ $cmd_params = "";
+ foreach (@{$params}) {
+ $cmd_params = $cmd_params . sprintf "$_->{NAME}=%d ", $_->{LOW}-1;
+ }
+ load_module($cmd_params) or die;
+
+ foreach (@{$params}) {
+ test ($_->{NAME}, $_->{DEFAULT});
+ }
+
+ $cmd_params = "";
+ foreach (@{$params}) {
+ $cmd_params = $cmd_params . sprintf "$_->{NAME}=%d ", $_->{HIGH}+1;
+ }
+ load_module($cmd_params) or die;
+
+ foreach (@{$params}) {
+ test ($_->{NAME}, $_->{DEFAULT});
+ }
+
+ print "\nTesting Enumerated\n";
+ foreach (@{$params}) {
+ if (defined $_->{ENUM}) {
+ my $value;
+ foreach $value (@{$_->{ENUM}}) {
+ $value = $value + 1;
+ $cmd_params = "$_->{NAME}=$value";
+ load_module($cmd_params) or die;
+ test ($_->{NAME}, $_->{DEFAULT});
+ $value = $value - 2;
+ $cmd_params = "$_->{NAME}=$value";
+ load_module($cmd_params) or die;
+ test ($_->{NAME}, $_->{DEFAULT});
+ }
+ }
+ }
+
+ test_status() or die;
+}
+
+test_main();
+0;
diff --git a/drivers/usb/host/fh_otg/fh_otg/test/test_sysfs.pl b/drivers/usb/host/fh_otg/fh_otg/test/test_sysfs.pl
new file mode 100755
index 00000000..0eecbc7f
--- /dev/null
+++ b/drivers/usb/host/fh_otg/fh_otg/test/test_sysfs.pl
@@ -0,0 +1,193 @@
+#!/usr/bin/perl -w
+#
+# Run this program on the integrator
+# - Tests select sysfs attributes.
+# - Todo ... test more attributes, hnp/srp, buspower/bussuspend, etc.
+# -----------------------------------------------------------------------------
+use strict;
+use fh_otg_test;
+
+check_arch() or die;
+
+#
+#
+sub test {
+ my ($attr,$expected) = @_;
+ my $string = get($attr);
+
+ if ($string eq $expected) {
+ printf("$attr = $string, okay\n");
+ }
+ else {
+ warn "ERROR: value of $attr != $expected, $string\n";
+ $errors ++;
+ }
+}
+
+#
+#
+sub set {
+ my ($reg, $value) = @_;
+ system "echo $value > $sysfsdir/$reg";
+}
+
+#
+#
+sub get {
+ my $attr = shift;
+ my $string = `cat $sysfsdir/$attr`;
+ chomp $string;
+ if ($string =~ m/\s\=\s/) {
+ my $tmp;
+ ($tmp, $string) = split /\s=\s/, $string;
+ }
+ return $string;
+}
+
+#
+#
+sub test_main {
+ print("\nTesting Sysfs Attributes\n");
+
+ load_module("") or die;
+
+ # Test initial values of regoffset/regvalue/guid/gsnpsid
+ print("\nTesting Default Values\n");
+
+ test("regoffset", "0xffffffff");
+ test("regvalue", "invalid offset");
+ test("guid", "0x12345678"); # this will fail if it has been changed
+ test("gsnpsid", "0x4f54200a");
+
+ # Test operation of regoffset/regvalue
+ print("\nTesting regoffset\n");
+ set('regoffset', '5a5a5a5a');
+ test("regoffset", "0xffffffff");
+
+ set('regoffset', '0');
+ test("regoffset", "0x00000000");
+
+ set('regoffset', '40000');
+ test("regoffset", "0x00000000");
+
+ set('regoffset', '3ffff');
+ test("regoffset", "0x0003ffff");
+
+ set('regoffset', '1');
+ test("regoffset", "0x00000001");
+
+ print("\nTesting regvalue\n");
+ set('regoffset', '3c');
+ test("regvalue", "0x12345678");
+ set('regvalue', '5a5a5a5a');
+ test("regvalue", "0x5a5a5a5a");
+ set('regvalue','a5a5a5a5');
+ test("regvalue", "0xa5a5a5a5");
+ set('guid','12345678');
+
+ # Test HNP Capable
+ print("\nTesting HNP Capable bit\n");
+ set('hnpcapable', '1');
+ test("hnpcapable", "0x1");
+ set('hnpcapable','0');
+ test("hnpcapable", "0x0");
+
+ set('regoffset','0c');
+
+ my $old = get('gusbcfg');
+ print("setting hnpcapable\n");
+ set('hnpcapable', '1');
+ test("hnpcapable", "0x1");
+ test('gusbcfg', sprintf "0x%08x", (oct ($old) | (1<<9)));
+ test('regvalue', sprintf "0x%08x", (oct ($old) | (1<<9)));
+
+ $old = get('gusbcfg');
+ print("clearing hnpcapable\n");
+ set('hnpcapable', '0');
+ test("hnpcapable", "0x0");
+ test ('gusbcfg', sprintf "0x%08x", oct ($old) & (~(1<<9)));
+ test ('regvalue', sprintf "0x%08x", oct ($old) & (~(1<<9)));
+
+ # Test SRP Capable
+ print("\nTesting SRP Capable bit\n");
+ set('srpcapable', '1');
+ test("srpcapable", "0x1");
+ set('srpcapable','0');
+ test("srpcapable", "0x0");
+
+ set('regoffset','0c');
+
+ $old = get('gusbcfg');
+ print("setting srpcapable\n");
+ set('srpcapable', '1');
+ test("srpcapable", "0x1");
+ test('gusbcfg', sprintf "0x%08x", (oct ($old) | (1<<8)));
+ test('regvalue', sprintf "0x%08x", (oct ($old) | (1<<8)));
+
+ $old = get('gusbcfg');
+ print("clearing srpcapable\n");
+ set('srpcapable', '0');
+ test("srpcapable", "0x0");
+ test('gusbcfg', sprintf "0x%08x", oct ($old) & (~(1<<8)));
+ test('regvalue', sprintf "0x%08x", oct ($old) & (~(1<<8)));
+
+ # Test GGPIO
+ print("\nTesting GGPIO\n");
+ set('ggpio','5a5a5a5a');
+ test('ggpio','0x5a5a0000');
+ set('ggpio','a5a5a5a5');
+ test('ggpio','0xa5a50000');
+ set('ggpio','11110000');
+ test('ggpio','0x11110000');
+ set('ggpio','00001111');
+ test('ggpio','0x00000000');
+
+ # Test DEVSPEED
+ print("\nTesting DEVSPEED\n");
+ set('regoffset','800');
+ $old = get('regvalue');
+ set('devspeed','0');
+ test('devspeed','0x0');
+ test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3)));
+ set('devspeed','1');
+ test('devspeed','0x1');
+ test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3) | 1));
+ set('devspeed','2');
+ test('devspeed','0x2');
+ test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3) | 2));
+ set('devspeed','3');
+ test('devspeed','0x3');
+ test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3) | 3));
+ set('devspeed','4');
+ test('devspeed','0x0');
+ test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3)));
+ set('devspeed','5');
+ test('devspeed','0x1');
+ test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3) | 1));
+
+
+ # mode Returns the current mode: 0 for device mode, 1 for host mode. Read
+ # hnp Initiate the Host Negotiation Protocol. Read returns the status. Read/Write
+ # srp Initiate the Session Request Protocol. Read returns the status. Read/Write
+ # buspower Get or Set the Power State of the bus (0 - Off or 1 - On) Read/Write
+ # bussuspend Suspend the USB bus. Read/Write
+ # busconnected Get the connection status of the bus Read
+
+ # gotgctl Get or set the Core Control Status Register. Read/Write
+ ## gusbcfg Get or set the Core USB Configuration Register Read/Write
+ # grxfsiz Get or set the Receive FIFO Size Register Read/Write
+ # gnptxfsiz Get or set the non-periodic Transmit Size Register Read/Write
+ # gpvndctl Get or set the PHY Vendor Control Register Read/Write
+ ## ggpio Get the value in the lower 16-bits of the General Purpose IO Register or Set the upper 16 bits. Read/Write
+ ## guid Get or set the value of the User ID Register Read/Write
+ ## gsnpsid Get the value of the Synopsys ID Regester Read
+ ## devspeed Get or set the device speed setting in the DCFG register Read/Write
+ # enumspeed Gets the device enumeration Speed. Read
+ # hptxfsiz Get the value of the Host Periodic Transmit FIFO Read
+ # hprt0 Get or Set the value in the Host Port Control and Status Register Read/Write
+
+ test_status("TEST NYI") or die;
+}
+
+test_main();
+0;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index f9cf3f04..d08a2708 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1003,7 +1003,7 @@ MODULE_LICENSE ("GPL");
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
-#if defined(CONFIG_ARCH_S3C2410) || defined(CONFIG_ARCH_S3C64XX)
+#ifdef CONFIG_ARCH_S3C64XX
#include "ohci-s3c2410.c"
#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
#endif
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 7c9a4d55..7aeb729b 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -21,14 +21,21 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <plat/usb-control.h>
+
+#include <mach/hardware.h>
+#include <mach/usb-control.h>
#define valid_port(idx) ((idx) == 1 || (idx) == 2)
+extern void usb_host_clk_en(void);
+
/* clock device associated with the hcd */
static struct clk *clk;
+
+#if defined(CONFIG_ARCH_2410)
static struct clk *usb_clk;
+#endif
/* forward definitions */
@@ -47,8 +54,10 @@ static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
dev_dbg(&dev->dev, "s3c2410_start_hc:\n");
+#if defined(CONFIG_ARCH_2410)
clk_enable(usb_clk);
mdelay(2); /* let the bus clock stabilise */
+#endif
clk_enable(clk);
@@ -56,8 +65,9 @@ static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
info->hcd = hcd;
info->report_oc = s3c2410_hcd_oc;
- if (info->enable_oc != NULL)
+ if (info->enable_oc != NULL) {
(info->enable_oc)(info, 1);
+ }
}
}
@@ -71,12 +81,15 @@ static void s3c2410_stop_hc(struct platform_device *dev)
info->report_oc = NULL;
info->hcd = NULL;
- if (info->enable_oc != NULL)
+ if (info->enable_oc != NULL) {
(info->enable_oc)(info, 0);
+ }
}
clk_disable(clk);
+#if defined(CONFIG_ARCH_2410)
clk_disable(usb_clk);
+#endif
}
/* ohci_s3c2410_hub_status_data
@@ -86,14 +99,14 @@ static void s3c2410_stop_hc(struct platform_device *dev)
*/
static int
-ohci_s3c2410_hub_status_data(struct usb_hcd *hcd, char *buf)
+ohci_s3c2410_hub_status_data (struct usb_hcd *hcd, char *buf)
{
struct s3c2410_hcd_info *info = to_s3c2410_info(hcd);
struct s3c2410_hcd_port *port;
int orig;
int portno;
- orig = ohci_hub_status_data(hcd, buf);
+ orig = ohci_hub_status_data (hcd, buf);
if (info == NULL)
return orig;
@@ -143,7 +156,7 @@ static void s3c2410_usb_set_power(struct s3c2410_hcd_info *info,
* request.
*/
-static int ohci_s3c2410_hub_control(
+static int ohci_s3c2410_hub_control (
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
@@ -197,8 +210,9 @@ static int ohci_s3c2410_hub_control(
dev_dbg(hcd->self.controller,
"ClearPortFeature: OVER_CURRENT\n");
- if (valid_port(wIndex))
+ if (valid_port(wIndex)) {
info->port[wIndex-1].oc_status = 0;
+ }
goto out;
@@ -239,11 +253,8 @@ static int ohci_s3c2410_hub_control(
desc->wHubCharacteristics |= cpu_to_le16(0x0001);
if (info->enable_oc) {
- desc->wHubCharacteristics &= ~cpu_to_le16(
- HUB_CHAR_OCPM);
- desc->wHubCharacteristics |= cpu_to_le16(
- 0x0008 |
- 0x0001);
+ desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
+ desc->wHubCharacteristics |= cpu_to_le16(0x0008|0x0001);
}
dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
@@ -257,11 +268,13 @@ static int ohci_s3c2410_hub_control(
dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
if (valid_port(wIndex)) {
- if (info->port[wIndex-1].oc_changed)
+ if (info->port[wIndex-1].oc_changed) {
*data |= cpu_to_le32(RH_PS_OCIC);
+ }
- if (info->port[wIndex-1].oc_status)
+ if (info->port[wIndex-1].oc_status) {
*data |= cpu_to_le32(RH_PS_POCI);
+ }
}
}
@@ -319,7 +332,7 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc)
*/
static void
-usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
+usb_hcd_s3c2410_remove (struct usb_hcd *hcd, struct platform_device *dev)
{
usb_remove_hcd(hcd);
s3c2410_stop_hc(dev);
@@ -337,12 +350,16 @@ usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
* through the hotplug entry's driver_data.
*
*/
-static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
+static int usb_hcd_s3c2410_probe (const struct hc_driver *driver,
struct platform_device *dev)
{
struct usb_hcd *hcd = NULL;
int retval;
+#if !defined(CONFIG_ARCH_2410)
+ usb_host_clk_en();
+#endif
+
s3c2410_usb_set_power(dev->dev.platform_data, 1, 1);
s3c2410_usb_set_power(dev->dev.platform_data, 2, 1);
@@ -351,7 +368,7 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
return -ENOMEM;
hcd->rsrc_start = dev->resource[0].start;
- hcd->rsrc_len = resource_size(&dev->resource[0]);
+ hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_err(&dev->dev, "request_mem_region failed\n");
@@ -362,16 +379,18 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
clk = clk_get(&dev->dev, "usb-host");
if (IS_ERR(clk)) {
dev_err(&dev->dev, "cannot get usb-host clock\n");
- retval = PTR_ERR(clk);
+ retval = -ENOENT;
goto err_mem;
}
+#if defined(CONFIG_ARCH_2410)
usb_clk = clk_get(&dev->dev, "usb-bus-host");
if (IS_ERR(usb_clk)) {
- dev_err(&dev->dev, "cannot get usb-bus-host clock\n");
- retval = PTR_ERR(usb_clk);
+ dev_err(&dev->dev, "cannot get usb-host clock\n");
+ retval = -ENOENT;
goto err_clk;
}
+#endif
s3c2410_start_hc(dev, hcd);
@@ -393,10 +412,13 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
err_ioremap:
s3c2410_stop_hc(dev);
iounmap(hcd->regs);
+
+#if defined(CONFIG_ARCH_2410)
clk_put(usb_clk);
err_clk:
clk_put(clk);
+#endif
err_mem:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
@@ -409,19 +431,17 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
/*-------------------------------------------------------------------------*/
static int
-ohci_s3c2410_start(struct usb_hcd *hcd)
+ohci_s3c2410_start (struct usb_hcd *hcd)
{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int ret;
- ret = ohci_init(ohci);
- if (ret < 0)
+ if ((ret = ohci_init(ohci)) < 0)
return ret;
- ret = ohci_run(ohci);
- if (ret < 0) {
- err("can't start %s", hcd->self.bus_name);
- ohci_stop(hcd);
+ if ((ret = ohci_run (ohci)) < 0) {
+ err ("can't start %s", hcd->self.bus_name);
+ ohci_stop (hcd);
return ret;
}
@@ -473,12 +493,12 @@ static const struct hc_driver ohci_s3c2410_hc_driver = {
/* device driver */
-static int __devinit ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
+static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
{
return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev);
}
-static int __devexit ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
+static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
@@ -488,7 +508,7 @@ static int __devexit ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
static struct platform_driver ohci_hcd_s3c2410_driver = {
.probe = ohci_hcd_s3c2410_drv_probe,
- .remove = __devexit_p(ohci_hcd_s3c2410_drv_remove),
+ .remove = ohci_hcd_s3c2410_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
/*.suspend = ohci_hcd_s3c2410_drv_suspend, */
/*.resume = ohci_hcd_s3c2410_drv_resume, */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 21d816e9..673ee6b0 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -606,7 +606,14 @@ config HP_WATCHDOG
will only load on an HP ProLiant system with a minimum of iLO2 support.
To compile this driver as a module, choose M here: the module will be
called hpwdt.
-
+
+config FH_WATCHDOG
+ tristate "fullhan watchdog driver"
+
+ help
+ A software monitoring watchdog and NMI sourcing driver.
+
+
config HPWDT_NMI_DECODING
bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on HP_WATCHDOG
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index ed26f709..0cb0e134 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
+obj-$(CONFIG_FH_WATCHDOG) += fh_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/fh_wdt.c b/drivers/watchdog/fh_wdt.c
new file mode 100644
index 00000000..17ff23c5
--- /dev/null
+++ b/drivers/watchdog/fh_wdt.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright 2010-2011 Picochip Ltd., Jamie Iles
+ * http://www.picochip.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file implements a driver for the Synopsys DesignWare watchdog device
+ * in the many ARM subsystems. The watchdog has 16 different timeout periods
+ * and these are a function of the input clock frequency.
+ *
+ * The DesignWare watchdog cannot be stopped once it has been started so we
+ * use a software timer to implement a ping that will keep the watchdog alive.
+ * If we receive an expected close for the watchdog then we keep the timer
+ * running, otherwise the timer is stopped and the watchdog will expire.
+ */
+#define pr_fmt(fmt) "fh_wdt: " fmt
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <linux/interrupt.h>
+#include <mach/pmu.h>
+#include <mach/fh_wdt.h>
+
+#define WDT_RESPONSE_MODE
+
+#define WDT_TIMER_MODE
+
+#ifndef WDT_TIMER_MODE
+#define WDT_NOTIMER_MODE
+#endif
+
+#define WDOG_CONTROL_REG_OFFSET 0x00
+#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
+#define WDOG_CONTROL_REG_RMOD_MASK 0x02
+#define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
+#define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
+#define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c
+#define WDOG_COUNTER_RESTART_KICK_VALUE 0x76
+
+/* Hardware timeout in seconds */
+#define WDT_HW_TIMEOUT 2
+/* User land timeout */
+#define WDT_HEARTBEAT 15
+static int heartbeat = WDT_HEARTBEAT;
+
+/* The maximum TOP (timeout period) value that can be set in the watchdog. */
+#define FH_WDT_MAX_TOP 15
+
+static int curr_clk_rate;
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define WDT_TIMEOUT (HZ / 2)
+
+struct fh_wdt_t fh_wdt;
+
+static inline int fh_wdt_is_enabled(void)
+{
+ return readl(fh_wdt.regs + WDOG_CONTROL_REG_OFFSET) &
+ WDOG_CONTROL_REG_WDT_EN_MASK;
+}
+
+#define WDT_CLOCK clk_get_rate(fh_wdt.clk)
+
+static inline int fh_wdt_top_in_seconds(unsigned top)
+{
+ /*
+ * There are 16 possible timeout values in 0..15 where the number of
+ * cycles is 2 ^ (16 + i) and the watchdog counts down.
+ */
+ return (1 << (16 + top)) / WDT_CLOCK;
+}
+
+static inline void fh_wdt_set_next_heartbeat(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh_wdt.lock, flags);
+ fh_wdt.next_heartbeat = jiffies + heartbeat * HZ;
+ spin_unlock_irqrestore(&fh_wdt.lock, flags);
+}
+
+static int fh_wdt_set_top(unsigned top_s)
+{
+ int i, val, top_val = FH_WDT_MAX_TOP;
+ unsigned long flags;
+ /*
+ * Iterate over the timeout values until we find the closest match. We
+ * always look for >=.
+ */
+ spin_lock_irqsave(&fh_wdt.lock, flags);
+ for (i = 0; i <= FH_WDT_MAX_TOP; ++i)
+ if (fh_wdt_top_in_seconds(i) >= top_s) {
+ top_val = i;
+ break;
+ }
+
+ /* Set the new value in the watchdog. */
+ printk("[wdt] set topval: %d", top_val);
+ writel(top_val, fh_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+
+ fh_wdt_set_next_heartbeat();
+
+ val = fh_wdt_top_in_seconds(top_val);
+ spin_unlock_irqrestore(&fh_wdt.lock, flags);
+ return val;
+}
+
+#ifndef WDT_TIMER_MODE
+static int fh_wdt_get_top(void)
+{
+ unsigned int val;
+
+ val = readl(fh_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+ val = (1 << (16 + val)) / WDT_CLOCK;
+ return val;
+}
+#endif
+
+static void fh_wdt_keepalive(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh_wdt.lock, flags);
+ writel(WDOG_COUNTER_RESTART_KICK_VALUE, fh_wdt.regs +
+ WDOG_COUNTER_RESTART_REG_OFFSET);
+ spin_unlock_irqrestore(&fh_wdt.lock, flags);
+}
+
+static void fh_wdt_ping(unsigned long data)
+{
+ if (time_before(jiffies, fh_wdt.next_heartbeat) ||
+ (!nowayout && !fh_wdt.in_use)) {
+ fh_wdt_keepalive();
+ mod_timer(&fh_wdt.timer, jiffies + WDT_TIMEOUT);
+ } else
+ pr_crit("keepalive missed, machine will reset\n");
+}
+
+static int fh_wdt_open(struct inode *inode, struct file *filp)
+{
+ unsigned long flags;
+
+ if (test_and_set_bit(0, &fh_wdt.in_use))
+ return -EBUSY;
+
+ /* Make sure we don't get unloaded. */
+ __module_get(THIS_MODULE);
+
+ spin_lock_irqsave(&fh_wdt.lock, flags);
+
+ if(fh_wdt.plat_data && fh_wdt.plat_data->resume)
+ fh_wdt.plat_data->resume();
+
+ fh_wdt_set_top(WDT_HW_TIMEOUT);
+ if (!fh_wdt_is_enabled())
+ {
+ /*
+ * The watchdog is not currently enabled. Set the timeout to
+ * the maximum and then start it.
+ */
+ u32 value;
+ value = WDOG_CONTROL_REG_WDT_EN_MASK;
+#ifdef WDT_RESPONSE_MODE
+ value |= WDOG_CONTROL_REG_RMOD_MASK;
+#endif
+ writel(value, fh_wdt.regs + WDOG_CONTROL_REG_OFFSET);
+ fh_wdt_keepalive();
+ }
+#ifdef WDT_TIMER_MODE
+ fh_wdt_set_next_heartbeat();
+#else
+ del_timer(&fh_wdt.timer);
+#endif
+ spin_unlock_irqrestore(&fh_wdt.lock, flags);
+
+ return nonseekable_open(inode, filp);
+}
+
+ssize_t fh_wdt_write(struct file *filp, const char __user *buf, size_t len,
+ loff_t *offset)
+{
+ if (!len)
+ return 0;
+
+ if (!nowayout) {
+ size_t i;
+
+ fh_wdt.expect_close = 0;
+
+ for (i = 0; i < len; ++i) {
+ char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+
+ if (c == 'V') {
+ fh_wdt.expect_close = 1;
+ break;
+ }
+ }
+ }
+
+ fh_wdt_set_next_heartbeat();
+ mod_timer(&fh_wdt.timer, jiffies + WDT_TIMEOUT);
+
+ return len;
+}
+
+#ifndef WDT_TIMER_MODE
+static u32 fh_wdt_time_left(void)
+{
+ return readl(fh_wdt.regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
+ WDT_CLOCK;
+}
+#endif
+
+static const struct watchdog_info fh_wdt_ident = {
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE,
+ .identity = "Synopsys DesignWare Watchdog",
+};
+
+static long fh_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ unsigned long val;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user((struct watchdog_info *)arg, &fh_wdt_ident,
+ sizeof(fh_wdt_ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, (int *)arg);
+
+ case WDIOC_KEEPALIVE:
+#ifdef WDT_TIMER_MODE
+ fh_wdt_set_next_heartbeat();
+#else
+ fh_wdt_keepalive();
+#endif
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(val, (int __user *)arg))
+ return -EFAULT;
+#ifdef WDT_TIMER_MODE
+ pr_debug("[wdt] settime value %lu", val);
+ heartbeat = val;
+ fh_wdt_keepalive();
+ fh_wdt_set_next_heartbeat();
+#else
+ fh_wdt_set_top(val);
+ fh_wdt_keepalive();
+#endif
+ return put_user(val , (int __user *)arg);
+
+ case WDIOC_GETTIMEOUT:
+#ifdef WDT_TIMER_MODE
+ return put_user(heartbeat, (int __user *)arg);
+#else
+ if (get_user(val, (int __user *)arg))
+ return -EFAULT;
+ return put_user(fh_wdt_time_left(), (int __user *)arg);
+#endif
+
+ case WDIOC_GETTIMELEFT:
+#ifdef WDT_TIMER_MODE
+ val = (fh_wdt.next_heartbeat - fh_wdt.timer.expires) / HZ;
+ return put_user(val, (int __user *)arg);
+#else
+ /* Get the time left in hardware if use hardware-mode*/
+ if (get_user(val, (int __user *)arg))
+ return -EFAULT;
+ return put_user(fh_wdt_get_top(), (int __user *)arg);
+#endif
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(val, (int __user *)arg))
+ return -EFAULT;
+
+ if (val & WDIOS_DISABLECARD) {
+ if(fh_wdt.plat_data && fh_wdt.plat_data->pause)
+ fh_wdt.plat_data->pause();
+ else
+ return -EPERM;
+ }
+
+ if (val & WDIOS_ENABLECARD) {
+ if(fh_wdt.plat_data && fh_wdt.plat_data->resume)
+ fh_wdt.plat_data->resume();
+ else
+ return -EPERM;
+ }
+
+ return 0;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+#ifdef WDT_RESPONSE_MODE
+static irqreturn_t fh_wdt_interrupt(int this_irq, void *dev_id)
+{
+ struct fh_wdt_t *fh_wdt = (struct fh_wdt_t *)dev_id;
+
+ if (fh_wdt && fh_wdt->plat_data->intr)
+ return fh_wdt->plat_data->intr(fh_wdt);
+ else
+ return IRQ_HANDLED;
+}
+#endif
+
+static int fh_wdt_release(struct inode *inode, struct file *filp)
+{
+ clear_bit(0, &fh_wdt.in_use);
+#ifdef WDT_TIMER_MODE
+ if (!fh_wdt.expect_close) {
+ del_timer(&fh_wdt.timer);
+ if (!nowayout)
+ pr_crit("unexpected close, system will reboot soon\n");
+ else
+ pr_crit("watchdog cannot be disabled, system will reboot soon\n");
+ }
+
+ fh_wdt.expect_close = 0;
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int fh_wdt_suspend(struct device *dev)
+{
+ clk_disable(fh_wdt.clk);
+ writel(FH_WDT_MAX_TOP, fh_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+ curr_clk_rate = WDT_CLOCK;
+ fh_wdt_keepalive();
+
+ return 0;
+}
+
+static int fh_wdt_resume(struct device *dev)
+{
+ int err;
+
+ clk_set_rate(fh_wdt.clk, curr_clk_rate);
+ err = clk_enable(fh_wdt.clk);
+
+ if (err)
+ {
+ pr_err("an error occured during wdt resume, error no: %d\n", err);
+ return err;
+ }
+
+ fh_wdt_keepalive();
+
+ return 0;
+}
+
+static const struct dev_pm_ops fh_wdt_pm_ops = {
+ .suspend = fh_wdt_suspend,
+ .resume = fh_wdt_resume,
+};
+#endif /* CONFIG_PM */
+
+static const struct file_operations wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = fh_wdt_open,
+ .write = fh_wdt_write,
+ .unlocked_ioctl = fh_wdt_ioctl,
+ .release = fh_wdt_release
+};
+
+static struct miscdevice fh_wdt_miscdev = {
+ .fops = &wdt_fops,
+ .name = "watchdog",
+ .minor = WATCHDOG_MINOR,
+};
+
+static int __devinit fh_wdt_drv_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ "fh_wdt"))
+ return -ENOMEM;
+
+ fh_wdt.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!fh_wdt.regs)
+ return -ENOMEM;
+#ifdef WDT_RESPONSE_MODE
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource\n");
+ return -ENXIO;
+ }
+
+ ret = request_irq(irq, fh_wdt_interrupt, IRQF_DISABLED,\
+ dev_name(&pdev->dev),
+ &fh_wdt);
+ if (ret) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", irq);
+ return -ENXIO;
+ }
+#endif
+
+ fh_wdt.plat_data = dev_get_platdata(&pdev->dev);
+ fh_wdt.clk = clk_get( NULL,"wdt_clk");
+ if (IS_ERR(fh_wdt.clk))
+ return PTR_ERR(fh_wdt.clk);
+
+ clk_set_rate(fh_wdt.clk, 1000000);
+
+ ret = clk_enable(fh_wdt.clk);
+ if (ret)
+ goto out_put_clk;
+
+ spin_lock_init(&fh_wdt.lock);
+
+ ret = misc_register(&fh_wdt_miscdev);
+ if (ret)
+ goto out_disable_clk;
+
+ fh_wdt_set_next_heartbeat();
+ setup_timer(&fh_wdt.timer, fh_wdt_ping, 0);
+ mod_timer(&fh_wdt.timer, jiffies + WDT_TIMEOUT);
+
+ return 0;
+
+out_disable_clk:
+ clk_disable(fh_wdt.clk);
+out_put_clk:
+ clk_put(fh_wdt.clk);
+
+ return ret;
+}
+
+static int __devexit fh_wdt_drv_remove(struct platform_device *pdev)
+{
+ misc_deregister(&fh_wdt_miscdev);
+
+ clk_disable(fh_wdt.clk);
+ clk_put(fh_wdt.clk);
+
+ return 0;
+}
+
+static struct platform_driver fh_wdt_driver = {
+ .probe = fh_wdt_drv_probe,
+ .remove = __devexit_p(fh_wdt_drv_remove),
+ .driver = {
+ .name = "fh_wdt",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &fh_wdt_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __init fh_wdt_watchdog_init(void)
+{
+ return platform_driver_register(&fh_wdt_driver);
+}
+module_init(fh_wdt_watchdog_init);
+
+static void __exit fh_wdt_watchdog_exit(void)
+{
+ platform_driver_unregister(&fh_wdt_driver);
+}
+module_exit(fh_wdt_watchdog_exit);
+
+MODULE_AUTHOR("fullhan");
+MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/fs/Kconfig b/fs/Kconfig
index 19891aab..77c5e665 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -192,6 +192,7 @@ source "fs/hfsplus/Kconfig"
source "fs/befs/Kconfig"
source "fs/bfs/Kconfig"
source "fs/efs/Kconfig"
+source "fs/yaffs2/Kconfig"
source "fs/jffs2/Kconfig"
# UBIFS File system configuration
source "fs/ubifs/Kconfig"
diff --git a/fs/Kconfig.pre.yaffs b/fs/Kconfig.pre.yaffs
new file mode 100644
index 00000000..19891aab
--- /dev/null
+++ b/fs/Kconfig.pre.yaffs
@@ -0,0 +1,273 @@
+#
+# File system configuration
+#
+
+menu "File systems"
+
+if BLOCK
+
+source "fs/ext2/Kconfig"
+source "fs/ext3/Kconfig"
+source "fs/ext4/Kconfig"
+
+config FS_XIP
+# execute in place
+ bool
+ depends on EXT2_FS_XIP
+ default y
+
+source "fs/jbd/Kconfig"
+source "fs/jbd2/Kconfig"
+
+config FS_MBCACHE
+# Meta block cache for Extended Attributes (ext2/ext3/ext4)
+ tristate
+ default y if EXT2_FS=y && EXT2_FS_XATTR
+ default y if EXT3_FS=y && EXT3_FS_XATTR
+ default y if EXT4_FS=y && EXT4_FS_XATTR
+ default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS_XATTR
+
+source "fs/reiserfs/Kconfig"
+source "fs/jfs/Kconfig"
+
+source "fs/xfs/Kconfig"
+source "fs/gfs2/Kconfig"
+source "fs/ocfs2/Kconfig"
+source "fs/btrfs/Kconfig"
+source "fs/nilfs2/Kconfig"
+
+endif # BLOCK
+
+# Posix ACL utility routines
+#
+# Note: Posix ACLs can be implemented without these helpers. Never use
+# this symbol for ifdefs in core code.
+#
+config FS_POSIX_ACL
+ def_bool n
+
+config EXPORTFS
+ tristate
+
+config FILE_LOCKING
+ bool "Enable POSIX file locking API" if EXPERT
+ default y
+ help
+ This option enables standard file locking support, required
+ for filesystems like NFS and for the flock() system
+ call. Disabling this option saves about 11k.
+
+source "fs/notify/Kconfig"
+
+source "fs/quota/Kconfig"
+
+source "fs/autofs4/Kconfig"
+source "fs/fuse/Kconfig"
+
+config CUSE
+ tristate "Character device in Userspace support"
+ depends on FUSE_FS
+ help
+ This FUSE extension allows character devices to be
+ implemented in userspace.
+
+ If you want to develop or use userspace character device
+ based on CUSE, answer Y or M.
+
+config GENERIC_ACL
+ bool
+ select FS_POSIX_ACL
+
+menu "Caches"
+
+source "fs/fscache/Kconfig"
+source "fs/cachefiles/Kconfig"
+
+endmenu
+
+if BLOCK
+menu "CD-ROM/DVD Filesystems"
+
+source "fs/isofs/Kconfig"
+source "fs/udf/Kconfig"
+
+endmenu
+endif # BLOCK
+
+if BLOCK
+menu "DOS/FAT/NT Filesystems"
+
+source "fs/fat/Kconfig"
+source "fs/ntfs/Kconfig"
+
+endmenu
+endif # BLOCK
+
+menu "Pseudo filesystems"
+
+source "fs/proc/Kconfig"
+source "fs/sysfs/Kconfig"
+
+config TMPFS
+ bool "Virtual memory file system support (former shm fs)"
+ depends on SHMEM
+ help
+ Tmpfs is a file system which keeps all files in virtual memory.
+
+ Everything in tmpfs is temporary in the sense that no files will be
+ created on your hard drive. The files live in memory and swap
+ space. If you unmount a tmpfs instance, everything stored therein is
+ lost.
+
+ See <file:Documentation/filesystems/tmpfs.txt> for details.
+
+config TMPFS_POSIX_ACL
+ bool "Tmpfs POSIX Access Control Lists"
+ depends on TMPFS
+ select TMPFS_XATTR
+ select GENERIC_ACL
+ help
+ POSIX Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the POSIX ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N.
+
+config TMPFS_XATTR
+ bool "Tmpfs extended attributes"
+ depends on TMPFS
+ default n
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page, or visit
+ <http://acl.bestbits.at/> for details).
+
+ Currently this enables support for the trusted.* and
+ security.* namespaces.
+
+ You need this for POSIX ACL support on tmpfs.
+
+ If unsure, say N.
+
+config HUGETLBFS
+ bool "HugeTLB file system support"
+ depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \
+ SYS_SUPPORTS_HUGETLBFS || BROKEN
+ help
+ hugetlbfs is a filesystem backing for HugeTLB pages, based on
+ ramfs. For architectures that support it, say Y here and read
+ <file:Documentation/vm/hugetlbpage.txt> for details.
+
+ If unsure, say N.
+
+config HUGETLB_PAGE
+ def_bool HUGETLBFS
+
+source "fs/configfs/Kconfig"
+
+endmenu
+
+menuconfig MISC_FILESYSTEMS
+ bool "Miscellaneous filesystems"
+ default y
+ ---help---
+ Say Y here to get to see options for various miscellaneous
+ filesystems, such as filesystems that came from other
+ operating systems.
+
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled; if unsure, say Y here.
+
+if MISC_FILESYSTEMS
+
+source "fs/adfs/Kconfig"
+source "fs/affs/Kconfig"
+source "fs/ecryptfs/Kconfig"
+source "fs/hfs/Kconfig"
+source "fs/hfsplus/Kconfig"
+source "fs/befs/Kconfig"
+source "fs/bfs/Kconfig"
+source "fs/efs/Kconfig"
+source "fs/jffs2/Kconfig"
+# UBIFS File system configuration
+source "fs/ubifs/Kconfig"
+source "fs/logfs/Kconfig"
+source "fs/cramfs/Kconfig"
+source "fs/squashfs/Kconfig"
+source "fs/freevxfs/Kconfig"
+source "fs/minix/Kconfig"
+source "fs/omfs/Kconfig"
+source "fs/hpfs/Kconfig"
+source "fs/qnx4/Kconfig"
+source "fs/romfs/Kconfig"
+source "fs/pstore/Kconfig"
+source "fs/sysv/Kconfig"
+source "fs/ufs/Kconfig"
+source "fs/exofs/Kconfig"
+
+endif # MISC_FILESYSTEMS
+
+menuconfig NETWORK_FILESYSTEMS
+ bool "Network File Systems"
+ default y
+ depends on NET
+ ---help---
+ Say Y here to get to see options for network filesystems and
+ filesystem-related networking code, such as NFS daemon and
+ RPCSEC security modules.
+
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled; if unsure, say Y here.
+
+if NETWORK_FILESYSTEMS
+
+source "fs/nfs/Kconfig"
+source "fs/nfsd/Kconfig"
+
+config LOCKD
+ tristate
+ depends on FILE_LOCKING
+
+config LOCKD_V4
+ bool
+ depends on NFSD_V3 || NFS_V3
+ depends on FILE_LOCKING
+ default y
+
+config NFS_ACL_SUPPORT
+ tristate
+ select FS_POSIX_ACL
+
+config NFS_COMMON
+ bool
+ depends on NFSD || NFS_FS
+ default y
+
+source "net/sunrpc/Kconfig"
+source "fs/ceph/Kconfig"
+source "fs/cifs/Kconfig"
+source "fs/ncpfs/Kconfig"
+source "fs/coda/Kconfig"
+source "fs/afs/Kconfig"
+source "fs/9p/Kconfig"
+
+endif # NETWORK_FILESYSTEMS
+
+if BLOCK
+menu "Partition Types"
+
+source "fs/partitions/Kconfig"
+
+endmenu
+endif
+
+source "fs/nls/Kconfig"
+source "fs/dlm/Kconfig"
+
+endmenu
diff --git a/fs/Makefile b/fs/Makefile
index fb68c2b8..dbd3eb7e 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -124,3 +124,4 @@ obj-$(CONFIG_GFS2_FS) += gfs2/
obj-$(CONFIG_EXOFS_FS) += exofs/
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
diff --git a/fs/Makefile.pre.yaffs b/fs/Makefile.pre.yaffs
new file mode 100644
index 00000000..fb68c2b8
--- /dev/null
+++ b/fs/Makefile.pre.yaffs
@@ -0,0 +1,126 @@
+#
+# Makefile for the Linux filesystems.
+#
+# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-y := open.o read_write.o file_table.o super.o \
+ char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
+ ioctl.o readdir.o select.o fifo.o dcache.o inode.o \
+ attr.o bad_inode.o file.o filesystems.o namespace.o \
+ seq_file.o xattr.o libfs.o fs-writeback.o \
+ pnode.o drop_caches.o splice.o sync.o utimes.o \
+ stack.o fs_struct.o statfs.o
+
+ifeq ($(CONFIG_BLOCK),y)
+obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
+else
+obj-y += no-block.o
+endif
+
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
+obj-y += notify/
+obj-$(CONFIG_EPOLL) += eventpoll.o
+obj-$(CONFIG_ANON_INODES) += anon_inodes.o
+obj-$(CONFIG_SIGNALFD) += signalfd.o
+obj-$(CONFIG_TIMERFD) += timerfd.o
+obj-$(CONFIG_EVENTFD) += eventfd.o
+obj-$(CONFIG_AIO) += aio.o
+obj-$(CONFIG_FILE_LOCKING) += locks.o
+obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
+obj-$(CONFIG_NFSD_DEPRECATED) += nfsctl.o
+obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
+obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o
+obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
+
+# binfmt_script is always there
+obj-y += binfmt_script.o
+
+obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o
+obj-$(CONFIG_COMPAT_BINFMT_ELF) += compat_binfmt_elf.o
+obj-$(CONFIG_BINFMT_ELF_FDPIC) += binfmt_elf_fdpic.o
+obj-$(CONFIG_BINFMT_SOM) += binfmt_som.o
+obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o
+
+obj-$(CONFIG_FS_MBCACHE) += mbcache.o
+obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o
+obj-$(CONFIG_NFS_COMMON) += nfs_common/
+obj-$(CONFIG_GENERIC_ACL) += generic_acl.o
+
+obj-$(CONFIG_FHANDLE) += fhandle.o
+
+obj-y += quota/
+
+obj-$(CONFIG_PROC_FS) += proc/
+obj-y += partitions/
+obj-$(CONFIG_SYSFS) += sysfs/
+obj-$(CONFIG_CONFIGFS_FS) += configfs/
+obj-y += devpts/
+
+obj-$(CONFIG_PROFILING) += dcookies.o
+obj-$(CONFIG_DLM) += dlm/
+
+# Do not add any filesystems before this line
+obj-$(CONFIG_FSCACHE) += fscache/
+obj-$(CONFIG_REISERFS_FS) += reiserfs/
+obj-$(CONFIG_EXT3_FS) += ext3/ # Before ext2 so root fs can be ext3
+obj-$(CONFIG_EXT2_FS) += ext2/
+# We place ext4 after ext2 so plain ext2 root fs's are mounted using ext2
+# unless explicitly requested by rootfstype
+obj-$(CONFIG_EXT4_FS) += ext4/
+obj-$(CONFIG_JBD) += jbd/
+obj-$(CONFIG_JBD2) += jbd2/
+obj-$(CONFIG_CRAMFS) += cramfs/
+obj-$(CONFIG_SQUASHFS) += squashfs/
+obj-y += ramfs/
+obj-$(CONFIG_HUGETLBFS) += hugetlbfs/
+obj-$(CONFIG_CODA_FS) += coda/
+obj-$(CONFIG_MINIX_FS) += minix/
+obj-$(CONFIG_FAT_FS) += fat/
+obj-$(CONFIG_BFS_FS) += bfs/
+obj-$(CONFIG_ISO9660_FS) += isofs/
+obj-$(CONFIG_HFSPLUS_FS) += hfsplus/ # Before hfs to find wrapped HFS+
+obj-$(CONFIG_HFS_FS) += hfs/
+obj-$(CONFIG_ECRYPT_FS) += ecryptfs/
+obj-$(CONFIG_VXFS_FS) += freevxfs/
+obj-$(CONFIG_NFS_FS) += nfs/
+obj-$(CONFIG_EXPORTFS) += exportfs/
+obj-$(CONFIG_NFSD) += nfsd/
+obj-$(CONFIG_LOCKD) += lockd/
+obj-$(CONFIG_NLS) += nls/
+obj-$(CONFIG_SYSV_FS) += sysv/
+obj-$(CONFIG_CIFS) += cifs/
+obj-$(CONFIG_NCP_FS) += ncpfs/
+obj-$(CONFIG_HPFS_FS) += hpfs/
+obj-$(CONFIG_NTFS_FS) += ntfs/
+obj-$(CONFIG_UFS_FS) += ufs/
+obj-$(CONFIG_EFS_FS) += efs/
+obj-$(CONFIG_JFFS2_FS) += jffs2/
+obj-$(CONFIG_LOGFS) += logfs/
+obj-$(CONFIG_UBIFS_FS) += ubifs/
+obj-$(CONFIG_AFFS_FS) += affs/
+obj-$(CONFIG_ROMFS_FS) += romfs/
+obj-$(CONFIG_QNX4FS_FS) += qnx4/
+obj-$(CONFIG_AUTOFS4_FS) += autofs4/
+obj-$(CONFIG_ADFS_FS) += adfs/
+obj-$(CONFIG_FUSE_FS) += fuse/
+obj-$(CONFIG_UDF_FS) += udf/
+obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
+obj-$(CONFIG_OMFS_FS) += omfs/
+obj-$(CONFIG_JFS_FS) += jfs/
+obj-$(CONFIG_XFS_FS) += xfs/
+obj-$(CONFIG_9P_FS) += 9p/
+obj-$(CONFIG_AFS_FS) += afs/
+obj-$(CONFIG_NILFS2_FS) += nilfs2/
+obj-$(CONFIG_BEFS_FS) += befs/
+obj-$(CONFIG_HOSTFS) += hostfs/
+obj-$(CONFIG_HPPFS) += hppfs/
+obj-$(CONFIG_CACHEFILES) += cachefiles/
+obj-$(CONFIG_DEBUG_FS) += debugfs/
+obj-$(CONFIG_OCFS2_FS) += ocfs2/
+obj-$(CONFIG_BTRFS_FS) += btrfs/
+obj-$(CONFIG_GFS2_FS) += gfs2/
+obj-$(CONFIG_EXOFS_FS) += exofs/
+obj-$(CONFIG_CEPH_FS) += ceph/
+obj-$(CONFIG_PSTORE) += pstore/
diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig
new file mode 100755
index 00000000..658feea5
--- /dev/null
+++ b/fs/yaffs2/Kconfig
@@ -0,0 +1,161 @@
+#
+# yaffs file system configurations
+#
+
+config YAFFS_FS
+ tristate "yaffs2 file system support"
+ default n
+ depends on MTD_BLOCK
+ select YAFFS_YAFFS1
+ select YAFFS_YAFFS2
+ help
+ yaffs2, or Yet Another Flash File System, is a file system
+ optimised for NAND Flash chips.
+
+ To compile the yaffs2 file system support as a module, choose M
+ here: the module will be called yaffs2.
+
+ If unsure, say N.
+
+ Further information on yaffs2 is available at
+ <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+ bool "512 byte / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable yaffs1 support -- yaffs for 512 byte / page devices
+
+ Not needed for 2K-page devices.
+
+ If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+ bool "Use older-style on-NAND data format with pageStatus byte"
+ depends on YAFFS_YAFFS1
+ default n
+ help
+
+ Older-style on-NAND data format has a "pageStatus" byte to record
+ chunk/page state. This byte is zero when the page is discarded.
+ Choose this option if you have existing on-NAND data using this
+ format that you need to continue to support. New data written
+ also uses the older-style format. Note: Use of this option
+ generally requires that MTD's oob layout be adjusted to use the
+ older-style format. See notes on tags formats and MTD versions
+ in yaffs_mtdif1.c.
+
+ If unsure, say N.
+
+config YAFFS_DOES_ECC
+ bool "Lets yaffs do its own ECC"
+ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This enables yaffs to use its own ECC functions instead of using
+ the ones from the generic MTD-NAND driver.
+
+ If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This makes yaffs_ecc.c use the same ecc byte order as Steven
+ Hill's nand_ecc.c. If not set, then you get the same ecc byte
+ order as SmartMedia.
+
+ If unsure, say N.
+
+config YAFFS_YAFFS2
+ bool "2048 byte (or larger) / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable yaffs2 support -- yaffs for >= 2K bytes per page devices
+
+ If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+ bool "Autoselect yaffs2 format"
+ depends on YAFFS_YAFFS2
+ default y
+ help
+	  Without this, you need to explicitly use yaffs2 as the file
+ system type. With this, you can say "yaffs" and yaffs or yaffs2
+ will be used depending on the device page size (yaffs on
+ 512-byte page devices, yaffs2 on 2K page devices).
+
+ If unsure, say Y.
+
+config YAFFS_DISABLE_TAGS_ECC
+ bool "Disable yaffs from doing ECC on tags by default"
+ depends on YAFFS_FS && YAFFS_YAFFS2
+ default n
+ help
+ This defaults yaffs to using its own ECC calculations on tags instead of
+ just relying on the MTD.
+ This behavior can also be overridden with tags_ecc_on and
+ tags_ecc_off mount options.
+
+ If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ bool "Force chunk erase check"
+ depends on YAFFS_FS
+ default n
+ help
+ Normally yaffs only checks chunks before writing until an erased
+ chunk is found. This helps to detect any partially written
+ chunks that might have happened due to power loss.
+
+ Enabling this forces on the test that chunks are erased in flash
+ before writing to them. This takes more time but is potentially
+ a bit more secure.
+
+ Suggest setting Y during development and ironing out driver
+ issues etc. Suggest setting to N if you want faster writing.
+
+ If unsure, say Y.
+
+config YAFFS_EMPTY_LOST_AND_FOUND
+ bool "Empty lost and found on boot"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is enabled then the contents of lost and found is
+ automatically dumped at mount.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BLOCK_REFRESHING
+ bool "Disable yaffs2 block refreshing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then block refreshing is disabled.
+ Block refreshing infrequently refreshes the oldest block in
+ a yaffs2 file system. This mechanism helps to refresh flash to
+ mitigate against data loss. This is particularly useful for MLC.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BACKGROUND
+ bool "Disable yaffs2 background processing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then background processing is disabled.
+ Background processing makes many foreground activities faster.
+
+ If unsure, say N.
+
+config YAFFS_XATTR
+ bool "Enable yaffs2 xattr support"
+ depends on YAFFS_FS
+ default y
+ help
+ If this is set then yaffs2 will provide xattr support.
+ If unsure, say Y.
diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile
new file mode 100755
index 00000000..f9a9fb1b
--- /dev/null
+++ b/fs/yaffs2/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsmarshall.o
+yaffs-y += yaffs_mtdif.o
+yaffs-y += yaffs_nameval.o yaffs_attribs.o
+yaffs-y += yaffs_allocator.o
+yaffs-y += yaffs_yaffs1.o
+yaffs-y += yaffs_yaffs2.o
+yaffs-y += yaffs_bitmap.o
+yaffs-y += yaffs_summary.o
+yaffs-y += yaffs_verify.o
+
diff --git a/fs/yaffs2/yaffs_allocator.c b/fs/yaffs2/yaffs_allocator.c
new file mode 100755
index 00000000..c8f2861c
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.c
@@ -0,0 +1,357 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_allocator.h"
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yportenv.h"
+
+/*
+ * Each entry in yaffs_tnode_list and yaffs_obj_list hold blocks
+ * of approx 100 objects that are then allocated singly.
+ * This is basically a simplified slab allocator.
+ *
+ * We don't use the Linux slab allocator because slab does not allow
+ * us to dump all the objects in one hit when we do a umount and tear
+ * down all the tnodes and objects. slab requires that we first free
+ * the individual objects.
+ *
+ * Once yaffs has been mainlined I shall try to motivate for a change
+ * to slab to provide the extra features we need here.
+ */
+
+struct yaffs_tnode_list {
+ struct yaffs_tnode_list *next;
+ struct yaffs_tnode *tnodes;
+};
+
+struct yaffs_obj_list {
+ struct yaffs_obj_list *next;
+ struct yaffs_obj *objects;
+};
+
+struct yaffs_allocator {
+ int n_tnodes_created;
+ struct yaffs_tnode *free_tnodes;
+ int n_free_tnodes;
+ struct yaffs_tnode_list *alloc_tnode_list;
+
+ int n_obj_created;
+ struct list_head free_objs;
+ int n_free_objects;
+
+ struct yaffs_obj_list *allocated_obj_list;
+};
+
+static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode_list *tmp;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ while (allocator->alloc_tnode_list) {
+ tmp = allocator->alloc_tnode_list->next;
+
+ kfree(allocator->alloc_tnode_list->tnodes);
+ kfree(allocator->alloc_tnode_list);
+ allocator->alloc_tnode_list = tmp;
+ }
+
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ allocator->alloc_tnode_list = NULL;
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ int i;
+ struct yaffs_tnode *new_tnodes;
+ u8 *mem;
+ struct yaffs_tnode *curr;
+ struct yaffs_tnode *next;
+ struct yaffs_tnode_list *tnl;
+
+ if (!allocator) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_tnodes < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
+ mem = (u8 *) new_tnodes;
+
+ if (!new_tnodes) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: Could not allocate Tnodes");
+ return YAFFS_FAIL;
+ }
+
+ /* New hookup for wide tnodes */
+ for (i = 0; i < n_tnodes - 1; i++) {
+ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
+ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
+ curr->internal[0] = next;
+ }
+
+ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
+ curr->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = (struct yaffs_tnode *)mem;
+
+ allocator->n_free_tnodes += n_tnodes;
+ allocator->n_tnodes_created += n_tnodes;
+
+ /* Now add this bunch of tnodes to a list for freeing up.
+ * NB If we can't add this to the management list it isn't fatal
+ * but it just means we can't free this bunch of tnodes later.
+ */
+ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
+ if (!tnl) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Could not add tnodes to management list");
+ return YAFFS_FAIL;
+ } else {
+ tnl->tnodes = new_tnodes;
+ tnl->next = allocator->alloc_tnode_list;
+ allocator->alloc_tnode_list = tnl;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
+
+ return YAFFS_OK;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode *tn = NULL;
+
+ if (!allocator) {
+ BUG();
+ return NULL;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_tnodes)
+ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
+
+ if (allocator->free_tnodes) {
+ tn = allocator->free_tnodes;
+ allocator->free_tnodes = allocator->free_tnodes->internal[0];
+ allocator->n_free_tnodes--;
+ }
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ if (tn) {
+ tn->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = tn;
+ allocator->n_free_tnodes++;
+ }
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+/*--------------- yaffs_obj allocation ------------------------
+ *
+ * Free yaffs_objs are stored in a list using obj->siblings.
+ * The blocks of allocated objects are stored in a linked list.
+ */
+
+static void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ allocator->allocated_obj_list = NULL;
+ INIT_LIST_HEAD(&allocator->free_objs);
+ allocator->n_free_objects = 0;
+}
+
+static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ struct yaffs_obj_list *tmp;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ while (allocator->allocated_obj_list) {
+ tmp = allocator->allocated_obj_list->next;
+ kfree(allocator->allocated_obj_list->objects);
+ kfree(allocator->allocated_obj_list);
+ allocator->allocated_obj_list = tmp;
+ }
+
+ INIT_LIST_HEAD(&allocator->free_objs);
+ allocator->n_free_objects = 0;
+ allocator->n_obj_created = 0;
+}
+
+static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ int i;
+ struct yaffs_obj *new_objs;
+ struct yaffs_obj_list *list;
+
+ if (!allocator) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_obj < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
+ list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
+
+ if (!new_objs || !list) {
+ kfree(new_objs);
+ new_objs = NULL;
+ kfree(list);
+ list = NULL;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Could not allocate more objects");
+ return YAFFS_FAIL;
+ }
+
+ /* Hook them into the free list */
+ for (i = 0; i < n_obj; i++)
+ list_add(&new_objs[i].siblings, &allocator->free_objs);
+
+ allocator->n_free_objects += n_obj;
+ allocator->n_obj_created += n_obj;
+
+ /* Now add this bunch of Objects to a list for freeing up. */
+
+ list->objects = new_objs;
+ list->next = allocator->allocated_obj_list;
+ allocator->allocated_obj_list = list;
+
+ return YAFFS_OK;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = NULL;
+ struct list_head *lh;
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return obj;
+ }
+
+ /* If there are none left make more */
+ if (list_empty(&allocator->free_objs))
+ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
+
+ if (!list_empty(&allocator->free_objs)) {
+ lh = allocator->free_objs.next;
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ list_del_init(lh);
+ allocator->n_free_objects--;
+ }
+
+ return obj;
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ /* Link into the free list. */
+ list_add(&obj->siblings, &allocator->free_objs);
+ allocator->n_free_objects++;
+}
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+
+ if (!dev->allocator) {
+ BUG();
+ return;
+ }
+
+ yaffs_deinit_raw_tnodes(dev);
+ yaffs_deinit_raw_objs(dev);
+ kfree(dev->allocator);
+ dev->allocator = NULL;
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator;
+
+ if (dev->allocator) {
+ BUG();
+ return;
+ }
+
+ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
+ if (allocator) {
+ dev->allocator = allocator;
+ yaffs_init_raw_tnodes(dev);
+ yaffs_init_raw_objs(dev);
+ }
+}
+
diff --git a/fs/yaffs2/yaffs_allocator.h b/fs/yaffs2/yaffs_allocator.h
new file mode 100755
index 00000000..a8cc3226
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.h
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ALLOCATOR_H__
+#define __YAFFS_ALLOCATOR_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
+
+#endif
diff --git a/fs/yaffs2/yaffs_attribs.c b/fs/yaffs2/yaffs_attribs.c
new file mode 100755
index 00000000..3d778f22
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.c
@@ -0,0 +1,124 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
+{
+ obj->yst_uid = oh->yst_uid;
+ obj->yst_gid = oh->yst_gid;
+ obj->yst_atime = oh->yst_atime;
+ obj->yst_mtime = oh->yst_mtime;
+ obj->yst_ctime = oh->yst_ctime;
+ obj->yst_rdev = oh->yst_rdev;
+}
+
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
+{
+ oh->yst_uid = obj->yst_uid;
+ oh->yst_gid = obj->yst_gid;
+ oh->yst_atime = obj->yst_atime;
+ oh->yst_mtime = obj->yst_mtime;
+ oh->yst_ctime = obj->yst_ctime;
+ oh->yst_rdev = obj->yst_rdev;
+
+}
+
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
+{
+ obj->yst_mtime = Y_CURRENT_TIME;
+ if (do_a)
+ obj->yst_atime = obj->yst_mtime;
+ if (do_c)
+ obj->yst_ctime = obj->yst_mtime;
+}
+
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
+{
+ yaffs_load_current_time(obj, 1, 1);
+ obj->yst_rdev = rdev;
+ obj->yst_uid = uid;
+ obj->yst_gid = gid;
+}
+
+static loff_t yaffs_get_file_size(struct yaffs_obj *obj)
+{
+ YCHAR *alias = NULL;
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return obj->variant.file_variant.file_size;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = obj->variant.symlink_variant.alias;
+ if (!alias)
+ return 0;
+ return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
+ default:
+ return 0;
+ }
+}
+
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = attr->ia_valid;
+
+ if (valid & ATTR_MODE)
+ obj->yst_mode = attr->ia_mode;
+ if (valid & ATTR_UID)
+ obj->yst_uid = attr->ia_uid;
+ if (valid & ATTR_GID)
+ obj->yst_gid = attr->ia_gid;
+
+ if (valid & ATTR_ATIME)
+ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+ if (valid & ATTR_CTIME)
+ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+ if (valid & ATTR_MTIME)
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+ yaffs_resize_file(obj, attr->ia_size);
+
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ return YAFFS_OK;
+
+}
+
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = 0;
+
+ attr->ia_mode = obj->yst_mode;
+ valid |= ATTR_MODE;
+ attr->ia_uid = obj->yst_uid;
+ valid |= ATTR_UID;
+ attr->ia_gid = obj->yst_gid;
+ valid |= ATTR_GID;
+
+ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+ valid |= ATTR_ATIME;
+ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+ valid |= ATTR_CTIME;
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+ attr->ia_size = yaffs_get_file_size(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_attribs.h b/fs/yaffs2/yaffs_attribs.h
new file mode 100755
index 00000000..5b21b085
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ATTRIBS_H__
+#define __YAFFS_ATTRIBS_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
+
+#endif
diff --git a/fs/yaffs2/yaffs_bitmap.c b/fs/yaffs2/yaffs_bitmap.c
new file mode 100755
index 00000000..4440e930
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.c
@@ -0,0 +1,97 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_bitmap.h"
+#include "yaffs_trace.h"
+/*
+ * Chunk bitmap manipulations
+ */
+
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "BlockBits block %d is not valid",
+ blk);
+ BUG();
+ }
+ return dev->chunk_bits +
+ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
+}
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
+ chunk < 0 || chunk >= dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Chunk Id (%d:%d) invalid",
+ blk, chunk);
+ BUG();
+ }
+}
+
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ memset(blk_bits, 0, dev->chunk_bit_stride);
+}
+
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ blk_bits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++) {
+ if (*blk_bits)
+ return 1;
+ blk_bits++;
+ }
+ return 0;
+}
+
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+ int n = 0;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
+ n += hweight8(*blk_bits);
+
+ return n;
+}
diff --git a/fs/yaffs2/yaffs_bitmap.h b/fs/yaffs2/yaffs_bitmap.h
new file mode 100755
index 00000000..e26b37d8
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Chunk bitmap manipulations
+ */
+
+#ifndef __YAFFS_BITMAP_H__
+#define __YAFFS_BITMAP_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+
+#endif
diff --git a/fs/yaffs2/yaffs_checkptrw.c b/fs/yaffs2/yaffs_checkptrw.c
new file mode 100755
index 00000000..e739fb4a
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.c
@@ -0,0 +1,474 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_checkptrw.h"
+#include "yaffs_getblockinfo.h"
+
+struct yaffs_checkpt_chunk_hdr {
+ int version;
+ int seq;
+ u32 sum;
+ u32 xor;
+} ;
+
+
+static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
+{
+ return chunk - dev->chunk_offset;
+}
+
+static int apply_block_offset(struct yaffs_dev *dev, int block)
+{
+ return block - dev->block_offset;
+}
+
+static void yaffs2_checkpt_init_chunk_hdr(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_chunk_hdr hdr;
+
+ hdr.version = YAFFS_CHECKPOINT_VERSION;
+ hdr.seq = dev->checkpt_page_seq;
+ hdr.sum = dev->checkpt_sum;
+ hdr.xor = dev->checkpt_xor;
+
+ dev->checkpt_byte_offs = sizeof(hdr);
+
+ memcpy(dev->checkpt_buffer, &hdr, sizeof(hdr));
+}
+
+static int yaffs2_checkpt_check_chunk_hdr(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_chunk_hdr hdr;
+
+ memcpy(&hdr, dev->checkpt_buffer, sizeof(hdr));
+
+ dev->checkpt_byte_offs = sizeof(hdr);
+
+ return hdr.version == YAFFS_CHECKPOINT_VERSION &&
+ hdr.seq == dev->checkpt_page_seq &&
+ hdr.sum == dev->checkpt_sum &&
+ hdr.xor == dev->checkpt_xor;
+}
+
+static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
+{
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpt blocks_avail = %d", blocks_avail);
+
+ return (blocks_avail <= 0) ? 0 : 1;
+}
+
+static int yaffs_checkpt_erase(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (!dev->drv.drv_erase_fn)
+ return 0;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checking blocks %d to %d",
+ dev->internal_start_block, dev->internal_end_block);
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ int offset_i = apply_block_offset(dev, i);
+ int result;
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "erasing checkpt block %d", i);
+
+ dev->n_erasures++;
+
+ result = dev->drv.drv_erase_fn(dev, offset_i);
+ if(result) {
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ dev->n_free_chunks +=
+ dev->param.chunks_per_block;
+ } else {
+ dev->drv.drv_mark_bad_fn(dev, offset_i);
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ return 1;
+}
+
+static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
+{
+ int i;
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block: erased %d reserved %d avail %d next %d ",
+ dev->n_erased_blocks, dev->param.n_reserved_blocks,
+ blocks_avail, dev->checkpt_next_block);
+
+ if (dev->checkpt_next_block >= 0 &&
+ dev->checkpt_next_block <= dev->internal_end_block &&
+ blocks_avail > 0) {
+
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ struct yaffs_block_info *bi;
+
+ bi = yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ dev->checkpt_next_block = i + 1;
+ dev->checkpt_cur_block = i;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block %d", i);
+ return;
+ }
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
+{
+	int i;
+	struct yaffs_ext_tags tags;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"find next checkpt block: start: blocks %d next %d",
+		dev->blocks_in_checkpt, dev->checkpt_next_block);
+
+	if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
+		for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+		     i++) {
+			int chunk = i * dev->param.chunks_per_block;
+			enum yaffs_block_state state;
+			u32 seq;
+
+			dev->tagger.read_chunk_tags_fn(dev,
+				apply_chunk_offset(dev, chunk),
+				NULL, &tags);
+			dev->tagger.query_block_fn(dev,
+				apply_block_offset(dev, i),
+				&state, &seq); /* set state before trace reads it */
+			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+				"find next checkpt block: search: block %d state %d oid %d seq %d eccr %d",
+				i, (int) state,
+				tags.obj_id, tags.seq_number,
+				tags.ecc_result);
+
+			if (tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA)
+				continue;
+
+			if (state == YAFFS_BLOCK_STATE_DEAD)
+				continue;
+
+			/* Right kind of block */
+			dev->checkpt_next_block = tags.obj_id;
+			dev->checkpt_cur_block = i;
+			dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
+			dev->blocks_in_checkpt++;
+			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+				"found checkpt block %d", i);
+			return;
+		}
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
+
+	dev->checkpt_next_block = -1;
+	dev->checkpt_cur_block = -1;
+}
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
+{
+ int i;
+
+ dev->checkpt_open_write = writing;
+
+ /* Got the functions we need? */
+ if (!dev->tagger.write_chunk_tags_fn ||
+ !dev->tagger.read_chunk_tags_fn ||
+ !dev->drv.drv_erase_fn ||
+ !dev->drv.drv_mark_bad_fn)
+ return 0;
+
+ if (writing && !yaffs2_checkpt_space_ok(dev))
+ return 0;
+
+ if (!dev->checkpt_buffer)
+ dev->checkpt_buffer =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ dev->checkpt_page_seq = 0;
+ dev->checkpt_byte_count = 0;
+ dev->checkpt_sum = 0;
+ dev->checkpt_xor = 0;
+ dev->checkpt_cur_block = -1;
+ dev->checkpt_cur_chunk = -1;
+ dev->checkpt_next_block = dev->internal_start_block;
+
+ if (writing) {
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+ yaffs2_checkpt_init_chunk_hdr(dev);
+ return yaffs_checkpt_erase(dev);
+ }
+
+ /* Opening for a read */
+ /* Set to a value that will kick off a read */
+ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+ /* A checkpoint block list of 1 checkpoint block per 16 block is
+ * (hopefully) going to be way more than we need */
+ dev->blocks_in_checkpt = 0;
+ dev->checkpt_max_blocks =
+ (dev->internal_end_block - dev->internal_start_block) / 16 + 2;
+ dev->checkpt_block_list =
+ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
+
+ if (!dev->checkpt_block_list)
+ return 0;
+
+ for (i = 0; i < dev->checkpt_max_blocks; i++)
+ dev->checkpt_block_list[i] = -1;
+
+ return 1;
+}
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum)
+{
+ u32 composite_sum;
+
+ composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xff);
+ *sum = composite_sum;
+ return 1;
+}
+
+static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
+{
+ int chunk;
+ int offset_chunk;
+ struct yaffs_ext_tags tags;
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_erased_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0)
+ return 0;
+
+ tags.is_deleted = 0;
+ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
+ tags.chunk_id = dev->checkpt_page_seq + 1;
+ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+ tags.n_bytes = dev->data_bytes_per_chunk;
+ if (dev->checkpt_cur_chunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, dev->checkpt_cur_block);
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ dev->blocks_in_checkpt++;
+ }
+
+ chunk =
+ dev->checkpt_cur_block * dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint wite buffer nand %d(%d:%d) objid %d chId %d",
+ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
+ tags.obj_id, tags.chunk_id);
+
+ offset_chunk = apply_chunk_offset(dev, chunk);
+
+ dev->n_page_writes++;
+
+ dev->tagger.write_chunk_tags_fn(dev, offset_chunk,
+ dev->checkpt_buffer, &tags);
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
+ dev->checkpt_cur_chunk = 0;
+ dev->checkpt_cur_block = -1;
+ }
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+ yaffs2_checkpt_init_chunk_hdr(dev);
+
+
+ return 1;
+}
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (!dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+ ok = yaffs2_checkpt_flush_buffer(dev);
+ }
+
+ return i;
+}
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ struct yaffs_ext_tags tags;
+ int chunk;
+ int offset_chunk;
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0) {
+ ok = 0;
+ break;
+ }
+
+ chunk = dev->checkpt_cur_block *
+ dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ offset_chunk = apply_chunk_offset(dev, chunk);
+ dev->n_page_reads++;
+
+ /* read in the next chunk */
+ dev->tagger.read_chunk_tags_fn(dev,
+ offset_chunk,
+ dev->checkpt_buffer,
+ &tags);
+
+ if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
+ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+ tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ ok = 0;
+ break;
+ }
+ if(!yaffs2_checkpt_check_chunk_hdr(dev)) {
+ ok = 0;
+ break;
+ }
+
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+
+ if (dev->checkpt_cur_chunk >=
+ dev->param.chunks_per_block)
+ dev->checkpt_cur_block = -1;
+
+ }
+
+ *data_bytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+ }
+
+ return i;
+}
+
+int yaffs_checkpt_close(struct yaffs_dev *dev)
+{
+	int i;
+
+	if (dev->checkpt_open_write) {
+		if (dev->checkpt_byte_offs !=
+		    sizeof(struct yaffs_checkpt_chunk_hdr)) /* hdr size, not sizeof(size_t) */
+			yaffs2_checkpt_flush_buffer(dev);
+	} else if (dev->checkpt_block_list) {
+		for (i = 0;
+		     i < dev->blocks_in_checkpt &&
+		     dev->checkpt_block_list[i] >= 0; i++) {
+			int blk = dev->checkpt_block_list[i];
+			struct yaffs_block_info *bi = NULL;
+
+			if (dev->internal_start_block <= blk &&
+			    blk <= dev->internal_end_block)
+				bi = yaffs_get_block_info(dev, blk);
+			if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
+				bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+		}
+		kfree(dev->checkpt_block_list);
+		dev->checkpt_block_list = NULL;
+	}
+
+	dev->n_free_chunks -=
+	    dev->blocks_in_checkpt * dev->param.chunks_per_block;
+	dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "checkpoint byte count %d",
+		dev->checkpt_byte_count);
+
+	if (dev->checkpt_buffer) {
+		/* free the buffer */
+		kfree(dev->checkpt_buffer);
+		dev->checkpt_buffer = NULL;
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
+{
+ /* Erase the checkpoint data */
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint invalidate of %d blocks",
+ dev->blocks_in_checkpt);
+
+ return yaffs_checkpt_erase(dev);
+}
diff --git a/fs/yaffs2/yaffs_checkptrw.h b/fs/yaffs2/yaffs_checkptrw.h
new file mode 100755
index 00000000..cdbaba71
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum);
+
+int yaffs_checkpt_close(struct yaffs_dev *dev);
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_ecc.c b/fs/yaffs2/yaffs_ecc.c
new file mode 100755
index 00000000..9294107c
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.c
@@ -0,0 +1,281 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two
+ * such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits7..2.
+ * Bit 0 of each entry indicates whether the entry has an odd or even parity,
+ * and therefore this byte's influence on the line parity.
+ */
+
+static const unsigned char column_parity_table[] = {
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc)
+{
+ unsigned int i;
+ unsigned char col_parity = 0;
+ unsigned char line_parity = 0;
+ unsigned char line_parity_prime = 0;
+ unsigned char t;
+ unsigned char b;
+
+ for (i = 0; i < 256; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) { /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+
+ t = 0;
+ if (line_parity & 0x80)
+ t |= 0x80;
+ if (line_parity_prime & 0x80)
+ t |= 0x40;
+ if (line_parity & 0x40)
+ t |= 0x20;
+ if (line_parity_prime & 0x40)
+ t |= 0x10;
+ if (line_parity & 0x20)
+ t |= 0x08;
+ if (line_parity_prime & 0x20)
+ t |= 0x04;
+ if (line_parity & 0x10)
+ t |= 0x02;
+ if (line_parity_prime & 0x10)
+ t |= 0x01;
+ ecc[1] = ~t;
+
+ t = 0;
+ if (line_parity & 0x08)
+ t |= 0x80;
+ if (line_parity_prime & 0x08)
+ t |= 0x40;
+ if (line_parity & 0x04)
+ t |= 0x20;
+ if (line_parity_prime & 0x04)
+ t |= 0x10;
+ if (line_parity & 0x02)
+ t |= 0x08;
+ if (line_parity_prime & 0x02)
+ t |= 0x04;
+ if (line_parity & 0x01)
+ t |= 0x02;
+ if (line_parity_prime & 0x01)
+ t |= 0x01;
+ ecc[0] = ~t;
+
+}
+
+/* Correct the ECC on a 256 byte block of data */
+
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+{
+ unsigned char d0, d1, d2; /* deltas */
+
+ d0 = read_ecc[0] ^ test_ecc[0];
+ d1 = read_ecc[1] ^ test_ecc[1];
+ d2 = read_ecc[2] ^ test_ecc[2];
+
+ if ((d0 | d1 | d2) == 0)
+ return 0; /* no error */
+
+ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+ /* Single bit (recoverable) error in data */
+
+ unsigned byte;
+ unsigned bit;
+
+ bit = byte = 0;
+
+ if (d1 & 0x80)
+ byte |= 0x80;
+ if (d1 & 0x20)
+ byte |= 0x40;
+ if (d1 & 0x08)
+ byte |= 0x20;
+ if (d1 & 0x02)
+ byte |= 0x10;
+ if (d0 & 0x80)
+ byte |= 0x08;
+ if (d0 & 0x20)
+ byte |= 0x04;
+ if (d0 & 0x08)
+ byte |= 0x02;
+ if (d0 & 0x02)
+ byte |= 0x01;
+
+ if (d2 & 0x80)
+ bit |= 0x04;
+ if (d2 & 0x20)
+ bit |= 0x02;
+ if (d2 & 0x08)
+ bit |= 0x01;
+
+ data[byte] ^= (1 << bit);
+
+ return 1; /* Corrected the error */
+ }
+
+ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
+ /* Reccoverable error in ecc */
+
+ read_ecc[0] = test_ecc[0];
+ read_ecc[1] = test_ecc[1];
+ read_ecc[2] = test_ecc[2];
+
+ return 1; /* Corrected the error */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+
+}
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc_other)
+{
+ unsigned int i;
+ unsigned char col_parity = 0;
+ unsigned line_parity = 0;
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+ for (i = 0; i < n_bytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) {
+ /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+
+ }
+
+ ecc_other->col_parity = (col_parity >> 2) & 0x3f;
+ ecc_other->line_parity = line_parity;
+ ecc_other->line_parity_prime = line_parity_prime;
+}
+
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc)
+{
+ unsigned char delta_col; /* column parity delta */
+ unsigned delta_line; /* line parity delta */
+ unsigned delta_line_prime; /* line parity delta */
+ unsigned bit;
+
+ delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
+ delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
+ delta_line_prime =
+ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
+
+ if ((delta_col | delta_line | delta_line_prime) == 0)
+ return 0; /* no error */
+
+ if (delta_line == ~delta_line_prime &&
+ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+
+ if (delta_col & 0x20)
+ bit |= 0x04;
+ if (delta_col & 0x08)
+ bit |= 0x02;
+ if (delta_col & 0x02)
+ bit |= 0x01;
+
+ if (delta_line >= n_bytes)
+ return -1;
+
+ data[delta_line] ^= (1 << bit);
+
+ return 1; /* corrected */
+ }
+
+ if ((hweight32(delta_line) +
+ hweight32(delta_line_prime) +
+ hweight8(delta_col)) == 1) {
+ /* Reccoverable error in ecc */
+
+ *read_ecc = *test_ecc;
+ return 1; /* corrected */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+}
diff --git a/fs/yaffs2/yaffs_ecc.h b/fs/yaffs2/yaffs_ecc.h
new file mode 100755
index 00000000..17d47bd8
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data.
+ * Thus, two such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+struct yaffs_ecc_other {
+ unsigned char col_parity;
+ unsigned line_parity;
+ unsigned line_parity_prime;
+};
+
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc);
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc);
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc);
+#endif
diff --git a/fs/yaffs2/yaffs_getblockinfo.h b/fs/yaffs2/yaffs_getblockinfo.h
new file mode 100755
index 00000000..8fd0802b
--- /dev/null
+++ b/fs/yaffs2/yaffs_getblockinfo.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GETBLOCKINFO_H__
+#define __YAFFS_GETBLOCKINFO_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+
+/* Function to manipulate block info */
+static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
+ *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs: get_block_info block %d is not valid",
+ blk);
+ BUG();
+ }
+ return &dev->block_info[blk - dev->internal_start_block];
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
new file mode 100755
index 00000000..794bef80
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,5059 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_guts.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_tagsmarshall.h"
+#include "yaffs_nand.h"
+#include "yaffs_yaffs1.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_verify.h"
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nameval.h"
+#include "yaffs_allocator.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
+#define YAFFS_GC_GOOD_ENOUGH 2
+#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+#include "yaffs_ecc.h"
+
+/* Forward declarations */
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 *buffer, int n_bytes, int use_reserve);
+
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
+ int buffer_size);
+
+/* Function to calculate chunk and offset */
+
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 *offset_out)
+{
+ int chunk;
+ u32 offset;
+
+ chunk = (u32) (addr >> dev->chunk_shift);
+
+ if (dev->chunk_div == 1) {
+ /* easy power of 2 case */
+ offset = (u32) (addr & dev->chunk_mask);
+ } else {
+ /* Non power-of-2 case */
+
+ loff_t chunk_base;
+
+ chunk /= dev->chunk_div;
+
+ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
+ offset = (u32) (addr - chunk_base);
+ }
+
+ *chunk_out = chunk;
+ *offset_out = offset;
+}
+
+/* Function to return the number of shifts for a power of 2 greater than or
+ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static inline u32 calc_shifts_ceiling(u32 x)
+{
+ int extra_bits;
+ int shifts;
+
+ shifts = extra_bits = 0;
+
+ while (x > 1) {
+ if (x & 1)
+ extra_bits++;
+ x >>= 1;
+ shifts++;
+ }
+
+ if (extra_bits)
+ shifts++;
+
+ return shifts;
+}
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static inline u32 calc_shifts(u32 x)
+{
+ u32 shifts;
+
+ shifts = 0;
+
+ if (!x)
+ return 0;
+
+ while (!(x & 1)) {
+ x >>= 1;
+ shifts++;
+ }
+
+ return shifts;
+}
+
+/*
+ * Temporary buffer manipulations.
+ */
+
+static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
+{
+ int i;
+ u8 *buf = (u8 *) 1;
+
+ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->temp_buffer[i].in_use = 0;
+ buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ dev->temp_buffer[i].buffer = buf;
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+}
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
+{
+ int i;
+
+ dev->temp_in_use++;
+ if (dev->temp_in_use > dev->max_temp)
+ dev->max_temp = dev->temp_in_use;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].in_use == 0) {
+ dev->temp_buffer[i].in_use = 1;
+ return dev->temp_buffer[i].buffer;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
+ /*
+ * If we got here then we have to allocate an unmanaged one
+ * This is not good.
+ */
+
+ dev->unmanaged_buffer_allocs++;
+ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
+
+}
+
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
+{
+ int i;
+
+ dev->temp_in_use--;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer) {
+ dev->temp_buffer[i].in_use = 0;
+ return;
+ }
+ }
+
+ if (buffer) {
+ /* assume it is an unmanaged one. */
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+ "Releasing unmanaged temp buffer");
+ kfree(buffer);
+ dev->unmanaged_buffer_deallocs++;
+ }
+
+}
+
+/*
+ * Functions for robustisizing TODO
+ *
+ */
+
+static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ (void) dev;
+ (void) nand_chunk;
+ (void) data;
+ (void) tags;
+}
+
+static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
+ const struct yaffs_ext_tags *tags)
+{
+ (void) dev;
+ (void) nand_chunk;
+ (void) tags;
+}
+
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+ if (!bi->gc_prioritise) {
+ bi->gc_prioritise = 1;
+ dev->has_pending_prioritised_gc = 1;
+ bi->chunk_error_strikes++;
+
+ if (bi->chunk_error_strikes > 3) {
+ bi->needs_retiring = 1; /* Too many stikes, so retire */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Block struck out");
+
+ }
+ }
+}
+
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+ int erased_ok)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs_handle_chunk_error(dev, bi);
+
+ if (erased_ok) {
+ /* Was an actual write failure,
+ * so mark the block for retirement.*/
+ bi->needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d needs retiring", flash_block);
+ }
+
+ /* Delete the chunk */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+}
+
+/*
+ * Verification code
+ */
+
+/*
+ * Simple hash function. Needs to have a reasonable spread
+ */
+
+static inline int yaffs_hash_fn(int n)
+{
+ if (n < 0)
+ n = -n;
+ return n % YAFFS_NOBJECT_BUCKETS;
+}
+
+/*
+ * Access functions to useful fake objects.
+ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
+{
+ return dev->root_dir;
+}
+
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
+{
+ return dev->lost_n_found;
+}
+
+/*
+ * Erased NAND checking functions
+ */
+
+int yaffs_check_ff(u8 *buffer, int n_bytes)
+{
+ /* Horrible, slow implementation */
+ while (n_bytes--) {
+ if (*buffer != 0xff)
+ return 0;
+ buffer++;
+ }
+ return 1;
+}
+
+static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
+{
+ int retval = YAFFS_OK;
+ u8 *data = yaffs_get_temp_buffer(dev);
+ struct yaffs_ext_tags tags;
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
+ tags.chunk_used) {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not erased", nand_chunk);
+ retval = YAFFS_FAIL;
+ }
+
+ yaffs_release_temp_buffer(dev, data);
+
+ return retval;
+
+}
+
+static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = YAFFS_OK;
+ struct yaffs_ext_tags temp_tags;
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+ if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
+ temp_tags.obj_id != tags->obj_id ||
+ temp_tags.chunk_id != tags->chunk_id ||
+ temp_tags.n_bytes != tags->n_bytes)
+ retval = YAFFS_FAIL;
+
+ yaffs_release_temp_buffer(dev, buffer);
+
+ return retval;
+}
+
+
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
+{
+ int reserved_chunks;
+ int reserved_blocks = dev->param.n_reserved_blocks;
+ int checkpt_blocks;
+
+ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+
+ reserved_chunks =
+ (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
+
+ return (dev->n_free_chunks > (reserved_chunks + n_chunks));
+}
+
+static int yaffs_find_alloc_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_block_info *bi;
+
+ if (dev->n_erased_blocks < 1) {
+ /* Hoosterman we've got a problem.
+ * Can't get space to gc
+ */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no more erased blocks");
+
+ return -1;
+ }
+
+ /* Find an empty block. */
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ dev->alloc_block_finder++;
+ if (dev->alloc_block_finder < dev->internal_start_block
+ || dev->alloc_block_finder > dev->internal_end_block) {
+ dev->alloc_block_finder = dev->internal_start_block;
+ }
+
+ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->seq_number++;
+ bi->seq_number = dev->seq_number;
+ dev->n_erased_blocks--;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Allocated block %d, seq %d, %d left" ,
+ dev->alloc_block_finder, dev->seq_number,
+ dev->n_erased_blocks);
+ return dev->alloc_block_finder;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs tragedy: no more erased blocks, but there should have been %d",
+ dev->n_erased_blocks);
+
+ return -1;
+}
+
+static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
+ struct yaffs_block_info **block_ptr)
+{
+ int ret_val;
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block < 0) {
+ /* Get next block to allocate off */
+ dev->alloc_block = yaffs_find_alloc_block(dev);
+ dev->alloc_page = 0;
+ }
+
+ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
+ /* No space unless we're allowed to use the reserve. */
+ return -1;
+ }
+
+ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+ && dev->alloc_page == 0)
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
+
+ /* Next page please.... */
+ if (dev->alloc_block >= 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+ ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
+ dev->alloc_page;
+ bi->pages_in_use++;
+ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
+
+ dev->alloc_page++;
+
+ dev->n_free_chunks--;
+
+ /* If the block is full set the state to full */
+ if (dev->alloc_page >= dev->param.chunks_per_block) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ if (block_ptr)
+ *block_ptr = bi;
+
+ return ret_val;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
+
+ return -1;
+}
+
+static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
+{
+ int n;
+
+ n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ if (dev->alloc_block > 0)
+ n += (dev->param.chunks_per_block - dev->alloc_page);
+
+ return n;
+
+}
+
+/*
+ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ */
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
+{
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block > 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+ }
+}
+
+static int yaffs_write_new_chunk(struct yaffs_dev *dev,
+ const u8 *data,
+ struct yaffs_ext_tags *tags, int use_reserver)
+{
+ int attempts = 0;
+ int write_ok = 0;
+ int chunk;
+
+ yaffs2_checkpt_invalidate(dev);
+
+ do {
+ struct yaffs_block_info *bi = 0;
+ int erased_ok = 0;
+
+ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
+ if (chunk < 0) {
+ /* no space */
+ break;
+ }
+
+ /* First check this chunk is erased, if it needs
+ * checking. The checking policy (unless forced
+ * always on) is as follows:
+ *
+ * Check the first page we try to write in a block.
+ * If the check passes then we don't need to check any
+ * more. If the check fails, we check again...
+ * If the block has been erased, we don't need to check.
+ *
+ * However, if the block has been prioritised for gc,
+ * then we think there might be something odd about
+ * this block and stop using it.
+ *
+ * Rationale: We should only ever see chunks that have
+ * not been erased if there was a partially written
+ * chunk due to power loss. This checking policy should
+ * catch that case with very few checks and thus save a
+ * lot of checks that are most likely not needed.
+ *
+ * Mods to the above
+ * If an erase check fails or the write fails we skip the
+ * rest of the block.
+ */
+
+ /* let's give it a try */
+ attempts++;
+
+ if (dev->param.always_check_erased)
+ bi->skip_erased_check = 0;
+
+ if (!bi->skip_erased_check) {
+ erased_ok = yaffs_check_chunk_erased(dev, chunk);
+ if (erased_ok != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d was not erased",
+ chunk);
+
+ /* If not erased, delete this one,
+ * skip rest of block and
+ * try another chunk */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+ continue;
+ }
+ }
+
+ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
+
+ if (!bi->skip_erased_check)
+ write_ok =
+ yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+ if (write_ok != YAFFS_OK) {
+ /* Clean up aborted write, skip to next block and
+ * try another chunk */
+ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
+ continue;
+ }
+
+ bi->skip_erased_check = 1;
+
+ /* Copy the data into the robustification buffer */
+ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+ } while (write_ok != YAFFS_OK &&
+ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+ if (!write_ok)
+ chunk = -1;
+
+ if (attempts > 1) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs write required %d attempts",
+ attempts);
+ dev->n_retried_writes += (attempts - 1);
+ }
+
+ return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
+ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to mark bad and erase block %d",
+ flash_block);
+ } else {
+ struct yaffs_ext_tags tags;
+ int chunk_id =
+ flash_block * dev->param.chunks_per_block;
+
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ memset(&tags, 0, sizeof(tags));
+ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
+ if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
+ dev->chunk_offset,
+ buffer,
+ &tags) != YAFFS_OK)
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to write bad block marker to block %d",
+ flash_block);
+
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+ }
+
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ bi->gc_prioritise = 0;
+ bi->needs_retiring = 0;
+
+ dev->n_retired_blocks++;
+}
+
+/*---------------- Name handling functions ------------*/
+
+static u16 yaffs_calc_name_sum(const YCHAR *name)
+{
+ u16 sum = 0;
+ u16 i = 1;
+
+ if (!name)
+ return 0;
+
+ while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
+
+ /* 0x1f mask is case insensitive */
+ sum += ((*name) & 0x1f) * i;
+ i++;
+ name++;
+ }
+ return sum;
+}
+
+
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
+{
+ memset(obj->short_name, 0, sizeof(obj->short_name));
+
+ if (name && !name[0]) {
+ yaffs_fix_null_name(obj, obj->short_name,
+ YAFFS_SHORT_NAME_LENGTH);
+ name = obj->short_name;
+ } else if (name &&
+ strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
+ YAFFS_SHORT_NAME_LENGTH) {
+ strcpy(obj->short_name, name);
+ }
+
+ obj->sum = yaffs_calc_name_sum(name);
+}
+
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
+ memset(tmp_name, 0, sizeof(tmp_name));
+ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_set_obj_name(obj, tmp_name);
+#else
+ yaffs_set_obj_name(obj, oh->name);
+#endif
+}
+
+loff_t yaffs_max_file_size(struct yaffs_dev *dev)
+{
+ if(sizeof(loff_t) < 8)
+ return YAFFS_MAX_FILE_SIZE_32;
+ else
+ return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
+}
+
+/*-------------------- TNODES -------------------
+
+ * List of spare tnodes
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
+
+ if (tn) {
+ memset(tn, 0, dev->tnode_size);
+ dev->n_tnodes++;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ yaffs_free_raw_tnode(dev, tn);
+ dev->n_tnodes--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ yaffs_deinit_raw_tnodes_and_objs(dev);
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+}
+
+static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos, unsigned val)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 mask;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+ val >>= dev->chunk_grp_bits;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ mask = dev->tnode_mask << bit_in_word;
+
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val << bit_in_word));
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;
+ mask =
+ dev->tnode_mask >> bit_in_word;
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val >> bit_in_word));
+ }
+}
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 val;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ val = map[word_in_map] >> bit_in_word;
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;
+ val |= (map[word_in_map] << bit_in_word);
+ }
+
+ val &= dev->tnode_mask;
+ val <<= dev->chunk_grp_bits;
+
+ return val;
+}
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look up tree is represented by the top tnode and the number of top_level
+ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
+
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id)
+{
+ struct yaffs_tnode *tn = file_struct->top;
+ u32 i;
+ int required_depth;
+ int level = file_struct->top_level;
+
+ (void) dev;
+
+ /* Check sane level and chunk Id */
+ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (i) {
+ i >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level)
+ return NULL; /* Not tall enough, so we can't find it */
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+ tn = tn->internal[(chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (level - 1) *
+ YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+ }
+
+ return tn;
+}
+
+/* add_find_tnode_0 finds the level 0 tnode if it exists,
+ * otherwise first expands the tree.
+ * This happens in two steps:
+ * 1. If the tree isn't tall enough, then make it taller.
+ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ * If the tn argument is NULL, then a fresh tnode will be added otherwise the
+ * specified tn will be plugged into the ttree.
+ */
+
+/*
+ * Find the level-0 tnode covering chunk_id, expanding the tree as needed.
+ * If passed_tn is non-NULL it is plugged in at level 0 (freeing any tnode
+ * already there); otherwise missing tnodes are freshly allocated.
+ * Returns the level-0 tnode, or NULL on bad arguments or tnode exhaustion.
+ */
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+					   struct yaffs_file_var *file_struct,
+					   u32 chunk_id,
+					   struct yaffs_tnode *passed_tn)
+{
+	int required_depth;
+	int i;
+	int l;
+	struct yaffs_tnode *tn;
+	u32 x;
+
+	/* Check sane level and page Id */
+	if (file_struct->top_level < 0 ||
+	    file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
+		return NULL;
+
+	if (chunk_id > YAFFS_MAX_CHUNK_ID)
+		return NULL;
+
+	/* First check we're tall enough (ie enough top_level) */
+
+	/* Depth needed = number of internal levels above level 0 that
+	 * chunk_id's high bits require. */
+	x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+	required_depth = 0;
+	while (x) {
+		x >>= YAFFS_TNODES_INTERNAL_BITS;
+		required_depth++;
+	}
+
+	if (required_depth > file_struct->top_level) {
+		/* Not tall enough, gotta make the tree taller */
+		for (i = file_struct->top_level; i < required_depth; i++) {
+
+			/* New root tnode; old tree hangs off internal[0]. */
+			tn = yaffs_get_tnode(dev);
+
+			if (tn) {
+				tn->internal[0] = file_struct->top;
+				file_struct->top = tn;
+				file_struct->top_level++;
+			} else {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"yaffs: no more tnodes");
+				return NULL;
+			}
+		}
+	}
+
+	/* Traverse down to level 0, adding anything we need */
+
+	l = file_struct->top_level;
+	tn = file_struct->top;
+
+	if (l > 0) {
+		while (l > 0 && tn) {
+			/* Index of the child covering chunk_id at level l. */
+			x = (chunk_id >>
+			     (YAFFS_TNODES_LEVEL0_BITS +
+			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+			    YAFFS_TNODES_INTERNAL_MASK;
+
+			if ((l > 1) && !tn->internal[x]) {
+				/* Add missing non-level-zero tnode */
+				tn->internal[x] = yaffs_get_tnode(dev);
+				if (!tn->internal[x])
+					return NULL;
+			} else if (l == 1) {
+				/* Looking from level 1 at level 0 */
+				if (passed_tn) {
+					/* If we already have one, release it */
+					if (tn->internal[x])
+						yaffs_free_tnode(dev,
+							tn->internal[x]);
+					tn->internal[x] = passed_tn;
+
+				} else if (!tn->internal[x]) {
+					/* Don't have one, none passed in */
+					tn->internal[x] = yaffs_get_tnode(dev);
+					if (!tn->internal[x])
+						return NULL;
+				}
+			}
+
+			tn = tn->internal[x];
+			l--;
+		}
+	} else {
+		/* top is level 0: copy the passed tnode's level-0 map over
+		 * the existing top (tnode_width is in bits, hence /8). */
+		if (passed_tn) {
+			memcpy(tn, passed_tn,
+			       (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
+			yaffs_free_tnode(dev, passed_tn);
+		}
+	}
+
+	return tn;
+}
+
+/* Return 1 if the tags identify a live (not deleted) chunk belonging to
+ * obj_id at file position chunk_obj, else 0. */
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
+			    int chunk_obj)
+{
+	return (tags->chunk_id == chunk_obj &&
+		tags->obj_id == obj_id &&
+		!tags->is_deleted) ? 1 : 0;
+
+}
+
+/*
+ * Scan the chunk group starting at the_chunk for the chunk that matches
+ * obj_id/inode_chunk. When the group size is 1 no tag read is needed;
+ * otherwise tags are read from NAND to disambiguate.
+ * Returns the NAND chunk number, or -1 if not found.
+ */
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
+				     struct yaffs_ext_tags *tags, int obj_id,
+				     int inode_chunk)
+{
+	int j;
+
+	for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
+		/* Only consider chunks whose in-use bit is set. */
+		if (yaffs_check_chunk_bit
+		    (dev, the_chunk / dev->param.chunks_per_block,
+		     the_chunk % dev->param.chunks_per_block)) {
+
+			if (dev->chunk_grp_size == 1)
+				return the_chunk;
+			else {
+				yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+							 tags);
+				if (yaffs_tags_match(tags,
+						     obj_id, inode_chunk)) {
+					/* found it; */
+					return the_chunk;
+				}
+			}
+		}
+		the_chunk++;
+	}
+	return -1;
+}
+
+/*
+ * Look up the NAND chunk holding file position inode_chunk of object in.
+ * tags may be NULL (a local buffer is used). Returns the NAND chunk
+ * number, or -1 if not present.
+ */
+static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+				    struct yaffs_ext_tags *tags)
+{
+	/*Get the Tnode, then get the level 0 offset chunk offset */
+	struct yaffs_tnode *tn;
+	int the_chunk = -1;
+	struct yaffs_ext_tags local_tags;
+	int ret_val = -1;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (!tags) {
+		/* Passed a NULL, so use our own tags space */
+		tags = &local_tags;
+	}
+
+	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+	if (!tn)
+		return ret_val;
+
+	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+					    inode_chunk);
+	return ret_val;
+}
+
+/*
+ * Same lookup as yaffs_find_chunk_in_file(), but additionally zeroes the
+ * level-0 tnode entry when the chunk is found, removing it from the file
+ * structure. Returns the NAND chunk number, or -1 if not found.
+ */
+static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
+				     struct yaffs_ext_tags *tags)
+{
+	/* Get the Tnode, then get the level 0 offset chunk offset */
+	struct yaffs_tnode *tn;
+	int the_chunk = -1;
+	struct yaffs_ext_tags local_tags;
+	struct yaffs_dev *dev = in->my_dev;
+	int ret_val = -1;
+
+	if (!tags) {
+		/* Passed a NULL, so use our own tags space */
+		tags = &local_tags;
+	}
+
+	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+	if (!tn)
+		return ret_val;
+
+	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+					    inode_chunk);
+
+	/* Delete the entry in the filestructure (if found) */
+	if (ret_val != -1)
+		yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
+
+	return ret_val;
+}
+
+/*
+ * Record that NAND chunk nand_chunk holds file position inode_chunk of
+ * object in, updating the tnode tree. Handles duplicate detection during
+ * scanning (in_scan != 0). Returns YAFFS_OK or YAFFS_FAIL.
+ *
+ * Review note: the local previously misspelled "existing_cunk" is renamed
+ * to existing_chunk; behaviour is unchanged.
+ */
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+			    int nand_chunk, int in_scan)
+{
+	/* NB in_scan is zero unless scanning.
+	 * For forward scanning, in_scan is > 0;
+	 * for backward scanning in_scan is < 0
+	 *
+	 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
+	 */
+
+	struct yaffs_tnode *tn;
+	struct yaffs_dev *dev = in->my_dev;
+	int existing_chunk;
+	struct yaffs_ext_tags existing_tags;
+	struct yaffs_ext_tags new_tags;
+	unsigned existing_serial, new_serial;
+
+	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
+		/* Just ignore an attempt at putting a chunk into a non-file
+		 * during scanning.
+		 * If it is not during Scanning then something went wrong!
+		 */
+		if (!in_scan) {
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"yaffs tragedy:attempt to put data chunk into a non-file"
+				);
+			BUG();
+		}
+
+		yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+		return YAFFS_OK;
+	}
+
+	/* Make sure the tnode path down to level 0 exists. */
+	tn = yaffs_add_find_tnode_0(dev,
+				    &in->variant.file_variant,
+				    inode_chunk, NULL);
+	if (!tn)
+		return YAFFS_FAIL;
+
+	if (!nand_chunk)
+		/* Dummy insert, bail now */
+		return YAFFS_OK;
+
+	existing_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+	if (in_scan != 0) {
+		/* If we're scanning then we need to test for duplicates
+		 * NB This does not need to be efficient since it should only
+		 * happen when the power fails during a write, then only one
+		 * chunk should ever be affected.
+		 *
+		 * Correction for YAFFS2: This could happen quite a lot and we
+		 * need to think about efficiency! TODO
+		 * Update: For backward scanning we don't need to re-read tags
+		 * so this is quite cheap.
+		 */
+
+		if (existing_chunk > 0) {
+			/* NB Right now existing chunk will not be real
+			 * chunk_id if the chunk group size > 1
+			 * thus we have to do a FindChunkInFile to get the
+			 * real chunk id.
+			 *
+			 * We have a duplicate now we need to decide which
+			 * one to use:
+			 *
+			 * Backwards scanning YAFFS2: The old one is what
+			 * we use, dump the new one.
+			 * YAFFS1: Get both sets of tags and compare serial
+			 * numbers.
+			 */
+
+			if (in_scan > 0) {
+				/* Only do this for forward scanning */
+				yaffs_rd_chunk_tags_nand(dev,
+							 nand_chunk,
+							 NULL, &new_tags);
+
+				/* Do a proper find */
+				existing_chunk =
+				    yaffs_find_chunk_in_file(in, inode_chunk,
+							     &existing_tags);
+			}
+
+			if (existing_chunk <= 0) {
+				/*Hoosterman - how did this happen? */
+
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"yaffs tragedy: existing chunk < 0 in scan"
+					);
+
+			}
+
+			/* NB The deleted flags should be false, otherwise
+			 * the chunks will not be loaded during a scan
+			 */
+
+			if (in_scan > 0) {
+				new_serial = new_tags.serial_number;
+				existing_serial = existing_tags.serial_number;
+			}
+
+			/* Serial numbers wrap modulo 4 (YAFFS1 style). */
+			if ((in_scan > 0) &&
+			    (existing_chunk <= 0 ||
+			     ((existing_serial + 1) & 3) == new_serial)) {
+				/* Forward scanning.
+				 * Use new
+				 * Delete the old one and drop through to
+				 * update the tnode
+				 */
+				yaffs_chunk_del(dev, existing_chunk, 1,
+						__LINE__);
+			} else {
+				/* Backward scanning or we want to use the
+				 * existing one
+				 * Delete the new one and return early so that
+				 * the tnode isn't changed
+				 */
+				yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+				return YAFFS_OK;
+			}
+		}
+
+	}
+
+	/* First chunk ever recorded at this position. */
+	if (existing_chunk == 0)
+		in->n_data_chunks++;
+
+	yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+	return YAFFS_OK;
+}
+
+/* Soft-delete one chunk: bump its block's soft-delete count, count it as
+ * free, and refresh the oldest-dirty sequence bookkeeping. */
+static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
+{
+	struct yaffs_block_info *the_block;
+	unsigned block_no;
+
+	yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
+
+	block_no = chunk / dev->param.chunks_per_block;
+	the_block = yaffs_get_block_info(dev, block_no);
+	if (the_block) {
+		the_block->soft_del_pages++;
+		dev->n_free_chunks++;
+		yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
+	}
+}
+
+/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
+ * the chunks in the file.
+ * All soft deleting does is increment the block's softdelete count and pulls
+ * the chunk out of the tnode.
+ * Thus, essentially this is the same as DeleteWorker except that the chunks
+ * are soft deleted.
+ */
+
+/* Recursive worker; frees each sub-tnode once its subtree is fully
+ * soft-deleted. Returns 1 when this subtree is completely done. */
+static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
+				 u32 level, int chunk_offset)
+{
+	int i;
+	int the_chunk;
+	int all_done = 1;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (!tn)
+		return 1;
+
+	if (level > 0) {
+		/* Walk children in reverse, stopping if a subtree fails. */
+		for (i = YAFFS_NTNODES_INTERNAL - 1;
+		     all_done && i >= 0;
+		     i--) {
+			if (tn->internal[i]) {
+				all_done =
+				    yaffs_soft_del_worker(in,
+					tn->internal[i],
+					level - 1,
+					(chunk_offset <<
+					 YAFFS_TNODES_INTERNAL_BITS)
+					+ i);
+				if (all_done) {
+					yaffs_free_tnode(dev,
+						tn->internal[i]);
+					tn->internal[i] = NULL;
+				} else {
+					/* Can this happen? */
+				}
+			}
+		}
+		return (all_done) ? 1 : 0;
+	}
+
+	/* level 0: soft-delete every live chunk and zero its entry. */
+	for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+		the_chunk = yaffs_get_group_base(dev, tn, i);
+		if (the_chunk) {
+			yaffs_soft_del_chunk(dev, the_chunk);
+			yaffs_load_tnode_0(dev, tn, i, 0);
+		}
+	}
+	return 1;
+}
+
+/* Detach obj from its parent directory's child list (notifying the
+ * OS-glue remove_obj_fn hook first) and clear obj->parent. */
+static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	struct yaffs_obj *parent;
+
+	yaffs_verify_obj_in_dir(obj);
+	parent = obj->parent;
+
+	yaffs_verify_dir(parent);
+
+	if (dev && dev->param.remove_obj_fn)
+		dev->param.remove_obj_fn(obj);
+
+	list_del_init(&obj->siblings);
+	obj->parent = NULL;
+
+	yaffs_verify_dir(parent);
+}
+
+/*
+ * Move obj into directory (removing it from its current parent first).
+ * Objects moved into the unlinked or deleted directories are flagged
+ * unlinked and barred from rename.
+ */
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
+{
+	if (!directory) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: Trying to add an object to a null pointer directory"
+			);
+		BUG();
+		return;
+	}
+	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: Trying to add an object to a non-directory"
+			);
+		BUG();
+	}
+
+	if (obj->siblings.prev == NULL) {
+		/* Not initialised */
+		BUG();
+	}
+
+	yaffs_verify_dir(directory);
+
+	yaffs_remove_obj_from_dir(obj);
+
+	/* Now add it */
+	list_add(&obj->siblings, &directory->variant.dir_variant.children);
+	obj->parent = directory;
+
+	/* Entering unlinked/deleted dirs marks the object unlinked. */
+	if (directory == obj->my_dev->unlinked_dir
+	    || directory == obj->my_dev->del_dir) {
+		obj->unlinked = 1;
+		obj->my_dev->n_unlinked_files++;
+		obj->rename_allowed = 0;
+	}
+
+	yaffs_verify_dir(directory);
+	yaffs_verify_obj_in_dir(obj);
+}
+
+/*
+ * Rename/move obj to new_name under new_dir (NULL keeps the current
+ * parent). Duplicate names are tolerated only for unlink/delete moves,
+ * force, or when shadowing. Writes an updated object header; a delete
+ * move is recorded as a shrink header for gc.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+static int yaffs_change_obj_name(struct yaffs_obj *obj,
+				 struct yaffs_obj *new_dir,
+				 const YCHAR *new_name, int force, int shadows)
+{
+	int unlink_op;
+	int del_op;
+	struct yaffs_obj *existing_target;
+
+	if (new_dir == NULL)
+		new_dir = obj->parent;	/* use the old directory */
+
+	if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: yaffs_change_obj_name: new_dir is not a directory"
+			);
+		BUG();
+	}
+
+	unlink_op = (new_dir == obj->my_dev->unlinked_dir);
+	del_op = (new_dir == obj->my_dev->del_dir);
+
+	existing_target = yaffs_find_by_name(new_dir, new_name);
+
+	/* If the object is a file going into the unlinked directory,
+	 * then it is OK to just stuff it in since duplicate names are OK.
+	 * else only proceed if the new name does not exist and we're putting
+	 * it into a directory.
+	 */
+	if (!(unlink_op || del_op || force ||
+	      shadows > 0 || !existing_target) ||
+	    new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+		return YAFFS_FAIL;
+
+	yaffs_set_obj_name(obj, new_name);
+	obj->dirty = 1;
+	yaffs_add_obj_to_dir(new_dir, obj);
+
+	if (unlink_op)
+		obj->unlinked = 1;
+
+	/* If it is a deletion then we mark it as a shrink for gc */
+	if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
+		return YAFFS_OK;
+
+	return YAFFS_FAIL;
+}
+
+/*------------------------ Short Operations Cache ------------------------------
+ * In many situations where there is no high level buffering a lot of
+ * reads might be short sequential reads, and a lot of writes may be short
+ * sequential writes. eg. scanning/writing a jpeg file.
+ * In these cases, a short read/write cache can provide a huge performance
+ * benefit with dumb-as-a-rock code.
+ * In Linux, the page cache provides read buffering and the short op cache
+ * provides write buffering.
+ *
+ * There are a small number (~10) of cache chunks per device so that we don't
+ * need a very intelligent search.
+ */
+
+/* Return 1 if any cache entry belonging to obj is dirty, else 0. */
+static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	int i;
+	struct yaffs_cache *cache;
+	int n_caches = obj->my_dev->param.n_caches;
+
+	for (i = 0; i < n_caches; i++) {
+		cache = &dev->cache[i];
+		if (cache->object == obj && cache->dirty)
+			return 1;
+	}
+
+	return 0;
+}
+
+/* Write out every dirty, unlocked cache chunk belonging to obj, in
+ * ascending chunk order, releasing each entry as it is written. */
+static void yaffs_flush_file_cache(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	int lowest = -99;	/* Stop compiler whining. */
+	int i;
+	struct yaffs_cache *cache;
+	int chunk_written = 0;
+	int n_caches = obj->my_dev->param.n_caches;
+
+	if (n_caches < 1)
+		return;
+	do {
+		cache = NULL;
+
+		/* Find the lowest dirty chunk for this object */
+		for (i = 0; i < n_caches; i++) {
+			if (dev->cache[i].object == obj &&
+			    dev->cache[i].dirty) {
+				if (!cache ||
+				    dev->cache[i].chunk_id < lowest) {
+					cache = &dev->cache[i];
+					lowest = cache->chunk_id;
+				}
+			}
+		}
+
+		if (cache && !cache->locked) {
+			/* Write it out and free it up */
+			chunk_written =
+			    yaffs_wr_data_obj(cache->object,
+					      cache->chunk_id,
+					      cache->data,
+					      cache->n_bytes, 1);
+			cache->dirty = 0;
+			cache->object = NULL;
+		}
+	} while (cache && chunk_written > 0);
+
+	/* Loop exits with cache set if a write failed (chunk_written <= 0). */
+	if (cache)
+		/* Hoosterman, disk full while writing cache out. */
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"yaffs tragedy: no space during cache write");
+}
+
+/* yaffs_flush_whole_cache(dev)
+ *
+ * Flush the short-op cache for every object that has dirty entries,
+ * repeating until no dirty object remains.
+ */
+
+void yaffs_flush_whole_cache(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	int n_caches = dev->param.n_caches;
+	int i;
+
+	/* Find a dirty object in the cache and flush it...
+	 * until there are no further dirty objects.
+	 */
+	do {
+		obj = NULL;
+		for (i = 0; i < n_caches && !obj; i++) {
+			if (dev->cache[i].object && dev->cache[i].dirty)
+				obj = dev->cache[i].object;
+		}
+		if (obj)
+			yaffs_flush_file_cache(obj);
+	} while (obj);
+
+}
+
+/* Grab us a cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one...., flush and look again.
+ */
+
+/* Return the first unused cache entry, or NULL if all are in use. */
+static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
+{
+	int i;
+
+	if (dev->param.n_caches > 0) {
+		for (i = 0; i < dev->param.n_caches; i++) {
+			if (!dev->cache[i].object)
+				return &dev->cache[i];
+		}
+	}
+	return NULL;
+}
+
+/* Obtain a cache entry, evicting (flushing) the LRU entry's object if
+ * necessary. May still return NULL if everything is locked. */
+static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
+{
+	struct yaffs_cache *cache;
+	struct yaffs_obj *the_obj;
+	int usage;
+	int i;
+	int pushout;
+
+	if (dev->param.n_caches < 1)
+		return NULL;
+
+	/* Try find a non-dirty one... */
+
+	cache = yaffs_grab_chunk_worker(dev);
+
+	if (!cache) {
+		/* They were all dirty, find the LRU object and flush
+		 * its cache, then find again.
+		 * NB what's here is not very accurate,
+		 * we actually flush the object with the LRU chunk.
+		 */
+
+		/* With locking we can't assume we can use entry zero,
+		 * Set the_obj to a valid pointer for Coverity. */
+		the_obj = dev->cache[0].object;
+		usage = -1;
+		cache = NULL;
+		pushout = -1;
+
+		/* Find the unlocked entry with the smallest last_use. */
+		for (i = 0; i < dev->param.n_caches; i++) {
+			if (dev->cache[i].object &&
+			    !dev->cache[i].locked &&
+			    (dev->cache[i].last_use < usage ||
+			     !cache)) {
+				usage = dev->cache[i].last_use;
+				the_obj = dev->cache[i].object;
+				cache = &dev->cache[i];
+				pushout = i;
+			}
+		}
+
+		if (!cache || cache->dirty) {
+			/* Flush and try again */
+			yaffs_flush_file_cache(the_obj);
+			cache = yaffs_grab_chunk_worker(dev);
+		}
+	}
+	return cache;
+}
+
+/* Find a cached chunk */
+/* Return the cache entry holding (obj, chunk_id), counting a cache hit;
+ * NULL when not cached or caching is disabled. */
+static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
+						  int chunk_id)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	int i;
+
+	if (dev->param.n_caches < 1)
+		return NULL;
+
+	for (i = 0; i < dev->param.n_caches; i++) {
+		if (dev->cache[i].object == obj &&
+		    dev->cache[i].chunk_id == chunk_id) {
+			dev->cache_hits++;
+
+			return &dev->cache[i];
+		}
+	}
+	return NULL;
+}
+
+/* Mark the chunk for the least recently used algorithm */
+static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
+			    int is_write)
+{
+	int i;
+
+	if (dev->param.n_caches < 1)
+		return;
+
+	/* Reset all usage counters before the global counter overflows. */
+	if (dev->cache_last_use < 0 ||
+	    dev->cache_last_use > 100000000) {
+		/* Reset the cache usages */
+		for (i = 1; i < dev->param.n_caches; i++)
+			dev->cache[i].last_use = 0;
+
+		dev->cache_last_use = 0;
+	}
+	dev->cache_last_use++;
+	cache->last_use = dev->cache_last_use;
+
+	if (is_write)
+		cache->dirty = 1;
+}
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
+{
+	struct yaffs_cache *cache;
+
+	if (object->my_dev->param.n_caches > 0) {
+		cache = yaffs_find_chunk_cache(object, chunk_id);
+
+		/* Clearing ->object frees the entry without writing it. */
+		if (cache)
+			cache->object = NULL;
+	}
+}
+
+/* Invalidate all the cache pages associated with this object
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
+{
+	int i;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (dev->param.n_caches > 0) {
+		/* Invalidate it. */
+		for (i = 0; i < dev->param.n_caches; i++) {
+			if (dev->cache[i].object == in)
+				dev->cache[i].object = NULL;
+		}
+	}
+}
+
+/* Remove obj from its obj-id hash bucket, if it is currently linked in. */
+static void yaffs_unhash_obj(struct yaffs_obj *obj)
+{
+	int bucket;
+	struct yaffs_dev *dev = obj->my_dev;
+
+	/* If it is still linked into the bucket list, free from the list */
+	if (!list_empty(&obj->hash_link)) {
+		list_del_init(&obj->hash_link);
+		bucket = yaffs_hash_fn(obj->obj_id);
+		dev->obj_bucket[bucket].count--;
+	}
+}
+
+/* FreeObject frees up a Object and puts it back on the free list */
+/* If the object is still attached to a cached inode the free is deferred
+ * until yaffs_handle_defered_free() runs; otherwise it is unhashed and
+ * returned to the raw object allocator immediately. */
+static void yaffs_free_obj(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev;
+
+	if (!obj) {
+		BUG();
+		return;
+	}
+	dev = obj->my_dev;
+	yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
+		obj, obj->my_inode);
+	/* Must already be detached from any directory. */
+	if (obj->parent)
+		BUG();
+	if (!list_empty(&obj->siblings))
+		BUG();
+
+	if (obj->my_inode) {
+		/* We're still hooked up to a cached inode.
+		 * Don't delete now, but mark for later deletion
+		 */
+		obj->defered_free = 1;
+		return;
+	}
+
+	yaffs_unhash_obj(obj);
+
+	yaffs_free_raw_obj(dev, obj);
+	dev->n_obj--;
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+}
+
+/* Complete a free that was deferred because the inode was still cached. */
+void yaffs_handle_defered_free(struct yaffs_obj *obj)
+{
+	if (obj->defered_free)
+		yaffs_free_obj(obj);
+}
+
+/* Delete an object of any type: drop its cache pages, move it to the
+ * deleted directory (yaffs2, for a deletion record), detach it, delete
+ * its header chunk and free the in-memory object. Returns YAFFS_OK. */
+static int yaffs_generic_obj_del(struct yaffs_obj *in)
+{
+	/* Invalidate the file's data in the cache, without flushing. */
+	yaffs_invalidate_whole_cache(in);
+
+	if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
+		/* Move to unlinked directory so we have a deletion record */
+		yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
+				      0);
+	}
+
+	yaffs_remove_obj_from_dir(in);
+	yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
+	in->hdr_chunk = 0;
+
+	yaffs_free_obj(in);
+	return YAFFS_OK;
+
+}
+
+/* Soft-delete a deleted file: an empty file is destroyed outright; a
+ * file with data has all its chunks soft-deleted and is flagged, leaving
+ * final reclamation to garbage collection. */
+static void yaffs_soft_del_file(struct yaffs_obj *obj)
+{
+	if (!obj->deleted ||
+	    obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
+	    obj->soft_del)
+		return;
+
+	if (obj->n_data_chunks <= 0) {
+		/* Empty file with no duplicate object headers,
+		 * just delete it immediately */
+		yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
+		obj->variant.file_variant.top = NULL;
+		yaffs_trace(YAFFS_TRACE_TRACING,
+			"yaffs: Deleting empty file %d",
+			obj->obj_id);
+		yaffs_generic_obj_del(obj);
+	} else {
+		yaffs_soft_del_worker(obj,
+				      obj->variant.file_variant.top,
+				      obj->variant.
+				      file_variant.top_level, 0);
+		obj->soft_del = 1;
+	}
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (ie that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
+ */
+
+/* Recursively prune an empty subtree. del0 controls whether an empty
+ * tnode at position 0 may itself be freed (the leftmost spine is kept
+ * so the tree height can be reduced by yaffs_prune_tree). */
+static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
+					      struct yaffs_tnode *tn, u32 level,
+					      int del0)
+{
+	int i;
+	int has_data;
+
+	if (!tn)
+		return tn;
+
+	has_data = 0;
+
+	if (level > 0) {
+		for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+			if (tn->internal[i]) {
+				tn->internal[i] =
+				    yaffs_prune_worker(dev,
+						       tn->internal[i],
+						       level - 1,
+						       (i == 0) ? del0 : 1);
+			}
+
+			if (tn->internal[i])
+				has_data++;
+		}
+	} else {
+		/* Level 0: treat the tnode as a raw word array and look
+		 * for any non-zero chunk entry. */
+		int tnode_size_u32 = dev->tnode_size / sizeof(u32);
+		u32 *map = (u32 *) tn;
+
+		for (i = 0; !has_data && i < tnode_size_u32; i++) {
+			if (map[i])
+				has_data++;
+		}
+	}
+
+	if (has_data == 0 && del0) {
+		/* Free and return NULL */
+		yaffs_free_tnode(dev, tn);
+		tn = NULL;
+	}
+	return tn;
+}
+
+/* Prune a file's tnode tree after shrinking, then collapse the root
+ * while only its 0th child is populated to reduce tree height.
+ * Returns YAFFS_OK. */
+static int yaffs_prune_tree(struct yaffs_dev *dev,
+			    struct yaffs_file_var *file_struct)
+{
+	int i;
+	int has_data;
+	int done = 0;
+	struct yaffs_tnode *tn;
+
+	if (file_struct->top_level < 1)
+		return YAFFS_OK;
+
+	file_struct->top =
+	    yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
+
+	/* Now we have a tree with all the non-zero branches NULL but
+	 * the height is the same as it was.
+	 * Let's see if we can trim internal tnodes to shorten the tree.
+	 * We can do this if only the 0th element in the tnode is in use
+	 * (ie all the non-zero are NULL)
+	 */
+
+	while (file_struct->top_level && !done) {
+		tn = file_struct->top;
+
+		has_data = 0;
+		for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+			if (tn->internal[i])
+				has_data++;
+		}
+
+		if (!has_data) {
+			file_struct->top = tn->internal[0];
+			file_struct->top_level--;
+			yaffs_free_tnode(dev, tn);
+		} else {
+			done = 1;
+		}
+	}
+
+	return YAFFS_OK;
+}
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+/* alloc_empty_obj gets us a clean Object.*/
+/* Allocate and zero a yaffs_obj, initialise its list heads, parent it
+ * under root and place it in lost-n-found (when those directories exist).
+ * Returns NULL when the raw allocator is exhausted. */
+static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
+
+	if (!obj)
+		return obj;
+
+	dev->n_obj++;
+
+	/* Now sweeten it up... */
+
+	memset(obj, 0, sizeof(struct yaffs_obj));
+	obj->being_created = 1;
+
+	obj->my_dev = dev;
+	obj->hdr_chunk = 0;
+	obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
+	INIT_LIST_HEAD(&(obj->hard_links));
+	INIT_LIST_HEAD(&(obj->hash_link));
+	INIT_LIST_HEAD(&obj->siblings);
+
+	/* Now make the directory sane */
+	if (dev->root_dir) {
+		obj->parent = dev->root_dir;
+		list_add(&(obj->siblings),
+			 &dev->root_dir->variant.dir_variant.children);
+	}
+
+	/* Add it to the lost and found directory.
+	 * NB Can't put root or lost-n-found in lost-n-found so
+	 * check if lost-n-found exists first
+	 */
+	if (dev->lost_n_found)
+		yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+
+	obj->being_created = 0;
+
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+
+	return obj;
+}
+
+/* Pick a hash bucket with a short list: probe up to 10 buckets via the
+ * rotating bucket_finder and return the least-populated one found
+ * (stopping early once a bucket with fewer than 5 entries turns up). */
+static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
+{
+	int i;
+	int l = 999;
+	int lowest = 999999;
+
+	/* Search for the shortest list or one that
+	 * isn't too long.
+	 */
+
+	for (i = 0; i < 10 && lowest > 4; i++) {
+		dev->bucket_finder++;
+		dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
+		if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
+			lowest = dev->obj_bucket[dev->bucket_finder].count;
+			l = dev->bucket_finder;
+		}
+	}
+
+	return l;
+}
+
+/* Allocate a fresh object id: pick a bucket, then step through ids that
+ * hash to that bucket (n += YAFFS_NOBJECT_BUCKETS) until one not already
+ * present in the bucket's list is found. */
+static int yaffs_new_obj_id(struct yaffs_dev *dev)
+{
+	int bucket = yaffs_find_nice_bucket(dev);
+	int found = 0;
+	struct list_head *i;
+	u32 n = (u32) bucket;
+
+	/* Now find an object value that has not already been taken
+	 * by scanning the list.
+	 */
+
+	while (!found) {
+		found = 1;
+		n += YAFFS_NOBJECT_BUCKETS;
+		/* NOTE: the "1 ||" deliberately disables the count>0
+		 * shortcut, so the list is always scanned. */
+		if (1 || dev->obj_bucket[bucket].count > 0) {
+			list_for_each(i, &dev->obj_bucket[bucket].list) {
+				/* If there is already one in the list */
+				if (i && list_entry(i, struct yaffs_obj,
+						    hash_link)->obj_id == n) {
+					found = 0;
+				}
+			}
+		}
+	}
+	return n;
+}
+
+/* Insert in into the hash bucket selected by its obj_id. */
+static void yaffs_hash_obj(struct yaffs_obj *in)
+{
+	int bucket = yaffs_hash_fn(in->obj_id);
+	struct yaffs_dev *dev = in->my_dev;
+
+	list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
+	dev->obj_bucket[bucket].count++;
+}
+
+/* Look up an object by id in the hash table. Objects pending a deferred
+ * free are hidden (NULL is returned for them). */
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
+{
+	int bucket = yaffs_hash_fn(number);
+	struct list_head *i;
+	struct yaffs_obj *in;
+
+	list_for_each(i, &dev->obj_bucket[bucket].list) {
+		/* Look if it is in the list */
+		in = list_entry(i, struct yaffs_obj, hash_link);
+		if (in->obj_id == number) {
+			/* Don't show if it is defered free */
+			if (in->defered_free)
+				return NULL;
+			return in;
+		}
+	}
+
+	return NULL;
+}
+
+/* Create a new in-memory object of the given type. number < 0 requests
+ * a freshly allocated id. Files get a level-0 tnode up front so creation
+ * cannot fail later for want of tnodes. Returns NULL on allocation
+ * failure. */
+static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
+				       enum yaffs_obj_type type)
+{
+	struct yaffs_obj *the_obj = NULL;
+	struct yaffs_tnode *tn = NULL;
+
+	if (number < 0)
+		number = yaffs_new_obj_id(dev);
+
+	if (type == YAFFS_OBJECT_TYPE_FILE) {
+		tn = yaffs_get_tnode(dev);
+		if (!tn)
+			return NULL;
+	}
+
+	the_obj = yaffs_alloc_empty_obj(dev);
+	if (!the_obj) {
+		/* Don't leak the pre-allocated tnode. */
+		if (tn)
+			yaffs_free_tnode(dev, tn);
+		return NULL;
+	}
+
+	the_obj->fake = 0;
+	the_obj->rename_allowed = 1;
+	the_obj->unlink_allowed = 1;
+	the_obj->obj_id = number;
+	yaffs_hash_obj(the_obj);
+	the_obj->variant_type = type;
+	yaffs_load_current_time(the_obj, 1, 1);
+
+	switch (type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		the_obj->variant.file_variant.file_size = 0;
+		the_obj->variant.file_variant.scanned_size = 0;
+		the_obj->variant.file_variant.shrink_size =
+		    yaffs_max_file_size(dev);
+		the_obj->variant.file_variant.top_level = 0;
+		the_obj->variant.file_variant.top = tn;
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
+		INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
+		break;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		/* No action required */
+		break;
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+		/* todo this should not happen */
+		break;
+	}
+	return the_obj;
+}
+
+/* Create a "fake" directory (root, lost-n-found, unlinked, deleted...):
+ * it lives only in RAM, has no header chunk on NAND and cannot be
+ * renamed or unlinked. */
+static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
+					       int number, u32 mode)
+{
+
+	struct yaffs_obj *obj =
+	    yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+
+	if (!obj)
+		return NULL;
+
+	obj->fake = 1;	/* it is fake so it might not use NAND */
+	obj->rename_allowed = 0;
+	obj->unlink_allowed = 0;
+	obj->deleted = 0;
+	obj->unlinked = 0;
+	obj->yst_mode = mode;
+	obj->my_dev = dev;
+	obj->hdr_chunk = 0;	/* Not a valid chunk. */
+	return obj;
+
+}
+
+
+/* Reset object/tnode accounting and empty every hash bucket; called
+ * during device initialisation. */
+static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	int i;
+
+	dev->n_obj = 0;
+	dev->n_tnodes = 0;
+	yaffs_init_raw_tnodes_and_objs(dev);
+
+	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+		INIT_LIST_HEAD(&dev->obj_bucket[i].list);
+		dev->obj_bucket[i].count = 0;
+	}
+}
+
+/* Return the object with the given id, creating a new one of the given
+ * type if it does not exist yet (used heavily during scanning). */
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+						 int number,
+						 enum yaffs_obj_type type)
+{
+	struct yaffs_obj *the_obj = NULL;
+
+	if (number > 0)
+		the_obj = yaffs_find_by_number(dev, number);
+
+	if (!the_obj)
+		the_obj = yaffs_new_obj(dev, number, type);
+
+	return the_obj;
+
+}
+
+/* Duplicate str (NULL maps to the empty string) into a freshly
+ * kmalloc'd, NUL-terminated buffer, truncating at
+ * YAFFS_MAX_ALIAS_LENGTH. Caller owns and must kfree the result.
+ * Returns NULL on allocation failure. */
+YCHAR *yaffs_clone_str(const YCHAR *str)
+{
+	YCHAR *new_str = NULL;
+	int len;
+
+	if (!str)
+		str = _Y("");
+
+	len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
+	new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
+	if (new_str) {
+		/* strncpy is safe here: termination is added explicitly. */
+		strncpy(new_str, str, len);
+		new_str[len] = 0;
+	}
+	return new_str;
+
+}
+/*
+ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
+ * new link (ie. name) is created or deleted in the directory.
+ *
+ * ie.
+ * create dir/a : update dir's mtime/ctime
+ * rm dir/a: update dir's mtime/ctime
+ * modify dir/a: don't update dir's mtime/ctime
+ *
+ * This can be handled immediately or defered. Defering helps reduce the number
+ * of updates when many files in a directory are changed within a brief period.
+ *
+ * If the directory updating is defered then yaffs_update_dirty_dirs must be
+ * called periodically.
+ */
+
+/* Mark a directory dirty and either write its header now or queue it on
+ * dev->dirty_dirs for the deferred path. */
+static void yaffs_update_parent(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev;
+
+	if (!obj)
+		return;
+	dev = obj->my_dev;
+	obj->dirty = 1;
+	yaffs_load_current_time(obj, 0, 1);
+	if (dev->param.defered_dir_update) {
+		struct list_head *link = &obj->variant.dir_variant.dirty;
+
+		/* Queue only if not already on the dirty list. */
+		if (list_empty(link)) {
+			list_add(link, &dev->dirty_dirs);
+			yaffs_trace(YAFFS_TRACE_BACKGROUND,
+				"Added object %d to dirty directories",
+				obj->obj_id);
+		}
+
+	} else {
+		yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+	}
+}
+
+/* Drain dev->dirty_dirs, writing an updated object header for every
+ * queued directory that is still dirty. */
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
+{
+	struct list_head *link;
+	struct yaffs_obj *obj;
+	struct yaffs_dir_var *d_s;
+	union yaffs_obj_var *o_v;
+
+	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
+
+	while (!list_empty(&dev->dirty_dirs)) {
+		link = dev->dirty_dirs.next;
+		list_del_init(link);
+
+		/* Recover the owning object from the embedded list head. */
+		d_s = list_entry(link, struct yaffs_dir_var, dirty);
+		o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
+		obj = list_entry(o_v, struct yaffs_obj, variant);
+
+		yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
+			obj->obj_id);
+
+		if (obj->dirty)
+			yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+	}
+}
+
+/*
+ * Mknod (create) a new object.
+ * equiv_obj only has meaning for a hard link;
+ * alias_str only has meaning for a symlink.
+ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+/* Common creation path for files, directories, symlinks, hardlinks and
+ * specials. Fails (returns NULL) on duplicate name, allocation failure,
+ * or failure to write the object header. */
+static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
+					  struct yaffs_obj *parent,
+					  const YCHAR *name,
+					  u32 mode,
+					  u32 uid,
+					  u32 gid,
+					  struct yaffs_obj *equiv_obj,
+					  const YCHAR *alias_str, u32 rdev)
+{
+	struct yaffs_obj *in;
+	YCHAR *str = NULL;
+	struct yaffs_dev *dev = parent->my_dev;
+
+	/* Check if the entry exists.
+	 * If it does then fail the call since we don't want a dup. */
+	if (yaffs_find_by_name(parent, name))
+		return NULL;
+
+	if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+		str = yaffs_clone_str(alias_str);
+		if (!str)
+			return NULL;
+	}
+
+	in = yaffs_new_obj(dev, -1, type);
+
+	if (!in) {
+		kfree(str);
+		return NULL;
+	}
+
+	in->hdr_chunk = 0;
+	in->valid = 1;
+	in->variant_type = type;
+
+	in->yst_mode = mode;
+
+	yaffs_attribs_init(in, gid, uid, rdev);
+
+	in->n_data_chunks = 0;
+
+	yaffs_set_obj_name(in, name);
+	in->dirty = 1;
+
+	yaffs_add_obj_to_dir(parent, in);
+
+	in->my_dev = parent->my_dev;
+
+	switch (type) {
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		/* Ownership of the cloned alias passes to the object. */
+		in->variant.symlink_variant.alias = str;
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		in->variant.hardlink_variant.equiv_obj = equiv_obj;
+		in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
+		list_add(&in->hard_links, &equiv_obj->hard_links);
+		break;
+	case YAFFS_OBJECT_TYPE_FILE:
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+		/* do nothing */
+		break;
+	}
+
+	if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
+		/* Could not create the object header, fail */
+		yaffs_del_obj(in);
+		in = NULL;
+	}
+
+	/* A successful create touches the parent's mtime/ctime. */
+	if (in)
+		yaffs_update_parent(parent);
+
+	return in;
+}
+
+/* Convenience wrapper: create a regular file under parent. */
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+				    const YCHAR *name, u32 mode, u32 uid,
+				    u32 gid)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+				uid, gid, NULL, NULL, 0);
+}
+
+/* Convenience wrapper: create a directory under parent. */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+				   u32 mode, u32 uid, u32 gid)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+				mode, uid, gid, NULL, NULL, 0);
+}
+
+/* Convenience wrapper: create a special (eg. device) node; rdev carries
+ * the device number. */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+				       const YCHAR *name, u32 mode, u32 uid,
+				       u32 gid, u32 rdev)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+				uid, gid, NULL, NULL, rdev);
+}
+
+/* Convenience wrapper: create a symlink whose target is alias. */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+				       const YCHAR *name, u32 mode, u32 uid,
+				       u32 gid, const YCHAR *alias)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+				uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_link_obj returns the object id of the equivalent object.*/
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+				 struct yaffs_obj *equiv_obj)
+{
+	/* Get the real object in case we were fed a hard link obj */
+	equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+	if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
+			     parent, name, 0, 0, 0,
+			     equiv_obj, NULL, 0))
+		return equiv_obj;
+
+	return NULL;
+
+}
+
+
+
+/*---------------------- Block Management and Page Allocation -------------*/
+
+/* Free the block-info and chunk-bit arrays, using vfree or kfree to
+ * match whichever allocator succeeded in yaffs_init_blocks(). */
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+	if (dev->block_info_alt && dev->block_info)
+		vfree(dev->block_info);
+	else
+		kfree(dev->block_info);
+
+	dev->block_info_alt = 0;
+
+	dev->block_info = NULL;
+
+	if (dev->chunk_bits_alt && dev->chunk_bits)
+		vfree(dev->chunk_bits);
+	else
+		kfree(dev->chunk_bits);
+	dev->chunk_bits_alt = 0;
+	dev->chunk_bits = NULL;
+}
+
+/* Allocate and zero the per-block info array and the chunk in-use
+ * bitmap, falling back from kmalloc to vmalloc for large devices
+ * (the *_alt flags record which allocator was used).
+ * Returns YAFFS_OK or YAFFS_FAIL. */
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+	int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+	dev->block_info = NULL;
+	dev->chunk_bits = NULL;
+	dev->alloc_block = -1;	/* force it to get a new one */
+
+	/* If the first allocation strategy fails, try the alternate one */
+	dev->block_info =
+	    kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+	if (!dev->block_info) {
+		dev->block_info =
+		    vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+		dev->block_info_alt = 1;
+	} else {
+		dev->block_info_alt = 0;
+	}
+
+	if (!dev->block_info)
+		goto alloc_error;
+
+	/* Set up dynamic blockinfo stuff. Round up bytes. */
+	dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
+	dev->chunk_bits =
+	    kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+	if (!dev->chunk_bits) {
+		dev->chunk_bits =
+		    vmalloc(dev->chunk_bit_stride * n_blocks);
+		dev->chunk_bits_alt = 1;
+	} else {
+		dev->chunk_bits_alt = 0;
+	}
+	if (!dev->chunk_bits)
+		goto alloc_error;
+
+
+	memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
+	memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+	return YAFFS_OK;
+
+alloc_error:
+	/* Partial allocations are cleaned up here. */
+	yaffs_deinit_blocks(dev);
+	return YAFFS_FAIL;
+}
+
+
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+ int erased_ok = 0;
+ int i;
+
+ /* If the block is still healthy erase it and mark as clean.
+ * If the block has had a data failure, then retire it.
+ */
+
+ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ "yaffs_block_became_dirty block %d state %d %s",
+ block_no, bi->block_state,
+ (bi->needs_retiring) ? "needs retiring" : "");
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+ /* If this is the block being garbage collected then stop gc'ing */
+ if (block_no == dev->gc_block)
+ dev->gc_block = 0;
+
+ /* If this block is currently the best candidate for gc
+ * then drop as a candidate */
+ if (block_no == dev->gc_dirtiest) {
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ }
+
+ if (!bi->needs_retiring) {
+ yaffs2_checkpt_invalidate(dev); /* erasing invalidates any saved checkpoint */
+ erased_ok = yaffs_erase_block(dev, block_no);
+ if (!erased_ok) {
+ dev->n_erase_failures++;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Erasure failed %d", block_no);
+ }
+ }
+
+ /* Verify erasure if needed */
+ if (erased_ok &&
+ ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
+ !yaffs_skip_verification(dev))) {
+ for (i = 0; i < dev->param.chunks_per_block; i++) {
+ if (!yaffs_check_chunk_erased(dev,
+ block_no * dev->param.chunks_per_block + i)) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ ">>Block %d erasure supposedly OK, but chunk %d not erased",
+ block_no, i);
+ }
+ }
+ }
+
+ if (!erased_ok) {
+ /* We lost a block of free space */
+ dev->n_free_chunks -= dev->param.chunks_per_block;
+ yaffs_retire_block(dev, block_no);
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d retired", block_no);
+ return;
+ }
+
+ /* Clean it up... */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ bi->seq_number = 0;
+ dev->n_erased_blocks++;
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+ bi->has_shrink_hdr = 0;
+ bi->skip_erased_check = 1; /* Clean, so no need to check */
+ bi->gc_prioritise = 0;
+ bi->has_summary = 0;
+
+ yaffs_clear_chunk_bits(dev, block_no);
+
+ yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
+}
+
+static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi,
+ int old_chunk, u8 *buffer)
+{
+ int new_chunk;
+ int mark_flash = 1;
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj *object;
+ int matching_chunk;
+ int ret_val = YAFFS_OK;
+
+ memset(&tags, 0, sizeof(tags));
+ yaffs_rd_chunk_tags_nand(dev, old_chunk,
+ buffer, &tags);
+ object = yaffs_find_by_number(dev, tags.obj_id);
+
+ yaffs_trace(YAFFS_TRACE_GC_DETAIL,
+ "Collecting chunk in block %d, %d %d %d ",
+ dev->gc_chunk, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ if (object && !yaffs_skip_verification(dev)) {
+ if (tags.chunk_id == 0)
+ matching_chunk =
+ object->hdr_chunk;
+ else if (object->soft_del)
+ /* Defeat the test */
+ matching_chunk = old_chunk;
+ else
+ matching_chunk =
+ yaffs_find_chunk_in_file
+ (object, tags.chunk_id,
+ NULL);
+
+ if (old_chunk != matching_chunk)
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "gc: page in gc mismatch: %d %d %d %d",
+ old_chunk,
+ matching_chunk,
+ tags.obj_id,
+ tags.chunk_id);
+ }
+
+ if (!object) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "page %d in gc has no object: %d %d %d ",
+ old_chunk,
+ tags.obj_id, tags.chunk_id,
+ tags.n_bytes);
+ }
+
+ if (object &&
+ object->deleted &&
+ object->soft_del && tags.chunk_id != 0) {
+ /* Data chunk in a soft deleted file,
+ * throw it away.
+ * It's a soft deleted data chunk,
+ * No need to copy this, just forget
+ * about it and fix up the object.
+ */
+
+ /* Free chunks already includes
+ * softdeleted chunks, however this
+ * chunk is going to soon be really
+ * deleted which will increment free
+ * chunks. We have to decrement free
+ * chunks so this works out properly.
+ */
+ dev->n_free_chunks--;
+ bi->soft_del_pages--;
+
+ object->n_data_chunks--;
+ if (object->n_data_chunks <= 0) {
+ /* remember to clean up obj */
+ dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
+ dev->n_clean_ups++;
+ }
+ mark_flash = 0;
+ } else if (object) {
+ /* It's either a data chunk in a live
+ * file or an ObjectHeader, so we're
+ * interested in it.
+ * NB Need to keep the ObjectHeaders of
+ * deleted files until the whole file
+ * has been deleted off
+ */
+ tags.serial_number++;
+ dev->n_gc_copies++;
+
+ if (tags.chunk_id == 0) {
+ /* It is an object Id,
+ * We need to nuke the
+ * shrinkheader flags since its
+ * work is done.
+ * Also need to clean up
+ * shadowing.
+ */
+ struct yaffs_obj_hdr *oh;
+ oh = (struct yaffs_obj_hdr *) buffer;
+
+ oh->is_shrink = 0;
+ tags.extra_is_shrink = 0;
+ oh->shadows_obj = 0;
+ oh->inband_shadowed_obj_id = 0;
+ tags.extra_shadows = 0;
+
+ /* Update file size */
+ if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ yaffs_oh_size_load(oh,
+ object->variant.file_variant.file_size);
+ tags.extra_file_size =
+ object->variant.file_variant.file_size;
+ }
+
+ yaffs_verify_oh(object, oh, &tags, 1);
+ new_chunk =
+ yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
+ } else {
+ new_chunk =
+ yaffs_write_new_chunk(dev, buffer, &tags, 1);
+ }
+
+ if (new_chunk < 0) {
+ ret_val = YAFFS_FAIL;
+ } else {
+
+ /* Now fix up the Tnodes etc. */
+
+ if (tags.chunk_id == 0) {
+ /* It's a header */
+ object->hdr_chunk = new_chunk;
+ object->serial = tags.serial_number;
+ } else {
+ /* It's a data chunk */
+ yaffs_put_chunk_in_file(object, tags.chunk_id,
+ new_chunk, 0);
+ }
+ }
+ }
+ if (ret_val == YAFFS_OK)
+ yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
+ return ret_val;
+}
+
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
+{
+ int old_chunk;
+ int ret_val = YAFFS_OK;
+ int i;
+ int is_checkpt_block;
+ int max_copies;
+ int chunks_before = yaffs_get_erased_chunks(dev);
+ int chunks_after;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d, in use %d, shrink %d, whole_block %d",
+ block, bi->pages_in_use, bi->has_shrink_hdr,
+ whole_block);
+
+ /*yaffs_verify_free_chunks(dev); */
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+ bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
+
+ dev->gc_disable = 1;
+
+ yaffs_summary_gc(dev, block);
+
+ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d that has no chunks in use",
+ block);
+ yaffs_block_became_dirty(dev, block);
+ } else {
+
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ yaffs_verify_blk(dev, bi, block);
+
+ max_copies = (whole_block) ? dev->param.chunks_per_block : 5; /* partial gc copies at most 5 live chunks per call */
+ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+ for (/* init already done */ ;
+ ret_val == YAFFS_OK &&
+ dev->gc_chunk < dev->param.chunks_per_block &&
+ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+ max_copies > 0;
+ dev->gc_chunk++, old_chunk++) {
+ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+ /* Page is in use and might need to be copied */
+ max_copies--;
+ ret_val = yaffs_gc_process_chunk(dev, bi,
+ old_chunk, buffer);
+ }
+ }
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+
+ yaffs_verify_collected_blk(dev, bi, block);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ /*
+ * The gc did not complete. Set block state back to FULL
+ * because checkpointing does not restore gc.
+ */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ } else {
+ /* The gc completed. */
+ /* Do any required cleanups */
+ for (i = 0; i < dev->n_clean_ups; i++) {
+ /* Time to delete the file too */
+ struct yaffs_obj *object =
+ yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
+ if (object) {
+ yaffs_free_tnode(dev,
+ object->variant.file_variant.top);
+ object->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: About to finally delete object %d",
+ object->obj_id);
+ yaffs_generic_obj_del(object);
+ object->my_dev->n_deleted_files--;
+ }
+
+ }
+ chunks_after = yaffs_get_erased_chunks(dev);
+ if (chunks_before >= chunks_after)
+ yaffs_trace(YAFFS_TRACE_GC,
+ "gc did not increase free chunks before %d after %d",
+ chunks_before, chunks_after);
+ dev->gc_block = 0;
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ dev->gc_disable = 0;
+
+ return ret_val;
+}
+
+/*
+ * find_gc_block() selects the dirtiest block (or close enough)
+ * for garbage collection.
+ */
+
+static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
+ int aggressive, int background)
+{
+ int i;
+ int iterations;
+ unsigned selected = 0;
+ int prioritised = 0;
+ int prioritised_exist = 0;
+ struct yaffs_block_info *bi;
+ int threshold;
+
+ /* First let's see if we need to grab a prioritised block */
+ if (dev->has_pending_prioritised_gc && !aggressive) {
+ dev->gc_dirtiest = 0;
+ bi = dev->block_info;
+ for (i = dev->internal_start_block;
+ i <= dev->internal_end_block && !selected; i++) {
+
+ if (bi->gc_prioritise) {
+ prioritised_exist = 1;
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ yaffs_block_ok_for_gc(dev, bi)) {
+ selected = i;
+ prioritised = 1;
+ }
+ }
+ bi++;
+ }
+
+ /*
+ * If there is a prioritised block and none was selected then
+ * this happened because there is at least one old dirty block
+ * gumming up the works. Let's gc the oldest dirty block.
+ */
+
+ if (prioritised_exist &&
+ !selected && dev->oldest_dirty_block > 0)
+ selected = dev->oldest_dirty_block;
+
+ if (!prioritised_exist) /* None found, so we can clear this */
+ dev->has_pending_prioritised_gc = 0;
+ }
+
+ /* If we're doing aggressive GC then we are happy to take a less-dirty
+ * block, and search harder.
+ * else (leisurely gc), then we only bother to do this if the
+ * block has only a few pages in use.
+ */
+
+ if (!selected) {
+ int pages_used;
+ int n_blocks =
+ dev->internal_end_block - dev->internal_start_block + 1;
+ if (aggressive) {
+ threshold = dev->param.chunks_per_block;
+ iterations = n_blocks;
+ } else {
+ int max_threshold;
+
+ if (background)
+ max_threshold = dev->param.chunks_per_block / 2;
+ else
+ max_threshold = dev->param.chunks_per_block / 8;
+
+ if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+ threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
+ if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+ if (threshold > max_threshold)
+ threshold = max_threshold;
+
+ iterations = n_blocks / 16 + 1;
+ if (iterations > 100)
+ iterations = 100;
+ }
+
+ for (i = 0;
+ i < iterations &&
+ (dev->gc_dirtiest < 1 ||
+ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
+ i++) {
+ dev->gc_block_finder++;
+ if (dev->gc_block_finder < dev->internal_start_block ||
+ dev->gc_block_finder > dev->internal_end_block)
+ dev->gc_block_finder =
+ dev->internal_start_block;
+
+ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+ pages_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ pages_used < dev->param.chunks_per_block &&
+ (dev->gc_dirtiest < 1 ||
+ pages_used < dev->gc_pages_in_use) &&
+ yaffs_block_ok_for_gc(dev, bi)) {
+ dev->gc_dirtiest = dev->gc_block_finder;
+ dev->gc_pages_in_use = pages_used;
+ }
+ }
+
+ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+ selected = dev->gc_dirtiest;
+ }
+
+ /*
+ * If nothing has been selected for a while, try the oldest dirty
+ * because that's gumming up the works.
+ */
+
+ if (!selected && dev->param.is_yaffs2 &&
+ dev->gc_not_done >= (background ? 10 : 20)) {
+ yaffs2_find_oldest_dirty_seq(dev);
+ if (dev->oldest_dirty_block > 0) {
+ selected = dev->oldest_dirty_block;
+ dev->gc_dirtiest = selected;
+ dev->oldest_dirty_gc_count++;
+ bi = yaffs_get_block_info(dev, selected);
+ dev->gc_pages_in_use =
+ bi->pages_in_use - bi->soft_del_pages;
+ } else {
+ dev->gc_not_done = 0;
+ }
+ }
+
+ if (selected) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC Selected block %d with %d free, prioritised:%d",
+ selected,
+ dev->param.chunks_per_block - dev->gc_pages_in_use,
+ prioritised);
+
+ dev->n_gc_blocks++;
+ if (background)
+ dev->bg_gcs++;
+
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ dev->gc_not_done = 0;
+ if (dev->refresh_skip > 0)
+ dev->refresh_skip--;
+ } else {
+ dev->gc_not_done++;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+ dev->gc_block_finder, dev->gc_not_done, threshold,
+ dev->gc_dirtiest, dev->gc_pages_in_use,
+ dev->oldest_dirty_block, background ? " bg" : "");
+ }
+
+ return selected;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection
+ * otherwise we do "leisurely" garbage collection.
+ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
+ * Passive gc only inspects smaller areas and only accepts more dirty blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
+static int yaffs_check_gc(struct yaffs_dev *dev, int background)
+{
+ int aggressive = 0;
+ int gc_ok = YAFFS_OK;
+ int max_tries = 0;
+ int min_erased;
+ int erased_chunks;
+ int checkpt_block_adjust;
+
+ if (dev->param.gc_control_fn &&
+ (dev->param.gc_control_fn(dev) & 1) == 0)
+ return YAFFS_OK;
+
+ if (dev->gc_disable)
+ /* Bail out so we don't get recursive gc */
+ return YAFFS_OK;
+
+ /* This loop should pass the first time.
+ * Only loops here if the collection does not increase space.
+ */
+
+ do {
+ max_tries++;
+
+ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+ min_erased =
+ dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+ erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ /* If we need a block soon then do aggressive gc. */
+ if (dev->n_erased_blocks < min_erased)
+ aggressive = 1;
+ else {
+ if (!background
+ && erased_chunks > (dev->n_free_chunks / 4))
+ break;
+
+ if (dev->gc_skip > 20)
+ dev->gc_skip = 20;
+ if (erased_chunks < dev->n_free_chunks / 2 ||
+ dev->gc_skip < 1 || background)
+ aggressive = 0;
+ else {
+ dev->gc_skip--;
+ break;
+ }
+ }
+
+ dev->gc_skip = 5;
+
+ /* If we don't already have a block being gc'd then see if we
+ * should start another */
+
+ if (dev->gc_block < 1 && !aggressive) {
+ dev->gc_block = yaffs2_find_refresh_block(dev);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+ if (dev->gc_block < 1) {
+ dev->gc_block =
+ yaffs_find_gc_block(dev, aggressive, background);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ if (dev->gc_block > 0) {
+ dev->all_gcs++;
+ if (!aggressive)
+ dev->passive_gc_count++;
+
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC n_erased_blocks %d aggressive %d",
+ dev->n_erased_blocks, aggressive);
+
+ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+ }
+
+ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
+ dev->gc_block > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+ dev->n_erased_blocks, max_tries,
+ dev->gc_block);
+ }
+ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+ (dev->gc_block > 0) && (max_tries < 2));
+
+ return aggressive ? gc_ok : YAFFS_OK;  /* only aggressive-gc failures are reported */
+}
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if at least half the free chunks are erased.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
+{
+ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block; /* sampled before this gc pass runs */
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+ yaffs_check_gc(dev, 1);
+ return erased_chunks > dev->n_free_chunks / 2;
+}
+
+/*-------------------- Data file manipulation -----------------*/
+
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
+{
+ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+ if (nand_chunk >= 0)
+ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+ buffer, NULL);
+ else {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not found zero instead",
+ nand_chunk);
+ /* get sane (zero) data if you read a hole */
+ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+ return 0; /* hole: nothing was read from NAND */
+ }
+
+}
+
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn) /* lyn: caller's __LINE__, used only for deletion tracing */
+{
+ int block;
+ int page;
+ struct yaffs_ext_tags tags;
+ struct yaffs_block_info *bi;
+
+ if (chunk_id <= 0)
+ return;
+
+ dev->n_deletions++;
+ block = chunk_id / dev->param.chunks_per_block;
+ page = chunk_id % dev->param.chunks_per_block;
+
+ if (!yaffs_check_chunk_bit(dev, block, page))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Deleting invalid chunk %d", chunk_id);
+
+ bi = yaffs_get_block_info(dev, block);
+
+ yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+ yaffs_trace(YAFFS_TRACE_DELETION,
+ "line %d delete of chunk %d",
+ lyn, chunk_id);
+
+ if (!dev->param.is_yaffs2 && mark_flash &&
+ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+ memset(&tags, 0, sizeof(tags));
+ tags.is_deleted = 1;
+ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+ yaffs_handle_chunk_update(dev, chunk_id, &tags);
+ } else {
+ dev->n_unmarked_deletions++;
+ }
+
+ /* Pull out of the management area.
+ * If the whole block became dirty, this will kick off an erasure.
+ */
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ dev->n_free_chunks++;
+ yaffs_clear_chunk_bit(dev, block, page);
+ bi->pages_in_use--;
+
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ yaffs_block_became_dirty(dev, block);
+ }
+ }
+}
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 *buffer, int n_bytes, int use_reserve)
+{
+ /* Find old chunk Need to do this to get serial number
+ * Write new one and patch into tree.
+ * Invalidate old tags.
+ */
+
+ int prev_chunk_id;
+ struct yaffs_ext_tags prev_tags;
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+ struct yaffs_dev *dev = in->my_dev;
+
+ yaffs_check_gc(dev, 0); /* make space before consuming a chunk */
+
+ /* Get the previous chunk at this location in the file if it exists.
+ * If it does not exist then put a zero into the tree. This creates
+ * the tnode now, rather than later when it is harder to clean up.
+ */
+ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+ if (prev_chunk_id < 1 &&
+ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+ return 0;
+
+ /* Set up new tags */
+ memset(&new_tags, 0, sizeof(new_tags));
+
+ new_tags.chunk_id = inode_chunk;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number =
+ (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+ new_tags.n_bytes = n_bytes;
+
+ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Writing %d bytes to chunk!!!!!!!!!",
+ n_bytes);
+ BUG();
+ }
+
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+ if (new_chunk_id > 0) {
+ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ yaffs_verify_file_sane(in);
+ }
+ return new_chunk_id;
+
+}
+
+
+
+static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
+ const YCHAR *name, const void *value, int size,
+ int flags)
+{
+ struct yaffs_xattr_mod xmod;
+ int result;
+
+ xmod.set = set;
+ xmod.name = name;
+ xmod.data = value;
+ xmod.size = size;
+ xmod.flags = flags;
+ xmod.result = -ENOSPC;
+
+ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod); /* xmod applied while rewriting the object header */
+
+ if (result > 0)
+ return xmod.result;
+ else
+ return -ENOSPC;
+}
+
+static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
+ struct yaffs_xattr_mod *xmod)
+{
+ int retval = 0;
+ int x_offs = sizeof(struct yaffs_obj_hdr); /* xattribs live after the object header in the chunk */
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+ char *x_buffer = buffer + x_offs;
+
+ if (xmod->set)
+ retval =
+ nval_set(x_buffer, x_size, xmod->name, xmod->data,
+ xmod->size, xmod->flags);
+ else
+ retval = nval_del(x_buffer, x_size, xmod->name);
+
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+ xmod->result = retval;
+
+ return retval;
+}
+
+static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
+ void *value, int size)
+{
+ char *buffer = NULL;
+ int result;
+ struct yaffs_ext_tags tags;
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+ char *x_buffer;
+ int retval = 0;
+
+ if (obj->hdr_chunk < 1)
+ return -ENODATA; /* no header chunk on NAND, so no xattribs */
+
+ /* If we know that the object has no xattribs then don't do all the
+ * reading and parsing.
+ */
+ if (obj->xattr_known && !obj->has_xattr) {
+ if (name)
+ return -ENODATA;
+ else
+ return 0;
+ }
+
+ buffer = (char *)yaffs_get_temp_buffer(dev);
+ if (!buffer)
+ return -ENOMEM;
+
+ result =
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
+
+ if (result != YAFFS_OK)
+ retval = -ENOENT;
+ else {
+ x_buffer = buffer + x_offs;
+
+ if (!obj->xattr_known) {
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+ }
+
+ if (name)
+ retval = nval_get(x_buffer, x_size, name, value, size);
+ else
+ retval = nval_list(x_buffer, x_size, value, size);
+ }
+ yaffs_release_temp_buffer(dev, (u8 *) buffer);
+ return retval;
+}
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+ const void *value, int size, int flags)
+{
+ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags); /* set=1: create or replace */
+}
+
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
+{
+ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0); /* set=0: delete */
+}
+
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+ int size)
+{
+ return yaffs_do_xattrib_fetch(obj, name, value, size); /* non-NULL name fetches one value */
+}
+
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
+{
+ return yaffs_do_xattrib_fetch(obj, NULL, buffer, size); /* NULL name lists all names */
+}
+
+static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
+{
+ u8 *buf;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ int result;
+ int alloc_failed = 0;
+
+ if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
+ return;
+
+ dev = in->my_dev;
+ in->lazy_loaded = 0;
+ buf = yaffs_get_temp_buffer(dev);
+
+ result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags); /* NOTE(review): result is ignored; header assumed readable */
+ oh = (struct yaffs_obj_hdr *)buf;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ yaffs_set_obj_name_from_oh(in, oh);
+
+ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ in->variant.symlink_variant.alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.symlink_variant.alias)
+ alloc_failed = 1; /* Not returned */
+ }
+ yaffs_release_temp_buffer(dev, buf);
+}
+
+static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
+ const YCHAR *oh_name, int buff_size)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ if (dev->param.auto_unicode) {
+ if (*oh_name) {
+ /* It is an ASCII name, do an ASCII to
+ * unicode conversion */
+ const char *ascii_oh_name = (const char *)oh_name;
+ int n = buff_size - 1;
+ while (n > 0 && *ascii_oh_name) {
+ *name = *ascii_oh_name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ strncpy(name, oh_name + 1, buff_size - 1);
+ }
+ } else {
+#else
+ (void) dev;
+ {
+#endif
+ strncpy(name, oh_name, buff_size - 1); /* NOTE(review): relies on caller zeroing name[]; strncpy may not NUL-terminate */
+ }
+}
+
+static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
+ const YCHAR *name)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+
+ int is_ascii;
+ YCHAR *w;
+
+ if (dev->param.auto_unicode) {
+
+ is_ascii = 1;
+ w = name;
+
+ /* Figure out if the name will fit in ascii character set */
+ while (is_ascii && *w) {
+ if ((*w) & 0xff00)
+ is_ascii = 0;
+ w++;
+ }
+
+ if (is_ascii) {
+ /* It is an ASCII name, so convert unicode to ascii */
+ char *ascii_oh_name = (char *)oh_name;
+ int n = YAFFS_MAX_NAME_LENGTH - 1;
+ while (n > 0 && *name) {
+ *ascii_oh_name = *name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ /* Unicode name, so save starting at the second YCHAR */
+ *oh_name = 0;
+ strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
+ }
+ } else {
+#else
+ dev = dev; /* quiet unused-parameter warning */
+ {
+#endif
+ strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
+ }
+}
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
+ int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
+{
+
+ struct yaffs_block_info *bi;
+ struct yaffs_dev *dev = in->my_dev;
+ int prev_chunk_id;
+ int ret_val = 0;
+ int result = 0;
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+ struct yaffs_ext_tags old_tags;
+ const YCHAR *alias = NULL;
+ u8 *buffer = NULL;
+ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+ struct yaffs_obj_hdr *oh = NULL;
+ loff_t file_size = 0;
+
+ strcpy(old_name, _Y("silly old name")); /* placeholder; replaced if an old header is read */
+
+ if (in->fake && in != dev->root_dir && !force && !xmod)
+ return ret_val;
+
+ yaffs_check_gc(dev, 0);
+ yaffs_check_obj_details_loaded(in);
+
+ buffer = yaffs_get_temp_buffer(in->my_dev);
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ prev_chunk_id = in->hdr_chunk;
+
+ if (prev_chunk_id > 0) {
+ result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+ buffer, &old_tags);
+
+ yaffs_verify_oh(in, oh, &old_tags, 0);
+ memcpy(old_name, oh->name, sizeof(oh->name));
+ memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
+ } else {
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ }
+
+ oh->type = in->variant_type;
+ oh->yst_mode = in->yst_mode;
+ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+ yaffs_load_attribs_oh(oh, in);
+
+ if (in->parent)
+ oh->parent_obj_id = in->parent->obj_id;
+ else
+ oh->parent_obj_id = 0;
+
+ if (name && *name) {
+ memset(oh->name, 0, sizeof(oh->name));
+ yaffs_load_oh_from_name(dev, oh->name, name);
+ } else if (prev_chunk_id > 0) {
+ memcpy(oh->name, old_name, sizeof(oh->name));
+ } else {
+ memset(oh->name, 0, sizeof(oh->name));
+ }
+
+ oh->is_shrink = is_shrink;
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Should not happen */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
+ oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
+ file_size = in->variant.file_variant.file_size;
+ yaffs_oh_size_load(oh, file_size);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ oh->equiv_id = in->variant.hardlink_variant.equiv_id;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = in->variant.symlink_variant.alias;
+ if (!alias)
+ alias = _Y("no alias");
+ strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
+ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+ break;
+ }
+
+ /* process any xattrib modifications */
+ if (xmod)
+ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+ /* Tags */
+ memset(&new_tags, 0, sizeof(new_tags));
+ in->serial++;
+ new_tags.chunk_id = 0;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number = in->serial;
+
+ /* Add extra info for file header */
+ new_tags.extra_available = 1;
+ new_tags.extra_parent_id = oh->parent_obj_id;
+ new_tags.extra_file_size = file_size;
+ new_tags.extra_is_shrink = oh->is_shrink;
+ new_tags.extra_equiv_id = oh->equiv_id;
+ new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
+ new_tags.extra_obj_type = in->variant_type;
+ yaffs_verify_oh(in, oh, &new_tags, 1);
+
+ /* Create new chunk in NAND */
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags,
+ (prev_chunk_id > 0) ? 1 : 0);
+
+ if (buffer)
+ yaffs_release_temp_buffer(dev, buffer);
+
+ if (new_chunk_id < 0)
+ return new_chunk_id;
+
+ in->hdr_chunk = new_chunk_id;
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ if (!yaffs_obj_cache_dirty(in))
+ in->dirty = 0;
+
+ /* If this was a shrink, then mark the block
+ * that the chunk lives on */
+ if (is_shrink) {
+ bi = yaffs_get_block_info(in->my_dev,
+ new_chunk_id /
+ in->my_dev->param.chunks_per_block);
+ bi->has_shrink_hdr = 1;
+ }
+
+
+ return new_chunk_id;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it
+ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ * Some complete chunks
+ * An incomplete chunk to end off with
+ *
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
+
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
+{
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ struct yaffs_cache *cache;
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0) {
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+ chunk++; /* in-file chunk ids are 1-based */
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+ if ((start + n) < dev->data_bytes_per_chunk)
+ n_copy = n;
+ else
+ n_copy = dev->data_bytes_per_chunk - start;
+
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ /* If the chunk is already in the cache or it is less than
+ * a whole chunk or we're using inband tags then use the cache
+ * (if there is caching) else bypass the cache.
+ */
+ if (cache || n_copy != dev->data_bytes_per_chunk ||
+ dev->param.inband_tags) {
+ if (dev->param.n_caches > 0) {
+
+ /* If we can't find the data in the cache,
+ * then load it up. */
+
+ if (!cache) {
+ cache =
+ yaffs_grab_chunk_cache(in->my_dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ cache->n_bytes = 0;
+ }
+
+ yaffs_use_cache(dev, cache, 0);
+
+ cache->locked = 1;
+
+ memcpy(buffer, &cache->data[start], n_copy);
+
+ cache->locked = 0;
+ } else {
+ /* Read into the local buffer then copy.. */
+
+ u8 *local_buffer =
+ yaffs_get_temp_buffer(dev);
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+
+ memcpy(buffer, &local_buffer[start], n_copy);
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+ }
+ } else {
+ /* A full chunk. Read directly into the buffer. */
+ yaffs_rd_data_obj(in, chunk, buffer);
+ }
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+ }
+ return n_done;
+}
+
+/*
+ * Write n_bytes from 'buffer' into object 'in' at file position 'offset'.
+ * If write_through is set, cached chunks are pushed to flash immediately.
+ * Returns the number of bytes actually written (stops early if a chunk
+ * write fails).
+ *
+ * Partial chunks are merged with existing on-flash data via the
+ * short-op cache (or a temp buffer); whole aligned chunks may bypass
+ * the cache.  The in-core file size is updated, but the object header
+ * is NOT rewritten here.
+ */
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+		     int n_bytes, int write_through)
+{
+
+	int chunk;
+	u32 start;
+	int n_copy;
+	int n = n_bytes;
+	int n_done = 0;
+	int n_writeback;
+	loff_t start_write = offset;
+	int chunk_written = 0;
+	u32 n_bytes_read;
+	loff_t chunk_start;
+	struct yaffs_dev *dev;
+
+	dev = in->my_dev;
+
+	while (n > 0 && chunk_written >= 0) {
+		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+		/* Sanity check the address-to-chunk arithmetic. */
+		if (((loff_t)chunk) *
+		    dev->data_bytes_per_chunk + start != offset ||
+		    start >= dev->data_bytes_per_chunk) {
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"AddrToChunk of offset %lld gives chunk %d start %d",
+				offset, chunk, start);
+		}
+		chunk++;	/* File pos to chunk in file offset */
+
+		/* OK now check for the curveball where the start and end are in
+		 * the same chunk.
+		 */
+
+		if ((start + n) < dev->data_bytes_per_chunk) {
+			n_copy = n;
+
+			/* Now calculate how many bytes to write back....
+			 * If we're overwriting and not writing to then end of
+			 * file then we need to write back as much as was there
+			 * before.
+			 */
+
+			chunk_start = (((loff_t)(chunk - 1)) *
+					dev->data_bytes_per_chunk);
+
+			if (chunk_start > in->variant.file_variant.file_size)
+				n_bytes_read = 0;	/* Past end of file */
+			else
+				n_bytes_read =
+				    in->variant.file_variant.file_size -
+				    chunk_start;
+
+			if (n_bytes_read > dev->data_bytes_per_chunk)
+				n_bytes_read = dev->data_bytes_per_chunk;
+
+			/* Write back whichever is larger: the pre-existing
+			 * data in this chunk or the newly written span. */
+			n_writeback =
+			    (n_bytes_read >
+			     (start + n)) ? n_bytes_read : (start + n);
+
+			if (n_writeback < 0 ||
+			    n_writeback > dev->data_bytes_per_chunk)
+				BUG();
+
+		} else {
+			n_copy = dev->data_bytes_per_chunk - start;
+			n_writeback = dev->data_bytes_per_chunk;
+		}
+
+		if (n_copy != dev->data_bytes_per_chunk ||
+		    !dev->param.cache_bypass_aligned ||
+		    dev->param.inband_tags) {
+			/* An incomplete start or end chunk (or maybe both
+			 * start and end chunk), or we're using inband tags,
+			 * or we're forcing writes through the cache,
+			 * so we want to use the cache buffers.
+			 */
+			if (dev->param.n_caches > 0) {
+				struct yaffs_cache *cache;
+
+				/* If we can't find the data in the cache, then
+				 * load the cache */
+				cache = yaffs_find_chunk_cache(in, chunk);
+
+				if (!cache &&
+				    yaffs_check_alloc_available(dev, 1)) {
+					cache = yaffs_grab_chunk_cache(dev);
+					cache->object = in;
+					cache->chunk_id = chunk;
+					cache->dirty = 0;
+					cache->locked = 0;
+					yaffs_rd_data_obj(in, chunk,
+							  cache->data);
+				} else if (cache &&
+					   !cache->dirty &&
+					   !yaffs_check_alloc_available(dev,
+									1)) {
+					/* Drop the cache if it was a read cache
+					 * item and no space check has been made
+					 * for it.
+					 */
+					cache = NULL;
+				}
+
+				if (cache) {
+					yaffs_use_cache(dev, cache, 1);
+					cache->locked = 1;
+
+					memcpy(&cache->data[start], buffer,
+					       n_copy);
+
+					cache->locked = 0;
+					cache->n_bytes = n_writeback;
+
+					if (write_through) {
+						chunk_written =
+						    yaffs_wr_data_obj
+						    (cache->object,
+						     cache->chunk_id,
+						     cache->data,
+						     cache->n_bytes, 1);
+						cache->dirty = 0;
+					}
+				} else {
+					chunk_written = -1;	/* fail write */
+				}
+			} else {
+				/* An incomplete start or end chunk (or maybe
+				 * both start and end chunk). Read into the
+				 * local buffer then copy over and write back.
+				 */
+
+				u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+				yaffs_rd_data_obj(in, chunk, local_buffer);
+				memcpy(&local_buffer[start], buffer, n_copy);
+
+				chunk_written =
+				    yaffs_wr_data_obj(in, chunk,
+						      local_buffer,
+						      n_writeback, 0);
+
+				yaffs_release_temp_buffer(dev, local_buffer);
+			}
+		} else {
+			/* A full chunk. Write directly from the buffer. */
+
+			chunk_written =
+			    yaffs_wr_data_obj(in, chunk, buffer,
+					      dev->data_bytes_per_chunk, 0);
+
+			/* Since we've overwritten the cached data,
+			 * we better invalidate it. */
+			yaffs_invalidate_chunk_cache(in, chunk);
+		}
+
+		/* Only advance on success; a failed write ends the loop. */
+		if (chunk_written >= 0) {
+			n -= n_copy;
+			offset += n_copy;
+			buffer += n_copy;
+			n_done += n_copy;
+		}
+	}
+
+	/* Update file object */
+
+	if ((start_write + n_done) > in->variant.file_variant.file_size)
+		in->variant.file_variant.file_size = (start_write + n_done);
+
+	in->dirty = 1;
+	return n_done;
+}
+
+/*
+ * Public write entry point: fill any hole between the current end of
+ * file and 'offset' first (yaffs2 semantics), then do the real write.
+ * Returns the number of bytes written.
+ */
+int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+		  int n_bytes, int write_through)
+{
+	yaffs2_handle_hole(in, offset);
+	return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
+}
+
+/* ---------------------- File resizing stuff ------------------ */
+
+/*
+ * Delete the data chunks of 'in' that lie wholly beyond new_size.
+ * Chunks are deleted from the end of the file backwards so that a
+ * power loss part-way through cannot leave a hole in the middle.
+ */
+static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
+{
+
+	struct yaffs_dev *dev = in->my_dev;
+	loff_t old_size = in->variant.file_variant.file_size;
+	int i;
+	int chunk_id;
+	u32 dummy;
+	int last_del;
+	int start_del;
+
+	if (old_size > 0)
+		yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
+	else
+		last_del = 0;
+
+	/* First chunk to delete is the one after the last chunk still
+	 * (partially) covered by new_size; +1s convert to 1-based ids. */
+	yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
+			    &start_del, &dummy);
+	last_del++;
+	start_del++;
+
+	/* Delete backwards so that we don't end up with holes if
+	 * power is lost part-way through the operation.
+	 */
+	for (i = last_del; i >= start_del; i--) {
+		/* NB this could be optimised somewhat,
+		 * eg. could retrieve the tags and write them without
+		 * using yaffs_chunk_del
+		 */
+
+		chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
+
+		if (chunk_id < 1)
+			continue;	/* hole: nothing stored here */
+
+		/* Paranoia: ignore chunk ids outside the device range. */
+		if (chunk_id <
+		    (dev->internal_start_block * dev->param.chunks_per_block) ||
+		    chunk_id >=
+		    ((dev->internal_end_block + 1) *
+		      dev->param.chunks_per_block)) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"Found daft chunk_id %d for %d",
+				chunk_id, i);
+		} else {
+			in->n_data_chunks--;
+			yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
+		}
+	}
+}
+
+/*
+ * Shrink a file to new_size: prune whole chunks past the new end and,
+ * when the new end falls part-way through a chunk, rewrite that last
+ * chunk zero-padded at its new (shorter) length.  Finally prune the
+ * tnode tree to match.
+ */
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
+{
+	int new_full;
+	u32 new_partial;
+	struct yaffs_dev *dev = obj->my_dev;
+
+	yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
+
+	yaffs_prune_chunks(obj, new_size);
+
+	if (new_partial != 0) {
+		int last_chunk = 1 + new_full;
+		u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+		/* Rewrite the last chunk with its new size and zero pad */
+		yaffs_rd_data_obj(obj, last_chunk, local_buffer);
+		memset(local_buffer + new_partial, 0,
+		       dev->data_bytes_per_chunk - new_partial);
+
+		yaffs_wr_data_obj(obj, last_chunk, local_buffer,
+				  new_partial, 1);
+
+		yaffs_release_temp_buffer(dev, local_buffer);
+	}
+
+	obj->variant.file_variant.file_size = new_size;
+
+	yaffs_prune_tree(dev, &obj->variant.file_variant);
+}
+
+/*
+ * Resize a file object to new_size (grow or shrink), then write an
+ * updated object header unless the file is in the unlinked/deleted
+ * directories or is shadowed.  Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
+{
+	struct yaffs_dev *dev = in->my_dev;
+	loff_t old_size = in->variant.file_variant.file_size;
+
+	yaffs_flush_file_cache(in);
+	yaffs_invalidate_whole_cache(in);
+
+	yaffs_check_gc(dev, 0);
+
+	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+		return YAFFS_FAIL;
+
+	if (new_size == old_size)
+		return YAFFS_OK;
+
+	if (new_size > old_size) {
+		/* Growing: record the hole and adopt the new size. */
+		yaffs2_handle_hole(in, new_size);
+		in->variant.file_variant.file_size = new_size;
+	} else {
+		/* new_size < old_size */
+		yaffs_resize_file_down(in, new_size);
+	}
+
+	/* Write a new object header to reflect the resize.
+	 * show we've shrunk the file, if need be
+	 * Do this only if the file is not in the deleted directories
+	 * and is not shadowed.
+	 */
+	if (in->parent &&
+	    !in->is_shadowed &&
+	    in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+	    in->parent->obj_id != YAFFS_OBJECTID_DELETED)
+		yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+	return YAFFS_OK;
+}
+
+/*
+ * Flush a dirty file: push cached chunks out and, unless this is a
+ * data-only sync, refresh the timestamps (if requested) and rewrite
+ * the object header.  Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
+{
+	int oh_result;
+
+	if (!in->dirty)
+		return YAFFS_OK;	/* nothing to flush */
+
+	yaffs_flush_file_cache(in);
+
+	/* A data-only sync leaves the object header alone. */
+	if (data_sync)
+		return YAFFS_OK;
+
+	if (update_time)
+		yaffs_load_current_time(in, 0, 0);
+
+	oh_result = yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+	return (oh_result >= 0) ? YAFFS_OK : YAFFS_FAIL;
+}
+
+
+/* yaffs_del_file deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+/*
+ * Move a file either to the deleted directory (when it has no in-core
+ * inode, i.e. no open handles, so it can be deleted now) or park it in
+ * the unlinked directory until the last handle is closed.
+ * Returns the result of the rename.
+ */
+static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
+{
+	int ret_val;
+	int del_now = 0;
+	struct yaffs_dev *dev = in->my_dev;
+
+	/* No in-core inode means no open handles: delete immediately. */
+	if (!in->my_inode)
+		del_now = 1;
+
+	if (del_now) {
+		ret_val =
+		    yaffs_change_obj_name(in, in->my_dev->del_dir,
+					  _Y("deleted"), 0, 0);
+		yaffs_trace(YAFFS_TRACE_TRACING,
+			"yaffs: immediate deletion of file %d",
+			in->obj_id);
+		in->deleted = 1;
+		in->my_dev->n_deleted_files++;
+		if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+			yaffs_resize_file(in, 0);
+		yaffs_soft_del_file(in);
+	} else {
+		ret_val =
+		    yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
+					  _Y("unlinked"), 0, 0);
+	}
+	return ret_val;
+}
+
+/*
+ * Delete the whole of a file's data and the inode associated with it.
+ * Files that still hold data chunks go through soft deletion; a file
+ * with no data chunks is tossed immediately.  Hard links to the file
+ * are not touched here.
+ */
+static int yaffs_del_file(struct yaffs_obj *in)
+{
+	int ret_val = YAFFS_OK;
+	int deleted;	/* Need to cache value on stack if in is freed */
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+		yaffs_resize_file(in, 0);
+
+	if (in->n_data_chunks > 0) {
+		/* Use soft deletion if there is data in the file.
+		 * That won't be the case if it has been resized to zero.
+		 */
+		if (!in->unlinked)
+			ret_val = yaffs_unlink_file_if_needed(in);
+
+		deleted = in->deleted;
+
+		if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
+			in->deleted = 1;
+			deleted = 1;
+			in->my_dev->n_deleted_files++;
+			yaffs_soft_del_file(in);
+		}
+		return deleted ? YAFFS_OK : YAFFS_FAIL;
+	} else {
+		/* The file has no data chunks so we toss it immediately */
+		yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
+		in->variant.file_variant.top = NULL;
+		yaffs_generic_obj_del(in);
+
+		return YAFFS_OK;
+	}
+}
+
+/* Return non-zero iff obj is a directory with at least one child. */
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
+{
+	if (!obj)
+		return 0;
+	if (obj->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+		return 0;
+	return !list_empty(&obj->variant.dir_variant.children);
+}
+
+/* Delete a directory object; refuses (YAFFS_FAIL) if it has children. */
+static int yaffs_del_dir(struct yaffs_obj *obj)
+{
+	int rc;
+
+	/* First check that the directory is empty. */
+	if (yaffs_is_non_empty_dir(obj))
+		rc = YAFFS_FAIL;
+	else
+		rc = yaffs_generic_obj_del(obj);
+
+	return rc;
+}
+
+/* Delete a symlink: release its alias string, then the object itself. */
+static int yaffs_del_symlink(struct yaffs_obj *in)
+{
+	kfree(in->variant.symlink_variant.alias);
+	in->variant.symlink_variant.alias = NULL;
+	return yaffs_generic_obj_del(in);
+}
+
+/*
+ * Delete a hardlink object: detach it from the equivalent object's
+ * hard-link list, then delete the link object itself.
+ */
+static int yaffs_del_link(struct yaffs_obj *in)
+{
+	list_del_init(&in->hard_links);
+	return yaffs_generic_obj_del(in);
+}
+
+/*
+ * Delete an object of any variant type, dispatching to the matching
+ * type-specific deleter.  A directory is first removed from the dirty
+ * directories list.  Returns YAFFS_OK/YAFFS_FAIL (0 for unknown type,
+ * -1 only if the switch somehow falls through).
+ */
+int yaffs_del_obj(struct yaffs_obj *obj)
+{
+	int outcome = -1;
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		outcome = yaffs_del_file(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		if (!list_empty(&obj->variant.dir_variant.dirty)) {
+			yaffs_trace(YAFFS_TRACE_BACKGROUND,
+				"Remove object %d from dirty directories",
+				obj->obj_id);
+			list_del_init(&obj->variant.dir_variant.dirty);
+		}
+		outcome = yaffs_del_dir(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		outcome = yaffs_del_symlink(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		outcome = yaffs_del_link(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		outcome = yaffs_generic_obj_del(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+		outcome = 0;	/* should not happen. */
+		break;
+	}
+	return outcome;
+}
+
+/*
+ * Do the work of unlinking an object: resolve the hardlink curve ball,
+ * delete immediately when there are no open handles, or park the
+ * object in the unlinked directory otherwise.
+ */
+static int yaffs_unlink_worker(struct yaffs_obj *obj)
+{
+	int del_now = 0;
+
+	if (!obj)
+		return YAFFS_FAIL;
+
+	/* No in-core inode means no open handles. */
+	if (!obj->my_inode)
+		del_now = 1;
+
+	yaffs_update_parent(obj->parent);
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+		return yaffs_del_link(obj);
+	} else if (!list_empty(&obj->hard_links)) {
+		/* Curve ball: We're unlinking an object that has a hardlink.
+		 *
+		 * This problem arises because we are not strictly following
+		 * The Linux link/inode model.
+		 *
+		 * We can't really delete the object.
+		 * Instead, we do the following:
+		 * - Select a hardlink.
+		 * - Unhook it from the hard links
+		 * - Move it from its parent directory so that the rename works.
+		 * - Rename the object to the hardlink's name.
+		 * - Delete the hardlink
+		 */
+
+		struct yaffs_obj *hl;
+		struct yaffs_obj *parent;
+		int ret_val;
+		YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+		hl = list_entry(obj->hard_links.next, struct yaffs_obj,
+				hard_links);
+
+		yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+		parent = hl->parent;
+
+		list_del_init(&hl->hard_links);
+
+		yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+		ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
+
+		if (ret_val == YAFFS_OK)
+			ret_val = yaffs_generic_obj_del(hl);
+
+		return ret_val;
+
+	} else if (del_now) {
+		switch (obj->variant_type) {
+		case YAFFS_OBJECT_TYPE_FILE:
+			return yaffs_del_file(obj);
+			break;
+		case YAFFS_OBJECT_TYPE_DIRECTORY:
+			list_del_init(&obj->variant.dir_variant.dirty);
+			return yaffs_del_dir(obj);
+			break;
+		case YAFFS_OBJECT_TYPE_SYMLINK:
+			return yaffs_del_symlink(obj);
+			break;
+		case YAFFS_OBJECT_TYPE_SPECIAL:
+			return yaffs_generic_obj_del(obj);
+			break;
+		case YAFFS_OBJECT_TYPE_HARDLINK:
+		case YAFFS_OBJECT_TYPE_UNKNOWN:
+		default:
+			return YAFFS_FAIL;
+		}
+	} else if (yaffs_is_non_empty_dir(obj)) {
+		/* Cannot unlink a directory that still has children. */
+		return YAFFS_FAIL;
+	} else {
+		/* Still open somewhere: hide it in the unlinked directory. */
+		return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
+					     _Y("unlinked"), 0, 0);
+	}
+}
+
+/* Unlink an object, but only if it is flagged as unlinkable. */
+static int yaffs_unlink_obj(struct yaffs_obj *obj)
+{
+	if (!obj || !obj->unlink_allowed)
+		return YAFFS_FAIL;
+
+	return yaffs_unlink_worker(obj);
+}
+
+/* Look 'name' up in 'dir' and unlink whatever object was found. */
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
+{
+	return yaffs_unlink_obj(yaffs_find_by_name(dir, name));
+}
+
+/* Note:
+ * If old_name is NULL then we take old_dir as the object to be renamed.
+ */
+/*
+ * Rename old_dir/old_name to new_dir/new_name.  An existing target is
+ * nuked first via shadowing (with garbage collection disabled so the
+ * shadowing is not disturbed).  Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
+		     struct yaffs_obj *new_dir, const YCHAR *new_name)
+{
+	struct yaffs_obj *obj = NULL;
+	struct yaffs_obj *existing_target = NULL;
+	int force = 0;
+	int result;
+	struct yaffs_dev *dev;
+
+	if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		BUG();
+		return YAFFS_FAIL;
+	}
+	if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		BUG();
+		return YAFFS_FAIL;
+	}
+
+	dev = old_dir->my_dev;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+	/* Special case for case insensitive systems.
+	 * While look-up is case insensitive, the name isn't.
+	 * Therefore we might want to change x.txt to X.txt
+	 */
+	if (old_dir == new_dir &&
+	    old_name && new_name &&
+	    strcmp(old_name, new_name) == 0)
+		force = 1;
+#endif
+
+	if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
+	    YAFFS_MAX_NAME_LENGTH)
+		/* ENAMETOOLONG */
+		return YAFFS_FAIL;
+
+	/* If old_name is NULL, old_dir itself is the object being renamed. */
+	if (old_name)
+		obj = yaffs_find_by_name(old_dir, old_name);
+	else{
+		obj = old_dir;
+		old_dir = obj->parent;
+	}
+
+	if (obj && obj->rename_allowed) {
+		/* Now handle an existing target, if there is one */
+		existing_target = yaffs_find_by_name(new_dir, new_name);
+		if (yaffs_is_non_empty_dir(existing_target)) {
+			return YAFFS_FAIL;	/* ENOTEMPTY */
+		} else if (existing_target && existing_target != obj) {
+			/* Nuke the target first, using shadowing,
+			 * but only if it isn't the same object.
+			 *
+			 * Note we must disable gc here otherwise it can mess
+			 * up the shadowing.
+			 *
+			 */
+			dev->gc_disable = 1;
+			yaffs_change_obj_name(obj, new_dir, new_name, force,
+					      existing_target->obj_id);
+			existing_target->is_shadowed = 1;
+			yaffs_unlink_obj(existing_target);
+			dev->gc_disable = 0;
+		}
+
+		result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
+
+		yaffs_update_parent(old_dir);
+		if (new_dir != old_dir)
+			yaffs_update_parent(new_dir);
+
+		return result;
+	}
+	return YAFFS_FAIL;
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
+/*
+ * Record that obj_id was shadowed during scanning.  When scanning
+ * backwards (yaffs2) and the shadowed object already exists, nothing
+ * is done; otherwise a placeholder file object is created in the
+ * unlinked directory so it gets cleaned up after the scan.
+ */
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+			       int backward_scanning)
+{
+	struct yaffs_obj *obj;
+
+	if (backward_scanning) {
+		/* Handle YAFFS2 case (backward scanning)
+		 * If the shadowed object exists then ignore.
+		 */
+		obj = yaffs_find_by_number(dev, obj_id);
+		if (obj)
+			return;
+	}
+
+	/* Let's create it (if it does not exist) assuming it is a file so that
+	 * it can do shrinking etc.
+	 * We put it in unlinked dir to be cleaned up after the scanning
+	 */
+	obj =
+	    yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
+	if (!obj)
+		return;
+	obj->is_shadowed = 1;
+	yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
+	obj->variant.file_variant.shrink_size = 0;
+	obj->valid = 1;	/* So that we don't read any other info. */
+}
+
+/*
+ * After scanning, wire each hardlink on hard_list to its equivalent
+ * object (looked up by equiv_id).  Links whose target does not exist
+ * are left with a NULL equiv_obj and an empty link list.
+ */
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
+{
+	struct list_head *lh;
+	struct list_head *save;
+	struct yaffs_obj *hl;
+	struct yaffs_obj *in;
+
+	list_for_each_safe(lh, save, hard_list) {
+		hl = list_entry(lh, struct yaffs_obj, hard_links);
+		in = yaffs_find_by_number(dev,
+					  hl->variant.hardlink_variant.equiv_id);
+
+		if (in) {
+			/* Add the hardlink pointers */
+			hl->variant.hardlink_variant.equiv_obj = in;
+			list_add(&hl->hard_links, &in->hard_links);
+		} else {
+			/* Todo Need to report/handle this better.
+			 * Got a problem... hardlink to a non-existent object
+			 */
+			hl->variant.hardlink_variant.equiv_obj = NULL;
+			INIT_LIST_HEAD(&hl->hard_links);
+		}
+	}
+}
+
+/*
+ * After scanning, dispose of everything still sitting in the unlinked
+ * and deleted pseudo directories.  No-op on a read-only mount.
+ */
+static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
+{
+	/*
+	 * Sort out state of unlinked and deleted objects after scanning.
+	 */
+	struct list_head *i;
+	struct list_head *n;
+	struct yaffs_obj *l;
+
+	if (dev->read_only)
+		return;
+
+	/* Soft delete all the unlinked files */
+	list_for_each_safe(i, n,
+			   &dev->unlinked_dir->variant.dir_variant.children) {
+		l = list_entry(i, struct yaffs_obj, siblings);
+		yaffs_del_obj(l);
+	}
+
+	/* ...and everything parked in the deleted directory. */
+	list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
+		l = list_entry(i, struct yaffs_obj, siblings);
+		yaffs_del_obj(l);
+	}
+}
+
+/*
+ * This code iterates through all the objects making sure that they are rooted.
+ * Any unrooted objects are re-rooted in lost+found.
+ * An object needs to be in one of:
+ * - Directly under deleted, unlinked
+ * - Directly or indirectly under root.
+ *
+ * Note:
+ * This code assumes that we don't ever change the current relationships
+ * between directories:
+ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
+ * lost-n-found->parent == root_dir
+ *
+ * This fixes the problem where directories might have inadvertently been
+ * deleted leaving the object "hanging" without being rooted in the
+ * directory tree.
+ */
+
+/* The three top-level pseudo directories are the only parentless objects. */
+static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+	if (obj == dev->del_dir)
+		return 1;
+	if (obj == dev->unlinked_dir)
+		return 1;
+	return obj == dev->root_dir;
+}
+
+/*
+ * Walk every object in the hash buckets and move any object that is
+ * not reachable from root (or one of the parentless pseudo dirs) into
+ * lost+found.  Relies on the fixed directory relationships described
+ * in the comment above.  No-op on a read-only mount.
+ */
+static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_obj *parent;
+	int i;
+	struct list_head *lh;
+	struct list_head *n;
+	int depth_limit;
+	int hanging;
+
+	if (dev->read_only)
+		return;
+
+	/* Iterate through the objects in each hash entry,
+	 * looking at each object.
+	 * Make sure it is rooted.
+	 */
+
+	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+		list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
+			obj = list_entry(lh, struct yaffs_obj, hash_link);
+			parent = obj->parent;
+
+			if (yaffs_has_null_parent(dev, obj)) {
+				/* These directories are not hanging */
+				hanging = 0;
+			} else if (!parent ||
+				   parent->variant_type !=
+				   YAFFS_OBJECT_TYPE_DIRECTORY) {
+				hanging = 1;
+			} else if (yaffs_has_null_parent(dev, parent)) {
+				hanging = 0;
+			} else {
+				/*
+				 * Need to follow the parent chain to
+				 * see if it is hanging.
+				 */
+				hanging = 0;
+				/* bound the walk against parent-chain loops */
+				depth_limit = 100;
+
+				while (parent != dev->root_dir &&
+				       parent->parent &&
+				       parent->parent->variant_type ==
+				       YAFFS_OBJECT_TYPE_DIRECTORY &&
+				       depth_limit > 0) {
+					parent = parent->parent;
+					depth_limit--;
+				}
+				if (parent != dev->root_dir)
+					hanging = 1;
+			}
+			if (hanging) {
+				yaffs_trace(YAFFS_TRACE_SCAN,
+					"Hanging object %d moved to lost and found",
+					obj->obj_id);
+				yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+			}
+		}
+	}
+}
+
+/*
+ * Delete directory contents for cleaning up lost and found.
+ * Recurses depth-first into subdirectories before unlinking each child.
+ */
+static void yaffs_del_dir_contents(struct yaffs_obj *dir)
+{
+	struct yaffs_obj *obj;
+	struct list_head *lh;
+	struct list_head *n;
+
+	if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+		BUG();
+
+	list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
+		obj = list_entry(lh, struct yaffs_obj, siblings);
+		/* Empty a subdirectory first so the unlink can succeed. */
+		if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+			yaffs_del_dir_contents(obj);
+		yaffs_trace(YAFFS_TRACE_SCAN,
+			"Deleting lost_found object %d",
+			obj->obj_id);
+		yaffs_unlink_obj(obj);
+	}
+}
+
+/* Purge the contents of lost+found (used when cleaning up after a scan). */
+static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
+{
+	yaffs_del_dir_contents(dev->lost_n_found);
+}
+
+
+/*
+ * Look up 'name' among the children of 'directory'.  Uses the cached
+ * name checksum as a cheap first-pass filter before doing a full name
+ * compare.  Returns the child object, or NULL if not found.
+ */
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
+				     const YCHAR *name)
+{
+	int sum;
+	struct list_head *i;
+	YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+	struct yaffs_obj *l;
+
+	if (!name)
+		return NULL;
+
+	if (!directory) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: yaffs_find_by_name: null pointer directory"
+			);
+		BUG();
+		return NULL;
+	}
+	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: yaffs_find_by_name: non-directory"
+			);
+		BUG();
+	}
+
+	sum = yaffs_calc_name_sum(name);
+
+	list_for_each(i, &directory->variant.dir_variant.children) {
+		l = list_entry(i, struct yaffs_obj, siblings);
+
+		if (l->parent != directory)
+			BUG();
+
+		yaffs_check_obj_details_loaded(l);
+
+		/* Special case for lost-n-found */
+		if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+			if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
+				return l;
+		} else if (l->sum == sum || l->hdr_chunk <= 0) {
+			/* LostnFound chunk called Objxxx
+			 * Do a real check
+			 */
+			yaffs_get_obj_name(l, buffer,
+					   YAFFS_MAX_NAME_LENGTH + 1);
+			if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
+				return l;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Dereference a hard link to reach the actual object it points at.
+ * Non-hardlink (or NULL) objects are returned unchanged.
+ */
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
+{
+	if (!obj || obj->variant_type != YAFFS_OBJECT_TYPE_HARDLINK)
+		return obj;
+
+	obj = obj->variant.hardlink_variant.equiv_obj;
+	yaffs_check_obj_details_loaded(obj);
+	return obj;
+}
+
+/*
+ * A note or two on object names.
+ * * If the object name is missing, we then make one up in the form objnnn
+ *
+ * * ASCII names are stored in the object header's name field from byte zero
+ * * Unicode names are historically stored starting from byte zero.
+ *
+ * Then there are automatic Unicode names...
+ * The purpose of these is to save names in a way that can be read as
+ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
+ * system to share files.
+ *
+ * These automatic unicode are stored slightly differently...
+ * - If the name can fit in the ASCII character space then they are saved as
+ * ascii names as per above.
+ * - If the name needs Unicode then the name is saved in Unicode
+ * starting at oh->name[1].
+
+ */
+/*
+ * If 'name' is empty, synthesise one from the object id using the
+ * lost+found prefix (e.g. "obj123"-style).
+ * NOTE(review): the trailing strncpy does not NUL-terminate by itself;
+ * this relies on the caller zero-filling 'name' first (as
+ * yaffs_get_obj_name does) — confirm for any new caller.
+ */
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
+				int buffer_size)
+{
+	/* Create an object name if we could not find one. */
+	if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
+		YCHAR local_name[20];
+		YCHAR num_string[20];
+		YCHAR *x = &num_string[19];
+		unsigned v = obj->obj_id;
+		num_string[19] = 0;
+		/* Render the object id in decimal, building backwards. */
+		while (v > 0) {
+			x--;
+			*x = '0' + (v % 10);
+			v /= 10;
+		}
+		/* make up a name */
+		strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
+		strcat(local_name, x);
+		strncpy(name, local_name, buffer_size - 1);
+	}
+}
+
+/*
+ * Fetch the object's name into 'name' (buffer_size YCHARs), trying in
+ * order: the lost+found special name, the cached short name, then the
+ * name stored in the object header on flash.  An empty result is fixed
+ * up with a synthesised objnnn name.  Returns the name's length.
+ */
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
+{
+	memset(name, 0, buffer_size * sizeof(YCHAR));
+	yaffs_check_obj_details_loaded(obj);
+	if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+		strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
+	} else if (obj->short_name[0]) {
+		strcpy(name, obj->short_name);
+	} else if (obj->hdr_chunk > 0) {
+		int result;
+		u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
+
+		struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
+
+		memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+		/* NOTE(review): 'result' is ignored and the inner check
+		 * duplicates the else-if above; on a failed read the zeroed
+		 * buffer yields an empty name, which yaffs_fix_null_name()
+		 * below then repairs. */
+		if (obj->hdr_chunk > 0) {
+			result = yaffs_rd_chunk_tags_nand(obj->my_dev,
+							  obj->hdr_chunk,
+							  buffer, NULL);
+		}
+		yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
+					buffer_size);
+
+		yaffs_release_temp_buffer(obj->my_dev, buffer);
+	}
+
+	yaffs_fix_null_name(obj, name, buffer_size);
+
+	return strnlen(name, YAFFS_MAX_NAME_LENGTH);
+}
+
+/*
+ * Return the logical length of an object: the file size for files,
+ * the alias length for symlinks, and one chunk's worth of bytes for
+ * anything else (only directories should get that far).
+ */
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
+{
+	/* Dereference any hard linking */
+	obj = yaffs_get_equivalent_obj(obj);
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		return obj->variant.file_variant.file_size;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		if (!obj->variant.symlink_variant.alias)
+			return 0;
+		return strnlen(obj->variant.symlink_variant.alias,
+			       YAFFS_MAX_ALIAS_LENGTH);
+	default:
+		/* Only a directory should drop through to here */
+		return obj->my_dev->data_bytes_per_chunk;
+	}
+}
+
+/* Count directory entries referring to this object: itself (unless
+ * unlinked) plus every hard link. */
+int yaffs_get_obj_link_count(struct yaffs_obj *obj)
+{
+	struct list_head *lh;
+	int n_links = obj->unlinked ? 0 : 1;	/* the object itself */
+
+	list_for_each(lh, &obj->hard_links)
+		n_links++;	/* one per hard link */
+
+	return n_links;
+}
+
+/* The inode number is the object id of the (hardlink-resolved) object. */
+int yaffs_get_obj_inode(struct yaffs_obj *obj)
+{
+	return yaffs_get_equivalent_obj(obj)->obj_id;
+}
+
+/*
+ * Map an object's variant (and, for specials, its mode bits) onto the
+ * directory-entry type constants (DT_*) used by readdir.
+ */
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
+{
+	obj = yaffs_get_equivalent_obj(obj);
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		return DT_DIR;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		return DT_LNK;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		/* Pick the device/fifo/socket flavour from the mode. */
+		if (S_ISFIFO(obj->yst_mode))
+			return DT_FIFO;
+		if (S_ISCHR(obj->yst_mode))
+			return DT_CHR;
+		if (S_ISBLK(obj->yst_mode))
+			return DT_BLK;
+		if (S_ISSOCK(obj->yst_mode))
+			return DT_SOCK;
+		return DT_REG;
+	case YAFFS_OBJECT_TYPE_FILE:
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+	default:
+		return DT_REG;
+	}
+}
+
+/*
+ * Return a freshly-allocated copy of a symlink's alias string
+ * (empty string for non-symlinks).  Caller owns the returned copy.
+ */
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
+{
+	obj = yaffs_get_equivalent_obj(obj);
+
+	if (obj->variant_type != YAFFS_OBJECT_TYPE_SYMLINK)
+		return yaffs_clone_str(_Y(""));
+
+	return yaffs_clone_str(obj->variant.symlink_variant.alias);
+}
+
+/*--------------------------- Initialisation code -------------------------- */
+
+/*
+ * Verify the device supplies the driver and tag-handling functions
+ * yaffs needs, installing the default tag marshalling/compat handlers
+ * first.  Returns 1 when the function set is usable, else 0.
+ */
+static int yaffs_check_dev_fns(struct yaffs_dev *dev)
+{
+	struct yaffs_driver *drv = &dev->drv;
+	struct yaffs_tags_handler *tagger = &dev->tagger;
+
+	/* Common functions, gotta have */
+	if (!drv->drv_read_chunk_fn ||
+	    !drv->drv_write_chunk_fn ||
+	    !drv->drv_erase_fn)
+		return 0;
+
+	/* yaffs2 additionally needs bad-block mark/check support. */
+	if (dev->param.is_yaffs2 &&
+	    (!drv->drv_mark_bad_fn || !drv->drv_check_bad_fn))
+		return 0;
+
+	/* Install the default tags marshalling functions if needed. */
+	yaffs_tags_compat_install(dev);
+	yaffs_tags_marshall_install(dev);
+
+	/* Check we now have the marshalling functions required. */
+	if (!tagger->write_chunk_tags_fn ||
+	    !tagger->read_chunk_tags_fn ||
+	    !tagger->query_block_fn ||
+	    !tagger->mark_bad_fn)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Create the fake directories every mounted device needs: unlinked,
+ * deleted, root and lost+found (the latter rooted under root).
+ * Returns YAFFS_OK only if all four were created successfully.
+ */
+static int yaffs_create_initial_dir(struct yaffs_dev *dev)
+{
+	/* Initialise the unlinked, deleted, root and lost+found directories */
+	dev->lost_n_found = dev->root_dir = NULL;
+	dev->unlinked_dir = dev->del_dir = NULL;
+	dev->unlinked_dir =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+	dev->del_dir =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+	dev->root_dir =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+				  YAFFS_ROOT_MODE | S_IFDIR);
+	dev->lost_n_found =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+				  YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+	if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
+	    && dev->del_dir) {
+		yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+		return YAFFS_OK;
+	}
+	return YAFFS_FAIL;
+}
+
+int yaffs_guts_initialise(struct yaffs_dev *dev)
+{
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()");
+
+ /* Check stuff that must be set */
+
+ if (!dev) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Need a device"
+ );
+ return YAFFS_FAIL;
+ }
+
+ if (dev->is_mounted) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
+ return YAFFS_FAIL;
+ }
+
+ dev->internal_start_block = dev->param.start_block;
+ dev->internal_end_block = dev->param.end_block;
+ dev->block_offset = 0;
+ dev->chunk_offset = 0;
+ dev->n_free_chunks = 0;
+
+ dev->gc_block = 0;
+
+ if (dev->param.start_block == 0) {
+ dev->internal_start_block = dev->param.start_block + 1;
+ dev->internal_end_block = dev->param.end_block + 1;
+ dev->block_offset = 1;
+ dev->chunk_offset = dev->param.chunks_per_block;
+ }
+
+ /* Check geometry parameters. */
+
+ if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 1024) ||
+ (!dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 512) ||
+ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
+ dev->param.chunks_per_block < 2 ||
+ dev->param.n_reserved_blocks < 2 ||
+ dev->internal_start_block <= 0 ||
+ dev->internal_end_block <= 0 ||
+ dev->internal_end_block <=
+ (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
+ ) {
+ /* otherwise it is too small */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
+ dev->param.total_bytes_per_chunk,
+ dev->param.is_yaffs2 ? "2" : "",
+ dev->param.inband_tags);
+ return YAFFS_FAIL;
+ }
+
+ if (yaffs_init_nand(dev) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
+ return YAFFS_FAIL;
+ }
+
+ /* Sort out space for inband tags, if required */
+ if (dev->param.inband_tags)
+ dev->data_bytes_per_chunk =
+ dev->param.total_bytes_per_chunk -
+ sizeof(struct yaffs_packed_tags2_tags_only);
+ else
+ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+ /* Got the right mix of functions? */
+ if (!yaffs_check_dev_fns(dev)) {
+ /* Function missing */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "device function(s) missing or wrong");
+
+ return YAFFS_FAIL;
+ }
+
+ /* Finished with most checks. Further checks happen later on too. */
+
+ dev->is_mounted = 1;
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+ x = dev->data_bytes_per_chunk;
+ /* We always use dev->chunk_shift and dev->chunk_div */
+ dev->chunk_shift = calc_shifts(x);
+ x >>= dev->chunk_shift;
+ dev->chunk_div = x;
+ /* We only use chunk mask if chunk_div is 1 */
+ dev->chunk_mask = (1 << dev->chunk_shift) - 1;
+
+ /*
+ * Calculate chunk_grp_bits.
+ * We need to find the next power of 2 > than internal_end_block
+ */
+
+ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+ bits = calc_shifts_ceiling(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+ if (!dev->param.wide_tnodes_disabled) {
+ /* bits must be even so that we end up with 32-bit words */
+ if (bits & 1)
+ bits++;
+ if (bits < 16)
+ dev->tnode_width = 16;
+ else
+ dev->tnode_width = bits;
+ } else {
+ dev->tnode_width = 16;
+ }
+
+ dev->tnode_mask = (1 << dev->tnode_width) - 1;
+
+ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+ * so if the bitwidth of the
+ * chunk range we're using is greater than 16 we need
+ * to figure out chunk shift and chunk_grp_size
+ */
+
+ if (bits <= dev->tnode_width)
+ dev->chunk_grp_bits = 0;
+ else
+ dev->chunk_grp_bits = bits - dev->tnode_width;
+
+ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
+ if (dev->tnode_size < sizeof(struct yaffs_tnode))
+ dev->tnode_size = sizeof(struct yaffs_tnode);
+
+ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
+
+ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+ */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
+
+ return YAFFS_FAIL;
+ }
+
+ /* Finished verifying the device, continue with initialisation */
+
+ /* More device initialisation */
+ dev->all_gcs = 0;
+ dev->passive_gc_count = 0;
+ dev->oldest_dirty_gc_count = 0;
+ dev->bg_gcs = 0;
+ dev->gc_block_finder = 0;
+ dev->buffered_block = -1;
+ dev->doing_buffered_block_rewrite = 0;
+ dev->n_deleted_files = 0;
+ dev->n_bg_deletions = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_ecc_fixed = 0;
+ dev->n_ecc_unfixed = 0;
+ dev->n_tags_ecc_fixed = 0;
+ dev->n_tags_ecc_unfixed = 0;
+ dev->n_erase_failures = 0;
+ dev->n_erased_blocks = 0;
+ dev->gc_disable = 0;
+ dev->has_pending_prioritised_gc = 1;
+ /* Assume the worst for now, will get fixed on first GC */
+ INIT_LIST_HEAD(&dev->dirty_dirs);
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+
+ /* Initialise temporary buffers and caches. */
+ if (!yaffs_init_tmp_buffers(dev))
+ init_failed = 1;
+
+ dev->cache = NULL;
+ dev->gc_cleanup_list = NULL;
+
+ if (!init_failed && dev->param.n_caches > 0) {
+ int i;
+ void *buf;
+ int cache_bytes =
+ dev->param.n_caches * sizeof(struct yaffs_cache);
+
+ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+ dev->cache = kmalloc(cache_bytes, GFP_NOFS);
+
+ buf = (u8 *) dev->cache;
+
+ if (dev->cache)
+ memset(dev->cache, 0, cache_bytes);
+
+ for (i = 0; i < dev->param.n_caches && buf; i++) {
+ dev->cache[i].object = NULL;
+ dev->cache[i].last_use = 0;
+ dev->cache[i].dirty = 0;
+ dev->cache[i].data = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+ if (!buf)
+ init_failed = 1;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_hits = 0;
+
+ if (!init_failed) {
+ dev->gc_cleanup_list =
+ kmalloc(dev->param.chunks_per_block * sizeof(u32),
+ GFP_NOFS);
+ if (!dev->gc_cleanup_list)
+ init_failed = 1;
+ }
+
+ if (dev->param.is_yaffs2)
+ dev->param.use_header_file_size = 1;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && dev->param.is_yaffs2 &&
+ !dev->param.disable_summary &&
+ !yaffs_summary_init(dev))
+ init_failed = 1;
+
+ if (!init_failed) {
+ /* Now scan the flash. */
+ if (dev->param.is_yaffs2) {
+ if (yaffs2_checkpt_restore(dev)) {
+ yaffs_check_obj_details_loaded(dev->root_dir);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT |
+ YAFFS_TRACE_MOUNT,
+ "yaffs: restored from checkpoint"
+ );
+ } else {
+
+ /* Clean up the mess caused by an aborted
+ * checkpoint load then scan backwards.
+ */
+ yaffs_deinit_blocks(dev);
+
+ yaffs_deinit_tnodes_and_objs(dev);
+
+ dev->n_erased_blocks = 0;
+ dev->n_free_chunks = 0;
+ dev->alloc_block = -1;
+ dev->alloc_page = -1;
+ dev->n_deleted_files = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_bg_deletions = 0;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed
+ && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && !yaffs2_scan_backwards(dev))
+ init_failed = 1;
+ }
+ } else if (!yaffs1_scan(dev)) {
+ init_failed = 1;
+ }
+
+ yaffs_strip_deleted_objs(dev);
+ yaffs_fix_hanging_objs(dev);
+ if (dev->param.empty_lost_n_found)
+ yaffs_empty_l_n_f(dev);
+ }
+
+ if (init_failed) {
+ /* Clean up the mess */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: yaffs_guts_initialise() aborted.");
+
+ yaffs_deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+ dev->n_page_reads = 0;
+ dev->n_page_writes = 0;
+ dev->n_erasures = 0;
+ dev->n_gc_copies = 0;
+ dev->n_retried_writes = 0;
+
+ dev->n_retired_blocks = 0;
+
+ yaffs_verify_free_chunks(dev);
+ yaffs_verify_blocks(dev);
+
+ /* Clean up any aborted checkpoint data */
+ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: yaffs_guts_initialise() done.");
+ return YAFFS_OK;
+}
+
+void yaffs_deinitialise(struct yaffs_dev *dev)	/* Release all RAM state held for a mounted device. */
+{
+	if (dev->is_mounted) {
+		int i;
+
+		yaffs_deinit_blocks(dev);
+		yaffs_deinit_tnodes_and_objs(dev);
+		yaffs_summary_deinit(dev);
+
+		if (dev->param.n_caches > 0 && dev->cache) {
+
+			for (i = 0; i < dev->param.n_caches; i++) {
+				kfree(dev->cache[i].data);
+				dev->cache[i].data = NULL;
+			}
+
+			kfree(dev->cache);
+			dev->cache = NULL;
+		}
+
+		kfree(dev->gc_cleanup_list);	/* kfree(NULL) is a no-op if init aborted early */
+
+		for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+			kfree(dev->temp_buffer[i].buffer);	/* allocated by yaffs_init_tmp_buffers() */
+
+		dev->is_mounted = 0;
+
+		yaffs_deinit_nand(dev);	/* NAND-layer teardown last */
+	}
+}
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev)	/* Recount reclaimable chunks by walking block_info. */
+{
+	int n_free = 0;
+	int b;
+	struct yaffs_block_info *blk;
+
+	blk = dev->block_info;
+	for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+		switch (blk->block_state) {
+		case YAFFS_BLOCK_STATE_EMPTY:
+		case YAFFS_BLOCK_STATE_ALLOCATING:
+		case YAFFS_BLOCK_STATE_COLLECTING:
+		case YAFFS_BLOCK_STATE_FULL:	/* unused + soft-deleted pages count as free */
+			n_free +=
+			    (dev->param.chunks_per_block - blk->pages_in_use +
+			     blk->soft_del_pages);
+			break;
+		default:	/* other states (dirty, checkpoint, dead, ...) add nothing here */
+			break;
+		}
+		blk++;
+	}
+	return n_free;
+}
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev)	/* Free-chunk count as reported to the outside world. */
+{
+	/* This is what we report to the outside world */
+	int n_free;
+	int n_dirty_caches;
+	int blocks_for_checkpt;
+	int i;
+
+	n_free = dev->n_free_chunks;
+	n_free += dev->n_deleted_files;	/* files awaiting deletion will release chunks */
+
+	/* Now count and subtract the number of dirty chunks in the cache. */
+
+	for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {	/* NOTE(review): assumes dev->cache != NULL when n_caches > 0 -- confirm */
+		if (dev->cache[i].dirty)
+			n_dirty_caches++;
+	}
+
+	n_free -= n_dirty_caches;
+
+	n_free -=
+	    ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);	/* hold back reserved blocks plus one */
+
+	/* Now figure checkpoint space and report that... */
+	blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
+
+	n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
+
+	if (n_free < 0)
+		n_free = 0;	/* never report negative free space */
+
+	return n_free;
+}
+
+
+int yaffs_format_dev(struct yaffs_dev *dev)	/* Erase every usable (non-dead) block in the device's range. */
+{
+	int i;
+	enum yaffs_block_state state;
+	u32 dummy;
+
+	if(dev->is_mounted)
+		return YAFFS_FAIL;	/* refuse to format a mounted device */
+
+	/*
+	 * The runtime variables might not have been set up,
+	 * so set up what we need.
+	 */
+	dev->internal_start_block = dev->param.start_block;
+	dev->internal_end_block = dev->param.end_block;
+	dev->block_offset = 0;
+	dev->chunk_offset = 0;
+
+	if (dev->param.start_block == 0) {	/* shift by one so internal block numbering never uses block 0 */
+		dev->internal_start_block = dev->param.start_block + 1;
+		dev->internal_end_block = dev->param.end_block + 1;
+		dev->block_offset = 1;
+		dev->chunk_offset = dev->param.chunks_per_block;
+	}
+
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		yaffs_query_init_block_state(dev, i, &state, &dummy);
+		if (state != YAFFS_BLOCK_STATE_DEAD)	/* never erase known-bad blocks */
+			yaffs_erase_block(dev, i);
+	}
+
+	return YAFFS_OK;
+}
+
+
+/*
+ * Marshalling functions to get loff_t file sizes into and out of
+ * object headers.
+ */
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)	/* Marshal a 64-bit file size into the two 32-bit header fields. */
+{
+	oh->file_size_low = (fsize & 0xFFFFFFFF);
+	oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
+}
+
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)	/* Unmarshal a file size from an object header. */
+{
+	loff_t retval;
+
+	if (sizeof(loff_t) >= 8 && ~(oh->file_size_high))	/* all-ones high word = not stored (presumably erased flash) -- use low word only */
+		retval = (((loff_t) oh->file_size_high) << 32) |
+			(((loff_t) oh->file_size_low) & 0xFFFFFFFF);
+	else
+		retval = (loff_t) oh->file_size_low;
+
+	return retval;
+}
diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h
new file mode 100755
index 00000000..64929ed3
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.h
@@ -0,0 +1,990 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "yportenv.h"
+
+#define YAFFS_OK 1
+#define YAFFS_FAIL 0
+
+/* Give us a Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xff
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC 0x5941ff53
+
+/*
+ * Tnodes form a tree with the tnodes in "levels"
+ * Levels greater than 0 hold 8 slots which point to other tnodes.
+ * Those at level 0 hold 16 slots which point to chunks in NAND.
+ *
+ * A maximum level of 8 thus supports files of size up to:
+ *
+ * 2^(3*MAX_LEVEL+4)
+ *
+ * Thus a max level of 8 supports files with up to 2^^28 chunks which gives
+ * a maximum file size of around 512 Gbytes with 2k chunks.
+ */
+#define YAFFS_NTNODES_LEVEL0 16
+#define YAFFS_TNODES_LEVEL0_BITS 4
+#define YAFFS_TNODES_LEVEL0_MASK 0xf
+
+#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK 0x7
+#define YAFFS_TNODES_MAX_LEVEL 8
+#define YAFFS_TNODES_MAX_BITS (YAFFS_TNODES_LEVEL0_BITS + \
+ YAFFS_TNODES_INTERNAL_BITS * \
+ YAFFS_TNODES_MAX_LEVEL)
+#define YAFFS_MAX_CHUNK_ID ((1 << YAFFS_TNODES_MAX_BITS) - 1)
+
+#define YAFFS_MAX_FILE_SIZE_32 0x7fffffff
+
+/* Constants for YAFFS1 mode */
+#define YAFFS_BYTES_PER_SPARE 16
+#define YAFFS_BYTES_PER_CHUNK 512
+#define YAFFS_CHUNK_SIZE_SHIFT 9
+#define YAFFS_CHUNKS_PER_BLOCK 32
+#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
+
+
+
+#define YAFFS_ALLOCATION_NOBJECTS 100
+#define YAFFS_ALLOCATION_NTNODES 100
+#define YAFFS_ALLOCATION_NLINKS 100
+
+#define YAFFS_NOBJECT_BUCKETS 256
+
+#define YAFFS_OBJECT_SPACE 0x40000
+#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE - 1)
+
+/* Binary data version stamps */
+#define YAFFS_SUMMARY_VERSION 1
+#define YAFFS_CHECKPOINT_VERSION 7
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH 127
+#define YAFFS_MAX_ALIAS_LENGTH 79
+#else
+#define YAFFS_MAX_NAME_LENGTH 255
+#define YAFFS_MAX_ALIAS_LENGTH 159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH 15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT 1
+#define YAFFS_OBJECTID_LOSTNFOUND 2
+#define YAFFS_OBJECTID_UNLINKED 3
+#define YAFFS_OBJECTID_DELETED 4
+
+/* Fake object Id for summary data */
+#define YAFFS_OBJECTID_SUMMARY 0x10
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+#define YAFFS_MAX_SHORT_OP_CACHES 20
+
+#define YAFFS_N_TEMP_BUFFERS 6
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS (5*64)
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us to perhaps in the future use special numbers for
+ * special purposes.
+ * EFFFFF00 allows the allocation of 8 blocks/second (~1Mbytes) for 15 years,
+ * and is a larger number than the lifetime of a 2GB device.
+ */
+#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xefffff00
+
+/* Special sequence number for bad block that failed to be marked bad */
+#define YAFFS_SEQUENCE_BAD_BLOCK 0xffff0000
+
+/* ChunkCache is used for short read/write operations.*/
+struct yaffs_cache {
+ struct yaffs_obj *object;
+ int chunk_id;
+ int last_use;
+ int dirty;
+ int n_bytes; /* Only valid if the cache is dirty */
+ int locked; /* Can't push out or flush while locked. */
+ u8 *data;
+};
+
+/* yaffs1 tags structures in RAM
+ * NB This uses bitfield. Bitfields should not straddle a u32 boundary
+ * otherwise the structure size will get blown out.
+ */
+
+struct yaffs_tags {
+	unsigned chunk_id:20;		/* 0 for a header chunk, else data chunk id (cf. yaffs_ext_tags) */
+	unsigned serial_number:2;	/* yaffs1 2-bit serial number */
+	unsigned n_bytes_lsb:10;	/* low 10 bits of byte count */
+	unsigned obj_id:18;		/* owning object id */
+	unsigned ecc:12;		/* ECC over the tags themselves */
+	unsigned n_bytes_msb:2;		/* high 2 bits of byte count */
+};
+
+union yaffs_tags_union {
+ struct yaffs_tags as_tags;
+ u8 as_bytes[8];
+};
+
+
+/* Stuff used for extended tags in YAFFS2 */
+
+enum yaffs_ecc_result {
+ YAFFS_ECC_RESULT_UNKNOWN,
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+};
+
+enum yaffs_obj_type {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+ YAFFS_OBJECT_TYPE_FILE,
+ YAFFS_OBJECT_TYPE_SYMLINK,
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+};
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+struct yaffs_ext_tags {
+ unsigned chunk_used; /* Status of the chunk: used or unused */
+ unsigned obj_id; /* If 0 this is not used */
+ unsigned chunk_id; /* If 0 this is a header, else a data chunk */
+ unsigned n_bytes; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+ enum yaffs_ecc_result ecc_result;
+ unsigned block_bad;
+
+ /* YAFFS 1 stuff */
+ unsigned is_deleted; /* The chunk is marked deleted */
+ unsigned serial_number; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+ unsigned seq_number; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+ unsigned extra_available; /* Extra info available if not zero */
+ unsigned extra_parent_id; /* The parent object */
+ unsigned extra_is_shrink; /* Is it a shrink header? */
+ unsigned extra_shadows; /* Does this shadow another object? */
+
+ enum yaffs_obj_type extra_obj_type; /* What object type? */
+
+ loff_t extra_file_size; /* Length if it is a file */
+ unsigned extra_equiv_id; /* Equivalent object for a hard link */
+};
+
+/* Spare structure for YAFFS1 */
+struct yaffs_spare {
+ u8 tb0;
+ u8 tb1;
+ u8 tb2;
+ u8 tb3;
+ u8 page_status; /* set to 0 to delete the chunk */
+ u8 block_status;
+ u8 tb4;
+ u8 tb5;
+ u8 ecc1[3];
+ u8 tb6;
+ u8 tb7;
+ u8 ecc2[3];
+};
+
+/*Special structure for passing through to mtd */
+struct yaffs_nand_spare {
+ struct yaffs_spare spare;
+ int eccres1;
+ int eccres2;
+};
+
+/* Block data in RAM */
+
+enum yaffs_block_state {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
+ /* Being scanned */
+
+ YAFFS_BLOCK_STATE_NEEDS_SCAN,
+ /* The block might have something on it (ie it is allocating or full,
+ * perhaps empty) but it needs to be scanned to determine its true
+ * state.
+ * This state is only valid during scanning.
+ * NB We tolerate empty because the pre-scanner might be incapable of
+ * deciding
+ * However, if this state is returned on a YAFFS2 device,
+ * then we expect a sequence number
+ */
+
+ YAFFS_BLOCK_STATE_EMPTY,
+ /* This block is empty */
+
+ YAFFS_BLOCK_STATE_ALLOCATING,
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+ * allocation. Should never be more than one of these.
+ * If a block is only partially allocated at mount it is treated as
+ * full.
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
+ * If a block was only partially allocated when mounted we treat
+ * it as fully allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+ /* The block was full and now all chunks have been deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+ /* This block is assigned to holding checkpoint data. */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+};
+
+#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+struct yaffs_block_info {
+
+ int soft_del_pages:10; /* number of soft deleted pages */
+ int pages_in_use:10; /* number of pages in use */
+ unsigned block_state:4; /* One of the above block states. */
+ /* NB use unsigned because enum is sometimes
+ * an int */
+ u32 needs_retiring:1; /* Data has failed on this block, */
+ /*need to get valid data off and retire*/
+ u32 skip_erased_check:1;/* Skip the erased check on this block */
+ u32 gc_prioritise:1; /* An ECC check or blank check has failed.
+ Block should be prioritised for GC */
+ u32 chunk_error_strikes:3; /* How many times we've had ecc etc
+ failures on this block and tried to reuse it */
+ u32 has_summary:1; /* The block has a summary */
+
+ u32 has_shrink_hdr:1; /* This block has at least one shrink header */
+ u32 seq_number; /* block sequence number for yaffs2 */
+
+};
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+struct yaffs_obj_hdr {
+ enum yaffs_obj_type type;
+
+ /* Apply to everything */
+ int parent_obj_id;
+ u16 sum_no_longer_used; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ /* The following apply to all object types except for hard links */
+ u32 yst_mode; /* protection */
+
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+
+ /* File size applies to files only */
+ u32 file_size_low;
+
+ /* Equivalent object id applies to hard links only. */
+ int equiv_id;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+ u32 yst_rdev; /* stuff for block and char devices (major/min) */
+
+ u32 win_ctime[2];
+ u32 win_atime[2];
+ u32 win_mtime[2];
+
+ u32 inband_shadowed_obj_id;
+ u32 inband_is_shrink;
+
+ u32 file_size_high;
+ u32 reserved[1];
+ int shadows_obj; /* This object header shadows the
+ specified object if > 0 */
+
+	/* is_shrink applies to object headers written when we make a hole. */
+ u32 is_shrink;
+
+};
+
+/*--------------------------- Tnode -------------------------- */
+
+struct yaffs_tnode {
+ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
+};
+
+/*------------------------ Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has children links
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+struct yaffs_file_var {
+ loff_t file_size;
+ loff_t scanned_size;
+ loff_t shrink_size;
+ int top_level;
+ struct yaffs_tnode *top;
+};
+
+struct yaffs_dir_var {
+ struct list_head children; /* list of child links */
+ struct list_head dirty; /* Entry for list of dirty directories */
+};
+
+struct yaffs_symlink_var {
+ YCHAR *alias;
+};
+
+struct yaffs_hardlink_var {
+ struct yaffs_obj *equiv_obj;
+ u32 equiv_id;
+};
+
+union yaffs_obj_var {
+ struct yaffs_file_var file_variant;
+ struct yaffs_dir_var dir_variant;
+ struct yaffs_symlink_var symlink_variant;
+ struct yaffs_hardlink_var hardlink_variant;
+};
+
+struct yaffs_obj {
+ u8 deleted:1; /* This should only apply to unlinked files. */
+ u8 soft_del:1; /* it has also been soft deleted */
+ u8 unlinked:1; /* An unlinked file.*/
+ u8 fake:1; /* A fake object has no presence on NAND. */
+ u8 rename_allowed:1; /* Some objects cannot be renamed. */
+ u8 unlink_allowed:1;
+ u8 dirty:1; /* the object needs to be written to flash */
+ u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available
+ * ie. file data chunks encountered before
+ * the header.
+ */
+ u8 lazy_loaded:1; /* This object has been lazy loaded and
+ * is missing some detail */
+
+ u8 defered_free:1; /* Object is removed from NAND, but is
+ * still in the inode cache.
+ * Free of object is defered.
+ * until the inode is released.
+ */
+ u8 being_created:1; /* This object is still being created
+ * so skip some verification checks. */
+ u8 is_shadowed:1; /* This object is shadowed on the way
+ * to being renamed. */
+
+ u8 xattr_known:1; /* We know if this has object has xattribs
+ * or not. */
+ u8 has_xattr:1; /* This object has xattribs.
+ * Only valid if xattr_known. */
+
+ u8 serial; /* serial number of chunk in NAND.*/
+ u16 sum; /* sum of the name to speed searching */
+
+ struct yaffs_dev *my_dev; /* The device I'm on */
+
+ struct list_head hash_link; /* list of objects in hash bucket */
+
+ struct list_head hard_links; /* hard linked object chain*/
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_obj *parent;
+ struct list_head siblings;
+
+ /* Where's my object header in NAND? */
+ int hdr_chunk;
+
+ int n_data_chunks; /* Number of data chunks for this file. */
+
+ u32 obj_id; /* the object id value */
+
+ u32 yst_mode;
+
+ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+
+#ifdef CONFIG_YAFFS_WINCE
+ u32 win_ctime[2];
+ u32 win_mtime[2];
+ u32 win_atime[2];
+#else
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+#endif
+
+ u32 yst_rdev;
+
+ void *my_inode;
+
+ enum yaffs_obj_type variant_type;
+
+ union yaffs_obj_var variant;
+
+};
+
+struct yaffs_obj_bucket {
+ struct list_head list;
+ int count;
+};
+
+/* yaffs_checkpt_obj holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+struct yaffs_checkpt_obj {
+ int struct_type;
+ u32 obj_id;
+ u32 parent_id;
+ int hdr_chunk;
+ enum yaffs_obj_type variant_type:3;
+ u8 deleted:1;
+ u8 soft_del:1;
+ u8 unlinked:1;
+ u8 fake:1;
+ u8 rename_allowed:1;
+ u8 unlink_allowed:1;
+ u8 serial;
+ int n_data_chunks;
+ loff_t size_or_equiv_obj;
+};
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few.
+ */
+
+struct yaffs_buffer {
+ u8 *buffer;
+ int in_use;
+};
+
+/*----------------- Device ---------------------------------*/
+
+struct yaffs_param {
+ const YCHAR *name;
+
+ /*
+ * Entry parameters set up way early. Yaffs sets up the rest.
+ * The structure should be zeroed out before use so that unused
+ * and default values are zero.
+ */
+
+ int inband_tags; /* Use unband tags */
+ u32 total_bytes_per_chunk; /* Should be >= 512, does not need to
+ be a power of 2 */
+ int chunks_per_block; /* does not need to be a power of 2 */
+ int spare_bytes_per_chunk; /* spare area size */
+ int start_block; /* Start block we're allowed to use */
+ int end_block; /* End block we're allowed to use */
+ int n_reserved_blocks; /* Tuneable so that we can reduce
+ * reserved blocks on NOR and RAM. */
+
+ int n_caches; /* If <= 0, then short op caching is disabled,
+ * else the number of short op caches.
+ */
+ int cache_bypass_aligned; /* If non-zero then bypass the cache for
+ * aligned writes.
+ */
+
+ int use_nand_ecc; /* Flag to decide whether or not to use
+ * NAND driver ECC on data (yaffs1) */
+ int tags_9bytes; /* Use 9 byte tags */
+ int no_tags_ecc; /* Flag to decide whether or not to do ECC
+ * on packed tags (yaffs2) */
+
+ int is_yaffs2; /* Use yaffs2 mode on this device */
+
+ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
+
+ int refresh_period; /* How often to check for a block refresh */
+
+ /* Checkpoint control. Can be set before or after initialisation */
+ u8 skip_checkpt_rd;
+ u8 skip_checkpt_wr;
+
+ int enable_xattr; /* Enable xattribs */
+
+ int max_objects; /*
+ * Set to limit the number of objects created.
+ * 0 = no limit.
+ */
+
+ /* The remove_obj_fn function must be supplied by OS flavours that
+ * need it.
+ * yaffs direct uses it to implement the faster readdir.
+ * Linux uses it to protect the directory during unlocking.
+ */
+ void (*remove_obj_fn) (struct yaffs_obj *obj);
+
+ /* Callback to mark the superblock dirty */
+ void (*sb_dirty_fn) (struct yaffs_dev *dev);
+
+ /* Callback to control garbage collection. */
+ unsigned (*gc_control_fn) (struct yaffs_dev *dev);
+
+ /* Debug control flags. Don't use unless you know what you're doing */
+ int use_header_file_size; /* Flag to determine if we should use
+ * file sizes from the header */
+ int disable_lazy_load; /* Disable lazy loading on this device */
+ int wide_tnodes_disabled; /* Set to disable wide tnodes */
+ int disable_soft_del; /* yaffs 1 only: Set to disable the use of
+ * softdeletion. */
+
+ int defered_dir_update; /* Set to defer directory updates */
+
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ int auto_unicode;
+#endif
+ int always_check_erased; /* Force chunk erased check always on */
+
+ int disable_summary;
+
+};
+
+struct yaffs_driver {
+ int (*drv_write_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, int data_len,
+ const u8 *oob, int oob_len);
+ int (*drv_read_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, int data_len,
+ u8 *oob, int oob_len,
+ enum yaffs_ecc_result *ecc_result);
+ int (*drv_erase_fn) (struct yaffs_dev *dev, int block_no);
+ int (*drv_mark_bad_fn) (struct yaffs_dev *dev, int block_no);
+ int (*drv_check_bad_fn) (struct yaffs_dev *dev, int block_no);
+ int (*drv_initialise_fn) (struct yaffs_dev *dev);
+ int (*drv_deinitialise_fn) (struct yaffs_dev *dev);
+};
+
+struct yaffs_tags_handler {
+ int (*write_chunk_tags_fn) (struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ const struct yaffs_ext_tags *tags);
+ int (*read_chunk_tags_fn) (struct yaffs_dev *dev,
+ int nand_chunk, u8 *data,
+ struct yaffs_ext_tags *tags);
+
+ int (*query_block_fn) (struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number);
+ int (*mark_bad_fn) (struct yaffs_dev *dev, int block_no);
+};
+
+struct yaffs_dev {
+ struct yaffs_param param;
+ struct yaffs_driver drv;
+ struct yaffs_tags_handler tagger;
+
+ /* Context storage. Holds extra OS specific data for this device */
+
+ void *os_context;
+ void *driver_context;
+
+ struct list_head dev_list;
+
+ /* Runtime parameters. Set up by YAFFS. */
+ int data_bytes_per_chunk;
+
+ /* Non-wide tnode stuff */
+ u16 chunk_grp_bits; /* Number of bits that need to be resolved if
+ * the tnodes are not wide enough.
+ */
+ u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
+
+ /* Stuff to support wide tnodes */
+ u32 tnode_width;
+ u32 tnode_mask;
+ u32 tnode_size;
+
+ /* Stuff for figuring out file offset to chunk conversions */
+ u32 chunk_shift; /* Shift value */
+ u32 chunk_div; /* Divisor after shifting: 1 for 2^n sizes */
+ u32 chunk_mask; /* Mask to use for power-of-2 case */
+
+ int is_mounted;
+ int read_only;
+ int is_checkpointed;
+
+ /* Stuff to support block offsetting to support start block zero */
+ int internal_start_block;
+ int internal_end_block;
+ int block_offset;
+ int chunk_offset;
+
+ /* Runtime checkpointing stuff */
+ int checkpt_page_seq; /* running sequence number of checkpt pages */
+ int checkpt_byte_count;
+ int checkpt_byte_offs;
+ u8 *checkpt_buffer;
+ int checkpt_open_write;
+ int blocks_in_checkpt;
+ int checkpt_cur_chunk;
+ int checkpt_cur_block;
+ int checkpt_next_block;
+ int *checkpt_block_list;
+ int checkpt_max_blocks;
+ u32 checkpt_sum;
+ u32 checkpt_xor;
+
+ int checkpoint_blocks_required; /* Number of blocks needed to store
+ * current checkpoint set */
+
+ /* Block Info */
+ struct yaffs_block_info *block_info;
+ u8 *chunk_bits; /* bitmap of chunks in use */
+ unsigned block_info_alt:1; /* allocated using alternative alloc */
+ unsigned chunk_bits_alt:1; /* allocated using alternative alloc */
+ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
+ * Must be consistent with chunks_per_block.
+ */
+
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int alloc_block_finder; /* Used to search for next allocation block */
+
+ /* Object and Tnode memory management */
+ void *allocator;
+ int n_obj;
+ int n_tnodes;
+
+ int n_hardlinks;
+
+ struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
+ u32 bucket_finder;
+
+ int n_free_chunks;
+
+ /* Garbage collection control */
+ u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
+ u32 n_clean_ups;
+
+ unsigned has_pending_prioritised_gc; /* We think this device might
+ have pending prioritised gcs */
+ unsigned gc_disable;
+ unsigned gc_block_finder;
+ unsigned gc_dirtiest;
+ unsigned gc_pages_in_use;
+ unsigned gc_not_done;
+ unsigned gc_block;
+ unsigned gc_chunk;
+ unsigned gc_skip;
+ struct yaffs_summary_tags *gc_sum_tags;
+
+ /* Special directories */
+ struct yaffs_obj *root_dir;
+ struct yaffs_obj *lost_n_found;
+
+ int buffered_block; /* Which block is buffered here? */
+ int doing_buffered_block_rewrite;
+
+ struct yaffs_cache *cache;
+ int cache_last_use;
+
+ /* Stuff for background deletion and unlinked files. */
+ struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted
+ files live. */
+ struct yaffs_obj *del_dir; /* Directory where deleted objects are
+ sent to disappear. */
+ struct yaffs_obj *unlinked_deletion; /* Current file being
+ background deleted. */
+ int n_deleted_files; /* Count of files awaiting deletion; */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* Temporary buffer management */
+ struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
+ int max_temp;
+ int temp_in_use;
+ int unmanaged_buffer_allocs;
+ int unmanaged_buffer_deallocs;
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently
+ allocating block */
+ unsigned oldest_dirty_seq;
+ unsigned oldest_dirty_block;
+
+ /* Block refreshing */
+ int refresh_skip; /* A skip down counter.
+ * Refresh happens when this gets to zero. */
+
+ /* Dirty directory handling */
+ struct list_head dirty_dirs; /* List of dirty directories */
+
+ /* Summary */
+ int chunks_per_summary;
+ struct yaffs_summary_tags *sum_tags;
+
+ /* Statistics */
+ u32 n_page_writes;
+ u32 n_page_reads;
+ u32 n_erasures;
+ u32 n_bad_markings;
+ u32 n_erase_failures;
+ u32 n_gc_copies;
+ u32 all_gcs;
+ u32 passive_gc_count;
+ u32 oldest_dirty_gc_count;
+ u32 n_gc_blocks;
+ u32 bg_gcs;
+ u32 n_retried_writes;
+ u32 n_retired_blocks;
+ u32 n_ecc_fixed;
+ u32 n_ecc_unfixed;
+ u32 n_tags_ecc_fixed;
+ u32 n_tags_ecc_unfixed;
+ u32 n_deletions;
+ u32 n_unmarked_deletions;
+ u32 refresh_count;
+ u32 cache_hits;
+ u32 tags_used;
+ u32 summary_used;
+
+};
+
+/* The CheckpointDevice structure holds the device information that changes
+ * at runtime and must be preserved over unmount/mount cycles.
+ */
+struct yaffs_checkpt_dev {
+ int struct_type;
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int n_free_chunks;
+
+ int n_deleted_files; /* Count of files awaiting deletion; */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently
+ * allocating block */
+
+};
+
+struct yaffs_checkpt_validity {
+ int struct_type;
+ u32 magic;
+ u32 version;
+ u32 head;
+};
+
+struct yaffs_shadow_fixer {
+ int obj_id;
+ int shadowed_id;
+ struct yaffs_shadow_fixer *next;
+};
+
+/* Structure for doing xattr modifications */
+struct yaffs_xattr_mod {
+ int set; /* If 0 then this is a deletion */
+ const YCHAR *name;
+ const void *data;
+ int size;
+ int flags;
+ int result;
+};
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_guts_initialise(struct yaffs_dev *dev);
+void yaffs_deinitialise(struct yaffs_dev *dev);
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name);
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
+int yaffs_del_obj(struct yaffs_obj *obj);
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj);
+int yaffs_get_obj_inode(struct yaffs_obj *obj);
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
+int yaffs_get_obj_link_count(struct yaffs_obj *obj);
+
+/* File operations */
+int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
+ int n_bytes);
+int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_trhrough);
+int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid);
+
+int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
+
+/* Flushing and checkpointing */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev);
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev);
+int yaffs_checkpoint_restore(struct yaffs_dev *dev);
+
+/* Directory operations */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+ u32 mode, u32 uid, u32 gid);
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
+ const YCHAR *name);
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
+
+/* Link operations */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
+ struct yaffs_obj *equiv_obj);
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
+
+/* Symlink operations */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, const YCHAR *alias);
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
+
+/* Special inodes (fifos, sockets and devices) */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, u32 rdev);
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
+ const void *value, int size, int flags);
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
+ int size);
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name);
+
+/* Special directories */
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj);
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
+
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
+
+/* Debug dump */
+int yaffs_dump_obj(struct yaffs_obj *obj);
+
+void yaffs_guts_test(struct yaffs_dev *dev);
+
+/* A few useful functions to be used within the core files*/
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn);
+int yaffs_check_ff(u8 *buffer, int n_bytes);
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev);
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer);
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type);
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan);
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name);
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh);
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
+YCHAR *yaffs_clone_str(const YCHAR *str);
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
+ int force, int is_shrink, int shadows,
+ struct yaffs_xattr_mod *xop);
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning);
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn);
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+ int n_bytes, int write_trhrough);
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id);
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos);
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
+
+int yaffs_format_dev(struct yaffs_dev *dev);
+
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 *offset_out);
+/*
+ * Marshalling functions to get loff_t file sizes into and out of
+ * object headers.
+ */
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize);
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh);
+loff_t yaffs_max_file_size(struct yaffs_dev *dev);
+
+
+#endif
diff --git a/fs/yaffs2/yaffs_linux.h b/fs/yaffs2/yaffs_linux.h
new file mode 100755
index 00000000..c20ab14b
--- /dev/null
+++ b/fs/yaffs2/yaffs_linux.h
@@ -0,0 +1,48 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_LINUX_H__
+#define __YAFFS_LINUX_H__
+
+#include "yportenv.h"
+
+struct yaffs_linux_context {
+ struct list_head context_list; /* List of these we have mounted */
+ struct yaffs_dev *dev;
+ struct super_block *super;
+ struct task_struct *bg_thread; /* Background thread for this device */
+ int bg_running;
+ struct mutex gross_lock; /* Gross locking mutex*/
+ u8 *spare_buffer; /* For mtdif2 use. Don't know the buffer size
+ * at compile time so we have to allocate it.
+ */
+ struct list_head search_contexts;
+ struct task_struct *readdir_process;
+ unsigned mount_id;
+ int dirty;
+};
+
+#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
+#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+#define WRITE_SIZE_STR "writesize"
+#define WRITE_SIZE(mtd) ((mtd)->writesize)
+#else
+#define WRITE_SIZE_STR "oobblock"
+#define WRITE_SIZE(mtd) ((mtd)->oobblock)
+#endif
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif.c b/fs/yaffs2/yaffs_mtdif.c
new file mode 100755
index 00000000..bd63855a
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -0,0 +1,294 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/nand.h"
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_linux.h"
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+#define mtd_erase(m, ei) (m)->erase(m, ei)
+#define mtd_write_oob(m, addr, pops) (m)->write_oob(m, addr, pops)
+#define mtd_read_oob(m, addr, pops) (m)->read_oob(m, addr, pops)
+#define mtd_block_isbad(m, offs) (m)->block_isbad(m, offs)
+#define mtd_block_markbad(m, offs) (m)->block_markbad(m, offs)
+#endif
+
+
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ u32 addr =
+ ((loff_t) block_no) * dev->param.total_bytes_per_chunk *
+ dev->param.chunks_per_block;
+ struct erase_info ei;
+ int retval = 0;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+ retval = mtd_erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+
+ return YAFFS_FAIL;
+}
+
+
+static int yaffs_mtd_write(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, int data_len,
+ const u8 *oob, int oob_len)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ loff_t addr;
+ struct mtd_oob_ops ops;
+ int retval;
+
+ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = (data) ? data_len : 0;
+ ops.ooblen = oob_len;
+ ops.datbuf = (u8 *)data;
+ ops.oobbuf = (u8 *)oob;
+
+ retval = mtd_write_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "write_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_read(struct yaffs_dev *dev, int nand_chunk,
+			   u8 *data, int data_len,
+			   u8 *oob, int oob_len,
+			   enum yaffs_ecc_result *ecc_result)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	loff_t addr;
+	struct mtd_oob_ops ops;
+	int retval;
+
+	addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;	/* must match yaffs_mtd_write addressing */
+	memset(&ops, 0, sizeof(ops));
+	ops.mode = MTD_OPS_AUTO_OOB;
+	ops.len = (data) ? data_len : 0;
+	ops.ooblen = oob_len;
+	ops.datbuf = data;
+	ops.oobbuf = oob;
+
+#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
+	/* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+	 * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+	 */
+	ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
+#endif
+	/* Read page and oob using MTD.
+	 * Check status and determine ECC result.
+	 */
+	retval = mtd_read_oob(mtd, addr, &ops);
+	if (retval)
+		yaffs_trace(YAFFS_TRACE_MTD,
+			"read_oob failed, chunk %d, mtd error %d",
+			nand_chunk, retval);
+
+	switch (retval) {
+	case 0:
+		/* no error */
+		if(ecc_result)
+			*ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+		break;
+
+	case -EUCLEAN:
+		/* MTD's ECC fixed the data */
+		if(ecc_result)
+			*ecc_result = YAFFS_ECC_RESULT_FIXED;
+		dev->n_ecc_fixed++;
+		break;
+
+	case -EBADMSG:
+	default:
+		/* MTD's ECC could not fix the data */
+		dev->n_ecc_unfixed++;
+		if(ecc_result)
+			*ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+		return YAFFS_FAIL;
+	}
+
+	return YAFFS_OK;
+}
+
+static int yaffs_mtd_erase(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+
+ loff_t addr;
+ struct erase_info ei;
+ int retval = 0;
+ u32 block_size;
+
+ block_size = dev->param.total_bytes_per_chunk *
+ dev->param.chunks_per_block;
+ addr = ((loff_t) block_no) * block_size;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = block_size;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+ retval = mtd_erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+
+ return YAFFS_FAIL;
+}
+
+static int yaffs_mtd_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no);
+
+ retval = mtd_block_markbad(mtd, (loff_t) blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_check_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "checking block %d bad", block_no);
+
+ retval = mtd_block_isbad(mtd, (loff_t) blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_initialise(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
+
+static int yaffs_mtd_deinitialise(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
+
+
+void yaffs_mtd_drv_install(struct yaffs_dev *dev)
+{
+ struct yaffs_driver *drv = &dev->drv;
+
+ drv->drv_write_chunk_fn = yaffs_mtd_write;
+ drv->drv_read_chunk_fn = yaffs_mtd_read;
+ drv->drv_erase_fn = yaffs_mtd_erase;
+ drv->drv_mark_bad_fn = yaffs_mtd_mark_bad;
+ drv->drv_check_bad_fn = yaffs_mtd_check_bad;
+ drv->drv_initialise_fn = yaffs_mtd_initialise;
+ drv->drv_deinitialise_fn = yaffs_mtd_deinitialise;
+}
+
+
+struct mtd_info * yaffs_get_mtd_device(dev_t sdev)
+{
+	struct mtd_info *mtd;
+
+	/* Check it's an mtd device..... */
+	if (MAJOR(sdev) != MTD_BLOCK_MAJOR)
+		return NULL;	/* This isn't an mtd device */
+	mtd = get_mtd_device(NULL, MINOR(sdev));	/* was an infinite recursive self-call */
+	if (!mtd || IS_ERR(mtd))
+		return NULL;	/* No MTD device with that minor number */
+	/* Check it's NAND */
+	if (mtd->type != MTD_NANDFLASH) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"yaffs: MTD device is not NAND it's type %d",
+			mtd->type);
+		return NULL;
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
+	yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
+	yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+	yaffs_trace(YAFFS_TRACE_OS, " size %u", mtd->size);
+#else
+	yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
+#endif
+
+	return mtd;
+}
+
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags)
+{
+ if (yaffs_version == 2) {
+ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+ !inband_tags) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not have the right page sizes"
+ );
+ return -1;
+ }
+ } else {
+ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+ mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not support have the right page sizes"
+ );
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+void yaffs_put_mtd_device(struct mtd_info *mtd)
+{
+ if(mtd)
+ put_mtd_device(mtd);
+}
diff --git a/fs/yaffs2/yaffs_mtdif.h b/fs/yaffs2/yaffs_mtdif.h
new file mode 100755
index 00000000..9cff224c
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -0,0 +1,25 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_mtd_drv_install(struct yaffs_dev *dev);
+struct mtd_info * yaffs_get_mtd_device(dev_t sdev);
+void yaffs_put_mtd_device(struct mtd_info *mtd);
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags);
+#endif
diff --git a/fs/yaffs2/yaffs_nameval.c b/fs/yaffs2/yaffs_nameval.c
new file mode 100755
index 00000000..4bdf4ed7
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.c
@@ -0,0 +1,208 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This simple implementation of a name-value store assumes a small number of
+* values and fits into a small finite buffer.
+ *
+ * Each attribute is stored as a record:
+ * sizeof(int) bytes record size.
+ * strnlen+1 bytes name null terminated.
+ * nbytes value.
+ * ----------
+ * total size stored in record size
+ *
+ * This code has not been tested with unicode yet.
+ */
+
+#include "yaffs_nameval.h"
+
+#include "yportenv.h"
+
+static int nval_find(const char *xb, int xb_size, const YCHAR *name,
+ int *exist_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ if (!strncmp((YCHAR *) (xb + pos + sizeof(int)),
+ name, size)) {
+ if (exist_size)
+ *exist_size = size;
+ return pos;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ if (exist_size)
+ *exist_size = 0;
+ return -ENODATA;
+}
+
+static int nval_used(const char *xb, int xb_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return pos;
+}
+
+int nval_del(char *xb, int xb_size, const YCHAR *name)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos < 0 || pos >= xb_size)
+ return -ENODATA;
+
+ /* Find size, shift rest over this record,
+ * then zero out the rest of buffer */
+ memcpy(&size, xb + pos, sizeof(int));
+ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
+ memset(xb + (xb_size - size), 0, size);
+ return 0;
+}
+
+int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf,
+	     int bsize, int flags)
+{
+	int pos;
+	int namelen = strnlen(name, xb_size);
+	int reclen;
+	int size_exist = 0;
+	int space;
+	int start;
+
+	pos = nval_find(xb, xb_size, name, &size_exist);
+
+	if (flags & XATTR_CREATE && pos >= 0)
+		return -EEXIST;
+	if (flags & XATTR_REPLACE && pos < 0)
+		return -ENODATA;
+
+	start = nval_used(xb, xb_size);
+	space = xb_size - start + size_exist;
+
+	reclen = (sizeof(int) + namelen + 1 + bsize);
+
+	if (reclen > space)
+		return -ENOSPC;
+
+	if (pos >= 0) {
+		nval_del(xb, xb_size, name);
+		start = nval_used(xb, xb_size);
+	}
+
+	pos = start;
+
+	memcpy(xb + pos, &reclen, sizeof(int));
+	pos += sizeof(int);
+	strncpy((YCHAR *) (xb + pos), name, namelen + 1);	/* copy name + NUL only; reclen zero-padded past the record */
+	pos += (namelen + 1);
+	memcpy(xb + pos, buf, bsize);
+	return 0;
+}
+
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+
+ memcpy(&size, xb + pos, sizeof(int));
+ pos += sizeof(int); /* advance past record length */
+ size -= sizeof(int);
+
+ /* Advance over name string */
+ while (xb[pos] && size > 0 && pos < xb_size) {
+ pos++;
+ size--;
+ }
+ /*Advance over NUL */
+ pos++;
+ size--;
+
+ /* If bsize is zero then this is a size query.
+ * Return the size, but don't copy.
+ */
+ if (!bsize)
+ return size;
+
+ if (size <= bsize) {
+ memcpy(buf, xb + pos, size);
+ return size;
+ }
+ }
+ if (pos >= 0)
+ return -ERANGE;
+
+ return -ENODATA;
+}
+
+int nval_list(const char *xb, int xb_size, char *buf, int bsize)
+{
+ int pos = 0;
+ int size;
+ int name_len;
+ int ncopied = 0;
+ int filled = 0;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > sizeof(int) &&
+ size <= xb_size &&
+ (pos + size) < xb_size &&
+ !filled) {
+ pos += sizeof(int);
+ size -= sizeof(int);
+ name_len = strnlen((YCHAR *) (xb + pos), size);
+ if (ncopied + name_len + 1 < bsize) {
+ memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
+ buf += name_len;
+ *buf = '\0';
+ buf++;
+ if (sizeof(YCHAR) > 1) {
+ *buf = '\0';
+ buf++;
+ }
+ ncopied += (name_len + 1);
+ } else {
+ filled = 1;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return ncopied;
+}
+
+int nval_hasvalues(const char *xb, int xb_size)
+{
+ return nval_used(xb, xb_size) > 0;
+}
diff --git a/fs/yaffs2/yaffs_nameval.h b/fs/yaffs2/yaffs_nameval.h
new file mode 100755
index 00000000..951e64f8
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __NAMEVAL_H__
+#define __NAMEVAL_H__
+
+#include "yportenv.h"
+
+int nval_del(char *xb, int xb_size, const YCHAR * name);
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags);
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize);
+int nval_list(const char *xb, int xb_size, char *buf, int bsize);
+int nval_hasvalues(const char *xb, int xb_size);
+#endif
diff --git a/fs/yaffs2/yaffs_nand.c b/fs/yaffs2/yaffs_nand.c
new file mode 100755
index 00000000..9afd5ec8
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.c
@@ -0,0 +1,118 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+
+#include "yaffs_getblockinfo.h"
+#include "yaffs_summary.h"
+
+static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
+{
+ return chunk - dev->chunk_offset;
+}
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 *buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ struct yaffs_ext_tags local_tags;
+ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
+
+ dev->n_page_reads++;
+
+ /* If there are no tags provided use local tags. */
+ if (!tags)
+ tags = &local_tags;
+
+ result = dev->tagger.read_chunk_tags_fn(dev, flash_chunk, buffer, tags);
+ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+ struct yaffs_block_info *bi;
+ bi = yaffs_get_block_info(dev,
+ nand_chunk /
+ dev->param.chunks_per_block);
+ yaffs_handle_chunk_error(dev, bi);
+ }
+ return result;
+}
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
+
+ dev->n_page_writes++;
+
+ if (!tags) {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ tags->seq_number = dev->seq_number;
+ tags->chunk_used = 1;
+ yaffs_trace(YAFFS_TRACE_WRITE,
+ "Writing chunk %d tags %d %d",
+ nand_chunk, tags->obj_id, tags->chunk_id);
+
+ result = dev->tagger.write_chunk_tags_fn(dev, flash_chunk,
+ buffer, tags);
+
+ yaffs_summary_add(dev, tags, nand_chunk);
+
+ return result;
+}
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ block_no -= dev->block_offset;
+ dev->n_bad_markings++;
+ return dev->tagger.mark_bad_fn(dev, block_no);
+}
+
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ block_no -= dev->block_offset;
+ return dev->tagger.query_block_fn(dev, block_no, state, seq_number);
+}
+
+int yaffs_erase_block(struct yaffs_dev *dev, int block_no)
+{
+ int result;
+
+ block_no -= dev->block_offset;
+ dev->n_erasures++;
+ result = dev->drv.drv_erase_fn(dev, block_no);
+ return result;
+}
+
+int yaffs_init_nand(struct yaffs_dev *dev)
+{
+ if (dev->drv.drv_initialise_fn)
+ return dev->drv.drv_initialise_fn(dev);
+ return YAFFS_OK;
+}
+
+int yaffs_deinit_nand(struct yaffs_dev *dev)
+{
+ if (dev->drv.drv_deinitialise_fn)
+ return dev->drv.drv_deinitialise_fn(dev);
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_nand.h b/fs/yaffs2/yaffs_nand.h
new file mode 100755
index 00000000..804e97ad
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ unsigned *seq_number);
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
+
+int yaffs_init_nand(struct yaffs_dev *dev);
+int yaffs_deinit_nand(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags1.c b/fs/yaffs2/yaffs_packedtags1.c
new file mode 100755
index 00000000..dd9a331d
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.c
@@ -0,0 +1,56 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+static const u8 all_ff[20] = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t)
+{
+ pt->chunk_id = t->chunk_id;
+ pt->serial_number = t->serial_number;
+ pt->n_bytes = t->n_bytes;
+ pt->obj_id = t->obj_id;
+ pt->ecc = 0;
+ pt->deleted = (t->is_deleted) ? 0 : 1;
+ pt->unused_stuff = 0;
+ pt->should_be_ff = 0xffffffff;
+}
+
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt)
+{
+
+ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
+ t->block_bad = 0;
+ if (pt->should_be_ff != 0xffffffff)
+ t->block_bad = 1;
+ t->chunk_used = 1;
+ t->obj_id = pt->obj_id;
+ t->chunk_id = pt->chunk_id;
+ t->n_bytes = pt->n_bytes;
+ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ t->is_deleted = (pt->deleted) ? 0 : 1;
+ t->serial_number = pt->serial_number;
+ } else {
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+ }
+}
diff --git a/fs/yaffs2/yaffs_packedtags1.h b/fs/yaffs2/yaffs_packedtags1.h
new file mode 100755
index 00000000..b80f0a5b
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+struct yaffs_packed_tags1 {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned deleted:1;
+ unsigned unused_stuff:1;
+ unsigned should_be_ff;
+
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags2.c b/fs/yaffs2/yaffs_packedtags2.c
new file mode 100755
index 00000000..e1d18cc3
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.c
@@ -0,0 +1,197 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed into
+ * speed scanning
+ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+/* Extra flags applied to chunk_id */
+
+#define EXTRA_HEADER_INFO_FLAG 0x80000000
+#define EXTRA_SHRINK_FLAG 0x40000000
+#define EXTRA_SHADOWS_FLAG 0x20000000
+#define EXTRA_SPARE_FLAGS 0x10000000
+
+#define ALL_EXTRA_FLAGS 0xf0000000
+
+/* Also, the top 4 bits of the object Id are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK ((0x0f) << EXTRA_OBJECT_TYPE_SHIFT)
+
+static void yaffs_dump_packed_tags2_tags_only(
+ const struct yaffs_packed_tags2_tags_only *ptt)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "packed tags obj %d chunk %d byte %d seq %d",
+ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
+}
+
+static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
+{
+ yaffs_dump_packed_tags2_tags_only(&pt->t);
+}
+
+static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
+ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
+ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
+ t->seq_number);
+
+}
+
+static int yaffs_check_tags_extra_packable(const struct yaffs_ext_tags *t)
+{
+ if (t->chunk_id != 0 || !t->extra_available)
+ return 0;
+
+ /* Check if the file size is too long to store */
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE &&
+ (t->extra_file_size >> 31) != 0)
+ return 0;
+ return 1;
+}
+
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
+ const struct yaffs_ext_tags *t)
+{
+ ptt->chunk_id = t->chunk_id;
+ ptt->seq_number = t->seq_number;
+ ptt->n_bytes = t->n_bytes;
+ ptt->obj_id = t->obj_id;
+
+ /* Only store extra tags for object headers.
+ * If it is a file then only store if the file size is short\
+ * enough to fit.
+ */
+ if (yaffs_check_tags_extra_packable(t)) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunk_id */
+ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
+ if (t->extra_is_shrink)
+ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
+ if (t->extra_shadows)
+ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
+
+ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ ptt->n_bytes = t->extra_equiv_id;
+ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ ptt->n_bytes = (unsigned) t->extra_file_size;
+ else
+ ptt->n_bytes = 0;
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc)
+{
+ yaffs_pack_tags2_tags_only(&pt->t, t);
+
+ if (tags_ecc)
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &pt->ecc);
+}
+
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *ptt)
+{
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+
+ if (ptt->seq_number == 0xffffffff)
+ return;
+
+ t->block_bad = 0;
+ t->chunk_used = 1;
+ t->obj_id = ptt->obj_id;
+ t->chunk_id = ptt->chunk_id;
+ t->n_bytes = ptt->n_bytes;
+ t->is_deleted = 0;
+ t->serial_number = 0;
+ t->seq_number = ptt->seq_number;
+
+ /* Do extra header info stuff */
+ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
+ t->chunk_id = 0;
+ t->n_bytes = 0;
+
+ t->extra_available = 1;
+ t->extra_parent_id = ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
+ t->extra_is_shrink = ptt->chunk_id & EXTRA_SHRINK_FLAG ? 1 : 0;
+ t->extra_shadows = ptt->chunk_id & EXTRA_SHADOWS_FLAG ? 1 : 0;
+ t->extra_obj_type = ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ t->extra_equiv_id = ptt->n_bytes;
+ else
+ t->extra_file_size = ptt->n_bytes;
+ }
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc)
+{
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ if (pt->t.seq_number != 0xffffffff && tags_ecc) {
+ /* Chunk is in use and we need to do ECC */
+
+ struct yaffs_ecc_other ecc;
+ int result;
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &ecc);
+ result =
+ yaffs_ecc_correct_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &pt->ecc, &ecc);
+ switch (result) {
+ case 0:
+ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
+ yaffs_unpack_tags2_tags_only(t, &pt->t);
+
+ t->ecc_result = ecc_result;
+
+ yaffs_dump_packed_tags2(pt);
+ yaffs_dump_tags2(t);
+}
diff --git a/fs/yaffs2/yaffs_packedtags2.h b/fs/yaffs2/yaffs_packedtags2.h
new file mode 100755
index 00000000..675e7194
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.h
@@ -0,0 +1,47 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+struct yaffs_packed_tags2_tags_only {
+ unsigned seq_number;
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+struct yaffs_packed_tags2 {
+ struct yaffs_packed_tags2_tags_only t;
+ struct yaffs_ecc_other ecc;
+};
+
+/* Full packed tags with ECC, used for oob tags */
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc);
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc);
+
+/* Only the tags part (no ECC for use with inband tags */
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_summary.c b/fs/yaffs2/yaffs_summary.c
new file mode 100755
index 00000000..6f3c7839
--- /dev/null
+++ b/fs/yaffs2/yaffs_summary.c
@@ -0,0 +1,313 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Summaries write the useful part of the tags for the chunks in a block into an
+ * an array which is written to the last n chunks of the block.
+ * Reading the summaries gives all the tags for the block in one read. Much
+ * faster.
+ *
+ * Chunks holding summaries are marked with tags making it look like
+ * they are part of a fake file.
+ *
+ * The summary could also be used during gc.
+ *
+ */
+
+#include "yaffs_summary.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_bitmap.h"
+
+/*
+ * The summary is built up in an array of summary tags.
+ * This gets written to the last one or two (maybe more) chunks in a block.
+ * A summary header is written as the first part of each chunk of summary data.
+ * The summary header must match or the summary is rejected.
+ */
+
+/* Summary tags don't need the sequence number because that is redundant. */
+struct yaffs_summary_tags {
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+/* Summary header */
+struct yaffs_summary_header {
+ unsigned version; /* Must match current version */
+ unsigned block; /* Must be this block */
+ unsigned seq; /* Must be this sequence number */
+ unsigned sum; /* Just add up all the bytes in the tags */
+};
+
+
+static void yaffs_summary_clear(struct yaffs_dev *dev)
+{
+ if (!dev->sum_tags)
+ return;
+ memset(dev->sum_tags, 0, dev->chunks_per_summary *
+ sizeof(struct yaffs_summary_tags));
+}
+
+
+void yaffs_summary_deinit(struct yaffs_dev *dev)
+{
+ kfree(dev->sum_tags);
+ dev->sum_tags = NULL;
+ kfree(dev->gc_sum_tags);
+ dev->gc_sum_tags = NULL;
+ dev->chunks_per_summary = 0;
+}
+
+int yaffs_summary_init(struct yaffs_dev *dev)
+{
+ int sum_bytes;
+ int chunks_used; /* Number of chunks used by summary */
+ int sum_tags_bytes;
+
+ sum_bytes = dev->param.chunks_per_block *
+ sizeof(struct yaffs_summary_tags);
+
+ chunks_used = (sum_bytes + dev->data_bytes_per_chunk - 1)/
+ (dev->data_bytes_per_chunk -
+ sizeof(struct yaffs_summary_header));
+
+ dev->chunks_per_summary = dev->param.chunks_per_block - chunks_used;
+ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ dev->sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+ dev->gc_sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+ if (!dev->sum_tags || !dev->gc_sum_tags) {
+ yaffs_summary_deinit(dev);
+ return YAFFS_FAIL;
+ }
+
+ yaffs_summary_clear(dev);
+
+ return YAFFS_OK;
+}
+
+static unsigned yaffs_summary_sum(struct yaffs_dev *dev)
+{
+ u8 *sum_buffer = (u8 *)dev->sum_tags;
+ int i;
+ unsigned sum = 0;
+
+ i = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ while (i > 0) {
+ sum += *sum_buffer;
+ sum_buffer++;
+ i--;
+ }
+
+ return sum;
+}
+
+static int yaffs_summary_write(struct yaffs_dev *dev, int blk)
+{
+ struct yaffs_ext_tags tags;
+ u8 *buffer;
+ u8 *sum_buffer = (u8 *)dev->sum_tags;
+ int n_bytes;
+ int chunk_in_nand;
+ int chunk_in_block;
+ int result;
+ int this_tx;
+ struct yaffs_summary_header hdr;
+ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+
+ buffer = yaffs_get_temp_buffer(dev);
+ n_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ memset(&tags, 0, sizeof(struct yaffs_ext_tags));
+ tags.obj_id = YAFFS_OBJECTID_SUMMARY;
+ tags.chunk_id = 1;
+ chunk_in_block = dev->chunks_per_summary;
+ chunk_in_nand = dev->alloc_block * dev->param.chunks_per_block +
+ dev->chunks_per_summary;
+ hdr.version = YAFFS_SUMMARY_VERSION;
+ hdr.block = blk;
+ hdr.seq = bi->seq_number;
+ hdr.sum = yaffs_summary_sum(dev);
+
+ do {
+ this_tx = n_bytes;
+ if (this_tx > sum_bytes_per_chunk)
+ this_tx = sum_bytes_per_chunk;
+ memcpy(buffer, &hdr, sizeof(hdr));
+ memcpy(buffer + sizeof(hdr), sum_buffer, this_tx);
+ tags.n_bytes = this_tx + sizeof(hdr);
+ result = yaffs_wr_chunk_tags_nand(dev, chunk_in_nand,
+ buffer, &tags);
+
+ if (result != YAFFS_OK)
+ break;
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+ dev->n_free_chunks--;
+
+ n_bytes -= this_tx;
+ sum_buffer += this_tx;
+ chunk_in_nand++;
+ chunk_in_block++;
+ tags.chunk_id++;
+ } while (result == YAFFS_OK && n_bytes > 0);
+ yaffs_release_temp_buffer(dev, buffer);
+
+
+ if (result == YAFFS_OK)
+ bi->has_summary = 1;
+
+
+ return result;
+}
+
+int yaffs_summary_read(struct yaffs_dev *dev,
+ struct yaffs_summary_tags *st,
+ int blk)
+{
+ struct yaffs_ext_tags tags;
+ u8 *buffer;
+ u8 *sum_buffer = (u8 *)st;
+ int n_bytes;
+ int chunk_id;
+ int chunk_in_nand;
+ int chunk_in_block;
+ int result;
+ int this_tx;
+ struct yaffs_summary_header hdr;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+ int sum_tags_bytes;
+
+ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ buffer = yaffs_get_temp_buffer(dev);
+ n_bytes = sizeof(struct yaffs_summary_tags) * dev->chunks_per_summary;
+ chunk_in_block = dev->chunks_per_summary;
+ chunk_in_nand = blk * dev->param.chunks_per_block +
+ dev->chunks_per_summary;
+ chunk_id = 1;
+ do {
+ this_tx = n_bytes;
+ if (this_tx > sum_bytes_per_chunk)
+ this_tx = sum_bytes_per_chunk;
+ result = yaffs_rd_chunk_tags_nand(dev, chunk_in_nand,
+ buffer, &tags);
+
+ if (tags.chunk_id != chunk_id ||
+ tags.obj_id != YAFFS_OBJECTID_SUMMARY ||
+ tags.chunk_used == 0 ||
+ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+ tags.n_bytes != (this_tx + sizeof(hdr)))
+ result = YAFFS_FAIL;
+ if (result != YAFFS_OK)
+ break;
+
+ if (st == dev->sum_tags) {
+ /* If we're scanning then update the block info */
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+ }
+ memcpy(&hdr, buffer, sizeof(hdr));
+ memcpy(sum_buffer, buffer + sizeof(hdr), this_tx);
+ n_bytes -= this_tx;
+ sum_buffer += this_tx;
+ chunk_in_nand++;
+ chunk_in_block++;
+ chunk_id++;
+ } while (result == YAFFS_OK && n_bytes > 0);
+ yaffs_release_temp_buffer(dev, buffer);
+
+ if (result == YAFFS_OK) {
+ /* Verify header */
+ if (hdr.version != YAFFS_SUMMARY_VERSION ||
+ hdr.block != blk ||
+ hdr.seq != bi->seq_number ||
+ hdr.sum != yaffs_summary_sum(dev))
+ result = YAFFS_FAIL;
+ }
+
+ if (st == dev->sum_tags && result == YAFFS_OK)
+ bi->has_summary = 1;
+
+ return result;
+}
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_nand)
+{
+ struct yaffs_packed_tags2_tags_only tags_only;
+ struct yaffs_summary_tags *sum_tags;
+ int block_in_nand = chunk_in_nand / dev->param.chunks_per_block;
+ int chunk_in_block = chunk_in_nand % dev->param.chunks_per_block;
+
+ if (!dev->sum_tags)
+ return YAFFS_OK;
+
+ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+ yaffs_pack_tags2_tags_only(&tags_only, tags);
+ sum_tags = &dev->sum_tags[chunk_in_block];
+ sum_tags->chunk_id = tags_only.chunk_id;
+ sum_tags->n_bytes = tags_only.n_bytes;
+ sum_tags->obj_id = tags_only.obj_id;
+
+ if (chunk_in_block == dev->chunks_per_summary - 1) {
+ /* Time to write out the summary */
+ yaffs_summary_write(dev, block_in_nand);
+ yaffs_summary_clear(dev);
+ yaffs_skip_rest_of_block(dev);
+ }
+ }
+ return YAFFS_OK;
+}
+
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block)
+{
+ struct yaffs_packed_tags2_tags_only tags_only;
+ struct yaffs_summary_tags *sum_tags;
+ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+ sum_tags = &dev->sum_tags[chunk_in_block];
+ tags_only.chunk_id = sum_tags->chunk_id;
+ tags_only.n_bytes = sum_tags->n_bytes;
+ tags_only.obj_id = sum_tags->obj_id;
+ yaffs_unpack_tags2_tags_only(tags, &tags_only);
+ return YAFFS_OK;
+ }
+ return YAFFS_FAIL;
+}
+
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+ int i;
+
+ if (!bi->has_summary)
+ return;
+
+ for (i = dev->chunks_per_summary;
+ i < dev->param.chunks_per_block;
+ i++) {
+ if (yaffs_check_chunk_bit(dev, blk, i)) {
+ yaffs_clear_chunk_bit(dev, blk, i);
+ bi->pages_in_use--;
+ dev->n_free_chunks++;
+ }
+ }
+}
diff --git a/fs/yaffs2/yaffs_summary.h b/fs/yaffs2/yaffs_summary.h
new file mode 100755
index 00000000..be141d07
--- /dev/null
+++ b/fs/yaffs2/yaffs_summary.h
@@ -0,0 +1,37 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_SUMMARY_H__
+#define __YAFFS_SUMMARY_H__
+
+#include "yaffs_packedtags2.h"
+
+
+int yaffs_summary_init(struct yaffs_dev *dev);
+void yaffs_summary_deinit(struct yaffs_dev *dev);
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block);
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block);
+int yaffs_summary_read(struct yaffs_dev *dev,
+ struct yaffs_summary_tags *st,
+ int blk);
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk);
+
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagscompat.c b/fs/yaffs2/yaffs_tagscompat.c
new file mode 100755
index 00000000..092430be
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.c
@@ -0,0 +1,381 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_trace.h"
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+
+
+/********** Tags ECC calculations *********/
+
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
+{
+ /* Calculate an ecc */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+
+ tags->ecc = 0;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+ if (b[i] & j)
+ ecc ^= bit;
+ }
+ }
+ tags->ecc = ecc;
+}
+
+int yaffs_check_tags_ecc(struct yaffs_tags *tags)
+{
+ unsigned ecc = tags->ecc;
+
+ yaffs_calc_tags_ecc(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+ /* Now recvalc the ecc */
+ yaffs_calc_tags_ecc(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
+ /* Wierd ecc failure value */
+ /* TODO Need to do somethiong here */
+ return -1; /* unrecovered error */
+ }
+ return 0;
+}
+
+/********** Tags **********/
+
+static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+
+ yaffs_calc_tags_ecc(tags_ptr);
+
+ spare_ptr->tb0 = tu->as_bytes[0];
+ spare_ptr->tb1 = tu->as_bytes[1];
+ spare_ptr->tb2 = tu->as_bytes[2];
+ spare_ptr->tb3 = tu->as_bytes[3];
+ spare_ptr->tb4 = tu->as_bytes[4];
+ spare_ptr->tb5 = tu->as_bytes[5];
+ spare_ptr->tb6 = tu->as_bytes[6];
+ spare_ptr->tb7 = tu->as_bytes[7];
+}
+
+static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
+ struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+ int result;
+
+ tu->as_bytes[0] = spare_ptr->tb0;
+ tu->as_bytes[1] = spare_ptr->tb1;
+ tu->as_bytes[2] = spare_ptr->tb2;
+ tu->as_bytes[3] = spare_ptr->tb3;
+ tu->as_bytes[4] = spare_ptr->tb4;
+ tu->as_bytes[5] = spare_ptr->tb5;
+ tu->as_bytes[6] = spare_ptr->tb6;
+ tu->as_bytes[7] = spare_ptr->tb7;
+
+ result = yaffs_check_tags_ecc(tags_ptr);
+ if (result > 0)
+ dev->n_tags_ecc_fixed++;
+ else if (result < 0)
+ dev->n_tags_ecc_unfixed++;
+}
+
+static void yaffs_spare_init(struct yaffs_spare *spare)
+{
+ memset(spare, 0xff, sizeof(struct yaffs_spare));
+}
+
+static int yaffs_wr_nand(struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ struct yaffs_spare *spare)
+{
+ int data_size = dev->data_bytes_per_chunk;
+
+ return dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+ data, data_size,
+ (u8 *) spare, sizeof(*spare));
+}
+
+static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data,
+ struct yaffs_spare *spare,
+ enum yaffs_ecc_result *ecc_result,
+ int correct_errors)
+{
+ int ret_val;
+ struct yaffs_spare local_spare;
+ int data_size;
+ int spare_size;
+ int ecc_result1, ecc_result2;
+ u8 calc_ecc[3];
+
+ if (!spare) {
+ /* If we don't have a real spare, then we use a local one. */
+ /* Need this for the calculation of the ecc */
+ spare = &local_spare;
+ }
+ data_size = dev->data_bytes_per_chunk;
+ spare_size = sizeof(struct yaffs_spare);
+
+ if (dev->param.use_nand_ecc)
+ return dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, data_size,
+ (u8 *) spare, spare_size,
+ ecc_result);
+
+
+ /* Handle the ECC at this level. */
+
+ ret_val = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, data_size,
+ (u8 *)spare, spare_size,
+ NULL);
+ if (!data || !correct_errors)
+ return ret_val;
+
+ /* Do ECC correction if needed. */
+ yaffs_ecc_calc(data, calc_ecc);
+ ecc_result1 = yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
+ yaffs_ecc_calc(&data[256], calc_ecc);
+ ecc_result2 = yaffs_ecc_correct(&data[256], spare->ecc2, calc_ecc);
+
+ if (ecc_result1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result1 || ecc_result2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (ecc_result1 < 0 || ecc_result2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (ecc_result1 > 0 || ecc_result2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ return ret_val;
+}
+
+/*
+ * Functions for robustisizing
+ */
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+ yaffs_get_block_info(dev, flash_block + dev->block_offset)->
+ needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>>Block %d marked for retirement",
+ flash_block);
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+ * then retire the block
+ * NB recursion
+ */
+}
+
+static int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+
+ yaffs_spare_init(&spare);
+
+ if (ext_tags->is_deleted)
+ spare.page_status = 0;
+ else {
+ tags.obj_id = ext_tags->obj_id;
+ tags.chunk_id = ext_tags->chunk_id;
+
+ tags.n_bytes_lsb = ext_tags->n_bytes & (1024 - 1);
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
+ else
+ tags.n_bytes_msb = 3;
+
+ tags.serial_number = ext_tags->serial_number;
+
+ if (!dev->param.use_nand_ecc && data) {
+ yaffs_ecc_calc(data, spare.ecc1);
+ yaffs_ecc_calc(&data[256], spare.ecc2);
+ }
+
+ yaffs_load_tags_to_spare(&spare, &tags);
+ }
+ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+}
+
+static int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ int deleted;
+
+ if (!init) {
+ memset(&spare_ff, 0xff, sizeof(spare_ff));
+ init = 1;
+ }
+
+ if (!yaffs_rd_chunk_nand(dev, nand_chunk,
+ data, &spare, &ecc_result, 1))
+ return YAFFS_FAIL;
+
+ /* ext_tags may be NULL */
+ if (!ext_tags)
+ return YAFFS_OK;
+
+ deleted = (hweight8(spare.page_status) < 7) ? 1 : 0;
+
+ ext_tags->is_deleted = deleted;
+ ext_tags->ecc_result = ecc_result;
+ ext_tags->block_bad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+ ext_tags->chunk_used =
+ memcmp(&spare_ff, &spare, sizeof(spare_ff)) ? 1 : 0;
+
+ if (ext_tags->chunk_used) {
+ yaffs_get_tags_from_spare(dev, &spare, &tags);
+ ext_tags->obj_id = tags.obj_id;
+ ext_tags->chunk_id = tags.chunk_id;
+ ext_tags->n_bytes = tags.n_bytes_lsb;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ ext_tags->n_bytes |=
+ (((unsigned)tags.n_bytes_msb) << 10);
+
+ ext_tags->serial_number = tags.serial_number;
+ }
+
+ return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_spare spare;
+
+ memset(&spare, 0xff, sizeof(struct yaffs_spare));
+
+ spare.block_status = 'Y';
+
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+ &spare);
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ struct yaffs_spare spare0, spare1;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ enum yaffs_ecc_result dummy;
+
+ if (!init) {
+ memset(&spare_ff, 0xff, sizeof(spare_ff));
+ init = 1;
+ }
+
+ *seq_number = 0;
+
+ /* Look for bad block markers in the first two chunks */
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block,
+ NULL, &spare0, &dummy, 0);
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
+ NULL, &spare1, &dummy, 0);
+
+ if (hweight8(spare0.block_status & spare1.block_status) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ else
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+
+ return YAFFS_OK;
+}
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev)
+{
+ if(dev->param.is_yaffs2)
+ return;
+ if(!dev->tagger.write_chunk_tags_fn)
+ dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_wr;
+ if(!dev->tagger.read_chunk_tags_fn)
+ dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_rd;
+ if(!dev->tagger.query_block_fn)
+ dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
+ if(!dev->tagger.mark_bad_fn)
+ dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
+}
diff --git a/fs/yaffs2/yaffs_tagscompat.h b/fs/yaffs2/yaffs_tagscompat.h
new file mode 100755
index 00000000..92d298a6
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+
+#include "yaffs_guts.h"
+
+#if 0
+
+
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number);
+
+#endif
+
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev);
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
+int yaffs_check_tags_ecc(struct yaffs_tags *tags);
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagsmarshall.c b/fs/yaffs2/yaffs_tagsmarshall.c
new file mode 100755
index 00000000..44a83b12
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsmarshall.c
@@ -0,0 +1,199 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_packedtags2.h"
+
+static int yaffs_tags_marshall_write(struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags2 pt;
+ int retval;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "yaffs_tags_marshall_write chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+ * the end of the data buffer.
+ */
+ if (!data || !tags)
+ BUG();
+ else if (dev->param.inband_tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)(data +
+ dev->
+ data_bytes_per_chunk);
+ yaffs_pack_tags2_tags_only(pt2tp, tags);
+ } else {
+ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+ }
+
+ retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ (dev->param.inband_tags) ? NULL : packed_tags_ptr,
+ (dev->param.inband_tags) ? 0 : packed_tags_size);
+
+ return retval;
+}
+
+static int yaffs_tags_marshall_read(struct yaffs_dev *dev,
+ int nand_chunk, u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = 0;
+ int local_data = 0;
+ u8 spare_buffer[100];
+ enum yaffs_ecc_result ecc_result;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "yaffs_tags_marshall_read chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ if (dev->param.inband_tags) {
+ if (!data) {
+ local_data = 1;
+ data = yaffs_get_temp_buffer(dev);
+ }
+ }
+
+ if (dev->param.inband_tags || (data && !tags))
+ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ NULL, 0,
+ &ecc_result);
+ else if (tags)
+ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ spare_buffer, packed_tags_size,
+ &ecc_result);
+ else
+ BUG();
+
+
+ if (dev->param.inband_tags) {
+ if (tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)
+ &data[dev->data_bytes_per_chunk];
+ yaffs_unpack_tags2_tags_only(tags, pt2tp);
+ }
+ } else if (tags) {
+ memcpy(packed_tags_ptr, spare_buffer, packed_tags_size);
+ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+
+ if (local_data)
+ yaffs_release_temp_buffer(dev, data);
+
+ if (tags && ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
+ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ dev->n_ecc_unfixed++;
+ }
+
+ if (tags && ecc_result == -YAFFS_ECC_RESULT_FIXED) {
+ if (tags->ecc_result <= YAFFS_ECC_RESULT_NO_ERROR)
+ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ }
+
+ if (ecc_result < YAFFS_ECC_RESULT_UNFIXED)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+static int yaffs_tags_marshall_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "yaffs_tags_marshall_query_block %d",
+ block_no);
+
+ retval = dev->drv.drv_check_bad_fn(dev, block_no);
+
+ if (retval== YAFFS_FAIL) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ *seq_number = 0;
+ } else {
+ struct yaffs_ext_tags t;
+
+ yaffs_tags_marshall_read(dev,
+ block_no * dev->param.chunks_per_block,
+ NULL, &t);
+
+ if (t.chunk_used) {
+ *seq_number = t.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else {
+ *seq_number = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "block query returns seq %d state %d",
+ *seq_number, *state);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+static int yaffs_tags_marshall_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ return dev->drv.drv_mark_bad_fn(dev, block_no);
+
+}
+
+
+void yaffs_tags_marshall_install(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->tagger.write_chunk_tags_fn)
+ dev->tagger.write_chunk_tags_fn = yaffs_tags_marshall_write;
+
+ if (!dev->tagger.read_chunk_tags_fn)
+ dev->tagger.read_chunk_tags_fn = yaffs_tags_marshall_read;
+
+ if (!dev->tagger.query_block_fn)
+ dev->tagger.query_block_fn = yaffs_tags_marshall_query_block;
+
+ if (!dev->tagger.mark_bad_fn)
+ dev->tagger.mark_bad_fn = yaffs_tags_marshall_mark_bad;
+
+}
diff --git a/fs/yaffs2/yaffs_tagsmarshall.h b/fs/yaffs2/yaffs_tagsmarshall.h
new file mode 100755
index 00000000..bf3e68a1
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsmarshall.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSMARSHALL_H__
+#define __YAFFS_TAGSMARSHALL_H__
+
+#include "yaffs_guts.h"
+void yaffs_tags_marshall_install(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_trace.h b/fs/yaffs2/yaffs_trace.h
new file mode 100755
index 00000000..fd26054d
--- /dev/null
+++ b/fs/yaffs2/yaffs_trace.h
@@ -0,0 +1,57 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YTRACE_H__
+#define __YTRACE_H__
+
+extern unsigned int yaffs_trace_mask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS 0x00000002
+#define YAFFS_TRACE_ALLOCATE 0x00000004
+#define YAFFS_TRACE_SCAN 0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+#define YAFFS_TRACE_ERASE 0x00000020
+#define YAFFS_TRACE_GC 0x00000040
+#define YAFFS_TRACE_WRITE 0x00000080
+#define YAFFS_TRACE_TRACING 0x00000100
+#define YAFFS_TRACE_DELETION 0x00000200
+#define YAFFS_TRACE_BUFFERS 0x00000400
+#define YAFFS_TRACE_NANDACCESS 0x00000800
+#define YAFFS_TRACE_GC_DETAIL 0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+#define YAFFS_TRACE_MTD 0x00004000
+#define YAFFS_TRACE_CHECKPOINT 0x00008000
+
+#define YAFFS_TRACE_VERIFY 0x00010000
+#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+#define YAFFS_TRACE_VERIFY_ALL 0x000f0000
+
+#define YAFFS_TRACE_SYNC 0x00100000
+#define YAFFS_TRACE_BACKGROUND 0x00200000
+#define YAFFS_TRACE_LOCK 0x00400000
+#define YAFFS_TRACE_MOUNT 0x00800000
+
+#define YAFFS_TRACE_ERROR 0x40000000
+#define YAFFS_TRACE_BUG 0x80000000
+#define YAFFS_TRACE_ALWAYS 0xf0000000
+
+#endif
diff --git a/fs/yaffs2/yaffs_verify.c b/fs/yaffs2/yaffs_verify.c
new file mode 100755
index 00000000..e8f2f0a6
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.c
@@ -0,0 +1,529 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_verify.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+
+int yaffs_skip_verification(struct yaffs_dev *dev)
+{
+ (void) dev;
+ return !(yaffs_trace_mask &
+ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_full_verification(struct yaffs_dev *dev)
+{
+ (void) dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
+{
+ (void) dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
+}
+
+static const char * const block_state_name[] = {
+ "Unknown",
+ "Needs scan",
+ "Scanning",
+ "Empty",
+ "Allocating",
+ "Full",
+ "Dirty",
+ "Checkpoint",
+ "Collecting",
+ "Dead"
+};
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
+{
+ int actually_used;
+ int in_use;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Report illegal runtime states */
+ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has undefined state %d",
+ n, bi->block_state);
+
+ switch (bi->block_state) {
+ case YAFFS_BLOCK_STATE_UNKNOWN:
+ case YAFFS_BLOCK_STATE_SCANNING:
+ case YAFFS_BLOCK_STATE_NEEDS_SCAN:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has bad run-state %s",
+ n, block_state_name[bi->block_state]);
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actually_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->pages_in_use < 0 ||
+ bi->pages_in_use > dev->param.chunks_per_block ||
+ bi->soft_del_pages < 0 ||
+ bi->soft_del_pages > dev->param.chunks_per_block ||
+ actually_used < 0 || actually_used > dev->param.chunks_per_block)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has illegal values pages_in_used %d soft_del_pages %d",
+ n, bi->pages_in_use, bi->soft_del_pages);
+
+ /* Check chunk bitmap legal */
+ in_use = yaffs_count_chunk_bits(dev, n);
+ if (in_use != bi->pages_in_use)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
+ n, bi->pages_in_use, in_use);
+}
+
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n)
+{
+ yaffs_verify_blk(dev, bi, n);
+
+ /* After collection the block should be in the erased state */
+
+ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
+ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Block %d is in state %d after gc, should be erased",
+ n, bi->block_state);
+ }
+}
+
+void yaffs_verify_blocks(struct yaffs_dev *dev)
+{
+ int i;
+ int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
+ int illegal_states = 0;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ memset(state_count, 0, sizeof(state_count));
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ yaffs_verify_blk(dev, bi, i);
+
+ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
+ state_count[bi->block_state]++;
+ else
+ illegal_states++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
+
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%d blocks have illegal states",
+ illegal_states);
+ if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many allocating blocks");
+
+ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%s %d blocks",
+ block_state_name[i], state_count[i]);
+
+ if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Checkpoint block count wrong dev %d count %d",
+ dev->blocks_in_checkpt,
+ state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
+
+ if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Erased block count wrong dev %d count %d",
+ dev->n_erased_blocks,
+ state_count[YAFFS_BLOCK_STATE_EMPTY]);
+
+ if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many collecting blocks %d (max is 1)",
+ state_count[YAFFS_BLOCK_STATE_COLLECTING]);
+}
+
+/*
+ * Verify the object header. oh must be valid, but obj and tags may be NULL in
+ * which case those tests will not be performed.
+ */
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!(tags && obj && oh)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Verifying object header tags %p obj %p oh %p",
+ tags, obj, oh);
+ return;
+ }
+
+ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+ oh->type > YAFFS_OBJECT_TYPE_MAX)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header type is illegal value 0x%x",
+ tags->obj_id, oh->type);
+
+ if (tags->obj_id != obj->obj_id)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch obj_id %d",
+ tags->obj_id, obj->obj_id);
+
+ /*
+ * Check that the object's parent ids match if parent_check requested.
+ *
+ * Tests do not apply to the root object.
+ */
+
+ if (parent_check && tags->obj_id > 1 && !obj->parent)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d obj->parent is NULL",
+ tags->obj_id, oh->parent_obj_id);
+
+ if (parent_check && obj->parent &&
+ oh->parent_obj_id != obj->parent->obj_id &&
+ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d parent_obj_id %d",
+ tags->obj_id, oh->parent_obj_id,
+ obj->parent->obj_id);
+
+ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is NULL",
+ obj->obj_id);
+
+ if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Junk name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is 0xff",
+ obj->obj_id);
+}
+
+void yaffs_verify_file(struct yaffs_obj *obj)
+{
+ u32 x;
+ int required_depth;
+ int actual_depth;
+ int last_chunk;
+ u32 offset_in_chunk;
+ u32 the_chunk;
+
+ u32 i;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ struct yaffs_tnode *tn;
+ u32 obj_id;
+
+ if (!obj)
+ return;
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ dev = obj->my_dev;
+ obj_id = obj->obj_id;
+
+
+ /* Check file size is consistent with tnode depth */
+ yaffs_addr_to_chunk(dev, obj->variant.file_variant.file_size,
+ &last_chunk, &offset_in_chunk);
+ last_chunk++;
+ x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x > 0) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ actual_depth = obj->variant.file_variant.top_level;
+
+ /* Check that the chunks in the tnode tree are all correct.
+ * We do this by scanning through the tnode tree and
+ * checking the tags for every chunk match.
+ */
+
+ if (yaffs_skip_nand_verification(dev))
+ return;
+
+ for (i = 1; i <= last_chunk; i++) {
+ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
+
+ if (!tn)
+ continue;
+
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk > 0) {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ &tags);
+ if (tags.obj_id != obj_id || tags.chunk_id != i)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
+ obj_id, i, the_chunk,
+ tags.obj_id, tags.chunk_id);
+ }
+ }
+}
+
+void yaffs_verify_link(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify sane equivalent object */
+}
+
+void yaffs_verify_symlink(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify symlink string */
+}
+
+void yaffs_verify_special(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+}
+
+void yaffs_verify_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+ u32 chunk_min;
+ u32 chunk_max;
+ u32 chunk_id_ok;
+ u32 chunk_in_range;
+ u32 chunk_wrongly_deleted;
+ u32 chunk_valid;
+
+ if (!obj)
+ return;
+
+ if (obj->being_created)
+ return;
+
+ dev = obj->my_dev;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Check sane object header chunk */
+
+ chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
+ chunk_max =
+ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
+
+ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
+ ((unsigned)(obj->hdr_chunk)) <= chunk_max);
+ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
+ chunk_valid = chunk_in_range &&
+ yaffs_check_chunk_bit(dev,
+ obj->hdr_chunk / dev->param.chunks_per_block,
+ obj->hdr_chunk % dev->param.chunks_per_block);
+ chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
+
+ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has chunk_id %d %s %s",
+ obj->obj_id, obj->hdr_chunk,
+ chunk_id_ok ? "" : ",out of range",
+ chunk_wrongly_deleted ? ",marked as deleted" : "");
+
+ if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj_hdr *oh;
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
+
+ yaffs_verify_oh(obj, oh, &tags, 1);
+
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+
+ /* Verify it has a parent */
+ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has parent pointer %p which does not look like an object",
+ obj->obj_id, obj->parent);
+ }
+
+ /* Verify parent is a directory */
+ if (obj->parent &&
+ obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d's parent is not a directory (type %d)",
+ obj->obj_id, obj->parent->variant_type);
+ }
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_verify_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_verify_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ yaffs_verify_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ yaffs_verify_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ yaffs_verify_special(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has illegaltype %d",
+ obj->obj_id, obj->variant_type);
+ break;
+ }
+}
+
+void yaffs_verify_objects(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int i;
+ struct list_head *lh;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ yaffs_verify_obj(obj);
+ }
+ }
+}
+
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+ int count = 0;
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
+ BUG();
+ return;
+ }
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!obj->parent) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent");
+ BUG();
+ return;
+ }
+
+ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
+ BUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &obj->parent->variant.dir_variant.children) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ yaffs_verify_obj(list_obj);
+ if (obj == list_obj)
+ count++;
+ }
+
+ if (count != 1) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory %d times",
+ count);
+ BUG();
+ }
+}
+
+void yaffs_verify_dir(struct yaffs_obj *directory)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+
+ if (!directory) {
+ BUG();
+ return;
+ }
+
+ if (yaffs_skip_full_verification(directory->my_dev))
+ return;
+
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Directory has wrong type: %d",
+ directory->variant_type);
+ BUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &directory->variant.dir_variant.children) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (list_obj->parent != directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory list has wrong parent %p",
+ list_obj->parent);
+ BUG();
+ }
+ yaffs_verify_obj_in_dir(list_obj);
+ }
+}
+
+static int yaffs_free_verification_failures;
+
+void yaffs_verify_free_chunks(struct yaffs_dev *dev)
+{
+ int counted;
+ int difference;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ counted = yaffs_count_free_chunks(dev);
+
+ difference = dev->n_free_chunks - counted;
+
+ if (difference) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Freechunks verification failure %d %d %d",
+ dev->n_free_chunks, counted, difference);
+ yaffs_free_verification_failures++;
+ }
+}
+
+int yaffs_verify_file_sane(struct yaffs_obj *in)
+{
+ (void) in;
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_verify.h b/fs/yaffs2/yaffs_verify.h
new file mode 100755
index 00000000..4f4af8d2
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.h
@@ -0,0 +1,43 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_VERIFY_H__
+#define __YAFFS_VERIFY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
+ int n);
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n);
+void yaffs_verify_blocks(struct yaffs_dev *dev);
+
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check);
+void yaffs_verify_file(struct yaffs_obj *obj);
+void yaffs_verify_link(struct yaffs_obj *obj);
+void yaffs_verify_symlink(struct yaffs_obj *obj);
+void yaffs_verify_special(struct yaffs_obj *obj);
+void yaffs_verify_obj(struct yaffs_obj *obj);
+void yaffs_verify_objects(struct yaffs_dev *dev);
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
+void yaffs_verify_dir(struct yaffs_obj *directory);
+void yaffs_verify_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_verify_file_sane(struct yaffs_obj *obj);
+
+int yaffs_skip_verification(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
new file mode 100755
index 00000000..75e8ef20
--- /dev/null
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -0,0 +1,3354 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ * Acknowledgements:
+ * Luc van OostenRyck for numerous patches.
+ * Nick Bane for numerous patches.
+ * Nick Bane for 2.5/2.6 integration.
+ * Andras Toth for mknod rdev issue.
+ * Michael Fischer for finding the problem with inode inconsistency.
+ * Some code bodily lifted from JFFS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * This is the file system front-end to YAFFS that hooks it up to
+ * the VFS.
+ *
+ * Special notes:
+ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
+ * this superblock
+ * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this
+ * superblock
+ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
+ */
+
+/*
+ * There are two variants of the VFS glue code. This variant should compile
+ * for any version of Linux.
+ */
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10))
+#define YAFFS_COMPILE_BACKGROUND
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23))
+#define YAFFS_COMPILE_FREEZER
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#define YAFFS_COMPILE_EXPORTFS
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
+#define YAFFS_USE_SETATTR_COPY
+#define YAFFS_USE_TRUNCATE_SETSIZE
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
+#define YAFFS_HAS_EVICT_INODE
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+#define YAFFS_NEW_FOLLOW_LINK 1
+#else
+#define YAFFS_NEW_FOLLOW_LINK 0
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define YAFFS_HAS_WRITE_SUPER
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+#include <linux/config.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
+#include <linux/smp_lock.h>
+#endif
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+#include <linux/namei.h>
+#endif
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+#include <linux/exportfs.h>
+#endif
+
+#ifdef YAFFS_COMPILE_BACKGROUND
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#endif
+#ifdef YAFFS_COMPILE_FREEZER
+#include <linux/freezer.h>
+#endif
+
+#include <asm/div64.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+#include <linux/statfs.h>
+
+#define UnlockPage(p) unlock_page(p)
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+
+/* FIXME: use sb->s_id instead ? */
+#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
+
+#else
+
+#include <linux/locks.h>
+#define BDEVNAME_SIZE 0
+#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
+/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
+#define __user
+#endif
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+#define YPROC_ROOT (&proc_root)
+#else
+#define YPROC_ROOT NULL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+#define Y_INIT_TIMER(a) init_timer(a)
+#else
+#define Y_INIT_TIMER(a) init_timer_on_stack(a)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
+#define YAFFS_USE_WRITE_BEGIN_END 1
+#else
+#define YAFFS_USE_WRITE_BEGIN_END 0
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define YAFFS_SUPER_HAS_DIRTY
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#define set_nlink(inode, count) do { (inode)->i_nlink = (count); } while(0)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
+static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+{
+ uint64_t result = partition_size;
+ do_div(result, block_size);
+ return (uint32_t) result;
+}
+#else
+#define YCALCBLOCKS(s, b) ((s)/(b))
+#endif
+
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+#include "yaffs_linux.h"
+
+#include "yaffs_mtdif.h"
+
+unsigned int yaffs_trace_mask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS;
+unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+unsigned int yaffs_auto_checkpoint = 1;
+unsigned int yaffs_gc_control = 1;
+unsigned int yaffs_bg_enable = 1;
+unsigned int yaffs_auto_select = 1;
+/* Module Parameters */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+module_param(yaffs_trace_mask, uint, 0644);
+module_param(yaffs_wr_attempts, uint, 0644);
+module_param(yaffs_auto_checkpoint, uint, 0644);
+module_param(yaffs_gc_control, uint, 0644);
+module_param(yaffs_bg_enable, uint, 0644);
+#else
+MODULE_PARM(yaffs_trace_mask, "i");
+MODULE_PARM(yaffs_wr_attempts, "i");
+MODULE_PARM(yaffs_auto_checkpoint, "i");
+MODULE_PARM(yaffs_gc_control, "i");
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+/* use iget and read_inode */
+#define Y_IGET(sb, inum) iget((sb), (inum))
+
+#else
+/* Call local equivalent */
+#define YAFFS_USE_OWN_IGET
+#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
+
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
+#else
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->u.generic_ip)
+#endif
+
+#define yaffs_inode_to_obj(iptr) \
+ ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
+#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info)
+#else
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->u.generic_sbp)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#define Y_CLEAR_INODE(i) clear_inode(i)
+#else
+#define Y_CLEAR_INODE(i) end_writeback(i)
+#endif
+
+
+#define update_dir_time(dir) do {\
+ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
+ } while (0)
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj);
+
+
+static void yaffs_gross_lock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
+ mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
+}
+
+static void yaffs_gross_unlock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
+ mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
+}
+
+
+static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+{
+ /* Lifted from jffs2 */
+
+ struct yaffs_obj *obj;
+ unsigned char *pg_buf;
+ int ret;
+ loff_t pos = ((loff_t) pg->index) << PAGE_CACHE_SHIFT;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readpage_nolock at %lld, size %08x",
+ (long long)pos,
+ (unsigned)PAGE_CACHE_SIZE);
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ BUG_ON(!PageLocked(pg));
+#else
+ if (!PageLocked(pg))
+ PAGE_BUG(pg);
+#endif
+
+ pg_buf = kmap(pg);
+ /* FIXME: Can kmap fail? */
+
+ yaffs_gross_lock(dev);
+
+ ret = yaffs_file_rd(obj, pg_buf, pos, PAGE_CACHE_SIZE);
+
+ yaffs_gross_unlock(dev);
+
+ if (ret >= 0)
+ ret = 0;
+
+ if (ret) {
+ ClearPageUptodate(pg);
+ SetPageError(pg);
+ } else {
+ SetPageUptodate(pg);
+ ClearPageError(pg);
+ }
+
+ flush_dcache_page(pg);
+ kunmap(pg);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
+ return ret;
+}
+
+static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+{
+ int ret = yaffs_readpage_nolock(f, pg);
+ UnlockPage(pg);
+ return ret;
+}
+
+static int yaffs_readpage(struct file *f, struct page *pg)
+{
+ int ret;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
+ ret = yaffs_readpage_unlock(f, pg);
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
+ return ret;
+}
+
+
+static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val)
+{
+ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
+
+ if (lc)
+ lc->dirty = val;
+
+# ifdef YAFFS_SUPER_HAS_DIRTY
+ {
+ struct super_block *sb = lc->super;
+
+ if (sb)
+ sb->s_dirt = val;
+ }
+#endif
+
+}
+
+static void yaffs_set_super_dirty(struct yaffs_dev *dev)
+{
+ yaffs_set_super_dirty_val(dev, 1);
+}
+
+static void yaffs_clear_super_dirty(struct yaffs_dev *dev)
+{
+ yaffs_set_super_dirty_val(dev, 0);
+}
+
+static int yaffs_check_super_dirty(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
+
+ if (lc && lc->dirty)
+ return 1;
+
+# ifdef YAFFS_SUPER_HAS_DIRTY
+ {
+ struct super_block *sb = lc->super;
+
+ if (sb && sb->s_dirt)
+ return 1;
+ }
+#endif
+ return 0;
+
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+#else
+static int yaffs_writepage(struct page *page)
+#endif
+{
+ struct yaffs_dev *dev;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ unsigned long end_index;
+ char *buffer;
+ struct yaffs_obj *obj;
+ int n_written = 0;
+ unsigned n_bytes;
+ loff_t i_size;
+
+ if (!mapping)
+ BUG();
+ inode = mapping->host;
+ if (!inode)
+ BUG();
+ i_size = i_size_read(inode);
+
+ end_index = i_size >> PAGE_CACHE_SHIFT;
+
+ if (page->index < end_index)
+ n_bytes = PAGE_CACHE_SIZE;
+ else {
+ n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
+
+ if (page->index > end_index || !n_bytes) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %lld, inode size = %lld!!",
+ ((loff_t)page->index) << PAGE_CACHE_SHIFT,
+ inode->i_size);
+ yaffs_trace(YAFFS_TRACE_OS,
+ " -> don't care!!");
+
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ return 0;
+ }
+ }
+
+ if (n_bytes != PAGE_CACHE_SIZE)
+ zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
+
+ get_page(page);
+
+ buffer = kmap(page);
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %lld, size %08x",
+ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag0: obj = %lld, ino = %lld",
+ obj->variant.file_variant.file_size, inode->i_size);
+
+ n_written = yaffs_wr_file(obj, buffer,
+ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes, 0);
+
+ yaffs_set_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag1: obj = %lld, ino = %lld",
+ obj->variant.file_variant.file_size, inode->i_size);
+
+ yaffs_gross_unlock(dev);
+
+ kunmap(page);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ put_page(page);
+
+ return (n_written == n_bytes) ? 0 : -ENOSPC;
+}
+
+/* Space holding and freeing is done to ensure we have space available for write_begin/end */
+/* For now we just assume few parallel writes and check against a small number. */
+/* Todo: need to do this with a counter to handle parallel reads better */
+
+static ssize_t yaffs_hold_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ int n_free_chunks;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ n_free_chunks = yaffs_get_n_free_chunks(dev);
+
+ yaffs_gross_unlock(dev);
+
+ return (n_free_chunks > 20) ? 1 : 0;
+}
+
+static void yaffs_release_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_gross_unlock(dev);
+}
+
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct page *pg = NULL;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+ int ret = 0;
+ int space_held = 0;
+
+ /* Get a page */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ pg = grab_cache_page_write_begin(mapping, index, flags);
+#else
+ pg = __grab_cache_page(mapping, index);
+#endif
+
+ *pagep = pg;
+ if (!pg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "start yaffs_write_begin index %d(%x) uptodate %d",
+ (int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
+
+ /* Get fs space */
+ space_held = yaffs_hold_space(filp);
+
+ if (!space_held) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Update page if required */
+
+ if (!Page_Uptodate(pg))
+ ret = yaffs_readpage_nolock(filp, pg);
+
+ if (ret)
+ goto out;
+
+ /* Happy path return */
+ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
+
+ return 0;
+
+out:
+ yaffs_trace(YAFFS_TRACE_OS,
+ "end yaffs_write_begin fail returning %d", ret);
+ if (space_held)
+ yaffs_release_space(filp);
+ if (pg) {
+ unlock_page(pg);
+ page_cache_release(pg);
+ }
+ return ret;
+}
+
+#else
+
+static int yaffs_prepare_write(struct file *f, struct page *pg,
+ unsigned offset, unsigned to)
+{
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_prepair_write");
+
+ if (!Page_Uptodate(pg))
+ return yaffs_readpage_nolock(f, pg);
+ return 0;
+}
+#endif
+
+
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+ loff_t * pos)
+{
+ struct yaffs_obj *obj;
+ int n_written;
+ loff_t ipos;
+ struct inode *inode;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: hey obj is null!");
+ return -EINVAL;
+ }
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ inode = f->f_dentry->d_inode;
+
+ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+ ipos = inode->i_size;
+ else
+ ipos = *pos;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write about to write writing %u(%x) bytes to object %d at %lld",
+ (unsigned)n, (unsigned)n, obj->obj_id, ipos);
+
+ n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
+
+ yaffs_set_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: %d(%x) bytes written",
+ (unsigned)n, (unsigned)n);
+
+ if (n_written > 0) {
+ ipos += n_written;
+ *pos = ipos;
+ if (ipos > inode->i_size) {
+ inode->i_size = ipos;
+ inode->i_blocks = (ipos + 511) >> 9;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write size updated to %lld bytes, %d blocks",
+ ipos, (int)(inode->i_blocks));
+ }
+
+ }
+ yaffs_gross_unlock(dev);
+ return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
+}
+
+
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *pg, void *fsdadata)
+{
+ int ret = 0;
+ void *addr, *kva;
+ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
+
+ kva = kmap(pg);
+ addr = kva + offset_into_page;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end addr %p pos %lld n_bytes %d",
+ addr, pos, copied);
+
+ ret = yaffs_file_write(filp, addr, copied, &pos);
+
+ if (ret != copied) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end not same size ret %d copied %d",
+ ret, copied);
+ SetPageError(pg);
+ }
+
+ kunmap(pg);
+
+ yaffs_release_space(filp);
+ unlock_page(pg);
+ page_cache_release(pg);
+ return ret;
+}
+#else
+
+static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+ unsigned to)
+{
+ void *addr, *kva;
+
+ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
+ int n_bytes = to - offset;
+ int n_written;
+
+ kva = kmap(pg);
+ addr = kva + offset;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_commit_write addr %p pos %lld n_bytes %d",
+ addr, pos, n_bytes);
+
+ n_written = yaffs_file_write(f, addr, n_bytes, &pos);
+
+ if (n_written != n_bytes) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_commit_write not same size n_written %d n_bytes %d",
+ n_written, n_bytes);
+ SetPageError(pg);
+ }
+ kunmap(pg);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_commit_write returning %d",
+ n_written == n_bytes ? 0 : n_written);
+
+ return n_written == n_bytes ? 0 : n_written;
+}
+#endif
+
+static struct address_space_operations yaffs_file_address_operations = {
+ .readpage = yaffs_readpage,
+ .writepage = yaffs_writepage,
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+ .write_begin = yaffs_write_begin,
+ .write_end = yaffs_write_end,
+#else
+ .prepare_write = yaffs_prepare_write,
+ .commit_write = yaffs_commit_write,
+#endif
+};
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_file_flush(struct file *file, fl_owner_t id)
+#else
+static int yaffs_file_flush(struct file *file)
+#endif
+{	/* ->flush f_op: write back this object's buffered data on close() */
+	struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
+
+	struct yaffs_dev *dev = obj->my_dev;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_file_flush object %d (%s)",
+		obj->obj_id,
+		obj->dirty ? "dirty" : "clean");
+
+	yaffs_gross_lock(dev); /* single big lock serialises all yaffs core calls */
+
+	yaffs_flush_file(obj, 1, 0); /* last arg is datasync (cf. yaffs_sync_object) */
+
+	yaffs_gross_unlock(dev);
+
+	return 0;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) /* start/end range args are ignored; whole object is flushed */
+static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+static int yaffs_sync_object(struct file *file, int datasync)
+#else
+static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+			int datasync)
+#endif
+{	/* ->fsync f_op: flush the object's cached data under the gross lock */
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+	struct dentry *dentry = file->f_path.dentry; /* newer kernels no longer pass the dentry */
+#endif
+
+	obj = yaffs_dentry_to_obj(dentry);
+
+	dev = obj->my_dev;
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+		"yaffs_sync_object");
+	yaffs_gross_lock(dev);
+	yaffs_flush_file(obj, 1, datasync);
+	yaffs_gross_unlock(dev);
+	return 0;
+}
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
+static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
+ .llseek = generic_file_llseek,
+};
+
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+
+static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .sendfile = generic_file_sendfile,
+};
+
+#else
+
+static const struct file_operations yaffs_file_operations = {
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ .sendfile = generic_file_sendfile,
+#endif
+};
+#endif
+
+
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+static void zero_user_segment(struct page *page, unsigned start, unsigned end)
+{
+ void *kaddr = kmap_atomic(page, KM_USER0);
+ memset(kaddr + start, 0, end - start);
+ kunmap_atomic(kaddr, KM_USER0);
+ flush_dcache_page(page);
+}
+#endif
+
+
+static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize)
+{
+#ifdef YAFFS_USE_TRUNCATE_SETSIZE
+ truncate_setsize(inode, newsize);
+ return 0;
+#else
+ truncate_inode_pages(&inode->i_data, newsize);
+ return 0;
+#endif
+
+}
+
+
+static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr)
+{
+#ifdef YAFFS_USE_SETATTR_COPY
+ setattr_copy(inode, attr);
+ return 0;
+#else
+ return inode_setattr(inode, attr);
+#endif
+
+}
+
+static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+{	/* ->setattr i_op: apply attribute changes to the inode, then to the yaffs object */
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_setattr of object %d",
+		yaffs_inode_to_obj(inode)->obj_id);
+#if 0
+	/* Fail if a requested resize >= 2GB */
+	if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
+		error = -EINVAL;
+#endif
+
+	if (error == 0)
+		error = inode_change_ok(inode, attr); /* standard VFS permission/sanity checks */
+	if (error == 0) {
+		int result;
+		if (!error) { /* NOTE(review): redundant re-check of error - always true here */
+			error = yaffs_vfs_setattr(inode, attr); /* copy attrs into the in-core inode */
+			yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
+			if (attr->ia_valid & ATTR_SIZE) {
+				yaffs_vfs_setsize(inode, attr->ia_size); /* truncate pagecache to new size */
+				inode->i_blocks = (inode->i_size + 511) >> 9; /* count in 512-byte blocks */
+			}
+		}
+		dev = yaffs_inode_to_obj(inode)->my_dev;
+		if (attr->ia_valid & ATTR_SIZE) {
+			yaffs_trace(YAFFS_TRACE_OS,
+				"resize to %d(%x)",
+				(int)(attr->ia_size),
+				(int)(attr->ia_size));
+		}
+		yaffs_gross_lock(dev);
+		result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr); /* push attrs to yaffs core */
+		if (result == YAFFS_OK) {
+			error = 0;
+		} else {
+			error = -EPERM;
+		}
+		yaffs_gross_unlock(dev);
+
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
+
+	return error;
+}
+
+static int yaffs_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_set_xattrib(obj, name, value, size, flags);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name,
+ void *buff, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_getxattr \"%s\" from object %d",
+ name, obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_get_xattrib(obj, name, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
+
+ return error;
+}
+
+static int yaffs_removexattr(struct dentry *dentry, const char *name)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_remove_xattrib(obj, name);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_list_xattrib(obj, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr done returning %d", error);
+
+ return error;
+}
+
+
+static const struct inode_operations yaffs_file_inode_operations = {
+ .setattr = yaffs_setattr,
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+};
+
+
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+ int buflen)
+{
+ unsigned char *alias;
+ int ret;
+
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+
+ yaffs_gross_unlock(dev);
+
+ if (!alias)
+ return -ENOMEM;
+
+ ret = vfs_readlink(dentry, buffer, buflen, alias);
+ kfree(alias);
+ return ret;
+}
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{	/* ->follow_link i_op: hand the symlink alias to the VFS */
+	void *ret;
+#else
+static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{	/* legacy int-returning ->follow_link variant */
+	int ret;	/* FIX: was "int ret" - missing semicolon broke this branch's build */
+#endif
+	unsigned char *alias;
+	int ret_int = 0;
+	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+	yaffs_gross_unlock(dev);
+
+	if (!alias) {
+		ret_int = -ENOMEM;
+		goto out;
+	}
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+	nd_set_link(nd, alias);	/* alias is freed later by yaffs_put_link() */
+	ret = alias;
+out:
+	if (ret_int)
+		ret = ERR_PTR(ret_int);
+	return ret;
+#else
+	ret = vfs_follow_link(nd, alias);
+	kfree(alias);
+out:
+	if (ret_int)
+		ret = ret_int;
+	return ret;
+#endif
+}
+
+
+#ifdef YAFFS_HAS_PUT_INODE
+
+/* For now put inode is just for debugging
+ * Put inode is called when the inode **structure** is put.
+ */
+static void yaffs_put_inode(struct inode *inode)
+{
+	/* FIX: ')' was misplaced after the format string, ending the call early
+	 * so ino/count were never passed to yaffs_trace(). */
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_put_inode: ino %d, count %d",
+		(int)inode->i_ino, atomic_read(&inode->i_count));
+}
+#endif
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
+{
+ kfree(alias);
+}
+#endif
+
+static const struct inode_operations yaffs_symlink_inode_operations = {
+ .readlink = yaffs_readlink,
+ .follow_link = yaffs_follow_link,
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+ .put_link = yaffs_put_link,
+#endif
+ .setattr = yaffs_setattr,
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+};
+
+#ifdef YAFFS_USE_OWN_IGET
+
+static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ /* NB This is called as a side effect of other functions, but
+ * we had to release the lock to prevent deadlocks, so
+ * need to lock again.
+ */
+
+ yaffs_gross_lock(dev);
+
+ obj = yaffs_find_by_number(dev, inode->i_ino);
+
+ yaffs_fill_inode_from_obj(inode, obj);
+
+ yaffs_gross_unlock(dev);
+
+ unlock_new_inode(inode);
+ return inode;
+}
+
+#else
+
+static void yaffs_read_inode(struct inode *inode)
+{
+ /* NB This is called as a side effect of other functions, but
+ * we had to release the lock to prevent deadlocks, so
+ * need to lock again.
+ */
+
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev = yaffs_super_to_dev(inode->i_sb);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_inode for %d", (int)inode->i_ino);
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_lock(dev);
+
+ obj = yaffs_find_by_number(dev, inode->i_ino);
+
+ yaffs_fill_inode_from_obj(inode, obj);
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_unlock(dev);
+}
+
+#endif
+
+
+
+struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+ struct yaffs_obj *obj)
+{
+ struct inode *inode;
+
+ if (!sb) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL super_block!!");
+ return NULL;
+
+ }
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL object!!");
+ return NULL;
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for object %d", obj->obj_id);
+
+ inode = Y_IGET(sb, obj->obj_id);
+ if (IS_ERR(inode))
+ return NULL;
+
+ /* NB Side effect: iget calls back to yaffs_read_inode(). */
+ /* iget also increments the inode's i_count */
+ /* NB You can't be holding gross_lock or deadlock will happen! */
+
+ return inode;
+}
+
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+#define YCRED(x) x
+#else
+#define YCRED(x) (x->cred)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+		dev_t rdev)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+		dev_t rdev)
+#else
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+		int rdev)
+#endif
+{	/* ->mknod i_op: create a file/dir/special object under dir; also backs create/mkdir */
+	struct inode *inode;
+
+	struct yaffs_obj *obj = NULL;
+	struct yaffs_dev *dev;
+
+	struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
+
+	int error = -ENOSPC;
+	uid_t uid = YCRED(current)->fsuid;
+	gid_t gid =
+		(dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+
+	if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+		mode |= S_ISGID; /* new subdirs inherit setgid from the parent */
+
+	if (parent) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_mknod: parent object %d type %d",
+			parent->obj_id, parent->variant_type);
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_mknod: could not get parent object");
+		return -EPERM;
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_mknod: making object for %s, mode %x dev %x",
+		dentry->d_name.name, mode, rdev); /* FIX: trace typo "oject" -> "object" */
+
+	dev = parent->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	switch (mode & S_IFMT) {
+	default:
+		/* Special (socket, fifo, device...) */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+		obj =
+		    yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+					 gid, old_encode_dev(rdev));
+#else
+		obj =
+		    yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+					 gid, rdev);
+#endif
+		break;
+	case S_IFREG:		/* file */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
+		obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
+					gid);
+		break;
+	case S_IFDIR:		/* directory */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
+		obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
+				       uid, gid);
+		break;
+	case S_IFLNK:		/* symlink */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
+		obj = NULL;	/* Do we ever get here? */
+		break;
+	}
+
+	/* Can not call yaffs_get_inode() with gross lock held */
+	yaffs_gross_unlock(dev);
+
+	if (obj) {
+		inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+		d_instantiate(dentry, inode);
+		update_dir_time(dir);
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_mknod created object %d count = %d",
+			obj->obj_id, atomic_read(&inode->i_count));
+		error = 0;
+		yaffs_fill_inode_from_obj(dir, parent); /* refresh parent dir inode attrs */
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
+		error = -ENOMEM;
+	}
+
+	return error;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+#else
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+#endif
+{
+ int ret_val;
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mkdir");
+ ret_val = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+ return ret_val;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool dummy)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ struct nameidata *n)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+#else
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
+#endif
+{
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_create");
+ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int dummy)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *n)
+#else
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
+#endif
+{
+ struct yaffs_obj *obj;
+ struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
+
+ struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup for %d:%s",
+ yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
+
+ obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
+
+ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */
+
+ /* Can't hold gross lock when calling yaffs_get_inode() */
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_lookup found %d", obj->obj_id);
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
+
+ }
+
+/* added NCB for 2.5/6 compatability - forces add even if inode is
+ * NULL which creates dentry hash */
+ d_add(dentry, inode);
+
+ return NULL;
+}
+
+/*
+ * Create a link...
+ */
+static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ struct inode *inode = old_dentry->d_inode;
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *link = NULL;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+ link =
+ yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ obj);
+
+ if (link) {
+ set_nlink(old_dentry->d_inode, yaffs_get_obj_link_count(obj));
+ d_instantiate(dentry, old_dentry->d_inode);
+ atomic_inc(&old_dentry->d_inode->i_count);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_link link count %d i_count %d",
+ old_dentry->d_inode->i_nlink,
+ atomic_read(&old_dentry->d_inode->i_count));
+ }
+
+ yaffs_gross_unlock(dev);
+
+ if (link) {
+ update_dir_time(dir);
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ uid_t uid = YCRED(current)->fsuid;
+ gid_t gid =
+ (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
+
+ if (strnlen(dentry->d_name.name, YAFFS_MAX_NAME_LENGTH + 1) >
+ YAFFS_MAX_NAME_LENGTH)
+ return -ENAMETOOLONG;
+
+ if (strnlen(symname, YAFFS_MAX_ALIAS_LENGTH + 1) >
+ YAFFS_MAX_ALIAS_LENGTH)
+ return -ENAMETOOLONG;
+
+ dev = yaffs_inode_to_obj(dir)->my_dev;
+ yaffs_gross_lock(dev);
+ obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ S_IFLNK | S_IRWXUGO, uid, gid, symname);
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ struct inode *inode;
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
+ return 0;
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * The VFS layer already does all the dentry stuff for rename.
+ *
+ * NB: POSIX says you can rename an object over an old object of the same name
+ */
+static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct yaffs_dev *dev;
+ int ret_val = YAFFS_FAIL;
+ struct yaffs_obj *target;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
+ dev = yaffs_inode_to_obj(old_dir)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ /* Check if the target is an existing directory that is not empty. */
+ target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+
+ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
+ !list_empty(&target->variant.dir_variant.children)) {
+
+ yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
+
+ ret_val = YAFFS_FAIL;
+ } else {
+ /* Now does unlinking internally using shadowing mechanism */
+ yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
+
+ ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
+ old_dentry->d_name.name,
+ yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+ }
+ yaffs_gross_unlock(dev);
+
+ if (ret_val == YAFFS_OK) {
+ if (target)
+ inode_dec_link_count(new_dentry->d_inode);
+
+ update_dir_time(old_dir);
+ if (old_dir != new_dir)
+ update_dir_time(new_dir);
+ return 0;
+ } else {
+ return -ENOTEMPTY;
+ }
+}
+
+
+
+
+static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+{	/* ->unlink i_op (also installed as ->rmdir): remove named entry from dir */
+	int ret_val;
+
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_unlink %d:%s",
+		(int)(dir->i_ino), dentry->d_name.name);
+	obj = yaffs_inode_to_obj(dir);
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	ret_val = yaffs_unlinker(obj, dentry->d_name.name);
+
+	if (ret_val == YAFFS_OK) {
+		inode_dec_link_count(dentry->d_inode);
+		dir->i_version++; /* invalidates in-flight readdir positions (see yaffs_readdir) */
+		yaffs_gross_unlock(dev);
+		update_dir_time(dir);
+		return 0;
+	}
+	yaffs_gross_unlock(dev);
+	return -ENOTEMPTY; /* doubles as the ->rmdir failure code */
+}
+
+
+
+static const struct inode_operations yaffs_dir_inode_operations = {
+ .create = yaffs_create,
+ .lookup = yaffs_lookup,
+ .link = yaffs_link,
+ .unlink = yaffs_unlink,
+ .symlink = yaffs_symlink,
+ .mkdir = yaffs_mkdir,
+ .rmdir = yaffs_unlink,
+ .mknod = yaffs_mknod,
+ .rename = yaffs_rename,
+ .setattr = yaffs_setattr,
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+};
+
+/*-----------------------------------------------------------------*/
+/* Directory search context allows us to unlock access to yaffs during
+ * filldir without causing problems with the directory being modified.
+ * This is similar to the tried and tested mechanism used in yaffs direct.
+ *
+ * A search context iterates along a doubly linked list of siblings in the
+ * directory. If the iterating object is deleted then this would corrupt
+ * the list iteration, likely causing a crash. The search context avoids
+ * this by using the remove_obj_fn to move the search context to the
+ * next object before the object is deleted.
+ *
+ * Many readdirs (and thus seach conexts) may be alive simulateously so
+ * each struct yaffs_dev has a list of these.
+ *
+ * A seach context lives for the duration of a readdir.
+ *
+ * All these functions must be called while yaffs is locked.
+ */
+
+struct yaffs_search_context {
+ struct yaffs_dev *dev;
+ struct yaffs_obj *dir_obj;
+ struct yaffs_obj *next_return;
+ struct list_head others;
+};
+
+/*
+ * yaffs_new_search() creates a new search context, initialises it and
+ * adds it to the device's search context list.
+ *
+ * Called at start of readdir.
+ */
+static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
+{
+ struct yaffs_dev *dev = dir->my_dev;
+ struct yaffs_search_context *sc =
+ kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
+ if (sc) {
+ sc->dir_obj = dir;
+ sc->dev = dev;
+ if (list_empty(&sc->dir_obj->variant.dir_variant.children))
+ sc->next_return = NULL;
+ else
+ sc->next_return =
+ list_entry(dir->variant.dir_variant.children.next,
+ struct yaffs_obj, siblings);
+ INIT_LIST_HEAD(&sc->others);
+ list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
+ }
+ return sc;
+}
+
+/*
+ * yaffs_search_end() disposes of a search context and cleans up.
+ */
+static void yaffs_search_end(struct yaffs_search_context *sc)
+{
+ if (sc) {
+ list_del(&sc->others);
+ kfree(sc);
+ }
+}
+
+/*
+ * yaffs_search_advance() moves a search context to the next object.
+ * Called when the search iterates or when an object removal causes
+ * the search context to be moved to the next object.
+ */
+static void yaffs_search_advance(struct yaffs_search_context *sc)
+{	/* advance sc to the next sibling in the directory, or NULL at end */
+	if (!sc)
+		return;
+
+	if (sc->next_return == NULL ||
+		list_empty(&sc->dir_obj->variant.dir_variant.children))
+		sc->next_return = NULL;
+	else {
+		struct list_head *next = sc->next_return->siblings.next;
+
+		if (next == &sc->dir_obj->variant.dir_variant.children)
+			sc->next_return = NULL;	/* end of list */
+		else
+			sc->next_return =
+				list_entry(next, struct yaffs_obj, siblings);
+	}
+}
+
+/*
+ * yaffs_remove_obj_callback() is called when an object is unlinked.
+ * We check open search contexts and advance any which are currently
+ * on the object being iterated.
+ */
+static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
+{
+
+ struct list_head *i;
+ struct yaffs_search_context *sc;
+ struct list_head *search_contexts =
+ &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
+
+ /* Iterate through the directory search contexts.
+ * If any are currently on the object being removed, then advance
+ * the search context to the next object to prevent a hanging pointer.
+ */
+ list_for_each(i, search_contexts) {
+ sc = list_entry(i, struct yaffs_search_context, others);
+ if (sc->next_return == obj)
+ yaffs_search_advance(sc);
+ }
+
+}
+
+
+/*-----------------------------------------------------------------*/
+
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct yaffs_search_context *sc;
+ struct inode *inode = f->f_dentry->d_inode;
+ unsigned long offset, curoffs;
+ struct yaffs_obj *l;
+ int ret_val = 0;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_dev_to_lc(dev)->readdir_process = current;
+
+ offset = f->f_pos;
+
+ sc = yaffs_new_search(obj);
+ if (!sc) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: starting at %d", (int)offset);
+
+ if (offset == 0) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry . ino %d",
+ (int)inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+ if (offset == 1) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry .. ino %d",
+ (int)f->f_dentry->d_parent->d_inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, "..", 2, offset,
+ f->f_dentry->d_parent->d_inode->i_ino,
+ DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+
+ curoffs = 1;
+
+ /* If the directory has changed since the open or last call to
+ readdir, rewind to after the 2 canned entries. */
+ if (f->f_version != inode->i_version) {
+ offset = 2;
+ f->f_pos = offset;
+ f->f_version = inode->i_version;
+ }
+
+ while (sc->next_return) {
+ curoffs++;
+ l = sc->next_return;
+ if (curoffs >= offset) {
+ int this_inode = yaffs_get_obj_inode(l);
+ int this_type = yaffs_get_obj_type(l);
+
+ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: %s inode %d",
+ name, yaffs_get_obj_inode(l));
+
+ yaffs_gross_unlock(dev);
+
+ if (filldir(dirent,
+ name,
+ strlen(name),
+ offset, this_inode, this_type) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+
+ yaffs_gross_lock(dev);
+
+ offset++;
+ f->f_pos++;
+ }
+ yaffs_search_advance(sc);
+ }
+
+out:
+ yaffs_search_end(sc);
+ yaffs_dev_to_lc(dev)->readdir_process = NULL;
+ yaffs_gross_unlock(dev);
+
+ return ret_val;
+}
+
+static const struct file_operations yaffs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = yaffs_readdir,
+ .fsync = yaffs_sync_object,
+ .llseek = generic_file_llseek,
+};
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj)
+{
+ if (inode && obj) {
+
+ /* Check mode against the variant type and attempt to repair if broken. */
+ u32 mode = obj->yst_mode;
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (!S_ISREG(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFREG;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if (!S_ISLNK(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFLNK;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!S_ISDIR(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFDIR;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ default:
+ /* TODO? */
+ break;
+ }
+
+ inode->i_flags |= S_NOATIME;
+
+ inode->i_ino = obj->obj_id;
+ inode->i_mode = obj->yst_mode;
+ inode->i_uid = obj->yst_uid;
+ inode->i_gid = obj->yst_gid;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ inode->i_blksize = inode->i_sb->s_blocksize;
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ inode->i_rdev = old_decode_dev(obj->yst_rdev);
+ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+ inode->i_atime.tv_nsec = 0;
+ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+ inode->i_mtime.tv_nsec = 0;
+ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+ inode->i_ctime.tv_nsec = 0;
+#else
+ inode->i_rdev = obj->yst_rdev;
+ inode->i_atime = obj->yst_atime;
+ inode->i_mtime = obj->yst_mtime;
+ inode->i_ctime = obj->yst_ctime;
+#endif
+ inode->i_size = yaffs_get_obj_length(obj);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+ set_nlink(inode, yaffs_get_obj_link_count(obj));
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode mode %x uid %d gid %d size %lld count %d",
+ inode->i_mode, inode->i_uid, inode->i_gid,
+ inode->i_size, atomic_read(&inode->i_count));
+
+ switch (obj->yst_mode & S_IFMT) {
+ default: /* fifo, device or socket */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ init_special_inode(inode, obj->yst_mode,
+ old_decode_dev(obj->yst_rdev));
+#else
+ init_special_inode(inode, obj->yst_mode,
+ (dev_t) (obj->yst_rdev));
+#endif
+ break;
+ case S_IFREG: /* file */
+ inode->i_op = &yaffs_file_inode_operations;
+ inode->i_fop = &yaffs_file_operations;
+ inode->i_mapping->a_ops =
+ &yaffs_file_address_operations;
+ break;
+ case S_IFDIR: /* directory */
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+ break;
+ case S_IFLNK: /* symlink */
+ inode->i_op = &yaffs_symlink_inode_operations;
+ break;
+ }
+
+ yaffs_inode_to_obj_lv(inode) = obj;
+
+ obj->my_inode = inode;
+
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode invalid parameters");
+ }
+
+}
+
+
+
+/*
+ * yaffs background thread functions .
+ * yaffs_bg_thread_fn() the thread function
+ * yaffs_bg_start() launches the background thread.
+ * yaffs_bg_stop() cleans up the background thread.
+ *
+ * NB:
+ * The thread should only run after the yaffs is initialised
+ * The thread should be stopped before yaffs is unmounted.
+ * The thread should not do any writing while the fs is in read only.
+ */
+
+static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
+{	/* 0 = no GC needed, 1 = mild, 2 = urgent; drives bg thread wakeup rate */
+	unsigned erased_chunks =
+		dev->n_erased_blocks * dev->param.chunks_per_block;
+	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+	unsigned scattered = 0;	/* Free chunks not in an erased block */
+
+	if (erased_chunks < dev->n_free_chunks)
+		scattered = (dev->n_free_chunks - erased_chunks);
+
+	if (!context->bg_running)
+		return 0;
+	else if (scattered < (dev->param.chunks_per_block * 2))
+		return 0; /* too little scattered space to be worth collecting */
+	else if (erased_chunks > dev->n_free_chunks / 2)
+		return 0;
+	else if (erased_chunks > dev->n_free_chunks / 4)
+		return 1;
+	else
+		return 2;
+}
+
+#ifdef YAFFS_COMPILE_BACKGROUND
+
+void yaffs_background_waker(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+/* Background kthread main loop for one yaffs device.
+ * Periodically (while context->bg_running and yaffs_bg_enable):
+ *  - flushes dirty directories (every HZ),
+ *  - runs background GC at a rate scaled by yaffs_bg_gc_urgency(),
+ * then sleeps until the earlier of the two deadlines using a one-shot
+ * timer that wakes this task. Always returns 0.
+ */
+static int yaffs_bg_thread_fn(void *data)
+{
+ struct yaffs_dev *dev = (struct yaffs_dev *)data;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned long now = jiffies;
+ unsigned long next_dir_update = now;
+ unsigned long next_gc = now;
+ unsigned long expires;
+ unsigned int urgency;
+
+ int gc_result;
+ struct timer_list timer;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "yaffs_background starting for dev %p", (void *)dev);
+
+#ifdef YAFFS_COMPILE_FREEZER
+ set_freezable();
+#endif
+ while (context->bg_running) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
+
+ if (kthread_should_stop())
+ break;
+
+#ifdef YAFFS_COMPILE_FREEZER
+ if (try_to_freeze())
+ continue;
+#endif
+ /* All yaffs guts calls below require the per-device lock. */
+ yaffs_gross_lock(dev);
+
+ now = jiffies;
+
+ if (time_after(now, next_dir_update) && yaffs_bg_enable) {
+ yaffs_update_dirty_dirs(dev);
+ next_dir_update = now + HZ;
+ }
+
+ if (time_after(now, next_gc) && yaffs_bg_enable) {
+ if (!dev->is_checkpointed) {
+ urgency = yaffs_bg_gc_urgency(dev);
+ gc_result = yaffs_bg_gc(dev, urgency);
+ /* Higher urgency -> shorter GC interval. */
+ if (urgency > 1)
+ next_gc = now + HZ / 20 + 1;
+ else if (urgency > 0)
+ next_gc = now + HZ / 10 + 1;
+ else
+ next_gc = now + HZ * 2;
+ } else {
+ /*
+ * gc not running so set to next_dir_update
+ * to cut down on wake ups
+ */
+ next_gc = next_dir_update;
+ }
+ }
+ yaffs_gross_unlock(dev);
+#if 1
+ /* Sleep until the nearest deadline (at least 'now'); the
+ * one-shot timer wakes this task via yaffs_background_waker. */
+ expires = next_dir_update;
+ if (time_before(next_gc, expires))
+ expires = next_gc;
+ if (time_before(expires, now))
+ expires = now + HZ;
+
+ Y_INIT_TIMER(&timer);
+ timer.expires = expires + 1;
+ timer.data = (unsigned long)current;
+ timer.function = yaffs_background_waker;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_timer(&timer);
+ schedule();
+ del_timer_sync(&timer);
+#else
+ /* Disabled polling fallback, kept for debugging. */
+ msleep(10);
+#endif
+ }
+
+ return 0;
+}
+
+/* Start the background kthread for 'dev'.
+ * Returns 0 on success, -1 for a read-only device, or the PTR_ERR from
+ * kthread_run() on failure (bg_thread/bg_running are reset in that case).
+ */
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+ int retval = 0;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+
+ if (dev->read_only)
+ return -1;
+
+ context->bg_running = 1;
+
+ context->bg_thread = kthread_run(yaffs_bg_thread_fn,
+ (void *)dev, "yaffs-bg-%d",
+ context->mount_id);
+
+ if (IS_ERR(context->bg_thread)) {
+ retval = PTR_ERR(context->bg_thread);
+ context->bg_thread = NULL;
+ context->bg_running = 0;
+ }
+ return retval;
+}
+
+/* Stop the background thread: clear the run flag first so the loop can
+ * exit on its own, then kthread_stop() waits for it to terminate. */
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
+
+ ctxt->bg_running = 0;
+
+ if (ctxt->bg_thread) {
+ kthread_stop(ctxt->bg_thread);
+ ctxt->bg_thread = NULL;
+ }
+}
+#else
+/* No-op stubs used when YAFFS_COMPILE_BACKGROUND is not defined. */
+static int yaffs_bg_thread_fn(void *data)
+{
+ return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+ return 0;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+}
+#endif
+
+
+/* Flush every yaffs object backing an inode on this superblock.
+ * NOTE(review): iterates sb->s_inodes without the inode_sb_list lock —
+ * presumably safe because callers hold the gross lock and this runs at
+ * sync/unmount time; confirm against callers. */
+static void yaffs_flush_inodes(struct super_block *sb)
+{
+ struct inode *iptr;
+ struct yaffs_obj *obj;
+
+ list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
+ obj = yaffs_inode_to_obj(iptr);
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "flushing obj %d",
+ obj->obj_id);
+ /* flush data (1) but do not discard cache (0) */
+ yaffs_flush_file(obj, 1, 0);
+ }
+ }
+}
+
+/* Flush all inodes, dirty directories and the whole cache for this
+ * superblock; optionally write a checkpoint afterwards. */
+static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ if (!dev)
+ return;
+
+ yaffs_flush_inodes(sb);
+ yaffs_update_dirty_dirs(dev);
+ yaffs_flush_whole_cache(dev);
+ if (do_checkpoint)
+ yaffs_checkpoint_save(dev);
+}
+
+/* Global list of mounted yaffs contexts, guarded by yaffs_context_lock
+ * (initialised in init_yaffs_fs). */
+static LIST_HEAD(yaffs_context_list);
+struct mutex yaffs_context_lock;
+
+/* Superblock teardown: stop the background thread, flush everything with
+ * a final checkpoint, deinitialise the guts, unlink this mount from the
+ * global context list and release the device/MTD resources. */
+static void yaffs_put_super(struct super_block *sb)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
+ "yaffs_put_super");
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "Shutting down yaffs background thread");
+ yaffs_bg_stop(dev);
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "yaffs background thread shut down");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_super(sb, 1);
+
+ yaffs_deinitialise(dev);
+
+ yaffs_gross_unlock(dev);
+
+ mutex_lock(&yaffs_context_lock);
+ list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
+ mutex_unlock(&yaffs_context_lock);
+
+ if (yaffs_dev_to_lc(dev)->spare_buffer) {
+ kfree(yaffs_dev_to_lc(dev)->spare_buffer);
+ yaffs_dev_to_lc(dev)->spare_buffer = NULL;
+ }
+
+ /* The linux context is embedded in/owned via dev, freed together. */
+ kfree(dev);
+
+ yaffs_put_mtd_device(mtd);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
+ "yaffs_put_super done");
+}
+
+
+/* Guts callback: expose the module-wide yaffs_gc_control knob per device. */
+static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
+{
+ return yaffs_gc_control;
+}
+
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+
+/* exportfs (NFS) helpers: resolve file handles to dentries using the
+ * generic helpers with our inode lookup; 'generation' is ignored. */
+static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
+ uint32_t generation)
+{
+ return Y_IGET(sb, ino);
+}
+
+static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+/* exportfs get_parent: return a dentry for the parent directory of
+ * 'dentry', or an ERR_PTR (-ENOENT if no yaffs object / no parent).
+ *
+ * Fix: the previous code tested !IS_ERR(parent) after d_obtain_alias()
+ * and so threw away every *successful* result, returning -ENOMEM and
+ * calling iput() on an inode whose reference d_obtain_alias() had
+ * already consumed (reference underflow). d_obtain_alias() consumes
+ * the inode reference even on failure and returns an ERR_PTR itself,
+ * so its result can be returned directly.
+ */
+struct dentry *yaffs2_get_parent(struct dentry *dentry)
+{
+
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct dentry *parent = ERR_PTR(-ENOENT);
+ struct inode *inode;
+ unsigned long parent_ino;
+ struct yaffs_obj *d_obj;
+ struct yaffs_obj *parent_obj;
+
+ d_obj = yaffs_inode_to_obj(dentry->d_inode);
+
+ if (d_obj) {
+ parent_obj = d_obj->parent;
+ if (parent_obj) {
+ parent_ino = yaffs_get_obj_inode(parent_obj);
+ inode = Y_IGET(sb, parent_ino);
+
+ if (IS_ERR(inode))
+ parent = ERR_CAST(inode);
+ else
+ /* Consumes the inode ref; returns a
+ * dentry or ERR_PTR on failure. */
+ parent = d_obtain_alias(inode);
+ }
+ }
+
+ return parent;
+}
+
+/* Just declare a zero structure as a NULL value implies
+ * using the default functions of exportfs.
+ */
+
+/* exportfs hooks so yaffs mounts can be exported over NFS. */
+static struct export_operations yaffs_export_ops = {
+ .fh_to_dentry = yaffs2_fh_to_dentry,
+ .fh_to_parent = yaffs2_fh_to_parent,
+ .get_parent = yaffs2_get_parent,
+};
+
+#endif
+
+/* Break the inode <-> yaffs_obj association and complete any deferred
+ * object free. Caller is expected to hold the gross lock. */
+static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
+{
+ /* Clear the association between the inode and
+ * the struct yaffs_obj.
+ */
+ obj->my_inode = NULL;
+ yaffs_inode_to_obj_lv(inode) = NULL;
+
+ /* If the object freeing was deferred, then the real
+ * free happens now.
+ * This should fix the inode inconsistency problem.
+ */
+ yaffs_handle_defered_free(obj);
+}
+
+#ifdef YAFFS_HAS_EVICT_INODE
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ */
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ * Deletes the on-flash object when the inode has no remaining links,
+ * then always unstitches the inode from its yaffs object.
+ */
+static void yaffs_evict_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ int deleteme = 0;
+
+ obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_evict_inode: ino %d, count %d %s",
+ (int)inode->i_ino, atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (!inode->i_nlink && !is_bad_inode(inode))
+ deleteme = 1;
+ truncate_inode_pages(&inode->i_data, 0);
+ Y_CLEAR_INODE(inode);
+
+ if (deleteme && obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_del_obj(obj);
+ yaffs_gross_unlock(dev);
+ }
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_unstitch_obj(inode, obj);
+ yaffs_gross_unlock(dev);
+ }
+}
+#else
+
+/* clear is called to tell the fs to release any per-inode data it holds.
+ * The object might still exist on disk and is just being thrown out of the cache
+ * or else the object has actually been deleted and we're being called via
+ * the chain
+ * yaffs_delete_inode() -> clear_inode()->yaffs_clear_inode()
+ */
+
+/* Pre-evict_inode kernels: release per-inode yaffs state only; the
+ * on-flash object may still exist (deletion is yaffs_delete_inode's job). */
+static void yaffs_clear_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_clear_inode: ino %d, count %d %s",
+ (int)inode->i_ino, atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_unstitch_obj(inode, obj);
+ yaffs_gross_unlock(dev);
+ }
+
+}
+
+/* delete is called when the link count is zero and the inode
+ * is put (ie. nobody wants to know about it anymore, time to
+ * delete the file).
+ * NB Must call clear_inode()
+ */
+/* Pre-evict_inode kernels: link count hit zero, delete the on-flash
+ * object, then hand off to clear_inode() (which reaches
+ * yaffs_clear_inode() for the unstitch). */
+static void yaffs_delete_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_delete_inode: ino %d, count %d %s",
+ (int)inode->i_ino, atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_del_obj(obj);
+ yaffs_gross_unlock(dev);
+ }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+ truncate_inode_pages(&inode->i_data, 0);
+#endif
+ clear_inode(inode);
+}
+#endif
+
+
+
+
+/* statfs: report block counts in units of sb->s_blocksize. Three cases:
+ * chunk size not a power of 2 (64-bit math + do_div), fs block larger
+ * than a chunk, or chunk at least as large as an fs block. */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+ struct super_block *sb = dentry->d_sb;
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+#else
+static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+#endif
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
+
+ yaffs_gross_lock(dev);
+
+ buf->f_type = YAFFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = 255;
+
+ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
+ /* Do this if chunk size is not a power of 2 */
+
+ uint64_t bytes_in_dev;
+ uint64_t bytes_free;
+
+ bytes_in_dev =
+ ((uint64_t)
+ ((dev->param.end_block - dev->param.start_block +
+ 1))) * ((uint64_t) (dev->param.chunks_per_block *
+ dev->data_bytes_per_chunk));
+
+ do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */
+ buf->f_blocks = bytes_in_dev;
+
+ bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
+ ((uint64_t) (dev->data_bytes_per_chunk));
+
+ do_div(bytes_free, sb->s_blocksize);
+
+ buf->f_bfree = bytes_free;
+
+ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
+
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ } else {
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+ }
+
+ /* yaffs does not track file/inode counts. */
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_bavail = buf->f_bfree;
+
+ yaffs_gross_unlock(dev);
+ return 0;
+}
+
+
+
+/* Common sync path: flush if the super is dirty or a checkpoint is due.
+ * A checkpoint is written when requested (and GC is not urgent) or when
+ * the one-shot bit (yaffs_auto_checkpoint & 4) is set; the one-shot bit
+ * is cleared after use. Always returns 0. */
+static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
+{
+
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
+ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
+ int do_checkpoint;
+ int dirty = yaffs_check_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_do_sync_fs: gc-urgency %d %s %s%s",
+ gc_urgent,
+ dirty ? "dirty" : "clean",
+ request_checkpoint ? "checkpoint requested" : "no checkpoint",
+ oneshot_checkpoint ? " one-shot" : "");
+
+ yaffs_gross_lock(dev);
+ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
+ oneshot_checkpoint) && !dev->is_checkpointed;
+
+ if (dirty || do_checkpoint) {
+ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
+ yaffs_clear_super_dirty(dev);
+ if (oneshot_checkpoint)
+ yaffs_auto_checkpoint &= ~4;
+ }
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+
+#ifdef YAFFS_HAS_WRITE_SUPER
+/* write_super hook: checkpoint only if yaffs_auto_checkpoint >= 2.
+ * Return type changed to void in 2.6.18+, hence the dual prototypes. */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static void yaffs_write_super(struct super_block *sb)
+#else
+static int yaffs_write_super(struct super_block *sb)
+#endif
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_write_super %s",
+ request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
+ return 0;
+#endif
+}
+#endif
+
+/* sync_fs hook: checkpoint if yaffs_auto_checkpoint >= 1. The 'wait'
+ * argument (2.6.18+) is ignored; the sync is done synchronously anyway. */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_sync_fs(struct super_block *sb, int wait)
+#else
+static int yaffs_sync_fs(struct super_block *sb)
+#endif
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+ "yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+ return 0;
+}
+
+
+
+/* Superblock operations; the #ifdefs select the inode-teardown API
+ * (evict_inode vs delete/clear_inode) matching the kernel version. */
+static const struct super_operations yaffs_super_ops = {
+ .statfs = yaffs_statfs,
+
+#ifndef YAFFS_USE_OWN_IGET
+ .read_inode = yaffs_read_inode,
+#endif
+#ifdef YAFFS_HAS_PUT_INODE
+ .put_inode = yaffs_put_inode,
+#endif
+ .put_super = yaffs_put_super,
+#ifdef YAFFS_HAS_EVICT_INODE
+ .evict_inode = yaffs_evict_inode,
+#else
+ .delete_inode = yaffs_delete_inode,
+ .clear_inode = yaffs_clear_inode,
+#endif
+ .sync_fs = yaffs_sync_fs,
+#ifdef YAFFS_HAS_WRITE_SUPER
+ .write_super = yaffs_write_super,
+#endif
+};
+
+/* Parsed mount options; the *_overridden flags record whether the user
+ * explicitly set the corresponding value on the command line. */
+struct yaffs_options {
+ int inband_tags;
+ int skip_checkpoint_read;
+ int skip_checkpoint_write;
+ int no_cache;
+ int tags_ecc_on;
+ int tags_ecc_overridden;
+ int lazy_loading_enabled;
+ int lazy_loading_overridden;
+ int empty_lost_and_found;
+ int empty_lost_and_found_overridden;
+ int disable_summary;
+};
+
+#define MAX_OPT_LEN 30
+/* Parse the comma-separated mount-option string into 'options'.
+ * Returns 0 on success, 1 on the first unrecognised option (options
+ * parsed before the error remain set). Overlong tokens are truncated
+ * to MAX_OPT_LEN characters before matching. */
+static int yaffs_parse_options(struct yaffs_options *options,
+ const char *options_str)
+{
+ char cur_opt[MAX_OPT_LEN + 1];
+ int p;
+ int error = 0;
+
+ /* Parse through the options which is a comma seperated list */
+
+ while (options_str && *options_str && !error) {
+ memset(cur_opt, 0, MAX_OPT_LEN + 1);
+ p = 0;
+
+ while (*options_str == ',')
+ options_str++;
+
+ while (*options_str && *options_str != ',') {
+ if (p < MAX_OPT_LEN) {
+ cur_opt[p] = *options_str;
+ p++;
+ }
+ options_str++;
+ }
+
+ if (!strcmp(cur_opt, "inband-tags")) {
+ options->inband_tags = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-off")) {
+ options->tags_ecc_on = 0;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-on")) {
+ options->tags_ecc_on = 1;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-off")) {
+ options->lazy_loading_enabled = 0;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-on")) {
+ options->lazy_loading_enabled = 1;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "disable-summary")) {
+ options->disable_summary = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
+ options->empty_lost_and_found = 0;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
+ options->empty_lost_and_found = 1;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "no-cache")) {
+ options->no_cache = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-read")) {
+ options->skip_checkpoint_read = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-write")) {
+ options->skip_checkpoint_write = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint")) {
+ options->skip_checkpoint_read = 1;
+ options->skip_checkpoint_write = 1;
+ } else {
+ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+ cur_opt);
+ error = 1;
+ }
+ }
+
+ return error;
+}
+
+
+/* Make the root dentry for 'inode'; hides the d_alloc_root (pre-3.4,
+ * which does not consume the inode ref on failure) vs d_make_root split. */
+static struct dentry *yaffs_make_root(struct inode *inode)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+ struct dentry *root = d_alloc_root(inode);
+
+ if (!root)
+ iput(inode);
+
+ return root;
+#else
+ return d_make_root(inode);
+#endif
+}
+
+
+
+
+/* Core mount routine shared by yaffs and yaffs2: validate the MTD,
+ * allocate and configure the yaffs_dev + linux context, initialise the
+ * guts, start the background thread and create the root inode/dentry.
+ * Returns 'sb' on success or NULL on any failure.
+ *
+ * NOTE(review): several error returns after get_mtd_device() (bad MTD
+ * verify, allocation failure, guts-init/root-inode failures) appear to
+ * leak the mtd reference and/or 'dev'/'context' — confirm against
+ * yaffs_put_mtd_device() usage before changing; upstream yaffs handles
+ * these paths differently.
+ */
+static struct super_block *yaffs_internal_read_super(int yaffs_version,
+ struct super_block *sb,
+ void *data, int silent)
+{
+ int n_blocks;
+ struct inode *inode = NULL;
+ struct dentry *root;
+ struct yaffs_dev *dev = 0;
+ char devname_buf[BDEVNAME_SIZE + 1];
+ struct mtd_info *mtd;
+ int err;
+ char *data_str = (char *)data;
+ struct yaffs_linux_context *context = NULL;
+ struct yaffs_param *param;
+
+ int read_only = 0;
+
+ struct yaffs_options options;
+
+ unsigned mount_id;
+ int found;
+ struct yaffs_linux_context *context_iterator;
+ struct list_head *l;
+
+ if (!sb) {
+ printk(KERN_INFO "yaffs: sb is NULL\n");
+ return NULL;
+ }
+
+ sb->s_magic = YAFFS_MAGIC;
+ sb->s_op = &yaffs_super_ops;
+ sb->s_flags |= MS_NOATIME;
+
+ read_only = ((sb->s_flags & MS_RDONLY) != 0);
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+ sb->s_export_op = &yaffs_export_ops;
+#endif
+
+ if (!sb->s_dev)
+ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+ else if (!yaffs_devname(sb, devname_buf))
+ printk(KERN_INFO "yaffs: devname is NULL\n");
+ else
+ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
+ sb->s_dev,
+ yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
+
+ if (!data_str)
+ data_str = "";
+
+ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+ memset(&options, 0, sizeof(options));
+
+ if (yaffs_parse_options(&options, data_str)) {
+ /* Option parsing failed */
+ return NULL;
+ }
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: Using yaffs%d", yaffs_version);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: block size %d", (int)(sb->s_blocksize));
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Attempting MTD mount of %u.%u,\"%s\"",
+ MAJOR(sb->s_dev), MINOR(sb->s_dev),
+ yaffs_devname(sb, devname_buf));
+
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (!mtd) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: MTD device %u either not valid or unavailable",
+ MINOR(sb->s_dev));
+ return NULL;
+ }
+
+ /* Auto-select yaffs2 for large-page NAND when mounted as yaffs1. */
+ if (yaffs_auto_select && yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
+ yaffs_version = 2;
+ }
+
+ /* Added NCB 26/5/2006 for completeness */
+ if (yaffs_version == 2 && !options.inband_tags
+ && WRITE_SIZE(mtd) == 512) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
+ yaffs_version = 1;
+ }
+
+ /* NOTE(review): mtd reference not released on this error path. */
+ if(yaffs_verify_mtd(mtd, yaffs_version, options.inband_tags) < 0)
+ return NULL;
+
+ /* OK, so if we got here, we have an MTD that's NAND and looks
+ * like it has the right capabilities
+ * Set the struct yaffs_dev up for mtd
+ */
+
+ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+ read_only = 1;
+ printk(KERN_INFO
+ "yaffs: mtd is read only, setting superblock read only\n"
+ );
+ sb->s_flags |= MS_RDONLY;
+ }
+
+ dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
+ context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
+
+ /* Both allocations must succeed; free whichever one did. */
+ if (!dev || !context) {
+ if (dev)
+ kfree(dev);
+ if (context)
+ kfree(context);
+ dev = NULL;
+ context = NULL;
+ }
+
+ if (!dev) {
+ /* Deep shit could not allocate device structure */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super: Failed trying to allocate struct yaffs_dev."
+ );
+ /* NOTE(review): mtd reference not released here either. */
+ return NULL;
+ }
+ memset(dev, 0, sizeof(struct yaffs_dev));
+ param = &(dev->param);
+
+ memset(context, 0, sizeof(struct yaffs_linux_context));
+ dev->os_context = context;
+ INIT_LIST_HEAD(&(context->context_list));
+ context->dev = dev;
+ context->super = sb;
+
+ dev->read_only = read_only;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ sb->s_fs_info = dev;
+#else
+ sb->u.generic_sbp = dev;
+#endif
+
+
+ dev->driver_context = mtd;
+ param->name = mtd->name;
+
+ /* Set up the memory size parameters.... */
+
+
+ param->n_reserved_blocks = 5;
+ param->n_caches = (options.no_cache) ? 0 : 10;
+ param->inband_tags = options.inband_tags;
+
+ param->enable_xattr = 1;
+ if (options.lazy_loading_overridden)
+ param->disable_lazy_load = !options.lazy_loading_enabled;
+
+ param->defered_dir_update = 1;
+
+ if (options.tags_ecc_overridden)
+ param->no_tags_ecc = !options.tags_ecc_on;
+
+ param->empty_lost_n_found = 1;
+ param->refresh_period = 500;
+ param->disable_summary = options.disable_summary;
+
+ if (options.empty_lost_and_found_overridden)
+ param->empty_lost_n_found = options.empty_lost_and_found;
+
+ /* ... and the functions. */
+ if (yaffs_version == 2) {
+ param->is_yaffs2 = 1;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ param->total_bytes_per_chunk = mtd->writesize;
+ param->chunks_per_block = mtd->erasesize / mtd->writesize;
+#else
+ param->total_bytes_per_chunk = mtd->oobblock;
+ param->chunks_per_block = mtd->erasesize / mtd->oobblock;
+#endif
+ n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+ } else {
+ param->is_yaffs2 = 0;
+ n_blocks = YCALCBLOCKS(mtd->size,
+ YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
+
+ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
+ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
+ }
+
+ /* NOTE(review): duplicates the assignments in the yaffs2 branch
+ * above; harmless but redundant. */
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+
+ yaffs_mtd_drv_install(dev);
+
+ param->sb_dirty_fn = yaffs_set_super_dirty;
+ param->gc_control_fn = yaffs_gc_control_callback;
+
+ yaffs_dev_to_lc(dev)->super = sb;
+
+ param->use_nand_ecc = 1;
+
+ param->skip_checkpt_rd = options.skip_checkpoint_read;
+ param->skip_checkpt_wr = options.skip_checkpoint_write;
+
+ mutex_lock(&yaffs_context_lock);
+ /* Get a mount id */
+ found = 0;
+ for (mount_id = 0; !found; mount_id++) {
+ found = 1;
+ list_for_each(l, &yaffs_context_list) {
+ context_iterator =
+ list_entry(l, struct yaffs_linux_context,
+ context_list);
+ if (context_iterator->mount_id == mount_id)
+ found = 0;
+ }
+ }
+ context->mount_id = mount_id;
+
+ list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
+ &yaffs_context_list);
+ mutex_unlock(&yaffs_context_lock);
+
+ /* Directory search handling... */
+ INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
+ param->remove_obj_fn = yaffs_remove_obj_callback;
+
+ mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
+
+ yaffs_gross_lock(dev);
+
+ err = yaffs_guts_initialise(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: guts initialised %s",
+ (err == YAFFS_OK) ? "OK" : "FAILED");
+
+ if (err == YAFFS_OK)
+ yaffs_bg_start(dev);
+
+ /* No background thread -> cannot defer directory updates. */
+ if (!context->bg_thread)
+ param->defered_dir_update = 0;
+
+ sb->s_maxbytes = yaffs_max_file_size(dev);
+
+ /* Release lock before yaffs_get_inode() */
+ yaffs_gross_unlock(dev);
+
+ /* Create root inode */
+ if (err == YAFFS_OK)
+ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
+
+ /* NOTE(review): dev/context/mtd are not torn down on failure here. */
+ if (!inode)
+ return NULL;
+
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs_read_super: got root inode");
+
+ root = yaffs_make_root(inode);
+
+ if (!root)
+ return NULL;
+
+ sb->s_root = root;
+ if(!dev->is_checkpointed)
+ yaffs_set_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs_read_super: done");
+ return sb;
+}
+
+/* "yaffs" (version 1) filesystem registration glue. The #if ladder
+ * selects the mount entry point for the running kernel: .mount (2.6.39+),
+ * get_sb_bdev with vfsmount (2.6.18+), older get_sb_bdev, or the 2.4-era
+ * DECLARE_FSTYPE. All funnel into yaffs_internal_read_super(1, ...). */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, yaffs_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs",
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ .mount = yaffs_mount,
+#else
+ .get_sb = yaffs_read_super,
+#endif
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
+ FS_REQUIRES_DEV);
+#endif
+
+
+/* "yaffs2" filesystem registration glue; mirrors the yaffs (v1) glue
+ * above but funnels into yaffs_internal_read_super(2, ...). */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, yaffs2_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs2",
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ .mount = yaffs2_mount,
+#else
+ .get_sb = yaffs2_read_super,
+#endif
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs2_read_super(struct super_block *sb,
+ void *data, int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
+ FS_REQUIRES_DEV);
+#endif
+
+
+/* /proc/yaffs entry handle, created in init_yaffs_fs. */
+static struct proc_dir_entry *my_proc_entry;
+
+/* Append the device's static configuration (yaffs_param) to 'buf';
+ * returns the advanced buffer pointer (sprintf-chaining style). */
+static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
+{
+ struct yaffs_param *param = &dev->param;
+
+ buf += sprintf(buf, "start_block.......... %d\n", param->start_block);
+ buf += sprintf(buf, "end_block............ %d\n", param->end_block);
+ buf += sprintf(buf, "total_bytes_per_chunk %d\n",
+ param->total_bytes_per_chunk);
+ buf += sprintf(buf, "use_nand_ecc......... %d\n", param->use_nand_ecc);
+ buf += sprintf(buf, "no_tags_ecc.......... %d\n", param->no_tags_ecc);
+ buf += sprintf(buf, "is_yaffs2............ %d\n", param->is_yaffs2);
+ buf += sprintf(buf, "inband_tags.......... %d\n", param->inband_tags);
+ buf += sprintf(buf, "empty_lost_n_found... %d\n",
+ param->empty_lost_n_found);
+ buf += sprintf(buf, "disable_lazy_load.... %d\n",
+ param->disable_lazy_load);
+ buf += sprintf(buf, "refresh_period....... %d\n",
+ param->refresh_period);
+ buf += sprintf(buf, "n_caches............. %d\n", param->n_caches);
+ buf += sprintf(buf, "n_reserved_blocks.... %d\n",
+ param->n_reserved_blocks);
+ buf += sprintf(buf, "always_check_erased.. %d\n",
+ param->always_check_erased);
+ buf += sprintf(buf, "\n");
+
+ return buf;
+}
+
+/* Append the device's runtime state/statistics counters to 'buf';
+ * returns the advanced buffer pointer (sprintf-chaining style). */
+static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
+{
+ buf += sprintf(buf, "max file size....... %lld\n",
+ (long long) yaffs_max_file_size(dev));
+ buf += sprintf(buf, "data_bytes_per_chunk. %d\n",
+ dev->data_bytes_per_chunk);
+ buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits);
+ buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size);
+ buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks);
+ buf += sprintf(buf, "blocks_in_checkpt.... %d\n",
+ dev->blocks_in_checkpt);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes);
+ buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj);
+ buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes);
+ buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads);
+ buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures);
+ buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies);
+ buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs);
+ buf += sprintf(buf, "passive_gc_count..... %u\n",
+ dev->passive_gc_count);
+ buf += sprintf(buf, "oldest_dirty_gc_count %u\n",
+ dev->oldest_dirty_gc_count);
+ buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks);
+ buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs);
+ buf += sprintf(buf, "n_retried_writes..... %u\n",
+ dev->n_retried_writes);
+ buf += sprintf(buf, "n_retired_blocks..... %u\n",
+ dev->n_retired_blocks);
+ buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed);
+ buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed);
+ buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n",
+ dev->n_tags_ecc_fixed);
+ buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n",
+ dev->n_tags_ecc_unfixed);
+ buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits);
+ buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files);
+ buf += sprintf(buf, "n_unlinked_files..... %u\n",
+ dev->n_unlinked_files);
+ buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count);
+ buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions);
+ buf += sprintf(buf, "tags_used............ %u\n", dev->tags_used);
+ buf += sprintf(buf, "summary_used......... %u\n", dev->summary_used);
+
+ return buf;
+}
+
+/* Legacy procfs read_proc handler for /proc/yaffs. Steps 0 and 1 print
+ * the header; steps >= 2 print one half (config, then stats) of one
+ * mounted device per call. Setting *start to 1 makes proc_file_read()
+ * advance 'offset' by one per call (legacy read_proc convention). */
+static int yaffs_proc_read(char *page,
+ char **start,
+ off_t offset, int count, int *eof, void *data)
+{
+ struct list_head *item;
+ char *buf = page;
+ int step = offset;
+ int n = 0;
+
+ /* Get proc_file_read() to step 'offset' by one on each sucessive call.
+ * We use 'offset' (*ppos) to indicate where we are in dev_list.
+ * This also assumes the user has posted a read buffer large
+ * enough to hold the complete output; but that's life in /proc.
+ */
+
+ *(int *)start = 1;
+
+ /* Print header first */
+ if (step == 0)
+ buf +=
+ sprintf(buf,
+ "Multi-version YAFFS built:" __DATE__ " " __TIME__
+ "\n");
+ else if (step == 1)
+ buf += sprintf(buf, "\n");
+ else {
+ step -= 2;
+
+ mutex_lock(&yaffs_context_lock);
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+ list_for_each(item, &yaffs_context_list) {
+ struct yaffs_linux_context *dc =
+ list_entry(item, struct yaffs_linux_context,
+ context_list);
+ struct yaffs_dev *dev = dc->dev;
+
+ if (n < (step & ~1)) {
+ n += 2;
+ continue;
+ }
+ /* Even step -> config dump; odd step -> stats dump. */
+ if ((step & 1) == 0) {
+ buf +=
+ sprintf(buf, "\nDevice %d \"%s\"\n", n,
+ dev->param.name);
+ buf = yaffs_dump_dev_part0(buf, dev);
+ } else {
+ buf = yaffs_dump_dev_part1(buf, dev);
+ }
+
+ break;
+ }
+ mutex_unlock(&yaffs_context_lock);
+ }
+
+ return buf - page < count ? buf - page : count;
+}
+
+/**
+ * Set the verbosity of the warnings and error messages.
+ *
+ * Note that the names can only be a..z or _ with the current code.
+ */
+
+/* Name -> trace-mask bit table used when parsing writes to /proc/yaffs;
+ * terminated by a NULL mask_name sentinel. */
+static struct {
+ char *mask_name;
+ unsigned mask_bitfield;
+} mask_flags[] = {
+ {"allocate", YAFFS_TRACE_ALLOCATE},
+ {"always", YAFFS_TRACE_ALWAYS},
+ {"background", YAFFS_TRACE_BACKGROUND},
+ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+ {"buffers", YAFFS_TRACE_BUFFERS},
+ {"bug", YAFFS_TRACE_BUG},
+ {"checkpt", YAFFS_TRACE_CHECKPOINT},
+ {"deletion", YAFFS_TRACE_DELETION},
+ {"erase", YAFFS_TRACE_ERASE},
+ {"error", YAFFS_TRACE_ERROR},
+ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
+ {"gc", YAFFS_TRACE_GC},
+ {"lock", YAFFS_TRACE_LOCK},
+ {"mtd", YAFFS_TRACE_MTD},
+ {"nandaccess", YAFFS_TRACE_NANDACCESS},
+ {"os", YAFFS_TRACE_OS},
+ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+ {"scan", YAFFS_TRACE_SCAN},
+ {"mount", YAFFS_TRACE_MOUNT},
+ {"tracing", YAFFS_TRACE_TRACING},
+ {"sync", YAFFS_TRACE_SYNC},
+ {"write", YAFFS_TRACE_WRITE},
+ {"verify", YAFFS_TRACE_VERIFY},
+ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
+ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
+ {"all", 0xffffffff},
+ {"none", 0},
+ {NULL, 0},
+};
+
+#define MAX_MASK_NAME_LENGTH 40
+/* Parse a trace-mask command written to /proc/yaffs. Tokens are either
+ * numeric values or names from mask_flags[], optionally prefixed with
+ * '+' (set bits), '-' (clear bits) or '=' (assign); no prefix behaves
+ * like '+'. YAFFS_TRACE_ALWAYS is always kept set in the final mask.
+ * Returns 'count' (the whole write is consumed). */
+static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ unsigned rg = 0, mask_bitfield;
+ char *end;
+ char *mask_name;
+ const char *x;
+ char substring[MAX_MASK_NAME_LENGTH + 1];
+ int i;
+ int done = 0;
+ int add, len = 0;
+ int pos = 0;
+
+ rg = yaffs_trace_mask;
+
+ while (!done && (pos < count)) {
+ done = 1;
+ while ((pos < count) && isspace(buf[pos]))
+ pos++;
+
+ switch (buf[pos]) {
+ case '+':
+ case '-':
+ case '=':
+ add = buf[pos];
+ pos++;
+ break;
+
+ default:
+ add = ' ';
+ break;
+ }
+ mask_name = NULL;
+
+ /* Try a numeric token first... */
+ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+
+ if (end > buf + pos) {
+ mask_name = "numeral";
+ len = end - (buf + pos);
+ pos += len;
+ done = 0;
+ } else {
+ /* ...otherwise match a [a-z_] name against the table. */
+ for (x = buf + pos, i = 0;
+ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+ substring[i] = *x;
+ substring[i] = '\0';
+
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ if (strcmp(substring, mask_flags[i].mask_name)
+ == 0) {
+ mask_name = mask_flags[i].mask_name;
+ mask_bitfield =
+ mask_flags[i].mask_bitfield;
+ done = 0;
+ break;
+ }
+ }
+ }
+
+ if (mask_name != NULL) {
+ done = 0;
+ switch (add) {
+ case '-':
+ rg &= ~mask_bitfield;
+ break;
+ case '+':
+ rg |= mask_bitfield;
+ break;
+ case '=':
+ rg = mask_bitfield;
+ break;
+ default:
+ rg |= mask_bitfield;
+ break;
+ }
+ }
+ }
+
+ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
+
+ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
+
+ /* Echo the resulting per-flag state for the user. */
+ if (rg & YAFFS_TRACE_ALWAYS) {
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ char flag;
+ flag = ((rg & mask_flags[i].mask_bitfield) ==
+ mask_flags[i].mask_bitfield) ? '+' : '-';
+ printk(KERN_DEBUG "%c%s\n", flag,
+ mask_flags[i].mask_name);
+ }
+ }
+
+ return count;
+}
+
+/* write_proc entry point for /proc/yaffs: delegates to the trace-mask
+ * option parser. */
+static int yaffs_proc_write(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ return yaffs_proc_write_trace_options(file, buf, count, data);
+}
+
+/* Stuff to handle installation of file systems */
+struct file_system_to_install {
+ struct file_system_type *fst;
+ int installed; /* set once register_filesystem() succeeded */
+};
+
+/* NULL-terminated list of filesystem types this module registers. */
+static struct file_system_to_install fs_to_install[] = {
+ {&yaffs_fs_type, 0},
+ {&yaffs2_fs_type, 0},
+ {NULL, 0}
+};
+
+/* Module init: create /proc/yaffs (legacy read_proc/write_proc API,
+ * valid for this 3.0-era kernel) and register both filesystem types.
+ * On any registration failure, unregisters whatever was installed and
+ * returns the error. */
+static int __init init_yaffs_fs(void)
+{
+ int error = 0;
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs built " __DATE__ " " __TIME__ " Installing.");
+
+ mutex_init(&yaffs_context_lock);
+
+ /* Install the proc_fs entries */
+ my_proc_entry = create_proc_entry("yaffs",
+ S_IRUGO | S_IFREG, YPROC_ROOT);
+
+ if (my_proc_entry) {
+ my_proc_entry->write_proc = yaffs_proc_write;
+ my_proc_entry->read_proc = yaffs_proc_read;
+ my_proc_entry->data = NULL;
+ } else {
+ return -ENOMEM;
+ }
+
+ /* Now add the file system entries */
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst && !error) {
+ error = register_filesystem(fsinst->fst);
+ if (!error)
+ fsinst->installed = 1;
+ fsinst++;
+ }
+
+ /* Any errors? uninstall */
+ if (error) {
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+ }
+
+ return error;
+}
+
+/* Module exit: remove /proc/yaffs and unregister every filesystem type
+ * that init_yaffs_fs managed to install. */
+static void __exit exit_yaffs_fs(void)
+{
+
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs built " __DATE__ " " __TIME__ " removing.");
+
+ remove_proc_entry("yaffs", YPROC_ROOT);
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+}
+
+module_init(init_yaffs_fs)
+ module_exit(exit_yaffs_fs)
+
+ MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2011");
+MODULE_LICENSE("GPL");
diff --git a/fs/yaffs2/yaffs_yaffs1.c b/fs/yaffs2/yaffs_yaffs1.c
new file mode 100755
index 00000000..d277e20e
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.c
@@ -0,0 +1,422 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_yaffs1.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+#include "yaffs_attribs.h"
+
+int yaffs1_scan(struct yaffs_dev *dev)
+{
+ struct yaffs_ext_tags tags;
+ int blk;
+ int result;
+ int chunk;
+ int c;
+ int deleted;
+ enum yaffs_block_state state;
+ LIST_HEAD(hard_list);
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int alloc_failed = 0;
+ struct yaffs_shadow_fixer *shadow_fixers = NULL;
+ u8 *chunk_data;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs1_scan starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ chunk_data = yaffs_get_temp_buffer(dev);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, state, seq_number);
+
+ if (state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ }
+ bi++;
+ }
+
+ /* For each block.... */
+ for (blk = dev->internal_start_block;
+ !alloc_failed && blk <= dev->internal_end_block; blk++) {
+
+ cond_resched();
+
+ bi = yaffs_get_block_info(dev, blk);
+ state = bi->block_state;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ for (c = 0;
+ !alloc_failed && c < dev->param.chunks_per_block &&
+ state == YAFFS_BLOCK_STATE_NEEDS_SCAN; c++) {
+ /* Read the tags and decide what to do */
+ chunk = blk * dev->param.chunks_per_block + c;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED ||
+ tags.is_deleted) {
+ /* YAFFS1 only...
+ * A deleted chunk
+ */
+ deleted++;
+ dev->n_free_chunks++;
+ } else if (!tags.chunk_used) {
+ /* An unassigned chunk in the block
+ * This means that either the block is empty or
+ * this is the one being allocated from
+ */
+
+ if (c == 0) {
+ /* We're looking at the first chunk in
+ * the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ /* this is the block being allocated */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, c);
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = c;
+ dev->alloc_block_finder = blk;
+
+ }
+
+ dev->n_free_chunks +=
+ (dev->param.chunks_per_block - c);
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ unsigned int endpos;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ /* PutChunkIntoFile checks for a clash
+ * (two data chunks with the same chunk_id).
+ */
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in) {
+ if (!yaffs_put_chunk_in_file
+ (in, tags.chunk_id, chunk, 1))
+ alloc_failed = 1;
+ }
+
+ endpos =
+ (tags.chunk_id - 1) *
+ dev->data_bytes_per_chunk +
+ tags.n_bytes;
+ if (in &&
+ in->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE &&
+ in->variant.file_variant.scanned_size <
+ endpos) {
+ in->variant.file_variant.scanned_size =
+ endpos;
+ if (!dev->param.use_header_file_size) {
+ in->variant.
+ file_variant.file_size =
+ in->variant.
+ file_variant.scanned_size;
+ }
+
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Make the object
+ */
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ in = yaffs_find_by_number(dev, tags.obj_id);
+ if (in && in->variant_type != oh->type) {
+ /* This should not happen, but somehow
+ * We've ended up with an obj_id that
+ * has been reused but not yet deleted,
+ * and worse still it has changed type.
+ * Delete the old object.
+ */
+
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ oh->type);
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in && oh->shadows_obj > 0) {
+
+ struct yaffs_shadow_fixer *fixer;
+ fixer =
+ kmalloc(sizeof
+ (struct yaffs_shadow_fixer),
+ GFP_NOFS);
+ if (fixer) {
+ fixer->next = shadow_fixers;
+ shadow_fixers = fixer;
+ fixer->obj_id = tags.obj_id;
+ fixer->shadowed_id =
+ oh->shadows_obj;
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Shadow fixer: %d shadows %d",
+ fixer->obj_id,
+ fixer->shadowed_id);
+
+ }
+
+ }
+
+ if (in && in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate and need to
+ * resolve it. */
+
+ unsigned existing_serial = in->serial;
+ unsigned new_serial =
+ tags.serial_number;
+
+ if (((existing_serial + 1) & 3) ==
+ new_serial) {
+ /* Use new one - destroy the
+ * existing one */
+ yaffs_chunk_del(dev,
+ in->hdr_chunk,
+ 1, __LINE__);
+ in->valid = 0;
+ } else {
+ /* Use existing - destroy
+ * this one. */
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+ }
+ }
+
+ if (in && !in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle
+ * with directory structure */
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ } else if (in && !in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ in->dirty = 0;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (!parent)
+ alloc_failed = 1;
+ if (parent && parent->variant_type ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.
+ children);
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, a problem....
+ * We're trying to use a
+ * non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (dev->param.
+ use_header_file_size)
+ in->variant.
+ file_variant.file_size
+ = yaffs_oh_to_size(oh);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.
+ hardlink_variant.equiv_id =
+ oh->equiv_id;
+ list_add(&in->hard_links,
+ &hard_list);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.
+ alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.
+ symlink_variant.alias)
+ alloc_failed = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* If we got this far while scanning,
+ * then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ /* If the block was partially allocated then
+ * treat it as fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ bi->block_state = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ yaffs_block_became_dirty(dev, blk);
+ }
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We should now have scanned all the objects, now it's time to add
+ * these hardlinks.
+ */
+
+ yaffs_link_fixup(dev, &hard_list);
+
+ /*
+ * Fix up any shadowed objects.
+ * There should not be more than one of these.
+ */
+ {
+ struct yaffs_shadow_fixer *fixer;
+ struct yaffs_obj *obj;
+
+ while (shadow_fixers) {
+ fixer = shadow_fixers;
+ shadow_fixers = fixer->next;
+ /* Complete the rename transaction by deleting the
+ * shadowed object then setting the object header
+ to unshadowed.
+ */
+ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
+ if (obj)
+ yaffs_del_obj(obj);
+
+ obj = yaffs_find_by_number(dev, fixer->obj_id);
+
+ if (obj)
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ kfree(fixer);
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, chunk_data);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs1.h b/fs/yaffs2/yaffs_yaffs1.h
new file mode 100755
index 00000000..97e2fdd0
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS1_H__
+#define __YAFFS_YAFFS1_H__
+
+#include "yaffs_guts.h"
+int yaffs1_scan(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_yaffs2.c b/fs/yaffs2/yaffs_yaffs2.c
new file mode 100755
index 00000000..f1dc9722
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.c
@@ -0,0 +1,1532 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_checkptrw.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_verify.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/*
+ * Checkpoints are really no benefit on very small partitions.
+ *
+ * To save space on small partitions don't bother with checkpoints unless
+ * the partition is at least this big.
+ */
+#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
+#define YAFFS_SMALL_HOLE_THRESHOLD 4
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_calc_oldest_dirty_seq()
+ * yaffs2_find_oldest_dirty_seq()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ int i;
+ unsigned seq;
+ unsigned block_no = 0;
+ struct yaffs_block_info *b;
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ /* Find the oldest dirty sequence number. */
+ seq = dev->seq_number + 1;
+ b = dev->block_info;
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
+ (b->pages_in_use - b->soft_del_pages) <
+ dev->param.chunks_per_block &&
+ b->seq_number < seq) {
+ seq = b->seq_number;
+ block_no = i;
+ }
+ b++;
+ }
+
+ if (block_no) {
+ dev->oldest_dirty_seq = seq;
+ dev->oldest_dirty_block = block_no;
+ }
+}
+
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->oldest_dirty_seq)
+ yaffs_calc_oldest_dirty_seq(dev);
+}
+
+/*
+ * yaffs_clear_oldest_dirty_seq()
+ * Called when a block is erased or marked bad. (ie. when its seq_number
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldest_dirty_seq to force its recomputation.
+ */
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+ }
+}
+
+/*
+ * yaffs2_update_oldest_dirty_seq()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldest_dirty_seq is actually being tracked.
+ */
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (dev->oldest_dirty_seq) {
+ if (dev->oldest_dirty_seq > bi->seq_number) {
+ dev->oldest_dirty_seq = bi->seq_number;
+ dev->oldest_dirty_block = block_no;
+ }
+ }
+}
+
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return 1; /* disqualification only applies to yaffs2. */
+
+ if (!bi->has_shrink_hdr)
+ return 1; /* can gc */
+
+ yaffs2_find_oldest_dirty_seq(dev);
+
+ /* Can't do gc of this block if there are any blocks older than this
+ * one that have discarded pages.
+ */
+ return (bi->seq_number <= dev->oldest_dirty_seq);
+}
+
+/*
+ * yaffs2_find_refresh_block()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev)
+{
+ u32 b;
+ u32 oldest = 0;
+ u32 oldest_seq = 0;
+ struct yaffs_block_info *bi;
+
+ if (!dev->param.is_yaffs2)
+ return oldest;
+
+ /*
+ * If refresh period < 10 then refreshing is disabled.
+ */
+ if (dev->param.refresh_period < 10)
+ return oldest;
+
+ /*
+ * Fix broken values.
+ */
+ if (dev->refresh_skip > dev->param.refresh_period)
+ dev->refresh_skip = dev->param.refresh_period;
+
+ if (dev->refresh_skip > 0)
+ return oldest;
+
+ /*
+ * Refresh skip is now zero.
+ * We'll do a refresh this time around....
+ * Update the refresh skip and find the oldest block.
+ */
+ dev->refresh_skip = dev->param.refresh_period;
+ dev->refresh_count++;
+ bi = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+
+ if (oldest < 1 || bi->seq_number < oldest_seq) {
+ oldest = b;
+ oldest_seq = bi->seq_number;
+ }
+ }
+ bi++;
+ }
+
+ if (oldest > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC refresh count %d selected block %d with seq_number %d",
+ dev->refresh_count, oldest, oldest_seq);
+ }
+
+ return oldest;
+}
+
+int yaffs2_checkpt_required(struct yaffs_dev *dev)
+{
+ int nblocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ nblocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ return !dev->param.skip_checkpt_wr &&
+ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
+}
+
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
+{
+ int retval;
+ int n_bytes = 0;
+ int n_blocks;
+ int dev_blocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
+ /* Not a valid value so recalculate */
+ dev_blocks = dev->param.end_block - dev->param.start_block + 1;
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(struct yaffs_checkpt_dev);
+ n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
+ n_bytes += dev_blocks * dev->chunk_bit_stride;
+ n_bytes +=
+ (sizeof(struct yaffs_checkpt_obj) + sizeof(u32)) *
+ dev->n_obj;
+ n_bytes += (dev->tnode_size + sizeof(u32)) * dev->n_tnodes;
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(u32); /* checksum */
+
+ /* Round up and add 2 blocks to allow for some bad blocks,
+ * so add 3 */
+
+ n_blocks =
+ (n_bytes /
+ (dev->data_bytes_per_chunk *
+ dev->param.chunks_per_block)) + 3;
+
+ dev->checkpoint_blocks_required = n_blocks;
+ }
+
+ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
+ if (retval < 0)
+ retval = 0;
+ return retval;
+}
+
+/*--------------------- Checkpointing --------------------*/
+
+static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.struct_type = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ if (ok)
+ ok = (cp.struct_type == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+ (cp.head == ((head) ? 1 : 0));
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
+ struct yaffs_dev *dev)
+{
+ cp->n_erased_blocks = dev->n_erased_blocks;
+ cp->alloc_block = dev->alloc_block;
+ cp->alloc_page = dev->alloc_page;
+ cp->n_free_chunks = dev->n_free_chunks;
+
+ cp->n_deleted_files = dev->n_deleted_files;
+ cp->n_unlinked_files = dev->n_unlinked_files;
+ cp->n_bg_deletions = dev->n_bg_deletions;
+ cp->seq_number = dev->seq_number;
+
+}
+
+static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
+ struct yaffs_checkpt_dev *cp)
+{
+ dev->n_erased_blocks = cp->n_erased_blocks;
+ dev->alloc_block = cp->alloc_block;
+ dev->alloc_page = cp->alloc_page;
+ dev->n_free_chunks = cp->n_free_chunks;
+
+ dev->n_deleted_files = cp->n_deleted_files;
+ dev->n_unlinked_files = cp->n_unlinked_files;
+ dev->n_bg_deletions = cp->n_bg_deletions;
+ dev->seq_number = cp->seq_number;
+}
+
+static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ int ok;
+
+ /* Write device runtime values */
+ yaffs2_dev_to_checkpt_dev(&cp, dev);
+ cp.struct_type = sizeof(cp);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ /* Write block info */
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
+ if (!ok)
+ return 0;
+
+ /* Write chunk bits */
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ if (cp.struct_type != sizeof(cp))
+ return 0;
+
+ yaffs_checkpt_dev_to_dev(dev, &cp);
+
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+
+ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
+
+ if (!ok)
+ return 0;
+
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+
+ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
+ struct yaffs_obj *obj)
+{
+ cp->obj_id = obj->obj_id;
+ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
+ cp->hdr_chunk = obj->hdr_chunk;
+ cp->variant_type = obj->variant_type;
+ cp->deleted = obj->deleted;
+ cp->soft_del = obj->soft_del;
+ cp->unlinked = obj->unlinked;
+ cp->fake = obj->fake;
+ cp->rename_allowed = obj->rename_allowed;
+ cp->unlink_allowed = obj->unlink_allowed;
+ cp->serial = obj->serial;
+ cp->n_data_chunks = obj->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
+}
+
+static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
+ struct yaffs_checkpt_obj *cp)
+{
+ struct yaffs_obj *parent;
+
+ if (obj->variant_type != cp->variant_type) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Checkpoint read object %d type %d chunk %d does not match existing object type %d",
+ cp->obj_id, cp->variant_type, cp->hdr_chunk,
+ obj->variant_type);
+ return 0;
+ }
+
+ obj->obj_id = cp->obj_id;
+
+ if (cp->parent_id)
+ parent = yaffs_find_or_create_by_number(obj->my_dev,
+ cp->parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ else
+ parent = NULL;
+
+ if (parent) {
+ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
+ cp->obj_id, cp->parent_id,
+ cp->variant_type, cp->hdr_chunk,
+ parent->variant_type);
+ return 0;
+ }
+ yaffs_add_obj_to_dir(parent, obj);
+ }
+
+ obj->hdr_chunk = cp->hdr_chunk;
+ obj->variant_type = cp->variant_type;
+ obj->deleted = cp->deleted;
+ obj->soft_del = cp->soft_del;
+ obj->unlinked = cp->unlinked;
+ obj->fake = cp->fake;
+ obj->rename_allowed = cp->rename_allowed;
+ obj->unlink_allowed = cp->unlink_allowed;
+ obj->serial = cp->serial;
+ obj->n_data_chunks = cp->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
+
+ if (obj->hdr_chunk > 0)
+ obj->lazy_loaded = 1;
+ return 1;
+}
+
+static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
+ struct yaffs_tnode *tn, u32 level,
+ int chunk_offset)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+ int ok = 1;
+ u32 base_offset;
+
+ if (!tn)
+ return 1;
+
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (!tn->internal[i])
+ continue;
+ ok = yaffs2_checkpt_tnode_worker(in,
+ tn->internal[i],
+ level - 1,
+ (chunk_offset <<
+ YAFFS_TNODES_INTERNAL_BITS) + i);
+ }
+ return ok;
+ }
+
+ /* Level 0 tnode */
+ base_offset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
+ ok = (yaffs2_checkpt_wr(dev, &base_offset, sizeof(base_offset)) ==
+ sizeof(base_offset));
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+
+ return ok;
+}
+
+static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 end_marker = ~0;
+ int ok = 1;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return ok;
+
+ ok = yaffs2_checkpt_tnode_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.file_variant.
+ top_level, 0);
+ if (ok)
+ ok = (yaffs2_checkpt_wr(obj->my_dev, &end_marker,
+ sizeof(end_marker)) == sizeof(end_marker));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 base_chunk;
+ int ok = 1;
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_file_var *file_stuct_ptr = &obj->variant.file_variant;
+ struct yaffs_tnode *tn;
+ int nread = 0;
+
+ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
+ sizeof(base_chunk));
+
+ while (ok && (~base_chunk)) {
+ nread++;
+ /* Read level 0 tnode */
+
+ tn = yaffs_get_tnode(dev);
+ if (tn)
+ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+ else
+ ok = 0;
+
+ if (tn && ok)
+ ok = yaffs_add_find_tnode_0(dev,
+ file_stuct_ptr,
+ base_chunk, tn) ? 1 : 0;
+
+ if (ok)
+ ok = (yaffs2_checkpt_rd
+ (dev, &base_chunk,
+ sizeof(base_chunk)) == sizeof(base_chunk));
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read tnodes %d records, last %d. ok %d",
+ nread, base_chunk, ok);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int i;
+ int ok = 1;
+ struct list_head *lh;
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ if (!obj->defered_free) {
+ yaffs2_obj_checkpt_obj(&cp, obj);
+ cp.struct_type = sizeof(cp);
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
+ cp.obj_id, cp.parent_id,
+ cp.variant_type, cp.hdr_chunk, obj);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp,
+ sizeof(cp)) == sizeof(cp));
+
+ if (ok &&
+ obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE)
+ ok = yaffs2_wr_checkpt_tnodes(obj);
+ }
+ }
+ }
+
+ /* Dump end of list */
+ memset(&cp, 0xff, sizeof(struct yaffs_checkpt_obj));
+ cp.struct_type = sizeof(cp);
+
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int ok = 1;
+ int done = 0;
+ LIST_HEAD(hard_list);
+
+
+ while (ok && !done) {
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (cp.struct_type != sizeof(cp)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "struct size %d instead of %d ok %d",
+ cp.struct_type, (int)sizeof(cp), ok);
+ ok = 0;
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read object %d parent %d type %d chunk %d ",
+ cp.obj_id, cp.parent_id, cp.variant_type,
+ cp.hdr_chunk);
+
+ if (ok && cp.obj_id == ~0) {
+ done = 1;
+ } else if (ok) {
+ obj =
+ yaffs_find_or_create_by_number(dev, cp.obj_id,
+ cp.variant_type);
+ if (obj) {
+ ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
+ if (!ok)
+ break;
+ if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_rd_checkpt_tnodes(obj);
+ } else if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_HARDLINK) {
+ list_add(&obj->hard_links, &hard_list);
+ }
+ } else {
+ ok = 0;
+ }
+ }
+ }
+
+ if (ok)
+ yaffs_link_fixup(dev, &hard_list);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
+
+ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
+ sizeof(checkpt_sum));
+
+ if (!ok)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum0;
+ u32 checkpt_sum1;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
+
+ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
+ sizeof(checkpt_sum1));
+
+ if (!ok)
+ return 0;
+
+ if (checkpt_sum0 != checkpt_sum1)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!yaffs2_checkpt_required(dev)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint write");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 1);
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint device");
+ ok = yaffs2_wr_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint objects");
+ ok = yaffs2_wr_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok)
+ ok = yaffs2_wr_checkpt_sum(dev);
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return dev->is_checkpointed;
+}
+
+static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!dev->param.is_yaffs2)
+ ok = 0;
+
+ if (ok && dev->param.skip_checkpt_rd) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint read");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint device");
+ ok = yaffs2_rd_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint objects");
+ ok = yaffs2_rd_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok) {
+ ok = yaffs2_rd_checkpt_sum(dev);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint checksum %d", ok);
+ }
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return ok ? 1 : 0;
+}
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
+{
+ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
+ dev->is_checkpointed = 0;
+ yaffs2_checkpt_invalidate_stream(dev);
+ }
+ if (dev->param.sb_dirty_fn)
+ dev->param.sb_dirty_fn(dev);
+}
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "save entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+
+ if (!dev->is_checkpointed) {
+ yaffs2_checkpt_invalidate(dev);
+ yaffs2_wr_checkpt_data(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "save exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return dev->is_checkpointed;
+}
+
+int yaffs2_checkpt_restore(struct yaffs_dev *dev)
+{
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ retval = yaffs2_rd_checkpt_data(dev);
+
+ if (dev->is_checkpointed) {
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return retval;
+}
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
+{
+ /* if new_size > old_file_size.
+ * We're going to be writing a hole.
+ * If the hole is small then write zeros otherwise write a start
+ * of hole marker.
+ */
+ loff_t old_file_size;
+ loff_t increase;
+ int small_hole;
+ int result = YAFFS_OK;
+ struct yaffs_dev *dev = NULL;
+ u8 *local_buffer = NULL;
+ int small_increase_ok = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ dev = obj->my_dev;
+
+ /* Bail out if not yaffs2 mode */
+ if (!dev->param.is_yaffs2)
+ return YAFFS_OK;
+
+ old_file_size = obj->variant.file_variant.file_size;
+
+ if (new_size <= old_file_size)
+ return YAFFS_OK;
+
+ increase = new_size - old_file_size;
+
+ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
+ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
+ small_hole = 1;
+ else
+ small_hole = 0;
+
+ if (small_hole)
+ local_buffer = yaffs_get_temp_buffer(dev);
+
+ if (local_buffer) {
+ /* fill hole with zero bytes */
+ loff_t pos = old_file_size;
+ int this_write;
+ int written;
+ memset(local_buffer, 0, dev->data_bytes_per_chunk);
+ small_increase_ok = 1;
+
+ while (increase > 0 && small_increase_ok) {
+ this_write = increase;
+ if (this_write > dev->data_bytes_per_chunk)
+ this_write = dev->data_bytes_per_chunk;
+ written =
+ yaffs_do_file_wr(obj, local_buffer, pos, this_write,
+ 0);
+ if (written == this_write) {
+ pos += this_write;
+ increase -= this_write;
+ } else {
+ small_increase_ok = 0;
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+
+ /* If out of space then reverse any chunks we've added */
+ if (!small_increase_ok)
+ yaffs_resize_file_down(obj, old_file_size);
+ }
+
+ if (!small_increase_ok &&
+ obj->parent &&
+ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
+ /* Write a hole start header with the old file size */
+ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
+ }
+
+ return result;
+}
+
+struct yaffs_block_index {
+ int seq;
+ int block;
+};
+
+static int yaffs2_ybicmp(const void *a, const void *b)
+{
+ int aseq = ((struct yaffs_block_index *)a)->seq;
+ int bseq = ((struct yaffs_block_index *)b)->seq;
+ int ablock = ((struct yaffs_block_index *)a)->block;
+ int bblock = ((struct yaffs_block_index *)b)->block;
+
+ if (aseq == bseq)
+ return ablock - bblock;
+
+ return aseq - bseq;
+}
+
+static inline int yaffs2_scan_chunk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi,
+ int blk, int chunk_in_block,
+ int *found_chunks,
+ u8 *chunk_data,
+ struct list_head *hard_list,
+ int summary_available)
+{
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int equiv_id;
+ loff_t file_size;
+ int is_shrink;
+ int is_unlinked;
+ struct yaffs_ext_tags tags;
+ int result;
+ int alloc_failed = 0;
+ int chunk = blk * dev->param.chunks_per_block + chunk_in_block;
+ struct yaffs_file_var *file_var;
+ struct yaffs_hardlink_var *hl_var;
+ struct yaffs_symlink_var *sl_var;
+
+ if (summary_available) {
+ result = yaffs_summary_fetch(dev, &tags, chunk_in_block);
+ tags.seq_number = bi->seq_number;
+ }
+
+ if (!summary_available || tags.obj_id == 0) {
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
+ dev->tags_used++;
+ } else {
+ dev->summary_used++;
+ }
+
+ /* Let's have a good look at this chunk... */
+
+ if (!tags.chunk_used) {
+ /* An unassigned chunk in the block.
+ * If there are used chunks after this one, then
+ * it is a chunk that was skipped due to failing
+ * the erased check. Just skip it so that it can
+ * be deleted.
+ * But, more typically, we get here when this is
+ * an unallocated chunk and this means that
+ * either the block is empty or this is the one
+ * being allocated from
+ */
+
+ if (*found_chunks) {
+ /* This is a chunk that was skipped due
+ * to failing the erased check */
+ } else if (chunk_in_block == 0) {
+ /* We're looking at the first chunk in
+ * the block so the block is unused */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ if (dev->seq_number == bi->seq_number) {
+ /* Allocating from this block*/
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, chunk_in_block);
+
+ bi->block_state =
+ YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = chunk_in_block;
+ dev->alloc_block_finder = blk;
+ } else {
+ /* This is a partially written block
+ * that is not the current
+ * allocation block.
+ */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Partially written block %d detected. gc will fix this.",
+ blk);
+ }
+ }
+ }
+
+ dev->n_free_chunks++;
+
+ } else if (tags.ecc_result ==
+ YAFFS_ECC_RESULT_UNFIXED) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Unfixed ECC in chunk(%d:%d), chunk ignored",
+ blk, chunk_in_block);
+ dev->n_free_chunks++;
+ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
+ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
+ tags.obj_id == YAFFS_OBJECTID_SUMMARY ||
+ (tags.chunk_id > 0 &&
+ tags.n_bytes > dev->data_bytes_per_chunk) ||
+ tags.seq_number != bi->seq_number) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
+ blk, chunk_in_block, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+ dev->n_free_chunks++;
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ loff_t endpos;
+ loff_t chunk_base = (tags.chunk_id - 1) *
+ dev->data_bytes_per_chunk;
+
+ *found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ if (!in)
+ /* Out of memory */
+ alloc_failed = 1;
+
+ if (in &&
+ in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
+ chunk_base < in->variant.file_variant.shrink_size) {
+ /* This has not been invalidated by
+ * a resize */
+ if (!yaffs_put_chunk_in_file(in, tags.chunk_id,
+ chunk, -1))
+ alloc_failed = 1;
+
+ /* File size is calculated by looking at
+ * the data chunks if we have not
+ * seen an object header yet.
+ * Stop this practice once we find an
+ * object header.
+ */
+ endpos = chunk_base + tags.n_bytes;
+
+ if (!in->valid &&
+ in->variant.file_variant.scanned_size < endpos) {
+ in->variant.file_variant.
+ scanned_size = endpos;
+ in->variant.file_variant.
+ file_size = endpos;
+ }
+ } else if (in) {
+ /* This chunk has been invalidated by a
+ * resize, or a past file deletion
+ * so delete the chunk*/
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make
+ * the object
+ */
+ *found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+
+ oh = NULL;
+ in = NULL;
+
+ if (tags.extra_available) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ tags.extra_obj_type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ if (!in ||
+ (!in->valid && dev->param.disable_lazy_load) ||
+ tags.extra_shadows ||
+ (!in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+ /* If we don't have valid info then we
+ * need to read the chunk
+ * TODO In future we can probably defer
+ * reading the chunk and living with
+ * invalid data until needed.
+ */
+
+ result = yaffs_rd_chunk_tags_nand(dev,
+ chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ if (dev->param.inband_tags) {
+ /* Fix up the header if they got
+ * corrupted by inband tags */
+ oh->shadows_obj =
+ oh->inband_shadowed_obj_id;
+ oh->is_shrink =
+ oh->inband_is_shrink;
+ }
+
+ if (!in) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id, oh->type);
+ if (!in)
+ alloc_failed = 1;
+ }
+ }
+
+ if (!in) {
+ /* TODO Hoosterman we have a problem! */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Could not make object for object %d at chunk %d during scan",
+ tags.obj_id, chunk);
+ return YAFFS_FAIL;
+ }
+
+ if (in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate that will be
+ * discarded, but we first have to suck
+ * out resize info if it is a file.
+ */
+ if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
+ ((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+ (tags.extra_available &&
+ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ )) {
+ loff_t this_size = (oh) ?
+ yaffs_oh_to_size(oh) :
+ tags.extra_file_size;
+ u32 parent_obj_id = (oh) ?
+ oh->parent_obj_id :
+ tags.extra_parent_id;
+
+ is_shrink = (oh) ?
+ oh->is_shrink :
+ tags.extra_is_shrink;
+
+ /* If it is deleted (unlinked
+ * at start also means deleted)
+ * we treat the file size as
+ * being zeroed at this point.
+ */
+ if (parent_obj_id == YAFFS_OBJECTID_DELETED ||
+ parent_obj_id == YAFFS_OBJECTID_UNLINKED) {
+ this_size = 0;
+ is_shrink = 1;
+ }
+
+ if (is_shrink &&
+ in->variant.file_variant.shrink_size >
+ this_size)
+ in->variant.file_variant.shrink_size =
+ this_size;
+
+ if (is_shrink)
+ bi->has_shrink_hdr = 1;
+ }
+ /* Use existing - destroy this one. */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ }
+
+ if (!in->valid && in->variant_type !=
+ (oh ? oh->type : tags.extra_obj_type))
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Bad object type, %d != %d, for object %d at chunk %d during scan",
+ oh ? oh->type : tags.extra_obj_type,
+ in->variant_type, tags.obj_id,
+ chunk);
+
+ if (!in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle
+ * with directory structure */
+ in->valid = 1;
+
+ if (oh) {
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->lazy_loaded = 0;
+ } else {
+ in->lazy_loaded = 1;
+ }
+ in->hdr_chunk = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+ in->valid = 1;
+ in->hdr_chunk = chunk;
+ if (oh) {
+ in->variant_type = oh->type;
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+
+ if (oh->shadows_obj > 0)
+ yaffs_handle_shadowed_obj(dev,
+ oh->shadows_obj, 1);
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ parent = yaffs_find_or_create_by_number(dev,
+ oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = yaffs_oh_to_size(oh);
+ is_shrink = oh->is_shrink;
+ equiv_id = oh->equiv_id;
+ } else {
+ in->variant_type = tags.extra_obj_type;
+ parent = yaffs_find_or_create_by_number(dev,
+ tags.extra_parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = tags.extra_file_size;
+ is_shrink = tags.extra_is_shrink;
+ equiv_id = tags.extra_equiv_id;
+ in->lazy_loaded = 1;
+ }
+ in->dirty = 0;
+
+ if (!parent)
+ alloc_failed = 1;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ if (parent &&
+ parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.children);
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * Trying to use a non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+ yaffs_add_obj_to_dir(parent, in);
+
+ is_unlinked = (parent == dev->del_dir) ||
+ (parent == dev->unlinked_dir);
+
+ if (is_shrink)
+ /* Mark the block */
+ bi->has_shrink_hdr = 1;
+
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent
+ * object is scanned we put them all in a list.
+ * After scanning is complete, we should have all the
+ * objects, so we run through this list and fix up all
+ * the chains.
+ */
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ file_var = &in->variant.file_variant;
+ if (file_var->scanned_size < file_size) {
+ /* This covers the case where the file
+ * size is greater than the data held.
+ * This will happen if the file is
+ * resized to be larger than its
+ * current data extents.
+ */
+ file_var->file_size = file_size;
+ file_var->scanned_size = file_size;
+ }
+
+ if (file_var->shrink_size > file_size)
+ file_var->shrink_size = file_size;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ hl_var = &in->variant.hardlink_variant;
+ if (!is_unlinked) {
+ hl_var->equiv_id = equiv_id;
+ list_add(&in->hard_links, hard_list);
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ sl_var = &in->variant.symlink_variant;
+ if (oh) {
+ sl_var->alias =
+ yaffs_clone_str(oh->alias);
+ if (!sl_var->alias)
+ alloc_failed = 1;
+ }
+ break;
+ }
+ }
+ }
+ return alloc_failed ? YAFFS_FAIL : YAFFS_OK;
+}
+
+int yaffs2_scan_backwards(struct yaffs_dev *dev)
+{
+ int blk;
+ int block_iter;
+ int start_iter;
+ int end_iter;
+ int n_to_scan = 0;
+ enum yaffs_block_state state;
+ int c;
+ int deleted;
+ LIST_HEAD(hard_list);
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ u8 *chunk_data;
+ int found_chunks;
+ int alloc_failed = 0;
+ struct yaffs_block_index *block_index = NULL;
+ int alt_block_index = 0;
+ int summary_available;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ block_index =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_index), GFP_NOFS);
+
+ if (!block_index) {
+ block_index =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_index));
+ alt_block_index = 1;
+ }
+
+ if (!block_index) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards() could not allocate block index!"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ chunk_data = yaffs_get_temp_buffer(dev);
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, bi->block_state, seq_number);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dev->blocks_in_checkpt++;
+
+ } else if (bi->block_state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ } else if (bi->block_state ==
+ YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* Determine the highest sequence number */
+ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+ block_index[n_to_scan].seq = seq_number;
+ block_index[n_to_scan].block = blk;
+ n_to_scan++;
+ if (seq_number >= dev->seq_number)
+ dev->seq_number = seq_number;
+ } else {
+ /* TODO: Nasty sequence number! */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Block scanning block %d has bad sequence number %d",
+ blk, seq_number);
+ }
+ }
+ bi++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "%d blocks to be sorted...", n_to_scan);
+
+ cond_resched();
+
+ /* Sort the blocks by sequence number */
+ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
+ yaffs2_ybicmp, NULL);
+
+ cond_resched();
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "...done");
+
+ /* Now scan the blocks looking at the data. */
+ start_iter = 0;
+ end_iter = n_to_scan - 1;
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
+
+ /* For each block.... backwards */
+ for (block_iter = end_iter;
+ !alloc_failed && block_iter >= start_iter;
+ block_iter--) {
+ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+ cond_resched();
+
+ /* get the block to scan in the correct order */
+ blk = block_index[block_iter].block;
+ bi = yaffs_get_block_info(dev, blk);
+ deleted = 0;
+
+ summary_available = yaffs_summary_read(dev, dev->sum_tags, blk);
+
+ /* For each chunk in each block that needs scanning.... */
+ found_chunks = 0;
+ if (summary_available)
+ c = dev->chunks_per_summary - 1;
+ else
+ c = dev->param.chunks_per_block - 1;
+
+ for (/* c is already initialised */;
+ !alloc_failed && c >= 0 &&
+ (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING);
+ c--) {
+ /* Scan backwards...
+ * Read the tags and decide what to do
+ */
+ if (yaffs2_scan_chunk(dev, bi, blk, c,
+ &found_chunks, chunk_data,
+ &hard_list, summary_available) ==
+ YAFFS_FAIL)
+ alloc_failed = 1;
+ }
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* If we got this far while scanning, then the block
+ * is fully allocated. */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_block_became_dirty(dev, blk);
+ }
+ }
+
+ yaffs_skip_rest_of_block(dev);
+
+ if (alt_block_index)
+ vfree(block_index);
+ else
+ kfree(block_index);
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We have scanned all the objects, now it's time to add these
+ * hardlinks.
+ */
+ yaffs_link_fixup(dev, &hard_list);
+
+ yaffs_release_temp_buffer(dev, chunk_data);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs2.h b/fs/yaffs2/yaffs_yaffs2.h
new file mode 100755
index 00000000..2363bfd8
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS2_H__
+#define __YAFFS_YAFFS2_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi);
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
+int yaffs2_checkpt_required(struct yaffs_dev *dev);
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
+int yaffs2_checkpt_save(struct yaffs_dev *dev);
+int yaffs2_checkpt_restore(struct yaffs_dev *dev);
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
+int yaffs2_scan_backwards(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yportenv.h b/fs/yaffs2/yportenv.h
new file mode 100755
index 00000000..666d909b
--- /dev/null
+++ b/fs/yaffs2/yportenv.h
@@ -0,0 +1,82 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YPORTENV_H__
+#define __YPORTENV_H__
+
+/*
+ * Define the MTD version in terms of Linux Kernel versions
+ * This allows yaffs to be used independently of the kernel
+ * as well as with it.
+ */
+
+#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+
+#ifdef YAFFS_OUT_OF_TREE
+#include "moduleconfig.h"
+#endif
+
+#include <linux/version.h>
+#define MTD_VERSION_CODE LINUX_VERSION_CODE
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/xattr.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sort.h>
+#include <linux/bitops.h>
+
+/* These type wrappings are used to support Unicode names in WinCE. */
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x) x
+
+#define YAFFS_LOSTNFOUND_NAME "lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+
+#define YAFFS_ROOT_MODE 0755
+#define YAFFS_LOSTNFOUND_MODE 0700
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+#define Y_TIME_CONVERT(x) (x).tv_sec
+#else
+#define Y_CURRENT_TIME CURRENT_TIME
+#define Y_TIME_CONVERT(x) (x)
+#endif
+
+#define compile_time_assertion(assertion) \
+ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+
+#define yaffs_trace(msk, fmt, ...) do { \
+ if (yaffs_trace_mask & (msk)) \
+ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
+} while (0)
+
+
+#endif
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
old mode 100644
new mode 100755
index c5813c87..2a47560b
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -41,6 +41,7 @@ struct af_alg_completion {
struct af_alg_control {
struct af_alg_iv *iv;
int op;
+ struct af_alg_usr_def usr_def;
};
struct af_alg_type {
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 1d37f42a..f49e434e 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -64,6 +64,11 @@ int clk_enable(struct clk *clk);
*/
void clk_disable(struct clk *clk);
+/*
+ * clk_reset
+ */
+void clk_reset(struct clk *clk);
+
/**
* clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
* This is only valid once the clock source has been enabled.
@@ -155,4 +160,6 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id);
int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
struct device *dev);
+void clk_change_parent(struct clk *clk, int select);
+
#endif
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
new file mode 100644
index 00000000..efee4937
--- /dev/null
+++ b/include/linux/compiler-gcc5.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
old mode 100644
new mode 100755
index a6a7a1c8..7e57baed
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/if_alg.h>
/*
* Algorithm masks and types.
@@ -329,6 +330,8 @@ struct ablkcipher_tfm {
unsigned int ivsize;
unsigned int reqsize;
+ /*add usr self data..*/
+ struct af_alg_usr_def usr_def;
};
struct aead_tfm {
@@ -607,6 +610,15 @@ static inline unsigned int crypto_ablkcipher_ivsize(
return crypto_ablkcipher_crt(tfm)->ivsize;
}
+
+
+static inline struct af_alg_usr_def *crypto_ablkcipher_usr_def(
+ struct crypto_ablkcipher *tfm)
+{
+ return &crypto_ablkcipher_crt(tfm)->usr_def;
+}
+
+
static inline unsigned int crypto_ablkcipher_blocksize(
struct crypto_ablkcipher *tfm)
{
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index eee7adde..adff3291 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -18,12 +18,16 @@
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
-#ifndef DMAENGINE_H
-#define DMAENGINE_H
+#ifndef LINUX_DMAENGINE_H
+#define LINUX_DMAENGINE_H
#include <linux/device.h>
#include <linux/uio.h>
-#include <linux/dma-mapping.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/bitmap.h>
+#include <linux/types.h>
+#include <asm/page.h>
/**
* typedef dma_cookie_t - an opaque DMA cookie
@@ -31,8 +35,8 @@
* if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
*/
typedef s32 dma_cookie_t;
-#define DMA_MIN_COOKIE 1
-#define DMA_MAX_COOKIE INT_MAX
+#define DMA_MIN_COOKIE 1
+#define DMA_MAX_COOKIE INT_MAX
#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
@@ -44,10 +48,10 @@ typedef s32 dma_cookie_t;
* @DMA_ERROR: transaction failed
*/
enum dma_status {
- DMA_SUCCESS,
- DMA_IN_PROGRESS,
- DMA_PAUSED,
- DMA_ERROR,
+ DMA_SUCCESS,
+ DMA_IN_PROGRESS,
+ DMA_PAUSED,
+ DMA_ERROR,
};
/**
@@ -57,23 +61,105 @@ enum dma_status {
* automatically set as dma devices are registered.
*/
enum dma_transaction_type {
- DMA_MEMCPY,
- DMA_XOR,
- DMA_PQ,
- DMA_XOR_VAL,
- DMA_PQ_VAL,
- DMA_MEMSET,
- DMA_INTERRUPT,
- DMA_SG,
- DMA_PRIVATE,
- DMA_ASYNC_TX,
- DMA_SLAVE,
- DMA_CYCLIC,
+ DMA_MEMCPY,
+ DMA_XOR,
+ DMA_PQ,
+ DMA_XOR_VAL,
+ DMA_PQ_VAL,
+ DMA_MEMSET,
+ DMA_INTERRUPT,
+ DMA_SG,
+ DMA_PRIVATE,
+ DMA_ASYNC_TX,
+ DMA_SLAVE,
+ DMA_CYCLIC,
+ DMA_INTERLEAVE,
+/* last transaction type for creation of the capabilities mask */
+ DMA_TX_TYPE_END,
};
-/* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
+/**
+ * enum dma_transfer_direction - dma transfer mode and direction indicator
+ * @DMA_MEM_TO_MEM: Async/Memcpy mode
+ * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
+ * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
+ * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
+ */
+enum dma_transfer_direction {
+ DMA_MEM_TO_MEM,
+ DMA_MEM_TO_DEV,
+ DMA_DEV_TO_MEM,
+ DMA_DEV_TO_DEV,
+ DMA_TRANS_NONE,
+};
+
+/**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is a collection of contiguous bytes to be transferred.
+ * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
+ * ICGs may or may not change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
+ * that when repeated an integral number of times, specifies the transfer.
+ * A transfer template is specification of a Frame, the number of times
+ * it is to be repeated and other per-transfer attributes.
+ *
+ * Practically, a client driver would have ready a template for each
+ * type of transfer it is going to need during its lifetime and
+ * set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ * | Frame-1 | Frame-2 | ~ | Frame-'numf' |
+ * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ * == Chunk size
+ * ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ * size_dst := fn(op, size_src), so doesn't mean much for destination.
+ * @icg: Number of bytes to jump after last src/dst address of this
+ * chunk and before first src/dst address for next chunk.
+ * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
+ * Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+ size_t size;
+ size_t icg;
+};
+/**
+ * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
+ * and attributes.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the type of Source and Destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ * Otherwise, source is read contiguously (icg ignored).
+ * Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ * Otherwise, destination is filled contiguously (icg ignored).
+ * Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame i.e, size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ */
+struct dma_interleaved_template {
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ enum dma_transfer_direction dir;
+ bool src_inc;
+ bool dst_inc;
+ bool src_sgl;
+ bool dst_sgl;
+ size_t numf;
+ size_t frame_size;
+ struct data_chunk sgl[0];
+};
/**
* enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -86,9 +172,9 @@ enum dma_transaction_type {
* @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
* @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
* @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- * (if not set, do the source dma-unmapping as page)
+ * (if not set, do the source dma-unmapping as page)
* @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- * (if not set, do the destination dma-unmapping as page)
+ * (if not set, do the destination dma-unmapping as page)
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
* @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -98,16 +184,16 @@ enum dma_transaction_type {
* on the result of this operation
*/
enum dma_ctrl_flags {
- DMA_PREP_INTERRUPT = (1 << 0),
- DMA_CTRL_ACK = (1 << 1),
- DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
- DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
- DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
- DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
- DMA_PREP_PQ_DISABLE_P = (1 << 6),
- DMA_PREP_PQ_DISABLE_Q = (1 << 7),
- DMA_PREP_CONTINUE = (1 << 8),
- DMA_PREP_FENCE = (1 << 9),
+ DMA_PREP_INTERRUPT = (1 << 0),
+ DMA_CTRL_ACK = (1 << 1),
+ DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
+ DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
+ DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
+ DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
+ DMA_PREP_PQ_DISABLE_P = (1 << 6),
+ DMA_PREP_PQ_DISABLE_Q = (1 << 7),
+ DMA_PREP_CONTINUE = (1 << 8),
+ DMA_PREP_FENCE = (1 << 9),
};
/**
@@ -125,19 +211,19 @@ enum dma_ctrl_flags {
* into external start mode.
*/
enum dma_ctrl_cmd {
- DMA_TERMINATE_ALL,
- DMA_PAUSE,
- DMA_RESUME,
- DMA_SLAVE_CONFIG,
- FSLDMA_EXTERNAL_START,
+ DMA_TERMINATE_ALL,
+ DMA_PAUSE,
+ DMA_RESUME,
+ DMA_SLAVE_CONFIG,
+ FSLDMA_EXTERNAL_START,
};
/**
* enum sum_check_bits - bit position of pq_check_flags
*/
enum sum_check_bits {
- SUM_CHECK_P = 0,
- SUM_CHECK_Q = 1,
+ SUM_CHECK_P = 0,
+ SUM_CHECK_Q = 1,
};
/**
@@ -146,8 +232,8 @@ enum sum_check_bits {
* @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
*/
enum sum_check_flags {
- SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
- SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+ SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+ SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};
@@ -164,15 +250,16 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
*/
struct dma_chan_percpu {
- /* stats */
- unsigned long memcpy_count;
- unsigned long bytes_transferred;
+ /* stats */
+ unsigned long memcpy_count;
+ unsigned long bytes_transferred;
};
/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
* @cookie: last cookie value returned to client
+ * @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
* @device_node: used to add this to the device chan list
@@ -182,18 +269,19 @@ struct dma_chan_percpu {
* @private: private data for certain client-channel associations
*/
struct dma_chan {
- struct dma_device *device;
- dma_cookie_t cookie;
-
- /* sysfs */
- int chan_id;
- struct dma_chan_dev *dev;
-
- struct list_head device_node;
- struct dma_chan_percpu __percpu *local;
- int client_count;
- int table_count;
- void *private;
+ struct dma_device *device;
+ dma_cookie_t cookie;
+ dma_cookie_t completed_cookie;
+
+ /* sysfs */
+ int chan_id;
+ struct dma_chan_dev *dev;
+
+ struct list_head device_node;
+ struct dma_chan_percpu __percpu *local;
+ int client_count;
+ int table_count;
+ void *private;
};
/**
@@ -204,10 +292,10 @@ struct dma_chan {
* @idr_ref - reference count to gate release of dma_device dev_id
*/
struct dma_chan_dev {
- struct dma_chan *chan;
- struct device device;
- int dev_id;
- atomic_t *idr_ref;
+ struct dma_chan *chan;
+ struct device device;
+ int dev_id;
+ atomic_t *idr_ref;
};
/**
@@ -215,11 +303,11 @@ struct dma_chan_dev {
* device, source or target buses
*/
enum dma_slave_buswidth {
- DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
- DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
- DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
- DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
- DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+ DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+ DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+ DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+ DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+ DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
};
/**
@@ -247,6 +335,12 @@ enum dma_slave_buswidth {
* may or may not be applicable on memory sources.
* @dst_maxburst: same as src_maxburst but for destination target
* mutatis mutandis.
+ * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
+ * with 'true' if peripheral should be flow controller. Direction will be
+ * selected at Runtime.
+ * @slave_id: Slave requester id. Only valid for slave channels. The dma
+ * slave peripheral will have unique id as dma requester which need to be
+ * pass as slave config.
*
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
@@ -266,18 +360,20 @@ enum dma_slave_buswidth {
* struct, if applicable.
*/
struct dma_slave_config {
- enum dma_data_direction direction;
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- enum dma_slave_buswidth src_addr_width;
- enum dma_slave_buswidth dst_addr_width;
- u32 src_maxburst;
- u32 dst_maxburst;
+ enum dma_transfer_direction direction;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ enum dma_slave_buswidth src_addr_width;
+ enum dma_slave_buswidth dst_addr_width;
+ u32 src_maxburst;
+ u32 dst_maxburst;
+ bool device_fc;
+ unsigned int slave_id;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
{
- return dev_name(&chan->dev->device);
+ return dev_name(&chan->dev->device);
}
void dma_chan_cleanup(struct kref *kref);
@@ -300,9 +396,9 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
* struct dma_async_tx_descriptor - async transaction descriptor
* ---dma generic offload fields---
* @cookie: tracking cookie for this transaction, set to -EBUSY if
- * this tx is sitting on a dependency list
+ * this tx is sitting on a dependency list
* @flags: flags to augment operation preparation, control completion, and
- * communicate status
+ * communicate status
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: set the prepared descriptor(s) to be executed by the engine
@@ -314,17 +410,17 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
* @lock: protect the parent and next pointers
*/
struct dma_async_tx_descriptor {
- dma_cookie_t cookie;
- enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
- dma_addr_t phys;
- struct dma_chan *chan;
- dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
- dma_async_tx_callback callback;
- void *callback_param;
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
+ dma_addr_t phys;
+ struct dma_chan *chan;
+ dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+ dma_async_tx_callback callback;
+ void *callback_param;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
- struct dma_async_tx_descriptor *next;
- struct dma_async_tx_descriptor *parent;
- spinlock_t lock;
+ struct dma_async_tx_descriptor *next;
+ struct dma_async_tx_descriptor *parent;
+ spinlock_t lock;
#endif
};
@@ -337,7 +433,7 @@ static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
- BUG();
+ BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
@@ -347,42 +443,42 @@ static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
- return NULL;
+ return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
- return NULL;
+ return NULL;
}
#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
- spin_lock_bh(&txd->lock);
+ spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
- spin_unlock_bh(&txd->lock);
+ spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
- txd->next = next;
- next->parent = txd;
+ txd->next = next;
+ next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
- txd->parent = NULL;
+ txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
- txd->next = NULL;
+ txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
- return txd->parent;
+ return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
- return txd->next;
+ return txd->next;
}
#endif
@@ -392,13 +488,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
* @last: last completed DMA cookie
* @used: last issued DMA cookie (i.e. the one in progress)
* @residue: the remaining number of bytes left to transmit
- * on the selected transfer for states DMA_IN_PROGRESS and
- * DMA_PAUSED if this is implemented in the driver, else 0
+ * on the selected transfer for states DMA_IN_PROGRESS and
+ * DMA_PAUSED if this is implemented in the driver, else 0
*/
struct dma_tx_state {
- dma_cookie_t last;
- dma_cookie_t used;
- u32 residue;
+ dma_cookie_t last;
+ dma_cookie_t used;
+ u32 residue;
};
/**
@@ -417,7 +513,7 @@ struct dma_tx_state {
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @device_alloc_chan_resources: allocate resources and return the
- * number of allocated descriptors
+ * number of allocated descriptors
* @device_free_chan_resources: release DMA channel's resources
* @device_prep_dma_memcpy: prepares a memcpy operation
* @device_prep_dma_xor: prepares a xor operation
@@ -428,179 +524,247 @@ struct dma_tx_state {
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
- * The function takes a buffer of size buf_len. The callback function will
- * be called after period_len bytes have been transferred.
+ * The function takes a buffer of size buf_len. The callback function will
+ * be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: Transfer expression in a generic way.
* @device_control: manipulate all pending operations on a channel, returns
- * zero or error code
+ * zero or error code
* @device_tx_status: poll for transaction completion, the optional
- * txstate parameter can be supplied with a pointer to get a
- * struct with auxiliary transfer status information, otherwise the call
- * will just return a simple status code
+ * txstate parameter can be supplied with a pointer to get a
+ * struct with auxiliary transfer status information, otherwise the call
+ * will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
*/
struct dma_device {
- unsigned int chancnt;
- unsigned int privatecnt;
- struct list_head channels;
- struct list_head global_node;
- dma_cap_mask_t cap_mask;
- unsigned short max_xor;
- unsigned short max_pq;
- u8 copy_align;
- u8 xor_align;
- u8 pq_align;
- u8 fill_align;
- #define DMA_HAS_PQ_CONTINUE (1 << 15)
-
- int dev_id;
- struct device *dev;
-
- int (*device_alloc_chan_resources)(struct dma_chan *chan);
- void (*device_free_chan_resources)(struct dma_chan *chan);
-
- struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
- struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
- struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
- unsigned int src_cnt, size_t len, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
- struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
- size_t len, enum sum_check_flags *result, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
- struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
- unsigned int src_cnt, const unsigned char *scf,
- size_t len, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
- struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
- unsigned int src_cnt, const unsigned char *scf, size_t len,
- enum sum_check_flags *pqres, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
- struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
- unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
- struct dma_chan *chan, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags);
-
- struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
- unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
- struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction);
- int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
- unsigned long arg);
-
- enum dma_status (*device_tx_status)(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate);
- void (*device_issue_pending)(struct dma_chan *chan);
+ unsigned int chancnt;
+ unsigned int privatecnt;
+ struct list_head channels;
+ struct list_head global_node;
+ dma_cap_mask_t cap_mask;
+ unsigned short max_xor;
+ unsigned short max_pq;
+ u8 copy_align;
+ u8 xor_align;
+ u8 pq_align;
+ u8 fill_align;
+ #define DMA_HAS_PQ_CONTINUE (1 << 15)
+
+ int dev_id;
+ struct device *dev;
+
+ int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ void (*device_free_chan_resources)(struct dma_chan *chan);
+
+ struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
+ struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, enum sum_check_flags *result, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+ struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+ struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
+ struct dma_chan *chan, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
+
+ struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+ struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+ struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags);
+ int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg);
+
+ enum dma_status (*device_tx_status)(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
+ void (*device_issue_pending)(struct dma_chan *chan);
};
static inline int dmaengine_device_control(struct dma_chan *chan,
- enum dma_ctrl_cmd cmd,
- unsigned long arg)
+ enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
- return chan->device->device_control(chan, cmd, arg);
+ if (chan->device->device_control)
+ return chan->device->device_control(chan, cmd, arg);
+
+ return -ENOSYS;
}
static inline int dmaengine_slave_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+ struct dma_slave_config *config)
+{
+ return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
+ (unsigned long)config);
+}
+
+static inline bool is_slave_direction(enum dma_transfer_direction direction)
+{
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+ struct dma_chan *chan, dma_addr_t buf, size_t len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct scatterlist sg;
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = buf;
+ sg_dma_len(&sg) = len;
+
+ return chan->device->device_prep_slave_sg(chan, &sg, 1,
+ dir, flags, NULL);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags)
{
- return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
- (unsigned long)config);
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, NULL);
+}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+struct rio_dma_ext;
+static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags,
+ struct rio_dma_ext *rio_ext)
+{
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, rio_ext);
+}
+#endif
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
+ period_len, dir, flags, NULL);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
- return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+ return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}
static inline int dmaengine_pause(struct dma_chan *chan)
{
- return dmaengine_device_control(chan, DMA_PAUSE, 0);
+ return dmaengine_device_control(chan, DMA_PAUSE, 0);
}
static inline int dmaengine_resume(struct dma_chan *chan)
{
- return dmaengine_device_control(chan, DMA_RESUME, 0);
+ return dmaengine_device_control(chan, DMA_RESUME, 0);
+}
+
+static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ return chan->device->device_tx_status(chan, cookie, state);
}
static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
- return desc->tx_submit(desc);
+ return desc->tx_submit(desc);
}
static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
- size_t mask;
-
- if (!align)
- return true;
- mask = (1 << align) - 1;
- if (mask & (off1 | off2 | len))
- return false;
- return true;
+ size_t mask;
+
+ if (!align)
+ return true;
+ mask = (1 << align) - 1;
+ if (mask & (off1 | off2 | len))
+ return false;
+ return true;
}
static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
- size_t off2, size_t len)
+ size_t off2, size_t len)
{
- return dmaengine_check_align(dev->copy_align, off1, off2, len);
+ return dmaengine_check_align(dev->copy_align, off1, off2, len);
}
static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
- size_t off2, size_t len)
+ size_t off2, size_t len)
{
- return dmaengine_check_align(dev->xor_align, off1, off2, len);
+ return dmaengine_check_align(dev->xor_align, off1, off2, len);
}
static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
- size_t off2, size_t len)
+ size_t off2, size_t len)
{
- return dmaengine_check_align(dev->pq_align, off1, off2, len);
+ return dmaengine_check_align(dev->pq_align, off1, off2, len);
}
static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
- size_t off2, size_t len)
+ size_t off2, size_t len)
{
- return dmaengine_check_align(dev->fill_align, off1, off2, len);
+ return dmaengine_check_align(dev->fill_align, off1, off2, len);
}
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
- dma->max_pq = maxpq;
- if (has_pq_continue)
- dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+ dma->max_pq = maxpq;
+ if (has_pq_continue)
+ dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}
static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
- return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+ return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}
static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
- enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+ enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
- return (flags & mask) == mask;
+ return (flags & mask) == mask;
}
static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
- return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+ return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}
static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
- return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+ return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}
/* dma_maxpq - reduce maxpq in the face of continued operations
@@ -618,13 +782,13 @@ static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
*/
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
- if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
- return dma_dev_to_maxpq(dma);
- else if (dmaf_p_disabled_continue(flags))
- return dma_dev_to_maxpq(dma) - 1;
- else if (dmaf_continue(flags))
- return dma_dev_to_maxpq(dma) - 3;
- BUG();
+ if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma);
+ else if (dmaf_p_disabled_continue(flags))
+ return dma_dev_to_maxpq(dma) - 1;
+ else if (dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma) - 3;
+ BUG();
}
/* --- public DMA engine API --- */
@@ -642,8 +806,8 @@ static inline void dmaengine_put(void)
#endif
#ifdef CONFIG_NET_DMA
-#define net_dmaengine_get() dmaengine_get()
-#define net_dmaengine_put() dmaengine_put()
+#define net_dmaengine_get() dmaengine_get()
+#define net_dmaengine_put() dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
@@ -654,8 +818,8 @@ static inline void net_dmaengine_put(void)
#endif
#ifdef CONFIG_ASYNC_TX_DMA
-#define async_dmaengine_get() dmaengine_get()
-#define async_dmaengine_put() dmaengine_put()
+#define async_dmaengine_get() dmaengine_get()
+#define async_dmaengine_put() dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
@@ -671,80 +835,64 @@ static inline void async_dmaengine_put(void)
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
- return NULL;
+ return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
- void *dest, void *src, size_t len);
+ void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
- struct page *page, unsigned int offset, void *kdata, size_t len);
+ struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
- struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
- unsigned int src_off, size_t len);
+ struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
+ unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
- struct dma_chan *chan);
+ struct dma_chan *chan);
static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
- tx->flags |= DMA_CTRL_ACK;
+ tx->flags |= DMA_CTRL_ACK;
}
static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
- tx->flags &= ~DMA_CTRL_ACK;
+ tx->flags &= ~DMA_CTRL_ACK;
}
static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
- return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
-}
-
-#define first_dma_cap(mask) __first_dma_cap(&(mask))
-static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
-{
- return min_t(int, DMA_TX_TYPE_END,
- find_first_bit(srcp->bits, DMA_TX_TYPE_END));
-}
-
-#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
-static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
-{
- return min_t(int, DMA_TX_TYPE_END,
- find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
+ return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}
#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
- set_bit(tx_type, dstp->bits);
+ set_bit(tx_type, dstp->bits);
}
#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
- clear_bit(tx_type, dstp->bits);
+ clear_bit(tx_type, dstp->bits);
}
#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
- bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+ bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}
#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
- return test_bit(tx_type, srcp->bits);
+ return test_bit(tx_type, srcp->bits);
}
#define for_each_dma_cap_mask(cap, mask) \
- for ((cap) = first_dma_cap(mask); \
- (cap) < DMA_TX_TYPE_END; \
- (cap) = next_dma_cap((cap), (mask)))
+ for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
/**
* dma_async_issue_pending - flush pending transactions to HW
@@ -755,11 +903,9 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
*/
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
- chan->device->device_issue_pending(chan);
+ chan->device->device_issue_pending(chan);
}
-#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
-
/**
* dma_async_is_tx_complete - poll for transaction completion
* @chan: DMA channel
@@ -772,72 +918,76 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
* the status of multiple cookies without re-checking hardware state.
*/
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+ dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
- struct dma_tx_state state;
- enum dma_status status;
-
- status = chan->device->device_tx_status(chan, cookie, &state);
- if (last)
- *last = state.last;
- if (used)
- *used = state.used;
- return status;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = chan->device->device_tx_status(chan, cookie, &state);
+ if (last)
+ *last = state.last;
+ if (used)
+ *used = state.used;
+ return status;
}
-#define dma_async_memcpy_complete(chan, cookie, last, used)\
- dma_async_is_tx_complete(chan, cookie, last, used)
-
/**
* dma_async_is_complete - test a cookie against chan state
* @cookie: transaction identifier to test status of
* @last_complete: last know completed transaction
* @last_used: last cookie value handed out
*
- * dma_async_is_complete() is used in dma_async_memcpy_complete()
+ * dma_async_is_complete() is used in dma_async_is_tx_complete()
* the test logic is separated for lightweight testing of multiple cookies
*/
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
- dma_cookie_t last_complete, dma_cookie_t last_used)
+ dma_cookie_t last_complete, dma_cookie_t last_used)
{
- if (last_complete <= last_used) {
- if ((cookie <= last_complete) || (cookie > last_used))
- return DMA_SUCCESS;
- } else {
- if ((cookie <= last_complete) && (cookie > last_used))
- return DMA_SUCCESS;
- }
- return DMA_IN_PROGRESS;
+ if (last_complete <= last_used) {
+ if ((cookie <= last_complete) || (cookie > last_used))
+ return DMA_SUCCESS;
+ } else {
+ if ((cookie <= last_complete) && (cookie > last_used))
+ return DMA_SUCCESS;
+ }
+ return DMA_IN_PROGRESS;
}
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
- if (st) {
- st->last = last;
- st->used = used;
- st->residue = residue;
- }
+ if (st) {
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
+ }
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
- return DMA_SUCCESS;
+ return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
-static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param)
+static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param)
{
- return NULL;
+ return NULL;
+}
+static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
+ const char *name)
+{
+ return NULL;
}
static inline void dma_release_channel(struct dma_chan *chan)
{
@@ -850,28 +1000,45 @@ int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
+ __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+
+static inline struct dma_chan
+*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param,
+ struct device *dev, char *name)
+{
+ struct dma_chan *chan;
+
+ chan = dma_request_slave_channel(dev, name);
+ if (chan)
+ return chan;
+
+ return __dma_request_channel(mask, fn, fn_param);
+}
/* --- Helper iov-locking functions --- */
struct dma_page_list {
- char __user *base_address;
- int nr_pages;
- struct page **pages;
+ char __user *base_address;
+ int nr_pages;
+ struct page **pages;
};
struct dma_pinned_list {
- int nr_iovecs;
- struct dma_page_list page_list[0];
+ int nr_iovecs;
+ struct dma_page_list page_list[0];
};
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
+ struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, struct page *page,
- unsigned int offset, size_t len);
+ struct dma_pinned_list *pinned_list, struct page *page,
+ unsigned int offset, size_t len);
#endif /* DMAENGINE_H */
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
deleted file mode 100644
index 4bfe0a2f..00000000
--- a/include/linux/dw_dmac.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
- *
- * Copyright (C) 2007 Atmel Corporation
- * Copyright (C) 2010-2011 ST Microelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef DW_DMAC_H
-#define DW_DMAC_H
-
-#include <linux/dmaengine.h>
-
-/**
- * struct dw_dma_platform_data - Controller configuration parameters
- * @nr_channels: Number of channels supported by hardware (max 8)
- * @is_private: The device channels should be marked as private and not for
- * by the general purpose DMA channel allocator.
- */
-struct dw_dma_platform_data {
- unsigned int nr_channels;
- bool is_private;
-#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
-#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
- unsigned char chan_allocation_order;
-#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
-#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
- unsigned char chan_priority;
-};
-
-/**
- * enum dw_dma_slave_width - DMA slave register access width.
- * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
- * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
- * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
- */
-enum dw_dma_slave_width {
- DW_DMA_SLAVE_WIDTH_8BIT,
- DW_DMA_SLAVE_WIDTH_16BIT,
- DW_DMA_SLAVE_WIDTH_32BIT,
-};
-
-/* bursts size */
-enum dw_dma_msize {
- DW_DMA_MSIZE_1,
- DW_DMA_MSIZE_4,
- DW_DMA_MSIZE_8,
- DW_DMA_MSIZE_16,
- DW_DMA_MSIZE_32,
- DW_DMA_MSIZE_64,
- DW_DMA_MSIZE_128,
- DW_DMA_MSIZE_256,
-};
-
-/* flow controller */
-enum dw_dma_fc {
- DW_DMA_FC_D_M2M,
- DW_DMA_FC_D_M2P,
- DW_DMA_FC_D_P2M,
- DW_DMA_FC_D_P2P,
- DW_DMA_FC_P_P2M,
- DW_DMA_FC_SP_P2P,
- DW_DMA_FC_P_M2P,
- DW_DMA_FC_DP_P2P,
-};
-
-/**
- * struct dw_dma_slave - Controller-specific information about a slave
- *
- * @dma_dev: required DMA master device
- * @tx_reg: physical address of data register used for
- * memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- * peripheral-to-memory transfers
- * @reg_width: peripheral register width
- * @cfg_hi: Platform-specific initializer for the CFG_HI register
- * @cfg_lo: Platform-specific initializer for the CFG_LO register
- * @src_master: src master for transfers on allocated channel.
- * @dst_master: dest master for transfers on allocated channel.
- * @src_msize: src burst size.
- * @dst_msize: dest burst size.
- * @fc: flow controller for DMA transfer
- */
-struct dw_dma_slave {
- struct device *dma_dev;
- dma_addr_t tx_reg;
- dma_addr_t rx_reg;
- enum dw_dma_slave_width reg_width;
- u32 cfg_hi;
- u32 cfg_lo;
- u8 src_master;
- u8 dst_master;
- u8 src_msize;
- u8 dst_msize;
- u8 fc;
-};
-
-/* Platform-configurable bits in CFG_HI */
-#define DWC_CFGH_FCMODE (1 << 0)
-#define DWC_CFGH_FIFO_MODE (1 << 1)
-#define DWC_CFGH_PROTCTL(x) ((x) << 2)
-#define DWC_CFGH_SRC_PER(x) ((x) << 7)
-#define DWC_CFGH_DST_PER(x) ((x) << 11)
-
-/* Platform-configurable bits in CFG_LO */
-#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
-#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
-#define DWC_CFGL_LOCK_CH_XACT (2 << 12)
-#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */
-#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14)
-#define DWC_CFGL_LOCK_BUS_XACT (2 << 14)
-#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */
-#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */
-#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
-#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
-
-/* DMA API extensions */
-struct dw_cyclic_desc {
- struct dw_desc **desc;
- unsigned long periods;
- void (*period_callback)(void *param);
- void *period_callback_param;
-};
-
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
- dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_data_direction direction);
-void dw_dma_cyclic_free(struct dma_chan *chan);
-int dw_dma_cyclic_start(struct dma_chan *chan);
-void dw_dma_cyclic_stop(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
-
-#endif /* DW_DMAC_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index a6c652ef..e6490786 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -512,6 +512,7 @@ struct i2c_msg {
__u16 flags;
#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
#define I2C_M_RD 0x0001 /* read data, from slave to master */
+#define I2C_M_SPERATE_MSG 0x8000 /* send STOP after every msg; used with I2C_RDWR */
#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
diff --git a/include/linux/if_alg.h b/include/linux/if_alg.h
old mode 100644
new mode 100755
index 0f9acce5..bd6deb8f
--- a/include/linux/if_alg.h
+++ b/include/linux/if_alg.h
@@ -28,11 +28,38 @@ struct af_alg_iv {
__u8 iv[0];
};
+#define MAX_EX_KEY_MAP_SIZE 8
+struct ex_key_map {
+__u32 crypto_key_no;
+__u32 ex_mem_entry;
+} ;
+
+struct ex_key_map_para {
+__u32 map_size;
+struct ex_key_map map[MAX_EX_KEY_MAP_SIZE];
+} ;
+
+struct crypto_adv_info {
+struct ex_key_map_para ex_key_para;
+};
+struct af_alg_usr_def {
+#define CRYPTO_CPU_SET_KEY (1<<0)
+#define CRYPTO_EX_MEM_SET_KEY (1<<1)
+#define CRYPTO_EX_MEM_INDEP_POWER (1<<2)
+/* bits 8 and up: external-memory key handling flags */
+#define CRYPTO_EX_MEM_SWITCH_KEY (1<<8)
+/* when CRYPTO_EX_MEM_SWITCH_KEY is set, the flags below are parsed */
+#define CRYPTO_EX_MEM_4_ENTRY_1_KEY (1<<9)
+__u32 mode;
+/* when mode selects external-memory keys, the advanced info below is parsed */
+struct crypto_adv_info adv;
+};
+
/* Socket options */
#define ALG_SET_KEY 1
#define ALG_SET_IV 2
#define ALG_SET_OP 3
-
+#define ALG_USR_DEF 8
/* Operations */
#define ALG_OP_DECRYPT 0
#define ALG_OP_ENCRYPT 1
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index bdd7ceeb..52040142 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -14,6 +14,12 @@
#ifndef _LINUX_MMC_DW_MMC_H_
#define _LINUX_MMC_DW_MMC_H_
+#include <linux/scatterlist.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+
#define MAX_MCI_SLOTS 2
enum dw_mci_state {
@@ -117,6 +123,8 @@ struct dw_mci {
/* DMA interface members*/
int use_dma;
+ int using_dma;
+ unsigned int prev_blksz;
dma_addr_t sg_dma;
void *sg_cpu;
@@ -154,6 +162,9 @@ struct dw_mci {
u32 quirks;
struct regulator *vmmc; /* Power regulator */
+
+ int dma_data_mapped;
+ int data_error_flag;
};
/* DMA ops for Internal/External DMAC interface */
@@ -200,7 +211,7 @@ struct dw_mci_board {
/* delay in mS before detecting cards after interrupt */
u32 detect_delay_ms;
- int (*init)(u32 slot_id, irq_handler_t , void *);
+ int (*init)(u32 slot_id,void* irq_handler_t , void *);
int (*get_ro)(u32 slot_id);
int (*get_cd)(u32 slot_id);
int (*get_ocr)(u32 slot_id);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1ee44244..413992a3 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -281,6 +281,8 @@ struct mmc_host {
struct dentry *debugfs_root;
+ unsigned int rescan_count;
+
unsigned long private[0] ____cacheline_aligned;
};
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 245cdace..573ed64b 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -72,6 +72,7 @@
#define SDIO_CCCR_REV_1_00 0 /* CCCR/FBR Version 1.00 */
#define SDIO_CCCR_REV_1_10 1 /* CCCR/FBR Version 1.10 */
#define SDIO_CCCR_REV_1_20 2 /* CCCR/FBR Version 1.20 */
+#define SDIO_CCCR_REV_3_00 3 /* to support SDIO 3.0 (luoc) */
#define SDIO_SDIO_REV_1_00 0 /* SDIO Spec Version 1.00 */
#define SDIO_SDIO_REV_1_10 1 /* SDIO Spec Version 1.10 */
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 57cc0e63..ec2c889b 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -101,10 +101,28 @@ struct nand_bbt_descr {
/* Chip stores bad block marker on BOTH 1st and 6th bytes of OOB */
#define NAND_BBT_SCANBYTE1AND6 0x00100000
/* The nand_bbt_descr was created dynamicaly and must be freed */
-#define NAND_BBT_DYNAMICSTRUCT 0x00200000
+/*#define NAND_BBT_DYNAMICSTRUCT 0x00200000*/
+/*
+ * Use a flash based bad block table. By default, OOB identifier is saved in
+ * OOB area. This option is passed to the default bad block table function.
+ */
+#define NAND_BBT_USE_FLASH 0x00020000
+
/* The bad block table does not OOB for marker */
#define NAND_BBT_NO_OOB 0x00400000
+/*
+ * Do not write new bad block markers to OOB; useful, e.g., when ECC covers
+ * entire spare area. Must be used with NAND_BBT_USE_FLASH.
+ */
+#define NAND_BBT_NO_OOB_BBM 0x00800000
+/*
+ * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
+ * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
+ * in nand_chip.bbt_options.
+ */
+#define NAND_BBT_DYNAMICSTRUCT 0x80000000
+
/* The maximum number of blocks to scan for a bbt */
#define NAND_BBT_SCAN_MAXBLOCKS 4
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 2541fb84..3541097b 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -168,6 +168,15 @@ struct mtd_info {
unsigned int erasesize_mask;
unsigned int writesize_mask;
+ /*
+ * read ops return -EUCLEAN if max number of bitflips corrected on any
+ * one region comprising an ecc step equals or exceeds this value.
+ * Settable by driver, else defaults to ecc_strength. User can override
+ * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed;
+ * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
+ */
+ unsigned int bitflip_threshold;
+
// Kernel-only stuff starts here.
const char *name;
int index;
diff --git a/include/linux/mtd/spi-nand.h b/include/linux/mtd/spi-nand.h
new file mode 100644
index 00000000..cfa6e80c
--- /dev/null
+++ b/include/linux/mtd/spi-nand.h
@@ -0,0 +1,345 @@
+/*-
+ *
+ * Copyright (c) 2009-2014 Micron Technology, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Peter Pan <peterpandong at micron.com>
+ *
+ * based on mt29f_spinand.h
+ */
+#ifndef __LINUX_MTD_SPI_NAND_H
+#define __LINUX_MTD_SPI_NAND_H
+
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+
+
+/*
+ * Standard SPI-NAND flash commands
+ */
+#define SPINAND_CMD_READ 0x13
+#define SPINAND_CMD_READ_RDM 0x03
+#define SPINAND_CMD_PROG_LOAD 0x02
+#define SPINAND_CMD_PROG_RDM 0x84
+#define SPINAND_CMD_PROG 0x10
+#define SPINAND_CMD_ERASE_BLK 0xd8
+#define SPINAND_CMD_WR_ENABLE 0x06
+#define SPINAND_CMD_WR_DISABLE 0x04
+#define SPINAND_CMD_READ_ID 0x9f
+#define SPINAND_CMD_RESET 0xff
+#define SPINAND_CMD_READ_REG 0x0f
+#define SPINAND_CMD_WRITE_REG 0x1f
+
+#define SPINAND_CMD_READ_CACHE_X2 0x3b
+#define SPINAND_CMD_READ_CACHE_X4 0x6b
+#define SPINAND_CMD_READ_CACHE_DUAL 0xbb
+#define SPINAND_CMD_READ_CACHE_QUAD 0xeb
+
+#define SPINAND_CMD_PROG_LOAD_X4 0x32
+#define SPINAND_CMD_PROG_RDM_X4 0xC4 /* 0x34 on some parts -- check datasheet */
+
+/* feature registers */
+#define REG_BLOCK_LOCK 0xa0
+#define REG_OTP 0xb0
+#define REG_STATUS 0xc0/* timing */
+
+/* status */
+#define STATUS_OIP_MASK 0x01
+#define STATUS_READY (0 << 0)
+#define STATUS_BUSY (1 << 0)
+
+#define STATUS_E_FAIL_MASK 0x04
+#define STATUS_E_FAIL (1 << 2)
+
+#define STATUS_P_FAIL_MASK 0x08
+#define STATUS_P_FAIL (1 << 3)
+
+/*OTP register defines*/
+#define OTP_ECC_MASK 0X10
+#define OTP_ECC_ENABLE (1 << 4)
+#define OTP_ENABLE (1 << 6)
+#define OTP_LOCK (1 << 7)
+#define QE_ENABLE (1 << 0)
+
+
+/* block lock */
+#define BL_ALL_LOCKED 0x38
+#define BL_1_2_LOCKED 0x30
+#define BL_1_4_LOCKED 0x28
+#define BL_1_8_LOCKED 0x20
+#define BL_1_16_LOCKED 0x18
+#define BL_1_32_LOCKED 0x10
+#define BL_1_64_LOCKED 0x08
+#define BL_ALL_UNLOCKED 0
+
+#define SPI_NAND_ECC_SHIFT 4
+
+#define SPI_NAND_MT29F_ECC_MASK 3
+#define SPI_NAND_MT29F_ECC_CORRECTED 1
+#define SPI_NAND_MT29F_ECC_UNCORR 2
+#define SPI_NAND_MT29F_ECC_RESERVED 3
+#define SPI_NAND_MT29F_ECC_SHIFT 4
+
+#define SPI_NAND_GD5F_ECC_MASK 7
+#define SPI_NAND_GD5F_ECC_UNCORR 7
+#define SPI_NAND_GD5F_ECC_SHIFT 4
+
+struct spi_nand_onfi_params {
+ /* rev info and features block */
+ /* 'O' 'N' 'F' 'I' */
+ u8 sig[4]; /*0-3*/
+ __le16 revision; /*4-5*/
+ __le16 features; /*6-7*/
+ __le16 opt_cmd; /*8-9*/
+ u8 reserved0[22]; /*10-31*/
+
+ /* manufacturer information block */
+ char manufacturer[12]; /*32-43*/
+ char model[20]; /*44-63*/
+ u8 mfr_id; /*64*/
+ __le16 date_code; /*65-66*/
+ u8 reserved1[13]; /*67-79*/
+
+ /* memory organization block */
+ __le32 byte_per_page; /*80-83*/
+ __le16 spare_bytes_per_page; /*84-85*/
+ __le32 data_bytes_per_ppage; /*86-89*/
+ __le16 spare_bytes_per_ppage; /*90-91*/
+ __le32 pages_per_block; /*92-95*/
+ __le32 blocks_per_lun; /*96-99*/
+ u8 lun_count; /*100*/
+ u8 addr_cycles; /*101*/
+ u8 bits_per_cell; /*102*/
+ __le16 bb_per_lun; /*103-104*/
+ __le16 block_endurance; /*105-106*/
+ u8 guaranteed_good_blocks; /*107*/
+ __le16 guaranteed_block_endurance; /*108-109*/
+ u8 programs_per_page; /*110*/
+ u8 ppage_attr; /*111*/
+ u8 ecc_bits; /*112*/
+ u8 interleaved_bits; /*113*/
+ u8 interleaved_ops; /*114*/
+ u8 reserved2[13]; /*115-127*/
+
+ /* electrical parameter block */
+ u8 io_pin_capacitance_max; /*128*/
+ __le16 timing_mode; /*129-130*/
+ __le16 program_cache_timing_mode; /*131-132*/
+ __le16 t_prog; /*133-134*/
+ __le16 t_bers; /*135-136*/
+ __le16 t_r; /*137-138*/
+ __le16 t_ccs; /*139-140*/
+ u8 reserved3[23]; /*141-163*/
+
+ /* vendor */
+ __le16 vendor_specific_revision; /*164-165*/
+ u8 vendor_specific[88]; /*166-253*/
+
+ __le16 crc; /*254-255*/
+} __packed;
+
+#define ONFI_CRC_BASE 0x4F4E
+
+#define SPINAND_MAX_ID_LEN 4
+
+/**
+ * struct spi_nand_chip - SPI-NAND Private Flash Chip Data
+ * @chip_lock: [INTERN] protection lock
+ * @name: name of the chip
+ * @wq: [INTERN] wait queue to sleep on if a SPI-NAND operation
+ * is in progress used instead of the per chip wait queue
+ * when a hw controller is available.
+ * @mfr_id: [BOARDSPECIFIC] manufacture id
+ * @dev_id: [BOARDSPECIFIC] device id
+ * @state: [INTERN] the current state of the SPI-NAND device
+ * @spi: [INTERN] point to spi device structure
+ * @mtd: [INTERN] point to MTD device structure
+ * @reset: [REPLACEABLE] function to reset the device
+ * @read_id: [REPLACEABLE] read manufacture id and device id
+ * @load_page: [REPLACEABLE] load page from NAND to cache
+ * @read_cache: [REPLACEABLE] read data from cache
+ * @store_cache: [REPLACEABLE] write data to cache
+ * @write_page: [REPLACEABLE] program NAND with cache data
+ * @erase_block: [REPLACEABLE] erase a given block
+ * @waitfunc: [REPLACEABLE] wait for ready.
+ * @write_enable: [REPLACEABLE] set write enable latch
+ * @get_ecc_status: [REPLACEABLE] get ecc and bitflip status
+ * @enable_ecc: [REPLACEABLE] enable on-die ecc
+ * @disable_ecc: [REPLACEABLE] disable on-die ecc
+ * @buf: [INTERN] buffer for read/write
+ * @oobbuf: [INTERN] buffer for read/write oob
+ * @pagebuf: [INTERN] holds the pagenumber which is currently in
+ * data_buf.
+ * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
+ * currently in data_buf.
+ * @size: [INTERN] the size of chip
+ * @block_size: [INTERN] the size of eraseblock
+ * @page_size: [INTERN] the size of page
+ * @page_spare_size: [INTERN] the size of page oob size
+ * @block_shift: [INTERN] number of address bits in a eraseblock
+ * @page_shift: [INTERN] number of address bits in a page (column
+ * address bits).
+ * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
+ * @options: [BOARDSPECIFIC] various chip options. They can partly
+ * be set to inform nand_scan about special functionality.
+ * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet.
+ * Minimum amount of bit errors per @ecc_step_ds guaranteed
+ * to be correctable. If unknown, set to zero.
+ * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds,
+ * also from the datasheet. It is the recommended ECC step
+ * size, if known; if unknown, set to zero.
+ * @ecc_mask:
+ * @ecc_uncorr:
+ * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC.
+ * @ecclayout: [BOARDSPECIFIC] ECC layout control structure
+ * See the defines for further explanation.
+ * @bbt_options: [INTERN] bad block specific options. All options used
+ * here must come from bbm.h. By default, these options
+ * will be copied to the appropriate nand_bbt_descr's.
+ * @bbt: [INTERN] bad block table pointer
+ * @badblockpos: [INTERN] position of the bad block marker in the oob
+ * area.
+ * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
+ * lookup.
+ * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
+ * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial
+ * bad block scan.
+ * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is
+ * supported, 0 otherwise.
+ */
+struct spi_nand_chip {
+ spinlock_t chip_lock;
+ char *name;
+ wait_queue_head_t wq;
+ u8 dev_id_len;
+ u8 dev_id[SPINAND_MAX_ID_LEN];
+ flstate_t state;
+ struct spi_device *spi;
+ struct mtd_info *mtd;
+
+ int (*reset)(struct spi_nand_chip *chip);
+ int (*read_id)(struct spi_nand_chip *chip, u8 *id);
+ int (*load_page)(struct spi_nand_chip *chip, unsigned int page_addr);
+ int (*read_cache)(struct spi_nand_chip *chip, unsigned int page_addr,
+ unsigned int page_offset, size_t length, u8 *read_buf);
+ int (*store_cache)(struct spi_nand_chip *chip, unsigned int page_addr,
+ unsigned int page_offset, size_t length, u8 *write_buf);
+ int (*write_page)(struct spi_nand_chip *chip, unsigned int page_addr);
+ int (*erase_block)(struct spi_nand_chip *chip, u32 page_addr);
+ int (*waitfunc)(struct spi_nand_chip *chip, u8 *status);
+ int (*write_enable)(struct spi_nand_chip *chip);
+ void (*get_ecc_status)(struct spi_nand_chip *chip, unsigned int status,
+ unsigned int *corrected,
+ unsigned int *ecc_errors);
+ int (*enable_ecc)(struct spi_nand_chip *chip);
+ int (*disable_ecc)(struct spi_nand_chip *chip);
+ int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
+ int (*set_qe)(struct spi_nand_chip *chip);
+
+ u8 *buf;
+ u8 *oobbuf;
+ int pagebuf;
+ u32 pagebuf_bitflips;
+ u64 size;
+ u32 block_size;
+ u16 page_size;
+ u16 page_spare_size;
+ u8 block_shift;
+ u8 page_shift;
+ u16 page_mask;
+ u32 options;
+ u16 ecc_strength_ds;
+ u16 ecc_step_ds;
+ u8 ecc_mask;
+ u8 ecc_uncorr;
+ u8 bits_per_cell;
+ struct nand_ecclayout *ecclayout;
+ u32 bbt_options;
+ u8 *bbt;
+ int badblockpos;
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+ struct nand_bbt_descr *badblock_pattern;
+ struct spi_nand_onfi_params onfi_params;
+ u32 qe_addr;
+ u32 qe_flag;
+ u32 qe_mask;
+ u32 multi_wire_command_length;
+};
+
+
+struct spi_nand_id_info{
+#define SPI_NAND_ID_NO_DUMMY (0xff)
+ u8 id_addr;
+ u8 id_len;
+};
+
+struct spi_nand_flash {
+ char *name;
+ struct spi_nand_id_info id_info;
+ u8 dev_id[SPINAND_MAX_ID_LEN];
+ u32 page_size;
+ u32 page_spare_size;
+ u32 pages_per_blk;
+ u32 blks_per_chip;
+ u32 options;
+ u8 ecc_mask;
+ u8 ecc_uncorr;
+ struct nand_ecclayout *ecc_layout;
+ u32 qe_addr;
+ u32 qe_flag;
+ u32 qe_mask;
+ u32 multi_wire_command_length;
+};
+
+struct spi_nand_cmd {
+ u8 cmd;
+ u32 n_addr; /* Number of address */
+ u8 addr[3]; /* Reg Offset */
+ u32 n_tx; /* Number of tx bytes */
+ u8 *tx_buf; /* Tx buf */
+ u8 tx_nbits;
+ u32 n_rx; /* Number of rx bytes */
+ u8 *rx_buf; /* Rx buf */
+ u8 rx_nbits;
+};
+
+#define SPI_NAND_INFO(nm, mid, did, pagesz, sparesz, pg_per_blk,\
+ blk_per_chip, opts) \
+ { .name = (nm), .mfr_id = (mid), .dev_id = (did),\
+ .page_size = (pagesz), .page_spare_size = (sparesz),\
+ .pages_per_blk = (pg_per_blk), .blks_per_chip = (blk_per_chip),\
+ .options = (opts) }
+
+#define SPINAND_NEED_PLANE_SELECT (1 << 0)
+
+#define SPINAND_MFR_MICRON 0x2C
+#define SPINAND_MFR_GIGADEVICE 0xC8
+
+int spi_nand_send_cmd(struct spi_device *spi, struct spi_nand_cmd *cmd);
+int spi_nand_read_from_cache(struct spi_nand_chip *chip,
+ u32 page_addr, u32 column, size_t len, u8 *rbuf);
+int spi_nand_read_from_cache_snor_protocol(struct spi_nand_chip *chip,
+ u32 page_addr, u32 column, size_t len, u8 *rbuf);
+int spi_nand_scan_ident(struct mtd_info *mtd);
+int spi_nand_scan_tail(struct mtd_info *mtd);
+int spi_nand_scan_ident_release(struct mtd_info *mtd);
+int spi_nand_scan_tail_release(struct mtd_info *mtd);
+int spi_nand_release(struct mtd_info *mtd);
+int __spi_nand_erase(struct mtd_info *mtd, struct erase_info *einfo,
+ int allowbbt);
+int spi_nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
+int spi_nand_default_bbt(struct mtd_info *mtd);
+int spi_nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
+#endif /* __LINUX_MTD_SPI_NAND_H */
+
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 7c775751..31a1e273 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -1,8 +1,13 @@
#ifndef __LINUX_PWM_H
#define __LINUX_PWM_H
+#include <linux/err.h>
+#include <linux/of.h>
+
struct pwm_device;
+struct seq_file;
+#if defined(CONFIG_PWM) || defined(CONFIG_HAVE_PWM)
/*
* pwm_request - request a PWM device
*/
@@ -27,5 +32,251 @@ int pwm_enable(struct pwm_device *pwm);
* pwm_disable - stop a PWM output toggling
*/
void pwm_disable(struct pwm_device *pwm);
+#else
+static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void pwm_free(struct pwm_device *pwm)
+{
+}
+
+static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ return -EINVAL;
+}
+
+static inline int pwm_enable(struct pwm_device *pwm)
+{
+ return -EINVAL;
+}
+
+static inline void pwm_disable(struct pwm_device *pwm)
+{
+}
+#endif
+
+struct pwm_chip;
+
+/**
+ * enum pwm_polarity - polarity of a PWM signal
+ * @PWM_POLARITY_NORMAL: a high signal for the duration of the duty-
+ * cycle, followed by a low signal for the remainder of the pulse
+ * period
+ * @PWM_POLARITY_INVERSED: a low signal for the duration of the duty-
+ * cycle, followed by a high signal for the remainder of the pulse
+ * period
+ */
+enum pwm_polarity {
+ PWM_POLARITY_NORMAL,
+ PWM_POLARITY_INVERSED,
+};
+
+enum {
+ PWMF_REQUESTED = 1 << 0,
+ PWMF_ENABLED = 1 << 1,
+};
+
+struct pwm_device {
+ const char *label;
+ unsigned long flags;
+ unsigned int hwpwm;
+ unsigned int pwm;
+ struct pwm_chip *chip;
+ void *chip_data;
+
+ unsigned int period; /* in nanoseconds */
+};
+
+static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
+{
+ if (pwm)
+ pwm->period = period;
+}
+
+static inline unsigned int pwm_get_period(struct pwm_device *pwm)
+{
+ return pwm ? pwm->period : 0;
+}
+
+/*
+ * pwm_set_polarity - configure the polarity of a PWM signal
+ */
+int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
+
+/**
+ * struct pwm_ops - PWM controller operations
+ * @request: optional hook for requesting a PWM
+ * @free: optional hook for freeing a PWM
+ * @config: configure duty cycles and period length for this PWM
+ * @set_polarity: configure the polarity of this PWM
+ * @enable: enable PWM output toggling
+ * @disable: disable PWM output toggling
+ * @dbg_show: optional routine to show contents in debugfs
+ * @owner: helps prevent removal of modules exporting active PWMs
+ */
+struct pwm_ops {
+ int (*request)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+ void (*free)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+ int (*config)(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ int duty_ns, int period_ns);
+ int (*set_polarity)(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity);
+ int (*enable)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+ void (*disable)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+#ifdef CONFIG_DEBUG_FS
+ void (*dbg_show)(struct pwm_chip *chip,
+ struct seq_file *s);
+#endif
+ struct module *owner;
+};
+
+/**
+ * struct pwm_chip - abstract a PWM controller
+ * @dev: device providing the PWMs
+ * @list: list node for internal use
+ * @ops: callbacks for this PWM controller
+ * @base: number of first PWM controlled by this chip
+ * @npwm: number of PWMs controlled by this chip
+ * @pwms: array of PWM devices allocated by the framework
+ * @can_sleep: must be true if the .config(), .enable() or .disable()
+ * operations may sleep
+ */
+struct pwm_chip {
+ struct device *dev;
+ struct list_head list;
+ const struct pwm_ops *ops;
+ int base;
+ unsigned int npwm;
+
+ struct pwm_device *pwms;
+/*
+ struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+*/
+ unsigned int of_pwm_n_cells;
+ bool can_sleep;
+};
+
+#if defined(CONFIG_PWM)
+int pwm_set_chip_data(struct pwm_device *pwm, void *data);
+void *pwm_get_chip_data(struct pwm_device *pwm);
+
+int pwmchip_add(struct pwm_chip *chip);
+int pwmchip_remove(struct pwm_chip *chip);
+struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label);
+/*
+struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+*/
+struct pwm_device *pwm_get(struct device *dev, const char *con_id);
+struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id);
+void pwm_put(struct pwm_device *pwm);
+
+struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
+struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
+ const char *con_id);
+void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
+
+bool pwm_can_sleep(struct pwm_device *pwm);
+#else
+static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
+{
+ return -EINVAL;
+}
+
+static inline void *pwm_get_chip_data(struct pwm_device *pwm)
+{
+ return NULL;
+}
+
+static inline int pwmchip_add(struct pwm_chip *chip)
+{
+ return -EINVAL;
+}
+
+static inline int pwmchip_remove(struct pwm_chip *chip)
+{
+ return -EINVAL;
+}
+
+static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct pwm_device *pwm_get(struct device *dev,
+ const char *consumer)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct pwm_device *of_pwm_get(struct device_node *np,
+ const char *con_id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void pwm_put(struct pwm_device *pwm)
+{
+}
+
+static inline struct pwm_device *devm_pwm_get(struct device *dev,
+ const char *consumer)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
+ struct device_node *np,
+ const char *con_id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
+{
+}
+
+static inline bool pwm_can_sleep(struct pwm_device *pwm)
+{
+ return false;
+}
+#endif
+
+struct pwm_lookup {
+ struct list_head list;
+ const char *provider;
+ unsigned int index;
+ const char *dev_id;
+ const char *con_id;
+};
+
+#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id) \
+ { \
+ .provider = _provider, \
+ .index = _index, \
+ .dev_id = _dev_id, \
+ .con_id = _con_id, \
+ }
+
+#if defined(CONFIG_PWM)
+void pwm_add_table(struct pwm_lookup *table, size_t num);
+#else
+static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
+{
+}
+#endif
#endif /* __LINUX_PWM_H */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 93f4d035..75bae8a9 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -18,15 +18,15 @@
*/
struct rtc_time {
- int tm_sec;
- int tm_min;
- int tm_hour;
- int tm_mday;
- int tm_mon;
- int tm_year;
- int tm_wday;
- int tm_yday;
- int tm_isdst;
+ int tm_sec; /* seconds, 0-59 */
+ int tm_min; /* minutes, 0-59 */
+ int tm_hour; /* hours, 0-23 */
+ int tm_mday; /* day of month, 1-31 */
+ int tm_mon; /* month, 0-11 */
+ int tm_year; /* years since 1900 */
+ int tm_wday; /* day of week, 0-6 (Sunday = 0) */
+ int tm_yday; /* day of year, 0-365 (Jan 1 = 0) */
+ int tm_isdst; /* daylight saving flag: >0 in effect, 0 not, <0 unknown */
};
/*
diff --git a/drivers/spi/dw_spi.h b/include/linux/spi/dw_spi.h
similarity index 87%
rename from drivers/spi/dw_spi.h
rename to include/linux/spi/dw_spi.h
index 7a5e78d2..3bf6c7f2 100644
--- a/drivers/spi/dw_spi.h
+++ b/include/linux/spi/dw_spi.h
@@ -4,6 +4,27 @@
#include <linux/io.h>
#include <linux/scatterlist.h>
+
+#define YU_ADD_ISR_TASKLET
+
+#ifdef CONFIG_JLINK_DEBUG
+# define DEBUG_DW_SPI0
+
+# ifdef DEBUG_DW_SPI0
+# define DW_SPI0_REG_BASE (0xf0500000)
+ #define DW_SPI0_CS_REG (0xf0300000)
+
+# else
+ #define DW_SPI_REG_BASE (0xfe400000)
+ #define DW_SPI0_CS_REG (0xfe500000)
+# endif
+
+#else
+# define DW_SPI0_CS_REG (0xfe500000)
+#endif
+
+
+
/* Bit fields in CTRLR0 */
#define SPI_DFS_OFFSET 0
@@ -138,7 +159,7 @@ struct dw_spi {
u32 dma_width;
int cs_change;
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
- void (*cs_control)(u32 command);
+ void (*cs_control)(struct spi_device *spi, u32 command);
/* Dma info */
int dma_inited;
@@ -152,9 +173,18 @@ struct dw_spi {
struct dw_spi_dma_ops *dma_ops;
void *dma_priv; /* platform relate info */
struct pci_dev *dmac;
+ void * dma_rx_dummy;
+ void * dma_tx_dummy;
/* Bus interface info */
void *priv;
+
+
+#ifdef YU_ADD_ISR_TASKLET
+ struct tasklet_struct yu_add_isr_tasklet;
+
+#endif
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
@@ -169,6 +199,10 @@ struct dw_spi {
#define dw_writew(dw, name, val) \
__raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name))
+
+#define yu_write(val,add) __raw_writel((val), (add))
+
+
static inline void spi_enable_chip(struct dw_spi *dws, int enable)
{
dw_writel(dws, ssienr, (enable ? 1 : 0));
@@ -179,17 +213,19 @@ static inline void spi_set_clk(struct dw_spi *dws, u16 div)
dw_writel(dws, baudr, div);
}
-static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
+static inline void spi_chip_sel(struct dw_spi *dws, struct spi_device *spi)
{
+ u16 cs = spi->chip_select;
if (cs > dws->num_cs)
return;
if (dws->cs_control)
- dws->cs_control(1);
+ dws->cs_control(spi, 1);
dw_writel(dws, ser, 1 << cs);
}
+
/* Disable IRQ bits */
static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
{
@@ -218,7 +254,8 @@ struct dw_spi_chip {
u8 poll_mode; /* 0 for contoller polling mode */
u8 type; /* SPI/SSP/Micrwire */
u8 enable_dma;
- void (*cs_control)(u32 command);
+ void *cs_control;
+// void (*cs_control)(u32 command);
};
extern int dw_spi_add_host(struct dw_spi *dws);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
old mode 100644
new mode 100755
index bb4f5fbb..01cc915e
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -29,6 +29,22 @@
*/
extern struct bus_type spi_bus_type;
+
+
+struct spi_master;
+struct _spi_advanced_info {
+ unsigned int ctl_wire_support;
+ /*add spi wire init func point...*/
+ /*may be null*/
+ int (*multi_wire_func_init)(struct spi_master *p_master);
+ /*change spi bus to which wire func..*/
+ #define SPI_DATA_DIR_IN (0xaa)
+ #define SPI_DATA_DIR_OUT (0xbb)
+ #define SPI_DATA_DIR_DUOLEX (0xcc)
+ void (*change_to_1_wire)(struct spi_master *p_master);
+ void (*change_to_2_wire)(struct spi_master *p_master, unsigned int dir);
+ void (*change_to_4_wire)(struct spi_master *p_master, unsigned int dir);
+};
/**
* struct spi_device - Master side proxy for an SPI slave device
* @dev: Driver model representation of the device.
@@ -66,6 +82,7 @@ extern struct bus_type spi_bus_type;
* variant with slightly different functionality; another might be
* information about how this particular board wires the chip's pins.
*/
+
struct spi_device {
struct device dev;
struct spi_master *master;
@@ -90,6 +107,15 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
+
+ /*add spi multi wire support..*/
+#define ONE_WIRE_SUPPORT (1<<0)
+#define DUAL_WIRE_SUPPORT (1<<1)
+#define QUAD_WIRE_SUPPORT (1<<2)
+#define MULTI_WIRE_SUPPORT (1<<8)
+ u32 dev_open_multi_wire_flag;
+	/* multi-wire capability info of the controller */
+ struct _spi_advanced_info *p_ctl_multi_wire_info;
/*
* likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -177,7 +203,7 @@ struct spi_driver {
int (*probe)(struct spi_device *spi);
int (*remove)(struct spi_device *spi);
void (*shutdown)(struct spi_device *spi);
- int (*suspend)(struct spi_device *spi, pm_message_t mesg);
+ int (*suspend)(struct spi_device *spi, pm_message_t mesg);
int (*resume)(struct spi_device *spi);
struct device_driver driver;
};
@@ -307,6 +333,7 @@ struct spi_master {
/* called on release() to free memory provided by spi_master */
void (*cleanup)(struct spi_device *spi);
+ struct _spi_advanced_info ctl_multi_wire_info;
};
static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -332,6 +359,32 @@ static inline void spi_master_put(struct spi_master *master)
put_device(&master->dev);
}
+static inline struct
+_spi_advanced_info *spi_master_get_advanced_data(struct spi_master *master)
+{
+ return &master->ctl_multi_wire_info;
+}
+
+static inline void
+spi_dev_set_multi_data(struct spi_master *master, struct spi_device *spi_dev)
+{
+ u32 ctl_flag;
+ u32 dev_flag;
+ ctl_flag = master->ctl_multi_wire_info.ctl_wire_support;
+ dev_flag = spi_dev->dev_open_multi_wire_flag;
+	/* if both the slave and the controller support multi-wire,
+	   the slave enables only the modes supported by both */
+ if ((ctl_flag & MULTI_WIRE_SUPPORT)
+ && (dev_flag & MULTI_WIRE_SUPPORT))
+ spi_dev->dev_open_multi_wire_flag = ctl_flag & dev_flag;
+ else {
+ spi_dev->dev_open_multi_wire_flag = 0;
+ return;
+ }
+ spi_dev->p_ctl_multi_wire_info =
+ &master->ctl_multi_wire_info;
+}
+
/* the spi driver core manages memory for the spi_master classdev */
extern struct spi_master *
@@ -436,15 +489,23 @@ struct spi_transfer {
const void *tx_buf;
void *rx_buf;
unsigned len;
-
dma_addr_t tx_dma;
dma_addr_t rx_dma;
-
unsigned cs_change:1;
u8 bits_per_word;
u16 delay_usecs;
u32 speed_hz;
-
+	/* wire mode used by this transfer; one of:
+#define ONE_WIRE_SUPPORT (1<<0)
+#define DUAL_WIRE_SUPPORT (1<<1)
+#define QUAD_WIRE_SUPPORT (1<<2)
+ */
+ u32 xfer_wire_mode;
+ /*
+#define SPI_DATA_DIR_IN (0xaa)
+#define SPI_DATA_DIR_OUT (0xbb)
+ */
+ u32 xfer_dir;
struct list_head transfer_list;
};
@@ -530,7 +591,8 @@ spi_transfer_del(struct spi_transfer *t)
* structures so long as you don't free them while they're in use.
*/
-static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
+static inline struct spi_message
+*spi_message_alloc(unsigned ntrans, gfp_t flags)
{
struct spi_message *m;
diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h
index 4ebaf082..dab0e66b 100644
--- a/include/linux/usb/ch11.h
+++ b/include/linux/usb/ch11.h
@@ -18,6 +18,16 @@
#define USB_RT_HUB (USB_TYPE_CLASS | USB_RECIP_DEVICE)
#define USB_RT_PORT (USB_TYPE_CLASS | USB_RECIP_OTHER)
+#define HUB_CHAR_LPSM 0x0003 /* Logical Power Switching Mode mask */
+#define HUB_CHAR_COMMON_LPSM 0x0000 /* All ports power control at once */
+#define HUB_CHAR_INDV_PORT_LPSM 0x0001 /* per-port power control */
+#define HUB_CHAR_NO_LPSM 0x0002 /* no power switching */
+#define HUB_CHAR_COMPOUND 0x0004 /* hub is part of a compound device */
+
+
+
+
+
/*
* Hub class requests
* See USB 2.0 spec Table 11-16
@@ -54,6 +64,7 @@
#define USB_PORT_FEAT_L1 5 /* L1 suspend */
#define USB_PORT_FEAT_POWER 8
#define USB_PORT_FEAT_LOWSPEED 9 /* Should never be used */
+#define USB_PORT_FEAT_HIGHSPEED 10
#define USB_PORT_FEAT_C_CONNECTION 16
#define USB_PORT_FEAT_C_ENABLE 17
#define USB_PORT_FEAT_C_SUSPEND 18
@@ -168,6 +179,7 @@ struct usb_port_status {
#define HUB_CHAR_LPSM 0x0003 /* D1 .. D0 */
#define HUB_CHAR_COMPOUND 0x0004 /* D2 */
#define HUB_CHAR_OCPM 0x0018 /* D4 .. D3 */
+#define HUB_CHAR_INDV_PORT_OCPM 0x0008 /* per-port Over-current reporting */
#define HUB_CHAR_TTTT 0x0060 /* D6 .. D5 */
#define HUB_CHAR_PORTIND 0x0080 /* D7 */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index dd1571db..6c180f78 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -16,7 +16,7 @@
#define __LINUX_USB_GADGET_H
#include <linux/slab.h>
-
+#include <linux/usb/ch9.h>
struct usb_ep;
/**
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 0097136b..7396dba9 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -19,10 +19,10 @@
#ifndef __USB_CORE_HCD_H
#define __USB_CORE_HCD_H
-#ifdef __KERNEL__
+//#ifdef __KERNEL__
#include <linux/rwsem.h>
-
+#include <linux/usb.h>
#define MAX_TOPO_LEVEL 6
/* This file contains declarations of usbcore internals that are mostly
@@ -505,6 +505,11 @@ extern void usb_ep0_reinit(struct usb_device *);
/* class requests from USB 3.0 hub spec, table 10-5 */
#define SetHubDepth (0x3000 | HUB_SET_DEPTH)
#define GetPortErrorCount (0x8000 | HUB_GET_PORT_ERR_COUNT)
+/*-------------------------------------------------------------------------*/
+
+/* hub.h ... DeviceRemovable in 2.4.2-ac11, gone in 2.4.10 */
+/* bleech -- resurfaced in 2.4.11 or 2.4.12 */
+#define bitmap DeviceRemovable
/*
* Generic bandwidth allocation constants/support
@@ -669,6 +674,6 @@ extern struct rw_semaphore ehci_cf_port_reset_rwsem;
#define USB_EHCI_LOADED 2
extern unsigned long usb_hcds_loaded;
-#endif /* __KERNEL__ */
+//#endif /* __KERNEL__ */
#endif /* __USB_CORE_HCD_H */
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index d87f44f5..9e480472 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -11,6 +11,12 @@
#include <linux/notifier.h>
+enum usb_dr_mode {
+ USB_DR_MODE_UNKNOWN,
+ USB_DR_MODE_HOST,
+ USB_DR_MODE_PERIPHERAL,
+ USB_DR_MODE_OTG,
+};
/* OTG defines lots of enumeration states before device reset */
enum usb_otg_state {
OTG_STATE_UNDEFINED = 0,
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 605b0aa8..3f1c2dee 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -33,6 +33,8 @@ struct usbnet {
wait_queue_head_t *wait;
struct mutex phy_mutex;
unsigned char suspend_count;
+ unsigned char pkt_cnt, pkt_err;
+ unsigned short rx_qlen, tx_qlen;
/* i/o info: pipes etc */
unsigned in, out;
@@ -69,6 +71,7 @@ struct usbnet {
# define EVENT_DEV_WAKING 6
# define EVENT_DEV_ASLEEP 7
# define EVENT_DEV_OPEN 8
+# define EVENT_RX_KILL 10
};
static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -150,6 +153,10 @@ struct driver_info {
int in; /* rx endpoint */
int out; /* tx endpoint */
+	/* driver-private info added by zhangy,
+	   2018-10-31, to fix ASIX rx_fixup losing data */
+ void *driver_priv;
+
unsigned long data; /* Misc driver specific data */
};
@@ -191,7 +198,8 @@ extern void usbnet_cdc_status(struct usbnet *, struct urb *);
enum skb_state {
illegal = 0,
tx_start, tx_done,
- rx_start, rx_done, rx_cleanup
+ rx_start, rx_done, rx_cleanup,
+ unlink_start
};
struct skb_data { /* skb->cb is one of these */
diff --git a/include/linux/usb/video.h b/include/linux/usb/video.h
index 3b3b95e0..eb3eb212 100644
--- a/include/linux/usb/video.h
+++ b/include/linux/usb/video.h
@@ -564,5 +564,75 @@ struct UVC_FRAME_MJPEG(n) { \
__u32 dwFrameInterval[n]; \
} __attribute__ ((packed))
+/* Frame Based Payload - 3.1.1. Frame Based Format Descriptor */
+struct uvc_format_frameBased {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFormatIndex;
+ __u8 bNumFrameDescriptors;
+ __u8 guidFormat[16];
+ __u8 bBitsPerPixel;
+ __u8 bDefaultFrameIndex;
+ __u8 bAspectRatioX;
+ __u8 bAspectRatioY;
+ __u8 bmInterfaceFlags;
+ __u8 bCopyProtect;
+ __u8 bVariableSize;
+} __attribute__((__packed__));
+
+#define UVC_DT_FORMAT_FRAMEBASED_SIZE 28
+
+/* Frame Based Payload - 3.1.2. Frame Based Frame Descriptor */
+/* Replace dwFrameInterval[] with these three lines
+ * when bFrameIntervalType = 0, which means continuous frame interval
+ * __u32 dwMinFrameInterval;
+ * __u32 dwMaxFrameInterval;
+ * __u32 dwFrameIntervalStep;
+ */
+struct uvc_frame_frameBased {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __u16 wWidth;
+ __u16 wHeight;
+ __u32 dwMinBitRate;
+ __u32 dwMaxBitRate;
+ __u32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __u32 dwBytesPerLine;
+ __u32 dwFrameInterval[];
+} __attribute__((__packed__));
+
+#define UVC_DT_FRAME_FRAMEBASED_SIZE(n) (26+4*(n))
+
+#define UVC_FRAME_FRAMEBASED(n) \
+ uvc_frame_frameBased_##n
+
+/* Replace dwFrameInterval[] with these three lines
+ * when bFrameIntervalType = 0, which means continuous frame interval
+ * __u32 dwMinFrameInterval;
+ * __u32 dwMaxFrameInterval;
+ * __u32 dwFrameIntervalStep;
+ */
+#define DECLARE_UVC_FRAME_FRAMEBASED(n) \
+struct UVC_FRAME_FRAMEBASED(n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __u16 wWidth; \
+ __u16 wHeight; \
+ __u32 dwMinBitRate; \
+ __u32 dwMaxBitRate; \
+ __u32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __u32 dwBytesPerLine; \
+ __u32 dwFrameInterval[n]; \
+} __packed
+
#endif /* __LINUX_USB_VIDEO_H */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 8a4c309d..74b64cd1 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -377,6 +377,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */
#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */
#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 */
+#define V4L2_PIX_FMT_H264 v4l2_fourcc('H', '2', '6', '4') /* H264 */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -1923,8 +1924,8 @@ struct v4l2_dbg_chip_ident {
/* Experimental, meant for debugging, testing and internal use.
Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
You must be root to use these ioctls. Never use these in applications! */
-#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
-#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
+#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
+#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
/* Experimental, meant for debugging, testing and internal use.
Never use this ioctl in applications! */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 396e8fc8..6ea76103 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -426,7 +426,8 @@ struct station_parameters {
* @STATION_INFO_RX_BITRATE: @rxrate fields are filled
* @STATION_INFO_BSS_PARAM: @bss_param filled
* @STATION_INFO_CONNECTED_TIME: @connected_time filled
- */
+ * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
+*/
enum station_info_flags {
STATION_INFO_INACTIVE_TIME = 1<<0,
STATION_INFO_RX_BYTES = 1<<1,
@@ -443,8 +444,10 @@ enum station_info_flags {
STATION_INFO_RX_DROP_MISC = 1<<12,
STATION_INFO_SIGNAL_AVG = 1<<13,
STATION_INFO_RX_BITRATE = 1<<14,
- STATION_INFO_BSS_PARAM = 1<<15,
- STATION_INFO_CONNECTED_TIME = 1<<16
+ STATION_INFO_BSS_PARAM = 1<<15,
+ STATION_INFO_CONNECTED_TIME = 1<<16,
+ STATION_INFO_ASSOC_REQ_IES = 1<<17
+
};
/**
@@ -536,6 +539,11 @@ struct sta_bss_parameters {
* This number should increase every time the list of stations
* changes, i.e. when a station is added or removed, so that
* userspace can tell whether it got a consistent snapshot.
+ * @assoc_req_ies: IEs from (Re)Association Request.
+ * This is used only when in AP mode with drivers that do not use
+ * user space MLME/SME implementation. The information is provided for
+ * the cfg80211_new_sta() calls to notify user space of the IEs.
+ * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
*/
struct station_info {
u32 filled;
@@ -558,6 +566,9 @@ struct station_info {
struct sta_bss_parameters bss_param;
int generation;
+
+ const u8 *assoc_req_ies;
+ size_t assoc_req_ies_len;
};
/**
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dd373c8e..dcb37b28 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1,4 +1,11 @@
+config TEST_BOOT_TIME
+ bool "Set GPIO level for measuring boot time."
+ default n
+ help
+ This option will set GPIO 4 to level high at the beginning of
+ kernel decompressing stage.
+
config PRINTK_TIME
bool "Show timing information on printks"
depends on PRINTK
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 46cbd28f..ef70bf94 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -185,10 +185,16 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
prefetchw(skb);
size = SKB_DATA_ALIGN(size);
- data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
- gfp_mask, node);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = kmalloc_node_track_caller(size, gfp_mask, node);
if (!data)
goto nodata;
+
+ /* kmalloc(size) might give us more room than requested.
+ * Put skb_shared_info exactly at the end of allocated zone,
+ * to allow max possible filling before reallocation.
+ */
+ size = SKB_WITH_OVERHEAD(ksize(data));
prefetchw(data + size);
/*
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 1f1ef70f..15a57575 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,5 +1,5 @@
config WIRELESS_EXT
- bool
+ def_bool y
config WEXT_CORE
def_bool y
@@ -14,7 +14,7 @@ config WEXT_SPY
bool
config WEXT_PRIV
- bool
+ def_bool y
config CFG80211
tristate "cfg80211 - wireless configuration API"
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1ac9443b..6496472e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2209,6 +2209,11 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
}
nla_nest_end(msg, sinfoattr);
+ if (sinfo->assoc_req_ies)
+ {
+ NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+ sinfo->assoc_req_ies);
+ }
return genlmsg_end(msg, hdr);
nla_put_failure:
@@ -2236,6 +2241,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
}
while (1) {
+ memset(&sinfo, 0, sizeof(sinfo));
err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
mac_addr, &sinfo);
if (err == -ENOENT)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 379574c3..7b0add2b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1759,6 +1759,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
static void restore_regulatory_settings(bool reset_user)
{
char alpha2[2];
+ char world_alpha2[2];
struct reg_beacon *reg_beacon, *btmp;
struct regulatory_request *reg_request, *tmp;
LIST_HEAD(tmp_reg_req_list);
@@ -1809,11 +1810,13 @@ static void restore_regulatory_settings(bool reset_user)
/* First restore to the basic regulatory settings */
cfg80211_regdomain = cfg80211_world_regdom;
+ world_alpha2[0] = cfg80211_regdomain->alpha2[0];
+ world_alpha2[1] = cfg80211_regdomain->alpha2[1];
mutex_unlock(&reg_mutex);
mutex_unlock(&cfg80211_mutex);
- regulatory_hint_core(cfg80211_regdomain->alpha2);
+ regulatory_hint_core(world_alpha2);
/*
* This restores the ieee80211_regdom module parameter
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 413c5369..ba3f0c98 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1694,7 +1694,7 @@ static void read_symbols(char *modname)
if (version)
maybe_frob_rcs_version(modname, version, info.modinfo,
version - (char *)info.hdr);
- if (version || (all_versions && !is_vmlinux(modname)))
+ if ((all_versions && !is_vmlinux(modname)))
get_src_version(modname, mod->srcversion,
sizeof(mod->srcversion)-1);
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 885683a3..4eb638bd 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -27,7 +27,26 @@ config SND_PXA2XX_LIB
config SND_PXA2XX_LIB_AC97
bool
+
+config SND_FH_LIB
+ tristate
+ select SND_FH_CODEC if SND_FH_LIB_AC97
+config SND_FH_LIB_AC97
+ bool
+config SND_FH_PCM
+ bool
+config SND_FH_AC97
+	tristate "AC97 driver for the FullHan FH chip"
+
+ select SND_FH_PCM
+ select SND_AC97_CODEC
+ select SND_FH_LIB
+ select SND_FH_LIB_AC97
+ help
+ Say Y or M if you want to support any AC97 codec attached to
+ the fh81 AC97 interface.
+
config SND_PXA2XX_AC97
tristate "AC97 driver for the Intel PXA2xx chip"
depends on ARCH_PXA
diff --git a/sound/arm/Makefile b/sound/arm/Makefile
index 8c0c851d..72cd341a 100644
--- a/sound/arm/Makefile
+++ b/sound/arm/Makefile
@@ -7,10 +7,16 @@ snd-aaci-objs := aaci.o
obj-$(CONFIG_SND_PXA2XX_PCM) += snd-pxa2xx-pcm.o
snd-pxa2xx-pcm-objs := pxa2xx-pcm.o
+obj-$(CONFIG_SND_FH_PCM) += snd-fh-pcm.o
+snd-fh-pcm-objs := fh_pcm.o
obj-$(CONFIG_SND_PXA2XX_LIB) += snd-pxa2xx-lib.o
+obj-$(CONFIG_SND_FH_LIB) += snd-fh-lib.o
snd-pxa2xx-lib-y := pxa2xx-pcm-lib.o
+snd-fh-lib-y := fh-pcm-lib.o
snd-pxa2xx-lib-$(CONFIG_SND_PXA2XX_LIB_AC97) += pxa2xx-ac97-lib.o
-
+snd-fh-lib-$(CONFIG_SND_FH_LIB_AC97) += fh-ac97-lib.o
obj-$(CONFIG_SND_PXA2XX_AC97) += snd-pxa2xx-ac97.o
+obj-$(CONFIG_SND_FH_AC97) += snd-fh-ac97.o
snd-pxa2xx-ac97-objs := pxa2xx-ac97.o
+snd-fh-ac97-objs := fh-ac97.o
\ No newline at end of file
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 98175a09..cd9b5329 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -119,6 +119,11 @@ config SND_SOC_AC97_CODEC
tristate
select SND_AC97_CODEC
+config SND_SOC_FH_CODEC
+ tristate
+ select SND_FH_CODEC
+ select SND_AC97_CODEC
+
config SND_SOC_AD1836
tristate
@@ -222,6 +227,9 @@ config SND_SOC_STAC9766
config SND_SOC_TLV320AIC23
tristate
+config SND_SOC_FSH0LS029AA
+ tristate
+
config SND_SOC_TLV320AIC26
tristate "TI TLV320AIC26 Codec support" if SND_SOC_OF_SIMPLE
depends on SPI
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index fd855840..116edc8a 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -1,5 +1,6 @@
snd-soc-88pm860x-objs := 88pm860x-codec.o
snd-soc-ac97-objs := ac97.o
+snd-soc-fh-objs := fh.o
snd-soc-ad1836-objs := ad1836.o
snd-soc-ad193x-objs := ad193x.o
snd-soc-ad1980-objs := ad1980.o
@@ -30,6 +31,7 @@ snd-soc-spdif-objs := spdif_transciever.o
snd-soc-ssm2602-objs := ssm2602.o
snd-soc-stac9766-objs := stac9766.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
+snd-soc-fsh0ls029aa-objs := fsh0ls029aa.o
snd-soc-tlv320aic26-objs := tlv320aic26.o
snd-soc-tlv320aic3x-objs := tlv320aic3x.o
snd-soc-tlv320aic32x4-objs := tlv320aic32x4.o
@@ -91,6 +93,7 @@ snd-soc-wm9090-objs := wm9090.o
obj-$(CONFIG_SND_SOC_88PM860X) += snd-soc-88pm860x.o
obj-$(CONFIG_SND_SOC_AC97_CODEC) += snd-soc-ac97.o
+obj-$(CONFIG_SND_SOC_FH_CODEC) += snd-soc-fh.o
obj-$(CONFIG_SND_SOC_AD1836) += snd-soc-ad1836.o
obj-$(CONFIG_SND_SOC_AD193X) += snd-soc-ad193x.o
obj-$(CONFIG_SND_SOC_AD1980) += snd-soc-ad1980.o
@@ -122,6 +125,7 @@ obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif.o
obj-$(CONFIG_SND_SOC_SSM2602) += snd-soc-ssm2602.o
obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o
obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
+obj-$(CONFIG_SND_SOC_FSH0LS029AA) += snd-soc-fsh0ls029aa.o
obj-$(CONFIG_SND_SOC_TLV320AIC26) += snd-soc-tlv320aic26.o
obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
obj-$(CONFIG_SND_SOC_TVL320AIC32X4) += snd-soc-tlv320aic32x4.o
diff --git a/sound/soc/codecs/fsh0ls029aa.c b/sound/soc/codecs/fsh0ls029aa.c
new file mode 100644
index 00000000..10dec074
--- /dev/null
+++ b/sound/soc/codecs/fsh0ls029aa.c
@@ -0,0 +1,99 @@
+/*
+ * ad73311.c -- ALSA Soc AD73311 codec support
+ *
+ * Copyright: Analog Device Inc.
+ * Author: Cliff Cai <cliff.cai@analog.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include "fsh0ls029aa.h"
+static int ak4104_set_dai_fmt(){
+
+ return 0;
+}
+static int ak4104_hw_params(){
+
+ return 0;
+}
+static int ak4104_sys_params(){
+
+ return 0;
+}
+static struct snd_soc_dai_ops ak4101_dai_ops = {
+ .hw_params = ak4104_hw_params,
+ .set_fmt = ak4104_set_dai_fmt,
+ .set_sysclk=ak4104_sys_params,
+};
+
+static struct snd_soc_dai_driver ad73311_dai = {
+ .name = "fh-acodec-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE, },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE, },
+ .ops=&ak4101_dai_ops,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_ad73311;
+
+static int ad73311_probe(struct platform_device *pdev)
+{
+// printk("ad73311 probe \n");
+ return snd_soc_register_codec(&pdev->dev,
+ &soc_codec_dev_ad73311, &ad73311_dai, 1);
+}
+
+static int __devexit ad73311_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver ad73311_codec_driver = {
+ .driver = {
+ .name = "fh-acodec",
+ .owner = THIS_MODULE,
+ },
+
+ .probe = ad73311_probe,
+ .remove = __devexit_p(ad73311_remove),
+};
+
+static int __init ad73311_init(void)
+{
+ return platform_driver_register(&ad73311_codec_driver);
+}
+module_init(ad73311_init);
+
+static void __exit ad73311_exit(void)
+{
+ platform_driver_unregister(&ad73311_codec_driver);
+}
+module_exit(ad73311_exit);
+
+MODULE_DESCRIPTION("ASoC ad73311 driver");
+MODULE_AUTHOR("Cliff Cai ");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/fsh0ls029aa.h b/sound/soc/codecs/fsh0ls029aa.h
new file mode 100644
index 00000000..4b353eef
--- /dev/null
+++ b/sound/soc/codecs/fsh0ls029aa.h
@@ -0,0 +1,88 @@
+/*
+ * File: sound/soc/codec/ad73311.h
+ * Based on:
+ * Author: Cliff Cai <cliff.cai@analog.com>
+ *
+ * Created: Thur Sep 25, 2008
+ * Description: definitions for AD73311 registers
+ *
+ *
+ * Modified:
+ * Copyright 2006 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __AD73311_H__
+#define __AD73311_H__
+
+#define AD_CONTROL 0x8000
+#define AD_DATA 0x0000
+#define AD_READ 0x4000
+#define AD_WRITE 0x0000
+
+/* Control register A */
+#define CTRL_REG_A (0 << 8)
+
+#define REGA_MODE_PRO 0x00
+#define REGA_MODE_DATA 0x01
+#define REGA_MODE_MIXED 0x03
+#define REGA_DLB 0x04
+#define REGA_SLB 0x08
+#define REGA_DEVC(x) ((x & 0x7) << 4)
+#define REGA_RESET 0x80
+
+/* Control register B */
+#define CTRL_REG_B (1 << 8)
+
+#define REGB_DIRATE(x) (x & 0x3)
+#define REGB_SCDIV(x) ((x & 0x3) << 2)
+#define REGB_MCDIV(x) ((x & 0x7) << 4)
+#define REGB_CEE (1 << 7)
+
+/* Control register C */
+#define CTRL_REG_C (2 << 8)
+
+#define REGC_PUDEV (1 << 0)
+#define REGC_PUADC (1 << 3)
+#define REGC_PUDAC (1 << 4)
+#define REGC_PUREF (1 << 5)
+#define REGC_REFUSE (1 << 6)
+
+/* Control register D */
+#define CTRL_REG_D (3 << 8)
+
+#define REGD_IGS(x) (x & 0x7)
+#define REGD_RMOD (1 << 3)
+#define REGD_OGS(x) ((x & 0x7) << 4)
+#define REGD_MUTE (1 << 7)
+
+/* Control register E */
+#define CTRL_REG_E (4 << 8)
+
+#define REGE_DA(x) (x & 0x1f)
+#define REGE_IBYP (1 << 5)
+
+/* Control register F */
+#define CTRL_REG_F (5 << 8)
+
+#define REGF_SEEN (1 << 5)
+#define REGF_INV (1 << 6)
+#define REGF_ALB (1 << 7)
+
+#endif
diff --git a/sound/soc/dwc/Kconfig b/sound/soc/dwc/Kconfig
new file mode 100644
index 00000000..ae37a7a6
--- /dev/null
+++ b/sound/soc/dwc/Kconfig
@@ -0,0 +1,54 @@
+config SND_FULLHAN_SOC
+ tristate "SoC Audio for the FULLHAN System-on-Chip"
+ help
+	  Say Y or M if you want to add support for codecs attached to
+	  the FullHan audio interface. You will also need
+	  to select the audio interfaces to support below.
+
+config SND_FULLHAN_SOC_SSC
+ tristate
+ depends on SND_FULLHAN_SOC
+ help
+	  Say Y or M if you want to add support for codecs on the
+	  FullHan SSC interface. You will also need to select the individual
+	  machine drivers to support below.
+
+config SND_FULLHAN_SOC_SAM9G20_WM8731
+ tristate "SoC Audio support for WM8731-based At91sam9g20 evaluation board"
+ depends on ATMEL_SSC && ARCH_AT91SAM9G20 && SND_ATMEL_SOC && \
+ AT91_PROGRAMMABLE_CLOCKS
+ select SND_FULLHAN_SOC_SSC
+ select SND_SOC_WM8731
+ help
+ Say Y if you want to add support for SoC audio on WM8731-based
+ AT91sam9g20 evaluation board.
+
+config SND_FULLHAN_SOC_PLAYPAQ
+ tristate "SoC Audio support for PlayPaq with WM8510"
+ depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
+ select SND_ATMEL_SOC_SSC
+ select SND_SOC_WM8510
+ help
+ Say Y or M here if you want to add support for SoC audio
+ on the LRS PlayPaq.
+config SND_FH_SOC_I2S
+ tristate
+
+
+config SND_FULLHAN_SOC_PLAYPAQ_SLAVE
+ bool "Run CODEC on PlayPaq in slave mode"
+ depends on SND_FULLHAN_SOC_PLAYPAQ
+ default n
+ help
+ Say Y if you want to run with the AT32 SSC generating the BCLK
+ and FRAME signals on the PlayPaq. Unless you want to play
+ with the AT32 as the SSC master, you probably want to say N here,
+ as this will give you better sound quality.
+
+config SND_FULLHAN_SOC_FH
+ tristate "SoC Audio support for fullhan-81 board"
+ select SND_FULLHAN_SOC_SSC
+ select SND_SOC_FSH0LS029AA
+ select SND_FH_SOC_I2S
+ help
+ Say Y here to support sound on fh81 board.
diff --git a/sound/soc/dwc/Makefile b/sound/soc/dwc/Makefile
new file mode 100644
index 00000000..96f58598
--- /dev/null
+++ b/sound/soc/dwc/Makefile
@@ -0,0 +1,20 @@
+# FullHan Platform Support
+snd-soc-fullhan-pcm-objs := fullhan-pcm.o
+#snd-soc-fullhan_ssc_dai-objs := fh_i2s_dai.o
+
+obj-$(CONFIG_SND_FULLHAN_SOC) += snd-soc-fullhan-pcm.o
+#obj-$(CONFIG_SND_FULLHAN_SOC_SSC) += snd-soc-fullhan_ssc_dai.o
+
+# AT91 Machine Support
+snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
+
+# AT32 Machine Support
+snd-soc-playpaq-objs := playpaq_wm8510.o
+snd-soc-fh-objs := fh.o dma.o
+obj-$(CONFIG_SND_FH_SOC_I2S) += snd-soc-fh-i2s.o
+snd-soc-fh-i2s-objs := fh_i2s.o
+
+
+obj-$(CONFIG_SND_FULLHAN_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
+obj-$(CONFIG_SND_FULLHAN_SOC_PLAYPAQ) += snd-soc-playpaq.o
+obj-$(CONFIG_SND_FULLHAN_SOC_FH) += snd-soc-fh.o
diff --git a/sound/soc/dwc/dma.c b/sound/soc/dwc/dma.c
new file mode 100644
index 00000000..4dd098ce
--- /dev/null
+++ b/sound/soc/dwc/dma.c
@@ -0,0 +1,636 @@
+
+/*******************************************
+ *
+ * new drive add by xuww
+ *
+ *
+ * **********************************/
+/**
+* @file
+* @brief
+* @version
+* @author xuww
+* @date
+* @note
+*
+*
+* @copy
+*
+* This software is the property of Shanghai Fullhan Microelectronics Co., Ltd. and may not be copied or redistributed without the company's permission;
+* the company reserves the right to pursue any unauthorized use.
+*
+* <h1><center>&copy; COPYRIGHT 2013 fullhan</center></h1>
+*/
+/* Includes ------------------------------------------------------------------*/
+#include "dma.h"
+#include <mach/fh_predefined.h>
+
+#define MAX_DMA_CHANS (4)
+
+/* Private typedef -----------------------------------------------------------*/
+/* Private define ------------------------------------------------------------*/
+/* Private macro -------------------------------------------------------------*/
+/* Private variables ---------------------------------------------------------*/
+/* Private function prototypes -----------------------------------------------*/
+/* Private functions ---------------------------------------------------------*/
+/*******************************************************************************
+* Function Name : Dma_GetChanStatus
+* Description : get the channel status
+* Input : nChanID :channel ID
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+int Dma_GetChanStatus( int nChanID )
+{
+	/* Returns the raw ChEnReg bit for this channel: nonzero (1 << nChanID) while the channel is enabled/busy, 0 when idle. */
+	return (int)(GET_REG( REG_DMAC_CHAN_EN ) & ( 1 << nChanID ));
+}
+/*******************************************************************************
+* Function Name : Dma_EnableChan
+* Description : enable channel
+* Input : nChanID :channel ID
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_EnableChan( int nChanID )
+{
+	int nMask = (1 << (nChanID + 8)) | ( 1 << nChanID );	/* ChEnReg: bit n = enable, bit n+8 = its write-enable */
+
+	SET_REG_M( REG_DMAC_CHAN_EN, nMask, nMask );	/* set both so the hardware accepts the enable bit */
+}
+/*******************************************************************************
+* Function Name : Dma_DisableChan
+* Description : disable channel
+* Input : nChanID :channel ID
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_DisableChan( int nChanID )
+{
+	int nMask = ( 1 << ( nChanID + 8 ) );	/* write-enable bit only */
+
+	SET_REG_M( REG_DMAC_CHAN_EN, nMask, (nMask + (1 << nChanID)) );	/* WE=1, enable bit=0 -> request channel off */
+	while( GET_REG( REG_DMAC_CHAN_EN) & (1 << nChanID) );	/* busy-wait: HW clears the bit when the channel has drained */
+}
+/*******************************************************************************
+* Function Name : Dma_ClearIsrBit
+* Description : clear the interruput bit
+* Input : iChan :channel ID nMask :unchange bit
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_ClearIsrBit( int iChan, int nMask )
+{
+ if( nMask & DMA_INTT_TXR)
+ SET_REG( REG_DMAC_INTCLR_TFR, 1 << iChan );
+ if( nMask & DMA_INTT_BLOCK)
+ SET_REG( REG_DMAC_INTCLR_BLK, 1 << iChan );
+ if( nMask & DMA_INTT_SOURCE )
+ SET_REG( REG_DMAC_INTCLR_SRCTXR, 1 << iChan );
+ if( nMask & DMA_INTT_DEST )
+ SET_REG( REG_DMAC_INTCLR_DSTTXR, 1 << iChan );
+ if( nMask & DMA_INTT_ERR )
+ SET_REG( REG_DMAC_INTCLR_ERR, 1 << iChan );
+}
+/*******************************************************************************
+* Function Name : Dma_EnableIsrBit
+* Description : enable intruput bit
+* Input : iChan :channel ID nMask:unchange bit
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_EnableIsrBit( int iChan, int nMask )
+{
+ int nEnable = (1 << (iChan + 8)) | (1 << iChan);
+
+ if( nMask & DMA_INTT_TXR)
+ SET_REG_M( REG_DMAC_INTMSK_TFR, nEnable, nEnable );
+ if( nMask & DMA_INTT_BLOCK)
+ SET_REG_M( REG_DMAC_INTMSK_BLK, nEnable, nEnable );
+ if( nMask & DMA_INTT_SOURCE)
+ SET_REG_M( REG_DMAC_INTMSK_SRCTXR, nEnable, nEnable );
+ if( nMask & DMA_INTT_DEST)
+ SET_REG_M( REG_DMAC_INTMSK_DSTTXR, nEnable, nEnable );
+ if( nMask & DMA_INTT_ERR)
+ SET_REG_M( REG_DMAC_INTMSK_ERR, nEnable, nEnable );
+
+ SET_REG_M( REG_DMAC_CTXi(iChan), 1, 1 ); // Enable isr.
+}
+/*******************************************************************************
+* Function Name : Dma_DisableIsrBit
+* Description : disbale interruput
+* Input : iChan:channel ID nMask:unchange bit
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+
+void Dma_DisableIsrBit( int iChan, int nMask )
+{
+	int nEnable = (1 << (iChan + 8)) | (1 << iChan);	/* MaskX regs: bit n = unmask, bit n+8 = write-enable */
+
+	if( nMask & DMA_INTT_TXR)
+		SET_REG_M( REG_DMAC_INTMSK_TFR, 1 << (iChan + 8), nEnable );	/* WE=1, mask bit=0 -> interrupt masked */
+	if( nMask & DMA_INTT_BLOCK)
+		SET_REG_M( REG_DMAC_INTMSK_BLK, 1 << (iChan + 8), nEnable );
+	if( nMask & DMA_INTT_SOURCE)
+		SET_REG_M( REG_DMAC_INTMSK_SRCTXR, 1 << (iChan + 8), nEnable );
+	if( nMask & DMA_INTT_DEST)
+		SET_REG_M( REG_DMAC_INTMSK_DSTTXR, 1 << (iChan + 8), nEnable );
+	if( nMask & DMA_INTT_ERR)
+		SET_REG_M( REG_DMAC_INTMSK_ERR, 1 << (iChan + 8), nEnable );	/* was a no-op: wrote 0 to write-only INTCLR regs */
+}
+/*******************************************************************************
+* Function Name : Dma_QueryISRStatus
+* Description : not use
+* Input : None
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+int Dma_QueryISRStatus( int iChan )
+{
+ return 0;
+}
+/*******************************************************************************
+* Function Name : Dma_ClearTfrDone
+* Description : clear tfr Done bit
+* Input : iChan:channel ID
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_ClearTfrDone(int iChan )
+{
+ SET_REG_M( REG_DMAC_CTXi(iChan) + 4, 0, 1 << 12 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetTxrSize
+* Description : set txr size
+* Input : iChan:channel ID nByes:size
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetTxrSize( int iChan, int nBytes )
+{
+	if( nBytes > 4095 )
+		nBytes = 4095;	/* BLOCK_TS field (CTLx high word bits 11:0) is 12 bits wide */
+	SET_REG_M( REG_DMAC_CTXi(iChan) + 4, nBytes, 0xfff );	/* NOTE(review): BLOCK_TS counts source-width beats, not bytes -- confirm callers */
+}
+/*******************************************************************************
+* Function Name : Dma_SetSrcWidth
+* Description : set source width
+* Input : iChan:channel ID nWidth :fifo width
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetSrcWidth( int iChan, int nWidth )
+{
+ SET_REG_M( REG_DMAC_CTXi(iChan), nWidth << 4, 0x70 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetDstWidth
+* Description : set destination
+* Input : iChan:channel ID nWidth :fifo width
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetDstWidth( int iChan, int nWidth )
+{
+ SET_REG_M( REG_DMAC_CTXi(iChan), nWidth << 1, 0xe );
+}
+/*******************************************************************************
+* Function Name : Dma_SetSrcSize
+* Description : set source size
+* Input : iChan :channel ID nSize : fifo depth
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetSrcSize( int iChan, int nSize ) // burst size
+{
+ SET_REG_M( REG_DMAC_CTXi(iChan), nSize << 14, 0x1c000 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetDstSize
+* Description : set destination size
+* Input : iChan :channel ID nSize : fifo depth
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetDstSize( int iChan, int nSize )
+{
+ SET_REG_M( REG_DMAC_CTXi(iChan), nSize << 11, 0x3800 );
+}
+/*******************************************************************************
+* Function Name : Dma_EnableSrcBlkChain
+* Description : enable source block chain
+* Input : iChan:channel ID
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_EnableSrcBlkChain(int iChan )
+{
+ SET_REG_M( REG_DMAC_CTXi(iChan), 1 << 28, 1 << 28 );
+}
+/*******************************************************************************
+* Function Name : Dma_EnableDstBlkChain
+* Description : enable destinationg block chain
+* Input : iChan:channel ID
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_EnableDstBlkChain(int iChan )
+{
+ SET_REG_M( REG_DMAC_CTXi(iChan), 1 << 27, 1 << 27 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetTxrType
+* Description : set txr mode
+* Input : iChan:channel ID nMode :transation mode
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetTxrType( int iChan, int nMode )
+{
+ if( nMode >= 0 && nMode < DMA_TTFC_INVALID )
+ {
+ SET_REG_M( REG_DMAC_CTXi(iChan), nMode << 20, 0x7 << 20 );
+ }
+}
+/*******************************************************************************
+* Function Name  : Dma_SetSrcIncDirection
+* Description : set source address increment decrement or not change
+* Input : iChan:channel ID nDir :0 :increment 1:decrement other :not change
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetSrcIncDirection( int iChan, int nDir )
+{
+	/* SINC is the 2-bit field CTLx[10:9]; always mask the whole field (0x600) so a previous setting cannot leak through. */
+	if( nDir == DMA_DIR_INC )
+		SET_REG_M( REG_DMAC_CTXi(iChan), 0, 0x600 );
+	else if( nDir == DMA_DIR_DEC )
+		SET_REG_M( REG_DMAC_CTXi(iChan), 0x200, 0x600 );	/* was mask 0x200: left a stale no-change bit (0x400) set */
+	else
+		SET_REG_M( REG_DMAC_CTXi(iChan), 0x400, 0x600 );	/* was mask 0x400: left a stale decrement bit (0x200) set */
+}
+/*******************************************************************************
+* Function Name : Dma_SetDstIncDirection
+* Description : set destination address increment decrement or not change
+* Input : iChan:channel ID nDir :0 :increment 1:decrement other :not change
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetDstIncDirection( int iChan, int nDir )
+{
+	/* DINC is the 2-bit field CTLx[8:7]; always mask the whole field (0x180) so a previous setting cannot leak through. */
+	if( nDir == DMA_DIR_INC )
+		SET_REG_M( REG_DMAC_CTXi(iChan), 0, 0x180 );
+	else if( nDir == DMA_DIR_DEC )
+		SET_REG_M( REG_DMAC_CTXi(iChan), 0x80, 0x180 );	/* was mask 0x080: left a stale no-change bit (0x100) set */
+	else
+		SET_REG_M( REG_DMAC_CTXi(iChan), 0x100, 0x180 );	/* was mask 0x100: left a stale decrement bit (0x080) set */
+}
+/*******************************************************************************
+* Function Name : Dma_EnableGather
+* Description : set enable gather
+* Input : iChan :channel ID bEnable :0 disable 1:enable
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_EnableGather(int iChan, int bEnable )
+{
+ int v;
+ if( bEnable ) v = 1;
+ else v = 0;
+ SET_REG_M( REG_DMAC_CTXi(iChan), v << 17, 1 << 17 );
+}
+/*******************************************************************************
+* Function Name : Dma_EnableScatter
+* Description : set enable scatter
+* Input : iChan :channel ID bEnable :0 disable 1:enable
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_EnableScatter(int iChan, int bEnable )
+{
+ int v = 0;
+ if( bEnable ) v = 1;
+ SET_REG_M( REG_DMAC_CTXi(iChan), v << 18, 1 << 18 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetSrcHsMode
+* Description : set the source handshaking mode
+* Input : iChan:channe ID nMode:0 hardware 1:software
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetSrcHsMode( int iChan, int nMode )
+{
+ nMode &= 0x1;
+ SET_REG_M( REG_DMAC_CFGi(iChan), nMode << 11, 1 << 11 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetDstHsMode
+* Description : set the destination handshaking mode
+* Input : iChan:channe ID nMode:0 hardware 1:software
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetDstHsMode( int iChan, int nMode )
+{
+ nMode &= 1;
+ SET_REG_M( REG_DMAC_CFGi(iChan), nMode << 10, 1 << 10 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetFifoMode
+* Description : set fifo request transation mode
+* Input : iChan :channel nMode :1:half fifo or 0:enough one burst
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetFifoMode( int iChan, int nMode )
+{
+ nMode &= 1;
+ SET_REG_M( REG_DMAC_CFGi(iChan) + 4, nMode << 1, 1 << 1 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetFlowCtrl
+* Description : set dam flow control :source or destionation
+* Input : iChan :channel ID ctrl:0: source 1:destinationg
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetFlowCtrl( int iChan, int ctrl )
+{
+ ctrl &= 1;
+ SET_REG_M( REG_DMAC_CFGi(iChan) + 4, ctrl, 1 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetSrcAutoload
+* Description : set destination auto load the init address
+* Input : iChan :channel ID bEnable :enable or disable
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetSrcAutoload(int iChan, int bEnable )
+{
+ int v = bEnable ? 1 : 0;
+ SET_REG_M( REG_DMAC_CFGi(iChan), v << 30, 1 << 30 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetDstAutoload
+* Description : set destination auto load the init address
+* Input : iChan :channel ID bEnable :enable or disable
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetDstAutoload(int iChan, int bEnable )
+{
+ int v = bEnable ? 1 : 0;
+ SET_REG_M( REG_DMAC_CFGi(iChan), v << 31, 1 << 31 );
+}
+/*******************************************************************************
+* Function Name : Write_Regm
+* Description : write the reg mask the unchange bit
+* Input : addr: reg address value :reg value mask :unchange bit
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+static void Write_Regm( unsigned int addr, unsigned int value, unsigned int mask )
+{
+	/* Read-modify-write: update only the bits selected by mask. Duplicates SET_REG_M; kept for Dma_SetMaxBurst. */
+	unsigned int tmp = GET_REG(addr);
+	tmp &= ~mask;	/* clear the field */
+	value &= mask;	/* keep only in-field bits of the new value */
+	tmp |= value;
+	SET_REG(addr, tmp);
+}
+/*******************************************************************************
+* Function Name : Dma_SetMaxBurst
+* Description : set the max burst size
+* Input : iChan:channel ID nSize : burst size
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetMaxBurst( int iChan, int nSize )
+{
+ if( nSize > 1023 )
+ nSize = 1023;
+ Write_Regm( REG_DMAC_CFGi(iChan), (nSize << 20), 0x3ff00000 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetSrcHsPol
+* Description : set the source handshaking polatity
+* Input : iChan:channel ID nPol: polarity high or low
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetSrcHsPol( int iChan, int nPol )
+{
+ nPol &= 1;
+ SET_REG_M( REG_DMAC_CFGi(iChan), nPol << 19, 1 << 19 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetDstHsPol
+* Description : set the destination handshaking polatity
+* Input : iChan:channel ID nPol: polarity high or low
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetDstHsPol( int iChan, int nPol )
+{
+ nPol &= 1;
+ SET_REG_M( REG_DMAC_CFGi(iChan), nPol << 18, 1 << 18 );
+}
+/*******************************************************************************
+* Function Name : Dma_SetLinkEntry
+* Description : enbale the link list
+* Input : iChan:channel ID nAddr :link list address
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetLinkEntry( int iChan, unsigned int nAddr )
+{
+ nAddr &= 0xfffffffc;
+ // force to use AHB Master 0, for this is the only AHB Master.
+ SET_REG_M( REG_DMAC_LLPi(iChan), nAddr, 0xffffffff );
+}
+/*******************************************************************************
+* Function Name : Dma_SetSrcAddress
+* Description : set source address
+* Input : iChan:channel ID nAddr:destination address
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetSrcAddress( int iChan, unsigned int nAddr )
+{
+ SET_REG_M( REG_DMAC_SARi(iChan), nAddr, 0xffffffff );
+}
+/*******************************************************************************
+* Function Name : Dma_SetDstAddress
+* Description : set destination address
+* Input : iChan:channel ID nAddr:destination address
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetDstAddress( int iChan, unsigned int nAddr )
+{
+ SET_REG_M( REG_DMAC_DARi(iChan), nAddr, 0xffffffff );
+}
+/*******************************************************************************
+* Function Name : Dma_SetSrcPe
+* Description : select the hardshaking interface
+* Input : iChan:channel ID nPer:handshaking interface
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetSrcPer( int iChan, unsigned int nPer )
+{
+ if( nPer < DMA_HSP_INVALID )
+ {
+ SET_REG_M( REG_DMAC_CFGi(iChan) + 4, nPer << 7, 0xf << 7 );
+ }
+}
+/*******************************************************************************
+* Function Name : Dma_SetDstPer
+* Description : select the hardshaking interface
+* Input : iChan:channel ID nPer:handshaking interface
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_SetDstPer( int iChan, unsigned int nPer )
+{
+ if( nPer < DMA_HSP_INVALID )
+ {
+ SET_REG_M( REG_DMAC_CFGi(iChan) + 4, nPer << 11, 0xf << 11 );
+ }
+}
+/*******************************************************************************
+* Function Name : Dma_GetIsrChan
+* Description : get the status of dam interruput
+* Input : iChan :channel ID
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+unsigned int Dma_GetIsrChan(unsigned int nMask)
+{
+ if( nMask & DMA_INTT_TXR )
+ return GET_REG(REG_DMAC_INTSTAT_TFR);
+ if( nMask & DMA_INTT_BLOCK )
+ return GET_REG(REG_DMAC_INTSTAT_BLK);
+ if( nMask & DMA_INTT_SOURCE )
+ return GET_REG(REG_DMAC_INTSTAT_SRCTXR);
+ if( nMask & DMA_INTT_DEST )
+ return GET_REG(REG_DMAC_INTSTAT_DSTTXR);
+ if( nMask & DMA_INTT_ERR )
+ return GET_REG(REG_DMAC_INTSTAT_ERR);
+
+ return 0;
+}
+/*******************************************************************************
+* Function Name : Dma_StartSrctfr
+* Description : dma source start transaction
+* Input : iChan:channel
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_StartSrctfr( int iChan )
+{
+	int nMask = 0x101 << (iChan);	/* request bit | its write-enable, per channel */
+
+	SET_REG( REG_DMAC_REQSRC, nMask );	/* was hard-coded 0x101: only ever started channel 0 (cf. Dma_StartDsttfr) */
+	SET_REG( REG_DMAC_SGLREQSRC, nMask );
+}
+/*******************************************************************************
+* Function Name : Dma_StartDsttfr
+* Description : destination transaction request
+* Input : iChan: channel ID
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_StartDsttfr( int iChan )
+{
+ int nMask = 0x101 << (iChan);
+ SET_REG( REG_DMAC_REQDST, nMask );
+ SET_REG( REG_DMAC_SGLREQDST, nMask );
+}
+/*******************************************************************************
+* Function Name : fh_dma_init
+* Description : dma init
+* Input : None
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void fh_dma_init(void)	/* (void): an empty parameter list is an unprototyped declaration in C */
+{
+	Dma_Init();	/* thin public wrapper around the controller reset/default setup */
+}
+/*******************************************************************************
+* Function Name : Dma_Init
+* Description : dma init
+* Input : None
+* Output : None
+* Return : None
+*
+ *******************************************************************************/
+void Dma_Init()
+{
+	/* Enable the DMA controller and program every channel to a safe, interrupt-clean default state. */
+	int i;
+	SET_REG( REG_DMAC_CFG_REG, 1 );	/* global DMAC enable (DmaCfgReg bit 0) */
+
+	for( i = 0; i < MAX_DMA_CHANS; i ++ )
+	{
+		Dma_DisableChan(i);
+		Dma_ClearIsrBit( i, DMA_INTT_TXR | DMA_INTT_BLOCK | DMA_INTT_SOURCE | DMA_INTT_DEST | DMA_INTT_ERR );	/* drop any stale interrupts */
+		Dma_ClearTfrDone(i);
+		Dma_SetTxrType(i, DMA_TTFC_M2P_DMAC);	/* default: memory-to-peripheral, DMAC as flow controller */
+		Dma_SetSrcWidth( i, DMA_TXR_32BITS );
+		Dma_SetSrcSize( i, DMA_BURST_8 );
+		Dma_SetDstWidth( i, DMA_TXR_8BITS );
+		Dma_SetDstSize( i, DMA_BURST_8 );
+		Dma_SetSrcHsPol( i, DMA_HSPOL_ACTHIGH );
+		Dma_SetDstHsPol( i, DMA_HSPOL_ACTHIGH );
+		Dma_SetSrcIncDirection( i, DMA_DIR_INC );	/* memory side increments, peripheral side stays fixed */
+		Dma_SetDstIncDirection( i, DMA_DIR_UNCHG );
+		Dma_SetSrcHsMode( i, DMA_HSMODE_SOFTWARE );
+		Dma_SetDstHsMode( i, DMA_HSMODE_SOFTWARE );
+		Dma_SetMaxBurst( i, 0 );	/* 0 = no AMBA burst-length limit */
+		Dma_SetFifoMode( i, 0 );
+		Dma_SetLinkEntry( i, 0 );	/* no linked-list chaining by default */
+		Dma_EnableGather( i, 0 );
+		Dma_EnableScatter( i, 0 );
+	}
+}
diff --git a/sound/soc/dwc/dma.h b/sound/soc/dwc/dma.h
new file mode 100644
index 00000000..81fccc3f
--- /dev/null
+++ b/sound/soc/dwc/dma.h
@@ -0,0 +1,212 @@
+
+/*********************************
+ *
+ * new drive add by xuww
+ *
+ * ****************************/
+#ifndef DMA__H
+#define DMA__H
+
+
+#if(1)
+enum
+{
+ DMA_INTT_TXR = 1,
+ DMA_INTT_BLOCK = 2,
+ DMA_INTT_SOURCE = 4,
+ DMA_INTT_DEST = 8,
+ DMA_INTT_ERR = 16
+};
+
+enum
+{
+ DMA_TXR_8BITS = 0,
+ DMA_TXR_16BITS = 1,
+ DMA_TXR_32BITS = 2,
+ DMA_TXR_64BITS = 3,
+ DMA_TXR_128BITS = 4,
+ DMA_TXR_256BITS = 5,
+ DMA_TXR_INVALID = 6
+};
+
+enum
+{
+ DMA_BURST_1 = 0,
+ DMA_BURST_4 = 1,
+ DMA_BURST_8 = 2,
+ DMA_BURST_16 = 3,
+ DMA_BURST_32 = 4,
+ DMA_BURST_64 = 5,
+ DMA_BURST_128 = 6,
+ DMA_BURST_256 = 7,
+ DMA_BURST_INVALID = 8
+};
+
+enum
+{
+ DMA_TTFC_M2M_DMAC,
+ DMA_TTFC_M2P_DMAC,
+ DMA_TTFC_P2M_DMAC,
+ DMA_TTFC_P2P_DMAC,
+ DMA_TTFC_P2M_PFR,
+ DMA_TTFC_P2P_PSRC,
+ DMA_TTFC_M2P_PFR,
+ DMA_TTFC_P2P_PDST,
+ DMA_TTFC_INVALID
+};
+
+enum
+{
+ DMA_DIR_INC,
+ DMA_DIR_DEC,
+ DMA_DIR_UNCHG,
+ DMA_DIR_INVALID
+};
+
+enum
+{
+ DMA_HSPOL_ACTHIGH,
+ DMA_HSPOL_ACTLOW
+};
+
+enum
+{
+ DMA_HSMODE_HARDWARE = 0,
+ DMA_HSMODE_SOFTWARE = 1
+};
+
+enum
+{
+ DMA_HSP_SDC,
+ DMA_HSP_AIFRX,
+ DMA_HSP_AIFTX,
+ DMA_HSP_TAE,
+ DMA_HSP_I2SRX,
+ DMA_HSP_I2STX,
+ DMA_HSP_SPI0RX,
+ DMA_HSP_SPI0TX,
+ DMA_HSP_SPI1RX,
+ DMA_HSP_SPI1TX,
+ DMA_HSP_UART0RX,
+ DMA_HSP_UART0TX,
+ DMA_HSP_UART1RX,
+ DMA_HSP_UART1TX,
+ DMA_HSP_SPI2RX,
+ DMA_HSP_SPI2TX,
+ DMA_HSP_INVALID
+};
+#endif
+#define DMAC_REG_BASE (0xfe600000)
+#define REG_DMAC_SAR_OFFSET (0x0)
+#define REG_DMAC_DAR_OFFSET (0x8)
+#define REG_DMAC_LLP_OFFSET (0x10)
+#define REG_DMAC_CTX_OFFSET (0x18)
+#define REG_DMAC_SSTAT_OFFSET (0x20)
+#define REG_DMAC_DSTAT_OFFSET (0x28)
+#define REG_DMAC_SSTATAR_OFFSET (0x30)
+#define REG_DMAC_DSTATAR_OFFSET (0x38)
+#define REG_DMAC_CFG_OFFSET (0x40)
+#define REG_DMAC_SGR_OFFSET (0x48)
+#define REG_DMAC_DSR_OFFSET (0x50)
+#define REG_DMAC_SARi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_SAR_OFFSET)
+#define REG_DMAC_DARi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_DAR_OFFSET)
+#define REG_DMAC_LLPi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_LLP_OFFSET)
+#define REG_DMAC_CTXi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_CTX_OFFSET)
+#define REG_DMAC_SSTATi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_SSTAT_OFFSET)
+#define REG_DMAC_DSTATi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_DSTAT_OFFSET)
+#define REG_DMAC_SSTATARi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_SSTATAR_OFFSET)
+#define REG_DMAC_DSTATARi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_DSTATAR_OFFSET)
+#define REG_DMAC_CFGi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_CFG_OFFSET)
+#define REG_DMAC_SGRi(n) (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_SGR_OFFSET)
+#define REG_DMAC_DSRi(n)     (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_DSR_OFFSET)	/* was ..._OFFSETR: undefined macro, would not compile if used */
+
+#define REG_DMAC_INTRAWTFR (DMAC_REG_BASE + 0x2c0)
+#define REG_DMAC_INTRAWBLK (DMAC_REG_BASE + 0x2c8)
+#define REG_DMAC_INTRAWSRCTXR (DMAC_REG_BASE + 0x2d0)
+#define REG_DMAC_INTRAWDSTTXR (DMAC_REG_BASE + 0x2d8)
+#define REG_DMAC_INTRAWERR (DMAC_REG_BASE + 0x2e0)
+#define REG_DMAC_INTSTAT_TFR (DMAC_REG_BASE + 0x2e8)
+#define REG_DMAC_INTSTAT_BLK (DMAC_REG_BASE + 0x2f0)
+#define REG_DMAC_INTSTAT_SRCTXR (DMAC_REG_BASE + 0x2f8)
+#define REG_DMAC_INTSTAT_DSTTXR (DMAC_REG_BASE + 0x300)
+#define REG_DMAC_INTSTAT_ERR (DMAC_REG_BASE + 0x308)
+#define REG_DMAC_INTMSK_TFR (DMAC_REG_BASE + 0x310)
+#define REG_DMAC_INTMSK_BLK (DMAC_REG_BASE + 0x318)
+#define REG_DMAC_INTMSK_SRCTXR (DMAC_REG_BASE + 0x320)
+#define REG_DMAC_INTMSK_DSTTXR (DMAC_REG_BASE + 0x328)
+#define REG_DMAC_INTMSK_ERR (DMAC_REG_BASE + 0x330)
+#define REG_DMAC_INTCLR_TFR (DMAC_REG_BASE + 0x338)
+#define REG_DMAC_INTCLR_BLK (DMAC_REG_BASE + 0x340)
+#define REG_DMAC_INTCLR_SRCTXR (DMAC_REG_BASE + 0x348)
+#define REG_DMAC_INTCLR_DSTTXR (DMAC_REG_BASE + 0x350)
+#define REG_DMAC_INTCLR_ERR (DMAC_REG_BASE + 0x358)
+#define REG_DMAC_INT_STATUS_ALL (DMAC_REG_BASE + 0x360)
+
+#define REG_DMAC_REQSRC (DMAC_REG_BASE + 0x368)
+#define REG_DMAC_REQDST (DMAC_REG_BASE + 0x370)
+#define REG_DMAC_SGLREQSRC (DMAC_REG_BASE + 0x378)
+#define REG_DMAC_SGLREQDST (DMAC_REG_BASE + 0x380)
+#define REG_DMAC_LSTSRC (DMAC_REG_BASE + 0x388)
+#define REG_DMAC_LSTDST (DMAC_REG_BASE + 0x390)
+#define REG_DMAC_CFG_REG (DMAC_REG_BASE + 0x398)
+#define REG_DMAC_CHAN_EN (DMAC_REG_BASE + 0x3a0)
+#define REG_DMAC_IDREG (DMAC_REG_BASE + 0x3a8)
+#define REG_DMAC_TESTREG (DMAC_REG_BASE + 0x3b0)
+#define REG_DMAC_COMPARAMS_6 (DMAC_REG_BASE + 0x3c8)
+#define REG_DMAC_COMPARAMS_5 (DMAC_REG_BASE + 0x3d0)
+#define REG_DMAC_COMPARAMS_4 (DMAC_REG_BASE + 0x3d8)
+#define REG_DMAC_COMPARAMS_3 (DMAC_REG_BASE + 0x3e0)
+#define REG_DMAC_COMPARAMS_2 (DMAC_REG_BASE + 0x3e8)
+#define REG_DMAC_COMPARAMS_1 (DMAC_REG_BASE + 0x3f0)
+#define REG_DMAC_COMP_IDREG (DMAC_REG_BASE + 0x3f8)
+int Dma_GetChanStatus( int nChanID );
+void Dma_EnableChan( int nChanID );
+void Dma_DisableChan( int nChanID );
+
+void Dma_ClearIsrBit( int iChan, int nMask );
+void Dma_EnableIsrBit( int iChan, int nMask );
+void Dma_DisableIsrBit( int iChan, int nMask );
+unsigned int Dma_GetIsrChan(unsigned int nMask);
+int Dma_QueryISRStatus( int iChan );	/* was "Dma_QueryIsrStatus( )": name and signature did not match the definition in dma.c */
+
+void Dma_ClearTfrDone(int iChan);
+void Dma_SetTxrSize( int iChan, int nBytes );
+void Dma_SetSrcWidth( int iChan, int nWidth );
+void Dma_SetDstWidth( int iChan, int nWidth );
+void Dma_SetSrcSize( int iChan, int nSize ); // burst size
+void Dma_SetDstSize( int iChan, int nSize );
+void Dma_EnableSrcBlkChain(int iChan);
+void Dma_EnableDstBlkChain(int iChan);
+
+void Dma_SetTxrType( int iChan, int nMode );
+void Dma_SetSrcIncDirection( int iChan, int nDir );
+void Dma_SetDstIncDirection( int iChan, int nDir );
+
+void Dma_EnableGather( int iChan, int bEnable );
+void Dma_EnableScatter( int iChan, int bEnable);
+
+void Dma_SetSrcHsMode( int iChan, int nMode );
+void Dma_SetDstHsMode( int iChan, int nMode );
+
+void Dma_SetFifoMode( int iChan, int nMode );
+void Dma_SetFlowCtrl( int iChan, int ctrl );
+void Dma_SetSrcAutoload(int iChan, int bEnable );
+void Dma_SetDstAutoload(int iChan, int bEnable );
+void Dma_SetMaxBurst( int iChan, int nSize );
+
+void Dma_SetSrcHsPol( int iChan, int nPol );
+void Dma_SetDstHsPol( int iChan, int nPol );
+
+void Dma_SetLinkEntry( int iChan, unsigned int nAddr );
+void Dma_SetSrcAddress( int iChan, unsigned int nAddr );
+void Dma_SetDstAddress( int iChan, unsigned int nAddr );
+void Dma_SetSrcPer( int iChan, unsigned int nPer );
+void Dma_SetDstPer( int iChan, unsigned int nPer );
+
+void Dma_StartSrctfr( int iChan );
+void Dma_StartDsttfr( int iChan );
+
+void Dma_Init();
+
+#endif
+
diff --git a/sound/soc/dwc/fh.c b/sound/soc/dwc/fh.c
new file mode 100644
index 00000000..31b8639c
--- /dev/null
+++ b/sound/soc/dwc/fh.c
@@ -0,0 +1,240 @@
+/*
+ * ASoC driver for Stretch s6105 IP camera platform
+ *
+ * Author: Daniel Gloeckner, <dg@emlix.com>
+ * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+
+
+#include "fullhan-pcm.h"
+#include "fh_i2s.h"
+
+#define S6105_CAM_CODEC_CLOCK 12288000
+
+static int s6105_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+
+ /* set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+ /* set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_NB_NF);
+ if (ret < 0)
+ return ret;
+
+ /* set the codec system clock */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, S6105_CAM_CODEC_CLOCK,
+ SND_SOC_CLOCK_OUT);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static struct snd_soc_ops s6105_ops = {
+ .hw_params = s6105_hw_params,
+};
+
+/* s6105 machine dapm widgets */
+static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
+ SND_SOC_DAPM_LINE("Audio Out Differential", NULL),
+ SND_SOC_DAPM_LINE("Audio Out Stereo", NULL),
+ SND_SOC_DAPM_LINE("Audio In", NULL),
+};
+
+/* s6105 machine audio_map: connections to the codec pins */
+static const struct snd_soc_dapm_route audio_map[] = {
+ /* Audio Out connected to HPLOUT, HPLCOM, HPROUT */
+ {"Audio Out Differential", NULL, "HPLOUT"},
+ {"Audio Out Differential", NULL, "HPLCOM"},
+ {"Audio Out Stereo", NULL, "HPLOUT"},
+ {"Audio Out Stereo", NULL, "HPROUT"},
+
+ /* Audio In connected to LINE1L, LINE1R */
+ {"LINE1L", NULL, "Audio In"},
+ {"LINE1R", NULL, "Audio In"},
+};
+
+static int output_type_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 2;
+ if (uinfo->value.enumerated.item) {
+ uinfo->value.enumerated.item = 1;
+ strcpy(uinfo->value.enumerated.name, "HPLOUT/HPROUT");
+ } else {
+ strcpy(uinfo->value.enumerated.name, "HPLOUT/HPLCOM");
+ }
+ return 0;
+}
+
+static int output_type_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.enumerated.item[0] = kcontrol->private_value;
+ return 0;
+}
+
+static int output_type_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = kcontrol->private_data;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ unsigned int val = (ucontrol->value.enumerated.item[0] != 0);
+ char *differential = "Audio Out Differential";
+ char *stereo = "Audio Out Stereo";
+
+ if (kcontrol->private_value == val)
+ return 0;
+ kcontrol->private_value = val;
+ snd_soc_dapm_disable_pin(dapm, val ? differential : stereo);
+ snd_soc_dapm_sync(dapm);
+ snd_soc_dapm_enable_pin(dapm, val ? stereo : differential);
+ snd_soc_dapm_sync(dapm);
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new audio_out_mux = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Master Output Mux",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = output_type_info,
+ .get = output_type_get,
+ .put = output_type_put,
+ .private_value = 1 /* default to stereo */
+};
+
+/* Logic for a aic3x as connected on the s6105 ip camera ref design */
+static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+ /* Add s6105 specific widgets */
+ snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
+ ARRAY_SIZE(aic3x_dapm_widgets));
+
+ /* Set up s6105 specific audio path audio_map */
+ snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+
+ /* not present */
+ snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
+ snd_soc_dapm_nc_pin(dapm, "LINE2L");
+ snd_soc_dapm_nc_pin(dapm, "LINE2R");
+
+ /* not connected */
+ snd_soc_dapm_nc_pin(dapm, "MIC3L"); /* LINE2L on this chip */
+ snd_soc_dapm_nc_pin(dapm, "MIC3R"); /* LINE2R on this chip */
+ snd_soc_dapm_nc_pin(dapm, "LLOUT");
+ snd_soc_dapm_nc_pin(dapm, "RLOUT");
+ snd_soc_dapm_nc_pin(dapm, "HPRCOM");
+
+ /* always connected */
+ snd_soc_dapm_enable_pin(dapm, "Audio In");
+
+ /* must correspond to audio_out_mux.private_value initializer */
+ snd_soc_dapm_disable_pin(dapm, "Audio Out Differential");
+ snd_soc_dapm_sync(dapm);
+ snd_soc_dapm_enable_pin(dapm, "Audio Out Stereo");
+
+ snd_soc_dapm_sync(dapm);
+
+ snd_ctl_add(codec->card->snd_card, snd_ctl_new1(&audio_out_mux, codec));
+
+ return 0;
+}
+
+/* s6105 digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link s6105_dai = {
+ .name = "TLV320AIC31",
+ .stream_name = "AIC31",
+ .cpu_dai_name = "s6000-i2s.0",
+ .codec_dai_name = "fh-acodec-hifi",
+ .platform_name = "fh-pcm-audio",
+ .codec_name = "fh-acodec",
+ .init = s6105_aic3x_init,
+ .ops = &s6105_ops,
+};
+
+/* s6105 audio machine driver */
+static struct snd_soc_card snd_soc_card_s6105 = {
+ .name = "Stretch IP Camera",
+ .dai_link = &s6105_dai,
+ .num_links = 1,
+};
+
+static struct s6000_snd_platform_data __initdata s6105_snd_data = {
+ .wide = 0,
+ .channel_in = 0,
+ .channel_out = 1,
+ .lines_in = 1,
+ .lines_out = 1,
+ .same_rate = 1,
+};
+
+static struct platform_device *s6105_snd_device;
+
+/* temporary i2c device creation until this can be moved into the machine
+ * support file.
+*/
+static struct i2c_board_info i2c_device[] = {
+ { I2C_BOARD_INFO("tlv320aic33", 0x18), }
+};
+
+static int __init s6105_init(void)
+{
+ int ret;
+ i2c_register_board_info(0, i2c_device, ARRAY_SIZE(i2c_device));
+
+ s6105_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!s6105_snd_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(s6105_snd_device, &snd_soc_card_s6105);
+ platform_device_add_data(s6105_snd_device, &s6105_snd_data,
+ sizeof(s6105_snd_data));
+
+ ret = platform_device_add(s6105_snd_device);
+ if (ret)
+ platform_device_put(s6105_snd_device);
+
+ return ret;
+}
+
+static void __exit s6105_exit(void)
+{
+ platform_device_unregister(s6105_snd_device);
+}
+
+module_init(s6105_init);
+module_exit(s6105_exit);
+
+MODULE_AUTHOR("Daniel Gloeckner");
+MODULE_DESCRIPTION("Stretch s6105 IP camera ASoC driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/dwc/fh_i2s.c b/sound/soc/dwc/fh_i2s.c
new file mode 100644
index 00000000..8658b13a
--- /dev/null
+++ b/sound/soc/dwc/fh_i2s.c
@@ -0,0 +1,1072 @@
+/*
+ * ALSA SoC I2S (McBSP) Audio Layer for TI DAVINCI processor
+ *
+ * Author: Vladimir Barinov, <vbarinov@embeddedalley.com>
+ * Copyright: (C) 2007 MontaVista Software, Inc., <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/irqreturn.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <linux/kernel.h>
+//#include <mach/asp.h>
+
+#include "fullhan-pcm.h"
+#include "fh_i2s.h"
+
+#define I2S_FIFO_LEN_RX 40
+#define I2S_FIFO_LEN_TX 40
+extern void i2s_irq_enquen(int type, u8 *buff, u8 len,u8 reset);
+/*
+ * NOTE: terminology here is confusing.
+ *
+ * - This driver supports the "Audio Serial Port" (ASP),
+ * found on dm6446, dm355, and other DaVinci chips.
+ *
+ * - But it labels it a "Multi-channel Buffered Serial Port"
+ * (McBSP) as on older chips like the dm642 ... which was
+ * backward-compatible, possibly explaining that confusion.
+ *
+ * - OMAP chips have a controller called McBSP, which is
+ * incompatible with the DaVinci flavor of McBSP.
+ *
+ * - Newer DaVinci chips have a controller called McASP,
+ * incompatible with ASP and with either McBSP.
+ *
+ * In short: this uses ASP to implement I2S, not McBSP.
+ * And it won't be the only DaVinci implementation of I2S.
+ */
+#define DAVINCI_MCBSP_DRR_REG 0x00
+#define DAVINCI_MCBSP_DXR_REG 0x04
+#define DAVINCI_MCBSP_SPCR_REG 0x08
+#define DAVINCI_MCBSP_RCR_REG 0x0c
+#define DAVINCI_MCBSP_XCR_REG 0x10
+#define DAVINCI_MCBSP_SRGR_REG 0x14
+#define DAVINCI_MCBSP_PCR_REG 0x24
+
+#define DAVINCI_MCBSP_SPCR_RRST (1 << 0)
+#define DAVINCI_MCBSP_SPCR_RINTM(v) ((v) << 4)
+#define DAVINCI_MCBSP_SPCR_XRST (1 << 16)
+#define DAVINCI_MCBSP_SPCR_XINTM(v) ((v) << 20)
+#define DAVINCI_MCBSP_SPCR_GRST (1 << 22)
+#define DAVINCI_MCBSP_SPCR_FRST (1 << 23)
+#define DAVINCI_MCBSP_SPCR_FREE (1 << 25)
+
+#define DAVINCI_MCBSP_RCR_RWDLEN1(v) ((v) << 5)
+#define DAVINCI_MCBSP_RCR_RFRLEN1(v) ((v) << 8)
+#define DAVINCI_MCBSP_RCR_RDATDLY(v) ((v) << 16)
+#define DAVINCI_MCBSP_RCR_RFIG (1 << 18)
+#define DAVINCI_MCBSP_RCR_RWDLEN2(v) ((v) << 21)
+#define DAVINCI_MCBSP_RCR_RFRLEN2(v) ((v) << 24)
+#define DAVINCI_MCBSP_RCR_RPHASE BIT(31)
+
+#define DAVINCI_MCBSP_XCR_XWDLEN1(v) ((v) << 5)
+#define DAVINCI_MCBSP_XCR_XFRLEN1(v) ((v) << 8)
+#define DAVINCI_MCBSP_XCR_XDATDLY(v) ((v) << 16)
+#define DAVINCI_MCBSP_XCR_XFIG (1 << 18)
+#define DAVINCI_MCBSP_XCR_XWDLEN2(v) ((v) << 21)
+#define DAVINCI_MCBSP_XCR_XFRLEN2(v) ((v) << 24)
+#define DAVINCI_MCBSP_XCR_XPHASE BIT(31)
+
+#define DAVINCI_MCBSP_SRGR_FWID(v) ((v) << 8)
+#define DAVINCI_MCBSP_SRGR_FPER(v) ((v) << 16)
+#define DAVINCI_MCBSP_SRGR_FSGM (1 << 28)
+#define DAVINCI_MCBSP_SRGR_CLKSM BIT(29)
+
+#define DAVINCI_MCBSP_PCR_CLKRP (1 << 0)
+#define DAVINCI_MCBSP_PCR_CLKXP (1 << 1)
+#define DAVINCI_MCBSP_PCR_FSRP (1 << 2)
+#define DAVINCI_MCBSP_PCR_FSXP (1 << 3)
+#define DAVINCI_MCBSP_PCR_SCLKME (1 << 7)
+#define DAVINCI_MCBSP_PCR_CLKRM (1 << 8)
+#define DAVINCI_MCBSP_PCR_CLKXM (1 << 9)
+#define DAVINCI_MCBSP_PCR_FSRM (1 << 10)
+#define DAVINCI_MCBSP_PCR_FSXM (1 << 11)
+
+enum {
+ DAVINCI_MCBSP_WORD_8 = 0,
+ DAVINCI_MCBSP_WORD_12,
+ DAVINCI_MCBSP_WORD_16,
+ DAVINCI_MCBSP_WORD_20,
+ DAVINCI_MCBSP_WORD_24,
+ DAVINCI_MCBSP_WORD_32,
+};
+struct my_data{
+ struct work_struct my_work;
+ int value;
+ u8 buff;
+ u8 len;
+ void __iomem *base;
+ };
+struct my_data *rx_md,*tx_md;
+struct work_struct rx_work_queue,tx_work_queue;
+int g_i2s_base;
+//init test data
+struct my_data *init_data(struct my_data *md,struct work_struct work_queue)
+{
+md = (struct my_data *)kmalloc(sizeof(struct my_data),GFP_KERNEL);
+md->my_work=work_queue;
+return md;
+}
+
+
+static const unsigned char data_type[SNDRV_PCM_FORMAT_S32_LE + 1] = {
+ [SNDRV_PCM_FORMAT_S8] = 1, [SNDRV_PCM_FORMAT_S16_LE] = 2,
+ [SNDRV_PCM_FORMAT_S32_LE] = 4, };
+
+static const unsigned char asp_word_length[SNDRV_PCM_FORMAT_S32_LE + 1] = {
+ [SNDRV_PCM_FORMAT_S8] = DAVINCI_MCBSP_WORD_8, [SNDRV_PCM_FORMAT_S16_LE
+ ] = DAVINCI_MCBSP_WORD_16, [SNDRV_PCM_FORMAT_S32_LE
+ ] = DAVINCI_MCBSP_WORD_32, };
+
+static const unsigned char double_fmt[SNDRV_PCM_FORMAT_S32_LE + 1] = {
+ [SNDRV_PCM_FORMAT_S8] = SNDRV_PCM_FORMAT_S16_LE,
+ [SNDRV_PCM_FORMAT_S16_LE] = SNDRV_PCM_FORMAT_S32_LE, };
+enum dma_event_q {
+ EVENTQ_0 = 0, EVENTQ_1 = 1, EVENTQ_2 = 2, EVENTQ_3 = 3, EVENTQ_DEFAULT = -1
+};
+struct davinci_pcm_dma_params {
+ int channel; /* sync dma channel ID */
+ unsigned short acnt;
+ dma_addr_t dma_addr; /* device physical address for DMA */
+ unsigned sram_size;
+ enum dma_event_q asp_chan_q; /* event queue number for ASP channel */
+ enum dma_event_q ram_chan_q; /* event queue number for RAM channel */
+ unsigned char data_type; /* xfer data type */
+ unsigned char convert_mono_stereo;
+ unsigned int fifo_level;
+ int (*trigger)(struct snd_pcm_substream *substream, int cmd, int after);
+};
+struct s6000_i2s_dev {
+ dma_addr_t sifbase;
+ u8 __iomem *scbbase;
+ unsigned int wide;
+ unsigned int channel_in;
+ unsigned int channel_out;
+ unsigned int lines_in;
+ unsigned int lines_out;
+ struct s6000_pcm_dma_params dma_params;
+ int irq;
+ void __iomem *base;
+ struct clk *clk;
+ struct device *dev;
+};
+struct davinci_mcbsp_dev {
+ struct device *dev;
+ struct davinci_pcm_dma_params dma_params;
+ void __iomem *base;
+#define MOD_DSP_A 0
+#define MOD_DSP_B 1
+ int mode;
+ u32 pcr;
+ struct clk *clk;
+ /*
+ * Combining both channels into 1 element will at least double the
+ * amount of time between servicing the dma channel, increase
+ * effiency, and reduce the chance of overrun/underrun. But,
+ * it will result in the left & right channels being swapped.
+ *
+ * If relabeling the left and right channels is not possible,
+ * you may want to let the codec know to swap them back.
+ *
+ * It may allow x10 the amount of time to service dma requests,
+ * if the codec is master and is using an unnecessarily fast bit clock
+ * (ie. tlvaic23b), independent of the sample rate. So, having an
+ * entire frame at once means it can be serviced at the sample rate
+ * instead of the bit clock rate.
+ *
+ * In the now unlikely case that an underrun still
+ * occurs, both the left and right samples will be repeated
+ * so that no pops are heard, and the left and right channels
+ * won't end up being swapped because of the underrun.
+ */
+ unsigned enable_channel_combine :1;
+
+ unsigned int fmt;
+ int clk_div;
+ int clk_input_pin;
+ bool i2s_accurate_sck;
+};
+struct i2c_adapter *codec_i2c_adapter;
+void set_i2c_codec_adapter(struct i2c_adapter * adapter) {
+ codec_i2c_adapter = adapter;
+}
+EXPORT_SYMBOL(set_i2c_codec_adapter);
+
+int i2c_write_codec(u8 addr, u8 data) {
+ int rval;
+ struct i2c_msg msgs[1];
+ u8 send[2];
+ msgs[0].len = 2;
+ msgs[0].addr = 0x1a;
+ msgs[0].flags = 0;
+ msgs[0].buf = send;
+ send[0] = addr;
+ send[1] = data;
+ rval = i2c_transfer(codec_i2c_adapter, msgs, 1);
+ return rval;
+}
+
+static inline void davinci_mcbsp_write_reg(struct davinci_mcbsp_dev *dev,
+ int reg, u32 val) {
+ __raw_writel(val, dev->base + reg);
+}
+
+static inline u32 davinci_mcbsp_read_reg(struct davinci_mcbsp_dev *dev, int reg) {
+ return __raw_readl(dev->base + reg);
+}
+
+static void toggle_clock(struct davinci_mcbsp_dev *dev, int playback) {
+ u32 m = playback ? DAVINCI_MCBSP_PCR_CLKXP : DAVINCI_MCBSP_PCR_CLKRP;
+ /* The clock needs to toggle to complete reset.
+ * So, fake it by toggling the clk polarity.
+ */
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr ^ m);
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr);
+}
+
+static void davinci_mcbsp_start(struct davinci_mcbsp_dev *dev,
+ struct snd_pcm_substream *substream) {
+// struct snd_soc_pcm_runtime *rtd = substream->private_data;
+// struct snd_soc_platform *platform = rtd->platform;
+// int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+// u32 spcr;
+// u32 mask = playback ? DAVINCI_MCBSP_SPCR_XRST : DAVINCI_MCBSP_SPCR_RRST;
+// spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+// if (spcr & mask) {
+// /* start off disabled */
+// davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG,
+// spcr & ~mask);
+// toggle_clock(dev, playback);
+// }
+// if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM |
+// DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM)) {
+// /* Start the sample generator */
+// spcr |= DAVINCI_MCBSP_SPCR_GRST;
+// davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+// }
+//
+// if (playback) {
+// /* Stop the DMA to avoid data loss */
+// /* while the transmitter is out of reset to handle XSYNCERR */
+// if (platform->driver->ops->trigger) {
+// int ret = platform->driver->ops->trigger(substream,
+// SNDRV_PCM_TRIGGER_STOP);
+// if (ret < 0)
+// printk(KERN_DEBUG "Playback DMA stop failed\n");
+// }
+//
+// /* Enable the transmitter */
+// spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+//		spcr |= DAVINCI_MCBSP_SPCR_XRST;
+
+// davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+//
+// /* wait for any unexpected frame sync error to occur */
+// udelay(100);
+//
+// /* Disable the transmitter to clear any outstanding XSYNCERR */
+// spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+// spcr &= ~DAVINCI_MCBSP_SPCR_XRST;
+// davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+// toggle_clock(dev, playback);
+//
+// /* Restart the DMA */
+// if (platform->driver->ops->trigger) {
+// int ret = platform->driver->ops->trigger(substream,
+//					SNDRV_PCM_TRIGGER_START);
+// if (ret < 0)
+// printk(KERN_DEBUG "Playback DMA start failed\n");
+// }
+// }
+//
+// /* Enable transmitter or receiver */
+// spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+// spcr |= mask;
+//
+// if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM)) {
+// /* Start frame sync */
+// spcr |= DAVINCI_MCBSP_SPCR_FRST;
+// }
+// davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+}
+
+static void davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback) {
+
+// u32 spcr;
+//
+// /* Reset transmitter/receiver and sample rate/frame sync generators */
+// spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+// spcr &= ~(DAVINCI_MCBSP_SPCR_GRST | DAVINCI_MCBSP_SPCR_FRST);
+// spcr &= playback ? ~DAVINCI_MCBSP_SPCR_XRST : ~DAVINCI_MCBSP_SPCR_RRST;
+// davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+// toggle_clock(dev, playback);
+}
+
+#define DEFAULT_BITPERSAMPLE 16
+
+static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt) {
+// struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
+// unsigned int pcr;
+// unsigned int srgr;
+// /* Attention srgr is updated by hw_params! */
+// srgr = DAVINCI_MCBSP_SRGR_FSGM |
+// DAVINCI_MCBSP_SRGR_FPER(DEFAULT_BITPERSAMPLE * 2 - 1) |
+// DAVINCI_MCBSP_SRGR_FWID(DEFAULT_BITPERSAMPLE - 1);
+//
+// dev->fmt = fmt;
+// /* set master/slave audio interface */
+// switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+// case SND_SOC_DAIFMT_CBS_CFS:
+// /* cpu is master */
+// pcr = DAVINCI_MCBSP_PCR_FSXM |
+// DAVINCI_MCBSP_PCR_FSRM |
+// DAVINCI_MCBSP_PCR_CLKXM |
+// DAVINCI_MCBSP_PCR_CLKRM;
+// break;
+// case SND_SOC_DAIFMT_CBM_CFS:
+// pcr = DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_FSXM;
+// /*
+// * Selection of the clock input pin that is the
+// * input for the Sample Rate Generator.
+// * McBSP FSR and FSX are driven by the Sample Rate
+// * Generator.
+// */
+// switch (dev->clk_input_pin) {
+// case MCBSP_CLKS:
+// pcr |= DAVINCI_MCBSP_PCR_CLKXM |
+// DAVINCI_MCBSP_PCR_CLKRM;
+// break;
+// case MCBSP_CLKR:
+// pcr |= DAVINCI_MCBSP_PCR_SCLKME;
+// break;
+// default:
+// dev_err(dev->dev, "bad clk_input_pin\n");
+// return -EINVAL;
+// }
+//
+// break;
+// case SND_SOC_DAIFMT_CBM_CFM:
+// /* codec is master */
+// pcr = 0;
+// break;
+// default:
+// printk(KERN_ERR "%s:bad master\n", __func__);
+// return -EINVAL;
+// }
+//
+// /* interface format */
+// switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+// case SND_SOC_DAIFMT_I2S:
+// /* Davinci doesn't support TRUE I2S, but some codecs will have
+// * the left and right channels contiguous. This allows
+// * dsp_a mode to be used with an inverted normal frame clk.
+// * If your codec is master and does not have contiguous
+// * channels, then you will have sound on only one channel.
+// * Try using a different mode, or codec as slave.
+// *
+// * The TLV320AIC33 is an example of a codec where this works.
+// * It has a variable bit clock frequency allowing it to have
+// * valid data on every bit clock.
+// *
+// * The TLV320AIC23 is an example of a codec where this does not
+// * work. It has a fixed bit clock frequency with progressively
+// * more empty bit clock slots between channels as the sample
+// * rate is lowered.
+// */
+// fmt ^= SND_SOC_DAIFMT_NB_IF;
+// case SND_SOC_DAIFMT_DSP_A:
+// dev->mode = MOD_DSP_A;
+// break;
+// case SND_SOC_DAIFMT_DSP_B:
+// dev->mode = MOD_DSP_B;
+// break;
+// default:
+// printk(KERN_ERR "%s:bad format\n", __func__);
+// return -EINVAL;
+// }
+//
+// switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+// case SND_SOC_DAIFMT_NB_NF:
+// /* CLKRP Receive clock polarity,
+// * 1 - sampled on rising edge of CLKR
+// * valid on rising edge
+// * CLKXP Transmit clock polarity,
+// * 1 - clocked on falling edge of CLKX
+// * valid on rising edge
+// * FSRP Receive frame sync pol, 0 - active high
+// * FSXP Transmit frame sync pol, 0 - active high
+// */
+// pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP);
+// break;
+// case SND_SOC_DAIFMT_IB_IF:
+// /* CLKRP Receive clock polarity,
+// * 0 - sampled on falling edge of CLKR
+// * valid on falling edge
+// * CLKXP Transmit clock polarity,
+// * 0 - clocked on rising edge of CLKX
+// * valid on falling edge
+// * FSRP Receive frame sync pol, 1 - active low
+// * FSXP Transmit frame sync pol, 1 - active low
+// */
+// pcr |= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP);
+// break;
+// case SND_SOC_DAIFMT_NB_IF:
+// /* CLKRP Receive clock polarity,
+// * 1 - sampled on rising edge of CLKR
+// * valid on rising edge
+// * CLKXP Transmit clock polarity,
+// * 1 - clocked on falling edge of CLKX
+// * valid on rising edge
+// * FSRP Receive frame sync pol, 1 - active low
+// * FSXP Transmit frame sync pol, 1 - active low
+// */
+// pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP |
+// DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP);
+// break;
+// case SND_SOC_DAIFMT_IB_NF:
+// /* CLKRP Receive clock polarity,
+// * 0 - sampled on falling edge of CLKR
+// * valid on falling edge
+// * CLKXP Transmit clock polarity,
+// * 0 - clocked on rising edge of CLKX
+// * valid on falling edge
+// * FSRP Receive frame sync pol, 0 - active high
+// * FSXP Transmit frame sync pol, 0 - active high
+// */
+// break;
+// default:
+// return -EINVAL;
+// }
+// davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
+// dev->pcr = pcr;
+// davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, pcr);
+ return 0;
+}
+
+static int davinci_i2s_dai_set_clkdiv(struct snd_soc_dai *cpu_dai, int div_id,
+ int div) {
+
+// struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
+//
+// if (div_id != DAVINCI_MCBSP_CLKGDV)
+// return -ENODEV;
+//
+// dev->clk_div = div;
+ return 0;
+}
+
+static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) {
+
+
+ struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
+ struct davinci_pcm_dma_params *dma_params = &dev->dma_params;
+ struct snd_interval *i = NULL;
+ int mcbsp_word_length, master;
+ unsigned int rcr, xcr, srgr, clk_div, freq, framesize;
+ u32 spcr;
+ snd_pcm_format_t fmt;
+ unsigned element_cnt = 1;
+
+ /* general line settings */
+#if 0
+ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ spcr |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+ } else {
+ spcr |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+ }
+
+ master = dev->fmt & SND_SOC_DAIFMT_MASTER_MASK;
+ fmt = params_format(params);
+ mcbsp_word_length = asp_word_length[fmt];
+
+ switch (master) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ freq = clk_get_rate(dev->clk);
+ srgr = DAVINCI_MCBSP_SRGR_FSGM |
+ DAVINCI_MCBSP_SRGR_CLKSM;
+ srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length *
+ 8 - 1);
+ if (dev->i2s_accurate_sck) {
+ clk_div = 256;
+ do {
+ framesize = (freq / (--clk_div)) /
+ params->rate_num *
+ params->rate_den;
+			} while (((framesize < 33) ||
+					(framesize > 4095)) &&
+					(clk_div));
+			/*
+			 * NOTE(review): an unrelated snd_pcm_hardware
+			 * initializer fragment (SNDRV_PCM_INFO_* flags,
+			 * rates and period/buffer limits) was spliced into
+			 * this while-condition in the patch text; it was
+			 * removed and the loop condition reconstructed to
+			 * match the davinci-i2s.c original.
+			 */
+ clk_div--;
+ srgr |= DAVINCI_MCBSP_SRGR_FPER(framesize - 1);
+ } else {
+ /* symmetric waveforms */
+ clk_div = freq / (mcbsp_word_length * 16) /
+ params->rate_num * params->rate_den;
+ srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length *
+ 16 - 1);
+ }
+ clk_div &= 0xFF;
+ srgr |= clk_div;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ srgr = DAVINCI_MCBSP_SRGR_FSGM;
+ clk_div = dev->clk_div - 1;
+ srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1);
+ srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length * 16 - 1);
+ clk_div &= 0xFF;
+ srgr |= clk_div;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /* Clock and frame sync given from external sources */
+ i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
+ srgr = DAVINCI_MCBSP_SRGR_FSGM;
+ srgr |= DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1);
+ pr_debug("%s - %d FWID set: re-read srgr = %X\n",
+ __func__, __LINE__, snd_interval_value(i) - 1);
+
+ i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_FRAME_BITS);
+ srgr |= DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
+
+ rcr = DAVINCI_MCBSP_RCR_RFIG;
+ xcr = DAVINCI_MCBSP_XCR_XFIG;
+ if (dev->mode == MOD_DSP_B) {
+ rcr |= DAVINCI_MCBSP_RCR_RDATDLY(0);
+ xcr |= DAVINCI_MCBSP_XCR_XDATDLY(0);
+ } else {
+ rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1);
+ xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1);
+ }
+ /* Determine xfer data type */
+ fmt = params_format(params);
+ if ((fmt > SNDRV_PCM_FORMAT_S32_LE) || !data_type[fmt]) {
+ printk(KERN_WARNING "davinci-i2s: unsupported PCM format\n");
+ return -EINVAL;
+ }
+
+ if (params_channels(params) == 2) {
+ element_cnt = 2;
+		if (double_fmt[fmt] &&
+				dev->enable_channel_combine) {
+			/* NOTE(review): spliced work_func() stub removed from this line */
+ element_cnt = 1;
+ fmt = double_fmt[fmt];
+ }
+ switch (master) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBS_CFM:
+ rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(0);
+ xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(0);
+ rcr |= DAVINCI_MCBSP_RCR_RPHASE;
+ xcr |= DAVINCI_MCBSP_XCR_XPHASE;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_CBM_CFS:
+ rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(element_cnt - 1);
+ xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(element_cnt - 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+// dma_params->acnt = dma_params->data_type = data_type[fmt];
+// dma_params->fifo_level = 0;
+ mcbsp_word_length = asp_word_length[fmt];
+
+ switch (master) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBS_CFM:
+ rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(0);
+ xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(0);
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_CBM_CFS:
+ rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(element_cnt - 1);
+ xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(element_cnt - 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rcr |= DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) |
+ DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length);
+ xcr |= DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) |
+ DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr);
+ else
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr);
+
+ pr_debug("%s - %d srgr=%X\n", __func__, __LINE__, srgr);
+ pr_debug("%s - %d xcr=%X\n", __func__, __LINE__, xcr);
+ pr_debug("%s - %d rcr=%X\n", __func__, __LINE__, rcr);
+#endif
+
+
+ return 0;
+}
+
+static int davinci_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai) {
+
+// struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
+// int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+// davinci_mcbsp_stop(dev, playback);
+ return 0;
+}
+static struct {
+ spinlock_t lock;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long in_use;
+ unsigned long next_heartbeat;
+ struct timer_list timer;
+ int expect_close;
+} dw_i2s;
+#define I2S_IOCTL_BASE 'W'
+#define I2S_GETSUPPORT _IOR(I2S_IOCTL_BASE, 3, int)
+typedef unsigned short UINT16;
+typedef unsigned int uint32;
+//#define BASEADDR_I2S_REG dw_i2s.regs
+#define OFFSET_I2S_IER 0x0000
+#define OFFSET_I2S_IRER 0x0004
+#define OFFSET_I2S_ITER 0x0008
+#define OFFSET_I2S_CER 0x000c
+#define OFFSET_I2S_CCR 0x0010
+#define OFFSET_I2S_RXFFR 0x0014
+#define OFFSET_I2S_TXFFR 0x0018
+#define OFFSET_I2S_LRBR0 0x0020
+#define OFFSET_I2S_LRBR1 0x0060
+#define OFFSET_I2S_LRBR2 0x00A0
+#define OFFSET_I2S_LRBR3 0x00E0
+#define OFFSET_I2S_LTHR0 0x0020
+#define OFFSET_I2S_LTHR1 0x0060
+#define OFFSET_I2S_LTHR2 0x00A0
+#define OFFSET_I2S_LTHR3 0x00E0
+#define OFFSET_I2S_RRBR0 0x0024
+#define OFFSET_I2S_RRBR1 0x0064
+#define OFFSET_I2S_RRBR2 0x00A4
+#define OFFSET_I2S_RRBR3 0x00E4
+#define OFFSET_I2S_RTHR0 0x0024
+#define OFFSET_I2S_RTHR1 0x0064
+#define OFFSET_I2S_RTHR2 0x00A4
+#define OFFSET_I2S_RTHR3 0x00E4
+#define OFFSET_I2S_RER0 0x0028
+#define OFFSET_I2S_RER1 0x0068
+#define OFFSET_I2S_RER2 0x00A8
+#define OFFSET_I2S_RER3 0x00E8
+#define OFFSET_I2S_TER0 0x002C
+#define OFFSET_I2S_TER1 0x006C
+#define OFFSET_I2S_TER2 0x00AC
+#define OFFSET_I2S_TER3 0x00EC
+#define OFFSET_I2S_RCR0 0x0030
+#define OFFSET_I2S_RCR1 0x0070
+#define OFFSET_I2S_RCR2 0x00B0
+#define OFFSET_I2S_RCR3 0x00F0
+#define OFFSET_I2S_TCR0 0x0034
+#define OFFSET_I2S_TCR1 0x0074
+#define OFFSET_I2S_TCR2 0x00B4
+#define OFFSET_I2S_TCR3 0x00F4
+#define OFFSET_I2S_ISR0 0x0038
+#define OFFSET_I2S_ISR1 0x0078
+#define OFFSET_I2S_ISR2 0x00B8
+#define OFFSET_I2S_ISR3 0x00F8
+#define OFFSET_I2S_IMR0 0x003C
+#define OFFSET_I2S_IMR1 0x007C
+#define OFFSET_I2S_IMR2 0x00BC
+#define OFFSET_I2S_IMR3 0x00FC
+#define OFFSET_I2S_ROR0 0x0040
+#define OFFSET_I2S_ROR1 0x0080
+#define OFFSET_I2S_ROR2 0x00C0
+#define OFFSET_I2S_ROR3 0x0100
+#define OFFSET_I2S_TOR0 0x0044
+#define OFFSET_I2S_TOR1 0x0084
+#define OFFSET_I2S_TOR2 0x00C4
+#define OFFSET_I2S_TOR3 0x0104
+#define OFFSET_I2S_RFCR0 0x0048
+#define OFFSET_I2S_RFCR1 0x0088
+#define OFFSET_I2S_RFCR2 0x00C8
+#define OFFSET_I2S_RFCR3 0x0108
+#define OFFSET_I2S_TFCR0 0x004C
+#define OFFSET_I2S_TFCR1 0x008C
+#define OFFSET_I2S_TFCR2 0x00CC
+#define OFFSET_I2S_TFCR3 0x010C
+#define OFFSET_I2S_RFF0 0x0050
+#define OFFSET_I2S_RFF1 0x0090
+#define OFFSET_I2S_RFF2 0x00D0
+#define OFFSET_I2S_RFF3 0x0110
+#define OFFSET_I2S_TFF0 0x0054
+#define OFFSET_I2S_TFF1 0x0094
+#define OFFSET_I2S_TFF2 0x00D4
+#define OFFSET_I2S_TFF3 0x0114
+#define OFFSET_I2S_RXDMA 0x01C0
+#define OFFSET_I2S_RRXDMA 0x01C4
+#define OFFSET_I2S_TXDMA 0x01C8
+#define OFFSET_I2S_RTXDMA 0x01CC
+#define OFFSET_I2S_COMP_PARAM_2 0x01f0
+#define OFFSET_I2S_COMP_PARAM_1 0x01f4
+#define OFFSET_I2S_COMP_VERSION 0x01f8
+#define OFFSET_I2S_COMP_TYPE 0x01fc
+#define RESOLUTION12
+#define write_reg(addr,reg) (*((volatile uint32 *)(addr)))=(uint32)(reg)
+#define read_reg(addr) (*((volatile uint32 *)(addr)))
+static int dw_i2s_action(void * base,int channel) {
+
+ int data, rx_data_right, rx_data_left,temp;
+ unsigned int i2s_base;
+ i2s_base = base;
+ temp = read_reg(OFFSET_I2S_IMR0 + i2s_base);
+ if (SNDRV_PCM_STREAM_PLAYBACK == channel) {
+ write_reg(OFFSET_I2S_TCR0 + i2s_base, 0x4);
+ write_reg(OFFSET_I2S_TFCR0 + i2s_base, 0x10);
+ write_reg(OFFSET_I2S_ITER + i2s_base, 0x01);
+ write_reg(OFFSET_I2S_TXFFR + i2s_base, 1);
+ temp &=~(1<<4);
+ temp |= (1<<1);
+ temp |= (1<<5);
+ write_reg(OFFSET_I2S_TER0 + i2s_base, 1);
+
+ } else {
+ write_reg(OFFSET_I2S_IRER + i2s_base, 0x01);
+ write_reg(OFFSET_I2S_RCR0 + i2s_base, 0x4);
+ write_reg(OFFSET_I2S_RFCR0 + i2s_base, I2S_FIFO_LEN_RX);
+ write_reg(OFFSET_I2S_RXFFR + i2s_base, 1);
+ temp &=~(1<<0);
+ temp |= (1<<1);
+ temp |= (1<<5);
+ write_reg(OFFSET_I2S_RER0 + i2s_base, 1);
+
+ }
+ write_reg(OFFSET_I2S_IMR0 + i2s_base, temp); //interrupt mask
+}
+
+static void codec_config(void)
+{
+ i2c_write_codec(0x0, 0x44);//set 8K sample
+ i2c_write_codec(0x9, 0x2);
+ i2c_write_codec(0x4, 0x10);
+ i2c_write_codec(0x1, 0x3c);
+ i2c_write_codec(0x5, 0x5);
+ i2c_write_codec(0x7, 0xe6);
+ i2c_write_codec(0x2, 0x14);
+ i2c_write_codec(0x8, 0x38);
+ i2c_write_codec(0xf, 0x1b);
+ i2c_write_codec(0x10, 0x1b);
+}
+
+static int s6000_i2s_start_channel(struct s6000_i2s_dev *dev, int channel) {
+
+ dw_i2s_action(dev->base,channel);
+ return 0;
+}
+static void s6000_i2s_start(struct snd_pcm_substream *substream) {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ s6000_i2s_start_channel(dev, substream->stream);
+
+}
+static int s6000_i2s_stop_channel(struct s6000_i2s_dev *dev, int channel) {
+ int temp,i;
+ temp = read_reg(OFFSET_I2S_IMR0 + dev->base);
+ if (SNDRV_PCM_STREAM_PLAYBACK == channel) {
+ write_reg(OFFSET_I2S_TER0 + dev->base, 0);
+ temp |=(1<<4);
+ temp |= (1<<1);
+ temp |= (1<<5);
+ write_reg(OFFSET_I2S_IMR0 + dev->base,temp); //interrupt mask
+
+
+ } else {
+ write_reg(OFFSET_I2S_RER0 + dev->base, 0);
+ temp |=(1<<0);
+ temp |= (1<<1);
+ temp |= (1<<5);
+ write_reg(OFFSET_I2S_IMR0 + dev->base,temp); //interrupt mask
+
+ }
+ return 0;
+
+}
+static void s6000_i2s_stop(struct snd_pcm_substream *substream) {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ s6000_i2s_stop_channel(dev, substream->stream);
+}
+static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ int after) {
+ int ret = 0;
+ int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ s6000_i2s_start(substream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ // if (!after)
+ s6000_i2s_stop(substream);
+ break;
+ }
+ return 0;
+}
+
+static int davinci_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai) {
+// struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
+//
+// snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
+ return 0;
+}
+
+static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai) {
+// struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
+// int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+// davinci_mcbsp_stop(dev, playback);
+}
+
+#define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000
+
+static struct snd_soc_dai_ops davinci_i2s_dai_ops = { .startup =
+ davinci_i2s_startup, .shutdown = davinci_i2s_shutdown, .prepare =
+ davinci_i2s_prepare, .trigger = davinci_i2s_trigger, .hw_params =
+ davinci_i2s_hw_params, .set_fmt = davinci_i2s_set_dai_fmt, .set_clkdiv =
+ davinci_i2s_dai_set_clkdiv,
+
+};
+int s6000_i2s_dai_probe(struct snd_soc_dai *dai) {
+
+ struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+ struct s6000_snd_platform_data *pdata = dai->dev->platform_data;
+ dai->capture_dma_data = &dev->dma_params;
+ dai->playback_dma_data = &dev->dma_params;
+ dev->dma_params.trigger = davinci_i2s_trigger;
+// dev->wide = pdata->wide;
+// dev->channel_in = pdata->channel_in;
+// dev->channel_out = pdata->channel_out;
+// dev->lines_in = pdata->lines_in;
+// dev->lines_out = pdata->lines_out;
+ dev->dma_params.sif_in = 0xf0901c0;
+ dev->dma_params.sif_out = 0xf0901c8;
+ return 0;
+
+}
+static struct snd_soc_dai_driver davinci_i2s_dai = { .probe =
+ s6000_i2s_dai_probe, .playback = { .channels_min = 2, .channels_max = 2,
+ .rates = DAVINCI_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S16_LE, },
+ .capture = { .channels_min = 2, .channels_max = 2, .rates =
+ DAVINCI_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .ops =
+ &davinci_i2s_dai_ops,
+
+};
+//work queue function
+
+
+ void capture_work_func(struct work_struct *work)
+{
+ uint32 count_data,i,audio_data,temp;
+ uint32 buff[I2S_FIFO_LEN_RX];
+ for(count_data=0;count_data<I2S_FIFO_LEN_RX;count_data++) {
+ audio_data = read_reg(OFFSET_I2S_RRBR0 + g_i2s_base);
+ i = read_reg(OFFSET_I2S_LRBR0 + g_i2s_base);
+ buff[count_data] = audio_data;
+ }
+ i2s_irq_enquen(SNDRV_PCM_STREAM_CAPTURE,(u8 *)buff,count_data<<2,0);
+ temp = read_reg(OFFSET_I2S_IMR0 + g_i2s_base);
+ temp &=~(1<<0);
+ write_reg(OFFSET_I2S_IMR0 + g_i2s_base,temp);
+}
+ void playback_work_func(struct work_struct *work)
+{
+ uint32 count_data,temp;
+ uint32 buff[I2S_FIFO_LEN_TX];
+ i2s_irq_enquen(SNDRV_PCM_STREAM_PLAYBACK,(u8 *)buff,I2S_FIFO_LEN_TX<<2,0);
+ for(count_data=0;count_data<I2S_FIFO_LEN_TX;count_data++) {
+ write_reg(OFFSET_I2S_RTHR0 + g_i2s_base,buff[count_data]);
+ write_reg(OFFSET_I2S_LTHR0 + g_i2s_base,buff[count_data]);
+ }
+ temp = read_reg(OFFSET_I2S_IMR0 + g_i2s_base);
+ temp &=~(1<<4);
+ write_reg(OFFSET_I2S_IMR0 + g_i2s_base,temp);
+}
+
+static irqreturn_t davinci_i2s_irq(int irq, void *data) {
+ uint32 irq_data,temp;
+ struct s6000_i2s_dev *fdev = data;
+ irq_data = read_reg(OFFSET_I2S_ISR0 + fdev->base);
+ temp = read_reg(OFFSET_I2S_IMR0 + fdev->base);
+ if ( (irq_data & 0x10)&&( !(temp&(1<<4))) ) {
+ temp |= (1<<4);
+ write_reg(OFFSET_I2S_IMR0 + fdev->base,temp);
+ schedule_work(&tx_md->my_work);
+ //playback_work_func(&tx_md->my_work);
+ }
+ if ( (irq_data & 0x01)&&( !(temp&(1<<0))) ) {
+ temp|= (1<<0);
+ write_reg(OFFSET_I2S_IMR0 +fdev->base,temp);
+ schedule_work(&rx_md->my_work);
+ //capture_work_func(&rx_md->my_work);
+ }
+ return IRQ_HANDLED;
+ }
+
+static void i2s_config(void)
+{
+ write_reg(OFFSET_I2S_IER + g_i2s_base, 0x01);//i2s enable
+ write_reg(OFFSET_I2S_CCR + g_i2s_base, 0x8);
+ write_reg(OFFSET_I2S_CER + g_i2s_base, 0x01);
+}
+ static int davinci_i2s_probe(struct platform_device *pdev) {
+ struct snd_platform_data *pdata = pdev->dev.platform_data;
+ struct s6000_i2s_dev *dev;
+ struct resource *mem, *ioarea, *res;
+
+
+ if (!pdata)
+ return -EINVAL;
+
+ int ret;
+
+ rx_md=init_data(rx_md,rx_work_queue);
+ INIT_WORK(&rx_md->my_work,capture_work_func);
+ //rx_wq=create_singlethread_workqueue("capture_workqueue");
+ tx_md = init_data(tx_md,tx_work_queue);
+ INIT_WORK(&tx_md->my_work,playback_work_func);
+ //tx_wq=create_singlethread_workqueue("capture_workqueue");
+ //queue_work(wq,&md->my_work);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ printk("i2s platform get resource err\n");
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -ENODEV;
+ }
+
+ ioarea = request_mem_region(mem->start, resource_size(mem), pdev->name);
+ if (!ioarea) {
+ printk("i2s request mem region err\n");
+ dev_err(&pdev->dev, "McBSP region already claimed\n");
+ return -EBUSY;
+ }
+
+ dev = kzalloc(sizeof(struct s6000_i2s_dev), GFP_KERNEL);
+ if (!dev) {
+ printk("i2s kzalloc err \n");
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+ dev->irq = platform_get_irq(pdev, 0);
+ int rc;
+ rc = request_irq(dev->irq, davinci_i2s_irq,
+ IRQF_DISABLED, pdev->name, dev);
+ if (rc) {
+ printk("request irq err \n");
+ free_irq(dev->irq, dev);
+ }
+
+ dev->base = ioremap(mem->start, resource_size(mem));
+ g_i2s_base = dev->base;
+ rx_md->base =dev->base;
+ tx_md->base = rx_md->base;
+ if (!dev->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release_clk;
+ }
+
+ dev->dev = &pdev->dev;
+
+ dev_set_drvdata(&pdev->dev, dev);
+
+ ret = snd_soc_register_dai(&pdev->dev, &davinci_i2s_dai);
+ if (ret != 0)
+ goto err_iounmap;
+
+ codec_config();
+ i2s_config();
+
+ return 0;
+
+ err_iounmap: iounmap(dev->base);
+ err_release_clk: clk_disable(dev->clk);
+ clk_put(dev->clk);
+ err_free_mem: kfree(dev);
+ err_release_region:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return ret;
+ }
+
+ static int davinci_i2s_remove(struct platform_device *pdev) {
+ struct davinci_mcbsp_dev *dev = dev_get_drvdata(&pdev->dev);
+ struct resource *mem;
+
+ snd_soc_unregister_dai(&pdev->dev);
+// clk_disable(dev->clk);
+// clk_put(dev->clk);
+// dev->clk = NULL;
+ kfree(dev);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ return 0;
+ }
+
+ static struct platform_driver davinci_mcbsp_driver = {
+ .probe =davinci_i2s_probe,
+ .remove = davinci_i2s_remove,
+ .driver = {
+ .name =
+ "s6000-i2s", .owner = THIS_MODULE,
+ },
+ };
+
+static int __init davinci_i2s_init(void)
+{
+ return platform_driver_register(&davinci_mcbsp_driver);
+}
+ module_init(davinci_i2s_init);
+
+static void __exit davinci_i2s_exit(void)
+{
+ platform_driver_unregister(&davinci_mcbsp_driver);
+}
+ module_exit(davinci_i2s_exit);
+
+ MODULE_AUTHOR("Vladimir Barinov");
+ MODULE_DESCRIPTION("TI DAVINCI I2S (McBSP) SoC Interface");
+ MODULE_LICENSE("GPL");
diff --git a/sound/soc/dwc/fh_i2s.h b/sound/soc/dwc/fh_i2s.h
new file mode 100644
index 00000000..86aa1921
--- /dev/null
+++ b/sound/soc/dwc/fh_i2s.h
@@ -0,0 +1,23 @@
+/*
+ * ALSA SoC I2S Audio Layer for the Stretch s6000 family
+ *
+ * Author: Daniel Gloeckner, <dg@emlix.com>
+ * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _S6000_I2S_H
+#define _S6000_I2S_H
+
+struct s6000_snd_platform_data {
+ int lines_in;
+ int lines_out;
+ int channel_in;
+ int channel_out;
+ int wide;
+ int same_rate;
+};
+#endif
diff --git a/sound/soc/dwc/fh_i2s_dai.c b/sound/soc/dwc/fh_i2s_dai.c
new file mode 100644
index 00000000..28f8ea67
--- /dev/null
+++ b/sound/soc/dwc/fh_i2s_dai.c
@@ -0,0 +1,1003 @@
+/*
+ * ALSA SoC McASP Audio Layer for TI DAVINCI processor
+ *
+ * Multi-channel Audio Serial Port Driver
+ *
+ * Author: Nirmal Pandey <n-pandey@ti.com>,
+ * Suresh Rajashekara <suresh.r@ti.com>
+ * Steve Chen <schen@.mvista.com>
+ *
+ * Copyright: (C) 2009 MontaVista Software, Inc., <source@mvista.com>
+ * Copyright: (C) 2009 Texas Instruments, India
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include "fullhan-pcm.h"
+#include "fh_i2s_dai.h"
+
+/*
+ * McASP register definitions
+ */
+#define DAVINCI_MCASP_PID_REG 0x00
+#define DAVINCI_MCASP_PWREMUMGT_REG 0x04
+
+#define DAVINCI_MCASP_PFUNC_REG 0x10
+#define DAVINCI_MCASP_PDIR_REG 0x14
+#define DAVINCI_MCASP_PDOUT_REG 0x18
+#define DAVINCI_MCASP_PDSET_REG 0x1c
+
+#define DAVINCI_MCASP_PDCLR_REG 0x20
+
+#define DAVINCI_MCASP_TLGC_REG 0x30
+#define DAVINCI_MCASP_TLMR_REG 0x34
+
+#define DAVINCI_MCASP_GBLCTL_REG 0x44
+#define DAVINCI_MCASP_AMUTE_REG 0x48
+#define DAVINCI_MCASP_LBCTL_REG 0x4c
+
+#define DAVINCI_MCASP_TXDITCTL_REG 0x50
+
+#define DAVINCI_MCASP_GBLCTLR_REG 0x60
+#define DAVINCI_MCASP_RXMASK_REG 0x64
+#define DAVINCI_MCASP_RXFMT_REG 0x68
+#define DAVINCI_MCASP_RXFMCTL_REG 0x6c
+
+#define DAVINCI_MCASP_ACLKRCTL_REG 0x70
+#define DAVINCI_MCASP_AHCLKRCTL_REG 0x74
+#define DAVINCI_MCASP_RXTDM_REG 0x78
+#define DAVINCI_MCASP_EVTCTLR_REG 0x7c
+
+#define DAVINCI_MCASP_RXSTAT_REG 0x80
+#define DAVINCI_MCASP_RXTDMSLOT_REG 0x84
+#define DAVINCI_MCASP_RXCLKCHK_REG 0x88
+#define DAVINCI_MCASP_REVTCTL_REG 0x8c
+
+#define DAVINCI_MCASP_GBLCTLX_REG 0xa0
+#define DAVINCI_MCASP_TXMASK_REG 0xa4
+#define DAVINCI_MCASP_TXFMT_REG 0xa8
+#define DAVINCI_MCASP_TXFMCTL_REG 0xac
+
+#define DAVINCI_MCASP_ACLKXCTL_REG 0xb0
+#define DAVINCI_MCASP_AHCLKXCTL_REG 0xb4
+#define DAVINCI_MCASP_TXTDM_REG 0xb8
+#define DAVINCI_MCASP_EVTCTLX_REG 0xbc
+
+#define DAVINCI_MCASP_TXSTAT_REG 0xc0
+#define DAVINCI_MCASP_TXTDMSLOT_REG 0xc4
+#define DAVINCI_MCASP_TXCLKCHK_REG 0xc8
+#define DAVINCI_MCASP_XEVTCTL_REG 0xcc
+
+/* Left(even TDM Slot) Channel Status Register File */
+#define DAVINCI_MCASP_DITCSRA_REG 0x100
+/* Right(odd TDM slot) Channel Status Register File */
+#define DAVINCI_MCASP_DITCSRB_REG 0x118
+/* Left(even TDM slot) User Data Register File */
+#define DAVINCI_MCASP_DITUDRA_REG 0x130
+/* Right(odd TDM Slot) User Data Register File */
+#define DAVINCI_MCASP_DITUDRB_REG 0x148
+
+/* Serializer n Control Register */
+#define DAVINCI_MCASP_XRSRCTL_BASE_REG 0x180
+#define DAVINCI_MCASP_XRSRCTL_REG(n) (DAVINCI_MCASP_XRSRCTL_BASE_REG + \
+ (n << 2))
+
+/* Transmit Buffer for Serializer n */
+#define DAVINCI_MCASP_TXBUF_REG 0x200
+/* Receive Buffer for Serializer n */
+#define DAVINCI_MCASP_RXBUF_REG 0x280
+
+/* McASP FIFO Registers */
+#define DAVINCI_MCASP_WFIFOCTL (0x1010)
+#define DAVINCI_MCASP_WFIFOSTS (0x1014)
+#define DAVINCI_MCASP_RFIFOCTL (0x1018)
+#define DAVINCI_MCASP_RFIFOSTS (0x101C)
+
+/*
+ * DAVINCI_MCASP_PWREMUMGT_REG - Power Down and Emulation Management
+ * Register Bits
+ */
+#define MCASP_FREE BIT(0)
+#define MCASP_SOFT BIT(1)
+
+/*
+ * DAVINCI_MCASP_PFUNC_REG - Pin Function / GPIO Enable Register Bits
+ */
+#define AXR(n) (1<<n)
+#define PFUNC_AMUTE BIT(25)
+#define ACLKX BIT(26)
+#define AHCLKX BIT(27)
+#define AFSX BIT(28)
+#define ACLKR BIT(29)
+#define AHCLKR BIT(30)
+#define AFSR BIT(31)
+
+/*
+ * DAVINCI_MCASP_PDIR_REG - Pin Direction Register Bits
+ */
+#define AXR(n) (1<<n)
+#define PDIR_AMUTE BIT(25)
+#define ACLKX BIT(26)
+#define AHCLKX BIT(27)
+#define AFSX BIT(28)
+#define ACLKR BIT(29)
+#define AHCLKR BIT(30)
+#define AFSR BIT(31)
+
+/*
+ * DAVINCI_MCASP_TXDITCTL_REG - Transmit DIT Control Register Bits
+ */
+#define DITEN BIT(0) /* Transmit DIT mode enable/disable */
+#define VA BIT(2)
+#define VB BIT(3)
+
+/*
+ * DAVINCI_MCASP_TXFMT_REG - Transmit Bitstream Format Register Bits
+ */
+#define TXROT(val) (val)
+#define TXSEL BIT(3)
+#define TXSSZ(val) (val<<4)
+#define TXPBIT(val) (val<<8)
+#define TXPAD(val) (val<<13)
+#define TXORD BIT(15)
+#define FSXDLY(val) (val<<16)
+
+/*
+ * DAVINCI_MCASP_RXFMT_REG - Receive Bitstream Format Register Bits
+ */
+#define RXROT(val) (val)
+#define RXSEL BIT(3)
+#define RXSSZ(val) (val<<4)
+#define RXPBIT(val) (val<<8)
+#define RXPAD(val) (val<<13)
+#define RXORD BIT(15)
+#define FSRDLY(val) (val<<16)
+
+/*
+ * DAVINCI_MCASP_TXFMCTL_REG - Transmit Frame Control Register Bits
+ */
+#define FSXPOL BIT(0)
+#define AFSXE BIT(1)
+#define FSXDUR BIT(4)
+#define FSXMOD(val) (val<<7)
+
+/*
+ * DAVINCI_MCASP_RXFMCTL_REG - Receive Frame Control Register Bits
+ */
+#define FSRPOL BIT(0)
+#define AFSRE BIT(1)
+#define FSRDUR BIT(4)
+#define FSRMOD(val) (val<<7)
+
+/*
+ * DAVINCI_MCASP_ACLKXCTL_REG - Transmit Clock Control Register Bits
+ */
+#define ACLKXDIV(val) (val)
+#define ACLKXE BIT(5)
+#define TX_ASYNC BIT(6)
+#define ACLKXPOL BIT(7)
+
+/*
+ * DAVINCI_MCASP_ACLKRCTL_REG Receive Clock Control Register Bits
+ */
+#define ACLKRDIV(val) (val)
+#define ACLKRE BIT(5)
+#define RX_ASYNC BIT(6)
+#define ACLKRPOL BIT(7)
+
+/*
+ * DAVINCI_MCASP_AHCLKXCTL_REG - High Frequency Transmit Clock Control
+ * Register Bits
+ */
+#define AHCLKXDIV(val) (val)
+#define AHCLKXPOL BIT(14)
+#define AHCLKXE BIT(15)
+
+/*
+ * DAVINCI_MCASP_AHCLKRCTL_REG - High Frequency Receive Clock Control
+ * Register Bits
+ */
+#define AHCLKRDIV(val) (val)
+#define AHCLKRPOL BIT(14)
+#define AHCLKRE BIT(15)
+
+/*
+ * DAVINCI_MCASP_XRSRCTL_BASE_REG - Serializer Control Register Bits
+ */
+#define MODE(val) (val)
+#define DISMOD(val) (val<<2) /* was "#define DISMOD (val)(val<<2)": the space made it an object-like macro expanding to the bogus tokens "(val)(val<<2)" */
+#define TXSTATE BIT(4)
+#define RXSTATE BIT(5)
+
+/*
+ * DAVINCI_MCASP_LBCTL_REG - Loop Back Control Register Bits
+ */
+#define LBEN BIT(0)
+#define LBORD BIT(1)
+#define LBGENMODE(val) (val<<2)
+
+/*
+ * DAVINCI_MCASP_TXTDMSLOT_REG - Transmit TDM Slot Register configuration
+ */
+#define TXTDMS(n) (1<<n)
+
+/*
+ * DAVINCI_MCASP_RXTDMSLOT_REG - Receive TDM Slot Register configuration
+ */
+#define RXTDMS(n) (1<<n)
+
+/*
+ * DAVINCI_MCASP_GBLCTL_REG - Global Control Register Bits
+ */
+#define RXCLKRST BIT(0) /* Receiver Clock Divider Reset */
+#define RXHCLKRST BIT(1) /* Receiver High Frequency Clock Divider */
+#define RXSERCLR BIT(2) /* Receiver Serializer Clear */
+#define RXSMRST BIT(3) /* Receiver State Machine Reset */
+#define RXFSRST BIT(4) /* Frame Sync Generator Reset */
+#define TXCLKRST BIT(8) /* Transmitter Clock Divider Reset */
+#define TXHCLKRST BIT(9) /* Transmitter High Frequency Clock Divider*/
+#define TXSERCLR BIT(10) /* Transmit Serializer Clear */
+#define TXSMRST BIT(11) /* Transmitter State Machine Reset */
+#define TXFSRST BIT(12) /* Frame Sync Generator Reset */
+
+/*
+ * DAVINCI_MCASP_AMUTE_REG - Mute Control Register Bits
+ */
+#define MUTENA(val) (val)
+#define MUTEINPOL BIT(2)
+#define MUTEINENA BIT(3)
+#define MUTEIN BIT(4)
+#define MUTER BIT(5)
+#define MUTEX BIT(6)
+#define MUTEFSR BIT(7)
+#define MUTEFSX BIT(8)
+#define MUTEBADCLKR BIT(9)
+#define MUTEBADCLKX BIT(10)
+#define MUTERXDMAERR BIT(11)
+#define MUTETXDMAERR BIT(12)
+
+/*
+ * DAVINCI_MCASP_REVTCTL_REG - Receiver DMA Event Control Register bits
+ */
+#define RXDATADMADIS BIT(0)
+
+/*
+ * DAVINCI_MCASP_XEVTCTL_REG - Transmitter DMA Event Control Register bits
+ */
+#define TXDATADMADIS BIT(0)
+
+/*
+ * DAVINCI_MCASP_W[R]FIFOCTL - Write/Read FIFO Control Register bits
+ */
+#define FIFO_ENABLE BIT(16)
+#define NUMEVT_MASK (0xFF << 8)
+#define NUMDMA_MASK (0xFF)
+
+#define DAVINCI_MCASP_NUM_SERIALIZER 16
+
+static inline void mcasp_set_bits(void __iomem *reg, u32 val)
+{
+ __raw_writel(__raw_readl(reg) | val, reg);
+}
+
+static inline void mcasp_clr_bits(void __iomem *reg, u32 val)
+{
+ __raw_writel((__raw_readl(reg) & ~(val)), reg);
+}
+
+static inline void mcasp_mod_bits(void __iomem *reg, u32 val, u32 mask)
+{
+ __raw_writel((__raw_readl(reg) & ~mask) | val, reg);
+}
+
+static inline void mcasp_set_reg(void __iomem *reg, u32 val)
+{
+ __raw_writel(val, reg);
+}
+
+static inline u32 mcasp_get_reg(void __iomem *reg)
+{
+ return (unsigned int)__raw_readl(reg);
+}
+
+static inline void mcasp_set_ctl_reg(void __iomem *regs, u32 val)
+{
+// int i = 0;
+//
+// mcasp_set_bits(regs, val);
+//
+// /* programming GBLCTL needs to read back from GBLCTL and verfiy */
+// /* loop count is to avoid the lock-up */
+// for (i = 0; i < 1000; i++) {
+// if ((mcasp_get_reg(regs) & val) == val)
+// break;
+// }
+//
+// if (i == 1000 && ((mcasp_get_reg(regs) & val) != val))
+// printk(KERN_ERR "GBLCTL write error\n");
+}
+
+static void mcasp_start_rx(struct davinci_audio_dev *dev)
+{
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXCLKRST);
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSERCLR);
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_RXBUF_REG, 0);
+//
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_RXBUF_REG, 0);
+//
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
+}
+
+static void mcasp_start_tx(struct davinci_audio_dev *dev)
+{
+// u8 offset = 0, i;
+// u32 cnt;
+//
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXSERCLR);
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
+//
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
+// mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
+// for (i = 0; i < dev->num_serializer; i++) {
+// if (dev->serial_dir[i] == TX_MODE) {
+// offset = i;
+// break;
+// }
+// }
+//
+// /* wait for TX ready */
+// cnt = 0;
+// while (!(mcasp_get_reg(dev->base + DAVINCI_MCASP_XRSRCTL_REG(offset)) &
+// TXSTATE) && (cnt < 100000))
+// cnt++;
+//
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
+}
+
+static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
+{
+// if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+// if (dev->txnumevt) /* enable FIFO */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+// FIFO_ENABLE);
+// mcasp_start_tx(dev);
+// } else {
+// if (dev->rxnumevt) /* enable FIFO */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+// FIFO_ENABLE);
+// mcasp_start_rx(dev);
+// }
+}
+
+static void mcasp_stop_rx(struct davinci_audio_dev *dev)
+{
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, 0);
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
+}
+
+static void mcasp_stop_tx(struct davinci_audio_dev *dev)
+{
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, 0);
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
+}
+
+static void davinci_mcasp_stop(struct davinci_audio_dev *dev, int stream)
+{
+// if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+// if (dev->txnumevt) /* disable FIFO */
+// mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+// FIFO_ENABLE);
+// mcasp_stop_tx(dev);
+// } else {
+// if (dev->rxnumevt) /* disable FIFO */
+// mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+// FIFO_ENABLE);
+// mcasp_stop_rx(dev);
+// }
+}
+
+static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt)
+{
+// struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
+// void __iomem *base = dev->base;
+//
+// switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+// case SND_SOC_DAIFMT_CBS_CFS:
+// /* codec is clock and frame slave */
+// mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
+// mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
+//
+// mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
+// mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
+//
+// mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+// ACLKX | AHCLKX | AFSX);
+// break;
+// case SND_SOC_DAIFMT_CBM_CFS:
+// /* codec is clock master and frame slave */
+// mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
+// mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
+//
+// mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
+// mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
+//
+// mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG,
+// ACLKX | ACLKR);
+// mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+// AFSX | AFSR);
+// break;
+// case SND_SOC_DAIFMT_CBM_CFM:
+// /* codec is clock and frame master */
+// mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
+// mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
+//
+// mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
+// mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
+//
+// mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG,
+// ACLKX | AHCLKX | AFSX | ACLKR | AHCLKR | AFSR);
+// break;
+//
+// default:
+// return -EINVAL;
+// }
+//
+// switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+// case SND_SOC_DAIFMT_IB_NF:
+// mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
+// mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
+//
+// mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+// mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
+// break;
+//
+// case SND_SOC_DAIFMT_NB_IF:
+// mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
+// mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
+//
+// mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+// mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
+// break;
+//
+// case SND_SOC_DAIFMT_IB_IF:
+// mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
+// mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
+//
+// mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+// mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
+// break;
+//
+// case SND_SOC_DAIFMT_NB_NF:
+// mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
+// mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
+//
+// mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+// mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
+// break;
+//
+// default:
+// return -EINVAL;
+// }
+
+ return 0;
+}
+
+static int davinci_config_channel_size(struct davinci_audio_dev *dev,
+ int channel_size)
+{
+// u32 fmt = 0;
+// u32 mask, rotate;
+//
+// switch (channel_size) {
+// case DAVINCI_AUDIO_WORD_8:
+// fmt = 0x03;
+// rotate = 6;
+// mask = 0x000000ff;
+// break;
+//
+// case DAVINCI_AUDIO_WORD_12:
+// fmt = 0x05;
+// rotate = 5;
+// mask = 0x00000fff;
+// break;
+//
+// case DAVINCI_AUDIO_WORD_16:
+// fmt = 0x07;
+// rotate = 4;
+// mask = 0x0000ffff;
+// break;
+//
+// case DAVINCI_AUDIO_WORD_20:
+// fmt = 0x09;
+// rotate = 3;
+// mask = 0x000fffff;
+// break;
+//
+// case DAVINCI_AUDIO_WORD_24:
+// fmt = 0x0B;
+// rotate = 2;
+// mask = 0x00ffffff;
+// break;
+//
+// case DAVINCI_AUDIO_WORD_28:
+// fmt = 0x0D;
+// rotate = 1;
+// mask = 0x0fffffff;
+// break;
+//
+// case DAVINCI_AUDIO_WORD_32:
+// fmt = 0x0F;
+// rotate = 0;
+// mask = 0xffffffff;
+// break;
+//
+// default:
+// return -EINVAL;
+// }
+//
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
+// RXSSZ(fmt), RXSSZ(0x0F));
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
+// TXSSZ(fmt), TXSSZ(0x0F));
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXROT(rotate),
+// TXROT(7));
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(rotate),
+// RXROT(7));
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, mask);
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, mask);
+//
+// return 0;
+}
+
+static void davinci_hw_common_param(struct davinci_audio_dev *dev, int stream)
+{
+// int i;
+// u8 tx_ser = 0;
+// u8 rx_ser = 0;
+//
+// /* Default configuration */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_PWREMUMGT_REG, MCASP_SOFT);
+//
+// /* All PINS as McASP */
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_PFUNC_REG, 0x00000000);
+//
+// if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
+// mcasp_clr_bits(dev->base + DAVINCI_MCASP_XEVTCTL_REG,
+// TXDATADMADIS);
+// } else {
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
+// mcasp_clr_bits(dev->base + DAVINCI_MCASP_REVTCTL_REG,
+// RXDATADMADIS);
+// }
+//
+// for (i = 0; i < dev->num_serializer; i++) {
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_XRSRCTL_REG(i),
+// dev->serial_dir[i]);
+// if (dev->serial_dir[i] == TX_MODE) {
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
+// AXR(i));
+// tx_ser++;
+// } else if (dev->serial_dir[i] == RX_MODE) {
+// mcasp_clr_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
+// AXR(i));
+// rx_ser++;
+// }
+// }
+//
+// if (dev->txnumevt && stream == SNDRV_PCM_STREAM_PLAYBACK) {
+// if (dev->txnumevt * tx_ser > 64)
+// dev->txnumevt = 1;
+//
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, tx_ser,
+// NUMDMA_MASK);
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+// ((dev->txnumevt * tx_ser) << 8), NUMEVT_MASK);
+// }
+//
+// if (dev->rxnumevt && stream == SNDRV_PCM_STREAM_CAPTURE) {
+// if (dev->rxnumevt * rx_ser > 64)
+// dev->rxnumevt = 1;
+//
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, rx_ser,
+// NUMDMA_MASK);
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+// ((dev->rxnumevt * rx_ser) << 8), NUMEVT_MASK);
+// }
+}
+
+static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
+{
+// int i, active_slots;
+// u32 mask = 0;
+//
+// active_slots = (dev->tdm_slots > 31) ? 32 : dev->tdm_slots;
+// for (i = 0; i < active_slots; i++)
+// mask |= (1 << i);
+//
+// mcasp_clr_bits(dev->base + DAVINCI_MCASP_ACLKXCTL_REG, TX_ASYNC);
+//
+// if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+// /* bit stream is MSB first with no delay */
+// /* DSP_B mode */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG,
+// AHCLKXE);
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask);
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD);
+//
+// if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
+// FSXMOD(dev->tdm_slots), FSXMOD(0x1FF));
+// else
+// printk(KERN_ERR "playback tdm slot %d not supported\n",
+// dev->tdm_slots);
+//
+// mcasp_clr_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
+// } else {
+// /* bit stream is MSB first with no delay */
+// /* DSP_B mode */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXORD);
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKRCTL_REG,
+// AHCLKRE);
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask);
+//
+// if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
+// mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG,
+// FSRMOD(dev->tdm_slots), FSRMOD(0x1FF));
+// else
+// printk(KERN_ERR "capture tdm slot %d not supported\n",
+// dev->tdm_slots);
+//
+// mcasp_clr_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
+// }
+}
+
+/* S/PDIF */
+static void davinci_hw_dit_param(struct davinci_audio_dev *dev)
+{
+// /* Set the PDIR for Serialiser as output */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG, AFSX);
+//
+// /* TXMASK for 24 bits */
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, 0x00FFFFFF);
+//
+// /* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
+// and LSB first */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
+// TXROT(6) | TXSSZ(15));
+//
+// /* Set TX frame synch : DIT Mode, 1 bit width, internal, rising edge */
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
+// AFSXE | FSXMOD(0x180));
+//
+// /* Set the TX tdm : for all the slots */
+// mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, 0xFFFFFFFF);
+//
+// /* Set the TX clock controls : div = 1 and internal */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_ACLKXCTL_REG,
+// ACLKXE | TX_ASYNC);
+//
+// mcasp_clr_bits(dev->base + DAVINCI_MCASP_XEVTCTL_REG, TXDATADMADIS);
+//
+// /* Only 44100 and 48000 are valid, both have the same setting */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXDIV(3));
+//
+// /* Enable the DIT */
+// mcasp_set_bits(dev->base + DAVINCI_MCASP_TXDITCTL_REG, DITEN);
+}
+
+static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+{
+// struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
+// struct davinci_pcm_dma_params *dma_params =
+// &dev->dma_params[substream->stream];
+// int word_length;
+// u8 fifo_level;
+//
+// davinci_hw_common_param(dev, substream->stream);
+// if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+// fifo_level = dev->txnumevt;
+// else
+// fifo_level = dev->rxnumevt;
+//
+// if (dev->op_mode == DAVINCI_MCASP_DIT_MODE)
+// davinci_hw_dit_param(dev);
+// else
+// davinci_hw_param(dev, substream->stream);
+//
+// switch (params_format(params)) {
+// case SNDRV_PCM_FORMAT_S8:
+// dma_params->data_type = 1;
+// word_length = DAVINCI_AUDIO_WORD_8;
+// break;
+//
+// case SNDRV_PCM_FORMAT_S16_LE:
+// dma_params->data_type = 2;
+// word_length = DAVINCI_AUDIO_WORD_16;
+// break;
+//
+// case SNDRV_PCM_FORMAT_S32_LE:
+// dma_params->data_type = 4;
+// word_length = DAVINCI_AUDIO_WORD_32;
+// break;
+//
+// default:
+// printk(KERN_WARNING "davinci-mcasp: unsupported PCM format");
+// return -EINVAL;
+// }
+//
+// if (dev->version == MCASP_VERSION_2 && !fifo_level)
+// dma_params->acnt = 4;
+// else
+// dma_params->acnt = dma_params->data_type;
+//
+// dma_params->fifo_level = fifo_level;
+// davinci_config_channel_size(dev, word_length);
+
+ return 0;
+}
+
+static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *cpu_dai)
+{
+ struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (!dev->clk_active) {
+ clk_enable(dev->clk);
+ dev->clk_active = 1;
+ }
+ davinci_mcasp_start(dev, substream->stream);
+ break;
+
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ davinci_mcasp_stop(dev, substream->stream);
+ if (dev->clk_active) {
+ clk_disable(dev->clk);
+ dev->clk_active = 0;
+ }
+
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ davinci_mcasp_stop(dev, substream->stream);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+ //snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
+ return 0;
+}
+
+static struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
+ .startup = davinci_mcasp_startup,
+ .trigger = davinci_mcasp_trigger,
+ .hw_params = davinci_mcasp_hw_params,
+ .set_fmt = davinci_mcasp_set_dai_fmt,
+
+};
+
+static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
+ {
+ .name = "davinci-mcasp.0",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = DAVINCI_MCASP_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = DAVINCI_MCASP_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .ops = &davinci_mcasp_dai_ops,
+
+ },
+ {
+ "davinci-mcasp.1",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 384,
+ .rates = DAVINCI_MCASP_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &davinci_mcasp_dai_ops,
+ },
+
+};
+
+static int davinci_mcasp_probe(struct platform_device *pdev)
+{
+ struct davinci_pcm_dma_params *dma_data;
+ struct resource *mem, *ioarea, *res;
+ struct snd_platform_data *pdata;
+ struct davinci_audio_dev *dev;
+ int ret = 0;
+
+ dev = kzalloc(sizeof(struct davinci_audio_dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ ret = -ENODEV;
+ goto err_release_data;
+ }
+
+ ioarea = request_mem_region(mem->start,
+ resource_size(mem), pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "Audio region already claimed\n");
+ ret = -EBUSY;
+ goto err_release_data;
+ }
+
+ pdata = pdev->dev.platform_data;
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = -ENODEV;
+ goto err_release_region;
+ }
+
+ clk_enable(dev->clk);
+ dev->clk_active = 1;
+
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (!dev->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_release_clk;
+ }
+
+// dev->op_mode = pdata->op_mode;
+// dev->tdm_slots = pdata->tdm_slots;
+// dev->num_serializer = pdata->num_serializer;
+// dev->serial_dir = pdata->serial_dir;
+// dev->codec_fmt = pdata->codec_fmt;
+// dev->version = pdata->version;
+// dev->txnumevt = pdata->txnumevt;
+// dev->rxnumevt = pdata->rxnumevt;
+
+// dma_data = &dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
+// dma_data->asp_chan_q = pdata->asp_chan_q;
+// dma_data->ram_chan_q = pdata->ram_chan_q;
+// dma_data->sram_size = pdata->sram_size_playback;
+// dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
+// mem->start);
+
+ /* first TX, then RX */
+// res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+// if (!res) {
+// dev_err(&pdev->dev, "no DMA resource\n");
+// ret = -ENODEV;
+// goto err_iounmap;
+// }
+
+// dma_data->channel = res->start;
+//
+// dma_data = &dev->dma_params[SNDRV_PCM_STREAM_CAPTURE];
+// dma_data->asp_chan_q = pdata->asp_chan_q;
+// dma_data->ram_chan_q = pdata->ram_chan_q;
+// dma_data->sram_size = pdata->sram_size_capture;
+// dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
+// mem->start);
+
+// res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+// if (!res) {
+// dev_err(&pdev->dev, "no DMA resource\n");
+// ret = -ENODEV;
+// goto err_iounmap;
+// }
+
+// dma_data->channel = res->start;
+ dev_set_drvdata(&pdev->dev, dev);
+ ret = snd_soc_register_dai(&pdev->dev, &davinci_mcasp_dai[0]);
+
+ if (ret != 0)
+ goto err_iounmap;
+ return 0;
+
+err_iounmap:
+ iounmap(dev->base);
+err_release_clk:
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+err_release_region:
+ release_mem_region(mem->start, resource_size(mem));
+err_release_data:
+ kfree(dev);
+
+ return ret;
+}
+
+static int davinci_mcasp_remove(struct platform_device *pdev)
+{
+ struct davinci_audio_dev *dev = dev_get_drvdata(&pdev->dev);
+ struct resource *mem;
+
+ snd_soc_unregister_dai(&pdev->dev);
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ kfree(dev);
+
+ return 0;
+}
+
+static struct platform_driver davinci_mcasp_driver = {
+ .probe = davinci_mcasp_probe,
+ .remove = davinci_mcasp_remove,
+ .driver = {
+ .name = "davinci-mcasp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init davinci_mcasp_init(void)
+{
+ return platform_driver_register(&davinci_mcasp_driver);
+}
+module_init(davinci_mcasp_init);
+
+static void __exit davinci_mcasp_exit(void)
+{
+ platform_driver_unregister(&davinci_mcasp_driver);
+}
+module_exit(davinci_mcasp_exit);
+
+MODULE_AUTHOR("Steve Chen");
+MODULE_DESCRIPTION("TI DAVINCI McASP SoC Interface");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/dwc/fh_i2s_dai.h b/sound/soc/dwc/fh_i2s_dai.h
new file mode 100644
index 00000000..5b2f207b
--- /dev/null
+++ b/sound/soc/dwc/fh_i2s_dai.h
@@ -0,0 +1,59 @@
+/*
+ * ALSA SoC McASP Audio Layer for TI DAVINCI processor
+ *
+ * MCASP related definitions
+ *
+ * Author: Nirmal Pandey <n-pandey@ti.com>,
+ * Suresh Rajashekara <suresh.r@ti.com>
+ *         Steve Chen <schen@mvista.com>
+ *
+ * Copyright: (C) 2009 MontaVista Software, Inc., <source@mvista.com>
+ * Copyright: (C) 2009 Texas Instruments, India
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DAVINCI_MCASP_H
+#define DAVINCI_MCASP_H
+
+#include <linux/io.h>
+//#include <mach/asp.h>
+#include "fullhan-pcm.h"
+
+#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_96000
+#define DAVINCI_MCASP_I2S_DAI 0
+#define DAVINCI_MCASP_DIT_DAI 1
+
+enum {
+ DAVINCI_AUDIO_WORD_8 = 0,
+ DAVINCI_AUDIO_WORD_12,
+ DAVINCI_AUDIO_WORD_16,
+ DAVINCI_AUDIO_WORD_20,
+ DAVINCI_AUDIO_WORD_24,
+ DAVINCI_AUDIO_WORD_32,
+ DAVINCI_AUDIO_WORD_28, /* This is only valid for McASP */
+};
+
+struct davinci_audio_dev {
+ //struct davinci_pcm_dma_params dma_params[2];
+ void __iomem *base;
+ int sample_rate;
+ struct clk *clk;
+ unsigned int codec_fmt;
+ u8 clk_active;
+
+ /* McASP specific data */
+ int tdm_slots;
+ u8 op_mode;
+ u8 num_serializer;
+ u8 *serial_dir;
+ u8 version;
+
+ /* McASP FIFO related */
+ u8 txnumevt;
+ u8 rxnumevt;
+};
+
+#endif /* DAVINCI_MCASP_H */
diff --git a/sound/soc/dwc/fullhan-pcm.c b/sound/soc/dwc/fullhan-pcm.c
new file mode 100644
index 00000000..bd57e863
--- /dev/null
+++ b/sound/soc/dwc/fullhan-pcm.c
@@ -0,0 +1,555 @@
+/*
+ * ALSA PCM interface for the Stretch s6000 family
+ *
+ * Author: Daniel Gloeckner, <dg@emlix.com>
+ * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <asm/dma.h>
+//#include <variant/dmac.h>
+#include "dma.h"
+#include "fullhan-pcm.h"
+
+#define S6_PCM_PREALLOCATE_SIZE (96 * 1024)
+#define S6_PCM_PREALLOCATE_MAX (2048 * 1024)
+
+
+struct snd_pcm_substream *capture_substream,*play_substream;
+
+
+
+static struct snd_pcm_hardware s6000_pcm_hardware = { .info =
+ (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_JOINT_DUPLEX), .formats =
+ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE), .rates =
+ (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_5512 |
+ SNDRV_PCM_RATE_8000_192000), .rate_min = 0, .rate_max = 1562500,
+ .channels_min = 2, .channels_max = 8, .buffer_bytes_max = 0x7ffffff0,
+ .period_bytes_min = 16, .period_bytes_max = 0xfffff0, .periods_min = 2,
+ .periods_max = 1024, /* no limit */
+ .fifo_size = 0, };
+
+struct s6000_runtime_data {
+ spinlock_t lock;
+ int period; /* current DMA period */
+ int pos;
+};
+
+
+void test_dma_copy(unsigned int src, unsigned int dst) {
+ Dma_SetTxrType(0, DMA_TTFC_M2M_DMAC);
+ Dma_SetSrcWidth(0, 2);
+ Dma_SetSrcSize(0, 0);
+ Dma_SetDstWidth(0, 2); // UART can only accept 8bits input
+ Dma_SetDstSize(0, 0); // burst size, UART has 16bytes FIFO, 1/2 thrl
+ Dma_SetSrcAddress(0, src);
+ Dma_SetDstAddress(0, dst);
+ Dma_SetSrcIncDirection(0, DMA_DIR_INC);
+ Dma_SetDstIncDirection(0, DMA_DIR_INC);
+ Dma_EnableIsrBit(0, DMA_INTT_BLOCK); // block finish ISR.
+
+ Dma_SetTxrSize(0, 4); // copy 1K bytes.
+
+ Dma_EnableChan(0);
+
+}
+
+static void copy_finish(struct snd_pcm_substream *substream ) {
+ snd_pcm_period_elapsed(substream);
+}
+
+void i2s_irq_enquen(int type, u8 *buff, u8 len,u8 reset)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_pcm_runtime *runtime;
+ struct s6000_runtime_data *prtd;
+ static int rx_count = 0,tx_count = 0;
+ if (SNDRV_PCM_STREAM_PLAYBACK == type) {
+ if(reset){
+ tx_count = 0;
+ return;
+ }
+ if (!play_substream) {
+ return;
+ }
+ substream = play_substream;
+ runtime = substream->runtime;
+ prtd = runtime->private_data;
+ memcpy(buff,runtime->dma_area+prtd->pos,len);
+ tx_count += len;
+ if (tx_count >= snd_pcm_lib_period_bytes(substream)) {
+ tx_count = 0;
+ copy_finish(substream);
+ }
+ } else {
+ if(reset){
+ rx_count = 0;
+ return;
+ }
+ if(!capture_substream){
+ return;
+ }
+ substream = capture_substream;
+ runtime = substream->runtime;
+ prtd = runtime->private_data;
+ memcpy(runtime->dma_area+prtd->pos,buff,len);
+ rx_count += len;
+ if (rx_count >= snd_pcm_lib_period_bytes(substream) ) {
+ rx_count = 0;
+ copy_finish(substream);
+ }
+ }
+ prtd->pos += len;
+ if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream)) {
+ prtd->pos = 0;
+ }
+}
+EXPORT_SYMBOL(i2s_irq_enquen);
+
+
+/*
+ * DMA interrupt handler.  The original s6000 body is disabled below; on
+ * this platform the period bookkeeping is done by i2s_irq_enquen() from
+ * the I2S driver's own interrupt path.  The function previously fell off
+ * the end without a return statement, which is undefined behavior for a
+ * non-void function and hands a garbage irqreturn_t to the irq core.
+ */
+static irqreturn_t s6000_pcm_irq(int irq, void *data) {
+// struct snd_pcm *pcm = data;
+// struct snd_soc_pcm_runtime *runtime = pcm->private_data;
+// struct s6000_runtime_data *prtd;
+// unsigned int has_xrun;
+// int i, ret = IRQ_NONE;
+//
+// for (i = 0; i < 2; ++i) {
+// struct snd_pcm_substream *substream = pcm->streams[i].substream;
+// struct s6000_pcm_dma_params *params = snd_soc_dai_get_dma_data(
+// runtime->cpu_dai, substream);
+// u32 channel;
+// unsigned int pending;
+//
+// if (substream == SNDRV_PCM_STREAM_PLAYBACK)
+// channel = params->dma_out;
+// else
+// channel = params->dma_in;
+//
+// has_xrun = params->check_xrun(runtime->cpu_dai);
+//
+// if (!channel)
+// continue;
+//
+// if (unlikely(has_xrun & (1 << i)) && substream->runtime
+// && snd_pcm_running(substream)) {
+// dev_dbg(pcm->dev, "xrun\n");
+// snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+// ret = IRQ_HANDLED;
+// }
+//
+// pending = s6dmac_int_sources(DMA_MASK_DMAC(channel),
+// DMA_INDEX_CHNL(channel));
+//
+// if (pending & 1) {
+// ret = IRQ_HANDLED;
+// if (likely(substream->runtime && snd_pcm_running(substream))) {
+// snd_pcm_period_elapsed(substream);
+// dev_dbg(pcm->dev, "period elapsed %x %x\n",
+// s6dmac_cur_src(DMA_MASK_DMAC(channel),
+// DMA_INDEX_CHNL(channel)),
+// s6dmac_cur_dst(DMA_MASK_DMAC(channel),
+// DMA_INDEX_CHNL(channel)));
+// prtd = substream->runtime->private_data;
+// spin_lock(&prtd->lock);
+// s6000_pcm_enqueue_dma(substream);
+// spin_unlock(&prtd->lock);
+// }
+// }
+//
+// if (unlikely(pending & ~7)) {
+// if (pending & (1 << 3))
+// printk(KERN_WARNING
+// "s6000-pcm: DMA %x Underflow\n",
+// channel);
+// if (pending & (1 << 4))
+// printk(KERN_WARNING
+// "s6000-pcm: DMA %x Overflow\n",
+// channel);
+// if (pending & 0x1e0)
+// printk(KERN_WARNING
+// "s6000-pcm: DMA %x Master Error "
+// "(mask %x)\n",
+// channel, pending >> 5);
+//
+// }
+//}
+
+	return IRQ_NONE;	/* nothing handled here */
+}
+
+static int s6000_pcm_start(struct snd_pcm_substream *substream) {
+
+struct s6000_runtime_data *prtd = substream->runtime->private_data;
+struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+struct s6000_pcm_dma_params *par;
+unsigned long flags;
+int srcinc;
+u32 dma;
+par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
+
+//spin_lock_irqsave(&prtd->lock, flags);
+
+if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ srcinc = 1;
+ dma = par->dma_out;
+} else {
+ srcinc = 0;
+ dma = par->dma_in;
+}
+
+// s6dmac_enable_chan(DMA_MASK_DMAC(dma), DMA_INDEX_CHNL(dma),
+// 1 /* priority 1 (0 is max) */,
+// 0 /* peripheral requests w/o xfer length mode */,
+// srcinc /* source address increment */,
+// srcinc^1 /* destination address increment */,
+// 0 /* chunksize 0 (skip impossible on this dma) */,
+// 0 /* source skip after chunk (impossible) */,
+// 0 /* destination skip after chunk (impossible) */,
+// 4 /* 16 byte burst size */,
+// -1 /* don't conserve bandwidth */,
+// 0 /* low watermark irq descriptor threshold */,
+// 0 /* disable hardware timestamps */,
+// 1 /* enable channel */);
+// prtd->period = 0;
+// s6000_pcm_enqueue_dma(substream);
+
+//spin_unlock_irqrestore(&prtd->lock, flags);
+
+return 0;
+}
+
+static int s6000_pcm_stop(struct snd_pcm_substream *substream) {
+	/*
+	 * Drop only the bookkeeping pointer that belongs to the stream
+	 * being stopped.  The old code zeroed BOTH capture_substream and
+	 * play_substream, which silenced the other direction even while
+	 * it was still running.  (Also removes three unused locals.)
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		play_substream = 0;
+	else
+		capture_substream = 0;
+// u32 channel;
+
+// par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
+//
+// if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+// channel = par->dma_out;
+// else
+// channel = par->dma_in;
+	return 0;
+}
+
+static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd) {
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct s6000_pcm_dma_params *par;
+ int ret;
+
+ par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
+
+ ret = par->trigger(substream, cmd, 0);
+ if (ret < 0)
+ return ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ i2s_irq_enquen(substream->stream,0,0,1);
+ ret = s6000_pcm_start(substream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = s6000_pcm_stop(substream);
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (ret < 0)
+ return ret;
+
+ return par->trigger(substream, cmd, 1);
+}
+
+static int s6000_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+
+static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream) {
+struct snd_pcm_runtime *runtime = substream->runtime;
+struct s6000_runtime_data *prtd = runtime->private_data;
+unsigned int offset;
+offset = bytes_to_frames(runtime, prtd->pos);
+if (unlikely(offset >= runtime->buffer_size))
+ offset = 0;
+return offset;
+}
+
+static int s6000_pcm_open(struct snd_pcm_substream *substream) {
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct s6000_pcm_dma_params *par;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct s6000_runtime_data *prtd;
+ int ret;
+ par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
+ snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware);
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16);
+ if (ret < 0)
+ return ret;
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
+ if (ret < 0)
+ return ret;
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+
+
+
+ // if (par->same_rate) {
+ // printk("s6000 pcm open 5.0\n");
+ // int rate;
+ // spin_lock(&par->lock); /* needed? */
+ // rate = par->rate;
+ // spin_unlock(&par->lock);
+ // printk("s6000 pcm open 5.1\n");
+ // if (rate != -1) {
+ // ret = snd_pcm_hw_constraint_minmax(runtime,
+ // SNDRV_PCM_HW_PARAM_RATE,
+ // rate, rate);
+ // printk("s6000 pcm open 5.2\n");
+ // if (ret < 0)
+ // return ret;
+ // }
+ // }
+ prtd = kzalloc(sizeof(struct s6000_runtime_data), GFP_KERNEL);
+ if (prtd == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&prtd->lock);
+ prtd->period = 0;
+ prtd->pos = 0;
+ runtime->private_data = prtd;
+ /*remember to judge capture or play stream*/
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ play_substream = substream;
+ }else {
+ capture_substream = substream;
+ }
+ //irq_emulation();
+ return 0;
+}
+
+static int s6000_pcm_close(struct snd_pcm_substream *substream) {
+	struct s6000_runtime_data *prtd = substream->runtime->private_data;
+	unsigned long flags;
+	spin_lock_irqsave(&prtd->lock, flags);	/* drain any irq-side user */
+	spin_unlock_irqrestore(&prtd->lock, flags);
+	kfree(prtd);	/* was freed BEFORE unlock: use-after-free of the lock */
+	return 0;
+}
+
+static int s6000_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params) {
+struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+struct s6000_pcm_dma_params *par;
+int ret;
+ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+if (ret < 0) {
+ printk(KERN_WARNING "s6000-pcm: allocation of memory failed\n");
+ return ret;
+}
+par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
+
+ if (par->same_rate) {
+ spin_lock(&par->lock);
+ if (par->rate == -1 ||
+ !(par->in_use & ~(1 << substream->stream))) {
+ par->rate = params_rate(hw_params);
+ par->in_use |= 1 << substream->stream;
+ } else if (params_rate(hw_params) != par->rate) {
+ snd_pcm_lib_free_pages(substream);
+ par->in_use &= ~(1 << substream->stream);
+ ret = -EBUSY;
+ }
+ spin_unlock(&par->lock);
+ }
+ return ret;
+}
+
+static int s6000_pcm_hw_free(struct snd_pcm_substream *substream) {
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct s6000_pcm_dma_params *par =
+ snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
+
+ spin_lock(&par->lock);
+ par->in_use &= ~(1 << substream->stream);
+ if (!par->in_use)
+ par->rate = -1;
+ spin_unlock(&par->lock);
+
+ return snd_pcm_lib_free_pages(substream);
+}
+int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma) {
+struct snd_pcm_runtime *runtime = substream->runtime;
+return dma_mmap_writecombine(substream->pcm->card->dev, vma, runtime->dma_area,
+ runtime->dma_addr, runtime->dma_bytes);
+}
+int s6000_pcm_copy(struct snd_pcm_substream *substream, int channel,
+ snd_pcm_uframes_t pos, void __user *buf, snd_pcm_uframes_t count) {
+
+
+return 0;
+}
+static struct snd_pcm_ops s6000_pcm_ops = { .copy = s6000_pcm_copy, .open =
+ s6000_pcm_open, .close = s6000_pcm_close, .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = s6000_pcm_hw_params, .hw_free = s6000_pcm_hw_free, .trigger =
+ s6000_pcm_trigger, .prepare = s6000_pcm_prepare, .pointer =
+ s6000_pcm_pointer,
+// .mmap=pxa2xx_pcm_mmap,
+ };
+
+static void s6000_pcm_free(struct snd_pcm *pcm) {
+// struct snd_soc_pcm_runtime *runtime = pcm->private_data;
+// struct s6000_pcm_dma_params *params =
+// snd_soc_dai_get_dma_data(runtime->cpu_dai, pcm->streams[0].substream);
+//
+// free_irq(params->irq, pcm);
+// snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
+static int davinci_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
+ size_t size) {
+struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+struct snd_dma_buffer *buf = &substream->dma_buffer;
+
+buf->dev.type = SNDRV_DMA_TYPE_DEV;
+buf->dev.dev = pcm->card->dev;
+buf->private_data = NULL;
+buf->area = dma_alloc_writecombine(pcm->card->dev, size, &buf->addr,
+GFP_KERNEL);
+pr_debug("davinci_pcm: preallocate_dma_buffer: area=%p, addr=%p, "
+ "size=%d\n", (void *) buf->area, (void *) buf->addr, size);
+
+if (!buf->area)
+ return -ENOMEM;
+
+buf->bytes = size;
+return 0;
+}
+static u64 davinci_pcm_dmamask = 0xffffffff;
+
+static int s6000_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
+ struct snd_pcm *pcm) {
+struct snd_soc_pcm_runtime *runtime = pcm->private_data;
+struct s6000_pcm_dma_params *params;
+int res;
+int ret;
+
+#if 0
+if (dai->driver->playback.channels_min) {
+ ret = davinci_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK,
+ pcm_hardware_playback.buffer_bytes_max);
+ if (ret)
+ return ret;
+}
+
+if (dai->driver->capture.channels_min) {
+ ret = davinci_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE,
+ pcm_hardware_capture.buffer_bytes_max);
+ if (ret)
+ return ret;
+}
+#endif
+params = snd_soc_dai_get_dma_data(runtime->cpu_dai, pcm->streams[0].substream);
+if (!card->dev->dma_mask)
+ card->dev->dma_mask = &s6000_pcm_dmamask;
+if (!card->dev->coherent_dma_mask)
+ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+
+res = snd_pcm_lib_preallocate_pages_for_all(pcm,
+SNDRV_DMA_TYPE_DEV, card->dev,
+S6_PCM_PREALLOCATE_SIZE,
+S6_PCM_PREALLOCATE_MAX);
+if (res)
+printk(KERN_WARNING "s6000-pcm: preallocation failed\n");
+
+
+return 0;
+}
+
+static struct snd_soc_platform_driver s6000_soc_platform = { .ops =
+ &s6000_pcm_ops, .pcm_new = s6000_pcm_new, .pcm_free = s6000_pcm_free, };
+
+static int __devinit s6000_soc_platform_probe(struct platform_device *pdev)
+{
+	int err;
+
+	/* dev_id cookie is unused by the handler; the old code passed an
+	 * uninitialized pointer here.  NULL is safe for a non-shared irq. */
+	err = request_irq(9, s6000_pcm_irq, 0, "dw_dmac", NULL);
+	if (err) {
+		printk(KERN_ERR "pcm dma interrupt request failed\n");
+	}
+	return snd_soc_register_platform(&pdev->dev, &s6000_soc_platform);
+}
+
+static int __devexit s6000_soc_platform_remove(struct platform_device *pdev)
+{
+snd_soc_unregister_platform(&pdev->dev);
+return 0;
+}
+
+static struct platform_driver s6000_pcm_driver = { .driver = { .name =
+ "fh-pcm-audio", .owner = THIS_MODULE, },
+
+.probe = s6000_soc_platform_probe, .remove = __devexit_p(
+ s6000_soc_platform_remove), };
+
+static int __init snd_s6000_pcm_init(void)
+{
+
+ return platform_driver_register(&s6000_pcm_driver);
+}
+module_init(snd_s6000_pcm_init);
+
+static void __exit snd_s6000_pcm_exit(void)
+{
+ platform_driver_unregister(&s6000_pcm_driver);
+}
+module_exit(snd_s6000_pcm_exit);
+
+MODULE_AUTHOR("Daniel Gloeckner");
+MODULE_DESCRIPTION("Stretch s6000 family PCM DMA module");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/dwc/fullhan-pcm.h b/sound/soc/dwc/fullhan-pcm.h
new file mode 100644
index 00000000..09d9b883
--- /dev/null
+++ b/sound/soc/dwc/fullhan-pcm.h
@@ -0,0 +1,33 @@
+/*
+ * ALSA PCM interface for the Stretch s6000 family
+ *
+ * Author: Daniel Gloeckner, <dg@emlix.com>
+ * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _S6000_PCM_H
+#define _S6000_PCM_H
+
+struct snd_soc_dai;
+struct snd_pcm_substream;
+
+struct s6000_pcm_dma_params {
+ unsigned int (*check_xrun)(struct snd_soc_dai *cpu_dai);
+ int (*trigger)(struct snd_pcm_substream *substream, int cmd, int after);
+ dma_addr_t sif_in;
+ dma_addr_t sif_out;
+ u32 dma_in;
+ u32 dma_out;
+ int irq;
+ int same_rate;
+
+ spinlock_t lock;
+ int in_use;
+ int rate;
+};
+
+#endif
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index c1683661..d5d628fb 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -96,7 +96,7 @@ ifndef PERF_DEBUG
endif
CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-EXTLIBS = -lpthread -lrt -lelf -lm
+EXTLIBS = -lpthread -lrt -lelf -lm -lebl -lz -ldl
ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
ALL_LDFLAGS = $(LDFLAGS)
STRIP ?= strip
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 3b9d0b80..4e93e8ce 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -18,25 +18,14 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
-
+#include "util.h"
#include <sys/utsname.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <stdio.h>
-#include <unistd.h>
#include <getopt.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-#include <ctype.h>
#include <dwarf-regs.h>
#include <linux/bitops.h>
#include "event.h"
#include "debug.h"
-#include "util.h"
#include "symbol.h"
#include "probe-finder.h"
diff --git a/usr/rootfs.cpio.gz b/usr/rootfs.cpio.gz
new file mode 100644
index 00000000..35e40a30
Binary files /dev/null and b/usr/rootfs.cpio.gz differ