diff --git a/.github/workflows/fh8632_images.yml b/.github/workflows/fh8833v100_images.yml
similarity index 81%
rename from .github/workflows/fh8632_images.yml
rename to .github/workflows/fh8833v100_images.yml
index 106bf3d4..06d99939 100644
--- a/.github/workflows/fh8632_images.yml
+++ b/.github/workflows/fh8833v100_images.yml
@@ -1,4 +1,4 @@
-name: "FH8632  "
+name: "FH8833V100"
 
 on:
   push:
@@ -10,7 +10,7 @@ on:
 
 jobs:
   build_core:
-    name: OpenIPC firmware for FH8632
+    name: OpenIPC firmware for FH8833V100
     runs-on: ubuntu-latest
 
     steps:
@@ -57,25 +57,25 @@ jobs:
           cd $GITHUB_WORKSPACE
           make prepare
 
-      - name: Build FH8632 firmware
-        id: build-fh8632-firmware
+      - name: Build FH8833V100 firmware
+        id: build-fh8833v100-firmware
         continue-on-error: true
         run: |
-          ARCHIVE_FW="${GITHUB_WORKSPACE}/output/images/openipc.fh8632-br.tgz"
+          ARCHIVE_FW="${GITHUB_WORKSPACE}/output/images/openipc.fh8833v100-br.tgz"
           echo "ARCHIVE_FW=$ARCHIVE_FW" >> $GITHUB_ENV
           cd $GITHUB_WORKSPACE
-          make PLATFORM=fullhan BOARD=unknown_unknown_fh8632_openipc all
-          [[ $(stat --printf="%s" ${GITHUB_WORKSPACE}/output/images/uImage) -gt 2097152 ]] && echo "TG_NOTIFY=Warning, kernel size exceeded : $(stat --printf="%s" ${GITHUB_WORKSPACE}/output/images/uImage) vs 2097152... FH8632" >> $GITHUB_ENV && exit 1
-          [[ $(stat --printf="%s" ${GITHUB_WORKSPACE}/output/images/rootfs.squashfs) -gt 5242880 ]] && echo "TG_NOTIFY=Warning, rootfs size exceeded - $(stat --printf="%s" ${GITHUB_WORKSPACE}/output/images/rootfs.squashfs) vs 5242880... FH8632" >> $GITHUB_ENV && exit 1
+          make PLATFORM=fullhan BOARD=unknown_unknown_fh8833v100_openipc all
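+          # Size guards: 2097152 B (2 MiB) for the kernel and 5242880 B (5 MiB)
+          # for the rootfs, presumably matching the flash partition layout; on
+          # overflow the step fails and TG_NOTIFY is exported for the Telegram
+          # alert step below.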
+          [[ $(stat --printf="%s" ${GITHUB_WORKSPACE}/output/images/uImage) -gt 2097152 ]] && echo "TG_NOTIFY=Warning, kernel size exceeded: $(stat --printf="%s" ${GITHUB_WORKSPACE}/output/images/uImage) vs 2097152... FH8833V100" >> $GITHUB_ENV && exit 1
+          [[ $(stat --printf="%s" ${GITHUB_WORKSPACE}/output/images/rootfs.squashfs) -gt 5242880 ]] && echo "TG_NOTIFY=Warning, rootfs size exceeded: $(stat --printf="%s" ${GITHUB_WORKSPACE}/output/images/rootfs.squashfs) vs 5242880... FH8833V100" >> $GITHUB_ENV && exit 1
           cd ${GITHUB_WORKSPACE}/output/images
-          mv uImage uImage.fh8632
-          mv rootfs.squashfs rootfs.squashfs.fh8632
-          md5sum rootfs.squashfs.fh8632 > rootfs.squashfs.fh8632.md5sum
-          md5sum uImage.fh8632 > uImage.fh8632.md5sum
-          tar -cvzf $ARCHIVE_FW *fh8632*
+          mv uImage uImage.fh8833v100
+          mv rootfs.squashfs rootfs.squashfs.fh8833v100
+          md5sum rootfs.squashfs.fh8833v100 > rootfs.squashfs.fh8833v100.md5sum
+          md5sum uImage.fh8833v100 > uImage.fh8833v100.md5sum
+          tar -cvzf $ARCHIVE_FW *fh8833v100*
 
-      - name: Build FH8632 SDK
-        id: build-fh8632-sdk
+      - name: Build FH8833V100 SDK
+        id: build-fh8833v100-sdk
         continue-on-error: true
         run: |
           ARCHIVE_SDK="${GITHUB_WORKSPACE}/output/images/arm-openipc-linux-musleabi_sdk-buildroot.tar.gz"
@@ -87,10 +87,10 @@ jobs:
         env:
           TG_TOKEN: ${{ secrets.TELEGRAM_TOKEN_BOT_OPENIPC }}
           TG_CHANNEL: ${{ secrets.TELEGRAM_CHANNEL_OPENIPC_DEV }}
-        if: steps.build-fh8632-firmware.outcome != 'success' || steps.build-fh8632-sdk.outcome != 'success'
+        if: steps.build-fh8833v100-firmware.outcome != 'success' || steps.build-fh8833v100-sdk.outcome != 'success'
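+        # Both build steps run with continue-on-error, so the job reaches this
+        # step either way; the condition fires the alert when either outcome is
+        # not 'success'.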
         run: |
           TG_OPTIONS="-s --connect-timeout 30 --max-time 30"
-          TG_NOTIFY="${TG_NOTIFY:=Warning, Buildroot compiling error... FH8632}"
+          TG_NOTIFY="${TG_NOTIFY:=Warning, Buildroot compiling error... FH8833V100}"
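+          # ${TG_NOTIFY:=...} only assigns the generic fallback text; a message
+          # already exported via $GITHUB_ENV by a failed size check wins.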
           TG_HEADER=$(echo -e "\r\n$TG_NOTIFY \r\n\r\nCommit: $GIT_HASH \r\nBranch: $BRANCH_NAME \r\nTag: $TAG_NAME \r\n\r\n\xE2\x9A\xA0 GitHub Actions")
           curl $TG_OPTIONS -H "Content-Type: multipart/form-data" -X POST https://api.telegram.org/bot$TG_TOKEN/sendMessage \
             -F chat_id=$TG_CHANNEL -F text="$TG_HEADER"
@@ -112,7 +112,7 @@ jobs:
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           file: ${{ env.ARCHIVE_FW }}
-          asset_name: "openipc.fh8632-br.tgz"
+          asset_name: "openipc.fh8833v100-br.tgz"
           tag: ${{ env.TAG_NAME }}
           overwrite: true
 
@@ -122,7 +122,7 @@ jobs:
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           file: ${{ env.ARCHIVE_SDK }}
-          asset_name: "arm-openipc-fh8632-linux-musleabi_sdk-buildroot.tar.gz"
+          asset_name: "arm-openipc-fh8833v100-linux-musleabi_sdk-buildroot.tar.gz"
           tag: ${{ env.TAG_NAME }}
           overwrite: true
 
diff --git a/br-ext-chip-fullhan/board/fh8833v100.config b/br-ext-chip-fullhan/board/fh8833v100.config
new file mode 100644
index 00000000..bd20a71b
--- /dev/null
+++ b/br-ext-chip-fullhan/board/fh8833v100.config
@@ -0,0 +1,2 @@
+MEM_START_ADDR=0xa0000000
+KERNEL_UPLOAD_ADDR=0xa0008000
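+# MEM_START_ADDR is presumably the DDR base on the FH8833V100; the +0x8000
+# kernel offset matches the conventional ARM TEXT_OFFSET (zreladdr).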
diff --git a/br-ext-chip-fullhan/board/fh8852v100/kernel/fh8833v100.generic.config b/br-ext-chip-fullhan/board/fh8833v100/kernel/fh8833v100.generic.config
similarity index 98%
rename from br-ext-chip-fullhan/board/fh8852v100/kernel/fh8833v100.generic.config
rename to br-ext-chip-fullhan/board/fh8833v100/kernel/fh8833v100.generic.config
index 968e51f9..2d11c597 100644
--- a/br-ext-chip-fullhan/board/fh8852v100/kernel/fh8833v100.generic.config
+++ b/br-ext-chip-fullhan/board/fh8833v100/kernel/fh8833v100.generic.config
@@ -25,6 +25,7 @@ CONFIG_VECTORS_BASE=0xffff0000
 # CONFIG_ARM_PATCH_PHYS_VIRT is not set
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 CONFIG_HAVE_IRQ_WORK=y
+CONFIG_IRQ_WORK=y
 
 #
 # General setup
@@ -243,6 +244,7 @@ CONFIG_ARCH_FULLHAN=y
 # CONFIG_PLAT_SPEAR is not set
 # CONFIG_ARCH_VT8500 is not set
 # CONFIG_GPIO_PCA953X is not set
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
 CONFIG_CPU_FH8833=y
 
 #
@@ -252,21 +254,14 @@ CONFIG_CPU_FH8833=y
 #
 # FullHan Core Type
 #
-# CONFIG_ARCH_FH8810 is not set
 CONFIG_ARCH_FH8833=y
-# CONFIG_ARCH_FH8830 is not set
-# CONFIG_ARCH_FH8856 is not set
-# CONFIG_ARCH_FH8626V100 is not set
-# CONFIG_ARCH_WUDANG is not set
-# CONFIG_ARCH_ZY2 is not set
 
 #
 # FullHan Board Type
 #
-# CONFIG_USE_PTS_AS_CLOCKSOURCE is not set
+CONFIG_USE_PTS_AS_CLOCKSOURCE=y
 # CONFIG_FH_SIMPLE_TIMER is not set
 CONFIG_MACH_FH8833=y
-CONFIG_MACH_FH8833_QFN56=y
 # CONFIG_MACH_FH_NAND is not set
 # CONFIG_JLINK_DEBUG is not set
 
@@ -312,9 +307,8 @@ CONFIG_ARM_ERRATA_411920=y
 #
 # Kernel Features
 #
-CONFIG_TICK_ONESHOT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 CONFIG_VMSPLIT_3G=y
 # CONFIG_VMSPLIT_2G is not set
@@ -645,9 +639,11 @@ CONFIG_MISC_DEVICES=y
 # CONFIG_SENSORS_BH1770 is not set
 # CONFIG_SENSORS_APDS990X is not set
 # CONFIG_HMC6352 is not set
-# CONFIG_FH_DW_I2S is not set
+# CONFIG_FH_I2S_SLAVE is not set
+# CONFIG_FH_I2S_MASTER is not set
 CONFIG_FH_PINCTRL=y
-CONFIG_FH_SADC_V1=y
+CONFIG_FH_SADC=y
+# CONFIG_FH_FIRMWARE_LOADER is not set
 CONFIG_FH_EFUSE=y
 CONFIG_FH_CLK_MISC=y
 # CONFIG_DS1682 is not set
@@ -1236,7 +1232,7 @@ CONFIG_RTC_INTF_DEV=y
 #
 # on-CPU RTC drivers
 #
-# CONFIG_RTC_DRV_FH is not set
+CONFIG_RTC_DRV_FH=y
 CONFIG_DMADEVICES=y
 # CONFIG_DMADEVICES_DEBUG is not set
 
@@ -1261,6 +1257,8 @@ CONFIG_DMA_ENGINE=y
 CONFIG_CLKDEV_LOOKUP=y
 CONFIG_PWM=y
 CONFIG_PWM_FULLHAN=y
+CONFIG_FH_PWM_NUM=8
+CONFIG_PWM_FULLHAN_V20=y
 
 #
 # File systems
diff --git a/br-ext-chip-fullhan/board/fh8833v100/kernel/overlay/arch/arm/mach-fh/include/mach/board_config.fh8833.appboard b/br-ext-chip-fullhan/board/fh8833v100/kernel/overlay/arch/arm/mach-fh/include/mach/board_config.fh8833.appboard
new file mode 100644
index 00000000..128fbcd2
--- /dev/null
+++ b/br-ext-chip-fullhan/board/fh8833v100/kernel/overlay/arch/arm/mach-fh/include/mach/board_config.fh8833.appboard
@@ -0,0 +1,40 @@
+/*
+ * board_config.h
+ *
+ *  Created on: Jan 9, 2017
+ *      Author: duobao
+ */
+
+#ifndef BOARD_CONFIG_H_
+#define BOARD_CONFIG_H_
+
+/*
+ * GPIO0  -> IRCUT_ON
+ * GPIO1  -> IRCUT_OFF
+ * GPIO2  -> PHY Reset
+ * GPIO3  -> IR
+ * GPIO13 -> Sensor Reset
+ * GPIO14 -> Sensor Power Down
+ * GPIO55 -> CSN1
+ */
+
+#define CONFIG_GPIO_EMACPHY_RESET	2
+#define CONFIG_GPIO_EMACPHY_RXDV	41
+#define CONFIG_SD_WP_FIXED
+
+#define CONFIG_PINCTRL_SELECT					\
+	"MIPI", "RMII", "UART0", "USB", "DWI2S",		\
+	"I2C0", "SSI0", "SD0_CARD_1BIT",			\
+	"GPIO0", "GPIO1", "GPIO2", "GPIO3",			\
+	"GPIO13",						\
+								\
+	"GPIO4", "GPIO11", "GPIO5", "GPIO6", "GPIO7",		\
+	"GPIO8", "GPIO9", "GPIO10", "GPIO14", "GPIO19",		\
+	"GPIO20", "GPIO21", "GPIO23", "GPIO28", "GPIO29",	\
+	"GPIO30", "GPIO31", "GPIO32", "GPIO33", "GPIO35",	\
+	"GPIO36", "GPIO37", "GPIO39", "GPIO40", "GPIO44",	\
+	"GPIO45", "GPIO47", "GPIO50", "GPIO51",	"GPIO55",	\
+	"GPIO61",						\
+    
+    
+#endif /* BOARD_CONFIG_H_ */
diff --git a/br-ext-chip-fullhan/board/fh8833v100/kernel/overlay/include/linux/compiler-gcc7.h b/br-ext-chip-fullhan/board/fh8833v100/kernel/overlay/include/linux/compiler-gcc7.h
new file mode 100644
index 00000000..613f9936
--- /dev/null
+++ b/br-ext-chip-fullhan/board/fh8833v100/kernel/overlay/include/linux/compiler-gcc7.h
@@ -0,0 +1,65 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc7.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used				__attribute__((__used__))
+#define __must_check			__attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b)	__builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+   to them will be unlikely.  This means a lot of manual unlikely()s
+   are unnecessary now for any paths leading to the usual suspects
+   like BUG(), printk(), panic() etc. [but let's keep them for now for
+   older compilers]
+
+   Early snapshots of gcc 4.3 don't support this and we can't detect this
+   in the preprocessor, but we can live with this because they're unreleased.
+   Maketime probing would be overkill here.
+
+   gcc also has a __attribute__((__hot__)) to move hot functions into
+   a special section, but I don't see any sense in this right now in
+   the kernel context */
+#define __cold			__attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable.  This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased.  Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone	__attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0000-fh8833v100-kernel-3.0.8.patch b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0000-fh8833v100-kernel-3.0.8.patch
new file mode 100644
index 00000000..08792420
--- /dev/null
+++ b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0000-fh8833v100-kernel-3.0.8.patch
@@ -0,0 +1,123920 @@
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 91c84cbe..28ac5c82 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -858,6 +858,20 @@ config ARCH_OMAP
+ 	help
+ 	  Support for TI's OMAP platform (OMAP1/2/3/4).
+ 
++config ARCH_FULLHAN
++	bool "FullHan"
++	select GENERIC_CLOCKEVENTS
++#	select GENERIC_TIME
++	select HAVE_SCHED_CLOCK
++	select ARCH_REQUIRE_GPIOLIB
++#	select ZONE_DMA
++	select CLKDEV_LOOKUP
++	select GENERIC_ALLOCATOR
++#	select GENERIC_IRQ_CHIP
++	select HAVE_SYSCALL_TRACEPOINTS
++	help
++	  Support for FullHan's FH platform.
++
+ config PLAT_SPEAR
+ 	bool "ST SPEAr"
+ 	select ARM_AMBA
+@@ -900,6 +914,8 @@ source "arch/arm/mach-dove/Kconfig"
+ 
+ source "arch/arm/mach-ep93xx/Kconfig"
+ 
++source "arch/arm/mach-fh/Kconfig"
++
+ source "arch/arm/mach-footbridge/Kconfig"
+ 
+ source "arch/arm/mach-gemini/Kconfig"
+@@ -1577,7 +1593,8 @@ config LEDS
+ 		   ARCH_OMAP || ARCH_P720T || ARCH_PXA_IDP || \
+ 		   ARCH_SA1100 || ARCH_SHARK || ARCH_VERSATILE || \
+ 		   ARCH_AT91 || ARCH_DAVINCI || \
+-		   ARCH_KS8695 || MACH_RD88F5182 || ARCH_REALVIEW
++		   ARCH_KS8695 || MACH_RD88F5182 || ARCH_REALVIEW || \
++		   ARCH_FULLHAN
+ 	help
+ 	  If you say Y here, the LEDs on your machine will be used
+ 	  to provide useful information about your current system status.
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index f5b2b390..20ef496c 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -139,6 +139,7 @@ machine-$(CONFIG_ARCH_DAVINCI)		:= davinci
+ machine-$(CONFIG_ARCH_DOVE)		:= dove
+ machine-$(CONFIG_ARCH_EBSA110)		:= ebsa110
+ machine-$(CONFIG_ARCH_EP93XX)		:= ep93xx
++machine-$(CONFIG_ARCH_FULLHAN)		:= fh
+ machine-$(CONFIG_ARCH_GEMINI)		:= gemini
+ machine-$(CONFIG_ARCH_H720X)		:= h720x
+ machine-$(CONFIG_ARCH_INTEGRATOR)	:= integrator
+diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
+index 832d3723..0bcea2c1 100644
+--- a/arch/arm/boot/compressed/misc.c
++++ b/arch/arm/boot/compressed/misc.c
+@@ -169,6 +169,23 @@ asmlinkage void __div0(void)
+ 
+ extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x));
+ 
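++/*
++ * Boot-time probe, compiled in only under CONFIG_TEST_BOOT_TIME: the macro
++ * presumably bit-bangs a pin of the GPIO bank at 0xf0300000 (offset 0x04 as
++ * the direction register, offset 0x00 as the data register) so the start of
++ * decompression can be timed externally; decompress_kernel() below raises
++ * GPIO4 with it.
++ */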
++#ifdef CONFIG_TEST_BOOT_TIME
++#define SET_TIMING_GPIO(port, level)					\
++	do {								\
++		unsigned char *p_gpio = (unsigned char *) 0xf0300000;	\
++		int data = *(p_gpio + 0x0004);				\
++		data |= 1 << (port);					\
++		*(p_gpio + 0x0004) = data;				\
++		data = *(p_gpio);					\
++		if ((level) == 0)					\
++			data &= ~(1 << (port));				\
++		else							\
++			data |= 1 << (port);				\
++		*(p_gpio) = data;					\
++	} while (0)
++#else
++#define SET_TIMING_GPIO(port, level)
++#endif
+ 
+ void
+ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
+@@ -182,6 +199,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
+ 	free_mem_end_ptr	= free_mem_ptr_end_p;
+ 	__machine_arch_type	= arch_id;
+ 
++	SET_TIMING_GPIO(4, 1);
++
+ 	arch_decomp_setup();
+ 
+ 	putstr("Uncompressing Linux...");
+diff --git a/arch/arm/configs/fh8632_defconfig b/arch/arm/configs/fh8632_defconfig
+new file mode 100644
+index 00000000..4e5f7a34
+--- /dev/null
++++ b/arch/arm/configs/fh8632_defconfig
+@@ -0,0 +1,1797 @@
++#
++# Automatically generated make config: don't edit
++# Linux/arm 3.0.8 Kernel Configuration
++#
++CONFIG_ARM=y
++CONFIG_SYS_SUPPORTS_APM_EMULATION=y
++CONFIG_HAVE_SCHED_CLOCK=y
++CONFIG_GENERIC_GPIO=y
++# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_KTIME_SCALAR=y
++CONFIG_HAVE_PROC_CPU=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_HAVE_LATENCYTOP_SUPPORT=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++CONFIG_HARDIRQS_SW_RESEND=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_NEED_DMA_MAP_STATE=y
++CONFIG_VECTORS_BASE=0xffff0000
++# CONFIG_ARM_PATCH_PHYS_VIRT is not set
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++CONFIG_HAVE_IRQ_WORK=y
++CONFIG_IRQ_WORK=y
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_CROSS_COMPILE=""
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_HAVE_KERNEL_GZIP=y
++CONFIG_HAVE_KERNEL_LZMA=y
++CONFIG_HAVE_KERNEL_LZO=y
++# CONFIG_KERNEL_GZIP is not set
++CONFIG_KERNEL_LZMA=y
++# CONFIG_KERNEL_LZO is not set
++CONFIG_DEFAULT_HOSTNAME="(none)"
++# CONFIG_SWAP is not set
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_POSIX_MQUEUE_SYSCTL=y
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_FHANDLE is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++CONFIG_HAVE_GENERIC_HARDIRQS=y
++
++#
++# IRQ subsystem
++#
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_HAVE_SPARSE_IRQ=y
++CONFIG_GENERIC_IRQ_SHOW=y
++# CONFIG_SPARSE_IRQ is not set
++
++#
++# RCU Subsystem
++#
++CONFIG_TINY_RCU=y
++# CONFIG_PREEMPT_RCU is not set
++# CONFIG_RCU_TRACE is not set
++# CONFIG_TREE_RCU_TRACE is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=16
++# CONFIG_CGROUPS is not set
++# CONFIG_NAMESPACES is not set
++# CONFIG_SCHED_AUTOGROUP is not set
++# CONFIG_SYSFS_DEPRECATED is not set
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE="usr/rootfs"
++CONFIG_INITRAMFS_ROOT_UID=0
++CONFIG_INITRAMFS_ROOT_GID=0
++# CONFIG_RD_GZIP is not set
++# CONFIG_RD_BZIP2 is not set
++# CONFIG_RD_LZMA is not set
++CONFIG_RD_XZ=y
++# CONFIG_RD_LZO is not set
++# CONFIG_INITRAMFS_COMPRESSION_NONE is not set
++CONFIG_INITRAMFS_COMPRESSION_XZ=y
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++CONFIG_SYSCTL=y
++CONFIG_ANON_INODES=y
++CONFIG_EXPERT=y
++CONFIG_UID16=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_TIMERFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_AIO=y
++CONFIG_EMBEDDED=y
++CONFIG_HAVE_PERF_EVENTS=y
++CONFIG_PERF_USE_VMALLOC=y
++
++#
++# Kernel Performance Events And Counters
++#
++CONFIG_PERF_EVENTS=y
++# CONFIG_PERF_COUNTERS is not set
++CONFIG_VM_EVENT_COUNTERS=y
++# CONFIG_SLUB_DEBUG is not set
++CONFIG_COMPAT_BRK=y
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++# CONFIG_PROFILING is not set
++CONFIG_HAVE_OPROFILE=y
++# CONFIG_KPROBES is not set
++CONFIG_HAVE_KPROBES=y
++CONFIG_HAVE_KRETPROBES=y
++CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
++CONFIG_HAVE_CLK=y
++CONFIG_HAVE_DMA_API_DEBUG=y
++CONFIG_HAVE_HW_BREAKPOINT=y
++
++#
++# GCOV-based kernel profiling
++#
++CONFIG_HAVE_GENERIC_DMA_COHERENT=y
++CONFIG_RT_MUTEXES=y
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++# CONFIG_MODULE_FORCE_LOAD is not set
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_BLOCK=y
++# CONFIG_LBDAF is not set
++# CONFIG_BLK_DEV_BSG is not set
++# CONFIG_BLK_DEV_INTEGRITY is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++# CONFIG_IOSCHED_DEADLINE is not set
++# CONFIG_IOSCHED_CFQ is not set
++CONFIG_DEFAULT_NOOP=y
++CONFIG_DEFAULT_IOSCHED="noop"
++# CONFIG_INLINE_SPIN_TRYLOCK is not set
++# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
++# CONFIG_INLINE_SPIN_LOCK is not set
++# CONFIG_INLINE_SPIN_LOCK_BH is not set
++# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
++# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
++CONFIG_INLINE_SPIN_UNLOCK=y
++# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
++# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
++# CONFIG_INLINE_READ_TRYLOCK is not set
++# CONFIG_INLINE_READ_LOCK is not set
++# CONFIG_INLINE_READ_LOCK_BH is not set
++# CONFIG_INLINE_READ_LOCK_IRQ is not set
++# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
++CONFIG_INLINE_READ_UNLOCK=y
++# CONFIG_INLINE_READ_UNLOCK_BH is not set
++CONFIG_INLINE_READ_UNLOCK_IRQ=y
++# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
++# CONFIG_INLINE_WRITE_TRYLOCK is not set
++# CONFIG_INLINE_WRITE_LOCK is not set
++# CONFIG_INLINE_WRITE_LOCK_BH is not set
++# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
++# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
++CONFIG_INLINE_WRITE_UNLOCK=y
++# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
++# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
++# CONFIG_MUTEX_SPIN_ON_OWNER is not set
++CONFIG_FREEZER=y
++
++#
++# System Type
++#
++CONFIG_MMU=y
++# CONFIG_ARCH_INTEGRATOR is not set
++# CONFIG_ARCH_REALVIEW is not set
++# CONFIG_ARCH_VERSATILE is not set
++# CONFIG_ARCH_VEXPRESS is not set
++# CONFIG_ARCH_AT91 is not set
++# CONFIG_ARCH_BCMRING is not set
++# CONFIG_ARCH_CLPS711X is not set
++# CONFIG_ARCH_CNS3XXX is not set
++# CONFIG_ARCH_GEMINI is not set
++# CONFIG_ARCH_EBSA110 is not set
++# CONFIG_ARCH_EP93XX is not set
++# CONFIG_ARCH_FOOTBRIDGE is not set
++# CONFIG_ARCH_MXC is not set
++# CONFIG_ARCH_MXS is not set
++# CONFIG_ARCH_NETX is not set
++# CONFIG_ARCH_H720X is not set
++# CONFIG_ARCH_IOP13XX is not set
++# CONFIG_ARCH_IOP32X is not set
++# CONFIG_ARCH_IOP33X is not set
++# CONFIG_ARCH_IXP23XX is not set
++# CONFIG_ARCH_IXP2000 is not set
++# CONFIG_ARCH_IXP4XX is not set
++# CONFIG_ARCH_DOVE is not set
++# CONFIG_ARCH_KIRKWOOD is not set
++# CONFIG_ARCH_LOKI is not set
++# CONFIG_ARCH_LPC32XX is not set
++# CONFIG_ARCH_MV78XX0 is not set
++# CONFIG_ARCH_ORION5X is not set
++# CONFIG_ARCH_MMP is not set
++# CONFIG_ARCH_KS8695 is not set
++# CONFIG_ARCH_W90X900 is not set
++# CONFIG_ARCH_NUC93X is not set
++# CONFIG_ARCH_TEGRA is not set
++# CONFIG_ARCH_PNX4008 is not set
++# CONFIG_ARCH_PXA is not set
++# CONFIG_ARCH_MSM is not set
++# CONFIG_ARCH_SHMOBILE is not set
++# CONFIG_ARCH_RPC is not set
++# CONFIG_ARCH_SA1100 is not set
++# CONFIG_ARCH_S3C2410 is not set
++# CONFIG_ARCH_S3C64XX is not set
++# CONFIG_ARCH_S5P64X0 is not set
++# CONFIG_ARCH_S5PC100 is not set
++# CONFIG_ARCH_S5PV210 is not set
++# CONFIG_ARCH_EXYNOS4 is not set
++# CONFIG_ARCH_SHARK is not set
++# CONFIG_ARCH_TCC_926 is not set
++# CONFIG_ARCH_U300 is not set
++# CONFIG_ARCH_U8500 is not set
++# CONFIG_ARCH_NOMADIK is not set
++# CONFIG_ARCH_DAVINCI is not set
++# CONFIG_ARCH_OMAP is not set
++CONFIG_ARCH_FULLHAN=y
++# CONFIG_PLAT_SPEAR is not set
++# CONFIG_ARCH_VT8500 is not set
++# CONFIG_GPIO_PCA953X is not set
++# CONFIG_KEYBOARD_GPIO_POLLED is not set
++CONFIG_CPU_FH8833=y
++
++#
++# FullHan Implementations
++#
++
++#
++# FullHan Core Type
++#
++CONFIG_ARCH_FH8833=y
++
++#
++# FullHan Board Type
++#
++CONFIG_USE_PTS_AS_CLOCKSOURCE=y
++# CONFIG_FH_SIMPLE_TIMER is not set
++CONFIG_MACH_FH8833=y
++
++#
++# Hikvision board settings
++#
++CONFIG_HIKVISION_BOARD=y
++# CONFIG_MACH_FH_NAND is not set
++# CONFIG_JLINK_DEBUG is not set
++
++#
++# System MMU
++#
++
++#
++# Processor Type
++#
++CONFIG_CPU_V6=y
++CONFIG_CPU_32v6=y
++CONFIG_CPU_ABRT_EV6=y
++CONFIG_CPU_PABRT_V6=y
++CONFIG_CPU_CACHE_V6=y
++CONFIG_CPU_CACHE_VIPT=y
++CONFIG_CPU_COPY_V6=y
++CONFIG_CPU_TLB_V6=y
++CONFIG_CPU_HAS_ASID=y
++CONFIG_CPU_CP15=y
++CONFIG_CPU_CP15_MMU=y
++CONFIG_CPU_USE_DOMAINS=y
++
++#
++# Processor Features
++#
++CONFIG_ARM_THUMB=y
++# CONFIG_CPU_ICACHE_DISABLE is not set
++# CONFIG_CPU_DCACHE_DISABLE is not set
++# CONFIG_CPU_BPREDICT_DISABLE is not set
++CONFIG_ARM_L1_CACHE_SHIFT=5
++CONFIG_ARM_DMA_MEM_BUFFERABLE=y
++CONFIG_CPU_HAS_PMU=y
++CONFIG_ARM_ERRATA_411920=y
++
++#
++# Bus support
++#
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCCARD is not set
++
++#
++# Kernel Features
++#
++# CONFIG_NO_HZ is not set
++# CONFIG_HIGH_RES_TIMERS is not set
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++CONFIG_VMSPLIT_3G=y
++# CONFIG_VMSPLIT_2G is not set
++# CONFIG_VMSPLIT_1G is not set
++CONFIG_PAGE_OFFSET=0xC0000000
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_HZ=100
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
++# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
++CONFIG_HAVE_ARCH_PFN_VALID=y
++# CONFIG_HIGHMEM is not set
++CONFIG_HW_PERF_EVENTS=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++CONFIG_HAVE_MEMBLOCK=y
++CONFIG_PAGEFLAGS_EXTENDED=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_COMPACTION is not set
++# CONFIG_PHYS_ADDR_T_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=0
++CONFIG_VIRT_TO_BUS=y
++# CONFIG_KSM is not set
++CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
++CONFIG_NEED_PER_CPU_KM=y
++# CONFIG_CLEANCACHE is not set
++CONFIG_FORCE_MAX_ZONEORDER=11
++# CONFIG_LEDS is not set
++CONFIG_ALIGNMENT_TRAP=y
++# CONFIG_UACCESS_WITH_MEMCPY is not set
++# CONFIG_SECCOMP is not set
++# CONFIG_CC_STACKPROTECTOR is not set
++# CONFIG_DEPRECATED_PARAM_STRUCT is not set
++
++#
++# Boot options
++#
++# CONFIG_USE_OF is not set
++CONFIG_ZBOOT_ROM_TEXT=0x0
++CONFIG_ZBOOT_ROM_BSS=0x0
++CONFIG_CMDLINE=""
++# CONFIG_XIP_KERNEL is not set
++# CONFIG_KEXEC is not set
++# CONFIG_CRASH_DUMP is not set
++# CONFIG_AUTO_ZRELADDR is not set
++
++#
++# CPU Power Management
++#
++# CONFIG_CPU_IDLE is not set
++
++#
++# Floating point emulation
++#
++
++#
++# At least one emulation must be selected
++#
++CONFIG_VFP=y
++
++#
++# Userspace binary formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
++CONFIG_HAVE_AOUT=y
++# CONFIG_BINFMT_AOUT is not set
++# CONFIG_BINFMT_MISC is not set
++
++#
++# Power management options
++#
++CONFIG_SUSPEND=y
++CONFIG_SUSPEND_FREEZER=y
++CONFIG_PM_SLEEP=y
++# CONFIG_PM_RUNTIME is not set
++CONFIG_PM=y
++# CONFIG_PM_DEBUG is not set
++# CONFIG_APM_EMULATION is not set
++CONFIG_ARCH_SUSPEND_POSSIBLE=y
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_XFRM_STATISTICS is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++# CONFIG_IP_FIB_TRIE_STATS is not set
++# CONFIG_IP_MULTIPLE_TABLES is not set
++# CONFIG_IP_ROUTE_MULTIPATH is not set
++# CONFIG_IP_ROUTE_VERBOSE is not set
++# CONFIG_IP_PNP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE_DEMUX is not set
++CONFIG_IP_MROUTE=y
++CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
++# CONFIG_IP_PIMSM_V1 is not set
++# CONFIG_IP_PIMSM_V2 is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++CONFIG_INET_TUNNEL=m
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++CONFIG_TCP_CONG_ADVANCED=y
++CONFIG_TCP_CONG_BIC=m
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_TCP_CONG_WESTWOOD=m
++CONFIG_TCP_CONG_HTCP=m
++# CONFIG_TCP_CONG_HSTCP is not set
++# CONFIG_TCP_CONG_HYBLA is not set
++# CONFIG_TCP_CONG_VEGAS is not set
++# CONFIG_TCP_CONG_SCALABLE is not set
++# CONFIG_TCP_CONG_LP is not set
++# CONFIG_TCP_CONG_VENO is not set
++# CONFIG_TCP_CONG_YEAH is not set
++# CONFIG_TCP_CONG_ILLINOIS is not set
++CONFIG_DEFAULT_CUBIC=y
++# CONFIG_DEFAULT_RENO is not set
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++CONFIG_IPV6=m
++# CONFIG_IPV6_PRIVACY is not set
++# CONFIG_IPV6_ROUTER_PREF is not set
++# CONFIG_IPV6_OPTIMISTIC_DAD is not set
++# CONFIG_INET6_AH is not set
++# CONFIG_INET6_ESP is not set
++# CONFIG_INET6_IPCOMP is not set
++# CONFIG_IPV6_MIP6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_BEET=m
++# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
++CONFIG_IPV6_SIT=m
++# CONFIG_IPV6_SIT_6RD is not set
++CONFIG_IPV6_NDISC_NODETYPE=y
++# CONFIG_IPV6_TUNNEL is not set
++# CONFIG_IPV6_MULTIPLE_TABLES is not set
++# CONFIG_IPV6_MROUTE is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_NETFILTER_ADVANCED=y
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK_QUEUE is not set
++# CONFIG_NETFILTER_NETLINK_LOG is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++# CONFIG_IP_VS is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_NF_DEFRAG_IPV4 is not set
++# CONFIG_IP_NF_QUEUE is not set
++# CONFIG_IP_NF_IPTABLES is not set
++# CONFIG_IP_NF_ARPTABLES is not set
++
++#
++# IPv6: Netfilter Configuration
++#
++# CONFIG_NF_DEFRAG_IPV6 is not set
++# CONFIG_IP6_NF_QUEUE is not set
++# CONFIG_IP6_NF_IPTABLES is not set
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_RDS is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_L2TP is not set
++CONFIG_STP=m
++CONFIG_BRIDGE=m
++CONFIG_BRIDGE_IGMP_SNOOPING=y
++# CONFIG_NET_DSA is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=m
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_PHONET is not set
++# CONFIG_IEEE802154 is not set
++CONFIG_NET_SCHED=y
++
++#
++# Queueing/Scheduling
++#
++# CONFIG_NET_SCH_CBQ is not set
++# CONFIG_NET_SCH_HTB is not set
++# CONFIG_NET_SCH_HFSC is not set
++# CONFIG_NET_SCH_PRIO is not set
++# CONFIG_NET_SCH_MULTIQ is not set
++# CONFIG_NET_SCH_RED is not set
++# CONFIG_NET_SCH_SFB is not set
++# CONFIG_NET_SCH_SFQ is not set
++# CONFIG_NET_SCH_TEQL is not set
++# CONFIG_NET_SCH_TBF is not set
++# CONFIG_NET_SCH_GRED is not set
++# CONFIG_NET_SCH_DSMARK is not set
++# CONFIG_NET_SCH_NETEM is not set
++# CONFIG_NET_SCH_DRR is not set
++# CONFIG_NET_SCH_MQPRIO is not set
++# CONFIG_NET_SCH_CHOKE is not set
++# CONFIG_NET_SCH_QFQ is not set
++
++#
++# Classification
++#
++# CONFIG_NET_CLS_BASIC is not set
++# CONFIG_NET_CLS_TCINDEX is not set
++# CONFIG_NET_CLS_ROUTE4 is not set
++# CONFIG_NET_CLS_FW is not set
++# CONFIG_NET_CLS_U32 is not set
++# CONFIG_NET_CLS_RSVP is not set
++# CONFIG_NET_CLS_RSVP6 is not set
++# CONFIG_NET_CLS_FLOW is not set
++# CONFIG_NET_EMATCH is not set
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_SCH_FIFO=y
++# CONFIG_DCB is not set
++CONFIG_DNS_RESOLVER=y
++# CONFIG_BATMAN_ADV is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_CAN is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++CONFIG_FIB_RULES=y
++CONFIG_WIRELESS=y
++CONFIG_WIRELESS_EXT=y
++CONFIG_WEXT_CORE=y
++CONFIG_WEXT_PROC=y
++CONFIG_WEXT_PRIV=y
++CONFIG_CFG80211=y
++CONFIG_NL80211_TESTMODE=y
++# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
++# CONFIG_CFG80211_REG_DEBUG is not set
++CONFIG_CFG80211_DEFAULT_PS=y
++# CONFIG_CFG80211_INTERNAL_REGDB is not set
++CONFIG_CFG80211_WEXT=y
++CONFIG_WIRELESS_EXT_SYSFS=y
++CONFIG_LIB80211=y
++# CONFIG_LIB80211_DEBUG is not set
++CONFIG_MAC80211=y
++CONFIG_MAC80211_HAS_RC=y
++# CONFIG_MAC80211_RC_PID is not set
++CONFIG_MAC80211_RC_MINSTREL=y
++CONFIG_MAC80211_RC_MINSTREL_HT=y
++CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
++CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
++# CONFIG_MAC80211_MESH is not set
++# CONFIG_MAC80211_DEBUG_MENU is not set
++# CONFIG_WIMAX is not set
++# CONFIG_RFKILL is not set
++# CONFIG_NET_9P is not set
++# CONFIG_CAIF is not set
++# CONFIG_CEPH_LIB is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/mdev"
++CONFIG_DEVTMPFS=y
++# CONFIG_DEVTMPFS_MOUNT is not set
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++CONFIG_FIRMWARE_IN_KERNEL=y
++CONFIG_EXTRA_FIRMWARE=""
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_TESTS is not set
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++# CONFIG_MTD_AFS_PARTS is not set
++# CONFIG_MTD_AR7_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_SM_FTL is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++# CONFIG_MTD_CFI is not set
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_DATAFLASH is not set
++CONFIG_MTD_M25P80=y
++CONFIG_M25PXX_USE_FAST_READ=y
++# CONFIG_MTD_SST25L is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# LPDDR flash memory drivers
++#
++# CONFIG_MTD_LPDDR is not set
++# CONFIG_MTD_UBI is not set
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++
++#
++# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
++#
++# CONFIG_BLK_DEV_NBD is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=1
++CONFIG_BLK_DEV_RAM_SIZE=4096
++# CONFIG_BLK_DEV_XIP is not set
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_MG_DISK is not set
++# CONFIG_BLK_DEV_RBD is not set
++# CONFIG_SENSORS_LIS3LV02D is not set
++CONFIG_MISC_DEVICES=y
++# CONFIG_AD525X_DPOT is not set
++# CONFIG_INTEL_MID_PTI is not set
++# CONFIG_ICS932S401 is not set
++# CONFIG_ENCLOSURE_SERVICES is not set
++# CONFIG_APDS9802ALS is not set
++# CONFIG_ISL29003 is not set
++# CONFIG_ISL29020 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_SENSORS_BH1780 is not set
++# CONFIG_SENSORS_BH1770 is not set
++# CONFIG_SENSORS_APDS990X is not set
++# CONFIG_HMC6352 is not set
++# CONFIG_FH_I2S_SLAVE is not set
++# CONFIG_FH_I2S_MASTER is not set
++CONFIG_FH_PINCTRL=y
++CONFIG_FH_SADC=y
++CONFIG_FH_FIRMWARE_LOADER=m
++CONFIG_FH_EFUSE=y
++CONFIG_FH_CLK_MISC=y
++# CONFIG_DS1682 is not set
++# CONFIG_TI_DAC7512 is not set
++# CONFIG_BMP085 is not set
++# CONFIG_C2PORT is not set
++
++#
++# EEPROM support
++#
++CONFIG_EEPROM_AT24=m
++# CONFIG_EEPROM_AT25 is not set
++# CONFIG_EEPROM_LEGACY is not set
++# CONFIG_EEPROM_MAX6875 is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_IWMC3200TOP is not set
++
++#
++# Texas Instruments shared transport line discipline
++#
++# CONFIG_TI_ST is not set
++# CONFIG_SENSORS_LIS3_SPI is not set
++# CONFIG_SENSORS_LIS3_I2C is not set
++CONFIG_HAVE_IDE=y
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI_MOD=y
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++# CONFIG_BLK_DEV_SD is not set
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_ISCSI_BOOT_SYSFS is not set
++# CONFIG_LIBFC is not set
++# CONFIG_LIBFCOE is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_DH is not set
++# CONFIG_SCSI_OSD_INITIATOR is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_TARGET_CORE is not set
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=m
++# CONFIG_VETH is not set
++CONFIG_MII=y
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_REALTEK_PHY is not set
++# CONFIG_NATIONAL_PHY is not set
++# CONFIG_STE10XP is not set
++# CONFIG_LSI_ET1011C_PHY is not set
++# CONFIG_MICREL_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_NET_ETHERNET=y
++# CONFIG_AX88796 is not set
++# CONFIG_SMC91X is not set
++# CONFIG_DM9000 is not set
++CONFIG_FH_GMAC=y
++CONFIG_FH_GMAC_DA=y
++# CONFIG_ENC28J60 is not set
++# CONFIG_ETHOC is not set
++# CONFIG_SMC911X is not set
++# CONFIG_SMSC911X is not set
++# CONFIG_DNET is not set
++# CONFIG_IBM_NEW_EMAC_ZMII is not set
++# CONFIG_IBM_NEW_EMAC_RGMII is not set
++# CONFIG_IBM_NEW_EMAC_TAH is not set
++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
++# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
++# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
++# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
++# CONFIG_B44 is not set
++# CONFIG_KS8842 is not set
++# CONFIG_KS8851 is not set
++# CONFIG_KS8851_MLL is not set
++# CONFIG_FTMAC100 is not set
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++CONFIG_WLAN=y
++CONFIG_LIBERTAS_THINFIRM=y
++# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
++# CONFIG_MAC80211_HWSIM is not set
++# CONFIG_ATH_COMMON is not set
++# CONFIG_B43 is not set
++# CONFIG_B43LEGACY is not set
++# CONFIG_HOSTAP is not set
++# CONFIG_IWM is not set
++# CONFIG_LIBERTAS is not set
++# CONFIG_P54_COMMON is not set
++# CONFIG_RT2X00 is not set
++# CONFIG_RTL8192SE is not set
++# CONFIG_WL1251 is not set
++# CONFIG_WL12XX_MENU is not set
++# CONFIG_MWIFIEX is not set
++
++#
++# Enable WiMAX (Networking options) to see the WiMAX drivers
++#
++# CONFIG_WAN is not set
++
++#
++# CAIF transport drivers
++#
++CONFIG_PPP=m
++# CONFIG_PPP_MULTILINK is not set
++# CONFIG_PPP_FILTER is not set
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++# CONFIG_PPP_BSDCOMP is not set
++# CONFIG_PPP_MPPE is not set
++# CONFIG_PPPOE is not set
++# CONFIG_SLIP is not set
++CONFIG_SLHC=m
++CONFIG_NETCONSOLE=y
++CONFIG_NETPOLL=y
++CONFIG_NETPOLL_TRAP=y
++CONFIG_NET_POLL_CONTROLLER=y
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++# CONFIG_INPUT_SPARSEKMAP is not set
++
++#
++# Userland interfaces
++#
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_ADP5588 is not set
++# CONFIG_KEYBOARD_ADP5589 is not set
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_QT1070 is not set
++# CONFIG_KEYBOARD_QT2160 is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_GPIO is not set
++# CONFIG_KEYBOARD_TCA6416 is not set
++# CONFIG_KEYBOARD_MATRIX is not set
++# CONFIG_KEYBOARD_MAX7359 is not set
++# CONFIG_KEYBOARD_MCS is not set
++# CONFIG_KEYBOARD_MPR121 is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_OPENCORES is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_SERPORT=y
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_SERIO_ALTERA_PS2 is not set
++# CONFIG_SERIO_PS2MULT is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_CONSOLE_TRANSLATIONS=y
++# CONFIG_VT_CONSOLE is not set
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++CONFIG_UNIX98_PTYS=y
++# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++# CONFIG_N_GSM is not set
++# CONFIG_TRACE_SINK is not set
++CONFIG_DEVKMEM=y
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_MAX3100 is not set
++# CONFIG_SERIAL_MAX3107 is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_TIMBERDALE is not set
++# CONFIG_SERIAL_ALTERA_JTAGUART is not set
++# CONFIG_SERIAL_ALTERA_UART is not set
++# CONFIG_SERIAL_IFX6X60 is not set
++# CONFIG_SERIAL_XILINX_PS_UART is not set
++CONFIG_SERIAL_FH=y
++CONFIG_SERIAL_FH_CONSOLE=y
++# CONFIG_TTY_PRINTK is not set
++# CONFIG_HVC_DCC is not set
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++# CONFIG_RAMOOPS is not set
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_COMPAT=y
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_MUX is not set
++# CONFIG_I2C_HELPER_AUTO is not set
++# CONFIG_I2C_SMBUS is not set
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_ALGOBIT is not set
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++
++#
++# I2C system bus drivers (mostly embedded / system-on-chip)
++#
++CONFIG_I2C_FH_INTERRUPT=y
++# CONFIG_I2C_DESIGNWARE is not set
++# CONFIG_I2C_GPIO is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PCA_PLATFORM is not set
++# CONFIG_I2C_PXA_PCI is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_XILINX is not set
++
++#
++# External I2C/SMBus adapter drivers
++#
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_TAOS_EVM is not set
++
++#
++# Other I2C/SMBus bus drivers
++#
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++CONFIG_SPI=y
++CONFIG_SPI_MASTER=y
++
++#
++# SPI Master Controller Drivers
++#
++# CONFIG_SPI_ALTERA is not set
++# CONFIG_SPI_BITBANG is not set
++# CONFIG_SPI_GPIO is not set
++# CONFIG_SPI_OC_TINY is not set
++# CONFIG_SPI_PXA2XX_PCI is not set
++# CONFIG_SPI_XILINX is not set
++# CONFIG_SPI_DESIGNWARE is not set
++CONFIG_SPI_FH=y
++CONFIG_SPI_FH_SLAVE=y
++
++#
++# SPI Protocol Masters
++#
++CONFIG_SPI_SPIDEV=y
++# CONFIG_SPI_TLE62X0 is not set
++
++#
++# PPS support
++#
++# CONFIG_PPS is not set
++
++#
++# PPS generators support
++#
++
++#
++# PTP clock support
++#
++
++#
++# Enable Device Drivers -> PPS to see the PTP clock options.
++#
++CONFIG_ARCH_REQUIRE_GPIOLIB=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++
++#
++# Memory mapped GPIO drivers:
++#
++# CONFIG_GPIO_BASIC_MMIO is not set
++# CONFIG_GPIO_IT8761E is not set
++CONFIG_GPIO_FH=y
++
++#
++# I2C GPIO expanders:
++#
++# CONFIG_GPIO_MAX7300 is not set
++# CONFIG_GPIO_MAX732X is not set
++# CONFIG_GPIO_PCF857X is not set
++# CONFIG_GPIO_SX150X is not set
++# CONFIG_GPIO_ADP5588 is not set
++
++#
++# PCI GPIO expanders:
++#
++
++#
++# SPI GPIO expanders:
++#
++# CONFIG_GPIO_MAX7301 is not set
++# CONFIG_GPIO_MCP23S08 is not set
++# CONFIG_GPIO_MC33880 is not set
++# CONFIG_GPIO_74X164 is not set
++
++#
++# AC97 GPIO expanders:
++#
++
++#
++# MODULbus GPIO expanders:
++#
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Native drivers
++#
++# CONFIG_SENSORS_AD7414 is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADCXX is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7411 is not set
++# CONFIG_SENSORS_ADT7462 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ADT7475 is not set
++# CONFIG_SENSORS_ASC7621 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS620 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_G760A is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_GPIO_FAN is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_JC42 is not set
++# CONFIG_SENSORS_LINEAGE is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM70 is not set
++# CONFIG_SENSORS_LM73 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_LTC4151 is not set
++# CONFIG_SENSORS_LTC4215 is not set
++# CONFIG_SENSORS_LTC4245 is not set
++# CONFIG_SENSORS_LTC4261 is not set
++# CONFIG_SENSORS_LM95241 is not set
++# CONFIG_SENSORS_MAX1111 is not set
++# CONFIG_SENSORS_MAX16065 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX6639 is not set
++# CONFIG_SENSORS_MAX6642 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_PMBUS is not set
++# CONFIG_SENSORS_SHT15 is not set
++# CONFIG_SENSORS_SHT21 is not set
++# CONFIG_SENSORS_SMM665 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_EMC1403 is not set
++# CONFIG_SENSORS_EMC2103 is not set
++# CONFIG_SENSORS_EMC6W201 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_SCH5627 is not set
++# CONFIG_SENSORS_ADS1015 is not set
++# CONFIG_SENSORS_ADS7828 is not set
++# CONFIG_SENSORS_ADS7871 is not set
++# CONFIG_SENSORS_AMC6821 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_TMP102 is not set
++# CONFIG_SENSORS_TMP401 is not set
++# CONFIG_SENSORS_TMP421 is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83795 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83L786NG is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_THERMAL is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++# CONFIG_MAX63XX_WATCHDOG is not set
++CONFIG_FH_WATCHDOG=y
++CONFIG_SSB_POSSIBLE=y
++
++#
++# Sonics Silicon Backplane
++#
++# CONFIG_SSB is not set
++CONFIG_BCMA_POSSIBLE=y
++
++#
++# Broadcom specific AMBA
++#
++# CONFIG_BCMA is not set
++CONFIG_MFD_SUPPORT=y
++# CONFIG_MFD_CORE is not set
++# CONFIG_MFD_88PM860X is not set
++# CONFIG_MFD_SM501 is not set
++# CONFIG_MFD_ASIC3 is not set
++# CONFIG_HTC_EGPIO is not set
++# CONFIG_HTC_PASIC3 is not set
++# CONFIG_HTC_I2CPLD is not set
++# CONFIG_TPS6105X is not set
++# CONFIG_TPS65010 is not set
++# CONFIG_TPS6507X is not set
++# CONFIG_MFD_TPS6586X is not set
++# CONFIG_TWL4030_CORE is not set
++# CONFIG_MFD_STMPE is not set
++# CONFIG_MFD_TC3589X is not set
++# CONFIG_MFD_TMIO is not set
++# CONFIG_MFD_T7L66XB is not set
++# CONFIG_MFD_TC6387XB is not set
++# CONFIG_MFD_TC6393XB is not set
++# CONFIG_PMIC_DA903X is not set
++# CONFIG_PMIC_ADP5520 is not set
++# CONFIG_MFD_MAX8925 is not set
++# CONFIG_MFD_MAX8997 is not set
++# CONFIG_MFD_MAX8998 is not set
++# CONFIG_MFD_WM8400 is not set
++# CONFIG_MFD_WM831X_I2C is not set
++# CONFIG_MFD_WM831X_SPI is not set
++# CONFIG_MFD_WM8350_I2C is not set
++# CONFIG_MFD_WM8994 is not set
++# CONFIG_MFD_PCF50633 is not set
++# CONFIG_MFD_MC13XXX is not set
++# CONFIG_ABX500_CORE is not set
++# CONFIG_EZX_PCAP is not set
++# CONFIG_MFD_WL1273_CORE is not set
++# CONFIG_MFD_TPS65910 is not set
++# CONFIG_REGULATOR is not set
++# CONFIG_MEDIA_SUPPORT is not set
++
++#
++# Graphics support
++#
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++# CONFIG_FB is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Display device support
++#
++# CONFIG_DISPLAY_SUPPORT is not set
++
++#
++# Console display driver support
++#
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_SOUND is not set
++# CONFIG_HID_SUPPORT is not set
++# CONFIG_USB_SUPPORT is not set
++CONFIG_MMC=y
++# CONFIG_MMC_DEBUG is not set
++# CONFIG_MMC_UNSAFE_RESUME is not set
++# CONFIG_MMC_CLKGATE is not set
++
++#
++# MMC/SD/SDIO Card Drivers
++#
++CONFIG_MMC_BLOCK=y
++CONFIG_MMC_BLOCK_MINORS=8
++CONFIG_MMC_BLOCK_BOUNCE=y
++# CONFIG_SDIO_UART is not set
++# CONFIG_MMC_TEST is not set
++
++#
++# MMC/SD/SDIO Host Controller Drivers
++#
++# CONFIG_MMC_SDHCI is not set
++# CONFIG_MMC_SPI is not set
++# CONFIG_MMC_DW is not set
++CONFIG_MMC_FH=y
++CONFIG_MMC_FH_IDMAC=y
++# CONFIG_MEMSTICK is not set
++CONFIG_NEW_LEDS=y
++# CONFIG_LEDS_CLASS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++# CONFIG_NFC_DEVICES is not set
++# CONFIG_ACCESSIBILITY is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++# CONFIG_RTC_DRV_DS1307 is not set
++# CONFIG_RTC_DRV_DS1374 is not set
++# CONFIG_RTC_DRV_DS1672 is not set
++# CONFIG_RTC_DRV_DS3232 is not set
++# CONFIG_RTC_DRV_MAX6900 is not set
++# CONFIG_RTC_DRV_RS5C372 is not set
++# CONFIG_RTC_DRV_ISL1208 is not set
++# CONFIG_RTC_DRV_ISL12022 is not set
++# CONFIG_RTC_DRV_X1205 is not set
++# CONFIG_RTC_DRV_PCF8563 is not set
++# CONFIG_RTC_DRV_PCF8583 is not set
++# CONFIG_RTC_DRV_M41T80 is not set
++# CONFIG_RTC_DRV_BQ32K is not set
++# CONFIG_RTC_DRV_S35390A is not set
++# CONFIG_RTC_DRV_FM3130 is not set
++# CONFIG_RTC_DRV_RX8581 is not set
++# CONFIG_RTC_DRV_RX8025 is not set
++# CONFIG_RTC_DRV_EM3027 is not set
++# CONFIG_RTC_DRV_RV3029C2 is not set
++
++#
++# SPI RTC drivers
++#
++# CONFIG_RTC_DRV_M41T93 is not set
++# CONFIG_RTC_DRV_M41T94 is not set
++# CONFIG_RTC_DRV_DS1305 is not set
++# CONFIG_RTC_DRV_DS1390 is not set
++# CONFIG_RTC_DRV_MAX6902 is not set
++# CONFIG_RTC_DRV_R9701 is not set
++# CONFIG_RTC_DRV_RS5C348 is not set
++# CONFIG_RTC_DRV_DS3234 is not set
++# CONFIG_RTC_DRV_PCF2123 is not set
++
++#
++# Platform RTC drivers
++#
++# CONFIG_RTC_DRV_CMOS is not set
++# CONFIG_RTC_DRV_DS1286 is not set
++# CONFIG_RTC_DRV_DS1511 is not set
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++# CONFIG_RTC_DRV_M48T35 is not set
++# CONFIG_RTC_DRV_M48T59 is not set
++# CONFIG_RTC_DRV_MSM6242 is not set
++# CONFIG_RTC_DRV_BQ4802 is not set
++# CONFIG_RTC_DRV_RP5C01 is not set
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# on-CPU RTC drivers
++#
++CONFIG_RTC_DRV_FH=y
++CONFIG_DMADEVICES=y
++# CONFIG_DMADEVICES_DEBUG is not set
++
++#
++# DMA Devices
++#
++# CONFIG_DW_DMAC is not set
++CONFIG_FH_DMAC=y
++CONFIG_FH_DMAC_MISC=y
++# CONFIG_TIMB_DMA is not set
++CONFIG_DMA_ENGINE=y
++
++#
++# DMA Clients
++#
++# CONFIG_NET_DMA is not set
++# CONFIG_ASYNC_TX_DMA is not set
++# CONFIG_DMATEST is not set
++# CONFIG_AUXDISPLAY is not set
++# CONFIG_UIO is not set
++# CONFIG_STAGING is not set
++CONFIG_CLKDEV_LOOKUP=y
++CONFIG_PWM=y
++CONFIG_PWM_FULLHAN=y
++CONFIG_FH_PWM_NUM=8
++
++#
++# File systems
++#
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
++# CONFIG_EXT4_FS is not set
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_BTRFS_FS is not set
++# CONFIG_NILFS2_FS is not set
++CONFIG_FS_POSIX_ACL=y
++CONFIG_FILE_LOCKING=y
++CONFIG_FSNOTIFY=y
++CONFIG_DNOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_FANOTIFY is not set
++# CONFIG_QUOTA is not set
++# CONFIG_QUOTACTL is not set
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++CONFIG_GENERIC_ACL=y
++
++#
++# Caches
++#
++# CONFIG_FSCACHE is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_PROC_PAGE_MONITOR=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_TMPFS_XATTR=y
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++CONFIG_MISC_FILESYSTEMS=y
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_ECRYPT_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_YAFFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++CONFIG_JFFS2_COMPRESSION_OPTIONS=y
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++# CONFIG_JFFS2_RTIME is not set
++# CONFIG_JFFS2_RUBIN is not set
++# CONFIG_JFFS2_CMODE_NONE is not set
++CONFIG_JFFS2_CMODE_PRIORITY=y
++# CONFIG_JFFS2_CMODE_SIZE is not set
++# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
++# CONFIG_LOGFS is not set
++# CONFIG_CRAMFS is not set
++CONFIG_SQUASHFS=y
++# CONFIG_SQUASHFS_XATTR is not set
++# CONFIG_SQUASHFS_LZO is not set
++CONFIG_SQUASHFS_XZ=y
++# CONFIG_SQUASHFS_EMBEDDED is not set
++CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
++# CONFIG_VXFS_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_OMFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_PSTORE is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++# CONFIG_NETWORK_FILESYSTEMS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++CONFIG_NLS_UTF8=m
++
++#
++# Kernel hacking
++#
++# CONFIG_TEST_BOOT_TIME is not set
++CONFIG_PRINTK_TIME=y
++CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_FRAME_WARN=1024
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_STRIP_ASM_SYMS is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_SECTION_MISMATCH is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_HARDLOCKUP_DETECTOR is not set
++# CONFIG_SLUB_STATS is not set
++# CONFIG_SPARSE_RCU_POINTER is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_MEMORY_INIT is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_SYSCTL_SYSCALL_CHECK is not set
++CONFIG_HAVE_FUNCTION_TRACER=y
++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
++CONFIG_HAVE_DYNAMIC_FTRACE=y
++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
++CONFIG_HAVE_C_RECORDMCOUNT=y
++CONFIG_TRACING_SUPPORT=y
++CONFIG_FTRACE=y
++# CONFIG_FUNCTION_TRACER is not set
++# CONFIG_IRQSOFF_TRACER is not set
++# CONFIG_SCHED_TRACER is not set
++# CONFIG_ENABLE_DEFAULT_TRACERS is not set
++# CONFIG_FTRACE_SYSCALLS is not set
++CONFIG_BRANCH_PROFILE_NONE=y
++# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
++# CONFIG_PROFILE_ALL_BRANCHES is not set
++# CONFIG_STACK_TRACER is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++# CONFIG_DMA_API_DEBUG is not set
++# CONFIG_ATOMIC64_SELFTEST is not set
++# CONFIG_SAMPLES is not set
++CONFIG_HAVE_ARCH_KGDB=y
++# CONFIG_TEST_KSTRTOX is not set
++# CONFIG_STRICT_DEVMEM is not set
++# CONFIG_ARM_UNWIND is not set
++CONFIG_DEBUG_USER=y
++# CONFIG_OC_ETM is not set
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
++# CONFIG_SECURITY is not set
++# CONFIG_SECURITYFS is not set
++CONFIG_DEFAULT_SECURITY_DAC=y
++CONFIG_DEFAULT_SECURITY=""
++CONFIG_CRYPTO=y
++
++#
++# Crypto core or helper
++#
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_ALGAPI2=y
++CONFIG_CRYPTO_AEAD=y
++CONFIG_CRYPTO_AEAD2=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_BLKCIPHER2=y
++CONFIG_CRYPTO_HASH=y
++CONFIG_CRYPTO_HASH2=y
++CONFIG_CRYPTO_RNG=y
++CONFIG_CRYPTO_RNG2=y
++CONFIG_CRYPTO_PCOMP2=y
++CONFIG_CRYPTO_MANAGER=y
++CONFIG_CRYPTO_MANAGER2=y
++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
++# CONFIG_CRYPTO_GF128MUL is not set
++# CONFIG_CRYPTO_NULL is not set
++CONFIG_CRYPTO_WORKQUEUE=y
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_AUTHENC=y
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Authenticated Encryption with Associated Data
++#
++# CONFIG_CRYPTO_CCM is not set
++# CONFIG_CRYPTO_GCM is not set
++CONFIG_CRYPTO_SEQIV=y
++
++#
++# Block modes
++#
++CONFIG_CRYPTO_CBC=y
++# CONFIG_CRYPTO_CTR is not set
++# CONFIG_CRYPTO_CTS is not set
++CONFIG_CRYPTO_ECB=y
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_PCBC is not set
++# CONFIG_CRYPTO_XTS is not set
++
++#
++# Hash modes
++#
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_VMAC is not set
++
++#
++# Digest
++#
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_GHASH is not set
++# CONFIG_CRYPTO_MD4 is not set
++# CONFIG_CRYPTO_MD5 is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_RMD128 is not set
++# CONFIG_CRYPTO_RMD160 is not set
++# CONFIG_CRYPTO_RMD256 is not set
++# CONFIG_CRYPTO_RMD320 is not set
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_WP512 is not set
++
++#
++# Ciphers
++#
++CONFIG_CRYPTO_AES=y
++# CONFIG_CRYPTO_ANUBIS is not set
++CONFIG_CRYPTO_ARC4=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_SALSA20 is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++
++#
++# Compression
++#
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_ZLIB is not set
++# CONFIG_CRYPTO_LZO is not set
++
++#
++# Random Number Generation
++#
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_USER_API=y
++# CONFIG_CRYPTO_USER_API_HASH is not set
++CONFIG_CRYPTO_USER_API_SKCIPHER=y
++CONFIG_CRYPTO_HW=y
++CONFIG_FH_AES=y
++# CONFIG_FH_AES_SELF_TEST is not set
++# CONFIG_BINARY_PRINTF is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++CONFIG_CRC_CCITT=m
++# CONFIG_CRC16 is not set
++CONFIG_CRC_T10DIF=m
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_XZ_DEC=y
++CONFIG_XZ_DEC_X86=y
++CONFIG_XZ_DEC_POWERPC=y
++CONFIG_XZ_DEC_IA64=y
++CONFIG_XZ_DEC_ARM=y
++CONFIG_XZ_DEC_ARMTHUMB=y
++CONFIG_XZ_DEC_SPARC=y
++CONFIG_XZ_DEC_BCJ=y
++# CONFIG_XZ_DEC_TEST is not set
++CONFIG_DECOMPRESS_XZ=y
++CONFIG_GENERIC_ALLOCATOR=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_NLATTR=y
++CONFIG_GENERIC_ATOMIC64=y
++CONFIG_AVERAGE=y
+diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
+index ee2ad8ae..3aefe527 100644
+--- a/arch/arm/include/asm/setup.h
++++ b/arch/arm/include/asm/setup.h
+@@ -143,6 +143,13 @@ struct tag_memclk {
+ 	__u32 fmemclk;
+ };
+ 
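++/* Vendor-specific ATAG, presumably emitted by the Fullhan bootloader to pass the Ethernet PHY interface mode. */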
++#define ATAG_PHYMODE 0x41000601
++
++struct tag_phymode {
++	__u32 phymode;
++};
++
+ struct tag {
+ 	struct tag_header hdr;
+ 	union {
+@@ -165,6 +172,11 @@ struct tag {
+ 		 * DC21285 specific
+ 		 */
+ 		struct tag_memclk	memclk;
++
++		/*
++		 * Fullhan specific
++		 */
++		struct tag_phymode	phymode;
+ 	} u;
+ };
+ 
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 278c1b0e..9a375efa 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -226,6 +226,7 @@ __create_page_tables:
+ 	 * This allows debug messages to be output
+ 	 * via a serial console before paging_init.
+ 	 */
++#ifndef CONFIG_JLINK_DEBUG
+ 	addruart r7, r3
+ 
+ 	mov	r3, r3, lsr #20
+@@ -243,6 +244,7 @@ __create_page_tables:
+ 	add	r3, r3, #1 << 20
+ 	teq	r0, r6
+ 	bne	1b
++#endif
+ 
+ #else /* CONFIG_DEBUG_ICEDCC */
+ 	/* we don't need any serial debugging mappings for ICEDCC */
+@@ -362,6 +364,13 @@ __enable_mmu:
+ #ifdef CONFIG_CPU_ICACHE_DISABLE
+ 	bic	r0, r0, #CR_I
+ #endif
++	/*
++	 * Added by Fullhan for J-Link debugging.
++	 */
++#ifdef CONFIG_JLINK_DEBUG
++	mov r4, #0x10000000
++#endif
++
+ 	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+ 		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ 		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+diff --git a/arch/arm/mach-fh/Kconfig b/arch/arm/mach-fh/Kconfig
+new file mode 100644
+index 00000000..624516ae
+--- /dev/null
++++ b/arch/arm/mach-fh/Kconfig
+@@ -0,0 +1,52 @@
++if ARCH_FULLHAN
++
++config CPU_FH8833
++	select CPU_V6
++	bool
++menu "FullHan Implementations"
++
++comment "FullHan Core Type"
++
++choice
++	prompt "Select Fullhan Chip:"
++	default ARCH_FH8833
++
++config ARCH_FH8833
++	bool "FullHan FH8833 based system"
++	select CPU_FH8833
++
++endchoice
++
++comment "FullHan Board Type"
++
++config USE_PTS_AS_CLOCKSOURCE
++	bool "use pts as clock source"
++	default n
++	
++config FH_SIMPLE_TIMER
++	bool "use fh self-defined simple timer"
++	default n
++
++config MACH_FH8833
++	bool "FullHan FH8833 board"
++	default y
++	depends on ARCH_FH8833
++	select MISC_DEVICES
++	select I2C
++	help
++	  Configure this option to specify whether the board used
++	  for development is the FH8833.
++
++
++config MACH_FH_NAND
++	bool "USE NAND FLASH"
++	default n
++	help
++	  Use NAND flash instead of NOR flash.
++
++config JLINK_DEBUG
++	bool "Use jlink to debug kernel."
++
++endmenu
++
++endif
+diff --git a/arch/arm/mach-fh/Makefile b/arch/arm/mach-fh/Makefile
+new file mode 100644
+index 00000000..28de292a
+--- /dev/null
++++ b/arch/arm/mach-fh/Makefile
+@@ -0,0 +1,19 @@
++#
++# Makefile for the linux kernel.
++#
++#
++
++# Common objects
++obj-y			:= time.o clock.o \
++			   sram.o irq.o pmu.o pm.o
++# gpio.o
++# Chip specific
++obj-$(CONFIG_ARCH_FH8833)       += fh8833.o
++# Board specific
++obj-$(CONFIG_MACH_FH8833) 	+= board-fh8833.o pinctrl.o
++obj-$(CONFIG_FH_SIMPLE_TIMER) 	+= fh_simple_timer.o
++
++# Power Management
++obj-$(CONFIG_CPU_FREQ)		+= cpufreq.o
++obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
++obj-$(CONFIG_SUSPEND)		+= sleep.o
+diff --git a/arch/arm/mach-fh/Makefile.boot b/arch/arm/mach-fh/Makefile.boot
+new file mode 100644
+index 00000000..f05489f0
+--- /dev/null
++++ b/arch/arm/mach-fh/Makefile.boot
+@@ -0,0 +1,4 @@
++zreladdr-y	:= 0xA0008000
++params_phys-y	:= 0xA0000100
++initrd_phys-y	:= 0xA0800000
++
+diff --git a/arch/arm/mach-fh/board-fh8833.c b/arch/arm/mach-fh/board-fh8833.c
+new file mode 100644
+index 00000000..2c81f6f3
+--- /dev/null
++++ b/arch/arm/mach-fh/board-fh8833.c
+@@ -0,0 +1,1204 @@
++/*
++ * Fullhan FH8833 board support
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/i2c.h>
++#include <linux/io.h>
++#include <linux/mmc/dw_mmc.h>
++#include <linux/clk.h>
++#include <linux/i2c/at24.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++#include <linux/slab.h>
++#include <linux/input.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/flash.h>
++#include <linux/phy.h>
++#include <linux/dma-mapping.h>
++#include <linux/spi/eeprom.h>
++#include <linux/delay.h>
++#include <asm/mach-types.h>
++#include <asm/mach/arch.h>
++#include <asm/mach/map.h>
++#include <asm/pmu.h>
++
++#include <mach/system.h>
++#include <mach/chip.h>
++#include <mach/iomux.h>
++#include <mach/irqs.h>
++#include <mach/pmu.h>
++#include <mach/fh_dmac.h>
++#include <mach/fh_gmac.h>
++#include <mach/gpio.h>
++#include <mach/spi.h>
++#include <mach/clock.h>
++#include <mach/rtc.h>
++#include <mach/pinctrl.h>
++#include <mach/fh_wdt.h>
++#include <mach/fhmci.h>
++#include <mach/board_config.h>
++
++
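++/* Fixed virtual mappings for core peripherals, installed early via iotable_init(). */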
++static struct map_desc fh8833_io_desc[] = {
++	{
++		.virtual	= VA_RAM_REG_BASE,
++		.pfn		= __phys_to_pfn(RAM_BASE),
++		.length		= SZ_16K,
++		.type		= MT_MEMORY,
++	},
++	{
++		.virtual	= VA_DDRC_REG_BASE,
++		.pfn		= __phys_to_pfn(DDRC_REG_BASE),
++		.length		= SZ_16K,
++		.type		= MT_DEVICE,
++	},
++	{
++		.virtual	= VA_INTC_REG_BASE,
++		.pfn		= __phys_to_pfn(INTC_REG_BASE),
++		.length		= SZ_16K,
++		.type		= MT_DEVICE,
++	},
++	{
++		.virtual	= VA_TIMER_REG_BASE,
++		.pfn		= __phys_to_pfn(TIMER_REG_BASE),
++		.length		= SZ_16K,
++		.type		= MT_DEVICE,
++	},
++	{
++		.virtual	= VA_PMU_REG_BASE,
++		.pfn		= __phys_to_pfn(PMU_REG_BASE),
++		.length		= SZ_16K,
++		.type		= MT_DEVICE,
++	},
++	{
++		.virtual	= VA_UART0_REG_BASE,
++		.pfn		= __phys_to_pfn(UART0_REG_BASE),
++		.length		= SZ_16K,
++		.type		= MT_DEVICE,
++	},
++	{
++		.virtual	= VA_UART1_REG_BASE,
++		.pfn		= __phys_to_pfn(UART1_REG_BASE),
++		.length		= SZ_16K,
++		.type		= MT_DEVICE,
++	},
++	{
++		.virtual	= VA_PAE_REG_BASE,
++		.pfn		= __phys_to_pfn(PAE_REG_BASE),
++		.length		= SZ_16K,
++		.type		= MT_DEVICE,
++	},
++};
++
++
++static struct resource fh_gpio0_resources[] = {
++	{
++		.start		= GPIO0_REG_BASE,
++		.end		= GPIO0_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++
++	{
++		.start		= GPIO0_IRQ,
++		.end		= GPIO0_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++static struct resource fh_gpio1_resources[] = {
++	{
++		.start		= GPIO1_REG_BASE,
++		.end		= GPIO1_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++
++	{
++		.start		= GPIO1_IRQ,
++		.end		= GPIO1_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++
++static struct resource fh_uart0_resources[] = {
++	{
++		.start		= (UART0_REG_BASE),
++		.end		= (UART0_REG_BASE) + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++
++	{
++		.start		= UART0_IRQ,
++		.end		= UART0_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++static struct resource fh_uart1_resources[] = {
++	{
++		.start		= (UART1_REG_BASE),
++		.end		= (UART1_REG_BASE) + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++
++	{
++		.start		= UART1_IRQ,
++		.end		= UART1_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++static struct resource fh_dma_resources[] = {
++	{
++		.start		= (DMAC_REG_BASE),
++		.end		= (DMAC_REG_BASE) + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++
++	{
++		.start		= DMAC0_IRQ,
++		.end		= DMAC0_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++static struct resource fh_i2c_resources_0[] = {
++	{
++		.start		= I2C0_REG_BASE,
++		.end		= I2C0_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++
++	{
++		.start		= I2C0_IRQ,
++		.end		= I2C0_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++static struct resource fh_i2c_resources_1[] = {
++	{
++		.start		= I2C1_REG_BASE,
++		.end		= I2C1_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++
++	{
++		.start		= I2C1_IRQ,
++		.end		= I2C1_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++static struct resource fh_sdc0_resources[] = {
++	{
++		.start		= SDC0_REG_BASE,
++		.end		= SDC0_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++	{
++		.start		= SDC0_IRQ,
++		.end		= SDC0_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++static struct resource fh_sdc1_resources[] = {
++	{
++		.start		= SDC1_REG_BASE,
++		.end		= SDC1_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++	{
++		.start		= SDC1_IRQ,
++		.end		= SDC1_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++static struct resource fh_wdt_resources[] = {
++	{
++		.start		= WDT_REG_BASE,
++		.end		= WDT_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++	{
++		.start		= WDT_IRQ,
++		.end		= WDT_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++static struct resource fh_i2s_resources[] = {
++	{
++		.start		= I2S_REG_BASE,
++		.end		= I2S_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++	{
++		.start		= I2S0_IRQ,
++		.end		= I2S0_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++static struct resource fh_spi0_resources[] = {
++	{
++		.start		= SPI0_REG_BASE,
++		.end		= SPI0_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++		.name = "fh spi0 mem",
++	},
++	{
++		.start		= SPI0_IRQ,
++		.end		= SPI0_IRQ,
++		.flags		= IORESOURCE_IRQ,
++		.name = "fh spi0 irq",
++	},
++};
++
++static struct resource fh_spi1_resources[] = {
++	{
++		.start		= SPI1_REG_BASE,
++		.end		= SPI1_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++		.name = "fh spi1 mem",
++	},
++	{
++		.start		= SPI1_IRQ,
++		.end		= SPI1_IRQ,
++		.flags		= IORESOURCE_IRQ,
++		.name = "fh spi1 irq",
++	},
++};
++
++static struct resource fh_spi2_resources[] = {
++	{
++		.start		= SPI2_REG_BASE,
++		.end		= SPI2_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++		.name = "fh spi2 mem",
++	},
++	{
++		.start		= SPI2_IRQ,
++		.end		= SPI2_IRQ,
++		.flags		= IORESOURCE_IRQ,
++		.name = "fh spi2 irq",
++	},
++};
++
++
++
++static struct resource fh_gmac_resources[] = {
++	{
++		.start		= GMAC_REG_BASE,
++		.end		= GMAC_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++
++	{
++		.start		= GMAC_IRQ,
++		.end		= GMAC_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++static struct resource fh_pwm_resources[] = {
++	{
++		.start		= PWM_REG_BASE,
++		.end		= PWM_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++	{
++		.start		= PWM_IRQ,
++		.end		= PWM_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++static struct resource fh_sadc_resources[] = {
++	{
++		.start		= SADC_REG_BASE,
++		.end		= SADC_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++		.name = "fh sadc mem",
++	},
++	{
++		.start		= SADC_IRQ,
++		.end		= SADC_IRQ,
++		.flags		= IORESOURCE_IRQ,
++		.name = "fh sadc irq",
++	},
++};
++
++static struct resource fh_aes_resources[] = {
++	{
++		.start		= AES_REG_BASE,
++		.end		= AES_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++		.name = "fh aes mem",
++	},
++	{
++		.start		= AES_IRQ,
++		.end		= AES_IRQ,
++		.flags		= IORESOURCE_IRQ,
++		.name = "fh aes irq",
++	},
++};
++
++static struct resource fh_acw_resources[] = {
++	{
++		.start		= ACW_REG_BASE,
++		.end		= ACW_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++	{
++		.start		= ACW_IRQ,
++		.end		= ACW_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++
++static struct resource fh_rtc_resources[] = {
++	{
++		.start		= RTC_REG_BASE,
++		.end		= RTC_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++	{
++		.start		= RTC_IRQ,
++		.end		= RTC_IRQ,
++		.flags		= IORESOURCE_IRQ,
++	},
++};
++
++
++static struct resource fh_efuse_resources[] = {
++	{
++		.start		= EFUSE_REG_BASE,
++		.end		= EFUSE_REG_BASE + SZ_16K - 1,
++		.flags		= IORESOURCE_MEM,
++	},
++
++};
++
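++/* Configure the MAC reference-clock pad for the selected PHY interface (RMII or MII). */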
++static void fh_gmac_early_init(struct fh_gmac_platform_data *plat_data)
++{
++	if (plat_data->interface == PHY_INTERFACE_MODE_RMII)
++		fh_pmu_set_reg(REG_PMU_PAD_MAC_REF_CLK_CFG, 0x10001000);
++	else if (plat_data->interface == PHY_INTERFACE_MODE_MII)
++		fh_pmu_set_reg(REG_PMU_PAD_MAC_REF_CLK_CFG, 0x1000);
++}
++
++static void fh_gmac_plat_init(struct fh_gmac_platform_data *plat_data)
++{
++	u32 reg;
++
++	if (plat_data->interface == PHY_INTERFACE_MODE_RMII) {
++		reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
++		reg |= 0x7000000;
++		fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
++
++		fh_pmu_set_reg(REG_PMU_SWRST_AHB_CTRL, 0xfffdffff);
++		while (fh_pmu_get_reg(REG_PMU_SWRST_AHB_CTRL) != 0xffffffff)
++			;
++
++	} else if (plat_data->interface == PHY_INTERFACE_MODE_MII) {
++		reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
++		reg &= ~(0x7000000);
++		reg |= 0x1000000;
++		fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
++
++		fh_pmu_set_reg(REG_PMU_SWRST_AHB_CTRL, 0xfffdffff);
++		while (fh_pmu_get_reg(REG_PMU_SWRST_AHB_CTRL) != 0xffffffff)
++			;
++	}
++}
++
++static void fh_set_rmii_speed(int speed)
++{
++	u32 reg;
++
++	if (speed == gmac_speed_10m) {
++		reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
++		reg &= ~(0x1000000);
++		fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
++	} else {
++		reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
++		reg |= 0x1000000;
++		fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
++	}
++}
++
++static void fh_phy_reset(void)
++{
++	/*
++	 * RXDV must be held low during PHY reset.
++	 * Also use AC_MCLK as the RMII REF CLK,
++	 * as a temporary measure.
++	 */
++	fh_pmu_set_reg(0x16c, 0x3001000);
++	fh_pmu_set_reg(0x1c, 0x0);
++	fh_pmu_set_reg(0x3c, 0x0b77030b);
++	fh_pmu_set_reg(0xe8, 0x1101000);
++
++	gpio_request(CONFIG_GPIO_EMACPHY_RESET, "phy_reset");
++	gpio_request(CONFIG_GPIO_EMACPHY_RXDV, "phy_rxdv");
++
++	gpio_direction_output(CONFIG_GPIO_EMACPHY_RXDV, 0);
++	gpio_direction_output(CONFIG_GPIO_EMACPHY_RESET, 0);
++	mdelay(10);
++	gpio_direction_output(CONFIG_GPIO_EMACPHY_RESET, 1);
++	mdelay(10);
++	gpio_free(CONFIG_GPIO_EMACPHY_RESET);
++	gpio_free(CONFIG_GPIO_EMACPHY_RXDV);
++
++	fh_pmu_set_reg(0xe8, 0x101000);
++
++}
++
++static struct fh_gmac_platform_data fh_gmac_data = {
++	.early_init = fh_gmac_early_init,
++	.plat_init = fh_gmac_plat_init,
++	.set_rmii_speed = fh_set_rmii_speed,
++	.phy_reset = fh_phy_reset,
++	.phyid = -1,
++};
++
++static const char *const fh_gpio0_names[] = {
++	"GPIO0",    "GPIO1",    "GPIO2",    "GPIO3",
++	"GPIO4",    "GPIO5",    "GPIO6",    "GPIO7",
++	"GPIO8",    "GPIO9",    "GPIO10",   "GPIO11",
++	"GPIO12",   "GPIO13",   "GPIO14",   "GPIO15",
++	"GPIO16",   "GPIO17",   "GPIO18",   "GPIO19",
++	"GPIO20",   "GPIO21",   "GPIO22",   "GPIO23",
++	"GPIO24",   "GPIO25",   "GPIO26",   "GPIO27",
++	"GPIO28",   "GPIO29",   "GPIO30",   "GPIO31",
++};
++
++static const char *const fh_gpio1_names[] = {
++	"GPIO32",   "GPIO33",   "GPIO34",   "GPIO35",
++	"GPIO36",   "GPIO37",   "GPIO38",   "GPIO39",
++	"GPIO40",   "GPIO41",   "GPIO42",   "GPIO43",
++	"GPIO44",   "GPIO45",   "GPIO46",   "GPIO47",
++	"GPIO48",   "GPIO49",   "GPIO50",   "GPIO51",
++	"GPIO52",   "GPIO53",   "GPIO54",   "GPIO55",
++	"GPIO56",   "GPIO57",   "GPIO58",   "GPIO59",
++	"GPIO60",   "GPIO61",   "GPIO62",   "GPIO63",
++	"GPIO64",   "GPIO65",   "GPIO66",   "GPIO67",
++	"GPIO68",   "GPIO69",   "GPIO70",   "GPIO71",
++};
++
++static struct fh_gpio_chip fh_gpio0_chip = {
++	.chip = {
++		.owner = THIS_MODULE,
++		.label = "FH_GPIO0",
++		.base = 0,
++		.ngpio = 32,
++		.names = fh_gpio0_names,
++	},
++};
++
++static struct fh_gpio_chip fh_gpio1_chip = {
++	.chip = {
++		.owner = THIS_MODULE,
++		.label = "FH_GPIO1",
++		.base = 32,
++		.ngpio = 32,
++		.names = fh_gpio1_names,
++	},
++};
++
++
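++/* REG_PMU_WDT_CTRL bit 8 pauses the watchdog counter when set (see the pause/resume callbacks below). */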
++static void fh_wdt_pause(void)
++{
++	unsigned int reg;
++
++	reg = fh_pmu_get_reg(REG_PMU_WDT_CTRL);
++	reg |= 0x100;
++	fh_pmu_set_reg(REG_PMU_WDT_CTRL, reg);
++
++	printk(KERN_INFO "wdt pause\n");
++}
++
++static void fh_wdt_resume(void)
++{
++	unsigned int reg;
++
++	reg = fh_pmu_get_reg(REG_PMU_WDT_CTRL);
++	reg &= ~(0x100);
++	fh_pmu_set_reg(REG_PMU_WDT_CTRL, reg);
++}
++
++static struct fh_wdt_platform_data fh_wdt_data = {
++	.pause = fh_wdt_pause,
++	.resume = fh_wdt_resume,
++};
++
++static int fh_buswd(u32 slot_id)
++{
++	return 4;
++}
++
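++/* Assert the AHB soft-reset of the selected SDC controller and wait for it to self-clear. */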
++static int sd_init(unsigned int slot_id, void *data, void *v)
++{
++	u32 reg;
++
++	reg = slot_id ? 0xfffffffd : 0xfffffffb;
++	fh_pmu_set_reg(REG_PMU_SWRST_AHB_CTRL, reg);
++	while (fh_pmu_get_reg(REG_PMU_SWRST_AHB_CTRL) != 0xffffffff)
++		;
++	return 0;
++}
++
++static unsigned int __maybe_unused
++fh_mci_sys_card_detect_fixed(struct fhmci_host *host)
++{
++	return 0;
++}
++
++static unsigned int __maybe_unused
++fh_mci_sys_read_only_fixed(struct fhmci_host *host)
++{
++	return 0;
++}
++
++struct fh_mci_board fh_mci = {
++	.init = sd_init,
++#ifdef CONFIG_SD_CD_FIXED
++	.get_cd = fh_mci_sys_card_detect_fixed,
++#endif
++#ifdef CONFIG_SD_WP_FIXED
++	.get_ro = fh_mci_sys_read_only_fixed,
++#endif
++	.num_slots = 1,
++	.bus_hz = 50000000,
++	.detect_delay_ms = 200,
++	.get_bus_wd = fh_buswd,
++	.caps = MMC_CAP_4_BIT_DATA
++	| MMC_CAP_SD_HIGHSPEED
++	| MMC_CAP_MMC_HIGHSPEED
++	| MMC_CAP_NEEDS_POLL
++	/*  | MMC_CAP_SDIO_IRQ  */,
++};
++
++struct fh_mci_board fh_mci_sd = {
++#ifdef CONFIG_SD_WP_FIXED
++	.get_ro = fh_mci_sys_read_only_fixed,
++#endif
++	.init = sd_init,
++	.num_slots = 1,
++	.bus_hz = 50000000,
++	.detect_delay_ms = 200,
++	.get_bus_wd = fh_buswd,
++	.caps = MMC_CAP_SD_HIGHSPEED
++	| MMC_CAP_MMC_HIGHSPEED
++	| MMC_CAP_NEEDS_POLL
++	/*  | MMC_CAP_SDIO_IRQ  */,
++};
++
++static struct fh_dma_platform_data fh_dma_data = {
++	.chan_priority  = CHAN_PRIORITY_DESCENDING,
++	.nr_channels    = 8,
++};
++
++static struct at24_platform_data at24c02 = {
++	.byte_len   = SZ_2K / 8,
++	.page_size  = 8,
++	.flags      = AT24_FLAG_TAKE8ADDR,
++};
++
++static struct platform_device fh_gmac_device = {
++	.name			= "fh_gmac",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_gmac_resources),
++	.resource		= fh_gmac_resources,
++	.dev			= {
++		.coherent_dma_mask = DMA_BIT_MASK(32),
++		.platform_data = &fh_gmac_data,
++	},
++};
++
++static struct platform_device fh_i2s_misc_device = {
++	.name			= "fh_i2s",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_i2s_resources),
++	.resource		= fh_i2s_resources,
++};
++
++static struct platform_device fh_acw_misc_device = {
++	.name			= "fh_acw",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_acw_resources),
++	.resource		= fh_acw_resources,
++};
++
++static struct platform_device fh_gpio0_device = {
++	.name			= GPIO_NAME,
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_gpio0_resources),
++	.resource		= fh_gpio0_resources,
++	.dev			= {
++		.platform_data = &fh_gpio0_chip,
++	},
++};
++
++static struct platform_device fh_gpio1_device = {
++	.name			= GPIO_NAME,
++	.id			= 1,
++	.num_resources		= ARRAY_SIZE(fh_gpio1_resources),
++	.resource		= fh_gpio1_resources,
++	.dev			= {
++		.platform_data = &fh_gpio1_chip,
++	},
++};
++
++struct platform_device fh_sd0_device = {
++	.name			= "fh_mci",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_sdc0_resources),
++	.resource		= fh_sdc0_resources,
++	.dev			= {
++		.coherent_dma_mask	= DMA_BIT_MASK(32),
++		.platform_data = &fh_mci_sd,
++	}
++};
++
++struct platform_device fh_sd1_device = {
++	.name			= "fh_mci",
++	.id			= 1,
++	.num_resources		= ARRAY_SIZE(fh_sdc1_resources),
++	.resource		= fh_sdc1_resources,
++	.dev			= {
++		.coherent_dma_mask	= DMA_BIT_MASK(32),
++		.platform_data = &fh_mci,
++	}
++};
++
++struct platform_device fh_wdt_device = {
++	.name			= "fh_wdt",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_wdt_resources),
++	.resource		= fh_wdt_resources,
++	.dev			= {
++		.platform_data = &fh_wdt_data,
++	}
++};
++
++static struct platform_device fh_uart0_device = {
++	.name			= "ttyS",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_uart0_resources),
++	.resource		= fh_uart0_resources,
++};
++
++static struct platform_device fh_uart1_device = {
++	.name			= "ttyS",
++	.id			= 1,
++	.num_resources		= ARRAY_SIZE(fh_uart1_resources),
++	.resource		= fh_uart1_resources,
++};
++
++static struct platform_device fh_dma_device = {
++	.name			= "fh_dmac",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_dma_resources),
++	.resource		= fh_dma_resources,
++	.dev.platform_data	= &fh_dma_data,
++};
++
++static struct platform_device fh_i2c0_device = {
++	.name			= "fh_i2c",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_i2c_resources_0),
++	.resource		= fh_i2c_resources_0,
++};
++
++static struct platform_device fh_i2c1_device = {
++	.name			= "fh_i2c",
++	.id			= 1,
++	.num_resources		= ARRAY_SIZE(fh_i2c_resources_1),
++	.resource		= fh_i2c_resources_1,
++};
++
++static struct i2c_board_info __initdata fh_i2c_devices[] = {
++	{
++		I2C_BOARD_INFO("24c02", 0x50),
++		.platform_data = &at24c02,
++	},
++	{
++		I2C_BOARD_INFO("pcf8563", 0x51)
++	}
++};
++
++#define FH_SPI0_CS0	(54)
++#define FH_SPI0_CS1	(55)
++
++#define FH_SPI1_CS0	(56)
++#define FH_SPI1_CS1	(57)
++
++#define FH_SPI3_CS0	(58)
++#define FH_SPI3_CS1	(59)
++
++#define SPI0_FIFO_DEPTH				(64)
++#define SPI0_CLK_IN				(100000000)
++#define SPI0_MAX_SLAVE_NO			(2)
++#define SPI0_DMA_RX_CHANNEL			(0)
++#define SPI0_DMA_TX_CHANNEL			(1)
++
++#define SPI1_FIFO_DEPTH				(32)
++#define SPI1_CLK_IN				(100000000)
++#define SPI1_MAX_SLAVE_NO			(2)
++#define SPI1_DMA_RX_CHANNEL			(2)
++#define SPI1_DMA_TX_CHANNEL			(3)
++
++#define SPI2_FIFO_DEPTH				(64)
++#define SPI2_CLK_IN				(100000000)
++
++#define SPI3_FIFO_DEPTH				(32)
++#define SPI3_CLK_IN				(100000000)
++#define SPI3_MAX_SLAVE_NO			(2)
++#define SPI3_DMA_RX_CHANNEL			(4)
++#define SPI3_DMA_TX_CHANNEL			(5)
++/* SPI_TRANSFER_USE_DMA */
++
++static struct fh_spi_platform_data fh_spi0_data = {
++	.apb_clock_in = SPI0_CLK_IN,
++	.fifo_len = SPI0_FIFO_DEPTH,
++	.slave_max_num = SPI0_MAX_SLAVE_NO,
++	.cs_data[0].GPIO_Pin = FH_SPI0_CS0,
++	.cs_data[0].name = "spi0_cs0",
++	.cs_data[1].GPIO_Pin = FH_SPI0_CS1,
++	.cs_data[1].name = "spi0_cs1",
++	.dma_transfer_enable = 0,
++	.rx_handshake_num = 2,
++	.tx_handshake_num = 3,
++	.rx_dma_channel = SPI0_DMA_RX_CHANNEL,
++	.tx_dma_channel = SPI0_DMA_TX_CHANNEL,
++	.clk_name = "spi0_clk",
++};
++
++static struct fh_spi_platform_data fh_spi1_data = {
++	.apb_clock_in = SPI1_CLK_IN,
++	.fifo_len = SPI1_FIFO_DEPTH,
++	.slave_max_num = SPI1_MAX_SLAVE_NO,
++	.cs_data[0].GPIO_Pin = FH_SPI1_CS0,
++	.cs_data[0].name = "spi1_cs0",
++	.cs_data[1].GPIO_Pin = FH_SPI1_CS1,
++	.cs_data[1].name = "spi1_cs1",
++	.dma_transfer_enable = 0,
++	.rx_handshake_num = 4,
++	.tx_handshake_num = 5,
++	.rx_dma_channel = SPI1_DMA_RX_CHANNEL,
++	.tx_dma_channel = SPI1_DMA_TX_CHANNEL,
++	.clk_name = "spi1_clk",
++};
++
++static struct fh_spi_platform_data fh_spi2_data = {
++	.apb_clock_in = SPI2_CLK_IN,
++	.fifo_len = SPI2_FIFO_DEPTH,
++	.dma_transfer_enable = 0,
++	.rx_handshake_num = 12,
++	.tx_handshake_num = 13,
++	.clk_name = "spi2_clk",
++};
++
++static struct fh_rtc_platform_data fh_rtc_data = {
++	.clock_in = 32768,
++	.dev_name = "rtc",
++	.clk_name = "rtc_clk",
++	.base_year = 2000,
++	.base_month = 1,
++	.base_day = 1,
++	.sadc_channel = -1,
++};
++
++
++
++static struct platform_device fh_spi0_device = {
++	.name			= "fh_spi",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_spi0_resources),
++	.resource		= fh_spi0_resources,
++	.dev			= {
++		.platform_data = &fh_spi0_data,
++	},
++};
++
++static struct platform_device fh_spi1_device = {
++	.name			= "fh_spi",
++	.id			= 1,
++	.num_resources		= ARRAY_SIZE(fh_spi1_resources),
++	.resource		= fh_spi1_resources,
++	.dev			= {
++		.platform_data = &fh_spi1_data,
++	},
++};
++
++
++static struct platform_device fh_spi2_device = {
++	.name			= "fh_spi_slave",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_spi2_resources),
++	.resource		= fh_spi2_resources,
++	.dev			= {
++		.platform_data = &fh_spi2_data,
++	},
++};
++
++static struct platform_device fh_pwm_device = {
++	.name			= "fh_pwm",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_pwm_resources),
++	.resource		= fh_pwm_resources,
++
++};
++
++static struct platform_device fh_pinctrl_device = {
++	.name			= "fh_pinctrl",
++	.id			= 0,
++};
++
++static struct platform_device fh_sadc_device = {
++	.name			= "fh_sadc",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_sadc_resources),
++	.resource		= fh_sadc_resources,
++	.dev			= {
++		.platform_data = NULL,
++	},
++};
++
++static struct platform_device fh_aes_device = {
++	.name			= "fh_aes",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_aes_resources),
++	.resource		= fh_aes_resources,
++	.dev			= {
++		.platform_data = NULL,
++	},
++};
++
++static struct platform_device fh_ac97_device = {
++	.name			= "fh-acodec",
++	.id			= -1,
++};
++
++static struct platform_device fh_pcm_device = {
++	.name			= "fh-pcm-audio",
++	.id			= -1,
++};
++
++static struct platform_device fh_rtc_device = {
++	.name			= "fh_rtc",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_rtc_resources),
++	.resource		= fh_rtc_resources,
++	.dev			= {
++		.platform_data = &fh_rtc_data,
++	},
++};
++
++static struct platform_device fh_efuse_device = {
++	.name			= "fh_efuse",
++	.id			= 0,
++	.num_resources		= ARRAY_SIZE(fh_efuse_resources),
++	.resource		= fh_efuse_resources,
++};
++
++/*
++ * FH8833 USB board config
++ * added 2016/12/20
++ *
++ */
++#define USB_REG_BASE 0xe0700000
++#define S3C64XX_PA_USBHOST  USB_REG_BASE
++#define IRQ_UHOST           USBC_IRQ
++#define S3C_PA_OTG			S3C64XX_PA_USBHOST
++#define IRQ_OTG             IRQ_UHOST
++#define S3C64XX_SZ_USBHOST	SZ_1M
++#define S3C_SZ_OTG          SZ_1M
++/* USB Host Controller */
++
++static struct resource s3c_usb_resource[] = {
++	[0] = {
++		.start = S3C64XX_PA_USBHOST,
++		.end   = S3C64XX_PA_USBHOST + S3C64XX_SZ_USBHOST - 1,
++		.flags = IORESOURCE_MEM,
++	},
++	[1] = {
++		.start = IRQ_UHOST,
++		.end   = IRQ_UHOST,
++		.flags = IORESOURCE_IRQ,
++	}
++};
++
++static u64 s3c_device_usb_dmamask = 0xffffffffUL;
++
++struct platform_device s3c_device_usb = {
++	.name             = "s3c2410-ohci",
++	.id               = -1,
++	.num_resources    = ARRAY_SIZE(s3c_usb_resource),
++	.resource         = s3c_usb_resource,
++	.dev              = {
++		.dma_mask = &s3c_device_usb_dmamask,
++		.coherent_dma_mask = 0xffffffffUL
++	}
++};
++
++EXPORT_SYMBOL(s3c_device_usb);
++
++/* USB Device (Gadget)*/
++
++static struct resource s3c_usbgadget_resource[] = {
++	[0] = {
++		.start = S3C_PA_OTG,
++		.end   = S3C_PA_OTG + S3C_SZ_OTG - 1,
++		.flags = IORESOURCE_MEM,
++	},
++	[1] = {
++		.start = IRQ_OTG,
++		.end   = IRQ_OTG,
++		.flags = IORESOURCE_IRQ,
++	}
++};
++
++struct platform_device s3c_device_usbgadget = {
++	.name		  = "s3c-usbgadget",
++	.id		  = -1,
++	.num_resources	  = ARRAY_SIZE(s3c_usbgadget_resource),
++	.resource	  = s3c_usbgadget_resource,
++};
++
++EXPORT_SYMBOL(s3c_device_usbgadget);
++
++/* USB Device (OTG hcd)*/
++
++static struct resource s3c_usb_otghcd_resource[] = {
++	{
++		.start = S3C_PA_OTG,
++		.end   = S3C_PA_OTG + S3C_SZ_OTG - 1,
++		.flags = IORESOURCE_MEM,
++	},
++	{
++		.start = IRQ_OTG,
++		.end   = IRQ_OTG,
++		.flags = IORESOURCE_IRQ,
++	}
++};
++
++static u64 s3c_device_usb_otghcd_dmamask = 0xffffffffUL;
++
++struct platform_device s3c_device_usb_otghcd = {
++	.name		= "s3c_otghcd",
++	.id		= -1,
++	.num_resources	= ARRAY_SIZE(s3c_usb_otghcd_resource),
++	.resource	= s3c_usb_otghcd_resource,
++	.dev              = {
++		.dma_mask = &s3c_device_usb_otghcd_dmamask,
++		.coherent_dma_mask = 0xffffffffUL
++	}
++};
++
++static u64 fh_usb_otghcd_dmamask = 0xffffffffUL;
++struct platform_device fh_device_usb_otghcd = {
++	.name		= "fh_otg",
++	.id		= -1,
++	.num_resources	= ARRAY_SIZE(s3c_usb_otghcd_resource),
++	.resource	= s3c_usb_otghcd_resource,
++	.dev = {
++		.dma_mask = &fh_usb_otghcd_dmamask,
++		.coherent_dma_mask = 0xffffffffUL,
++	}
++};
++
++static struct platform_device *fh8833_devices[] __initdata = {
++	&fh_gmac_device,
++	&fh_uart0_device,
++	&fh_uart1_device,
++	&fh_dma_device,
++	&fh_i2c0_device,
++	&fh_i2c1_device,
++	&fh_sd0_device,
++	&fh_sd1_device,
++	&fh_spi0_device,
++	&fh_spi1_device,
++	&fh_spi2_device,
++	&fh_gpio0_device,
++	&fh_gpio1_device,
++	&fh_wdt_device,
++	&fh_pwm_device,
++	&fh_pinctrl_device,
++	&fh_sadc_device,
++	&fh_aes_device,
++	&fh_pcm_device,
++	&fh_ac97_device,
++	&fh_acw_misc_device,
++	&fh_i2s_misc_device,
++	&fh_rtc_device,
++	&fh_device_usb_otghcd,
++	&fh_efuse_device,
++
++};
++
++static struct mtd_partition fh_sf_parts[] = {
++	{
++		/* head & Ramboot */
++		.name		= "bootstrap",
++		.offset		= 0,
++		.size		= SZ_256K,
++		.mask_flags	= MTD_WRITEABLE, /* force read-only */
++	}, {
++		/* Ramboot & U-Boot environment */
++		.name		= "uboot-env",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= SZ_64K,
++		.mask_flags	= MTD_WRITEABLE, /* force read-only */
++	}, {
++		/* U-Boot */
++		.name		= "uboot",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= 3 * SZ_64K,
++		.mask_flags	= MTD_WRITEABLE, /* force read-only */
++	}, {
++		.name		= "kernel",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= SZ_4M,
++		.mask_flags	= 0,
++	}, {
++		.name		= "rootfs",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= SZ_8M,
++		.mask_flags	= 0,
++	}, {
++		.name		= "app",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= MTDPART_SIZ_FULL,
++		.mask_flags	= 0,
++	}
++	/* mtdparts=
++	 * spi_flash:256k(bootstrap),
++	 * 64k(u-boot-env),
++	 * 192k(u-boot),4M(kernel),
++	 * 8M(rootfs),
++	 * -(app) */
++	/* two blocks with bad block table (and mirror) at the end */
++};
++#ifdef CONFIG_MACH_FH_NAND
++static struct mtd_partition fh_sf_nand_parts[] = {
++	{
++		/* head & Ramboot */
++		.name		= "bootstrap",
++		.offset		= 0,
++		.size		= SZ_256K,
++		.mask_flags	= MTD_WRITEABLE, /* force read-only */
++	}, {
++		.name		= "uboot-env",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= SZ_256K,
++		.mask_flags	= MTD_WRITEABLE,
++	}, {
++		.name		= "uboot",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= SZ_256K,
++		.mask_flags	= MTD_WRITEABLE,
++	}, {
++		.name		= "kernel",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= SZ_4M,
++		.mask_flags	= 0,
++	}, {
++		.name		= "rootfs",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= SZ_8M,
++		.mask_flags	= 0,
++	}, {
++		.name		= "app",
++		.offset		= MTDPART_OFS_APPEND,
++		.size		= MTDPART_SIZ_FULL,
++		.mask_flags	= 0,
++	}
++	/* mtdparts=
++	 * spi0.0:256k(bootstrap),
++	 * 256k(u-boot-env),
++	 * 256k(u-boot),
++	 * 4M(kernel),
++	 * 8M(rootfs),
++	 * -(app) */
++	/* two blocks with bad block table (and mirror) at the end */
++};
++static struct flash_platform_data fh_nandflash_platform_data  = {
++	.name		= "spi_nandflash",
++	.parts		= fh_sf_nand_parts,
++	.nr_parts	= ARRAY_SIZE(fh_sf_nand_parts),
++};
++#endif
++static struct flash_platform_data fh_flash_platform_data  = {
++	.name		= "spi_flash",
++	.parts		= fh_sf_parts,
++	.nr_parts	= ARRAY_SIZE(fh_sf_parts),
++};
++static struct spi_board_info fh_spi_devices[] = {
++#ifdef CONFIG_MACH_FH_NAND
++	{
++		.modalias       = "spi-nand",
++		.bus_num        = 0,
++		.chip_select    = 0,
++		.max_speed_hz   = 50000000,
++		.mode           = SPI_MODE_3,
++		.platform_data   = &fh_nandflash_platform_data,
++	},
++#endif
++	{
++		.modalias        = "m25p80",
++		.bus_num         = 0,
++		.chip_select     = 0,
++		.mode            = SPI_MODE_3,
++		.max_speed_hz    = 25000000,
++		.platform_data   = &fh_flash_platform_data,
++	},
++
++};
++static void __init fh8833_map_io(void)
++{
++	iotable_init(fh8833_io_desc, ARRAY_SIZE(fh8833_io_desc));
++}
++
++
++static __init void fh8833_board_init(void)
++{
++	printk(KERN_INFO "fh8833 board init\n");
++	platform_add_devices(fh8833_devices, ARRAY_SIZE(fh8833_devices));
++	i2c_register_board_info(1, fh_i2c_devices, ARRAY_SIZE(fh_i2c_devices));
++	spi_register_board_info(fh_spi_devices, ARRAY_SIZE(fh_spi_devices));
++	fh_clk_procfs_init();
++	fh_pmu_init();
++}
++
++static void __init fh8833_init_early(void)
++{
++	fh_clk_init();
++	fh_pinctrl_init(VA_PMU_REG_BASE + 0x80);
++}
++
++MACHINE_START(FH8833, "FH8833")
++	.boot_params	= DDR_BASE + 0x100,
++	.map_io		= fh8833_map_io,
++	.init_irq	= fh_intc_init,
++	.timer		= &fh_timer,
++	.init_machine	= fh8833_board_init,
++	.init_early	= fh8833_init_early,
++MACHINE_END
+diff --git a/arch/arm/mach-fh/clock.c b/arch/arm/mach-fh/clock.c
+new file mode 100644
+index 00000000..c883b639
+--- /dev/null
++++ b/arch/arm/mach-fh/clock.c
+@@ -0,0 +1,641 @@
++/*
++ * Clock and PLL control for FH devices
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/mutex.h>
++#include <linux/io.h>
++#include <asm/bitops.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++#include <mach/hardware.h>
++#include <asm/uaccess.h>
++#include <linux/miscdevice.h>
++#include <mach/clock.h>
++#include <linux/platform_device.h>
++#include <mach/pmu.h>
++
++#define PROC_FILE   "driver/clock"
++
++static LIST_HEAD(clocks);
++static DEFINE_MUTEX(clocks_mutex);
++static DEFINE_SPINLOCK(clocks_lock);
++
++static struct proc_dir_entry *proc_file;
++
++
++/* #define FH_CLK_DEBUG */
++
++#if defined(FH_CLK_DEBUG)
++#define PRINT_CLK_DBG(fmt, args...)		\
++	do					\
++	{					\
++		printk("FH_CLK_DEBUG: ");	\
++		printk(fmt, ##args);		\
++	} while (0)
++#else
++#define PRINT_CLK_DBG(fmt, args...)	\
++	do				\
++	{				\
++	} while (0)
++#endif
++
++
++void clk_set_clk_sel(unsigned int reg)
++{
++	fh_pmu_set_reg(REG_PMU_CLK_SEL, reg);
++}
++EXPORT_SYMBOL(clk_set_clk_sel);
++
++unsigned int clk_get_clk_sel(void)
++{
++	return fh_pmu_get_reg(REG_PMU_CLK_SEL);
++}
++EXPORT_SYMBOL(clk_get_clk_sel);
++
++#ifdef FH_CLOCK_DEBUG
++static void __clk_sel_ddr_clk(int source)
++{
++	unsigned int clk_sel;
++	int shift = 24;
++	clk_sel = clk_get_clk_sel();
++	clk_sel &= ~(0x1 << shift);
++	clk_sel |= (source & 0x1) << shift;
++	clk_set_clk_sel(clk_sel);
++}
++
++static void __clk_sel_pix_clk(int source)
++{
++	unsigned int clk_sel;
++	int shift = 4;
++	clk_sel = clk_get_clk_sel();
++	clk_sel &= ~(0x3 << shift);
++	clk_sel |= (source & 0x3) << shift;
++	clk_set_clk_sel(clk_sel);
++}
++
++static void __clk_sel_ac_clk(int source)
++{
++	unsigned int clk_sel;
++	int shift = 0;
++	clk_sel = clk_get_clk_sel();
++	clk_sel &= ~(0x1 << shift);
++	clk_sel |= (source & 0x1) << shift;
++	clk_set_clk_sel(clk_sel);
++}
++#endif
++
++
++static void fh_clk_enable(struct clk *clk)
++{
++	unsigned int reg;
++
++	if (clk->flag & CLOCK_NOGATE) {
++		PRINT_CLK_DBG("%s, %s has no gate register\n", __func__, clk->name);
++		return;
++	}
++
++	reg = fh_pmu_get_reg(clk->en_reg_offset);
++	PRINT_CLK_DBG("%s, clk: %s, reg: 0x%x\n", __func__, clk->name, reg);
++	reg &= ~(clk->en_reg_mask);
++	fh_pmu_set_reg(clk->en_reg_offset, reg);
++	PRINT_CLK_DBG("%s, clk: %s, after mask: 0x%x\n", __func__, clk->name, reg);
++}
++
++static void fh_clk_disable(struct clk *clk)
++{
++	unsigned int reg;
++
++	if (clk->flag & CLOCK_NOGATE) {
++		PRINT_CLK_DBG("%s, %s has no gate register\n", __func__, clk->name);
++		return;
++	}
++
++	reg = fh_pmu_get_reg(clk->en_reg_offset);
++	reg |= clk->en_reg_mask;
++	fh_pmu_set_reg(clk->en_reg_offset, reg);
++
++	PRINT_CLK_DBG("%s, clk: %s, reg: 0x%x\n", __func__, clk->name, reg);
++}
++
++static int fh_clk_get_sel(struct clk *clk)
++{
++	unsigned int reg, shift;
++	int ret;
++
++	if (!(clk->flag & CLOCK_MULTI_PARENT))
++		return 0;
++
++	shift = ffs(clk->sel_reg_mask) - 1;
++	reg = fh_pmu_get_reg(clk->sel_reg_offset);
++	reg &= clk->sel_reg_mask;
++	ret = reg >> shift;
++	PRINT_CLK_DBG("%s, clk: %s, sel: %d\n", __func__, clk->name, ret);
++
++	return ret;
++}
++
++static void fh_clk_set_sel(struct clk *clk, int sel)
++{
++	unsigned int reg, shift;
++
++	if (!(clk->flag & CLOCK_MULTI_PARENT)) {
++		PRINT_CLK_DBG("%s, clk: %s has only one parent\n", __func__, clk->name);
++		return;
++	}
++
++	clk->select = sel;
++	shift = ffs(clk->sel_reg_mask) - 1;
++	reg = fh_pmu_get_reg(clk->sel_reg_offset);
++	reg &= ~(clk->sel_reg_mask);
++	reg |= (sel << shift);
++	fh_pmu_set_reg(clk->sel_reg_offset, reg);
++	PRINT_CLK_DBG("%s, clk: %s, select: %d, reg: 0x%x\n", __func__, clk->name, sel,
++		      reg);
++}
++
++static unsigned long fh_clk_get_pll_rate(struct clk *clk)
++{
++	unsigned int reg, m, n, od, no = 1, i;
++
++	reg = fh_pmu_get_reg(clk->div_reg_offset);
++	m = reg & 0xff;
++	n = (reg >> 8) & 0xf;
++	od = (reg >> 16) & 0x3;
++
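++	/* PLL output = OSC * M / (N * 2^OD) */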
++	for(i=0; i<od; i++)
++		no *= 2;
++
++	clk->frequency = OSC_FREQUENCY * m / n / no;
++
++	return clk->frequency;
++}
++
++static int fh_clk_get_div(struct clk *clk)
++{
++	unsigned int reg, shift;
++	int ret;
++
++	if (clk->flag & (CLOCK_NODIV | CLOCK_FIXED))
++		return 0;
++
++	shift = ffs(clk->div_reg_mask) - 1;
++	reg = fh_pmu_get_reg(clk->div_reg_offset);
++	PRINT_CLK_DBG("%s, clk: %s, reg: 0x%x\n", __func__, clk->name, reg);
++	reg &= clk->div_reg_mask;
++	PRINT_CLK_DBG("%s, clk: %s, shift: %d, after mask: 0x%x\n", __func__, clk->name,
++		      shift, reg);
++	ret = reg >> shift;
++	PRINT_CLK_DBG("%s, clk: %s, div: %d\n", __func__, clk->name, ret);
++	PRINT_CLK_DBG("%s, clk: %s, div_mask: 0x%x, div_offset: 0x%x\n",
++		      __func__, clk->name, clk->div_reg_mask, clk->div_reg_offset);
++
++	return ret;
++}
++
++static void fh_clk_set_div(struct clk *clk, int div)
++{
++	unsigned int reg, shift;
++
++	if (clk->flag & CLOCK_NODIV) {
++		PRINT_CLK_DBG("%s, clk: %s has no divide\n", __func__, clk->name);
++		return;
++	}
++
++	shift = ffs(clk->div_reg_mask) - 1;
++
++	if(div > clk->div_reg_mask >> shift)
++	{
++		pr_err("%s, clk: %s, curr div %d is too big, max is %d\n",
++				__func__, clk->name, div, clk->div_reg_mask >> shift);
++		return;
++	}
++
++	clk->divide = div;
++
++	reg = fh_pmu_get_reg(clk->div_reg_offset);
++	PRINT_CLK_DBG("%s, clk: %s, reg: 0x%x\n", __func__, clk->name, reg);
++	reg &= ~(clk->div_reg_mask);
++	reg |= (div << shift);
++	PRINT_CLK_DBG("%s, clk: %s, shift: %d, after mask: 0x%x\n", __func__, clk->name,
++		      shift, reg);
++	fh_pmu_set_reg(clk->div_reg_offset, reg);
++	PRINT_CLK_DBG("%s, clk: %s, div: %d, reg: 0x%x\n", __func__, clk->name, div,
++		      reg);
++	PRINT_CLK_DBG("%s, clk: %s, div_mask: 0x%x, div_offset: 0x%x\n",
++		      __func__, clk->name, clk->div_reg_mask, clk->div_reg_offset);
++
++}
++
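++/* Derived clock rate = parent rate / prediv / (divider field + 1); fixed-rate and PLL clocks are computed separately. */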
++unsigned long fh_clk_get_rate(struct clk *clk)
++{
++	if (clk->flag & CLOCK_FIXED) {
++		PRINT_CLK_DBG("%s, clk: %s is fixed clock, rate: %lu\n", __func__, clk->name,
++			      clk->frequency);
++		return clk->frequency;
++	}
++
++	if (clk->flag & CLOCK_PLL) {
++		PRINT_CLK_DBG("%s, clk: %s is a PLL clock\n", __func__, clk->name);
++		return fh_clk_get_pll_rate(clk);
++	}
++
++
++	clk->select = fh_clk_get_sel(clk);
++	clk->divide = fh_clk_get_div(clk) + 1;
++
++	if (clk->select > CLOCK_MAX_PARENT) {
++		pr_err("ERROR, %s, clk: %s, select is not correct, clk->select: %d\n", __func__,
++		       clk->name, clk->select);
++		return 0;
++	}
++
++	if (!clk->parent[clk->select]) {
++		pr_err("ERROR, %s, clk: %s has no parent and is not a fixed clock\n", __func__,
++		       clk->name);
++		return 0;
++	}
++
++	clk->frequency = clk->parent[clk->select]->frequency / clk->prediv;
++	clk->frequency /= clk->divide;
++
++	PRINT_CLK_DBG("%s, clk: %s, rate: %lu\n", __func__, clk->name, clk->frequency);
++
++	return clk->frequency;
++}
++
++void fh_clk_set_rate(struct clk *clk, unsigned long rate)
++{
++	if (clk->flag & CLOCK_FIXED) {
++		pr_err("%s, clk: %s is fixed clock, rate: %lu\n", __func__, clk->name,
++		       clk->frequency);
++		return;
++	}
++
++	if (clk->flag & CLOCK_PLL) {
++		pr_err("%s, clk: %s is a PLL clock, changing frequency is not recommended\n",
++				__func__, clk->name);
++		return;
++	}
++
++	if (clk->select > CLOCK_MAX_PARENT) {
++		pr_err("ERROR, %s, clk: %s, select is not correct, clk->select: %d\n", __func__,
++		       clk->name, clk->select);
++		return;
++	}
++
++	if (!clk->parent[clk->select]) {
++		pr_err("ERROR, %s, clk: %s has no parent and is not a fixed clock\n", __func__,
++		       clk->name);
++		return;
++	}
++
++	clk->frequency = clk->parent[clk->select]->frequency / clk->prediv;
++	clk->divide = clk->frequency / rate;
++	PRINT_CLK_DBG("%s, clk: %s, set rate: %lu, divide: %d\n", __func__, clk->name,
++		      rate, clk->divide);
++	fh_clk_set_div(clk, clk->divide - 1);
++
++	clk->frequency = rate;
++
++	PRINT_CLK_DBG("%s, clk: %s, rate: %lu\n", __func__, clk->name, clk->frequency);
++}
++
++void fh_clk_reset(struct clk *clk)
++{
++	unsigned int reg;
++
++	if (clk->flag & CLOCK_NORESET) {
++		pr_err("%s, clk: %s has no reset\n", __func__, clk->name);
++		return;
++	}
++
++	reg = 0xffffffff & ~(clk->rst_reg_mask);
++
++	fh_pmu_set_reg(clk->rst_reg_offset, reg);
++	while (fh_pmu_get_reg(clk->rst_reg_offset) != 0xffffffff) {
++
++	}
++	PRINT_CLK_DBG("%s, clk: %s has been reset\n", __func__, clk->name);
++}
++
++int clk_enable(struct clk *clk)
++{
++	unsigned long flags;
++
++	if (clk == NULL || IS_ERR(clk))
++		return -EINVAL;
++
++	spin_lock_irqsave(&clocks_lock, flags);
++	fh_clk_enable(clk);
++	spin_unlock_irqrestore(&clocks_lock, flags);
++
++	return 0;
++}
++EXPORT_SYMBOL(clk_enable);
++
++void clk_disable(struct clk *clk)
++{
++	unsigned long flags;
++
++	if (clk == NULL || IS_ERR(clk))
++		return;
++
++	spin_lock_irqsave(&clocks_lock, flags);
++	fh_clk_disable(clk);
++	spin_unlock_irqrestore(&clocks_lock, flags);
++}
++EXPORT_SYMBOL(clk_disable);
++
++
++unsigned long clk_get_rate(struct clk *clk)
++{
++	unsigned long flags, rate;
++
++	if (clk == NULL || IS_ERR(clk))
++		return -EINVAL;
++
++	spin_lock_irqsave(&clocks_lock, flags);
++
++	rate = fh_clk_get_rate(clk);
++
++	spin_unlock_irqrestore(&clocks_lock, flags);
++
++	return rate;
++}
++EXPORT_SYMBOL(clk_get_rate);
++
++
++int clk_set_rate(struct clk *clk, unsigned long rate)
++{
++	unsigned long flags, real_rate;
++	int ret = -EINVAL;
++
++	if (clk == NULL || IS_ERR(clk))
++		return ret;
++
++	spin_lock_irqsave(&clocks_lock, flags);
++	fh_clk_set_rate(clk, rate);
++	real_rate = fh_clk_get_rate(clk);
++	if (rate != real_rate)
++	{
++		printk("WARN: set clk %s to %lu, but got %lu\n", clk->name, rate, real_rate);
++	}
++	spin_unlock_irqrestore(&clocks_lock, flags);
++
++	return 0;
++}
++EXPORT_SYMBOL(clk_set_rate);
++
++void clk_reset(struct clk *clk)
++{
++	unsigned long flags;
++
++	if (clk == NULL || IS_ERR(clk))
++		return;
++
++	spin_lock_irqsave(&clocks_lock, flags);
++	fh_clk_reset(clk);
++	spin_unlock_irqrestore(&clocks_lock, flags);
++}
++EXPORT_SYMBOL(clk_reset);
++
++void clk_change_parent(struct clk *clk, int select)
++{
++	unsigned long flags;
++
++	if (clk == NULL || IS_ERR(clk))
++		return;
++
++	spin_lock_irqsave(&clocks_lock, flags);
++	fh_clk_set_sel(clk, select);
++	spin_unlock_irqrestore(&clocks_lock, flags);
++}
++EXPORT_SYMBOL(clk_change_parent);
++
++int clk_register(struct clk *clk)
++{
++	if (clk == NULL || IS_ERR(clk))
++		return -EINVAL;
++
++	if (WARN(clk->parent[clk->select] && !clk->parent[clk->select]->frequency,
++		 "CLK: %s parent %s has no rate!\n",
++		 clk->name, clk->parent[clk->select]->name))
++		return -EINVAL;
++
++	clk_get_rate(clk);
++
++	PRINT_CLK_DBG("clk: %s has been registered, div: %d, sel: %d\n",
++		      clk->name, clk->divide, clk->select);
++
++	mutex_lock(&clocks_mutex);
++	list_add_tail(&clk->list, &clocks);
++	mutex_unlock(&clocks_mutex);
++
++	return 0;
++}
++EXPORT_SYMBOL(clk_register);
++
++void clk_unregister(struct clk *clk)
++{
++	if (clk == NULL || IS_ERR(clk))
++		return;
++
++	mutex_lock(&clocks_mutex);
++	list_del(&clk->list);
++	mutex_unlock(&clocks_mutex);
++}
++EXPORT_SYMBOL(clk_unregister);
++
++
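++/* Remove every occurrence of 'ch' from 'str' in place. */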
++static void del_char(char *str, char ch)
++{
++	char *p = str;
++	char *q = str;
++	while(*q)
++	{
++		if (*q !=ch)
++		{
++			*p++ = *q;
++		}
++		q++;
++	}
++	*p='\0';
++}
++
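++/* proc write handler; input format: "<clk name>,<enable|disable>,<rate in Hz>". */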
++static ssize_t fh_clk_proc_write(struct file *filp, const char *buf, size_t len, loff_t *off)
++{
++	int i, ret;
++	char message[64] = {0};
++	char * const delim = ",";
++	char *cur = message;
++	char* param_str[4];
++	unsigned int param[4];
++	struct clk *clk;
++
++	len = (len > sizeof(message) - 1) ? sizeof(message) - 1 : len;
++
++	if (copy_from_user(message, buf, len))
++		return -EFAULT;
++
++	for(i=0; i<3; i++)
++	{
++		param_str[i] = strsep(&cur, delim);
++		if(!param_str[i])
++		{
++			pr_err("%s: ERROR: parameter[%d] is empty\n", __func__, i);
++			pr_err("[clk name], [enable/disable], [clk rate]\n");
++			return -EINVAL;
++		}
++		else
++		{
++			del_char(param_str[i], ' ');
++			del_char(param_str[i], '\n');
++		}
++	}
++
++	clk = clk_get(NULL, param_str[0]);
++	if(!clk)
++	{
++		pr_err("%s: ERROR: clk %s is not found\n", __func__, param_str[0]);
++		pr_err("[clk name], [enable/disable], [clk rate]\n");
++		return -EINVAL;
++	}
++
++	ret = kstrtouint(param_str[2], 10, &param[2]);
++	if (ret)
++	{
++		pr_err("ERROR: parameter[2] is not a valid rate\n");
++		return -EINVAL;
++	}
++
++	ret = clk_set_rate(clk, param[2]);
++	if(ret)
++	{
++		pr_err("set clk rate failed\n, ret=%d\n", ret);
++	}
++
++	if (!strcmp(param_str[1], "enable"))
++	{
++		clk_enable(clk);
++		printk("clk %s enabled\n", param_str[0]);
++	}
++	else if (!strcmp(param_str[1], "disable"))
++	{
++		clk_disable(clk);
++		printk("clk %s disabled\n", param_str[0]);
++	}
++	else
++	{
++		pr_err("%s: ERROR: parameter[1]:%s is incorrect\n", __func__, param_str[1]);
++		pr_err("[clk name], [enable/disable], [clk rate]\n");
++		return -EINVAL;
++	}
++
++	return len;
++}
++
++
++static void *v_seq_start(struct seq_file *s, loff_t *pos)
++{
++	static unsigned long counter = 0;
++	if (*pos == 0)
++		return &counter;
++	else {
++		*pos = 0;
++		return NULL;
++	}
++}
++
++static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++	(*pos)++;
++	return NULL;
++}
++
++static void v_seq_stop(struct seq_file *s, void *v)
++{
++
++}
++
++static int v_seq_show(struct seq_file *sfile, void *v)
++{
++
++	struct clk_lookup *clock_lookup;
++	struct clk *clk;
++	unsigned long rate;
++
++	seq_printf(sfile, "\nPLL Information: \n");
++
++	for (clock_lookup = fh_clks; clock_lookup->clk; clock_lookup++) {
++		clk = clock_lookup->clk;
++		rate = clk_get_rate(clk);
++		seq_printf(sfile, "\t%-20s \t%9lu HZ\n", clk->name, rate);
++	}
++	return 0;
++}
++
++static const struct seq_operations fh_clk_seq_ops = {
++	.start = v_seq_start,
++	.next = v_seq_next,
++	.stop = v_seq_stop,
++	.show = v_seq_show
++};
++
++static int fh_clk_proc_open(struct inode *inode, struct file *file)
++{
++	return seq_open(file, &fh_clk_seq_ops);
++}
++
++
++static struct file_operations fh_clk_proc_ops = {
++	.owner		= THIS_MODULE,
++	.open		= fh_clk_proc_open,
++	.read		= seq_read,
++	.write		= fh_clk_proc_write,
++	.release	= seq_release,
++};
++
++int __init fh_clk_procfs_init(void)
++{
++	proc_file = create_proc_entry(PROC_FILE, 0644, NULL);
++	if (proc_file)
++		proc_file->proc_fops = &fh_clk_proc_ops;
++	else
++		pr_err("clock, create proc fs failed\n");
++
++	return 0;
++}
++
++int __init fh_clk_init(void)
++{
++	struct clk_lookup *clock_lookup;
++	struct clk *clk;
++	size_t num_clocks = 0;
++
++	for (clock_lookup = fh_clks; clock_lookup->clk; clock_lookup++) {
++		clk = clock_lookup->clk;
++		num_clocks++;
++		clk_register(clk);
++		if (clk->def_rate)
++			clk_set_rate(clk, clk->def_rate);
++	}
++	clkdev_add_table(fh_clks, num_clocks);
++	return 0;
++}
+diff --git a/arch/arm/mach-fh/fh8833.c b/arch/arm/mach-fh/fh8833.c
+new file mode 100644
+index 00000000..daac50e7
+--- /dev/null
++++ b/arch/arm/mach-fh/fh8833.c
+@@ -0,0 +1,466 @@
++/*
++ * Fullhan FH8833 board support
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++#include <linux/init.h>
++#include <linux/clk.h>
++#include <linux/serial_8250.h>
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#include <linux/gpio.h>
++#include <linux/spi/spi.h>
++
++#include <asm/mach/map.h>
++
++#include <mach/chip.h>
++#include <mach/irqs.h>
++#include <mach/timex.h>
++#include <mach/pmu.h>
++#include <mach/clock.h>
++
++/*
++ * external oscillator
++ * fixed to 24M
++ */
++static struct clk osc_clk = {
++	.name               = "osc_clk",
++	.frequency          = OSC_FREQUENCY,
++	.flag               = CLOCK_FIXED,
++};
++
++/*
++ * phase-locked-loop device,
++ * generates a higher frequency clock
++ * from the external oscillator reference
++ */
++static struct clk pll0_clk = {
++	.name               = "pll0_clk",
++	.flag               = CLOCK_PLL,
++	.parent             = {&osc_clk},
++	.div_reg_offset     = REG_PMU_PLL0,
++};
++
++static struct clk pll1_clk = {
++	.name               = "pll1_clk",
++	.flag               = CLOCK_PLL,
++	.parent             = {&osc_clk},
++	.div_reg_offset     = REG_PMU_PLL1,
++};
++
++/*
++ * CPU
++ */
++static struct clk arm_clk = {
++	.name               = "arm_clk",
++	.flag               = CLOCK_NOGATE,
++	.parent             = {&pll0_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV0,
++	.div_reg_mask       = 0xf,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x1,
++};
++
++static struct clk arc_clk = {
++	.name               = "arc_clk",
++	.flag               = CLOCK_NOGATE | CLOCK_NODIV,
++	.parent             = {&pll0_clk},
++	.prediv             = 1,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x400000,
++};
++
++/*
++ * BUS
++ */
++static struct clk axi_clk = {
++	.name               = "axi_clk",
++	.flag               = CLOCK_NOGATE | CLOCK_NODIV | CLOCK_NORESET,
++	.parent             = {&arm_clk},
++	.prediv             = 2,
++};
++
++static struct clk ahb_clk = {
++	.name               = "ahb_clk",
++	.flag               = CLOCK_NOGATE | CLOCK_NORESET,
++	.parent             = {&pll0_clk},
++	.prediv             = 2,
++	.div_reg_offset     = REG_PMU_CLK_DIV0,
++	.div_reg_mask       = 0xf0000,
++};
++
++static struct clk apb_clk = {
++	.name               = "apb_clk",
++	.flag               = CLOCK_NOGATE | CLOCK_NORESET | CLOCK_NODIV,
++	.parent             = {&ahb_clk},
++	.prediv             = 1,
++};
++
++
++/*
++ * ip
++ */
++static struct clk ddr_clk = {
++	.name               = "ddr_clk",
++	.parent             = {&pll0_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV1,
++	.div_reg_mask       = 0xf,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x40,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x8,
++};
++
++static struct clk isp_aclk = {
++	.name               = "isp_aclk",
++	.flag               = CLOCK_NORESET,
++	.parent             = {&pll0_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV0,
++	.div_reg_mask       = 0xf00,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x1,
++};
++
++static struct clk pae_clk = {
++	.name               = "pae_clk",
++	.flag               = CLOCK_NORESET,
++	.parent             = {&pll0_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV0,
++	.div_reg_mask       = 0x7000000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x10,
++};
++
++static struct clk bgm_clk = {
++	.name               = "bgm_clk",
++	.flag               = CLOCK_NORESET,
++	.parent             = {&isp_aclk},
++	.prediv             = 1,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x40000,
++};
++
++static struct clk cis_clk_out = {
++	.name               = "cis_clk_out",
++	.flag               = CLOCK_NORESET,
++	.parent             = {&pll0_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV1,
++	.div_reg_mask       = 0xff0000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x800000,
++};
++
++static struct clk cis_clk_out_revert = {
++	.name               = "cis_clk_out_revert",
++	.flag               = CLOCK_NOGATE | CLOCK_NORESET | CLOCK_NODIV,
++	.parent             = {&cis_clk_out},
++	.prediv             = 1,
++};
++
++static struct clk mipi_dphy_clk = {
++	.name               = "mipi_dphy_clk",
++	.flag               = CLOCK_NORESET,
++	.parent             = {&pll0_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV2,
++	.div_reg_mask       = 0x1f0000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x100000,
++};
++
++static struct clk mipi_pix_clk = {
++	.name               = "mipi_pix_clk",
++	.flag               = CLOCK_NORESET | CLOCK_NOGATE,
++	.parent             = {&pll0_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV2,
++	.div_reg_mask       = 0xf000000,
++};
++
++static struct clk pix_clk = {
++	.name               = "pix_clk",
++	.flag               = CLOCK_NORESET | CLOCK_NODIV | CLOCK_MULTI_PARENT,
++	.parent             = {&cis_clk_out, &cis_clk_out_revert, &mipi_pix_clk},
++	.prediv             = 1,
++	.sel_reg_offset     = REG_PMU_CLK_SEL,
++	.sel_reg_mask       = 0x30,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x400000,
++};
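++
++/*
++ * pix_clk is the only CLOCK_MULTI_PARENT clock in this table: sel_reg_mask
++ * 0x30 (bits [5:4] of REG_PMU_CLK_SEL) presumably indexes into the three
++ * parents listed above (again an assumption from the field layout).
++ */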
++
++static struct clk pts_clk = {
++	.name               = "pts_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 10,
++	.div_reg_offset     = REG_PMU_CLK_DIV2,
++	.div_reg_mask       = 0x1ff,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x80000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x20000,
++};
++
++static struct clk spi0_clk = {
++	.name               = "spi0_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV3,
++	.div_reg_mask       = 0xff,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x80,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x100,
++};
++
++static struct clk spi1_clk = {
++	.name               = "spi1_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV3,
++	.div_reg_mask       = 0xff0000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x100,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x200,
++};
++
++static struct clk spi2_clk = {
++	.name               = "spi2_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV3,
++	.div_reg_mask       = 0xf000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x2,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x100000,
++};
++
++static struct clk sdc0_clk = {
++	.name               = "sdc0_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 2,
++	.div_reg_offset     = REG_PMU_CLK_DIV3,
++	.div_reg_mask       = 0xf00,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x200,
++	.rst_reg_offset     = REG_PMU_SWRST_AHB_CTRL,
++	.rst_reg_mask       = 0x4,
++};
++
++static struct clk sdc1_clk = {
++	.name               = "sdc1_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 2,
++	.div_reg_offset     = REG_PMU_CLK_DIV3,
++	.div_reg_mask       = 0xf000000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x400,
++	.rst_reg_offset     = REG_PMU_SWRST_AHB_CTRL,
++	.rst_reg_mask       = 0x2,
++};
++
++static struct clk uart0_clk = {
++	.name               = "uart0_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 10,
++	.div_reg_offset     = REG_PMU_CLK_DIV4,
++	.div_reg_mask       = 0x1f,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x2000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x4000,
++};
++
++static struct clk uart1_clk = {
++	.name               = "uart1_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 10,
++	.div_reg_offset     = REG_PMU_CLK_DIV4,
++	.div_reg_mask       = 0x1f00,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x4000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x8000,
++};
++
++static struct clk i2c0_clk = {
++	.name               = "i2c0_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 20,
++	.div_reg_offset     = REG_PMU_CLK_DIV4,
++	.div_reg_mask       = 0x3f0000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x1000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x400,
++};
++
++static struct clk i2c1_clk = {
++	.name               = "i2c1_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 20,
++	.div_reg_offset     = REG_PMU_CLK_DIV4,
++	.div_reg_mask       = 0x3f000000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x8000000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x800,
++};
++
++static struct clk pwm_clk = {
++	.name               = "pwm_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 2,
++	.div_reg_offset     = REG_PMU_CLK_DIV5,
++	.div_reg_mask       = 0xff,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x10000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x80,
++	.def_rate	    = 25000000,
++};
++
++static struct clk wdt_clk = {
++	.name               = "wdt_clk",
++	.flag               = CLOCK_NOGATE,
++	.parent             = {&ahb_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV5,
++	.div_reg_mask       = 0xff00,
++	.rst_reg_offset     = REG_PMU_SWRST_APB_CTRL,
++	.rst_reg_mask       = 0x100000,
++};
++
++
++static struct clk tmr0_clk = {
++	.name               = "tmr0_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 10,
++	.div_reg_offset     = REG_PMU_CLK_DIV5,
++	.div_reg_mask       = 0xff0000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x20000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x40000,
++};
++
++static struct clk ac_clk = {
++	.name               = "ac_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV6,
++	.div_reg_mask       = 0x3f,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x800,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x1000,
++};
++
++static struct clk i2s_clk = {
++	.name               = "i2s_clk",
++	.parent             = {&ac_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV6,
++	.div_reg_mask       = 0x3f00,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x1000000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x2000,
++};
++
++static struct clk sadc_clk = {
++	.name               = "sadc_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV6,
++	.div_reg_mask       = 0x7f0000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x4000000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x10000,
++};
++
++static struct clk eth_clk = {
++	.name               = "eth_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 2,
++	.div_reg_offset     = REG_PMU_CLK_DIV6,
++	.div_reg_mask       = 0xf000000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x72000000,
++	.rst_reg_offset     = REG_PMU_SWRST_AHB_CTRL,
++	.rst_reg_mask       = 0x20000,
++};
++
++static struct clk efuse_clk = {
++	.name               = "efuse_clk",
++	.parent             = {&pll1_clk},
++	.prediv             = 1,
++	.div_reg_offset     = REG_PMU_CLK_DIV1,
++	.div_reg_mask       = 0x3f000000,
++	.en_reg_offset      = REG_PMU_CLK_GATE,
++	.en_reg_mask        = 0x200000,
++	.rst_reg_offset     = REG_PMU_SWRST_MAIN_CTRL,
++	.rst_reg_mask       = 0x800000,
++};
++
++struct clk_lookup fh_clks[] = {
++	CLK(NULL, "osc_clk", &osc_clk),
++	CLK(NULL, "pll0_clk", &pll0_clk),
++	CLK(NULL, "pll1_clk", &pll1_clk),
++
++	CLK(NULL, "arm_clk", &arm_clk),
++	CLK(NULL, "arc_clk", &arc_clk),
++	CLK(NULL, "axi_clk", &axi_clk),
++	CLK(NULL, "ahb_clk", &ahb_clk),
++	CLK(NULL, "apb_clk", &apb_clk),
++
++	CLK(NULL, "ddr_clk", &ddr_clk),
++	CLK(NULL, "isp_aclk", &isp_aclk),
++	CLK(NULL, "pae_clk", &pae_clk),
++	CLK(NULL, "bgm_clk", &bgm_clk),
++
++	CLK(NULL, "cis_clk_out", &cis_clk_out),
++	CLK(NULL, "cis_clk_out_revert", &cis_clk_out_revert),
++	CLK(NULL, "mipi_dphy_clk", &mipi_dphy_clk),
++	CLK(NULL, "mipi_pix_clk", &mipi_pix_clk),
++	CLK(NULL, "pix_clk", &pix_clk),
++	CLK(NULL, "pts_clk", &pts_clk),
++
++	CLK(NULL, "spi0_clk", &spi0_clk),
++	CLK(NULL, "spi1_clk", &spi1_clk),
++	CLK(NULL, "spi2_clk", &spi2_clk),
++	CLK(NULL, "sdc0_clk", &sdc0_clk),
++	CLK(NULL, "sdc1_clk", &sdc1_clk),
++	CLK(NULL, "uart0_clk", &uart0_clk),
++	CLK(NULL, "uart1_clk", &uart1_clk),
++	CLK(NULL, "i2c0_clk", &i2c0_clk),
++	CLK(NULL, "i2c1_clk", &i2c1_clk),
++	CLK(NULL, "pwm_clk", &pwm_clk),
++	CLK(NULL, "wdt_clk", &wdt_clk),
++	CLK(NULL, "tmr0_clk", &tmr0_clk),
++	CLK(NULL, "ac_clk", &ac_clk),
++	CLK(NULL, "i2s_clk", &i2s_clk),
++	CLK(NULL, "sadc_clk", &sadc_clk),
++	CLK(NULL, "eth_clk", &eth_clk),
++	CLK(NULL, "efuse_clk", &efuse_clk),
++
++	CLK(NULL, NULL, NULL),
++};
++
++EXPORT_SYMBOL(fh_clks);
+diff --git a/arch/arm/mach-fh/fh_simple_timer.c b/arch/arm/mach-fh/fh_simple_timer.c
+new file mode 100644
+index 00000000..b3c8e929
+--- /dev/null
++++ b/arch/arm/mach-fh/fh_simple_timer.c
+@@ -0,0 +1,165 @@
++#include <linux/module.h>
++#include <mach/fh_simple_timer.h>
++
++/* #define FH_TIMER_DEBUG */
++#ifdef FH_TIMER_DEBUG
++#define PRINT_DBG(fmt, args...)	printk(fmt, ##args)
++#else
++#define PRINT_DBG(fmt, args...)	do {} while (0)
++#endif
++
++struct simple_time_base {
++	struct timerqueue_head	simple_timer_queue;
++	int			state;
++};
++
++static struct simple_time_base base;
++
++static void fh_timer_enable(void)
++{
++	SET_REG(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x3);
++}
++
++static void fh_timer_disable(void)
++{
++	SET_REG(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x0);
++}
++
++static void fh_timer_clearirq(void)
++{
++	GET_REG(VTIMER(REG_TIMER_EOI_REG(SIMPLE_TIMER_BASE)));
++}
++
++void fh_simple_timer_set_next(long cycles)
++{
++	long curr_val;
++
++	PRINT_DBG("cycles: %lu\n", cycles);
++
++	if(cycles < 0)
++	{
++		pr_err("ERROR: cycles is invaild: %lu\n", cycles);
++		fh_timer_clearirq();
++		fh_timer_disable();
++		base.state = SIMPLE_TIMER_ERROR;
++		return;
++	}
++
++	SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x00, 0x0);
++	SET_REG(VTIMER(REG_TIMER_LOADCNT(SIMPLE_TIMER_BASE)), cycles);
++	SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x01, 0x1);
++#ifdef CONFIG_USE_PTS_AS_CLOCKSOURCE
++	curr_val = GET_REG(VTIMER(REG_TIMER_CUR_VAL(SIMPLE_TIMER_BASE)));
++	if (curr_val > 0x80000000) { /* was 0xffff0000 */
++		panic("timer curr %ld, want cycles %ld\n", curr_val, cycles);
++
++		/* unreachable after panic(); vendor recovery path kept as-is */
++		SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(SIMPLE_TIMER_BASE)), 0x01, 0x1);
++		SET_REG(VTIMER(REG_TIMER_LOADCNT(SIMPLE_TIMER_BASE)), cycles);
++
++		/* pulse the timer soft reset in the PMU and wait for release */
++		fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfffbffff);
++		while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff)
++			;
++	}
++
++	fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfffbffff);
++	while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff)
++		;
++#endif
++}
++
++int fh_simple_timer_create(struct fh_simple_timer *new)
++{
++	if (base.state == SIMPLE_TIMER_START) {
++		pr_err("ERROR: simple timer is already running\n");
++		return -EBUSY;
++	}
++	timerqueue_init(&new->node);
++	new->node.expires = new->it_value;
++	timerqueue_add(&base.simple_timer_queue, &new->node);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(fh_simple_timer_create);
++
++int fh_timer_start(void)
++{
++	struct fh_simple_timer *timer = NULL;
++	struct timerqueue_node *node;
++
++	node = timerqueue_getnext(&base.simple_timer_queue);
++
++	if (node == NULL) {
++		pr_err("ERROR: timerqueue is empty\n");
++		return -1;
++	}
++
++	timer = container_of(node, struct fh_simple_timer, node);
++
++	base.state = SIMPLE_TIMER_START;
++	fh_timer_enable();
++	fh_simple_timer_set_next(ktime_to_us(ktime_sub(timer->it_value, timer->it_delay)));
++	return 0;
++}
++EXPORT_SYMBOL_GPL(fh_timer_start);
++
++int fh_simple_timer_interrupt(void)
++{
++	ktime_t diff;
++	struct fh_simple_timer *curr = NULL, *next = NULL;
++	struct timerqueue_node *node;
++
++	node = timerqueue_getnext(&base.simple_timer_queue);
++
++	if (node == NULL) {
++		pr_err("ERROR: timerqueue is empty\n");
++		fh_timer_clearirq();
++		fh_timer_disable();
++		base.state = SIMPLE_TIMER_ERROR;
++		return -1;
++	}
++
++	curr = container_of(node, struct fh_simple_timer, node);
++
++	timerqueue_del(&base.simple_timer_queue, &curr->node);
++
++	curr->function(curr->param);
++
++	node = timerqueue_getnext(&base.simple_timer_queue);
++
++	if (node == NULL) {
++		PRINT_DBG("finished all timers, close device\n");
++		fh_timer_clearirq();
++		fh_timer_disable();
++		base.state = SIMPLE_TIMER_STOP;
++		return 0;
++	}
++
++	next = container_of(node, struct fh_simple_timer, node);
++
++	PRINT_DBG("sec: %lu, nsec: %lu\n", ktime_to_timespec(next->it_value).tv_sec,
++			ktime_to_timespec(next->it_value).tv_nsec);
++
++	diff = ktime_sub(next->it_value, curr->it_value);
++
++	fh_simple_timer_set_next(ktime_to_us(ktime_sub(diff, next->it_delay)));
++	fh_timer_clearirq();
++	return 0;
++}
++
++
++int fh_simple_timer_init(void)
++{
++	base.state = SIMPLE_TIMER_STOP;
++	timerqueue_init_head(&base.simple_timer_queue);
++	fh_timer_disable();
++	return 0;
++}
++EXPORT_SYMBOL_GPL(fh_simple_timer_init);
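++
++/*
++ * A minimal usage sketch (not part of the original patch; the exact layout
++ * of struct fh_simple_timer lives in mach/fh_simple_timer.h, so the field
++ * names below are only those referenced by the code above):
++ *
++ *	static void my_expiry(void *param) { pr_info("timer fired\n"); }
++ *
++ *	static struct fh_simple_timer t = {
++ *		.function = my_expiry,
++ *		.param    = NULL,
++ *	};
++ *
++ *	t.it_value = ktime_add_us(ktime_get(), 1000);	(fire in 1 ms)
++ *	t.it_delay = ktime_set(0, 0);
++ *	fh_simple_timer_create(&t);
++ *	fh_timer_start();
++ */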
+diff --git a/arch/arm/mach-fh/include/mach/board_config.h b/arch/arm/mach-fh/include/mach/board_config.h
+new file mode 100644
+index 00000000..128fbcd2
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/board_config.h
+@@ -0,0 +1,40 @@
++/*
++ * board_config.h
++ *
++ *  Created on: Jan 9, 2017
++ *      Author: duobao
++ */
++
++#ifndef BOARD_CONFIG_H_
++#define BOARD_CONFIG_H_
++
++/*
++ * GPIO0  -> IRCUT_ON
++ * GPIO1  -> IRCUT_OFF
++ * GPIO2  -> PHY Reset
++ * GPIO3  -> IR
++ * GPIO13 -> Sensor Reset
++ * GPIO14 -> Sensor Power Down
++ * GPIO55 -> CSN1
++ */
++
++#define CONFIG_GPIO_EMACPHY_RESET	2
++#define CONFIG_GPIO_EMACPHY_RXDV	41
++#define CONFIG_SD_WP_FIXED
++
++#define CONFIG_PINCTRL_SELECT					\
++	"MIPI", "RMII", "UART0", "USB", "DWI2S",		\
++	"I2C0", "SSI0", "SD0_CARD_1BIT",			\
++	"GPIO0", "GPIO1", "GPIO2", "GPIO3",			\
++	"GPIO13",						\
++								\
++	"GPIO4", "GPIO11", "GPIO5", "GPIO6", "GPIO7",		\
++	"GPIO8", "GPIO9", "GPIO10", "GPIO14", "GPIO19",		\
++	"GPIO20", "GPIO21", "GPIO23", "GPIO28", "GPIO29",	\
++	"GPIO30", "GPIO31", "GPIO32", "GPIO33", "GPIO35",	\
++	"GPIO36", "GPIO37", "GPIO39", "GPIO40", "GPIO44",	\
++	"GPIO45", "GPIO47", "GPIO50", "GPIO51",	"GPIO55",	\
++	"GPIO61",						\
++    
++    
++#endif /* BOARD_CONFIG_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/chip.h b/arch/arm/mach-fh/include/mach/chip.h
+new file mode 100644
+index 00000000..ca6bcf56
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/chip.h
+@@ -0,0 +1,19 @@
++/*****************************************************************************
++*
++*                                  chip.h
++*
++*	Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
++*						All Rights Reserved. Confidential.
++*
++*	File Description:
++*		Chip definition. Include the base address of each module, memory
++*		address, memory size
++*
++*	Modification History:
++*
++******************************************************************************/
++#ifndef _CHIP_H_
++#define _CHIP_H_
++
++#include <mach/fh8833.h>
++#endif
+diff --git a/arch/arm/mach-fh/include/mach/clkdev.h b/arch/arm/mach-fh/include/mach/clkdev.h
+new file mode 100644
+index 00000000..14a50488
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/clkdev.h
+@@ -0,0 +1,15 @@
++#ifndef __MACH_CLKDEV_H
++#define __MACH_CLKDEV_H
++
++struct clk;
++
++static inline int __clk_get(struct clk *clk)
++{
++	return 1;
++}
++
++static inline void __clk_put(struct clk *clk)
++{
++}
++
++#endif
+diff --git a/arch/arm/mach-fh/include/mach/clock.h b/arch/arm/mach-fh/include/mach/clock.h
+new file mode 100644
+index 00000000..a14b3763
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/clock.h
+@@ -0,0 +1,89 @@
++/*
++ *	Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
++ *				All Rights Reserved. Confidential.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ASM_ARCH_FH_CLOCK_H
++#define __ASM_ARCH_FH_CLOCK_H
++
++#include <linux/list.h>
++#include <linux/clkdev.h>
++
++#define CLOCK_MAX_PARENT    4
++
++#define OSC_FREQUENCY		(24000000)
++
++#define CLOCK_FIXED         	(1<<0)
++#define CLOCK_NOGATE        	(1<<1)
++#define CLOCK_NODIV         	(1<<2)
++#define CLOCK_NORESET       	(1<<3)
++#define CLOCK_MULTI_PARENT  	(1<<4)
++#define CLOCK_PLL		(1<<5)
++
++
++#define CLK_IOCTL_MAGIC 		'c'
++#define ENABLE_CLK	 		_IOWR(CLK_IOCTL_MAGIC, 0, unsigned int)
++#define DISABLE_CLK		 	_IOWR(CLK_IOCTL_MAGIC, 1, unsigned int)
++#define SET_CLK_RATE			_IOWR(CLK_IOCTL_MAGIC, 2, unsigned int)
++#define GET_CLK_RATE			_IOWR(CLK_IOCTL_MAGIC, 3, unsigned int)
++#define SET_PMU				_IOWR(CLK_IOCTL_MAGIC, 4, unsigned int)
++#define GET_PMU				_IOWR(CLK_IOCTL_MAGIC, 5, unsigned int)
++
++#define CLK_IOCTL_MAXNR 		8
++
++
++#define CLK(dev, con, ck) 	\
++		{					\
++			.dev_id = dev,	\
++			.con_id = con,	\
++			.clk = ck,		\
++		}
++
++struct clk_usr {
++	char			*name;
++	unsigned long       	frequency;
++};
++
++
++struct clk {
++	struct list_head    list;
++	const char          *name;
++	unsigned long       frequency;
++	unsigned int        flag;
++	int                 select;
++	struct clk         *parent[CLOCK_MAX_PARENT];
++	int                 prediv;
++	int                 divide;
++	unsigned int        div_reg_offset;
++	unsigned int        div_reg_mask;
++	unsigned int        en_reg_offset;
++	unsigned int        en_reg_mask;
++	unsigned int        rst_reg_offset;
++	unsigned int        rst_reg_mask;
++	unsigned int        sel_reg_offset;
++	unsigned int        sel_reg_mask;
++	unsigned int        def_rate;
++};
++
++extern int clk_register(struct clk *clk);
++extern void clk_unregister(struct clk *clk);
++
++void clk_set_clk_sel(unsigned int reg);
++unsigned int clk_get_clk_sel(void);
++
++int fh_clk_init(void);
++int fh_clk_procfs_init(void);
++int fh_clk_misc_init(void);
++
++extern struct clk_lookup fh_clks[];
++
++#endif
+diff --git a/arch/arm/mach-fh/include/mach/ddrc.h b/arch/arm/mach-fh/include/mach/ddrc.h
+new file mode 100644
+index 00000000..b6cdb5b7
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/ddrc.h
+@@ -0,0 +1,30 @@
++/*
++ *	Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
++ *				All Rights Reserved. Confidential.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++#ifndef DDRC_H_
++#define DDRC_H_
++
++#define OFFSET_DENAL_CTL_31         (0x007c)
++#define OFFSET_DENAL_CTL_57         (0x00e4)
++#define OFFSET_DENAL_CTL_97         (0x0184)
++
++#define DDRC_CONTROLLER_BUSY        (1 << 24)
++#define DDRC_CKE_STATUS             (1 << 8)
++
++#define DDRC_LP_CMD_SELFREFRESH     (10 << 8)
++#define DDRC_LP_CMD_EXITLOWPOWER    (1 << 8)
++
++#define DDRC_LPI_SR_WAKEUP_TIME     (3 << 24)
++#define DDRC_CKSRX_DELAY            (1 << 0)
++
++#endif /* DDRC_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/debug-macro.S b/arch/arm/mach-fh/include/mach/debug-macro.S
+new file mode 100644
+index 00000000..3f542607
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/debug-macro.S
+@@ -0,0 +1,50 @@
++/* linux/arch/arm/mach-fh/include/mach/debug-macro.S
++ *
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++*/
++
++/* pull in the relevant register and map files. */
++
++	/* note, for the boot process to work we have to keep the UART
++	 * virtual address aligned to an 1MiB boundary for the L1
++	 * mapping the head code makes. We keep the UART virtual address
++	 * aligned and add in the offset when we load the value here.
++	 */
++
++
++#include <linux/serial_reg.h>
++#include <mach/hardware.h>
++#include <asm/memory.h>
++#include <mach/chip.h>
++
++#include <mach/io.h>
++
++	.macro addruart, rp, rv
++		ldr	\rp, =CONSOLE_REG_BASE
++		ldr	\rv, =VA_CONSOLE_REG_BASE
++	.endm
++
++	.macro senduart,data,addr
++		strb	\data, [\addr, #(0x00)]	@ write to the Transmitter Holding Register
++	.endm
++
++	.macro waituart,data,addr
++1001:		ldr	\data, [\addr, #(0x14)]	@ read the Line Status Register
++		tst	\data, #(0x40)		@ loop while the transmitter is busy
++		beq	1001b
++	.endm
++
++	.macro busyuart,data,addr
++1002:		ldr	\data, [\addr, #(0x14)]
++		tst	\data, #(0x40)		@ busy-wait until the TX FIFO drains
++		beq	1002b
++	.endm
+diff --git a/arch/arm/mach-fh/include/mach/entry-macro.S b/arch/arm/mach-fh/include/mach/entry-macro.S
+new file mode 100644
+index 00000000..6eea8639
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/entry-macro.S
+@@ -0,0 +1,31 @@
++#include <mach/io.h>
++#include <mach/irqs.h>
++
++		.macro	disable_fiq
++		.endm
++
++		.macro  get_irqnr_preamble, base, tmp
++		ldr \base, =VA_INTC_REG_BASE
++		.endm
++
++		.macro  arch_ret_to_user, tmp1, tmp2
++		.endm
++
++		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
++		@ check low interrupts
++		ldr		\irqstat, [\base, #0x30]
++		mov		\irqnr, #31
++		ands	\irqstat, \irqstat, #0xffffffff
++
++		@ if no low interrupts set, check high interrupts
++		ldreq	\irqstat, [\base, #0x34]
++		moveq	\irqnr, #63
++		andeqs	\irqstat, \irqstat, #0xffffffff
++
++		@ find first active interrupt source
++		clzne	\irqstat, \irqstat
++		subne	\irqnr, \irqnr, \irqstat
++		.endm
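++
++		@ Worked example: if only bit 5 of the low status word
++		@ (offset 0x30) is set, clz gives 26 and irqnr = 31 - 26 = 5;
++		@ the high word (offset 0x34) maps onto IRQs 32..63 the same
++		@ way, starting from irqnr = 63.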
++
++		.macro	irq_prio_table
++		.endm
+diff --git a/arch/arm/mach-fh/include/mach/fh8833.h b/arch/arm/mach-fh/include/mach/fh8833.h
+new file mode 100644
+index 00000000..ceed7b96
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fh8833.h
+@@ -0,0 +1,258 @@
++/*
++ *
++ * Copyright (C) 2015 Fullhan.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ASM_ARCH_FH8833_H
++#define __ASM_ARCH_FH8833_H
++
++#include <linux/init.h>
++
++#define SRAM_GRANULARITY		32
++#define SRAM_SIZE			SZ_32K
++
++#define SIMPLE_TIMER_BASE		2
++
++#define RAM_BASE			(0x10000000)
++#define DDR_BASE			(0xA0000000)
++
++#define PMU_REG_BASE			(0xF0000000)
++#define TIMER_REG_BASE			(0xF0C00000)
++#define GPIO0_REG_BASE			(0xF0300000)
++#define GPIO1_REG_BASE			(0xF4000000)
++#define UART0_REG_BASE			(0xF0700000)
++#define UART1_REG_BASE			(0xF0800000)
++#define SPI0_REG_BASE			(0xF0500000)
++#define SPI1_REG_BASE			(0xF0600000)
++#define SPI2_REG_BASE			(0xF0640000)
++#define INTC_REG_BASE			(0xE0200000)
++#define GMAC_REG_BASE			(0xE0600000)
++#define USBC_REG_BASE			(0xE0700000)
++#define DMAC_REG_BASE			(0xE0300000)
++#define I2C1_REG_BASE			(0xF0B00000)
++#define I2C0_REG_BASE			(0xF0200000)
++#define SDC0_REG_BASE			(0xE2000000)
++#define SDC1_REG_BASE			(0xE2200000)
++#define WDT_REG_BASE			(0xF0D00000)
++#define PWM_REG_BASE			(0xF0400000)
++#define PAE_REG_BASE			(0xE7000000)
++#define I2S_REG_BASE			(0xF0900000)
++#define ACW_REG_BASE			(0xF0A00000)
++#define SADC_REG_BASE			(0xF1200000)
++#define EFUSE_REG_BASE			(0xF1600000)
++#define AES_REG_BASE			(0xE8200000)
++#define RTC_REG_BASE			(0xF1500000)
++#define DDRC_REG_BASE			(0xED000000)
++#define CONSOLE_REG_BASE		UART0_REG_BASE
++#define FH_UART_NUMBER			2
++
++#define PMU_REG_SIZE                  0x2018
++#define PMU_DEBUG
++
++#define REG_PMU_CHIP_ID                  (0x0000)
++#define REG_PMU_IP_VER                   (0x0004)
++#define REG_PMU_FW_VER                   (0x0008)
++#define REG_PMU_SYS_CTRL                 (0x000c)
++#define REG_PMU_PLL0                     (0x0010)
++#define REG_PMU_PLL1                     (0x0014)
++#define REG_PMU_PLL2                     (0x0018)
++#define REG_PMU_CLK_GATE                 (0x001c)
++#define REG_PMU_CLK_SEL                  (0x0020)
++#define REG_PMU_CLK_DIV0                 (0x0024)
++#define REG_PMU_CLK_DIV1                 (0x0028)
++#define REG_PMU_CLK_DIV2                 (0x002c)
++#define REG_PMU_CLK_DIV3                 (0x0030)
++#define REG_PMU_CLK_DIV4                 (0x0034)
++#define REG_PMU_CLK_DIV5                 (0x0038)
++#define REG_PMU_CLK_DIV6                 (0x003c)
++#define REG_PMU_SWRST_MAIN_CTRL          (0x0040)
++#define REG_PMU_SWRST_AXI_CTRL           (0x0044)
++#define REG_PMU_SWRST_AHB_CTRL           (0x0048)
++#define REG_PMU_SWRST_APB_CTRL           (0x004c)
++#define REG_PMU_SPC_IO_STATUS            (0x0054)
++#define REG_PMU_SPC_FUN                  (0x0058)
++#define REG_PMU_DBG_SPOT0                (0x005c)
++#define REG_PMU_DBG_SPOT1                (0x0060)
++#define REG_PMU_DBG_SPOT2                (0x0064)
++#define REG_PMU_DBG_SPOT3                (0x0068)
++
++#define REG_PMU_PAD_CIS_HSYNC_CFG        (0x0080)
++#define REG_PMU_PAD_CIS_VSYNC_CFG        (0x0084)
++#define REG_PMU_PAD_CIS_PCLK_CFG         (0x0088)
++#define REG_PMU_PAD_CIS_D_0_CFG          (0x008c)
++#define REG_PMU_PAD_CIS_D_1_CFG          (0x0090)
++#define REG_PMU_PAD_CIS_D_2_CFG          (0x0094)
++#define REG_PMU_PAD_CIS_D_3_CFG          (0x0098)
++#define REG_PMU_PAD_CIS_D_4_CFG          (0x009c)
++#define REG_PMU_PAD_CIS_D_5_CFG          (0x00a0)
++#define REG_PMU_PAD_CIS_D_6_CFG          (0x00a4)
++#define REG_PMU_PAD_CIS_D_7_CFG          (0x00a8)
++#define REG_PMU_PAD_CIS_D_8_CFG          (0x00ac)
++#define REG_PMU_PAD_CIS_D_9_CFG          (0x00b0)
++#define REG_PMU_PAD_CIS_D_10_CFG         (0x00b4)
++#define REG_PMU_PAD_CIS_D_11_CFG         (0x00b8)
++#define REG_PMU_PAD_MAC_RMII_CLK_CFG     (0x00bc)
++#define REG_PMU_PAD_MAC_REF_CLK_CFG      (0x00c0)
++#define REG_PMU_PAD_MAC_MDC_CFG          (0x00c4)
++#define REG_PMU_PAD_MAC_MDIO_CFG         (0x00c8)
++#define REG_PMU_PAD_MAC_COL_MII_CFG      (0x00cc)
++#define REG_PMU_PAD_MAC_CRS_MII_CFG      (0x00d0)
++#define REG_PMU_PAD_MAC_RXCK_CFG         (0x00d4)
++#define REG_PMU_PAD_MAC_RXD0_CFG         (0x00d8)
++#define REG_PMU_PAD_MAC_RXD1_CFG         (0x00dc)
++#define REG_PMU_PAD_MAC_RXD2_MII_CFG     (0x00e0)
++#define REG_PMU_PAD_MAC_RXD3_MII_CFG     (0x00e4)
++#define REG_PMU_PAD_MAC_RXDV_CFG         (0x00e8)
++#define REG_PMU_PAD_MAC_TXCK_CFG         (0x00ec)
++#define REG_PMU_PAD_MAC_TXD0_CFG         (0x00f0)
++#define REG_PMU_PAD_MAC_TXD1_CFG         (0x00f4)
++#define REG_PMU_PAD_MAC_TXD2_MII_CFG     (0x00f8)
++#define REG_PMU_PAD_MAC_TXD3_MII_CFG     (0x00fc)
++#define REG_PMU_PAD_MAC_TXEN_CFG         (0x0100)
++#define REG_PMU_PAD_MAC_RXER_MII_CFG     (0x0104)
++#define REG_PMU_PAD_MAC_TXER_MII_CFG     (0x0108)
++#define REG_PMU_PAD_GPIO_0_CFG           (0x010c)
++#define REG_PMU_PAD_GPIO_1_CFG           (0x0110)
++#define REG_PMU_PAD_GPIO_2_CFG           (0x0114)
++#define REG_PMU_PAD_GPIO_3_CFG           (0x0118)
++#define REG_PMU_PAD_GPIO_4_CFG           (0x011c)
++#define REG_PMU_PAD_GPIO_5_CFG           (0x0120)
++#define REG_PMU_PAD_GPIO_6_CFG           (0x0124)
++#define REG_PMU_PAD_GPIO_7_CFG           (0x0128)
++#define REG_PMU_PAD_GPIO_8_CFG           (0x012c)
++#define REG_PMU_PAD_GPIO_9_CFG           (0x0130)
++#define REG_PMU_PAD_GPIO_10_CFG          (0x0134)
++#define REG_PMU_PAD_GPIO_11_CFG          (0x0138)
++#define REG_PMU_PAD_GPIO_12_CFG          (0x013c)
++#define REG_PMU_PAD_GPIO_13_CFG          (0x0140)
++#define REG_PMU_PAD_GPIO_14_CFG          (0x0144)
++#define REG_PMU_PAD_UART_RX_CFG          (0x0148)
++#define REG_PMU_PAD_UART_TX_CFG          (0x014c)
++#define REG_PMU_PAD_CIS_SCL_CFG          (0x0150)
++#define REG_PMU_PAD_CIS_SDA_CFG          (0x0154)
++#define REG_PMU_PAD_I2C_SCL_CFG          (0x0158)
++#define REG_PMU_PAD_I2C_SDA_CFG          (0x015c)
++#define REG_PMU_PAD_SSI0_CLK_CFG         (0x0160)
++#define REG_PMU_PAD_SSI0_TXD_CFG         (0x0164)
++#define REG_PMU_PAD_SSI0_CSN_0_CFG       (0x0168)
++#define REG_PMU_PAD_SSI0_CSN_1_CFG       (0x016c)
++#define REG_PMU_PAD_SSI0_RXD_CFG         (0x0170)
++#define REG_PMU_PAD_SD0_CD_CFG           (0x0174)
++#define REG_PMU_PAD_SD0_WP_CFG           (0x0178)
++#define REG_PMU_PAD_SD0_CLK_CFG          (0x017c)
++#define REG_PMU_PAD_SD0_CMD_RSP_CFG      (0x0180)
++#define REG_PMU_PAD_SD0_DATA_0_CFG       (0x0184)
++#define REG_PMU_PAD_SD0_DATA_1_CFG       (0x0188)
++#define REG_PMU_PAD_SD0_DATA_2_CFG       (0x018c)
++#define REG_PMU_PAD_SD0_DATA_3_CFG       (0x0190)
++#define REG_PMU_PAD_SD1_CD_CFG           (0x0194)
++#define REG_PMU_PAD_SD1_WP_CFG           (0x0198)
++#define REG_PMU_PAD_SD1_CLK_CFG          (0x019c)
++#define REG_PMU_PAD_SD1_CMD_RSP_CFG      (0x01a0)
++#define REG_PMU_PAD_SD1_DATA_0_CFG       (0x01a4)
++#define REG_PMU_PAD_SD1_DATA_1_CFG       (0x01a8)
++#define REG_PMU_PAD_SD1_DATA_2_CFG       (0x01ac)
++#define REG_PMU_PAD_SD1_DATA_3_CFG       (0x01b0)
++#define REG_PMU_AXI0_PRIO_CFG0           (0x01b4)
++#define REG_PMU_AXI0_PRIO_CFG1           (0x01b8)
++#define REG_PMU_AXI1_PRIO_CFG0           (0x01bc)
++#define REG_PMU_AXI1_PRIO_CFG1           (0x01c0)
++#define REG_PMU_SWRSTN_NSR               (0x01c4)
++#define REG_PMU_ARM_INT_0                (0x01e0)
++#define REG_PMU_ARM_INT_1                (0x01e4)
++#define REG_PMU_ARM_INT_2                (0x01e8)
++#define REG_PMU_A625_INT_0               (0x01ec)
++#define REG_PMU_A625_INT_1               (0x01f0)
++#define REG_PMU_A625_INT_2               (0x01f4)
++#define REG_PMU_DMA                      (0x01f8)
++#define REG_PMU_WDT_CTRL                 (0x01fc)
++#define REG_PMU_DBG_STAT0                (0x0200)
++#define REG_PMU_DBG_STAT1                (0x0204)
++#define REG_PMU_DBG_STAT2                (0x0208)
++#define REG_PMU_DBG_STAT3                (0x020c)
++#define REG_PMU_USB_SYS                  (0x0210)
++#define REG_PMU_USB_CFG                  (0x0214)
++#define REG_PMU_USB_TUNE                 (0x0218)
++#define REG_PMU_PAD_CIS_CLK_CFG          (0x021c)
++#define REG_PMU_PAEARCBOOT0              (0x1000)
++#define REG_PMU_PAEARCBOOT1              (0x1004)
++#define REG_PMU_PAEARCBOOT2              (0x1008)
++#define REG_PMU_PAEARCBOOT3              (0x100c)
++#define REG_PMU_PAE_ARC_START_CTRL       (0x1010)
++#define REG_PMU_A625BOOT0                (0x2000)
++#define REG_PMU_A625BOOT1                (0x2004)
++#define REG_PMU_A625BOOT2                (0x2008)
++#define REG_PMU_A625BOOT3                (0x200c)
++#define REG_PMU_A625_START_CTRL          (0x2010)
++#define REG_PMU_ARC_INTC_MASK            (0x2014)
++#define REG_PMU_PAE_ARC_INTC_MASK        (0x2018)
++
++/*ATTENTION: written by ARC */
++#define PMU_ARM_INT_MASK             (0x01e0)
++#define PMU_ARM_INT_RAWSTAT          (0x01e4)
++#define PMU_ARM_INT_STAT             (0x01e8)
++
++#define PMU_A625_INT_MASK             (0x01ec)
++#define PMU_A625_INT_RAWSTAT          (0x01f0)
++#define PMU_A625_INT_STAT             (0x01f4)
++
++#define ARM_PMU_IRQ		0
++#define DDRC_IRQ		1
++#define WDT_IRQ			2
++#define TMR0_IRQ		3
++#define PAE_ARC_IRQ0		4
++#define PAE_ARC_IRQ1		5
++#define PAE_ARC_IRQ2		6
++#define ISPP_IRQ		7
++#define ISPF_IRQ		8
++#define VPU_IRQ			9
++#define PAE_IRQ			10
++#define I2C0_IRQ		11
++#define I2C1_IRQ		12
++#define JPEG_IRQ		13
++#define BGM_IRQ			14
++#define GMAC_IRQ		15
++#define AES_IRQ			16
++#define SDC0_IRQ		17
++#define SDC1_IRQ		18
++#define ACW_IRQ			19
++#define SADC_IRQ		20
++#define SPI1_IRQ		21
++#define SPI2_IRQ		22
++#define DMAC0_IRQ		23
++#define DMAC1_IRQ		24
++#define I2S0_IRQ		25
++#define GPIO0_IRQ		26
++#define USBC_IRQ		27
++#define SPI0_IRQ		28
++#define ARC_SW_IRQ		29
++#define UART0_IRQ		30
++#define UART1_IRQ		31
++#define ARM_SW_IRQ		32
++#define RTC_IRQ			33
++#define AHBC0_IRQ		34
++#define AHBC1_IRQ		35
++#define PWM_IRQ			36
++#define MIPIC_IRQ		37
++#define MIPI_WRAP_IRQ		38
++
++#define GPIO1_IRQ		40
++#define USBC_IDHV_IRQ		41
++#define USBC_OTG_IRQ		42
++#define USBC_DP_IRQ		43
++#define USBC_DM_IRQ		44
++
++#define NR_INTERNAL_IRQS	(64)
++#define NR_EXTERNAL_IRQS	(64)
++#define NR_IRQS			(NR_INTERNAL_IRQS + NR_EXTERNAL_IRQS)
++
++#endif /* __ASM_ARCH_FH8833_H */
+diff --git a/arch/arm/mach-fh/include/mach/fh8833_iopad_mipi.h b/arch/arm/mach-fh/include/mach/fh8833_iopad_mipi.h
+new file mode 100644
+index 00000000..bd9c3146
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fh8833_iopad_mipi.h
+@@ -0,0 +1,647 @@
++
++#include "pinctrl.h"
++#include "pinctrl_osdep.h"
++#include "board_config.h"
++
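++/*
++ * PINCTRL_FUNC(signal, pad, FUNCn, pull) appears to declare that pad <pad>
++ * carries <signal> on mux setting FUNCn with the given default pull;
++ * PINCTRL_MUX then groups the pads that can carry one signal, and
++ * PINCTRL_DEVICE groups the muxes one peripheral needs (the macros are
++ * defined in pinctrl.h, which is not part of this hunk).
++ */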
++PINCTRL_FUNC(CIS_HSYNC, 0, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO20, 0, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(CIS_VSYNC, 1, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO21, 1, FUNC1, PUPD_UP);
++PINCTRL_FUNC(PWM0, 1, FUNC2, PUPD_UP);
++PINCTRL_FUNC(UART1_TX, 1, FUNC3, PUPD_UP);
++PINCTRL_FUNC(CIS_PCLK, 2, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO19, 2, FUNC1, PUPD_UP);
++PINCTRL_FUNC(PWM1, 2, FUNC2, PUPD_UP);
++PINCTRL_FUNC(UART1_RX, 2, FUNC3, PUPD_UP);
++PINCTRL_FUNC(CIS_D_0, 3, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO22, 3, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(PWM2, 3, FUNC2, PUPD_DOWN);
++PINCTRL_FUNC(USB_PWREN, 3, FUNC3, PUPD_DOWN);
++PINCTRL_FUNC(AD_I2S_DI, 3, FUNC4, PUPD_DOWN);
++PINCTRL_FUNC(RTC_CLK, 3, FUNC5, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_1, 4, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO23, 4, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(PWM3, 4, FUNC2, PUPD_DOWN);
++PINCTRL_FUNC(AC_MCLK, 4, FUNC3, PUPD_DOWN);
++PINCTRL_FUNC(AD_I2S_CLK, 4, FUNC4, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_2, 5, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO24, 5, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(PWM4, 5, FUNC2, PUPD_DOWN);
++PINCTRL_FUNC(I2S_DI, 5, FUNC3, PUPD_DOWN);
++PINCTRL_FUNC(AD_I2S_WS, 5, FUNC4, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_3, 6, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO25, 6, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(PWM5, 6, FUNC2, PUPD_DOWN);
++PINCTRL_FUNC(I2S_CLK, 6, FUNC3, PUPD_DOWN);
++PINCTRL_FUNC(DA_I2S_DO, 6, FUNC4, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_4, 7, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO26, 7, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(PWM6, 7, FUNC2, PUPD_DOWN);
++PINCTRL_FUNC(I2S_WS, 7, FUNC3, PUPD_DOWN);
++PINCTRL_FUNC(DA_I2S_WS, 7, FUNC4, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_5, 8, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO27, 8, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(PWM7, 8, FUNC2, PUPD_DOWN);
++PINCTRL_FUNC(I2S_DO, 8, FUNC3, PUPD_DOWN);
++PINCTRL_FUNC(DA_I2S_CLK, 8, FUNC4, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_6, 9, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO28, 9, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_7, 10, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO29, 10, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_8, 11, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO30, 11, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_9, 12, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO31, 12, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_10, 13, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO32, 13, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(CIS_D_11, 14, FUNC0, PUPD_DOWN);
++PINCTRL_FUNC(GPIO33, 14, FUNC1, PUPD_DOWN);
++PINCTRL_FUNC(MAC_RMII_CLK, 15, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO15, 15, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SD1_CLK, 15, FUNC2, PUPD_UP);
++PINCTRL_FUNC(PWM0, 15, FUNC3, PUPD_UP);
++PINCTRL_FUNC(USB_PWREN, 15, FUNC4, PUPD_UP);
++PINCTRL_FUNC(I2S_DI, 15, FUNC5, PUPD_UP);
++PINCTRL_FUNC(MAC_REF_CLK, 16, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(MAC_MDC, 17, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO34, 17, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SD1_WP, 17, FUNC2, PUPD_UP);
++PINCTRL_FUNC(I2C1_SDA, 17, FUNC3, PUPD_UP);
++PINCTRL_FUNC(UART1_TX, 17, FUNC4, PUPD_UP);
++PINCTRL_FUNC(SSI1_CLK, 17, FUNC5, PUPD_UP);
++PINCTRL_FUNC(MAC_MDIO, 18, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO17, 18, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(ARM_JTAG_TDO, 18, FUNC2, PUPD_NONE);
++PINCTRL_FUNC(I2C1_SCL, 18, FUNC3, PUPD_NONE);
++PINCTRL_FUNC(UART1_RX, 18, FUNC4, PUPD_NONE);
++PINCTRL_FUNC(PWM7, 18, FUNC5, PUPD_NONE);
++PINCTRL_FUNC(PWM0, 19, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO35, 19, FUNC1, PUPD_UP);
++PINCTRL_FUNC(PWM1, 20, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO36, 20, FUNC1, PUPD_UP);
++PINCTRL_FUNC(MAC_RXCK, 21, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(MAC_RXD_0, 22, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO16, 22, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SD1_DATA_0, 22, FUNC2, PUPD_UP);
++PINCTRL_FUNC(PWM1, 22, FUNC3, PUPD_UP);
++PINCTRL_FUNC(AD_I2S_DI, 22, FUNC4, PUPD_UP);
++PINCTRL_FUNC(I2S_CLK, 22, FUNC5, PUPD_UP);
++PINCTRL_FUNC(MAC_RXD_1, 23, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO38, 23, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SD1_DATA_1, 23, FUNC2, PUPD_UP);
++PINCTRL_FUNC(PWM2, 23, FUNC3, PUPD_UP);
++PINCTRL_FUNC(AD_I2S_CLK, 23, FUNC4, PUPD_UP);
++PINCTRL_FUNC(I2S_WS, 23, FUNC5, PUPD_UP);
++PINCTRL_FUNC(PWM2, 24, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO39, 24, FUNC1, PUPD_UP);
++PINCTRL_FUNC(PWM3, 25, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO40, 25, FUNC1, PUPD_UP);
++PINCTRL_FUNC(MAC_RXDV, 26, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO41, 26, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SD1_CD, 26, FUNC2, PUPD_UP);
++PINCTRL_FUNC(PWM3, 26, FUNC3, PUPD_UP);
++PINCTRL_FUNC(AD_I2S_WS, 26, FUNC4, PUPD_UP);
++PINCTRL_FUNC(I2S_DO, 26, FUNC5, PUPD_UP);
++PINCTRL_FUNC(MAC_TXCK, 27, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(MAC_TXD_0, 28, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO42, 28, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(SD1_DATA_2, 28, FUNC2, PUPD_NONE);
++PINCTRL_FUNC(PWM4, 28, FUNC3, PUPD_NONE);
++PINCTRL_FUNC(DA_I2S_DO, 28, FUNC4, PUPD_NONE);
++PINCTRL_FUNC(SSI1_TXD, 28, FUNC5, PUPD_NONE);
++PINCTRL_FUNC(MAC_TXD_1, 29, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO43, 29, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(SD1_DATA_3, 29, FUNC2, PUPD_NONE);
++PINCTRL_FUNC(PWM5, 29, FUNC3, PUPD_NONE);
++PINCTRL_FUNC(DA_I2S_WS, 29, FUNC4, PUPD_NONE);
++PINCTRL_FUNC(SSI1_RXD, 29, FUNC5, PUPD_NONE);
++PINCTRL_FUNC(PWM4, 30, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO44, 30, FUNC1, PUPD_UP);
++PINCTRL_FUNC(PWM5, 31, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO45, 31, FUNC1, PUPD_UP);
++PINCTRL_FUNC(MAC_TXEN, 32, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO46, 32, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(SD1_CMD_RSP, 32, FUNC2, PUPD_NONE);
++PINCTRL_FUNC(PWM6, 32, FUNC3, PUPD_NONE);
++PINCTRL_FUNC(DA_I2S_CLK, 32, FUNC4, PUPD_NONE);
++PINCTRL_FUNC(AC_MCLK, 32, FUNC5, PUPD_NONE);
++PINCTRL_FUNC(PWM6, 33, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO47, 33, FUNC1, PUPD_UP);
++PINCTRL_FUNC(PWM7, 34, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO37, 34, FUNC1, PUPD_UP);
++PINCTRL_FUNC(ARM_JTAG_TRSTN, 35, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO0, 35, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SSI1_TXD, 35, FUNC2, PUPD_UP);
++PINCTRL_FUNC(SSI2_TXD, 35, FUNC3, PUPD_UP);
++PINCTRL_FUNC(PWM0, 35, FUNC4, PUPD_UP);
++PINCTRL_FUNC(I2S_DI, 35, FUNC5, PUPD_UP);
++PINCTRL_FUNC(ARM_JTAG_TMS, 36, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO1, 36, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SSI1_RXD, 36, FUNC2, PUPD_UP);
++PINCTRL_FUNC(SSI2_RXD, 36, FUNC3, PUPD_UP);
++PINCTRL_FUNC(PWM1, 36, FUNC4, PUPD_UP);
++PINCTRL_FUNC(I2S_WS, 36, FUNC5, PUPD_UP);
++PINCTRL_FUNC(ARM_JTAG_TCK, 37, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO2, 37, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SSI1_CLK, 37, FUNC2, PUPD_UP);
++PINCTRL_FUNC(SSI2_CLK, 37, FUNC3, PUPD_UP);
++PINCTRL_FUNC(PWM2, 37, FUNC4, PUPD_UP);
++PINCTRL_FUNC(I2S_DO, 37, FUNC5, PUPD_UP);
++PINCTRL_FUNC(ARM_JTAG_TDI, 38, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO3, 38, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SSI1_CSN_0, 38, FUNC2, PUPD_UP);
++PINCTRL_FUNC(SSI2_CSN, 38, FUNC3, PUPD_UP);
++PINCTRL_FUNC(PWM3, 38, FUNC4, PUPD_UP);
++PINCTRL_FUNC(I2S_CLK, 38, FUNC5, PUPD_UP);
++PINCTRL_FUNC(GPIO4, 39, FUNC0, PUPD_UP);
++PINCTRL_FUNC(ARM_JTAG_TCK, 40, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO5, 40, FUNC1, PUPD_UP);
++PINCTRL_FUNC(ARM_JTAG_TRSTN, 41, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO6, 41, FUNC1, PUPD_UP);
++PINCTRL_FUNC(ARM_JTAG_TMS, 42, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO7, 42, FUNC1, PUPD_UP);
++PINCTRL_FUNC(ARM_JTAG_TDI, 43, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO8, 43, FUNC1, PUPD_UP);
++PINCTRL_FUNC(ARM_JTAG_TDO, 44, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO9, 44, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(UART1_TX, 45, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO10, 45, FUNC1, PUPD_UP);
++PINCTRL_FUNC(UART1_RX, 46, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO11, 46, FUNC1, PUPD_UP);
++PINCTRL_FUNC(PWM0, 47, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO12, 47, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(CIS_CLK, 47, FUNC2, PUPD_NONE);
++PINCTRL_FUNC(PWM1, 48, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO13, 48, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(PWM2, 49, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO14, 49, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(UART0_RX, 50, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO48, 50, FUNC1, PUPD_UP);
++PINCTRL_FUNC(I2S_WS, 50, FUNC2, PUPD_UP);
++PINCTRL_FUNC(UART0_TX, 51, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO49, 51, FUNC1, PUPD_UP);
++PINCTRL_FUNC(I2S_CLK, 51, FUNC2, PUPD_UP);
++PINCTRL_FUNC(I2C0_SCL, 52, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO57, 52, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(I2C0_SDA, 53, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO56, 53, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(I2C1_SCL, 54, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO51, 54, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(I2S_DI, 54, FUNC2, PUPD_NONE);
++PINCTRL_FUNC(I2C1_SDA, 55, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO50, 55, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(I2S_DO, 55, FUNC2, PUPD_NONE);
++PINCTRL_FUNC(SSI0_CLK, 56, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(SSI0_TXD, 57, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(SSI0_CSN_0, 58, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO54, 58, FUNC1, PUPD_UP);
++PINCTRL_FUNC(SSI0_CSN_1, 59, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO55, 59, FUNC1, PUPD_UP);
++PINCTRL_FUNC(USB_PWREN, 59, FUNC2, PUPD_UP);
++PINCTRL_FUNC(AC_MCLK, 59, FUNC3, PUPD_UP);
++PINCTRL_FUNC(PWM3, 59, FUNC4, PUPD_UP);
++PINCTRL_FUNC(UART1_TX, 59, FUNC5, PUPD_UP);
++PINCTRL_FUNC(SSI0_RXD, 60, FUNC0, PUPD_UP);
++PINCTRL_FUNC(SD0_CD, 61, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO52, 61, FUNC1, PUPD_UP);
++PINCTRL_FUNC(ARC_JTAG_TRSTN, 61, FUNC2, PUPD_UP);
++PINCTRL_FUNC(PAE_JTAG_TRSTN, 61, FUNC3, PUPD_UP);
++PINCTRL_FUNC(PWM5, 61, FUNC4, PUPD_UP);
++PINCTRL_FUNC(SSI2_TXD, 61, FUNC5, PUPD_UP);
++PINCTRL_FUNC(SD0_WP, 62, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO53, 62, FUNC1, PUPD_UP);
++PINCTRL_FUNC(USB_PWREN, 62, FUNC2, PUPD_UP);
++PINCTRL_FUNC(SD0_CLK, 63, FUNC0, PUPD_NONE);
++PINCTRL_FUNC(GPIO63, 63, FUNC1, PUPD_NONE);
++PINCTRL_FUNC(ARC_JTAG_TMS, 63, FUNC2, PUPD_NONE);
++PINCTRL_FUNC(PAE_JTAG_TMS, 63, FUNC3, PUPD_NONE);
++PINCTRL_FUNC(PWM6, 63, FUNC4, PUPD_NONE);
++PINCTRL_FUNC(SSI2_RXD, 63, FUNC5, PUPD_NONE);
++PINCTRL_FUNC(SD0_CMD_RSP, 64, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO11, 64, FUNC1, PUPD_UP);
++PINCTRL_FUNC(ARC_JTAG_TCK, 64, FUNC2, PUPD_UP);
++PINCTRL_FUNC(PAE_JTAG_TCK, 64, FUNC3, PUPD_UP);
++PINCTRL_FUNC(PWM7, 64, FUNC4, PUPD_UP);
++PINCTRL_FUNC(SSI2_CLK, 64, FUNC5, PUPD_UP);
++PINCTRL_FUNC(SD0_DATA_0, 65, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO62, 65, FUNC1, PUPD_UP);
++PINCTRL_FUNC(ARC_JTAG_TDI, 65, FUNC2, PUPD_UP);
++PINCTRL_FUNC(PAE_JTAG_TDI, 65, FUNC3, PUPD_UP);
++PINCTRL_FUNC(PWM4, 65, FUNC4, PUPD_UP);
++PINCTRL_FUNC(SSI2_CSN, 65, FUNC5, PUPD_UP);
++PINCTRL_FUNC(SD0_DATA_1, 66, FUNC0, PUPD_UP);
++PINCTRL_FUNC(GPIO61, 66, FUNC1, PUPD_UP);
++PINCTRL_FUNC(ARC_JTAG_TDO, 66, FUNC2, PUPD_UP);
++PINCTRL_FUNC(PAE_JTAG_TDO, 66, FUNC3, PUPD_UP);
++PINCTRL_FUNC(SD0_WP, 66, FUNC4, PUPD_UP);
++PINCTRL_FUNC(UART1_TX, 66, FUNC5, PUPD_UP);
++
++PINCTRL_MUX(USB_PWREN, 0, &PAD3_USB_PWREN, &PAD15_USB_PWREN,
++		&PAD59_USB_PWREN, &PAD62_USB_PWREN);
++PINCTRL_MUX(AC_MCLK, 2, &PAD4_AC_MCLK, &PAD32_AC_MCLK, &PAD59_AC_MCLK);
++
++PINCTRL_MUX(ACI2S_AD_CLK, 1, &PAD4_AD_I2S_CLK, &PAD23_AD_I2S_CLK);
++PINCTRL_MUX(ACI2S_AD_DI, 1, &PAD3_AD_I2S_DI, &PAD22_AD_I2S_DI);
++PINCTRL_MUX(ACI2S_AD_WS, 0, &PAD5_AD_I2S_WS, &PAD26_AD_I2S_WS);
++PINCTRL_MUX(ACI2S_DA_CLK, 0, &PAD8_DA_I2S_CLK, &PAD32_DA_I2S_CLK);
++PINCTRL_MUX(ACI2S_DA_DO, 0, &PAD6_DA_I2S_DO, &PAD28_DA_I2S_DO);
++PINCTRL_MUX(ACI2S_DA_WS, 0, &PAD7_DA_I2S_WS, &PAD29_DA_I2S_WS);
++
++PINCTRL_MUX(DWI2S_CLK, 0, &PAD6_I2S_CLK, &PAD22_I2S_CLK,
++		&PAD38_I2S_CLK, &PAD51_I2S_CLK);
++PINCTRL_MUX(DWI2S_DI, 0, &PAD5_I2S_DI, &PAD15_I2S_DI,
++		&PAD35_I2S_DI, &PAD54_I2S_DI);
++PINCTRL_MUX(DWI2S_DO, 0, &PAD8_I2S_DO, &PAD26_I2S_DO,
++		&PAD37_I2S_DO, &PAD55_I2S_DO);
++PINCTRL_MUX(DWI2S_WS, 0, &PAD7_I2S_WS, &PAD23_I2S_WS,
++		&PAD36_I2S_WS, &PAD50_I2S_WS);
++
++PINCTRL_MUX(ARCJTAG_TCK, 0, &PAD64_ARC_JTAG_TCK);
++PINCTRL_MUX(ARCJTAG_TDI, 0, &PAD65_ARC_JTAG_TDI);
++PINCTRL_MUX(ARCJTAG_TDO, 0, &PAD66_ARC_JTAG_TDO);
++PINCTRL_MUX(ARCJTAG_TMS, 0, &PAD63_ARC_JTAG_TMS);
++PINCTRL_MUX(ARCJTAG_TRSTN, 0, &PAD61_ARC_JTAG_TRSTN);
++
++PINCTRL_MUX(ARMJTAG_TCK, 1, &PAD37_ARM_JTAG_TCK, &PAD40_ARM_JTAG_TCK);
++PINCTRL_MUX(ARMJTAG_TDI, 1, &PAD38_ARM_JTAG_TDI, &PAD43_ARM_JTAG_TDI);
++PINCTRL_MUX(ARMJTAG_TDO, 1, &PAD18_ARM_JTAG_TDO, &PAD44_ARM_JTAG_TDO);
++PINCTRL_MUX(ARMJTAG_TMS, 1, &PAD36_ARM_JTAG_TMS, &PAD42_ARM_JTAG_TMS);
++PINCTRL_MUX(ARMJTAG_TRSTN, 1, &PAD35_ARM_JTAG_TRSTN, &PAD41_ARM_JTAG_TRSTN);
++
++PINCTRL_MUX(PAEJTAG_TCK, 0, &PAD64_PAE_JTAG_TCK);
++PINCTRL_MUX(PAEJTAG_TDI, 0, &PAD65_PAE_JTAG_TDI);
++PINCTRL_MUX(PAEJTAG_TDO, 0, &PAD66_PAE_JTAG_TDO);
++PINCTRL_MUX(PAEJTAG_TMS, 0, &PAD63_PAE_JTAG_TMS);
++PINCTRL_MUX(PAEJTAG_TRSTN, 0, &PAD61_PAE_JTAG_TRSTN);
++
++PINCTRL_MUX(CIS_CLK, 0, &PAD47_CIS_CLK);
++PINCTRL_MUX(CIS_PCLK, 0, &PAD2_CIS_PCLK);
++PINCTRL_MUX(CIS_HSYNC, 0, &PAD0_CIS_HSYNC);
++PINCTRL_MUX(CIS_VSYNC, 0, &PAD1_CIS_VSYNC);
++PINCTRL_MUX(CIS_D_0, 0, &PAD3_CIS_D_0);
++PINCTRL_MUX(CIS_D_1, 0, &PAD4_CIS_D_1);
++PINCTRL_MUX(CIS_D_2, 0, &PAD5_CIS_D_2);
++PINCTRL_MUX(CIS_D_3, 0, &PAD6_CIS_D_3);
++PINCTRL_MUX(CIS_D_4, 0, &PAD7_CIS_D_4);
++PINCTRL_MUX(CIS_D_5, 0, &PAD8_CIS_D_5);
++PINCTRL_MUX(CIS_D_6, 0, &PAD9_CIS_D_6);
++PINCTRL_MUX(CIS_D_7, 0, &PAD10_CIS_D_7);
++PINCTRL_MUX(CIS_D_8, 0, &PAD11_CIS_D_8);
++PINCTRL_MUX(CIS_D_9, 0, &PAD12_CIS_D_9);
++PINCTRL_MUX(CIS_D_10, 0, &PAD13_CIS_D_10);
++PINCTRL_MUX(CIS_D_11, 0, &PAD14_CIS_D_11);
++
++PINCTRL_MUX(I2C0_SCL, 0, &PAD52_I2C0_SCL);
++PINCTRL_MUX(I2C0_SDA, 0, &PAD53_I2C0_SDA);
++
++PINCTRL_MUX(I2C1_SCL, 1, &PAD18_I2C1_SCL, &PAD54_I2C1_SCL);
++PINCTRL_MUX(I2C1_SDA, 1, &PAD17_I2C1_SDA, &PAD55_I2C1_SDA);
++
++PINCTRL_MUX(MAC_REF_CLK, 0, &PAD16_MAC_REF_CLK);
++PINCTRL_MUX(MAC_RMII_CLK, 0, &PAD15_MAC_RMII_CLK);
++PINCTRL_MUX(MAC_MDC, 0, &PAD17_MAC_MDC);
++PINCTRL_MUX(MAC_MDIO, 0, &PAD18_MAC_MDIO);
++PINCTRL_MUX(MAC_RXCK, 0, &PAD21_MAC_RXCK);
++PINCTRL_MUX(MAC_RXD_0, 0, &PAD22_MAC_RXD_0);
++PINCTRL_MUX(MAC_RXD_1, 0, &PAD23_MAC_RXD_1);
++PINCTRL_MUX(MAC_RXDV, 0, &PAD26_MAC_RXDV);
++PINCTRL_MUX(MAC_TXCK, 0, &PAD27_MAC_TXCK);
++PINCTRL_MUX(MAC_TXD_0, 0, &PAD28_MAC_TXD_0);
++PINCTRL_MUX(MAC_TXD_1, 0, &PAD29_MAC_TXD_1);
++PINCTRL_MUX(MAC_TXEN, 0, &PAD32_MAC_TXEN);
++
++PINCTRL_MUX(PWM0, 3, &PAD1_PWM0, &PAD15_PWM0,
++		&PAD19_PWM0, &PAD35_PWM0, &PAD47_PWM0);
++PINCTRL_MUX(PWM1, 3, &PAD2_PWM1, &PAD20_PWM1,
++		&PAD22_PWM1, &PAD36_PWM1, &PAD48_PWM1);
++PINCTRL_MUX(PWM2, 3, &PAD3_PWM2, &PAD23_PWM2,
++		&PAD24_PWM2, &PAD37_PWM2, &PAD49_PWM2);
++PINCTRL_MUX(PWM3, 3, &PAD4_PWM3, &PAD25_PWM3,
++		&PAD26_PWM3, &PAD38_PWM3, &PAD59_PWM3);
++PINCTRL_MUX(PWM4, 2, &PAD5_PWM4, &PAD28_PWM4, &PAD30_PWM4,
++		&PAD65_PWM4);
++PINCTRL_MUX(PWM5, 2, &PAD6_PWM5, &PAD29_PWM5, &PAD31_PWM5,
++		&PAD61_PWM5);
++PINCTRL_MUX(PWM6, 2, &PAD7_PWM6, &PAD32_PWM6, &PAD33_PWM6,
++		&PAD63_PWM6);
++PINCTRL_MUX(PWM7, 2, &PAD8_PWM7, &PAD18_PWM7, &PAD34_PWM7,
++		&PAD64_PWM7);
++
++PINCTRL_MUX(SD0_CLK, 0, &PAD63_SD0_CLK);
++PINCTRL_MUX(SD0_CD, 0, &PAD61_SD0_CD);
++PINCTRL_MUX(SD0_CMD_RSP, 0, &PAD64_SD0_CMD_RSP);
++PINCTRL_MUX(SD0_WP, 0, &PAD62_SD0_WP, &PAD66_SD0_WP);
++PINCTRL_MUX(SD0_DATA_0, 0, &PAD65_SD0_DATA_0);
++PINCTRL_MUX(SD0_DATA_1, 0, &PAD66_SD0_DATA_1);
++
++PINCTRL_MUX(SD1_CLK, 0, &PAD15_SD1_CLK);
++PINCTRL_MUX(SD1_CD, 0, &PAD26_SD1_CD);
++PINCTRL_MUX(SD1_CMD_RSP, 0, &PAD32_SD1_CMD_RSP);
++PINCTRL_MUX(SD1_WP, 0, &PAD17_SD1_WP);
++PINCTRL_MUX(SD1_DATA_0, 0, &PAD22_SD1_DATA_0);
++PINCTRL_MUX(SD1_DATA_1, 0, &PAD23_SD1_DATA_1);
++PINCTRL_MUX(SD1_DATA_2, 0, &PAD28_SD1_DATA_2);
++PINCTRL_MUX(SD1_DATA_3, 0, &PAD29_SD1_DATA_3);
++
++PINCTRL_MUX(SSI0_CLK, 0, &PAD56_SSI0_CLK);
++PINCTRL_MUX(SSI0_RXD, 0, &PAD60_SSI0_RXD);
++PINCTRL_MUX(SSI0_TXD, 0, &PAD57_SSI0_TXD);
++PINCTRL_MUX(SSI0_CSN_0, 0, &PAD58_SSI0_CSN_0);
++PINCTRL_MUX(SSI0_CSN_1, 0, &PAD59_SSI0_CSN_1);
++
++PINCTRL_MUX(SSI1_CLK, 0, &PAD37_SSI1_CLK);
++PINCTRL_MUX(SSI1_RXD, 1, &PAD29_SSI1_RXD, &PAD36_SSI1_RXD);
++PINCTRL_MUX(SSI1_TXD, 1, &PAD28_SSI1_TXD, &PAD35_SSI1_TXD);
++PINCTRL_MUX(SSI1_CSN_0, 0, &PAD38_SSI1_CSN_0);
++
++PINCTRL_MUX(SSI2_CLK, 0, &PAD37_SSI2_CLK, &PAD64_SSI2_CLK);
++PINCTRL_MUX(SSI2_RXD, 0, &PAD36_SSI2_RXD, &PAD63_SSI2_RXD);
++PINCTRL_MUX(SSI2_TXD, 0, &PAD35_SSI2_TXD, &PAD61_SSI2_TXD);
++PINCTRL_MUX(SSI2_CSN, 0, &PAD38_SSI2_CSN, &PAD65_SSI2_CSN);
++
++PINCTRL_MUX(UART0_RX, 0, &PAD50_UART0_RX);
++PINCTRL_MUX(UART0_TX, 0, &PAD51_UART0_TX);
++PINCTRL_MUX(UART1_RX, 0, &PAD2_UART1_RX, &PAD18_UART1_RX,
++		&PAD46_UART1_RX);
++PINCTRL_MUX(UART1_TX, 0, &PAD1_UART1_TX, &PAD17_UART1_TX,
++		&PAD45_UART1_TX, &PAD59_UART1_TX, &PAD66_UART1_TX);
++
++PINCTRL_MUX(GPIO0, 0, &PAD35_GPIO0);
++PINCTRL_MUX(GPIO1, 0, &PAD36_GPIO1);
++PINCTRL_MUX(GPIO2, 0, &PAD37_GPIO2);
++PINCTRL_MUX(GPIO3, 0, &PAD38_GPIO3);
++PINCTRL_MUX(GPIO4, 0, &PAD39_GPIO4);
++PINCTRL_MUX(GPIO5, 0, &PAD40_GPIO5);
++PINCTRL_MUX(GPIO6, 0, &PAD41_GPIO6);
++PINCTRL_MUX(GPIO7, 0, &PAD42_GPIO7);
++PINCTRL_MUX(GPIO8, 0, &PAD43_GPIO8);
++PINCTRL_MUX(GPIO9, 0, &PAD44_GPIO9);
++PINCTRL_MUX(GPIO10, 0, &PAD45_GPIO10);
++PINCTRL_MUX(GPIO11, 0, &PAD46_GPIO11, &PAD64_GPIO11);
++PINCTRL_MUX(GPIO12, 0, &PAD47_GPIO12);
++PINCTRL_MUX(GPIO13, 0, &PAD48_GPIO13);
++PINCTRL_MUX(GPIO14, 0, &PAD49_GPIO14);
++PINCTRL_MUX(GPIO15, 0, &PAD15_GPIO15);
++PINCTRL_MUX(GPIO16, 0, &PAD22_GPIO16);
++PINCTRL_MUX(GPIO17, 0, &PAD18_GPIO17);
++PINCTRL_MUX(GPIO19, 0, &PAD2_GPIO19);
++PINCTRL_MUX(GPIO20, 0, &PAD0_GPIO20);
++PINCTRL_MUX(GPIO21, 0, &PAD1_GPIO21);
++PINCTRL_MUX(GPIO22, 0, &PAD3_GPIO22);
++PINCTRL_MUX(GPIO23, 0, &PAD4_GPIO23);
++PINCTRL_MUX(GPIO24, 0, &PAD5_GPIO24);
++PINCTRL_MUX(GPIO25, 0, &PAD6_GPIO25);
++PINCTRL_MUX(GPIO26, 0, &PAD7_GPIO26);
++PINCTRL_MUX(GPIO27, 0, &PAD8_GPIO27);
++PINCTRL_MUX(GPIO28, 0, &PAD9_GPIO28);
++PINCTRL_MUX(GPIO29, 0, &PAD10_GPIO29);
++PINCTRL_MUX(GPIO30, 0, &PAD11_GPIO30);
++PINCTRL_MUX(GPIO31, 0, &PAD12_GPIO31);
++PINCTRL_MUX(GPIO32, 0, &PAD13_GPIO32);
++PINCTRL_MUX(GPIO33, 0, &PAD14_GPIO33);
++PINCTRL_MUX(GPIO34, 0, &PAD17_GPIO34);
++PINCTRL_MUX(GPIO35, 0, &PAD19_GPIO35);
++PINCTRL_MUX(GPIO36, 0, &PAD20_GPIO36);
++PINCTRL_MUX(GPIO37, 0, &PAD34_GPIO37);
++PINCTRL_MUX(GPIO38, 0, &PAD23_GPIO38);
++PINCTRL_MUX(GPIO39, 0, &PAD24_GPIO39);
++PINCTRL_MUX(GPIO40, 0, &PAD25_GPIO40);
++PINCTRL_MUX(GPIO41, 0, &PAD26_GPIO41);
++PINCTRL_MUX(GPIO42, 0, &PAD28_GPIO42);
++PINCTRL_MUX(GPIO43, 0, &PAD29_GPIO43);
++PINCTRL_MUX(GPIO44, 0, &PAD30_GPIO44);
++PINCTRL_MUX(GPIO45, 0, &PAD31_GPIO45);
++PINCTRL_MUX(GPIO46, 0, &PAD32_GPIO46);
++PINCTRL_MUX(GPIO47, 0, &PAD33_GPIO47);
++PINCTRL_MUX(GPIO48, 0, &PAD50_GPIO48);
++PINCTRL_MUX(GPIO49, 0, &PAD51_GPIO49);
++PINCTRL_MUX(GPIO50, 0, &PAD55_GPIO50);
++PINCTRL_MUX(GPIO51, 0, &PAD54_GPIO51);
++PINCTRL_MUX(GPIO52, 0, &PAD61_GPIO52);
++PINCTRL_MUX(GPIO53, 0, &PAD62_GPIO53);
++PINCTRL_MUX(GPIO54, 0, &PAD58_GPIO54);
++PINCTRL_MUX(GPIO55, 0, &PAD59_GPIO55);
++PINCTRL_MUX(GPIO56, 0, &PAD53_GPIO56);
++PINCTRL_MUX(GPIO57, 0, &PAD52_GPIO57);
++PINCTRL_MUX(GPIO61, 0, &PAD66_GPIO61);
++PINCTRL_MUX(GPIO62, 0, &PAD65_GPIO62);
++PINCTRL_MUX(GPIO63, 0, &PAD63_GPIO63);
++
++PINCTRL_DEVICE(USB, 1, &MUX_USB_PWREN);
++PINCTRL_DEVICE(AC, 1, &MUX_AC_MCLK);
++PINCTRL_DEVICE(ACI2S, 6, &MUX_ACI2S_AD_CLK, &MUX_ACI2S_AD_DI, &MUX_ACI2S_AD_WS,
++		&MUX_ACI2S_DA_CLK, &MUX_ACI2S_DA_DO, &MUX_ACI2S_DA_WS);
++PINCTRL_DEVICE(DWI2S, 4, &MUX_DWI2S_CLK, &MUX_DWI2S_DI,
++		&MUX_DWI2S_DO, &MUX_DWI2S_WS);
++PINCTRL_DEVICE(ARCJTAG, 5, &MUX_ARCJTAG_TCK, &MUX_ARCJTAG_TDI, &MUX_ARCJTAG_TDO,
++		&MUX_ARCJTAG_TMS, &MUX_ARCJTAG_TRSTN);
++PINCTRL_DEVICE(ARMJTAG, 5, &MUX_ARMJTAG_TCK, &MUX_ARMJTAG_TDI, &MUX_ARMJTAG_TDO,
++		&MUX_ARMJTAG_TMS, &MUX_ARMJTAG_TRSTN);
++PINCTRL_DEVICE(PAEJTAG, 5, &MUX_PAEJTAG_TCK, &MUX_PAEJTAG_TDI, &MUX_PAEJTAG_TDO,
++		&MUX_PAEJTAG_TMS, &MUX_PAEJTAG_TRSTN);
++PINCTRL_DEVICE(CIS, 16, &MUX_CIS_CLK, &MUX_CIS_PCLK, &MUX_CIS_HSYNC,
++		&MUX_CIS_VSYNC, &MUX_CIS_D_0, &MUX_CIS_D_1, &MUX_CIS_D_2,
++		&MUX_CIS_D_3, &MUX_CIS_D_4, &MUX_CIS_D_5, &MUX_CIS_D_6,
++		&MUX_CIS_D_7, &MUX_CIS_D_8, &MUX_CIS_D_9, &MUX_CIS_D_10,
++		&MUX_CIS_D_11);
++PINCTRL_DEVICE(CIS_10BIT, 14, &MUX_CIS_CLK, &MUX_CIS_PCLK, &MUX_CIS_HSYNC,
++		&MUX_CIS_VSYNC, &MUX_CIS_D_2, &MUX_CIS_D_3,
++		&MUX_CIS_D_4, &MUX_CIS_D_5, &MUX_CIS_D_6, &MUX_CIS_D_7,
++		&MUX_CIS_D_8, &MUX_CIS_D_9, &MUX_CIS_D_10, &MUX_CIS_D_11);
++PINCTRL_DEVICE(MIPI, 1, &MUX_CIS_CLK);
++PINCTRL_DEVICE(I2C0, 2, &MUX_I2C0_SCL, &MUX_I2C0_SDA);
++PINCTRL_DEVICE(I2C1, 2, &MUX_I2C1_SCL, &MUX_I2C1_SDA);
++PINCTRL_DEVICE(RMII, 12, &MUX_MAC_REF_CLK, &MUX_MAC_RMII_CLK,
++		&MUX_MAC_MDC, &MUX_MAC_MDIO, &MUX_MAC_RXD_0,
++		&MUX_MAC_RXD_1, &MUX_MAC_RXDV, &MUX_MAC_TXD_0,
++		&MUX_MAC_TXD_1, &MUX_MAC_TXEN, &MUX_MAC_TXCK, &MUX_MAC_RXCK);
++PINCTRL_DEVICE(PWM0, 1, &MUX_PWM0);
++PINCTRL_DEVICE(PWM1, 1, &MUX_PWM1);
++PINCTRL_DEVICE(PWM2, 1, &MUX_PWM2);
++PINCTRL_DEVICE(PWM3, 1, &MUX_PWM3);
++PINCTRL_DEVICE(PWM4, 1, &MUX_PWM4);
++PINCTRL_DEVICE(PWM5, 1, &MUX_PWM5);
++PINCTRL_DEVICE(PWM6, 1, &MUX_PWM6);
++PINCTRL_DEVICE(PWM7, 1, &MUX_PWM7);
++PINCTRL_DEVICE(SD0, 6, &MUX_SD0_CLK, &MUX_SD0_CD, &MUX_SD0_CMD_RSP,
++		&MUX_SD0_WP, &MUX_SD0_DATA_0, &MUX_SD0_DATA_1);
++PINCTRL_DEVICE(SD0_CARD_1BIT, 5, &MUX_SD0_CLK, &MUX_SD0_CD, &MUX_SD0_CMD_RSP,
++		&MUX_SD0_WP, &MUX_SD0_DATA_0);
++PINCTRL_DEVICE(SD0_WIFI_2BIT, 4, &MUX_SD0_CLK, &MUX_SD0_CMD_RSP,
++		&MUX_SD0_DATA_0, &MUX_SD0_DATA_1);
++PINCTRL_DEVICE(SD1, 8, &MUX_SD1_CLK, &MUX_SD1_CD, &MUX_SD1_CMD_RSP,
++		&MUX_SD1_WP, &MUX_SD1_DATA_0, &MUX_SD1_DATA_1, &MUX_SD1_DATA_2,
++		&MUX_SD1_DATA_3);
++PINCTRL_DEVICE(SSI0, 4, &MUX_SSI0_CLK, &MUX_SSI0_RXD, &MUX_SSI0_TXD,
++		&MUX_GPIO54);
++PINCTRL_DEVICE(SSI1, 4, &MUX_SSI1_CLK, &MUX_SSI1_RXD, &MUX_SSI1_TXD,
++		&MUX_SSI1_CSN_0);
++PINCTRL_DEVICE(SSI2, 4, &MUX_SSI2_CLK, &MUX_SSI2_RXD, &MUX_SSI2_TXD,
++		&MUX_SSI2_CSN);
++PINCTRL_DEVICE(UART0, 2, &MUX_UART0_RX, &MUX_UART0_TX);
++PINCTRL_DEVICE(UART1, 2, &MUX_UART1_RX, &MUX_UART1_TX);
++PINCTRL_DEVICE(GPIO0, 1, &MUX_GPIO0);
++PINCTRL_DEVICE(GPIO1, 1, &MUX_GPIO1);
++PINCTRL_DEVICE(GPIO2, 1, &MUX_GPIO2);
++PINCTRL_DEVICE(GPIO3, 1, &MUX_GPIO3);
++PINCTRL_DEVICE(GPIO4, 1, &MUX_GPIO4);
++PINCTRL_DEVICE(GPIO5, 1, &MUX_GPIO5);
++PINCTRL_DEVICE(GPIO6, 1, &MUX_GPIO6);
++PINCTRL_DEVICE(GPIO7, 1, &MUX_GPIO7);
++PINCTRL_DEVICE(GPIO8, 1, &MUX_GPIO8);
++PINCTRL_DEVICE(GPIO9, 1, &MUX_GPIO9);
++PINCTRL_DEVICE(GPIO10, 1, &MUX_GPIO10);
++PINCTRL_DEVICE(GPIO11, 1, &MUX_GPIO11);
++PINCTRL_DEVICE(GPIO12, 1, &MUX_GPIO12);
++PINCTRL_DEVICE(GPIO13, 1, &MUX_GPIO13);
++PINCTRL_DEVICE(GPIO14, 1, &MUX_GPIO14);
++PINCTRL_DEVICE(GPIO15, 1, &MUX_GPIO15);
++PINCTRL_DEVICE(GPIO16, 1, &MUX_GPIO16);
++PINCTRL_DEVICE(GPIO17, 1, &MUX_GPIO17);
++PINCTRL_DEVICE(GPIO19, 1, &MUX_GPIO19);
++PINCTRL_DEVICE(GPIO20, 1, &MUX_GPIO20);
++PINCTRL_DEVICE(GPIO21, 1, &MUX_GPIO21);
++PINCTRL_DEVICE(GPIO22, 1, &MUX_GPIO22);
++PINCTRL_DEVICE(GPIO23, 1, &MUX_GPIO23);
++PINCTRL_DEVICE(GPIO24, 1, &MUX_GPIO24);
++PINCTRL_DEVICE(GPIO25, 1, &MUX_GPIO25);
++PINCTRL_DEVICE(GPIO26, 1, &MUX_GPIO26);
++PINCTRL_DEVICE(GPIO27, 1, &MUX_GPIO27);
++PINCTRL_DEVICE(GPIO28, 1, &MUX_GPIO28);
++PINCTRL_DEVICE(GPIO29, 1, &MUX_GPIO29);
++PINCTRL_DEVICE(GPIO30, 1, &MUX_GPIO30);
++PINCTRL_DEVICE(GPIO31, 1, &MUX_GPIO31);
++PINCTRL_DEVICE(GPIO32, 1, &MUX_GPIO32);
++PINCTRL_DEVICE(GPIO33, 1, &MUX_GPIO33);
++PINCTRL_DEVICE(GPIO34, 1, &MUX_GPIO34);
++PINCTRL_DEVICE(GPIO35, 1, &MUX_GPIO35);
++PINCTRL_DEVICE(GPIO36, 1, &MUX_GPIO36);
++PINCTRL_DEVICE(GPIO37, 1, &MUX_GPIO37);
++PINCTRL_DEVICE(GPIO38, 1, &MUX_GPIO38);
++PINCTRL_DEVICE(GPIO39, 1, &MUX_GPIO39);
++PINCTRL_DEVICE(GPIO40, 1, &MUX_GPIO40);
++PINCTRL_DEVICE(GPIO41, 1, &MUX_GPIO41);
++PINCTRL_DEVICE(GPIO42, 1, &MUX_GPIO42);
++PINCTRL_DEVICE(GPIO43, 1, &MUX_GPIO43);
++PINCTRL_DEVICE(GPIO44, 1, &MUX_GPIO44);
++PINCTRL_DEVICE(GPIO45, 1, &MUX_GPIO45);
++PINCTRL_DEVICE(GPIO46, 1, &MUX_GPIO46);
++PINCTRL_DEVICE(GPIO47, 1, &MUX_GPIO47);
++PINCTRL_DEVICE(GPIO48, 1, &MUX_GPIO48);
++PINCTRL_DEVICE(GPIO49, 1, &MUX_GPIO49);
++PINCTRL_DEVICE(GPIO50, 1, &MUX_GPIO50);
++PINCTRL_DEVICE(GPIO51, 1, &MUX_GPIO51);
++PINCTRL_DEVICE(GPIO52, 1, &MUX_GPIO52);
++PINCTRL_DEVICE(GPIO53, 1, &MUX_GPIO53);
++PINCTRL_DEVICE(GPIO54, 1, &MUX_GPIO54);
++PINCTRL_DEVICE(GPIO55, 1, &MUX_GPIO55);
++PINCTRL_DEVICE(GPIO56, 1, &MUX_GPIO56);
++PINCTRL_DEVICE(GPIO57, 1, &MUX_GPIO57);
++PINCTRL_DEVICE(GPIO61, 1, &MUX_GPIO61);
++PINCTRL_DEVICE(GPIO62, 1, &MUX_GPIO62);
++PINCTRL_DEVICE(GPIO63, 1, &MUX_GPIO63);
++
++
++void fh_pinctrl_init_devicelist(OS_LIST *list)
++{
++    OS_LIST_EMPTY(list);
++
++    PINCTRL_ADD_DEVICE(USB);
++    PINCTRL_ADD_DEVICE(AC);
++    PINCTRL_ADD_DEVICE(ACI2S);
++    PINCTRL_ADD_DEVICE(DWI2S);
++    PINCTRL_ADD_DEVICE(ARCJTAG);
++    PINCTRL_ADD_DEVICE(ARMJTAG);
++    PINCTRL_ADD_DEVICE(PAEJTAG);
++    PINCTRL_ADD_DEVICE(CIS);
++    PINCTRL_ADD_DEVICE(MIPI);
++    PINCTRL_ADD_DEVICE(CIS_10BIT);
++
++    PINCTRL_ADD_DEVICE(I2C0);
++    PINCTRL_ADD_DEVICE(I2C1);
++    PINCTRL_ADD_DEVICE(RMII);
++    PINCTRL_ADD_DEVICE(PWM0);
++    PINCTRL_ADD_DEVICE(PWM1);
++    PINCTRL_ADD_DEVICE(PWM2);
++    PINCTRL_ADD_DEVICE(PWM3);
++    PINCTRL_ADD_DEVICE(PWM4);
++    PINCTRL_ADD_DEVICE(PWM5);
++    PINCTRL_ADD_DEVICE(PWM6);
++    PINCTRL_ADD_DEVICE(PWM7);
++    PINCTRL_ADD_DEVICE(SD0);
++    PINCTRL_ADD_DEVICE(SD0_CARD_1BIT);
++    PINCTRL_ADD_DEVICE(SD0_WIFI_2BIT);
++    PINCTRL_ADD_DEVICE(SD1);
++    PINCTRL_ADD_DEVICE(SSI0);
++    PINCTRL_ADD_DEVICE(SSI1);
++    PINCTRL_ADD_DEVICE(SSI2);
++    PINCTRL_ADD_DEVICE(UART0);
++    PINCTRL_ADD_DEVICE(UART1);
++    PINCTRL_ADD_DEVICE(GPIO0);
++    PINCTRL_ADD_DEVICE(GPIO1);
++    PINCTRL_ADD_DEVICE(GPIO2);
++    PINCTRL_ADD_DEVICE(GPIO3);
++    PINCTRL_ADD_DEVICE(GPIO4);
++    PINCTRL_ADD_DEVICE(GPIO5);
++    PINCTRL_ADD_DEVICE(GPIO6);
++    PINCTRL_ADD_DEVICE(GPIO7);
++    PINCTRL_ADD_DEVICE(GPIO8);
++    PINCTRL_ADD_DEVICE(GPIO9);
++    PINCTRL_ADD_DEVICE(GPIO10);
++    PINCTRL_ADD_DEVICE(GPIO11);
++    PINCTRL_ADD_DEVICE(GPIO12);
++    PINCTRL_ADD_DEVICE(GPIO13);
++    PINCTRL_ADD_DEVICE(GPIO14);
++    PINCTRL_ADD_DEVICE(GPIO15);
++    PINCTRL_ADD_DEVICE(GPIO16);
++    PINCTRL_ADD_DEVICE(GPIO17);
++    PINCTRL_ADD_DEVICE(GPIO19);
++    PINCTRL_ADD_DEVICE(GPIO20);
++    PINCTRL_ADD_DEVICE(GPIO21);
++    PINCTRL_ADD_DEVICE(GPIO22);
++    PINCTRL_ADD_DEVICE(GPIO23);
++    PINCTRL_ADD_DEVICE(GPIO24);
++    PINCTRL_ADD_DEVICE(GPIO25);
++    PINCTRL_ADD_DEVICE(GPIO26);
++    PINCTRL_ADD_DEVICE(GPIO27);
++    PINCTRL_ADD_DEVICE(GPIO28);
++    PINCTRL_ADD_DEVICE(GPIO29);
++    PINCTRL_ADD_DEVICE(GPIO30);
++    PINCTRL_ADD_DEVICE(GPIO31);
++    PINCTRL_ADD_DEVICE(GPIO32);
++    PINCTRL_ADD_DEVICE(GPIO33);
++    PINCTRL_ADD_DEVICE(GPIO34);
++    PINCTRL_ADD_DEVICE(GPIO35);
++    PINCTRL_ADD_DEVICE(GPIO36);
++    PINCTRL_ADD_DEVICE(GPIO37);
++    PINCTRL_ADD_DEVICE(GPIO38);
++    PINCTRL_ADD_DEVICE(GPIO39);
++    PINCTRL_ADD_DEVICE(GPIO40);
++    PINCTRL_ADD_DEVICE(GPIO41);
++    PINCTRL_ADD_DEVICE(GPIO42);
++    PINCTRL_ADD_DEVICE(GPIO43);
++    PINCTRL_ADD_DEVICE(GPIO44);
++    PINCTRL_ADD_DEVICE(GPIO45);
++    PINCTRL_ADD_DEVICE(GPIO46);
++    PINCTRL_ADD_DEVICE(GPIO47);
++    PINCTRL_ADD_DEVICE(GPIO48);
++    PINCTRL_ADD_DEVICE(GPIO49);
++    PINCTRL_ADD_DEVICE(GPIO50);
++    PINCTRL_ADD_DEVICE(GPIO51);
++    PINCTRL_ADD_DEVICE(GPIO52);
++    PINCTRL_ADD_DEVICE(GPIO53);
++    PINCTRL_ADD_DEVICE(GPIO54);
++    PINCTRL_ADD_DEVICE(GPIO55);
++    PINCTRL_ADD_DEVICE(GPIO56);
++    PINCTRL_ADD_DEVICE(GPIO57);
++    PINCTRL_ADD_DEVICE(GPIO61);
++    PINCTRL_ADD_DEVICE(GPIO62);
++    PINCTRL_ADD_DEVICE(GPIO63);
++}
++
++
++
++char *fh_pinctrl_selected_devices[] =
++{
++	CONFIG_PINCTRL_SELECT
++};
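++
++/*
++ * Note: CONFIG_PINCTRL_SELECT is expected to expand to a comma-separated
++ * list of device-name strings matching the PINCTRL_DEVICE() entries above.
++ * A hypothetical board configuration might look like:
++ *
++ *   #define CONFIG_PINCTRL_SELECT "UART0", "I2C0", "RMII", "SD0"
++ *
++ * so that only the listed devices have their pad muxes applied at init.
++ */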
+diff --git a/arch/arm/mach-fh/include/mach/fh_dmac.h b/arch/arm/mach-fh/include/mach/fh_dmac.h
+new file mode 100644
+index 00000000..c6d100b6
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fh_dmac.h
+@@ -0,0 +1,151 @@
++/*
++ * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
++ * AVR32 systems).
++ *
++ * Copyright (C) 2007 Atmel Corporation
++ * Copyright (C) 2010-2011 ST Microelectronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#ifndef FH_DMAC_H
++#define FH_DMAC_H
++
++#include <linux/dmaengine.h>
++
++/**
++ * enum fh_dma_slave_width - DMA slave register access width.
++ * @FH_DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
++ * @FH_DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
++ * @FH_DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
++ */
++enum fh_dma_slave_width {
++	FH_DMA_SLAVE_WIDTH_8BIT,
++	FH_DMA_SLAVE_WIDTH_16BIT,
++	FH_DMA_SLAVE_WIDTH_32BIT,
++};
++
++/* bursts size */
++enum fh_dma_msize {
++	FH_DMA_MSIZE_1,
++	FH_DMA_MSIZE_4,
++	FH_DMA_MSIZE_8,
++	FH_DMA_MSIZE_16,
++	FH_DMA_MSIZE_32,
++	FH_DMA_MSIZE_64,
++	FH_DMA_MSIZE_128,
++	FH_DMA_MSIZE_256,
++};
++
++/* flow controller */
++enum fh_dma_fc {
++	FH_DMA_FC_D_M2M,
++	FH_DMA_FC_D_M2P,
++	FH_DMA_FC_D_P2M,
++	FH_DMA_FC_D_P2P,
++	FH_DMA_FC_P_P2M,
++	FH_DMA_FC_SP_P2P,
++	FH_DMA_FC_P_M2P,
++	FH_DMA_FC_DP_P2P,
++};
++
++/**
++ * struct fh_dma_slave - Controller-specific information about a slave
++ *
++ * @dma_dev: required DMA master device
++ * @tx_reg: physical address of data register used for
++ *	memory-to-peripheral transfers
++ * @rx_reg: physical address of data register used for
++ *	peripheral-to-memory transfers
++ * @reg_width: peripheral register width
++ * @cfg_hi: Platform-specific initializer for the CFG_HI register
++ * @cfg_lo: Platform-specific initializer for the CFG_LO register
++ * @src_master: src master for transfers on allocated channel.
++ * @dst_master: dest master for transfers on allocated channel.
++ * @src_msize: src burst size.
++ * @dst_msize: dest burst size.
++ * @fc: flow controller for DMA transfer
++ */
++struct fh_dma_slave {
++	struct device		*dma_dev;
++	dma_addr_t		tx_reg;
++	dma_addr_t		rx_reg;
++	enum fh_dma_slave_width	reg_width;
++	u32			cfg_hi;
++	u32			cfg_lo;
++	u8			src_master;
++	u8			dst_master;
++	u8			src_msize;
++	u8			dst_msize;
++	u8			fc;
++};
++
++
++/**
++ * struct fh_dma_platform_data - Controller configuration parameters
++ * @nr_channels: Number of channels supported by hardware (max 8)
++ * @is_private: The device channels should be marked as private and not for
++ *	use by the general purpose DMA channel allocator.
++ * @chan_allocation_order: Allocate channels starting from 0 or 7
++ * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
++ * @block_size: Maximum block size supported by the controller
++ * @nr_masters: Number of AHB masters supported by the controller
++ * @data_width: Maximum data width supported by hardware per AHB master
++ *		(0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
++ */
++struct fh_dma_platform_data {
++	unsigned int	nr_channels;
++	bool		is_private;
++#define CHAN_ALLOCATION_ASCENDING	0	/* zero to seven */
++#define CHAN_ALLOCATION_DESCENDING	1	/* seven to zero */
++	unsigned char	chan_allocation_order;
++#define CHAN_PRIORITY_ASCENDING		0	/* chan0 highest */
++#define CHAN_PRIORITY_DESCENDING	1	/* chan7 highest */
++	unsigned char	chan_priority;
++	unsigned short	block_size;
++	unsigned char	nr_masters;
++	unsigned char	data_width[4];
++};
++
++/* Platform-configurable bits in CFG_HI */
++#define FHC_CFGH_FCMODE		(1 << 0)
++#define FHC_CFGH_FIFO_MODE	(1 << 1)
++#define FHC_CFGH_PROTCTL(x)	((x) << 2)
++#define FHC_CFGH_SRC_PER(x)	((x) << 7)
++#define FHC_CFGH_DST_PER(x)	((x) << 11)
++
++/* Platform-configurable bits in CFG_LO */
++#define FHC_CFGL_LOCK_CH_XFER	(0 << 12)	/* scope of LOCK_CH */
++#define FHC_CFGL_LOCK_CH_BLOCK	(1 << 12)
++#define FHC_CFGL_LOCK_CH_XACT	(2 << 12)
++#define FHC_CFGL_LOCK_BUS_XFER	(0 << 14)	/* scope of LOCK_BUS */
++#define FHC_CFGL_LOCK_BUS_BLOCK	(1 << 14)
++#define FHC_CFGL_LOCK_BUS_XACT	(2 << 14)
++#define FHC_CFGL_LOCK_CH	(1 << 15)	/* channel lockout */
++#define FHC_CFGL_LOCK_BUS	(1 << 16)	/* busmaster lockout */
++#define FHC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
++#define FHC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
++
++/* DMA API extensions */
++struct fh_cyclic_desc {
++	struct fh_desc	**desc;
++	unsigned long	periods;
++	void	(*period_callback)(void *param);
++	void		*period_callback_param;
++};
++
++struct fh_cyclic_desc *fh_dma_cyclic_prep(struct dma_chan *chan,
++		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
++		enum dma_transfer_direction direction);
++void fh_dma_cyclic_free(struct dma_chan *chan);
++int fh_dma_cyclic_start(struct dma_chan *chan);
++void fh_dma_cyclic_stop(struct dma_chan *chan);
++
++dma_addr_t fh_dma_get_src_addr(struct dma_chan *chan);
++
++dma_addr_t fh_dma_get_dst_addr(struct dma_chan *chan);
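++
++/*
++ * Minimal usage sketch for the cyclic API above.  It assumes "chan" was
++ * obtained from dma_request_channel(), "buf_phys"/"buf_len"/"period_len"
++ * describe a coherent DMA buffer, and that fh_dma_cyclic_prep() follows
++ * the usual ERR_PTR() convention on failure (names are illustrative):
++ *
++ *   struct fh_cyclic_desc *cd;
++ *
++ *   cd = fh_dma_cyclic_prep(chan, buf_phys, buf_len, period_len,
++ *                           DMA_MEM_TO_DEV);
++ *   if (!IS_ERR(cd)) {
++ *       cd->period_callback = my_period_cb;
++ *       cd->period_callback_param = my_ctx;
++ *       fh_dma_cyclic_start(chan);
++ *       ...
++ *       fh_dma_cyclic_stop(chan);
++ *       fh_dma_cyclic_free(chan);
++ *   }
++ */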
++
++#endif /* FH_DMAC_H */
+diff --git a/arch/arm/mach-fh/include/mach/fh_dmac_regs.h b/arch/arm/mach-fh/include/mach/fh_dmac_regs.h
+new file mode 100644
+index 00000000..504dfead
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fh_dmac_regs.h
+@@ -0,0 +1,312 @@
++/*
++ * Driver for the Synopsys DesignWare AHB DMA Controller
++ *
++ * Copyright (C) 2005-2007 Atmel Corporation
++ * Copyright (C) 2010-2011 ST Microelectronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/dmaengine.h>
++#include <mach/fh_dmac.h>
++
++#define FH_DMA_MAX_NR_CHANNELS	8
++#define FH_DMA_MAX_NR_REQUESTS	16
++
++/*
++ * Redefine this macro to handle differences between 32- and 64-bit
++ * addressing, big vs. little endian, etc.
++ */
++#define FH_REG(name)		u32 name; u32 __pad_##name
++
++/* Hardware register definitions. */
++struct fh_dma_chan_regs {
++	FH_REG(SAR);		/* Source Address Register */
++	FH_REG(DAR);		/* Destination Address Register */
++	FH_REG(LLP);		/* Linked List Pointer */
++	u32	CTL_LO;		/* Control Register Low */
++	u32	CTL_HI;		/* Control Register High */
++	FH_REG(SSTAT);
++	FH_REG(DSTAT);
++	FH_REG(SSTATAR);
++	FH_REG(DSTATAR);
++	u32	CFG_LO;		/* Configuration Register Low */
++	u32	CFG_HI;		/* Configuration Register High */
++	FH_REG(SGR);
++	FH_REG(DSR);
++};
++
++struct fh_dma_irq_regs {
++	FH_REG(XFER);
++	FH_REG(BLOCK);
++	FH_REG(SRC_TRAN);
++	FH_REG(DST_TRAN);
++	FH_REG(ERROR);
++};
++
++struct fh_dma_regs {
++	/* per-channel registers */
++	struct fh_dma_chan_regs	CHAN[FH_DMA_MAX_NR_CHANNELS];
++
++	/* irq handling */
++	struct fh_dma_irq_regs	RAW;		/* r */
++	struct fh_dma_irq_regs	STATUS;		/* r (raw & mask) */
++	struct fh_dma_irq_regs	MASK;		/* rw (set = irq enabled) */
++	struct fh_dma_irq_regs	CLEAR;		/* w (ack, affects "raw") */
++
++	FH_REG(STATUS_INT);			/* r */
++
++	/* software handshaking */
++	FH_REG(REQ_SRC);
++	FH_REG(REQ_DST);
++	FH_REG(SGL_REQ_SRC);
++	FH_REG(SGL_REQ_DST);
++	FH_REG(LAST_SRC);
++	FH_REG(LAST_DST);
++
++	/* miscellaneous */
++	FH_REG(CFG);
++	FH_REG(CH_EN);
++	FH_REG(ID);
++	FH_REG(TEST);
++
++	/* reserved */
++	FH_REG(__reserved0);
++	FH_REG(__reserved1);
++
++	/* optional encoded params, 0x3c8..0x3f7 */
++	u32	__reserved;
++
++	/* per-channel configuration registers */
++	u32	FHC_PARAMS[FH_DMA_MAX_NR_CHANNELS];
++	u32	MULTI_BLK_TYPE;
++	u32	MAX_BLK_SIZE;
++
++	/* top-level parameters */
++	u32	FH_PARAMS;
++};
++
++#ifdef CONFIG_FH_DMAC_BIG_ENDIAN_IO
++#define dma_readl_native ioread32be
++#define dma_writel_native iowrite32be
++#else
++#define dma_readl_native readl
++#define dma_writel_native writel
++#endif
++
++/* To access the registers in early stage of probe */
++#define dma_read_byaddr(addr, name) \
++	dma_readl_native((addr) + offsetof(struct fh_dma_regs, name))
++
++/* Bitfields in FH_PARAMS */
++#define FH_PARAMS_NR_CHAN	8		/* number of channels */
++#define FH_PARAMS_NR_MASTER	11		/* number of AHB masters */
++#define FH_PARAMS_DATA_WIDTH(n)	(15 + 2 * (n))
++#define FH_PARAMS_DATA_WIDTH1	15		/* master 1 data width */
++#define FH_PARAMS_DATA_WIDTH2	17		/* master 2 data width */
++#define FH_PARAMS_DATA_WIDTH3	19		/* master 3 data width */
++#define FH_PARAMS_DATA_WIDTH4	21		/* master 4 data width */
++#define FH_PARAMS_EN		28		/* encoded parameters */
++
++/* Bitfields in FHC_PARAMS */
++#define FHC_PARAMS_MBLK_EN	11		/* multi block transfer */
++
++/* Bitfields in CTL_LO */
++#define FHC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
++#define FHC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
++#define FHC_CTLL_SRC_WIDTH(n)	((n)<<4)
++#define FHC_CTLL_DST_INC	(0<<7)		/* DAR update/not */
++#define FHC_CTLL_DST_DEC	(1<<7)
++#define FHC_CTLL_DST_FIX	(2<<7)
++#define FHC_CTLL_SRC_INC	(0<<9)		/* SAR update/not */
++#define FHC_CTLL_SRC_DEC	(1<<9)
++#define FHC_CTLL_SRC_FIX	(2<<9)
++#define FHC_CTLL_DST_MSIZE(n)	((n)<<11)	/* burst, #elements */
++#define FHC_CTLL_SRC_MSIZE(n)	((n)<<14)
++#define FHC_CTLL_S_GATH_EN	(1 << 17)	/* src gather, !FIX */
++#define FHC_CTLL_D_SCAT_EN	(1 << 18)	/* dst scatter, !FIX */
++#define FHC_CTLL_FC(n)		((n) << 20)
++#define FHC_CTLL_FC_M2M		(0 << 20)	/* mem-to-mem */
++#define FHC_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
++#define FHC_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
++#define FHC_CTLL_FC_P2P		(3 << 20)	/* periph-to-periph */
++/* plus 4 transfer types for peripheral-as-flow-controller */
++#define FHC_CTLL_DMS(n)		((n)<<23)	/* dst master select */
++#define FHC_CTLL_SMS(n)		((n)<<25)	/* src master select */
++#define FHC_CTLL_LLP_D_EN	(1 << 27)	/* dest block chain */
++#define FHC_CTLL_LLP_S_EN	(1 << 28)	/* src block chain */
++
++/* Bitfields in CTL_HI */
++#define FHC_CTLH_DONE		0x00001000
++#define FHC_CTLH_BLOCK_TS_MASK	0x00000fff
++
++/* Bitfields in CFG_LO. Platform-configurable bits are in <mach/fh_dmac.h> */
++#define FHC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
++#define FHC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */
++#define FHC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
++#define FHC_CFGL_FIFO_EMPTY	(1 << 9)	/* pause xfer */
++#define FHC_CFGL_HS_DST		(1 << 10)	/* handshake w/dst */
++#define FHC_CFGL_HS_SRC		(1 << 11)	/* handshake w/src */
++#define FHC_CFGL_MAX_BURST(x)	((x) << 20)
++#define FHC_CFGL_RELOAD_SAR	(1 << 30)
++#define FHC_CFGL_RELOAD_DAR	(1 << 31)
++
++/* Bitfields in CFG_HI. Platform-configurable bits are in <mach/fh_dmac.h> */
++#define FHC_CFGH_DS_UPD_EN	(1 << 5)
++#define FHC_CFGH_SS_UPD_EN	(1 << 6)
++
++/* Bitfields in SGR */
++#define FHC_SGR_SGI(x)		((x) << 0)
++#define FHC_SGR_SGC(x)		((x) << 20)
++
++/* Bitfields in DSR */
++#define FHC_DSR_DSI(x)		((x) << 0)
++#define FHC_DSR_DSC(x)		((x) << 20)
++
++/* Bitfields in CFG */
++#define FH_CFG_DMA_EN		(1 << 0)
++
++#define FH_REGLEN       0x400
++
++enum fh_dmac_flags {
++	FH_DMA_IS_CYCLIC = 0,
++	FH_DMA_IS_SOFT_LLP = 1,
++};
++
++struct fh_dma_chan {
++	struct dma_chan			chan;
++	void __iomem			*ch_regs;
++	u8				mask;
++	u8				priority;
++	enum dma_transfer_direction	direction;
++	bool				paused;
++	bool				initialized;
++
++	/* software emulation of the LLP transfers */
++	struct list_head	*tx_node_active;
++
++	spinlock_t		lock;
++
++	/* these other elements are all protected by lock */
++	unsigned long		flags;
++	struct list_head	active_list;
++	struct list_head	queue;
++	struct list_head	free_list;
++	u32			residue;
++	struct fh_cyclic_desc	*cdesc;
++
++	unsigned int		descs_allocated;
++
++	/* hardware configuration */
++	unsigned int		block_size;
++	bool			nollp;
++
++	/* custom slave configuration */
++	unsigned int		request_line;
++	unsigned char		src_master;
++	unsigned char		dst_master;
++
++	/* configuration passed via DMA_SLAVE_CONFIG */
++	struct dma_slave_config dma_sconfig;
++};
++
++enum fh_dma_slave_increment {
++	FH_DMA_SLAVE_INC,
++	FH_DMA_SLAVE_DEC,
++	FH_DMA_SLAVE_FIX,
++};
++
++struct fh_dma_pri {
++	u32 sinc;
++	u32 dinc;
++};
++
++static inline struct fh_dma_chan_regs __iomem *
++__fhc_regs(struct fh_dma_chan *fhc)
++{
++	return fhc->ch_regs;
++}
++
++#define channel_readl(fhc, name) \
++	dma_readl_native(&(__fhc_regs(fhc)->name))
++#define channel_writel(fhc, name, val) \
++	dma_writel_native((val), &(__fhc_regs(fhc)->name))
++
++static inline struct fh_dma_chan *to_fh_dma_chan(struct dma_chan *chan)
++{
++	return container_of(chan, struct fh_dma_chan, chan);
++}
++
++struct fh_dma {
++	struct dma_device	dma;
++	void __iomem		*regs;
++	struct dma_pool		*desc_pool;
++	struct tasklet_struct	tasklet;
++	struct clk		*clk;
++
++	u8			all_chan_mask;
++
++	/* hardware configuration */
++	unsigned char		nr_masters;
++	unsigned char		data_width[4];
++
++	struct fh_dma_chan	chan[0];
++};
++
++static inline struct fh_dma_regs __iomem *__fh_regs(struct fh_dma *dw)
++{
++	return dw->regs;
++}
++
++#define dma_readl(dw, name) \
++	dma_readl_native(&(__fh_regs(dw)->name))
++#define dma_writel(dw, name, val) \
++	dma_writel_native((val), &(__fh_regs(dw)->name))
++
++#define channel_set_bit(dw, reg, mask) \
++	dma_writel(dw, reg, ((mask) << 8) | (mask))
++#define channel_clear_bit(dw, reg, mask) \
++	dma_writel(dw, reg, ((mask) << 8) | 0)
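++
++/*
++ * The channel-enable style registers pair each control bit with a
++ * write-enable bit in the upper byte; a write only takes effect for
++ * channels whose write-enable bit is set.  For example, enabling only
++ * channel 2:
++ *
++ *   channel_set_bit(dw, CH_EN, 1 << 2);   writes 0x0404
++ *
++ * which leaves the enable bits of all other channels untouched.
++ */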
++
++static inline struct fh_dma *to_fh_dma(struct dma_device *ddev)
++{
++	return container_of(ddev, struct fh_dma, dma);
++}
++
++/* LLI == Linked List Item; a.k.a. DMA block descriptor */
++struct fh_lli {
++	/* values that are not changed by hardware */
++	u32		sar;
++	u32		dar;
++	u32		llp;		/* chain to next lli */
++	u32		ctllo;
++	/* values that may get written back: */
++	u32		ctlhi;
++	/* sstat and dstat can snapshot peripheral register state.
++	 * silicon config may discard either or both...
++	 */
++	u32		sstat;
++	u32		dstat;
++};
++
++struct fh_desc {
++	/* FIRST values the hardware uses */
++	struct fh_lli			lli;
++
++	/* THEN values for driver housekeeping */
++	struct list_head		desc_node;
++	struct list_head		tx_list;
++	struct dma_async_tx_descriptor	txd;
++	size_t				len;
++	size_t				total_len;
++};
++
++#define to_fh_desc(h)	list_entry(h, struct fh_desc, desc_node)
++
++static inline struct fh_desc *
++txd_to_fh_desc(struct dma_async_tx_descriptor *txd)
++{
++	return container_of(txd, struct fh_desc, txd);
++}
+diff --git a/arch/arm/mach-fh/include/mach/fh_gmac.h b/arch/arm/mach-fh/include/mach/fh_gmac.h
+new file mode 100644
+index 00000000..1ab649d0
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fh_gmac.h
+@@ -0,0 +1,33 @@
++#ifndef __FH_GMAC_PLATFORM_DATA
++#define __FH_GMAC_PLATFORM_DATA
++
++#include <linux/platform_device.h>
++
++enum {
++	gmac_phyt_reg_basic_ctrl = 0,
++	gmac_phyt_reg_basic_status = 1,
++	gmac_phyt_reg_phy_id1 = 2,
++	gmac_phyt_reg_phy_id2 = 3,
++	gmac_phyt_rtl8201_rmii_mode = 16,
++	gmac_phyt_ti83848_rmii_mode = 17,
++	gmac_phyt_rtl8201_power_saving = 24,
++	gmac_phyt_rtl8201_page_select = 31,
++	gmac_phyt_ip101g_page_select = 20
++};
++
++enum {
++	gmac_speed_10m,
++	gmac_speed_100m
++};
++
++struct fh_gmac_platform_data {
++	int interface;
++	int phyid;
++	void (*early_init)(struct fh_gmac_platform_data *plat_data);
++	void (*plat_init)(struct fh_gmac_platform_data *plat_data);
++	void (*set_rmii_speed)(int speed);
++	void (*phy_reset)(void);
++};
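++
++/*
++ * A board file would typically supply this as platform data; a minimal
++ * sketch (field values and the reset callback are hypothetical):
++ *
++ *   static struct fh_gmac_platform_data fh_gmac_data = {
++ *       .interface = PHY_INTERFACE_MODE_RMII,
++ *       .phyid     = 0,
++ *       .phy_reset = board_phy_reset,
++ *   };
++ */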
++
++#endif
++
+diff --git a/arch/arm/mach-fh/include/mach/fh_predefined.h b/arch/arm/mach-fh/include/mach/fh_predefined.h
+new file mode 100644
+index 00000000..a5572f2f
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fh_predefined.h
+@@ -0,0 +1,40 @@
++/*
++ * fh_predefined.h
++ *
++ *  Created on: May 22, 2014
++ *      Author: duobao
++ */
++
++#ifndef FH_PREDEFINED_H_
++#define FH_PREDEFINED_H_
++
++typedef unsigned char		UINT8;
++typedef unsigned short		UINT16;
++typedef unsigned int		UINT32;
++typedef unsigned long long	UINT64;
++
++typedef char			SINT8;
++typedef short			SINT16;
++typedef int			SINT32;
++typedef long long		SINT64;
++#define FALSE			(0)
++#define TRUE			(!FALSE)
++#define reg_read(addr) (*((volatile UINT32 *)(addr)))
++#define reg_write(addr,value) (*(volatile UINT32 *)(addr)=(value))
++
++#define GET_REG(addr) reg_read(addr)
++#define SET_REG(addr,value) reg_write(addr,value)
++#define SET_REG_M(addr,value,mask) reg_write(addr,(reg_read(addr)&(~(mask)))|((value)&(mask)))
++#define SET_REG_B(addr,element,highbit,lowbit) SET_REG_M((addr),((element)<<(lowbit)),(((1<<((highbit)-(lowbit)+1))-1)<<(lowbit)))
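++
++/*
++ * Example: SET_REG_B() writes a bitfield without disturbing neighbouring
++ * bits.  Setting bits [7:4] of a register to 0x5 (address illustrative):
++ *
++ *   SET_REG_B(0xF0000000, 0x5, 7, 4);
++ *
++ * expands to a read-modify-write of that register with mask 0x000000F0.
++ */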
++
++#define GET_REG8(addr) (*((volatile UINT8 *)(addr)))
++#define SET_REG8(addr,value) (*(volatile UINT8 *)(addr)=(value))
++
++#define LD8(addr) 		(*((volatile u8 *)(addr)))
++#define ST8(addr,value)		(*(volatile u8 *)(addr)=(value))
++#define LD16(addr) 		(*((volatile u16 *)(addr)))
++#define ST16(addr,value)	(*(volatile u16 *)(addr)=(value))
++#define LD32(addr)		(*((volatile u32 *)(addr)))
++#define ST32(addr,value)	(*(volatile u32 *)(addr)=(value))
++
++#endif /* FH_PREDEFINED_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/fh_sadc.h b/arch/arm/mach-fh/include/mach/fh_sadc.h
+new file mode 100644
+index 00000000..6f73bc90
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fh_sadc.h
+@@ -0,0 +1,83 @@
++/*
++ * fh_sadc.h
++ *
++ *  Created on: Mar 13, 2015
++ *      Author: duobao
++ */
++
++#ifndef FH_SADC_H_
++#define FH_SADC_H_
++
++#include <linux/io.h>
++#include <linux/scatterlist.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/scatterlist.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/workqueue.h>
++#include <linux/bug.h>
++#include <linux/completion.h>
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++
++/****************************************************************************
++ * #define section
++ *	add constant #define here if any
++ ***************************************************************************/
++#define FH_SADC_PROC_FILE    "driver/sadc"
++#define MAX_CHANNEL_NO		(8)
++#define SADC_REF			(3300)
++#define SADC_MAX_AD_VALUE	(0x3ff)
++#define LOOP_MODE			(0x55)
++#define ISR_MODE			(0xAA)
++
++
++#define SADC_TIMEOUT  		0x55
++/****************************************************************************
++ * ADT section
++ *	add Abstract Data Type definition here
++ ***************************************************************************/
++
++struct wrap_sadc_reg {
++	u32 sadc_cmd;
++	u32 sadc_control;
++	u32 sadc_ier;
++	u32 sadc_int_status;
++	u32 sadc_dout0;
++	u32 sadc_dout1;
++	u32 sadc_dout2;
++	u32 sadc_dout3;
++	u32 sadc_debuge0;
++	u32 sadc_status;
++	u32 sadc_cnt;
++	u32 sadc_timeout;
++};
++
++struct wrap_sadc_obj {
++	void *regs;
++	u32 irq_no;
++	u32 active_channel_no;
++	u32 active_channel_status;
++	uint16_t channel_data[MAX_CHANNEL_NO];
++	u32 error_rec;
++	u32 en_isr;
++	u32 sample_mode;
++	spinlock_t lock;
++	struct completion done;
++	struct proc_dir_entry *proc_file;
++};
++
++#ifdef CONFIG_FH_SADC
++long fh_sadc_get_value(int channel);
++#else
++static inline long fh_sadc_get_value(int channel)
++{
++	return 0;
++}
++#endif
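++
++/*
++ * Samples are 10-bit (SADC_MAX_AD_VALUE) against a 3300 mV reference
++ * (SADC_REF), so a raw reading presumably converts to millivolts as:
++ *
++ *   mv = fh_sadc_get_value(ch) * SADC_REF / SADC_MAX_AD_VALUE;
++ */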
++
++#endif /* FH_SADC_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/fh_simple_timer.h b/arch/arm/mach-fh/include/mach/fh_simple_timer.h
+new file mode 100644
+index 00000000..a5f08a54
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fh_simple_timer.h
+@@ -0,0 +1,45 @@
++/*
++ * fh_simple_timer.h
++ *
++ *  Created on: Jan 22, 2017
++ *      Author: duobao
++ */
++
++#ifndef FH_SIMPLE_TIMER_H_
++#define FH_SIMPLE_TIMER_H_
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/time.h>
++#include <linux/ktime.h>
++#include <linux/timerqueue.h>
++#include <mach/pmu.h>
++#include <mach/timex.h>
++#include <mach/io.h>
++#include <mach/fh_predefined.h>
++
++enum simple_timer_state {
++	SIMPLE_TIMER_STOP,
++	SIMPLE_TIMER_START,
++	SIMPLE_TIMER_ERROR,
++};
++
++struct fh_simple_timer {
++	struct timerqueue_node	node;
++	ktime_t			it_interval;	/* timer period */
++	ktime_t			it_value;	/* timer expiration */
++	ktime_t			it_delay;
++	void (*function)(void *);
++	void			*param;
++};
++
++
++int fh_simple_timer_interrupt(void);
++int fh_simple_timer_create(struct fh_simple_timer *tim);
++int fh_timer_start(void);
++int fh_simple_timer_init(void);
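++
++/*
++ * Typical sequence, as a sketch (my_timeout_fn/my_ctx are illustrative
++ * names; error handling omitted):
++ *
++ *   static struct fh_simple_timer t;
++ *
++ *   t.it_value = ktime_set(1, 0);   (first expiry in one second)
++ *   t.function = my_timeout_fn;
++ *   t.param    = my_ctx;
++ *   fh_simple_timer_init();
++ *   fh_simple_timer_create(&t);
++ *   fh_timer_start();
++ */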
++
++#endif /* FH_SIMPLE_TIMER_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/fh_wdt.h b/arch/arm/mach-fh/include/mach/fh_wdt.h
+new file mode 100644
+index 00000000..22e4a45f
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fh_wdt.h
+@@ -0,0 +1,12 @@
++#ifndef __FH_WDT_PLATFORM_DATA
++#define __FH_WDT_PLATFORM_DATA
++
++#include <linux/platform_device.h>
++
++struct fh_wdt_platform_data {
++	void (*resume)(void);
++	void (*pause)(void);
++};
++
++#endif
++
+diff --git a/arch/arm/mach-fh/include/mach/fhmci.h b/arch/arm/mach-fh/include/mach/fhmci.h
+new file mode 100644
+index 00000000..b60ad26e
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/fhmci.h
+@@ -0,0 +1,178 @@
++#ifndef _FH_MCI_H_
++#define _FH_MCI_H_
++
++extern int trace_level;
++#define FHMCI_TRACE_LEVEL 5
++/*
++ * 0 - all messages
++ * 1 - dump all register reads/writes
++ * 2 - flow trace
++ * 3 - timeout and protocol errors
++ * fhmci_trace() prints a message only when its level is >= trace_level,
++ * so the default FHMCI_TRACE_LEVEL of 5 suppresses all of the above.
++ */
++
++#define FHMCI_TRACE_FMT KERN_INFO
++#define ID_SD0        0
++#define ID_SD1        1
++
++#define POWER_ON	1
++#define POWER_OFF	0
++
++#define CARD_UNPLUGED	1
++#define CARD_PLUGED	0
++
++#define ENABLE		1
++#define DISABLE		0
++
++#define FH_MCI_DETECT_TIMEOUT	(HZ/4)
++
++#define FH_MCI_REQUEST_TIMEOUT	(5 * HZ)
++
++#define MAX_RETRY_COUNT   100
++#define MAX_MCI_HOST	(2)	/* max num of host on soc */
++
++#define fhmci_trace(level, msg...) do { \
++	if ((level) >= trace_level) { \
++		printk(FHMCI_TRACE_FMT "%s:%d: ", __func__, __LINE__); \
++		printk(msg); \
++		printk("\n"); \
++	} \
++} while (0)
++
++#define fhmci_assert(cond) do { \
++	if (!(cond)) {\
++		printk(KERN_ERR "Assert:fhmci:%s:%d\n", \
++				__func__, \
++				__LINE__); \
++		BUG(); \
++	} \
++} while (0)
++
++#define fhmci_error(s...) do { \
++	printk(KERN_ERR "fhmci:%s:%d: ", __func__, __LINE__); \
++	printk(s); \
++	printk("\n"); \
++} while (0)
++
++#define fhmci_readl(addr) ({unsigned int reg = readl((unsigned int)addr); \
++	fhmci_trace(1, "readl(0x%04X) = 0x%08X", (unsigned int)addr, reg); \
++	reg; })
++
++#define fhmci_writel(v, addr) do { \
++	writel(v, (unsigned int)addr); \
++	fhmci_trace(1, "writel(0x%04X) = 0x%08X",\
++			(unsigned int)addr, (unsigned int)(v)); \
++} while (0)
++
++
++struct fhmci_des {
++	unsigned long idmac_des_ctrl;
++	unsigned long idmac_des_buf_size;
++	unsigned long idmac_des_buf_addr;
++	unsigned long idmac_des_next_addr;
++};
++
++struct fhmci_host {
++	struct mmc_host		*mmc;
++	spinlock_t		lock;
++	struct mmc_request	*mrq;
++	struct mmc_command	*cmd;
++	struct mmc_data		*data;
++	void __iomem		*base;
++	unsigned int		card_status;
++	struct scatterlist	*dma_sg;
++	unsigned int		dma_sg_num;
++	unsigned int		dma_alloc_size;
++	unsigned int		dma_dir;
++	dma_addr_t		dma_paddr;
++	unsigned int		*dma_vaddr;
++	struct timer_list	timer;
++	unsigned int		irq;
++	unsigned int		irq_status;
++	unsigned int		is_tuning;
++	wait_queue_head_t	intr_wait;
++	unsigned long		pending_events;
++	unsigned int		id;
++	struct fh_mci_board *pdata;
++	unsigned int (*get_cd)(struct fhmci_host *host);
++	unsigned int (*get_ro)(struct fhmci_host *host);
++#define	FHMCI_PEND_DTO_b	(0)
++#define	FHMCI_PEND_DTO_m	(1 << FHMCI_PEND_DTO_b)
++};
++
++/* Board platform data */
++struct fh_mci_board {
++    unsigned int num_slots;
++
++    unsigned int quirks; /* Workaround / Quirk flags */
++    unsigned int bus_hz; /* Bus speed */
++
++    unsigned int caps;  /* Capabilities */
++
++    /* delay in mS before detecting cards after interrupt */
++    unsigned int detect_delay_ms;
++
++    int (*init)(unsigned int slot_id,void* irq_handler_t , void *);
++    unsigned int (*get_ro)(struct fhmci_host *host);
++    unsigned int (*get_cd)(struct fhmci_host *host);
++    int (*get_ocr)(unsigned int slot_id);
++    int (*get_bus_wd)(unsigned int slot_id);
++    /*
++     * Enable power to selected slot and set voltage to desired level.
++     * Voltage levels are specified using MMC_VDD_xxx defines defined
++     * in linux/mmc/host.h file.
++     */
++    void (*setpower)(unsigned int slot_id, unsigned int volt);
++    void (*exit)(unsigned int slot_id);
++    void (*select_slot)(unsigned int slot_id);
++
++    struct dw_mci_dma_ops *dma_ops;
++    struct dma_pdata *data;
++    struct block_settings *blk_settings;
++    int fifo_depth;
++};
++
++union cmd_arg_s {
++	unsigned int cmd_arg;
++	struct cmd_bits_arg {
++		unsigned int cmd_index:6;
++		unsigned int response_expect:1;
++		unsigned int response_length:1;
++		unsigned int check_response_crc:1;
++		unsigned int data_transfer_expected:1;
++		unsigned int read_write:1;
++		unsigned int transfer_mode:1;
++		unsigned int send_auto_stop:1;
++		unsigned int wait_prvdata_complete:1;
++		unsigned int stop_abort_cmd:1;
++		unsigned int send_initialization:1;
++		unsigned int card_number:5;
++		unsigned int update_clk_reg_only:1; /* bit 21 */
++		unsigned int read_ceata_device:1;
++		unsigned int ccs_expected:1;
++		unsigned int enable_boot:1;
++		unsigned int expect_boot_ack:1;
++		unsigned int disable_boot:1;
++		unsigned int boot_mode:1;
++		unsigned int volt_switch:1;
++		unsigned int use_hold_reg:1;
++		unsigned int reserved:1;
++		unsigned int start_cmd:1; /* HSB */
++	} bits;
++};
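++
++/*
++ * Example of building a command word through the bitfield view, here a
++ * hypothetical single-block read (CMD17) with CRC checking:
++ *
++ *   union cmd_arg_s cmd = { .cmd_arg = 0 };
++ *
++ *   cmd.bits.cmd_index = 17;
++ *   cmd.bits.response_expect = 1;
++ *   cmd.bits.check_response_crc = 1;
++ *   cmd.bits.data_transfer_expected = 1;
++ *   cmd.bits.wait_prvdata_complete = 1;
++ *   cmd.bits.start_cmd = 1;
++ */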
++
++struct mmc_ctrl {
++	unsigned int slot_idx;       /*0: mmc0;  1: mmc1*/
++	unsigned int mmc_ctrl_state; /*0: enable mmc_rescan;  1: disable mmc_rescan*/
++};
++
++enum mmc_ctrl_state {
++	RESCAN_ENABLE = 0,
++	RESCAN_DISABLE
++};
++
++struct platform_device *get_mci_device(unsigned int index);
++int storage_dev_set_mmc_rescan(struct mmc_ctrl *m_ctrl);
++int read_mci_ctrl_states(int id_mmc_sd);
++
++#endif
++
+diff --git a/arch/arm/mach-fh/include/mach/gpio.h b/arch/arm/mach-fh/include/mach/gpio.h
+new file mode 100644
+index 00000000..781093dd
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/gpio.h
+@@ -0,0 +1,259 @@
++/*
++ * TI DaVinci GPIO Support
++ *
++ * Copyright (c) 2006 David Brownell
++ * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef __FH_GPIO_H
++#define __FH_GPIO_H
++
++#include <linux/io.h>
++#include <linux/spinlock.h>
++
++#include <asm-generic/gpio.h>
++
++#include <mach/irqs.h>
++#include <mach/fh_predefined.h>
++
++/*
++ * GPIO Direction
++ */
++#define GPIO_DIR_INPUT          0
++#define GPIO_DIR_OUTPUT         1
++
++/*
++ * GPIO interrupt type
++ */
++#define GPIO_INT_TYPE_LEVEL     0
++#define GPIO_INT_TYPE_EDGE      1
++
++/*
++ * GPIO interrupt polarity
++ */
++#define GPIO_INT_POL_LOW        0
++#define GPIO_INT_POL_HIGH       1
++
++#define OFFSET_GPIO_SWPORTA_DR         (0x0000)
++#define OFFSET_GPIO_SWPORTA_DDR        (0x0004)
++#define OFFSET_GPIO_PORTA_CTL          (0x0008)
++#define OFFSET_GPIO_SWPORTB_DR         (0x000C)
++#define OFFSET_GPIO_SWPORTB_DDR        (0x0010)
++#define OFFSET_GPIO_PORTB_CTL          (0x0014)
++#define OFFSET_GPIO_INTEN              (0x0030)
++#define OFFSET_GPIO_INTMASK            (0x0034)
++#define OFFSET_GPIO_INTTYPE_LEVEL      (0x0038)
++#define OFFSET_GPIO_INT_POLARITY       (0x003C)
++#define OFFSET_GPIO_INTSTATUS          (0x0040)
++#define OFFSET_GPIO_RAWINTSTATUS       (0x0044)
++#define OFFSET_GPIO_DEBOUNCE           (0x0048)
++#define OFFSET_GPIO_PORTA_EOI          (0x004C)
++#define OFFSET_GPIO_EXT_PORTA          (0x0050)
++#define OFFSET_GPIO_EXT_PORTB          (0x0054)
++
++static inline void FH_GPIO_SetValue(unsigned int base, int bit, int val)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_SWPORTA_DR);
++	reg = val ? (reg | (1 << bit)) : (reg & ~(1 << bit));
++	SET_REG(base + OFFSET_GPIO_SWPORTA_DR, reg);
++}
++
++static inline int FH_GPIO_GetValue(unsigned int base, int bit)
++{
++	return (GET_REG(base + OFFSET_GPIO_EXT_PORTA) >> bit) & 0x1;
++}
++
++static inline void FH_GPIO_SetDirection(unsigned int base, int bit, int dir)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_SWPORTA_DDR);
++	reg = dir ? reg | (1 << bit) : reg & ~(1 << bit);
++	SET_REG(base + OFFSET_GPIO_SWPORTA_DDR, reg);
++}
++
++static inline int FH_GPIO_GetDirection(unsigned int base, int bit)
++{
++	return (GET_REG(base + OFFSET_GPIO_SWPORTA_DDR) >> bit) & 0x1;
++}
++
++static inline void FH_GPIOB_SetValue(unsigned int base, int bit, int val)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_SWPORTB_DR);
++	reg = val ? (reg | (1 << bit)) : (reg & ~(1 << bit));
++	SET_REG(base + OFFSET_GPIO_SWPORTB_DR, reg);
++}
++
++static inline int FH_GPIOB_GetValue(unsigned int base, int bit)
++{
++	return (GET_REG(base + OFFSET_GPIO_EXT_PORTB) >> bit) & 0x1;
++}
++
++static inline void FH_GPIOB_SetDirection(unsigned int base, int bit, int dir)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_SWPORTB_DDR);
++	reg = dir ? reg | (1 << bit) : reg & ~(1 << bit);
++	SET_REG(base + OFFSET_GPIO_SWPORTB_DDR, reg);
++}
++
++static inline int FH_GPIOB_GetDirection(unsigned int base, int bit)
++{
++	return (GET_REG(base + OFFSET_GPIO_SWPORTB_DDR) >> bit) & 0x1;
++}
++
++static inline void FH_GPIO_EnableDebounce(unsigned int base, int bit,
++		int enable)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_DEBOUNCE);
++	reg = enable ? reg | (1 << bit) : reg & ~(1 << bit);
++	SET_REG(base + OFFSET_GPIO_DEBOUNCE, reg);
++}
++
++static inline void FH_GPIO_SetInterruptType(unsigned int base, int bit,
++		int type)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_INTTYPE_LEVEL);
++	reg = type ? reg | (1 << bit) : reg & ~(1 << bit);
++	SET_REG(base + OFFSET_GPIO_INTTYPE_LEVEL, reg);
++}
++
++static inline void FH_GPIO_SetInterruptPolarity(unsigned int base, int bit,
++		int pol)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_INT_POLARITY);
++	reg = pol ? reg | (1 << bit) : reg & ~(1 << bit);
++	SET_REG(base + OFFSET_GPIO_INT_POLARITY, reg);
++}
++
++static inline void FH_GPIO_EnableInterruptMask(unsigned int base, int bit,
++		int enable)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_INTMASK);
++	reg = enable ? reg | (1 << bit) : reg & ~(1 << bit);
++	SET_REG(base + OFFSET_GPIO_INTMASK, reg);
++}
++
++static inline void FH_GPIO_EnableInterrupt(unsigned int base, int bit,
++		int enable)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_INTEN);
++	reg = enable ? reg | (1 << bit) : reg & ~(1 << bit);
++	SET_REG(base + OFFSET_GPIO_INTEN, reg);
++}
++
++static inline void FH_GPIO_SetEnableInterrupts(unsigned int base,
++		unsigned int val)
++{
++	SET_REG(base + OFFSET_GPIO_INTEN, val);
++}
++
++static inline unsigned int FH_GPIO_GetEnableInterrupts(unsigned int base)
++{
++	return GET_REG(base + OFFSET_GPIO_INTEN);
++}
++
++static inline unsigned int FH_GPIO_GetInterruptStatus(unsigned int base)
++{
++	return GET_REG(base + OFFSET_GPIO_INTSTATUS);
++}
++
++static inline void FH_GPIO_ClearInterrupt(unsigned int base, int bit)
++{
++	unsigned int reg;
++
++	reg = GET_REG(base + OFFSET_GPIO_PORTA_EOI);
++	reg |= (1 << bit);
++	SET_REG(base + OFFSET_GPIO_PORTA_EOI, reg);
++}
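++
++/*
++ * Example: configuring pad "bit" as a rising-edge interrupt source with
++ * the helpers above (a sketch; "base" is the controller's virtual base):
++ *
++ *   FH_GPIO_SetDirection(base, bit, GPIO_DIR_INPUT);
++ *   FH_GPIO_SetInterruptType(base, bit, GPIO_INT_TYPE_EDGE);
++ *   FH_GPIO_SetInterruptPolarity(base, bit, GPIO_INT_POL_HIGH);
++ *   FH_GPIO_EnableInterruptMask(base, bit, 0);
++ *   FH_GPIO_EnableInterrupt(base, bit, 1);
++ */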
++
++#define GPIO_NAME "FH_GPIO"
++
++
++struct gpio_irq_info {
++	int irq_gpio;
++	int irq_line;
++	int irq_type;
++	int irq_gpio_val;
++	int irq_gpio_mode;
++};
++
++struct fh_gpio_chip {
++	struct gpio_chip chip;
++	void __iomem *base;
++
++	struct platform_device *pdev;
++	int irq;
++	spinlock_t lock;
++
++	u32 gpio_wakeups;
++	u32 gpio_backups;
++};
++
++/*
++ * The get/set/clear functions will inline when called with constant
++ * parameters referencing built-in GPIOs, for low-overhead bitbanging.
++ *
++ * gpio_set_value() will inline only on traditional DaVinci-style controllers
++ * with distinct set/clear registers.
++ *
++ * Otherwise, calls with variable parameters or referencing external
++ * GPIOs (e.g. on GPIO expander chips) use outlined functions.
++ */
++static inline void gpio_set_value(unsigned gpio, int value)
++{
++	__gpio_set_value(gpio, value);
++}
++
++/* Returns zero or nonzero; works for gpios configured as inputs OR
++ * as outputs, at least for built-in GPIOs.
++ *
++ * NOTE: for built-in GPIOs, changes in reported values are synchronized
++ * to the GPIO clock.  This is easily seen after calling gpio_set_value()
++ * and then immediately gpio_get_value(), where the gpio_get_value() will
++ * return the old value until the GPIO clock ticks and the new value gets
++ * latched.
++ */
++static inline int gpio_get_value(unsigned gpio)
++{
++	return __gpio_get_value(gpio);
++}
++
++static inline int gpio_cansleep(unsigned gpio)
++{
++	return 0;
++}
++
++static inline int gpio_to_irq(unsigned gpio)
++{
++	return __gpio_to_irq(gpio);
++}
++
++static inline int irq_to_gpio(unsigned irq)
++{
++	return 0;
++}
++
++void fh_gpio_irq_suspend(void);
++void fh_gpio_irq_resume(void);
++
++#endif
++
+diff --git a/arch/arm/mach-fh/include/mach/hardware.h b/arch/arm/mach-fh/include/mach/hardware.h
+new file mode 100644
+index 00000000..af93e037
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/hardware.h
+@@ -0,0 +1,18 @@
++/*
++*	Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
++*				All Rights Reserved. Confidential.
++*
++*This program is free software; you can redistribute it and/or
++* modify it under the terms of the GNU General Public License as
++* published by the Free Software Foundation version 2.
++*
++* This program is distributed "as is" WITHOUT ANY WARRANTY of any
++* kind, whether express or implied; without even the implied warranty
++* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++* GNU General Public License for more details.
++*/
++
++#ifndef __ASM_ARCH_HARDWARE_H
++#define __ASM_ARCH_HARDWARE_H
++
++#endif /* __ASM_ARCH_HARDWARE_H */
+diff --git a/arch/arm/mach-fh/include/mach/i2c.h b/arch/arm/mach-fh/include/mach/i2c.h
+new file mode 100644
+index 00000000..991000c5
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/i2c.h
+@@ -0,0 +1,327 @@
++/*
++*	Copyright (c) 2010 Shanghai Fullhan Microelectronics Co., Ltd.
++*				All Rights Reserved. Confidential.
++*
++*This program is free software; you can redistribute it and/or
++* modify it under the terms of the GNU General Public License as
++* published by the Free Software Foundation version 2.
++*
++* This program is distributed "as is" WITHOUT ANY WARRANTY of any
++* kind, whether express or implied; without even the implied warranty
++* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++* GNU General Public License for more details.
++*/
++
++#ifndef __ASM_ARCH_I2C_H
++#define __ASM_ARCH_I2C_H
++#include "fh_predefined.h"
++
++/* I2C register offsets */
++#define     REG_I2C_CON             (0x0000)
++#define     REG_I2C_TAR             (0x0004)
++#define     REG_I2C_SAR             (0x0008)
++#define     REG_I2C_HS_MADDR        (0x000C)
++#define     REG_I2C_DATA_CMD        (0x0010)
++#define     REG_I2C_SS_SCL_HCNT     (0x0014)
++#define     REG_I2C_SS_SCL_LCNT     (0x0018)
++#define     REG_I2C_FS_SCL_HCNT     (0x001C)
++#define     REG_I2C_FS_SCL_LCNT     (0x0020)
++#define     REG_I2C_HS_SCL_HCNT     (0x0024)
++#define     REG_I2C_HS_SCL_LCNT     (0x0028)
++#define     REG_I2C_INTR_STAT       (0x002c)
++#define     REG_I2C_INTR_MASK       (0x0030)
++#define     REG_I2C_RAW_INTR_STAT   (0x0034)
++#define     REG_I2C_RX_TL           (0x0038)
++#define     REG_I2C_TX_TL           (0x003c)
++#define     REG_I2C_CLR_INTR        (0x0040)
++#define     REG_I2C_ENABLE          (0x006c)
++#define     REG_I2C_STATUS          (0x0070)
++#define     REG_I2C_TXFLR           (0x0074)
++#define     REG_I2C_RXFLR           (0x0078)
++#define     REG_I2C_DMA_CR          (0x0088)
++#define     REG_I2C_DMA_TDLR        (0x008c)
++#define     REG_I2C_DMA_RDLR        (0x0090)
++
++#define DW_IC_INTR_NONE         0x0
++
++
++enum BUS_STATUS {
++	I2C_BUSY,
++	I2C_IDLE
++};
++enum RESULT {
++	SUCCESS,
++	FAILURE
++};
++enum ENABLE_SET {
++	DISABLE,
++	ENABLE
++};
++enum SPEED_MODE {
++	SSPEED = 1,
++	FSPEED = 2,
++	HSPEED = 3,
++};
++
++UINT32 I2c_Disable(UINT32 base_addr);
++
++void I2c_SetSpeed(UINT32 base_addr, UINT8 mode);
++void I2c_SetDeviceId(UINT32 base_addr, UINT32 deviceID);
++void I2c_Enable(UINT32 enable);
++UINT32 I2c_GetStatus(UINT32 base_addr);
++void I2c_SetIr(UINT32 base_addr, UINT16 mask);
++
++void I2c_Init(UINT32 base_addr, UINT16 slave_addr, enum SPEED_MODE speed,
++	      int txtl, int rxtl);
++
++/* function Macro */
++/*******************************************************************************
++* Function Name  : I2c_GetTxFifoDepth
++* Description    : get tx fifo depth
++* Input          : base addr
++* Output         : None
++* Return         : tx fifo depth
++*
++ *******************************************************************************/
++#define I2c_GetTxFifoDepth(base_addr)   (((GET_REG(base_addr + \
++		DW_IC_COMP_PARAM_1) >> 16) & 0xff) + 1)
++
++/*******************************************************************************
++* Function Name  : I2c_GetRxFifoDepth
++* Description    : get rx fifo depth
++* Input          : base addr
++* Output         : None
++* Return         : rx fifo depth
++*
++ *******************************************************************************/
++#define I2c_GetRxFifoDepth(base_addr)   (((GET_REG(base_addr + \
++		DW_IC_COMP_PARAM_1) >> 8) & 0xff) + 1)
++/*******************************************************************************
++* Function Name  : I2c_SetDeviceId
++* Description    : set the slave addr
++* Input          : deviceID:slave addr
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define I2c_SetDeviceId(base_addr, deviceID)   SET_REG(base_addr + REG_I2C_TAR, deviceID)	/* set I2C slave address */
++
++/*******************************************************************************
++* Function Name  : I2c_Read
++* Description    : read data from I2C bus
++* Input          : None
++* Output         : None
++* Return         : data:I2C  data
++*
++ *******************************************************************************/
++
++#define I2c_Read(base_addr)   (GET_REG(base_addr + REG_I2C_DATA_CMD) & 0xff)	/* DW_I2C_DATA_CMD */
++/*******************************************************************************
++* Function Name  : I2c_SetSsHcnt
++* Description    : set i2c ss scl hcnt
++* Input          : hcnt
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++
++#define I2c_SetSsHcnt(base_addr, hcnt)  SET_REG(base_addr + DW_IC_SS_SCL_HCNT,hcnt)
++
++/*******************************************************************************
++* Function Name  : I2c_SetSsLcnt
++* Description    : set i2c ss scl lcnt
++* Input          : lcnt
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++
++#define I2c_SetSsLcnt(base_addr, lcnt)  SET_REG(base_addr + DW_IC_SS_SCL_LCNT,lcnt)
++/*******************************************************************************
++* Function Name  : I2c_SetFsHcnt
++* Description    : set i2c fs scl hcnt
++* Input          : hcnt
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++
++#define I2c_SetFsHcnt(base_addr, hcnt)  SET_REG(base_addr + DW_IC_FS_SCL_HCNT,hcnt)
++
++/*******************************************************************************
++* Function Name  : I2c_SetFsLcnt
++* Description    : set i2c fs scl lcnt
++* Input          : lcnt
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++
++#define I2c_SetFsLcnt(base_addr, lcnt)  SET_REG(base_addr + DW_IC_FS_SCL_LCNT,lcnt)
++/*******************************************************************************
++* Function Name  : I2c_DisEnable
++* Description    : disable I2C bus
++* Input          : None
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define I2c_DisEnable(base_addr)  SET_REG(base_addr + REG_I2C_ENABLE,DISABLE);
++/*******************************************************************************
++* Function Name  : I2c_Enable
++* Description    : set the I2C bus enable
++* Input          : enable
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define I2c_Enable(base_addr)  SET_REG(base_addr + REG_I2C_ENABLE,ENABLE);
++/*******************************************************************************
++* Function Name  : I2c_Write
++* Description    : Write data to I2C bus
++* Input          : data:wirte out data
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define I2c_Write(base_addr, data)  SET_REG(base_addr + REG_I2C_DATA_CMD,data)
++
++/*******************************************************************************
++* Function Name  : I2c_GetTxTl
++* Description    : Get TX_TL
++* Input          : base_addr
++* Return         : TX_TL
++ *******************************************************************************/
++#define I2c_GetTxTl(base_addr)     (GET_REG(base_addr + REG_I2C_TX_TL) & 0xff)
++/*******************************************************************************
++* Function Name  : I2c_GetRxTl
++* Description    : Get RX_TL
++* Input          : base_addr
++* Return         : RX_TL
++ *******************************************************************************/
++#define I2c_GetRxTl(base_addr)     (GET_REG(base_addr + REG_I2C_RX_TL) & 0xff)
++/*******************************************************************************
++* Function Name  : I2c_GetRxFLR
++* Description    : Get RX_FLR
++* Input          : base_addr
++* Return         : RX_FLR
++ *******************************************************************************/
++#define I2c_GetRxFLR(base_addr)     (GET_REG(base_addr + DW_IC_RXFLR) & 0xff)
++/*******************************************************************************
++* Function Name  : I2c_GetTxFLR
++* Description    : Get TX_FLR
++* Input          : base_addr
++* Return         : TX_FLR
++ *******************************************************************************/
++#define I2c_GetTxFLR(base_addr)     (GET_REG(base_addr + DW_IC_TXFLR) & 0xff)
++/*******************************************************************************
++* Function Name  : I2c_SetTxRxTl
++* Description    : set TX_TL RX_TL
++* Input          : TX_TL, RX_TL
++* Return         : None
++ *******************************************************************************/
++#define I2c_SetTxRxTl(base_addr, txtl, rxtl)	do {	\
++	SET_REG(base_addr + REG_I2C_TX_TL, txtl);	\
++	SET_REG(base_addr + REG_I2C_RX_TL, rxtl);	\
++} while (0)
++
++/*******************************************************************************
++* Function Name  : I2c_IsActiveMst
++* Description    : if master mode is active, return 1
++* Input          : none
++* Return         : MST_ACTIVITY (IC_STATUS[5])
++ *******************************************************************************/
++#define I2c_IsActiveMst(base_addr)   (GET_REG(base_addr + REG_I2C_STATUS)>>5 & 1)
++
++/*******************************************************************************
++* Function Name  : I2c_SetCon
++* Description    : set config
++* Input          : config
++* Return         : None
++ *******************************************************************************/
++#define I2c_SetCon(base_addr,config)      SET_REG(base_addr + REG_I2C_CON,config)
++/*******************************************************************************
++* Function Name  : I2c_GetCon
++* Description    : get config
++* Input          : config
++* Return         : None
++ *******************************************************************************/
++#define I2c_GetCon(base_addr)      GET_REG(base_addr + REG_I2C_CON)
++
++/*******************************************************************************
++* Function Name  : I2c_Status
++* Description    : get i2c status
++* Input          : None
++* Return         : None
++ *******************************************************************************/
++#define I2c_Status(base_addr)     GET_REG(base_addr + REG_I2C_STATUS)
++
++/*******************************************************************************
++* Function Name  : I2c_SetTar
++* Description    : set target address
++* Input          : id
++* Return         : None
++ *******************************************************************************/
++#define I2c_SetTar(base_addr, id)      SET_REG(base_addr + REG_I2C_TAR,id)
++
++/*******************************************************************************
++* Function Name  : I2c_SetIntrMask
++* Description    : set interrupt mask
++* Input          : mask
++* Return         : None
++ *******************************************************************************/
++#define I2c_SetIntrMask(base_addr,mask)   SET_REG(base_addr + REG_I2C_INTR_MASK,mask)
++
++/*******************************************************************************
++* Function Name  : I2c_ClrIntr
++* Description    : clear interrupt
++* Input          : mask
++* Return         : None
++ *******************************************************************************/
++#define I2c_ClrIntr(base_addr,mask)   GET_REG(base_addr + mask)
++/*******************************************************************************
++* Function Name  : I2c_GetTxAbrtSource
++* Description    : get the TX abort source
++* Input          : base_addr
++* Return         : TX_ABRT_SOURCE
++ *******************************************************************************/
++#define I2c_GetTxAbrtSource(base_addr)   GET_REG(base_addr + DW_IC_TX_ABRT_SOURCE)
++
++/*******************************************************************************
++* Function Name  : I2c_TxEmpty
++* Description    : TX_EMPTY interrupt assert
++* Input          : none
++* Return         : TX_EMPTY
++ *******************************************************************************/
++#define I2c_TxEmpty(base_addr)   (GET_REG(base_addr + REG_I2C_RAW_INTR_STAT) & M_TX_EMPTY)
++
++/*******************************************************************************
++* Function Name  : I2c_RxFull
++* Description    : RX_FULL interrupt assert
++* Input          : none
++* Return         : RX_FULL
++ *******************************************************************************/
++#define I2c_RxFull(base_addr)    (GET_REG(base_addr + REG_I2C_RAW_INTR_STAT) & M_RX_FULL)
++/*******************************************************************************
++* Function Name  : I2c_RxEmpty
++* Description    : RX_OVER interrupt assert (note: despite its name this
++*                  macro tests M_RX_OVER, not an RX empty flag)
++* Input          : none
++* Return         : RX_OVER
++ *******************************************************************************/
++#define I2c_RxEmpty(base_addr)    (GET_REG(base_addr + REG_I2C_RAW_INTR_STAT) & M_RX_OVER)
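++
++/*
++ * A minimal polled byte write using the helpers above, as a sketch
++ * (DEV_ADDR is illustrative; M_TX_EMPTY is defined elsewhere):
++ *
++ *   I2c_DisEnable(base);
++ *   I2c_SetTar(base, DEV_ADDR);
++ *   I2c_Enable(base);
++ *   while (!I2c_TxEmpty(base))
++ *       ;
++ *   I2c_Write(base, byte);
++ */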
++
++/* register define */
++typedef union {
++	struct {
++		UINT32 MASTER_MODE		: 1;
++		UINT32 SPEED			: 2;
++		UINT32 IC_10BITADDR_SLAVE	: 1;
++		UINT32 IC_10BITADDR_MASTER	: 1;
++		UINT32 IC_RESTART_EN		: 1;
++		UINT32 IC_SLAVE_DISABLE		: 1;
++		UINT32 reserved_31_7		: 25;
++	} x;
++	UINT32 dw;
++} Reg_I2c_Con;
++
++#endif /* __ASM_ARCH_I2C_H */
+diff --git a/arch/arm/mach-fh/include/mach/io.h b/arch/arm/mach-fh/include/mach/io.h
+new file mode 100644
+index 00000000..7a987d6e
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/io.h
+@@ -0,0 +1,126 @@
++/*
++ * fh io definitions
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#ifndef __ASM_ARCH_IO_H
++#define __ASM_ARCH_IO_H
++
++#include <mach/chip.h>
++
++#define IO_SPACE_LIMIT 0xffffffff
++
++/*
++ * We don't actually have real ISA nor PCI buses, but there is so many
++ * drivers out there that might just work if we fake them...
++ */
++#define __io(a)			__typesafe_io(a)
++#define __mem_pci(a)		(a)
++#define __mem_isa(a)		(a)
++
++#ifndef CONFIG_JLINK_DEBUG
++#define FH_VIRT 		0xFE000000
++
++#define		VA_INTC_REG_BASE	(FH_VIRT + 0x00000)
++#define		VA_TIMER_REG_BASE	(FH_VIRT + 0x10000)
++#define		VA_UART0_REG_BASE	(FH_VIRT + 0x20000)
++//#define		VA_GMAC_REG_BASE	(FH_VIRT + 0x30000)
++//#define		VA_SPI0_REG_BASE	(FH_VIRT + 0x40000)
++//#define		VA_GPIO_REG_BASE	(FH_VIRT + 0x50000)
++//#define		VA_DMAC_REG_BASE	(FH_VIRT + 0x60000)
++//#define		VA_SDC0_REG_BASE	(FH_VIRT + 0x70000)
++//#define		VA_I2C_REG_BASE		(FH_VIRT + 0x80000)
++#define		VA_PMU_REG_BASE		(FH_VIRT + 0x90000)
++//#define		VA_SDC1_REG_BASE	(FH_VIRT + 0xa0000)
++#define		VA_UART1_REG_BASE	(FH_VIRT + 0xb0000)
++#define		VA_PAE_REG_BASE		(FH_VIRT + 0xc0000)
++
++#define		VA_RAM_REG_BASE		(FH_VIRT + 0xd0000)
++#define		VA_DDRC_REG_BASE	(FH_VIRT + 0xe0000)
++#define		VA_UART2_REG_BASE	(FH_VIRT + 0xf0000)
++#ifdef CONFIG_ARCH_FH
++#define VA_CONSOLE_REG_BASE 		VA_UART1_REG_BASE
++#else
++#define VA_CONSOLE_REG_BASE 		VA_UART0_REG_BASE
++#endif
++
++#define I2C_OFFSET	(VA_I2C_REG_BASE - I2C_REG_BASE)
++#define VI2C(x)		(x + I2C_OFFSET)
++
++#define INTC_OFFSET	(VA_INTC_REG_BASE - INTC_REG_BASE)
++#define VINTC(x)	(x + INTC_OFFSET)
++
++#define TIME_OFFSET	(VA_TIMER_REG_BASE - TIMER_REG_BASE)
++#define VTIMER(x)	(x + TIME_OFFSET)
++
++#define UART0_OFFSET	(VA_UART0_REG_BASE - UART0_REG_BASE)
++#define VUART0(x)	(x + UART0_OFFSET)
++
++#define UART1_OFFSET	(VA_UART1_REG_BASE - UART1_REG_BASE)
++#define VUART1(x)	(x + UART1_OFFSET)
++
++#define UART2_OFFSET	(VA_UART2_REG_BASE - UART2_REG_BASE)
++#define VUART2(x)	(x + UART2_OFFSET)
++
++#define SPI0_OFFSET	(VA_SPI0_REG_BASE - SPI0_REG_BASE)
++#define VSPI0(x)	(x + SPI0_OFFSET)
++
++#define GMAC_OFFSET	(VA_GMAC_REG_BASE - GMAC_REG_BASE)
++#define VGMAC(x)	(x + GMAC_OFFSET)
++
++#define DMAC_OFFSET	(VA_DMAC_REG_BASE - DMAC_REG_BASE)
++#define VDMAC(x)	(x + DMAC_OFFSET)
++
++#define SDC0_OFFSET	(VA_SDC0_REG_BASE - SDC0_REG_BASE)
++#define VSDC0(x)	(x + SDC0_OFFSET)
++
++#define SDC1_OFFSET	(VA_SDC1_REG_BASE - SDC1_REG_BASE)
++#define VSDC1(x)	(x + SDC1_OFFSET)
++
++#define GPIO_OFFSET	(VA_GPIO_REG_BASE - GPIO_REG_BASE)
++#define VGPIO(x)	(x + GPIO_OFFSET)
++
++#define PMU_OFFSET	(VA_PMU_REG_BASE - PMU_REG_BASE)
++#define VPMU(x)		(x + PMU_OFFSET)
++
++#define PAE_OFFSET	(VA_PAE_REG_BASE - PAE_REG_BASE)
++#define VPAE(x)		(x + PAE_OFFSET)
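++
++/*
++ * Each V*() helper shifts a physical register address into the fixed
++ * virtual window: VINTC(x) == x + (VA_INTC_REG_BASE - INTC_REG_BASE),
++ * so e.g. VINTC(REG_IRQ_EN_LOW) yields the virtual address of the INTC
++ * enable register regardless of where the block sits physically.
++ */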
++
++#else
++#define		VA_INTC_REG_BASE		INTC_REG_BASE
++#define		VA_TIMER_REG_BASE		TIMER_REG_BASE
++#define		VA_UART0_REG_BASE		UART0_REG_BASE
++#define		VA_UART1_REG_BASE		UART1_REG_BASE
++#define		VA_GMAC_REG_BASE		GMAC_REG_BASE
++#define		VA_DMAC_REG_BASE		DMAC_REG_BASE
++#define		VA_I2C_REG_BASE			I2C_REG_BASE
++#define		VA_SDC0_REG_BASE		SDC0_REG_BASE
++
++#define		VA_SPI0_REG_BASE		SPI0_REG_BASE
++
++#define		VA_GPIO_REG_BASE		GPIO0_REG_BASE
++#define		VA_PMU_REG_BASE			PMU_REG_BASE
++
++//#define		VA_GPIO_REG_BASE		(FH_VIRT + 0x500000)
++
++#define VINTC(x)		 x
++#define VTIMER(x)		x
++#define VUART0(x)		x
++#define VUART1(x)		x
++#define VGMAC(x)		x
++
++#define VDMAC(x)		x
++#define VI2C(x)			x
++#define VSDC0(x)			x
++
++#define VSPI0(x)		x
++#define VPMU(x)		x
++
++#endif
++#endif /* __ASM_ARCH_IO_H */
+diff --git a/arch/arm/mach-fh/include/mach/iomux.h b/arch/arm/mach-fh/include/mach/iomux.h
+new file mode 100644
+index 00000000..398b0a6e
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/iomux.h
+@@ -0,0 +1,165 @@
++#ifndef IOMUX_H_
++#define IOMUX_H_
++#include <linux/types.h>
++#include <linux/io.h>
++#include <mach/io.h>
++
++#define IOMUX_PADTYPE(n)		(Iomux_PadType##n *)
++#define IOMUX_PUPD_NONE			0
++#define IOMUX_PUPD_DOWN			1
++#define IOMUX_PUPD_UP			2
++#define IOMUX_PUPD_KEEPER		3
++//#define IOMUX_DEBUG
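++
++/*
++ * IOMUX_PADTYPE(n) casts a raw register pointer to the layout of pad
++ * type n; the pattern used by iomux.c looks like:
++ *
++ *	(IOMUX_PADTYPE(20)pad->reg)->bit.mfs = pad->func_sel;
++ *	(IOMUX_PADTYPE(20)pad->reg)->bit.pu_pd = IOMUX_PUPD_UP;
++ */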
++
++
++typedef union {
++	struct {
++		__u32	sr		: 1;
++		__u32	reserved_3_1	: 3;
++
++		__u32	e8_e4		: 2;
++		__u32	reserved_31_6	: 24;
++
++	} bit;
++	__u32 dw;
++} Iomux_PadType5;
++
++typedef union {
++	struct {
++		__u32	sr		: 1;
++		__u32	reserved_3_1	: 3;
++
++		__u32	e8_e4		: 2;
++		__u32	reserved_7_6	: 2;
++
++		__u32	mfs		: 1;
++		__u32	reserved_31_9	: 23;
++
++	} bit;
++	__u32 dw;
++} Iomux_PadType8;
++
++
++typedef union {
++	struct {
++		__u32	smt		: 1;
++		__u32	reserved_3_1	: 3;
++
++		__u32	ie		: 1;
++		__u32	reserved_7_5	: 3;
++
++		__u32	pu_pd		: 2;
++		__u32	reserved_31_10	: 22;
++
++	} bit;
++	__u32 dw;
++} Iomux_PadType9;
++
++
++typedef union {
++	struct {
++		__u32	e4_e2		: 2;
++		__u32	reserved_3_2	: 2;
++
++		__u32	smt		: 1;
++		__u32	reserved_7_5	: 3;
++
++		__u32	ie		: 1;
++		__u32	reserved_11_9	: 3;
++
++		__u32	mfs		: 2;
++		__u32	reserved_31_14	: 18;
++
++	} bit;
++	__u32 dw;
++} Iomux_PadType13;
++
++typedef union {
++	struct {
++		__u32	sr		: 1;
++		__u32	reserved_3_1	: 3;
++
++		__u32	e8_e4		: 2;
++		__u32	reserved_7_6	: 2;
++
++		__u32	smt		: 1;
++		__u32	reserved_11_9	: 3;
++
++		__u32	ie		: 1;
++		__u32	e		: 1;	//only for PAD_MAC_REF_CLK_CFG (0x00a4)
++		__u32	reserved_15_12	: 2;
++
++		__u32	pu_pd		: 2;
++		__u32	reserved_31_18	: 14;
++
++	} bit;
++	__u32 dw;
++} Iomux_PadType17;
++
++typedef union {
++	struct {
++		__u32	sr		: 1;
++		__u32	reserved_3_1	: 3;
++
++		__u32	e4_e2		: 2;
++		__u32	reserved_7_6	: 2;
++
++		__u32	smt		: 1;
++		__u32	reserved_11_9	: 3;
++
++		__u32	ie		: 1;
++		__u32	reserved_15_13	: 3;
++
++		__u32	pu_pd		: 2;
++		__u32	reserved_19_18	: 2;
++
++		__u32	mfs		: 1;
++		__u32	reserved_31_21	: 11;
++
++	} bit;
++	__u32 dw;
++} Iomux_PadType20;
++
++
++typedef union {
++	struct {
++		__u32	sr		: 1;
++		__u32	reserved_3_1	: 3;
++
++		__u32	e4_e2		: 2;
++		__u32	reserved_7_6	: 2;
++
++		__u32	smt		: 1;
++		__u32	reserved_11_9	: 3;
++
++		__u32	ie		: 1;
++		__u32	reserved_15_13	: 3;
++
++		__u32	pu_pd		: 2;
++		__u32	reserved_19_18	: 2;
++
++		__u32	mfs		: 2;
++		__u32	reserved_31_21	: 10;
++
++	} bit;
++	__u32 dw;
++} Iomux_PadType21;
++
++typedef struct {
++	u32 *reg;
++	u32 reg_offset;
++	char *func_name[4];
++	int reg_type;
++	int func_sel;
++	int drv_cur;
++	int pupd;
++} Iomux_Pad;
++
++typedef struct {
++	void __iomem *base;
++	Iomux_Pad *pads;
++} Iomux_Object;
++
++void fh_iomux_init(Iomux_Object *iomux_obj);
++
++#endif /* IOMUX_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/irqs.h b/arch/arm/mach-fh/include/mach/irqs.h
+new file mode 100644
+index 00000000..0138006f
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/irqs.h
+@@ -0,0 +1,43 @@
++/*
++ * fh interrupt controller definitions
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#ifndef __ASM_ARCH_IRQS_H
++#define __ASM_ARCH_IRQS_H
++
++#include <mach/chip.h>
++
++#define		REG_IRQ_EN_LOW				(INTC_REG_BASE + 0x0000)
++#define		REG_IRQ_EN_HIGH				(INTC_REG_BASE + 0x0004)
++#define		REG_IRQ_IRQMASK_LOW			(INTC_REG_BASE + 0x0008)
++#define		REG_IRQ_IRQMASK_HIGH		(INTC_REG_BASE + 0x000C)
++#define		REG_IRQ_IRQFORCE_LOW		(INTC_REG_BASE + 0x0010)
++#define		REG_IRQ_IRQFORCE_HIGH		(INTC_REG_BASE + 0x0014)
++#define		REG_IRQ_RAWSTATUS_LOW		(INTC_REG_BASE + 0x0018)
++#define		REG_IRQ_RAWSTATUS_HIGH		(INTC_REG_BASE + 0x001C)
++#define		REG_IRQ_STATUS_LOW			(INTC_REG_BASE + 0x0020)
++#define		REG_IRQ_STATUS_HIGH			(INTC_REG_BASE + 0x0024)
++#define		REG_IRQ_MASKSTATUS_LOW		(INTC_REG_BASE + 0x0028)
++#define		REG_IRQ_MASKSTATUS_HIGH		(INTC_REG_BASE + 0x002C)
++#define		REG_IRQ_FINSTATUS_LOW		(INTC_REG_BASE + 0x0030)
++#define		REG_IRQ_FINSTATUS_HIGH		(INTC_REG_BASE + 0x0034)
++#define		REG_FIQ_EN_LOW				(INTC_REG_BASE + 0x02C0)
++#define		REG_FIQ_EN_HIGH				(INTC_REG_BASE + 0x02C4)
++#define		REG_FIQ_FIQMASK_LOW			(INTC_REG_BASE + 0x02C8)
++#define		REG_FIQ_FIQMASK_HIGH		(INTC_REG_BASE + 0x02CC)
++#define		REG_FIQ_FIQFORCE_LOW		(INTC_REG_BASE + 0x02D0)
++#define		REG_FIQ_FIQFORCE_HIGH		(INTC_REG_BASE + 0x02D4)
++#define		REG_FIQ_RAWSTATUS_LOW		(INTC_REG_BASE + 0x02D8)
++#define		REG_FIQ_RAWSTATUS_HIGH		(INTC_REG_BASE + 0x02DC)
++#define		REG_FIQ_STATUS_LOW			(INTC_REG_BASE + 0x02E0)
++#define		REG_FIQ_STATUS_HIGH			(INTC_REG_BASE + 0x02E4)
++#define		REG_FIQ_FINSTATUS_LOW		(INTC_REG_BASE + 0x02E8)
++#define		REG_FIQ_FINSTATUS_HIGH		(INTC_REG_BASE + 0x02EC)
++
++#endif /* __ASM_ARCH_IRQS_H */
+diff --git a/arch/arm/mach-fh/include/mach/memory.h b/arch/arm/mach-fh/include/mach/memory.h
+new file mode 100644
+index 00000000..c8c984a9
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/memory.h
+@@ -0,0 +1,27 @@
++/*
++ * fh memory space definitions
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#ifndef __ASM_ARCH_MEMORY_H
++#define __ASM_ARCH_MEMORY_H
++
++/**************************************************************************
++ * Included Files
++ **************************************************************************/
++#include <asm/page.h>
++#include <asm/sizes.h>
++
++/**************************************************************************
++ * Definitions
++ **************************************************************************/
++#define FH_DDR_BASE	0xA0000000
++
++#define PLAT_PHYS_OFFSET FH_DDR_BASE
++
++#endif /* __ASM_ARCH_MEMORY_H */
+diff --git a/arch/arm/mach-fh/include/mach/pinctrl.h b/arch/arm/mach-fh/include/mach/pinctrl.h
+new file mode 100644
+index 00000000..90c02c11
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/pinctrl.h
+@@ -0,0 +1,118 @@
++#ifndef PINCTRL_H_
++#define PINCTRL_H_
++#include "pinctrl_osdep.h"
++#include <linux/seq_file.h>
++
++#define PINCTRL_UNUSED      (-1)
++
++#define PUPD_NONE           (0)
++#define PUPD_UP             (1)
++#define PUPD_DOWN           (2)
++
++#define INPUT_DISABLE       (0)
++#define INPUT_ENABLE        (1)
++#define OUTPUT_DISABLE      (0)
++#define OUTPUT_ENABLE       (1)
++
++#define FUNC0               (0)
++#define FUNC1               (1)
++#define FUNC2               (2)
++#define FUNC3               (3)
++#define FUNC4               (4)
++#define FUNC5               (5)
++#define FUNC6               (6)
++#define FUNC7               (7)
++
++#define NEED_CHECK_PINLIST  (1)
++
++#define MAX_FUNC_NUM    8
++
++#define PINCTRL_FUNC(name, id, sel, pupd)           \
++PinCtrl_Pin PAD##id##_##name =                      \
++{                                                   \
++    .pad_id         = id,                           \
++    .func_name      = #name,                        \
++    .reg_offset     = (id * 4),                     \
++    .func_sel       = sel,                          \
++    .pullup_pulldown = pupd,                        \
++}
++
++#define PINCTRL_MUX(pname, sel, ...)                \
++PinCtrl_Mux MUX_##pname =                           \
++{                                                   \
++    .mux_pin = { __VA_ARGS__ },                     \
++    .cur_pin = sel,                                 \
++}
++
++#define PINCTRL_DEVICE(name, count, ...)            \
++typedef struct                                      \
++{                                                   \
++    char *dev_name;                                 \
++    int mux_count;                                  \
++    OS_LIST list;                                   \
++    PinCtrl_Mux *mux[count];                        \
++} PinCtrl_Device_##name;                            \
++PinCtrl_Device_##name pinctrl_dev_##name =          \
++{                                                   \
++     .dev_name = #name,                             \
++     .mux_count = count,                            \
++     .mux = { __VA_ARGS__ },                        \
++}
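++
++/*
++ * Sketch of how a board iopad header combines these macros (the pad
++ * number and names below are hypothetical, not a real pad table):
++ *
++ *	PINCTRL_FUNC(UART0_IN, 48, FUNC0, PUPD_UP);
++ *	PINCTRL_FUNC(GPIO48,   48, FUNC1, PUPD_NONE);
++ *	PINCTRL_MUX(UART0_IN, 0, &PAD48_UART0_IN, &PAD48_GPIO48);
++ *	PINCTRL_DEVICE(UART0, 1, &MUX_UART0_IN);
++ */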
++
++typedef union {
++    struct
++    {
++        unsigned int        : 12;   //0~11
++        unsigned int ie     : 1;    //12
++        unsigned int        : 3;    //13~15
++        unsigned int pdn    : 1;    //16
++        unsigned int        : 3;    //17~19
++        unsigned int pun    : 1;    //20
++        unsigned int        : 3;    //21~23
++        unsigned int mfs    : 4;    //24~27
++        unsigned int oe     : 1;    //28
++        unsigned int        : 3;    //29~31
++    } bit;
++    unsigned int dw;
++} PinCtrl_Register;
++
++typedef struct
++{
++	char *func_name;
++	PinCtrl_Register *reg;
++	int pad_id			: 12;
++	unsigned int reg_offset		: 12;
++	int func_sel			: 4;
++	int input_enable		: 1;
++	int output_enable		: 1;
++	int pullup_pulldown		: 2;
++} PinCtrl_Pin;
++
++typedef struct
++{
++    //todo: int lock;
++    int cur_pin;
++    PinCtrl_Pin *mux_pin[MUX_NUM];
++} PinCtrl_Mux;
++
++typedef struct
++{
++    void *vbase;
++    void *pbase;
++    PinCtrl_Pin *pinlist[PAD_NUM];
++} PinCtrl_Object;
++
++typedef struct
++{
++    char *dev_name;
++    int mux_count;
++    OS_LIST list;
++    void *mux;
++} PinCtrl_Device;
++
++void fh_pinctrl_init(unsigned int base);
++void fh_pinctrl_prt(struct seq_file *sfile);
++int fh_pinctrl_smux(char *devname, char *muxname, int muxsel, unsigned int flag);
++int fh_pinctrl_sdev(char *devname, unsigned int flag);
++void fh_pinctrl_init_devicelist(OS_LIST *list);
++#endif /* PINCTRL_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/pinctrl_osdep.h b/arch/arm/mach-fh/include/mach/pinctrl_osdep.h
+new file mode 100644
+index 00000000..8e3d24de
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/pinctrl_osdep.h
+@@ -0,0 +1,23 @@
++#ifndef PINCTRL_OSDEP_H_
++#define PINCTRL_OSDEP_H_
++
++#include <linux/list.h>
++#include <linux/kernel.h>
++#include <mach/fh_predefined.h>
++#include <linux/string.h>
++
++#define OS_LIST_INIT LIST_HEAD_INIT
++#define OS_LIST struct list_head
++#define OS_PRINT printk
++#define OS_LIST_EMPTY INIT_LIST_HEAD
++#define OS_NULL NULL
++
++#define PINCTRL_ADD_DEVICE(name)			\
++	list_add(&pinctrl_dev_##name.list, list)
++
++#define PAD_NUM             (77)
++
++#define MUX_NUM             (6)
++
++#endif /* PINCTRL_OSDEP_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/pmu.h b/arch/arm/mach-fh/include/mach/pmu.h
+new file mode 100644
+index 00000000..e9de6447
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/pmu.h
+@@ -0,0 +1,15 @@
++
++#ifndef _FH_PMU_H_
++#define _FH_PMU_H_
++
++#include <linux/types.h>
++
++void fh_pmu_set_reg(u32 offset, u32 data);
++u32 fh_pmu_get_reg(u32 offset);
++int fh_pmu_init(void);
++
++void fh_pmu_stop(void);
++
++void fh_pae_set_reg(u32 offset, u32 data);
++
++#endif /* _FH_PMU_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/rtc.h b/arch/arm/mach-fh/include/mach/rtc.h
+new file mode 100644
+index 00000000..594259eb
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/rtc.h
+@@ -0,0 +1,238 @@
++/*
++ * rtc.h
++ *
++ *  Created on: Aug 18, 2016
++ *      Author: fullhan
++ */
++
++#ifndef ARCH_ARM_MACH_FH_INCLUDE_MACH_RTC_H_
++#define ARCH_ARM_MACH_FH_INCLUDE_MACH_RTC_H_
++#include "fh_predefined.h"
++
++/*
++ * Registers offset
++ */
++#define  FH_RTC_COUNTER            0x0
++#define  FH_RTC_OFFSET             0x4
++#define  FH_RTC_POWER_FAIL         0x8
++#define  FH_RTC_ALARM_COUNTER      0xC
++#define  FH_RTC_INT_STAT           0x10
++#define  FH_RTC_INT_EN             0x14
++#define  FH_RTC_SYNC               0x18
++#define  FH_RTC_DEBUG              0x1C
++#define  FH_RTC_USER_REG           0x20
++
++#define SEC_BIT_START		0
++#define SEC_VAL_MASK		0x3f
++
++#define MIN_BIT_START		6
++#define MIN_VAL_MASK		0xfc0
++
++#define HOUR_BIT_START		12
++#define HOUR_VAL_MASK		0x1f000
++
++#define DAY_BIT_START		17
++#define DAY_VAL_MASK		0xfffe0000
++
++#define FH_RTC_ISR_SEC_POS	(1<<0)
++#define FH_RTC_ISR_MIN_POS	(1<<1)
++#define FH_RTC_ISR_HOUR_POS	(1<<2)
++#define FH_RTC_ISR_DAY_POS	(1<<3)
++#define FH_RTC_ISR_ALARM_POS	(1<<4)
++#define FH_RTC_ISR_SEC_MASK	(1<<27)
++#define FH_RTC_ISR_MIN_MASK	(1<<28)
++#define FH_RTC_ISR_HOUR_MASK	(1<<29)
++#define FH_RTC_ISR_DAY_MASK	(1<<30)
++#define FH_RTC_ISR_ALARM_MASK	(1<<31)
++
++// input: val=fh_rtc_get_time(base_addr)
++#define FH_GET_RTC_SEC(val)		 ((val & SEC_VAL_MASK) >> SEC_BIT_START)
++#define FH_GET_RTC_MIN(val)		 ((val & MIN_VAL_MASK) >> MIN_BIT_START)
++#define FH_GET_RTC_HOUR(val)	 ((val & HOUR_VAL_MASK) >> HOUR_BIT_START)
++#define FH_GET_RTC_DAY(val)	 	 ((val & DAY_VAL_MASK) >> DAY_BIT_START)
++
++#define ELAPSED_LEAP_YEARS(y) ((((y)-1)/4)-(((y)-1)/100)+(((y)+299)/400)-17)
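++
++/*
++ * Worked example (illustrative): a raw counter value of 0x62D47 decodes
++ * with the field macros above as
++ *	FH_GET_RTC_DAY(0x62D47)  = 3	(bits 31:17)
++ *	FH_GET_RTC_HOUR(0x62D47) = 2	(bits 16:12)
++ *	FH_GET_RTC_MIN(0x62D47)  = 53	(bits 11:6)
++ *	FH_GET_RTC_SEC(0x62D47)  = 7	(bits 5:0)
++ * i.e. day 3, 02:53:07 relative to the configured base date.
++ */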
++
++#define FH_RTC_PROC_FILE    "driver/fh_rtc"
++
++struct fh_rtc_platform_data
++{
++	u32 clock_in;
++	char *clk_name;
++	char *dev_name;
++	u32 base_year;
++	u32 base_month;
++	u32 base_day;
++	int sadc_channel;
++};
++enum
++{
++	init_done = 1,
++	initing = 0
++};
++
++/*******************************************************************************
++* Function Name  : fh_rtc_interrupt_disabel
++* Description    : disable the rtc interrupt
++* Input          : rtc base addr
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define  fh_rtc_interrupt_disabel(base_addr)                   SET_REG(base_addr+FH_RTC_INT_EN,DISABLE)
++
++/*******************************************************************************
++* Function Name  : fh_rtc_get_time
++* Description    : get rtc current time
++* Input          : rtc base addr
++* Output         : None
++* Return         : current RTC counter value
++*
++ *******************************************************************************/
++#define  fh_rtc_get_time(base_addr)                  			GET_REG(base_addr+FH_RTC_COUNTER)
++
++/*******************************************************************************
++* Function Name  : fh_rtc_set_time
++* Description    : set rtc current time
++* Input          : rtc base addr
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define  fh_rtc_set_time(base_addr,value)                  		SET_REG(base_addr+FH_RTC_COUNTER,value)
++
++/*******************************************************************************
++* Function Name  : fh_rtc_set_alarm_time
++* Description    : set rtc alarm
++* Input          : rtc base addr
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define  fh_rtc_set_alarm_time(base_addr,value)                  SET_REG(base_addr+FH_RTC_ALARM_COUNTER,value)
++
++/*******************************************************************************
++* Function Name  : fh_rtc_get_alarm_time
++* Description    : get alarm register
++* Input          : rtc base addr
++* Output         : None
++* Return         : alarm counter register value
++*
++ *******************************************************************************/
++#define  fh_rtc_get_alarm_time(base_addr)                 		 GET_REG(base_addr+FH_RTC_ALARM_COUNTER)
++
++/*******************************************************************************
++* Function Name  : fh_rtc_get_int_status
++* Description    : get rtc current interrupt status
++* Input          : rtc base addr
++* Output         : None
++* Return         : interrupt status register value
++*
++ *******************************************************************************/
++#define  fh_rtc_get_int_status(base_addr)                  		  GET_REG(base_addr+FH_RTC_INT_STAT)
++/*******************************************************************************
++* Function Name  : fh_rtc_enable_interrupt
++* Description    : enable rtc interrupt
++* Input          : rtc base addr
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define  fh_rtc_enable_interrupt(base_addr,value)                 SET_REG(base_addr+FH_RTC_INT_EN,value|GET_REG(base_addr+FH_RTC_INT_EN))
++/*******************************************************************************
++* Function Name  : fh_rtc_disenable_interrupt
++* Description    : disable interrupt
++* Input          : rtc base addr
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define  fh_rtc_disenable_interrupt(base_addr,value)             SET_REG(base_addr+FH_RTC_INT_EN,(~value)&GET_REG(base_addr+FH_RTC_INT_EN))
++
++/*******************************************************************************
++* Function Name  : fh_rtc_get_enabled_interrupt
++* Description    : get the currently enabled rtc interrupts
++* Input          : rtc base addr
++* Output         : None
++* Return         : interrupt enable register value
++*
++ *******************************************************************************/
++#define  fh_rtc_get_enabled_interrupt(base_addr)                 GET_REG(base_addr+FH_RTC_INT_EN)
++/*******************************************************************************
++* Function Name  : fh_rtc_set_mask_interrupt
++* Description    : set rtc interrupt mask
++* Input          : rtc base addr
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define  fh_rtc_set_mask_interrupt(base_addr,value)             SET_REG(base_addr+FH_RTC_INT_EN,value|GET_REG(base_addr+FH_RTC_INT_EN))
++/*******************************************************************************
++* Function Name  : fh_rtc_clear_interrupt_status
++* Description    : clear rtc interrupt status
++* Input          : rtc base addr
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define  fh_rtc_clear_interrupt_status(base_addr,value)         SET_REG(base_addr+FH_RTC_INT_STAT,(~value)&GET_REG(base_addr+FH_RTC_INT_STAT))
++/*******************************************************************************
++* Function Name  : fh_rtc_get_offset
++* Description    : get rtc offset
++* Input          : rtc base addr
++* Output         : None
++* Return         : offset register value
++*
++ *******************************************************************************/
++#define  fh_rtc_get_offset(base_addr)                  			GET_REG(base_addr+FH_RTC_OFFSET)
++/*******************************************************************************
++* Function Name  : fh_rtc_get_power_fail
++* Description    : get rtc power fail register
++* Input          : rtc base addr
++* Output         : None
++* Return         : power fail register value
++*
++ *******************************************************************************/
++#define  fh_rtc_get_power_fail(base_addr)                  		GET_REG(base_addr+FH_RTC_POWER_FAIL)
++
++/*******************************************************************************
++* Function Name  : fh_rtc_get_sync
++* Description    : get rtc sync register value
++* Input          : rtc base addr
++* Output         : None
++* Return         : sync register value
++*
++ *******************************************************************************/
++#define  fh_rtc_get_sync(base_addr)                  			GET_REG(base_addr+FH_RTC_SYNC)
++
++/*******************************************************************************
++* Function Name  : fh_rtc_set_sync
++* Description    : set rtc sync register value
++* Input          : rtc base addr,init_done/initing
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define  fh_rtc_set_sync(base_addr,value)                  		SET_REG(base_addr+FH_RTC_SYNC,value)
++
++/*******************************************************************************
++* Function Name  : fh_rtc_get_debug
++* Description    : get rtc debug register value
++* Input          : rtc base addr
++* Output         : None
++* Return         : debug register value
++*
++ *******************************************************************************/
++#define  fh_rtc_get_debug(base_addr)                  			GET_REG(base_addr+FH_RTC_DEBUG)
++
++/*******************************************************************************
++* Function Name  : fh_rtc_set_debug
++* Description    : set rtc debug register value
++* Input          : rtc base addr,x pclk
++* Output         : None
++* Return         : None
++*
++ *******************************************************************************/
++#define  fh_rtc_set_debug(base_addr,value)     					SET_REG(base_addr+FH_RTC_DEBUG,value)
++#endif /* ARCH_ARM_MACH_FH_INCLUDE_MACH_RTC_H_ */
+diff --git a/arch/arm/mach-fh/include/mach/spi.h b/arch/arm/mach-fh/include/mach/spi.h
+new file mode 100644
+index 00000000..cc350f0e
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/spi.h
+@@ -0,0 +1,57 @@
++/*
++ * Copyright 2009 Texas Instruments.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef __ARCH_ARM_FH_SPI_H
++#define __ARCH_ARM_FH_SPI_H
++
++#include <linux/io.h>
++#include <linux/scatterlist.h>
++
++#define SPI_MASTER_CONTROLLER_MAX_SLAVE		(2)
++#define SPI_TRANSFER_USE_DMA			(0x77888877)
++
++struct fh_spi_cs {
++	u32 GPIO_Pin;
++	char *name;
++};
++
++struct fh_spi_chip {
++	u8 poll_mode;	/* 0 for controller polling mode */
++	u8 type;	/* SPI/SSP/Microwire */
++	u8 enable_dma;
++	void *cs_control;
++//	void (*cs_control)(u32 command);
++};
++
++struct fh_spi_platform_data {
++	u32 apb_clock_in;
++	u32 fifo_len;
++	u32 slave_max_num;
++	struct fh_spi_cs cs_data[SPI_MASTER_CONTROLLER_MAX_SLAVE];
++	/* fields below are needed for DMA transfers */
++	u32 dma_transfer_enable;
++	u32 rx_handshake_num;
++	u32 tx_handshake_num;
++	u32 bus_no;
++	char *clk_name;
++	u32 rx_dma_channel;
++	u32 tx_dma_channel;
++};
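++
++/*
++ * Minimal board-file sketch (all values are placeholders, not a real
++ * configuration):
++ *
++ *	static struct fh_spi_platform_data fh_spi0_data = {
++ *		.apb_clock_in  = 50000000,
++ *		.fifo_len      = 64,
++ *		.slave_max_num = 1,
++ *		.clk_name      = "spi0_clk",
++ *		.cs_data[0]    = { .GPIO_Pin = 54, .name = "spi0_cs0" },
++ *	};
++ */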
++
++#endif	/* __ARCH_ARM_FH_SPI_H */
+diff --git a/arch/arm/mach-fh/include/mach/sram.h b/arch/arm/mach-fh/include/mach/sram.h
+new file mode 100644
+index 00000000..46d2dcd1
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/sram.h
+@@ -0,0 +1,25 @@
++/*
++ * mach/sram.h - FH simple SRAM allocator
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#ifndef __MACH_SRAM_H
++#define __MACH_SRAM_H
++
++/*
++ * SRAM allocations return a CPU virtual address, or NULL on error.
++ * If a DMA address is requested and the SRAM supports DMA, its
++ * mapped address is also returned.
++ *
++ * Errors include SRAM memory not being available, and requesting
++ * DMA mapped SRAM on systems which don't allow that.
++ */
++extern void *sram_alloc(size_t len, dma_addr_t *dma);
++extern void sram_free(void *addr, size_t len);
++
++#endif /* __MACH_SRAM_H */
+diff --git a/arch/arm/mach-fh/include/mach/system.h b/arch/arm/mach-fh/include/mach/system.h
+new file mode 100644
+index 00000000..fbcda2b2
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/system.h
+@@ -0,0 +1,40 @@
++/*
++ * mach/system.h
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#ifndef __ASM_ARCH_SYSTEM_H
++#define __ASM_ARCH_SYSTEM_H
++
++#include <mach/io.h>
++#include <mach/fh_predefined.h>
++#include <mach/pmu.h>
++#include <linux/compiler.h>
++#include <linux/types.h>
++#include <asm/mach/time.h>
++#include <asm/proc-fns.h>
++
++extern void fh_intc_init(void);
++
++void fh_irq_suspend(void);
++void fh_irq_resume(void);
++
++extern unsigned int fh_cpu_suspend_sz;
++extern void fh_cpu_suspend(void);
++
++static inline void arch_idle(void)
++{
++	/* cpu_do_idle(); */
++}
++
++static inline void arch_reset(char mode, const char *cmd)
++{
++	fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0x7fffffff);
++}
++
++#endif /* __ASM_ARCH_SYSTEM_H */
+diff --git a/arch/arm/mach-fh/include/mach/timex.h b/arch/arm/mach-fh/include/mach/timex.h
+new file mode 100644
+index 00000000..308fd1ee
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/timex.h
+@@ -0,0 +1,22 @@
++/*
++ * FH timer subsystem
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#ifndef __ASM_ARCH_TIMEX_H
++#define __ASM_ARCH_TIMEX_H
++
++#define TIMER0_CLK			(1000000)
++#define TIMER1_CLK			(1000000)
++#define PAE_PTS_CLK			(1000000)
++
++#define CLOCK_TICK_RATE			TIMER0_CLK
++
++extern struct sys_timer fh_timer;
++
++#endif /* __ASM_ARCH_TIMEX_H */
+diff --git a/arch/arm/mach-fh/include/mach/uncompress.h b/arch/arm/mach-fh/include/mach/uncompress.h
+new file mode 100644
+index 00000000..819c6257
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/uncompress.h
+@@ -0,0 +1,57 @@
++/*
++ * Serial port stubs for kernel decompress status messages
++ *
++ * Initially based on:
++ * arch/arm/plat-omap/include/mach/uncompress.h
++ *
++ * Original copyrights follow.
++ *
++ * Copyright (C) 2000 RidgeRun, Inc.
++ * Author: Greg Lonnon <glonnon@ridgerun.com>
++ *
++ * Rewritten by:
++ * Author: <source@mvista.com>
++ * 2004 (c) MontaVista Software, Inc.
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++#include <linux/types.h>
++
++#include <asm/mach-types.h>
++#include <mach/chip.h>
++
++#define     REG_UART_THR		(0x0000)
++#define     REG_UART_USR		(0x007c)
++
++#define REG_UART0_THR	(*(unsigned char *)(CONSOLE_REG_BASE + REG_UART_THR))
++#define REG_UART0_USR	(*(unsigned char *)(CONSOLE_REG_BASE + REG_UART_USR))
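++
++/*
++ * USR bit 1 is assumed to be the DesignWare "transmit FIFO not full"
++ * flag (gates putc below); bit 2 the "transmit FIFO empty" flag
++ * (gates flush).
++ */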
++
++static void putc(char c)
++{
++	while (!(REG_UART0_USR & (1 << 1)))
++		barrier();
++
++	REG_UART0_THR = c;
++}
++
++static inline void flush(void)
++{
++	while (!(REG_UART0_USR & (1 << 2)))
++		barrier();
++}
++
++static inline void set_uart_info(u32 phys, void __iomem *virt)
++{
++
++}
++
++static inline void __arch_decomp_setup(unsigned long arch_id)
++{
++
++}
++
++#define arch_decomp_setup()	__arch_decomp_setup(arch_id)
++#define arch_decomp_wdog()
+diff --git a/arch/arm/mach-fh/include/mach/vmalloc.h b/arch/arm/mach-fh/include/mach/vmalloc.h
+new file mode 100644
+index 00000000..7796486e
+--- /dev/null
++++ b/arch/arm/mach-fh/include/mach/vmalloc.h
+@@ -0,0 +1,16 @@
++/*
++ * FH vmalloc definitions (derived from the DaVinci version)
++ *
++ * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
++ *
++ * 2007 (c) MontaVista Software, Inc. This file is licensed under
++ * the terms of the GNU General Public License version 2. This program
++ * is licensed "as is" without any warranty of any kind, whether express
++ * or implied.
++ */
++#include <mach/hardware.h>
++
++/*
++ * Allow vmalloc up to the fixed IO mapping: with the usual 0xC0000000
++ * PAGE_OFFSET this puts VMALLOC_END at 0xFE000000 (FH_VIRT).
++ */
++#define VMALLOC_END            (PAGE_OFFSET + 0x3e000000)
+diff --git a/arch/arm/mach-fh/iomux.c b/arch/arm/mach-fh/iomux.c
+new file mode 100644
+index 00000000..f042a4bd
+--- /dev/null
++++ b/arch/arm/mach-fh/iomux.c
+@@ -0,0 +1,854 @@
++#include <mach/iomux.h>
++#include <mach/pmu.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/printk.h>
++#include <linux/slab.h>
++#include <linux/io.h>
++
++
++Iomux_Pad fh_iomux_cfg[] = {
++	{
++		.func_name = { "RESETN", "", "", "", },
++		.reg_type  = 9,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = -1,
++	},
++	{
++		.func_name = { "TEST", "", "", "", },
++		.reg_type  = 9,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = -1,
++	},
++	{
++		.func_name = { "CIS_CLK", "", "", "", },
++		.reg_type  = 5,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_HSYNC", "GPIO20", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_VSYNC", "GPIO21", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_PCLK", "", "", "", },
++		.reg_type  = 9,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 0,
++	},
++	{
++		.func_name = { "CIS_D0", "GPIO22", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D1", "GPIO23", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D2", "GPIO24", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D3", "GPIO25", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D4", "GPIO26", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D5", "GPIO27", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D6", "GPIO28", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D7", "GPIO29", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D8", "GPIO30", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D9", "GPIO31", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D10", "GPIO32", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_D11", "GPIO33", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_REF_CLK", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "MAC_MDC", "GPIO34", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 0,
++	},
++	{
++		.func_name = { "MAC_MDIO", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_COL", "GPIO35", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_CRS", "GPIO36", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_RXCK", "", "", "", },
++		.reg_type  = 9,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = -1,
++	},
++	{
++		.func_name = { "MAC_RXD0", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = -1,
++	},
++
++	{
++		.func_name = { "MAC_RXD1", "GPIO38", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_RXD2", "GPIO39", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_RXD3", "GPIO40", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_RXDV", "GPIO41", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_TXCK", "", "", "", },
++		.reg_type  = 9,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = -1,
++	},
++	{
++		.func_name = { "MAC_TXD0", "GPIO42", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_TXD1", "GPIO43", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_TXD2", "GPIO44", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_TXD3", "GPIO45", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_TXEN", "GPIO46", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "MAC_RXER", "GPIO47", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO0", "ARC_JTAG_TCK", "GPIO0", "CIS_SSI0_CSN1", },
++		.reg_type  = 21,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO1", "ARC_JTAG_TRSTN", "GPIO1", "CIS_SSI0_RXD", },
++		.reg_type  = 21,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO2", "ARC_JTAG_TMS", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO3", "ARC_JTAG_TDI", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO4", "ARC_JTAG_TDO", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "JTAG_TCK", "GPIO5", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "JTAG_TRSTN", "GPIO6", "PWM_OUT3", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "JTAG_TMS", "GPIO7", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "JTAG_TDI", "GPIO8", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "JTAG_TDO", "GPIO9", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO10", "UART1_OUT", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO11", "UART1_IN", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO12", "PWM_OUT0", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO13", "PWM_OUT1", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "GPIO14", "PWM_OUT2", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "UART0_IN", "GPIO48", "UART0_IN", " I2S_WS", },
++		.reg_type  = 21,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "UART0_OUT", "GPIO49", "UART0_OUT", "I2S_CLK", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_SCL", "GPIO56", "CIS_SCL", "CIS_SSI0_CLK", },
++		.reg_type  = 13,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "CIS_SDA", "GPIO57", "CIS_SDA", "CIS_SSI0_TXD", },
++		.reg_type  = 13,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SCL1", "GPIO50", "SCL1", "I2S_DI", },
++		.reg_type  = 21,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SDA1", "GPIO51", "I2S_DO", "", },
++		.reg_type  = 21,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SSI0_CLK", "", "", "", },
++		.reg_type  = 5,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SSI0_TXD", "", "", "", },
++		.reg_type  = 5,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SSI0_CSN0", "GPIO54", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SSI0_CSN1", "GPIO55", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SSI0_RXD", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = -1,
++	},
++	{
++		.func_name = { "SD0_CD", "GPIO52", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SD0_WP", "GPIO53", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SD0_CLK", "", "", "", },
++		.reg_type  = 5,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "SD0_CMD_RSP", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "SD0_DATA0", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "SD0_DATA1", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 2,
++	},
++	{
++		.func_name = { "SD0_DATA2", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "SD0_DATA3", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "SD1_CLK", "SSI1_CLK", "", "", },
++		.reg_type  = 8,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_NONE,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SD1_CD", "GPIO_58", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SD1_WP", "GPIO_59", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++	{
++		.func_name = { "SD1_DATA0", "SSI1_TXD", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "SD1_DATA1", "SSI1_CSN0", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "SD1_DATA2", "SSI1_CSN1", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "SD1_DATA3", "", "", "", },
++		.reg_type  = 17,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "SD1_CMD_RSP", "SSI1_RXD", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = 3,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "CLK_SW0", "", "", "", },
++		.reg_type  = 9,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = -1,
++	},
++	{
++		.func_name = { "CLK_SW1", "", "", "", },
++		.reg_type  = 9,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = -1,
++	},
++	{
++		.func_name = { "CLK_SW2", "", "", "", },
++		.reg_type  = 9,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = -1,
++	},
++	{
++		.func_name = { "CLK_SW3", "", "", "", },
++		.reg_type  = 9,
++		.func_sel  = 0,
++		.pupd 	   = IOMUX_PUPD_UP,
++		.drv_cur   = -1,
++	},
++	{
++		.func_name = { "RESERVED", "", "", "", },
++		.reg_type  = 20,
++		.func_sel  = -1,
++	},
++	{
++		.func_name = { "MAC_TXER", "GPIO37", "", "", },
++		.reg_type  = 20,
++		.func_sel  = 1,
++		.pupd 	   = IOMUX_PUPD_DOWN,
++		.drv_cur   = 1,
++	},
++};
++
++
++static void fh_iomux_setmfs(Iomux_Object *iomux_obj, Iomux_Pad *pad)
++{
++	switch (pad->reg_type) {
++	case 8:
++		(IOMUX_PADTYPE(8)pad->reg)->bit.mfs = pad->func_sel;
++		break;
++	case 13:
++		(IOMUX_PADTYPE(13)pad->reg)->bit.mfs = pad->func_sel;
++		break;
++	case 20:
++		(IOMUX_PADTYPE(20)pad->reg)->bit.mfs = pad->func_sel;
++		break;
++	case 21:
++		(IOMUX_PADTYPE(21)pad->reg)->bit.mfs = pad->func_sel;
++		break;
++	default:
++		break;
++	}
++
++}
++
++#ifdef IOMUX_DEBUG
++
++static int fh_iomux_getmfs(Iomux_Object *iomux_obj, Iomux_Pad *pad)
++{
++	int mfs;
++
++	switch (pad->reg_type) {
++	case 8:
++		mfs = (IOMUX_PADTYPE(8)pad->reg)->bit.mfs;
++		break;
++	case 13:
++		mfs = (IOMUX_PADTYPE(13)pad->reg)->bit.mfs;
++		break;
++	case 20:
++		mfs = (IOMUX_PADTYPE(20)pad->reg)->bit.mfs;
++		break;
++	case 21:
++		mfs = (IOMUX_PADTYPE(21)pad->reg)->bit.mfs;
++		break;
++	default:
++		mfs = -1;
++		break;
++
++	}
++	return mfs;
++}
++
++
++static void fh_iomux_print(Iomux_Object *iomux_obj)
++{
++	int i;
++	u32 reg;
++
++	printk("\tPad No.\t\tFunction Select\t\tRegister\n");
++
++	for (i = 0; i < ARRAY_SIZE(fh_iomux_cfg); i++) {
++		int curr_func;
++
++		curr_func = fh_iomux_getmfs(iomux_obj, &iomux_obj->pads[i]);
++		reg = readl((u32)iomux_obj->pads[i].reg);
++
++		if (curr_func < 0)
++			printk("\t%d\t\t%-8s(no mfs)\t0x%08x\n", i, iomux_obj->pads[i].func_name[0],
++			       reg);
++		else
++			printk("\t%d\t\t%-16s\t0x%08x\n", i, iomux_obj->pads[i].func_name[curr_func],
++			       reg);
++
++	}
++
++}
++
++#endif
++
++static void fh_iomux_setcur(Iomux_Object *iomux_obj, Iomux_Pad *pad)
++{
++	switch (pad->reg_type) {
++	case 5:
++		(IOMUX_PADTYPE(5)pad->reg)->bit.e8_e4 = pad->drv_cur;
++		break;
++	case 8:
++		(IOMUX_PADTYPE(8)pad->reg)->bit.e8_e4 = pad->drv_cur;
++		break;
++	case 13:
++		(IOMUX_PADTYPE(13)pad->reg)->bit.e4_e2 = pad->drv_cur;
++		break;
++	case 17:
++		(IOMUX_PADTYPE(17)pad->reg)->bit.e8_e4 = pad->drv_cur;
++		break;
++	case 20:
++		(IOMUX_PADTYPE(20)pad->reg)->bit.e4_e2 = pad->drv_cur;
++		break;
++	case 21:
++		(IOMUX_PADTYPE(21)pad->reg)->bit.e4_e2 = pad->drv_cur;
++		break;
++	default:
++		break;
++	}
++
++}
++
++static void fh_iomux_setpupd(Iomux_Object *iomux_obj, Iomux_Pad *pad)
++{
++	switch (pad->reg_type) {
++	case 9:
++		(IOMUX_PADTYPE(9)pad->reg)->bit.pu_pd = pad->pupd;
++		break;
++	case 17:
++		(IOMUX_PADTYPE(17)pad->reg)->bit.pu_pd = pad->pupd;
++		break;
++	case 20:
++		(IOMUX_PADTYPE(20)pad->reg)->bit.pu_pd = pad->pupd;
++		break;
++	case 21:
++		(IOMUX_PADTYPE(21)pad->reg)->bit.pu_pd = pad->pupd;
++		break;
++	default:
++		break;
++	}
++}
++
++static void fh_iomux_setrest(Iomux_Object *iomux_obj, Iomux_Pad *pad)
++{
++	switch (pad->reg_type) {
++	case 5:
++		(IOMUX_PADTYPE(5)pad->reg)->bit.sr = 0;
++		break;
++	case 8:
++		(IOMUX_PADTYPE(8)pad->reg)->bit.sr = 0;
++		break;
++	case 9:
++		(IOMUX_PADTYPE(9)pad->reg)->bit.ie = 1;
++		(IOMUX_PADTYPE(9)pad->reg)->bit.smt = 1;
++		break;
++	case 13:
++		(IOMUX_PADTYPE(13)pad->reg)->bit.ie = 1;
++		(IOMUX_PADTYPE(13)pad->reg)->bit.smt = 1;
++		break;
++	case 17:
++		(IOMUX_PADTYPE(17)pad->reg)->bit.sr = 0;
++		(IOMUX_PADTYPE(17)pad->reg)->bit.ie = 1;
++		(IOMUX_PADTYPE(17)pad->reg)->bit.e = 1;
++		(IOMUX_PADTYPE(17)pad->reg)->bit.smt = 1;
++		break;
++	case 20:
++		(IOMUX_PADTYPE(20)pad->reg)->bit.sr = 0;
++		(IOMUX_PADTYPE(20)pad->reg)->bit.ie = 1;
++		(IOMUX_PADTYPE(20)pad->reg)->bit.smt = 1;
++		break;
++	case 21:
++		(IOMUX_PADTYPE(21)pad->reg)->bit.sr = 0;
++		(IOMUX_PADTYPE(21)pad->reg)->bit.ie = 1;
++		(IOMUX_PADTYPE(21)pad->reg)->bit.smt = 1;
++		break;
++	default:
++		break;
++	}
++
++}
++
++void fh_iomux_init(Iomux_Object *iomux_obj)
++{
++	int i;
++	u32 reg = 0;
++
++	iomux_obj->pads = fh_iomux_cfg;
++
++	for (i = 0; i < ARRAY_SIZE(fh_iomux_cfg); i++) {
++		iomux_obj->pads[i].reg_offset = i * 4;
++		iomux_obj->pads[i].reg = &reg;
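++		/*
++		 * pads[i].reg aliases the local "reg": the set* helpers
++		 * below compose the pad value through its bitfield views,
++		 * and the result is written out via fh_pmu_set_reg().
++		 */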
++
++#if defined(CONFIG_FH_PWM_NUM) && CONFIG_FH_PWM_NUM == 4
++		/* for pwm3 only */
++		if (fh_iomux_cfg[i].func_sel == 2
++		    && iomux_obj->pads[i].reg_offset == 0xa8) {
++			fh_pmu_set_reg(0x128, 0x00101110);
++			fh_iomux_cfg[i].func_sel = 1;
++		}
++#endif
++
++		if (iomux_obj->pads[i].func_sel < 0)
++			continue;
++
++		fh_iomux_setmfs(iomux_obj, &fh_iomux_cfg[i]);
++		fh_iomux_setcur(iomux_obj, &fh_iomux_cfg[i]);
++		fh_iomux_setpupd(iomux_obj, &fh_iomux_cfg[i]);
++		fh_iomux_setrest(iomux_obj, &fh_iomux_cfg[i]);
++		fh_pmu_set_reg(0x5c + iomux_obj->pads[i].reg_offset, reg);
++	}
++#ifdef CONFIG_FH_GMAC_RMII
++	//(IOMUX_PADTYPE(17)(iomux_obj->pads[18]).reg)->bit.e = 1;
++	reg = fh_pmu_get_reg(REG_PMU_PAD_MAC_REF_CLK_CFG);
++	reg |= (1 << 13);
++	fh_pmu_set_reg(REG_PMU_PAD_MAC_REF_CLK_CFG, reg);
++#else
++	//(IOMUX_PADTYPE(17)(iomux_obj->pads[18]).reg)->bit.e = 0;
++	reg = fh_pmu_get_reg(REG_PMU_PAD_MAC_REF_CLK_CFG);
++	reg &= ~(1 << 13);
++	fh_pmu_set_reg(REG_PMU_PAD_MAC_REF_CLK_CFG, reg);
++#endif
++#ifdef IOMUX_DEBUG
++	fh_iomux_print(iomux_obj);
++#endif
++}
+diff --git a/arch/arm/mach-fh/irq.c b/arch/arm/mach-fh/irq.c
+new file mode 100644
+index 00000000..a17666fa
+--- /dev/null
++++ b/arch/arm/mach-fh/irq.c
+@@ -0,0 +1,151 @@
++/*
++ * Fullhan FH board support
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/io.h>
++
++#include <mach/hardware.h>
++#include <mach/system.h>
++#include <asm/mach/irq.h>
++
++#include <mach/chip.h>
++#include <mach/fh_predefined.h>
++#include <mach/irqs.h>
++
++static void fh_intc_ack(struct irq_data *d)
++{
++
++}
++static void fh_intc_enable(struct irq_data *d)
++{
++	if (d->irq >= NR_INTERNAL_IRQS)
++		return;
++
++	if (d->irq > 31) {
++		SET_REG_M(VINTC(REG_IRQ_EN_HIGH), 1 << (d->irq - 32),
++			  1 << (d->irq - 32));
++	} else
++		SET_REG_M(VINTC(REG_IRQ_EN_LOW), 1 << d->irq, 1 << d->irq);
++
++}
++static void fh_intc_disable(struct irq_data *d)
++{
++	if (d->irq >= NR_INTERNAL_IRQS)
++		return;
++	if (d->irq > 31)
++		SET_REG_M(VINTC(REG_IRQ_EN_HIGH), 0, 1 << (d->irq - 32));
++	else
++		SET_REG_M(VINTC(REG_IRQ_EN_LOW), 0, 1 << d->irq);
++}
++
++static void fh_intc_mask(struct irq_data *d)
++{
++	if (d->irq >= NR_INTERNAL_IRQS)
++		return;
++	if (d->irq > 31) {
++		SET_REG_M(VINTC(REG_IRQ_IRQMASK_HIGH), 1 << (d->irq - 32),
++			  1 << (d->irq - 32));
++	} else
++		SET_REG_M(VINTC(REG_IRQ_IRQMASK_LOW), 1 << d->irq, 1 << d->irq);
++}
++
++static void fh_intc_unmask(struct irq_data *d)
++{
++	if (d->irq >= NR_INTERNAL_IRQS)
++		return;
++	if (d->irq > 31)
++		SET_REG_M(VINTC(REG_IRQ_IRQMASK_HIGH), 0, 1 << (d->irq - 32));
++	else
++		SET_REG_M(VINTC(REG_IRQ_IRQMASK_LOW), 0, 1 << d->irq);
++}
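++
++/*
++ * The INTC exposes 64 lines through paired 32-bit registers: IRQs 0-31
++ * live in the *_LOW registers, IRQs 32-63 in *_HIGH (bit = irq - 32);
++ * enabling IRQ 35, for instance, sets bit 3 of REG_IRQ_EN_HIGH.
++ */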
++
++#ifdef CONFIG_PM
++
++static u32 wakeups_high;
++static u32 wakeups_low;
++static u32 backups_high;
++static u32 backups_low;
++
++static int fh_intc_set_wake(struct irq_data *d, unsigned value)
++{
++	if (unlikely(d->irq >= NR_IRQS))
++		return -EINVAL;
++
++	if (value) {
++		if (d->irq > 31)
++			wakeups_high |= (1 << (d->irq - 32));
++		else
++			wakeups_low |= (1 << d->irq);
++	} else {
++		if (d->irq > 31)
++			wakeups_high &= ~(1 << (d->irq - 32));
++		else
++			wakeups_low &= ~(1 << d->irq);
++	}
++	return 0;
++}
++
++void fh_irq_suspend(void)
++{
++	backups_high = GET_REG(VINTC(REG_IRQ_EN_HIGH));
++	backups_low = GET_REG(VINTC(REG_IRQ_EN_LOW));
++
++	SET_REG(VINTC(REG_IRQ_EN_HIGH), wakeups_high);
++	SET_REG(VINTC(REG_IRQ_EN_LOW), wakeups_low);
++}
++
++void fh_irq_resume(void)
++{
++	SET_REG(VINTC(REG_IRQ_EN_HIGH), backups_high);
++	SET_REG(VINTC(REG_IRQ_EN_LOW), backups_low);
++}
++
++#else
++#define fh_intc_set_wake   NULL
++#endif
++
++static struct irq_chip fh_irq_chip = {
++	.name		= "FH_INTC",
++	.irq_ack	= fh_intc_ack,
++	.irq_mask	= fh_intc_mask,
++	.irq_unmask	= fh_intc_unmask,
++
++	.irq_enable = fh_intc_enable,
++	.irq_disable = fh_intc_disable,
++	.irq_set_wake = fh_intc_set_wake,
++};
++
++void __init fh_intc_init(void)
++{
++	int i;
++
++	//disable all interrupts
++	SET_REG(VINTC(REG_IRQ_EN_LOW), 0x0);
++	SET_REG(VINTC(REG_IRQ_EN_HIGH), 0x0);
++
++	for (i = 0; i < NR_IRQS; i++) {
++		irq_set_chip_and_handler(i, &fh_irq_chip, handle_level_irq);
++		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
++	}
++
++}
+diff --git a/arch/arm/mach-fh/pinctrl.c b/arch/arm/mach-fh/pinctrl.c
+new file mode 100644
+index 00000000..58fad592
+--- /dev/null
++++ b/arch/arm/mach-fh/pinctrl.c
+@@ -0,0 +1,299 @@
++#ifdef CONFIG_MACH_FH8830
++#ifdef CONFIG_MACH_FH8830_QFN
++#include <mach/fh8830_iopad_qfn.h>
++#else
++#include <mach/fh8830_iopad_bga.h>
++#endif
++#endif
++
++#ifdef CONFIG_MACH_FH8833
++#include <mach/fh8833_iopad_mipi.h>
++#endif
++
++#include <mach/pinctrl.h>
++#include <linux/module.h>
++
++/* #define  FH_PINCTRL_DEBUG */
++#ifdef FH_PINCTRL_DEBUG
++#define PRINT_DBG(fmt,args...)  OS_PRINT(fmt,##args)
++#else
++#define PRINT_DBG(fmt,args...)  do{} while(0)
++#endif
++
++static PinCtrl_Object pinctrl_obj;
++OS_LIST fh_pinctrl_devices = OS_LIST_INIT(fh_pinctrl_devices);
++
++static int fh_pinctrl_func_select(PinCtrl_Pin *pin, unsigned int flag)
++{
++    unsigned int reg;
++
++    if(!pin)
++    {
++        OS_PRINT("ERROR: pin is null\n\n");
++        return -1;
++    }
++
++    if(flag & NEED_CHECK_PINLIST)
++    {
++        if(pinctrl_obj.pinlist[pin->pad_id])
++        {
++            OS_PRINT("ERROR: pad %d has already been set\n\n", pin->pad_id);
++            return -2;
++        }
++    }
++
++    reg = GET_REG(pinctrl_obj.vbase + pin->reg_offset);
++
++    pin->reg = (PinCtrl_Register *)&reg;
++
++    pin->reg->bit.mfs = pin->func_sel;
++
++    if(pin->pullup_pulldown == PUPD_DOWN)
++    {
++	    pin->reg->bit.pdn = 0;
++    }
++    else if(pin->pullup_pulldown == PUPD_UP)
++    {
++	    pin->reg->bit.pun = 0;
++    }
++    else
++    {
++	    pin->reg->bit.pdn = 1;
++	    pin->reg->bit.pun = 1;
++    }
++
++    pin->reg->bit.ie = 1;
++
++    SET_REG(pinctrl_obj.vbase + pin->reg_offset, pin->reg->dw);
++
++    pinctrl_obj.pinlist[pin->pad_id] = pin;
++
++    return 0;
++}
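++
++/*
++ * fh_pinctrl_func_select() edits a stack copy of the pad register
++ * through the PinCtrl_Register union (raw u32 'dw' overlaid with 'bit'
++ * fields) and writes it back once, so the hardware sees a single
++ * update. Note that pin->reg is left pointing at that temporary copy
++ * and must not be dereferenced after the call returns.
++ */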
++
++static int fh_pinctrl_mux_switch(PinCtrl_Mux *mux, unsigned int flag)
++{
++    if(mux->cur_pin >= MUX_NUM)
++    {
++        OS_PRINT("ERROR: selected function does not exist, sel_func=%d\n\n", mux->cur_pin);
++        return -3;
++    }
++
++    if(!mux->mux_pin[mux->cur_pin])
++    {
++        OS_PRINT("ERROR: mux->mux_pin[%d] has no pin\n\n", mux->cur_pin);
++        return -4;
++    }
++
++    PRINT_DBG("\t%s[%d]\n", mux->mux_pin[mux->cur_pin]->func_name, mux->cur_pin);
++    return fh_pinctrl_func_select(mux->mux_pin[mux->cur_pin], flag);
++}
++
++
++static int fh_pinctrl_device_switch(PinCtrl_Device *dev, unsigned int flag)
++{
++    int i, ret;
++    for(i=0; i<dev->mux_count; i++)
++    {
++        unsigned int *mux_addr = (unsigned int *)((unsigned int)dev
++                + sizeof(*dev) - 4 + i*4);
++        PinCtrl_Mux *mux = (PinCtrl_Mux *)(*mux_addr);
++
++        ret = fh_pinctrl_mux_switch(mux, flag);
++        if(ret)
++        {
++            return ret;
++        }
++    }
++
++    return 0;
++}
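++
++/*
++ * fh_pinctrl_device_switch() assumes PinCtrl_Device ends with a
++ * one-entry mux pointer array that the iopad tables extend: entry i
++ * lives at (dev + sizeof(*dev) - 4 + i * 4), i.e. in 4-byte slots
++ * immediately after the fixed part of the structure.
++ */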
++
++static PinCtrl_Device * fh_pinctrl_get_device_by_name(char *name)
++{
++    PinCtrl_Device *dev = OS_NULL;
++
++    list_for_each_entry(dev, &fh_pinctrl_devices, list)
++    {
++        if(!strcmp(name, dev->dev_name))
++        {
++            return dev;
++        }
++    }
++
++    return OS_NULL;
++}
++
++int fh_pinctrl_check_pinlist(void)
++{
++    int i;
++    for(i=0; i<PAD_NUM; i++)
++    {
++        if(!pinctrl_obj.pinlist[i])
++        {
++        	PRINT_DBG("ERROR: pad %d is still empty\n", i);
++        }
++    }
++
++    return 0;
++}
++
++static int fh_pinctrl_init_devices(char** devlist, int listsize, unsigned int flag)
++{
++    int i, ret;
++    PinCtrl_Device *dev;
++
++    memset(pinctrl_obj.pinlist, 0, sizeof(pinctrl_obj.pinlist));
++
++    for(i=0; i<listsize; i++)
++    {
++        dev = fh_pinctrl_get_device_by_name(devlist[i]);
++
++        if(!dev)
++        {
++            OS_PRINT("ERROR: cannot find device %s\n", devlist[i]);
++            return -5;
++        }
++
++        PRINT_DBG("%s:\n", dev->dev_name);
++        ret = fh_pinctrl_device_switch(dev, flag);
++        PRINT_DBG("\n");
++        if(ret)
++        {
++            return ret;
++        }
++
++    }
++
++    fh_pinctrl_check_pinlist();
++
++    return 0;
++
++}
++
++static void fh_pinctrl_init_pin(void)
++{
++    int i;
++    PinCtrl_Register reg_val;
++
++    for(i=0; i<PAD_NUM; i++)
++    {
++        PinCtrl_Pin *pin = pinctrl_obj.pinlist[i];
++        if(!pin)
++        {
++            unsigned int reg;
++            PRINT_DBG("ERROR: pad %d is empty\n", i);
++            reg = GET_REG(pinctrl_obj.vbase + i * 4);
++            reg &= ~(0x1000);
++            SET_REG(pinctrl_obj.vbase + i * 4, reg);
++            continue;
++        }
++        /* read the pad register into a local copy; pin->reg points at a
++         * stack slot that went out of scope in fh_pinctrl_func_select()
++         * and must not be dereferenced here */
++        reg_val.dw = GET_REG(pinctrl_obj.vbase + pin->reg_offset);
++
++        pin->input_enable = reg_val.bit.ie;
++        pin->output_enable = reg_val.bit.oe;
++    }
++}
++
++
++void fh_pinctrl_init(unsigned int base)
++{
++    pinctrl_obj.vbase = pinctrl_obj.pbase = (void *)base;
++
++    fh_pinctrl_init_devicelist(&fh_pinctrl_devices);
++    fh_pinctrl_init_devices(fh_pinctrl_selected_devices,
++            ARRAY_SIZE(fh_pinctrl_selected_devices),
++            NEED_CHECK_PINLIST);
++    fh_pinctrl_init_pin();
++}
++
++void fh_pinctrl_prt(struct seq_file *sfile)
++{
++    int i;
++    seq_printf(sfile, "%2s\t%8s\t%4s\t%8s\t%4s\t%4s\t%4s\t%4s\n",
++		    "id", "name", "addr", "reg", "sel", "ie", "oe", "pupd");
++    for(i=0; i<PAD_NUM; i++)
++    {
++        if(!pinctrl_obj.pinlist[i])
++        {
++            OS_PRINT("ERROR: pad %d is empty\n", i);
++            continue;
++        }
++        seq_printf(sfile, "%02d\t%8s\t0x%08x\t0x%08x\t%04d\t%04d\t%04d\t%04d\n",
++                pinctrl_obj.pinlist[i]->pad_id,
++                pinctrl_obj.pinlist[i]->func_name,
++                pinctrl_obj.pinlist[i]->reg_offset + 0xf0000080,
++                GET_REG(pinctrl_obj.vbase + pinctrl_obj.pinlist[i]->reg_offset),
++                pinctrl_obj.pinlist[i]->func_sel,
++                pinctrl_obj.pinlist[i]->input_enable,
++                pinctrl_obj.pinlist[i]->output_enable,
++                pinctrl_obj.pinlist[i]->pullup_pulldown);
++    }
++
++}
++
++
++int fh_pinctrl_smux(char *devname, char* muxname, int muxsel, unsigned int flag)
++{
++    PinCtrl_Device *dev;
++    int i, ret;
++
++    dev = fh_pinctrl_get_device_by_name(devname);
++
++    if(!dev)
++    {
++        OS_PRINT("ERROR: cannot find device %s\n", devname);
++        return -4;
++    }
++
++    for(i=0; i<dev->mux_count; i++)
++    {
++        unsigned int *mux_addr = (unsigned int *)((unsigned int)dev
++                + sizeof(*dev) - 4 + i*4);
++        PinCtrl_Mux *mux = (PinCtrl_Mux *)(*mux_addr);
++
++        if(!strcmp(muxname, mux->mux_pin[0]->func_name))
++        {
++            mux->cur_pin = muxsel;
++            ret = fh_pinctrl_mux_switch(mux, flag);
++            return ret;
++        }
++    }
++
++    OS_PRINT("ERROR: cannot find mux %s of device %s\n", muxname, devname);
++    return -6;
++}
++EXPORT_SYMBOL(fh_pinctrl_smux);
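++
++/*
++ * Example (device and mux names here are hypothetical; the real ones
++ * come from the fh8830/fh8833 iopad headers):
++ *
++ *   fh_pinctrl_smux("UART1", "UART1_TX", 1, NEED_CHECK_PINLIST);
++ *
++ * switches the UART1_TX mux of device "UART1" to alternate pin 1.
++ */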
++
++int fh_pinctrl_sdev(char *devname, unsigned int flag)
++{
++    PinCtrl_Device *dev;
++    int ret;
++
++    dev = fh_pinctrl_get_device_by_name(devname);
++    if(!dev)
++    {
++        OS_PRINT("ERROR: cannot find device %s\n", devname);
++        return -7;
++    }
++
++    OS_PRINT("%s:\n", dev->dev_name);
++    ret = fh_pinctrl_device_switch(dev, flag);
++    OS_PRINT("\n");
++    if(ret)
++    {
++        return ret;
++    }
++
++    fh_pinctrl_check_pinlist();
++
++    return 0;
++}
++EXPORT_SYMBOL(fh_pinctrl_sdev);
+diff --git a/arch/arm/mach-fh/pm.c b/arch/arm/mach-fh/pm.c
+new file mode 100644
+index 00000000..a7ec90b5
+--- /dev/null
++++ b/arch/arm/mach-fh/pm.c
+@@ -0,0 +1,223 @@
++/*
++ * FH Power Management Routines
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/pm.h>
++#include <linux/suspend.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/clk.h>
++#include <linux/spinlock.h>
++#include <linux/io.h>
++
++#include <asm/cacheflush.h>
++#include <asm/delay.h>
++
++#include <mach/sram.h>
++#include <mach/system.h>
++#include <mach/io.h>
++#include <mach/gpio.h>
++#include <mach/ddrc.h>
++#include <mach/pmu.h>
++
++#ifdef CONFIG_PM
++static u32 old_clk_gate = 0;
++
++static void (*fh_sram_suspend)(void);
++
++static inline void fh_pm_pll0_to_xtal(void)
++{
++	u32 reg;
++
++	reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
++	reg &= ~(0x1);
++	fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
++}
++
++static inline void fh_pm_xtal_to_pll0(void)
++{
++	u32 reg;
++
++	reg = fh_pmu_get_reg(REG_PMU_SYS_CTRL);
++	reg |= 0x1;
++	fh_pmu_set_reg(REG_PMU_SYS_CTRL, reg);
++}
++
++static inline void fh_pm_gate_clocks(void)
++{
++	u32 reg;
++	old_clk_gate = fh_pmu_get_reg(REG_PMU_CLK_GATE);
++	reg = fh_pmu_get_reg(REG_PMU_CLK_GATE);
++	reg |= 0x7fff3fb1;
++	fh_pmu_set_reg(REG_PMU_CLK_GATE, reg);
++}
++
++static inline void fh_pm_ungate_clocks(void)
++{
++	u32 reg;
++
++	reg = old_clk_gate;
++	fh_pmu_set_reg(REG_PMU_CLK_GATE, reg);
++}
++
++
++static void fh_sram_push(void *dest, void *src, unsigned int size)
++{
++	memcpy(dest, src, size);
++	flush_icache_range((unsigned long)dest, (unsigned long)(dest + size));
++}
++
++static int fh_pm_valid_state(suspend_state_t state)
++{
++	switch (state) {
++	case PM_SUSPEND_ON:
++	case PM_SUSPEND_STANDBY:
++	case PM_SUSPEND_MEM:
++		return 1;
++
++	default:
++		return 0;
++	}
++}
++
++static void fh_pm_suspend_to_ram(void)
++{
++	fh_pm_pll0_to_xtal();
++	fh_pm_gate_clocks();
++
++	fh_sram_suspend();
++
++	fh_pm_ungate_clocks();
++	fh_pm_xtal_to_pll0();
++}
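++
++/*
++ * fh_pm_suspend_to_ram() order matters: the CPU is first switched from
++ * PLL0 to the crystal and most peripheral clocks are gated, then the
++ * SRAM-resident copy of fh_cpu_suspend() (installed by fh_pm_probe()
++ * below) puts DDR into self-refresh and executes WFI; the steps are
++ * undone in reverse on wakeup.
++ */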
++
++static inline void fh_ddrc_selfrefresh_enable(void)
++{
++	u32 reg;
++
++	/*
++	 * Ensure that the Cadence DDR Controller is idle,
++	 * that is when the controller_busy signal is low.
++	 */
++	do {
++		reg = readl(VA_DDRC_REG_BASE + OFFSET_DENAL_CTL_57);
++	} while (reg & DDRC_CONTROLLER_BUSY);
++
++	/*
++	 * Put the memories into self-refresh mode
++	 * by issuing one of the self-refresh entry commands
++	 * through the Low Power Control Module
++	 */
++	writel(DDRC_LPI_SR_WAKEUP_TIME | DDRC_LP_CMD_SELFREFRESH | DDRC_CKSRX_DELAY,
++	       VA_DDRC_REG_BASE + OFFSET_DENAL_CTL_31);
++
++	/*
++	 * Waiting for CKE to drop is intentionally skipped:
++	 *
++	 * do {
++	 *	reg = readl(VA_DDRC_REG_BASE + OFFSET_DENAL_CTL_97);
++	 * } while (reg & DDRC_CKE_STATUS);
++	 */
++}
++
++static inline void fh_ddrc_selfrefresh_disable(void)
++{
++	/* Exit any low power state */
++	writel(DDRC_LPI_SR_WAKEUP_TIME | DDRC_LP_CMD_EXITLOWPOWER | DDRC_CKSRX_DELAY,
++	       VA_DDRC_REG_BASE + OFFSET_DENAL_CTL_31);
++}
++
++static void fh_pm_suspend_to_cache(void)
++{
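++	/*
++	 * cp15 c7, c10, 4 drains the write buffer (data synchronization
++	 * barrier) and c7, c0, 4 is wait-for-interrupt, matching the
++	 * encodings commented in sleep.S.
++	 */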
++	asm volatile("mov r1, #0\n\t"
++		     "mcr p15, 0, r1, c7, c10, 4\n\t"
++		     "mcr p15, 0, r1, c7, c0, 4\n\t"
++		     : /* no output */
++		     : /* no input */
++		     : "r1");
++
++	fh_ddrc_selfrefresh_enable();
++
++	asm volatile("mov r1, #0\n\t"
++		     "mcr p15, 0, r1, c7, c10, 4\n\t"
++		     "mcr p15, 0, r1, c7, c0, 4\n\t"
++		     : /* no output */
++		     : /* no input */
++		     : "r1");
++	fh_ddrc_selfrefresh_disable();
++}
++
++static int fh_pm_enter(suspend_state_t state)
++{
++	int ret = 0;
++
++	fh_irq_suspend();
++	fh_gpio_irq_suspend();
++
++	switch (state) {
++	case PM_SUSPEND_ON:
++		cpu_do_idle();
++		break;
++	case PM_SUSPEND_STANDBY:
++		fh_pm_suspend_to_cache();
++		break;
++	case PM_SUSPEND_MEM:
++		fh_pm_suspend_to_ram();
++		break;
++	default:
++		ret = -EINVAL;
++	}
++
++	fh_gpio_irq_resume();
++	fh_irq_resume();
++
++	return ret;
++}
++
++static const struct platform_suspend_ops fh_pm_ops = {
++	.enter		= fh_pm_enter,
++	.valid		= fh_pm_valid_state,
++};
++
++static int __init fh_pm_probe(struct platform_device *pdev)
++{
++	fh_sram_suspend = sram_alloc(fh_cpu_suspend_sz, NULL);
++	if (!fh_sram_suspend) {
++		dev_err(&pdev->dev, "cannot allocate SRAM memory\n");
++		return -ENOMEM;
++	}
++	fh_sram_push(fh_sram_suspend, fh_cpu_suspend,
++		       fh_cpu_suspend_sz);
++	suspend_set_ops(&fh_pm_ops);
++
++	return 0;
++}
++
++static int __exit fh_pm_remove(struct platform_device *pdev)
++{
++	sram_free(fh_sram_suspend, fh_cpu_suspend_sz);
++	return 0;
++}
++
++static struct platform_driver fh_pm_driver = {
++	.driver = {
++		.name	= "pm-fh",
++		.owner	= THIS_MODULE,
++	},
++	.remove = __exit_p(fh_pm_remove),
++};
++
++static int __init fh_pm_init(void)
++{
++	return platform_driver_probe(&fh_pm_driver, fh_pm_probe);
++}
++late_initcall(fh_pm_init);
++#endif
+diff --git a/arch/arm/mach-fh/pmu.c b/arch/arm/mach-fh/pmu.c
+new file mode 100644
+index 00000000..2e1dc40b
+--- /dev/null
++++ b/arch/arm/mach-fh/pmu.c
+@@ -0,0 +1,57 @@
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <asm/io.h>
++
++#include <mach/io.h>
++#include <mach/chip.h>
++
++static int fh_pmu_flag_stop = 0;
++
++void fh_pmu_stop(void)
++{
++	fh_pmu_flag_stop = 1;
++}
++EXPORT_SYMBOL(fh_pmu_stop);
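++
++/*
++ * Once fh_pmu_stop() has been called, fh_pmu_set_reg() silently drops
++ * writes and fh_pmu_get_reg() returns 0, so late PMU accesses during
++ * shutdown can no longer touch the hardware.
++ */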
++
++void fh_pmu_set_reg(u32 offset, u32 data)
++{
++	if (fh_pmu_flag_stop)
++		return;
++
++	if (offset >= PMU_REG_SIZE) {
++		pr_err("fh_pmu_set_reg: offset is out of range\n");
++		return;
++	}
++	writel(data, VPMU(PMU_REG_BASE + offset));
++}
++EXPORT_SYMBOL(fh_pmu_set_reg);
++
++u32 fh_pmu_get_reg(u32 offset)
++{
++	if (fh_pmu_flag_stop)
++		return 0;
++
++	if (offset >= PMU_REG_SIZE) {
++		pr_err("fh_pmu_get_reg: offset is out of range\n");
++		return 0;
++	}
++	return readl(VPMU(PMU_REG_BASE + offset));
++}
++EXPORT_SYMBOL(fh_pmu_get_reg);
++
++void fh_pae_set_reg(u32 offset, u32 data)
++{
++	if (offset > 0x60) {
++		pr_err("fh_pae_set_reg: offset is out of range\n");
++		return;
++	}
++	writel(data, VPAE(PAE_REG_BASE + offset));
++}
++EXPORT_SYMBOL(fh_pae_set_reg);
++
++int fh_pmu_init(void)
++{
++	return 0;
++}
+diff --git a/arch/arm/mach-fh/sleep.S b/arch/arm/mach-fh/sleep.S
+new file mode 100644
+index 00000000..5eb4ac23
+--- /dev/null
++++ b/arch/arm/mach-fh/sleep.S
+@@ -0,0 +1,144 @@
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++#include <mach/chip.h>
++#include <mach/io.h>
++#include <mach/ddrc.h>
++
++#ifdef CONFIG_PM
++#define PMU_MASK_SWITCH_PLL0	0x1
++#define PMU_MASK_DDR_SEL		0x1000000
++#define PMU_MASK_DDR_DIV		0xff
++#define PMU_MASK_PLL1_PDN		0x80000000
++
++
++	.macro wait_ddrc_idle
++1:	ldr	r3, [r1, #OFFSET_DENAL_CTL_57]
++	tst	r3, #DDRC_CONTROLLER_BUSY
++	bne	1b
++	.endm
++
++
++	.macro enable_ddrc_selfrefresh
++	ldr r3, .fh_ddrc_cmd_en_self_refresh
++	str	r3, [r1, #OFFSET_DENAL_CTL_31]
++	.endm
++
++
++	.macro wait_ddrc_cke
++1:	ldr	r3, [r1, #OFFSET_DENAL_CTL_97]
++	tst	r3, #DDRC_CKE_STATUS
++	bne	1b
++	.endm
++
++
++	.macro disable_ddrc_selfrefresh
++	ldr r3, .fh_ddrc_cmd_dis_self_refresh
++	str	r3, [r1, #OFFSET_DENAL_CTL_31]
++	.endm
++
++
++	.macro ddr_to_pll0
++	ldr r3, [r2, #REG_PMU_CLK_SEL]
++	bic	r3, r3, #PMU_MASK_DDR_SEL
++	str	r3, [r2, #REG_PMU_CLK_SEL]
++	.endm
++
++
++	.macro ddr_to_pll1
++	ldr r3, [r2, #REG_PMU_CLK_SEL]
++	orr	r3, r3, #PMU_MASK_DDR_SEL
++	str	r3, [r2, #REG_PMU_CLK_SEL]
++	.endm
++
++#if 1
++	.macro ddr_dec_feq
++	ldr	r3, [r2, #REG_PMU_CLK_DIV1]
++	orr	r3, r3, #PMU_MASK_DDR_DIV
++	str	r3, [r2, #REG_PMU_CLK_DIV1]
++	.endm
++
++
++	.macro ddr_inc_feq
++	ldr	r3, [r2, #REG_PMU_CLK_DIV1]
++	bic	r3, r3, #PMU_MASK_DDR_DIV
++	orr	r3, r3, #0x1
++	str	r3, [r2, #REG_PMU_CLK_DIV1]
++	.endm
++
++
++	.macro pll1_power_down
++	ldr r3, [r2, #REG_PMU_PLL1_CTRL]
++	bic	r3, r3, #PMU_MASK_PLL1_PDN
++	str	r3, [r2, #REG_PMU_PLL1_CTRL]
++	.endm
++
++
++	.macro pll1_power_on
++	ldr r3, [r2, #REG_PMU_PLL1_CTRL]
++	orr	r3, r3, #PMU_MASK_PLL1_PDN
++	str	r3, [r2, #REG_PMU_PLL1_CTRL]
++	.endm
++#endif
++
++	.text
++ENTRY(fh_cpu_suspend)
++
++	stmfd	sp!, {r0-r12, lr}		@ save registers on stack
++
++	/*
++	 * Register usage:
++	 *  R1 = Base address of DDRC
++	 *  R2 = Base register for PMU
++	 *  R3 = temporary register
++	 *  R4 = temporary register
++	 *
++	 *  R9 = Test address
++	 */
++
++	ldr	r1, .fh_va_base_ddrc
++	ldr	r2, .fh_va_base_pmu
++	ldr r9, .fh_va_test_addr
++
++	wait_ddrc_idle
++	enable_ddrc_selfrefresh
++	wait_ddrc_cke
++
++	@ddr_dec_feq
++	ddr_to_pll0
++
++	@pll1_power_down
++
++	mcr p15, 0, r0, c7, c10, 4		@ Data Synchronization Barrier operation
++	mcr	p15, 0, r0, c7, c0, 4		@ Wait-for-Interrupt
++
++	@pll1_power_on
++
++	@ddr_inc_feq
++	ddr_to_pll1
++
++	disable_ddrc_selfrefresh
++
++	ldmfd	sp!, {r0-r12, pc}
++
++ENDPROC(fh_cpu_suspend)
++
++.fh_va_base_ddrc:
++	.word VA_DDRC_REG_BASE
++
++.fh_va_base_pmu:
++	.word VA_PMU_REG_BASE
++
++.fh_va_test_addr:
++	.word 0xc03efef0
++
++.fh_ddrc_cmd_en_self_refresh:
++	.word 0x3000a01
++
++.fh_ddrc_cmd_dis_self_refresh:
++	.word 0x3000101
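++
++/*
++ * 0x3000a01 and 0x3000101 are the DENAL_CTL_31 command words for
++ * self-refresh entry and exit; they presumably encode the same
++ * DDRC_LPI_SR_WAKEUP_TIME | DDRC_LP_CMD_* | DDRC_CKSRX_DELAY fields
++ * that pm.c writes from C.
++ */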
++
++ENTRY(fh_cpu_suspend_sz)
++	.word	. - fh_cpu_suspend
++ENDPROC(fh_cpu_suspend_sz)
++#endif
+diff --git a/arch/arm/mach-fh/sram.c b/arch/arm/mach-fh/sram.c
+new file mode 100644
+index 00000000..4df4838b
+--- /dev/null
++++ b/arch/arm/mach-fh/sram.c
+@@ -0,0 +1,53 @@
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/genalloc.h>
++#include <asm-generic/sizes.h>
++#include <mach/sram.h>
++#include <mach/chip.h>
++#include <mach/io.h>
++
++static struct gen_pool *sram_pool;
++
++void *sram_alloc(size_t len, dma_addr_t *dma)
++{
++	unsigned long vaddr;
++
++	if (!sram_pool)
++		return NULL;
++
++	vaddr = gen_pool_alloc(sram_pool, len);
++	if (!vaddr)
++		return NULL;
++
++	return (void *)vaddr;
++}
++EXPORT_SYMBOL(sram_alloc);
++
++void sram_free(void *addr, size_t len)
++{
++	gen_pool_free(sram_pool, (unsigned long) addr, len);
++}
++EXPORT_SYMBOL(sram_free);
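++
++/*
++ * Typical use (see pm.c): fh_pm_probe() allocates fh_cpu_suspend_sz
++ * bytes with sram_alloc(), copies fh_cpu_suspend() into them and
++ * flushes the icache so the suspend path can run while DRAM is in
++ * self-refresh.
++ */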
++
++
++/*
++ * REVISIT This supports CPU and DMA access to/from SRAM, but it
++ * doesn't (yet?) support some other notable uses of SRAM:  as TCM
++ * for data and/or instructions; and holding code needed to enter
++ * and exit suspend states (while DRAM can't be used).
++ */
++static int __init sram_init(void)
++{
++	int status = 0;
++
++	sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1);
++	if (sram_pool)
++		status = gen_pool_add(sram_pool, VA_RAM_REG_BASE, SRAM_SIZE, -1);
++	else
++		status = -ENOMEM;
++
++	WARN_ON(status < 0);
++	return status;
++}
++core_initcall(sram_init);
++
+diff --git a/arch/arm/mach-fh/time.c b/arch/arm/mach-fh/time.c
+new file mode 100644
+index 00000000..f8978a4b
+--- /dev/null
++++ b/arch/arm/mach-fh/time.c
+@@ -0,0 +1,278 @@
++/*
++ * FH timer subsystem
++ *
++ * Copyright (C) 2014 Fullhan Microelectronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/clocksource.h>
++#include <linux/clockchips.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/platform_device.h>
++#include <linux/sched.h>
++#include <linux/time.h>
++#include <linux/irqreturn.h>
++#include <linux/delay.h>
++
++#include <asm/system.h>
++#include <asm/mach-types.h>
++#include <asm/sched_clock.h>
++#include <asm/mach/irq.h>
++#include <asm/mach/time.h>
++
++#include <mach/hardware.h>
++#include <mach/timex.h>
++#include <mach/chip.h>
++#include <mach/fh_predefined.h>
++#include <mach/irqs.h>
++#include <mach/pmu.h>
++#include <mach/clock.h>
++#include <mach/fh_simple_timer.h>
++
++#define TIMERN_REG_BASE(n)		(TIMER_REG_BASE + 0x14 * (n))
++
++#define	REG_TIMER_LOADCNT(n)		(TIMERN_REG_BASE(n) + 0x00)
++#define	REG_TIMER_CUR_VAL(n)		(TIMERN_REG_BASE(n) + 0x04)
++#define	REG_TIMER_CTRL_REG(n)		(TIMERN_REG_BASE(n) + 0x08)
++#define	REG_TIMER_EOI_REG(n)		(TIMERN_REG_BASE(n) + 0x0C)
++#define	REG_TIMER_INTSTATUS(n)		(TIMERN_REG_BASE(n) + 0x10)
++
++#define REG_TIMERS_INTSTATUS		(TIMER_REG_BASE + 0xa0)
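++
++/*
++ * Each hardware timer occupies a 0x14-byte register window: load count,
++ * current value, control, end-of-interrupt and per-timer interrupt
++ * status. REG_TIMERS_INTSTATUS at offset 0xa0 is the combined status
++ * register read by the shared interrupt handler below.
++ */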
++
++#define REG_PAE_PTS_REG 0xe7000040
++
++static struct clock_event_device clockevent_fh;
++static struct clocksource clocksource_fh;
++#ifndef CONFIG_USE_PTS_AS_CLOCKSOURCE
++static unsigned int prev_cycle = 0;
++#endif
++
++struct clk *timer0_clk, *timer1_clk, *pts_clk;
++
++/*
++ * clockevent
++ */
++
++static int fh_set_next_event(unsigned long cycles,
++			     struct clock_event_device *evt)
++{
++
++	/* SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(1)), 0x00, 0x1); */
++	SET_REG(VTIMER(REG_TIMER_LOADCNT(1)), cycles);
++	SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(1)), 0x01, 0x1);
++
++#if defined(CONFIG_ARCH_FH8833)
++	fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfffbffff);
++	while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff)
++		;
++#endif
++
++#if defined(CONFIG_ARCH_FH8810)
++	{
++		unsigned int curr_val;
++
++		curr_val = GET_REG(VTIMER(REG_TIMER_CUR_VAL(1)));
++		if (curr_val > 0x80000000) {
++			panic("timer curr %u, want cycles %lu\n", curr_val, cycles);
++
++			SET_REG_M(VTIMER(REG_TIMER_CTRL_REG(1)), 0x01, 0x1);
++			SET_REG(VTIMER(REG_TIMER_LOADCNT(1)), cycles);
++
++			fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfff7ffff);
++			while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff)
++				;
++		}
++	}
++#endif
++	return 0;
++}
++
++static void fh_set_mode(enum clock_event_mode mode,
++			struct clock_event_device *evt)
++{
++	switch (mode) {
++	case CLOCK_EVT_MODE_PERIODIC:
++
++		SET_REG(VTIMER(REG_TIMER_CTRL_REG(1)), 0x3);
++		SET_REG(VTIMER(REG_TIMER_LOADCNT(1)), TIMER1_CLK / HZ);
++
++#if defined(CONFIG_ARCH_FH8833)
++		fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfffbffff);
++		while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff)
++			;
++#endif
++
++#if defined(CONFIG_ARCH_FH8810)
++		fh_pmu_set_reg(REG_PMU_SWRST_MAIN_CTRL, 0xfff7ffff);
++		while (fh_pmu_get_reg(REG_PMU_SWRST_MAIN_CTRL) != 0xffffffff)
++			;
++#endif
++		break;
++	case CLOCK_EVT_MODE_ONESHOT:
++		break;
++	case CLOCK_EVT_MODE_UNUSED:
++	case CLOCK_EVT_MODE_SHUTDOWN:
++		SET_REG(VTIMER(REG_TIMER_CTRL_REG(1)), 0x0);
++		break;
++	case CLOCK_EVT_MODE_RESUME:
++		SET_REG(VTIMER(REG_TIMER_CTRL_REG(1)), 0x3);
++		break;
++	}
++}
++
++
++static irqreturn_t fh_clock_timer_interrupt_handle(int irq, void *dev_id)
++{
++	unsigned int status;
++	status = GET_REG(VTIMER(REG_TIMERS_INTSTATUS));
++
++
++#ifdef CONFIG_FH_SIMPLE_TIMER
++	if (status & (1 << SIMPLE_TIMER_BASE))
++		fh_simple_timer_interrupt();
++#endif
++	if (status & 0x2) {
++		GET_REG(VTIMER(REG_TIMER_EOI_REG(1)));
++		clockevent_fh.event_handler(&clockevent_fh);
++	}
++
++	return IRQ_HANDLED;
++}
++
++static struct irqaction fh_eventtimer_irq = {
++	.name           = "System Timer Tick",
++	.flags          =  IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER,
++	.handler        = fh_clock_timer_interrupt_handle,
++	.dev_id		= &clockevent_fh,
++};
++
++
++static void fh_timer_resources(void)
++{
++}
++
++static DEFINE_CLOCK_DATA(cd);
++
++static void notrace fh_update_sched_clock(void)
++{
++	const cycle_t cyc = clocksource_fh.read(&clocksource_fh);
++	update_sched_clock(&cd, cyc, (u32)~0);
++}
++
++unsigned long long notrace sched_clock(void)
++{
++	const cycle_t cyc = clocksource_fh.read(&clocksource_fh);
++
++	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
++}
++
++static void fh_clocksource_init(void)
++{
++#ifdef CONFIG_USE_PTS_AS_CLOCKSOURCE
++	unsigned long clock_tick_rate = pts_clk->frequency;
++#else
++	unsigned long clock_tick_rate = timer0_clk->frequency;
++	prev_cycle = 0;
++#endif
++
++	if (clocksource_register_hz(&clocksource_fh, clock_tick_rate))
++		panic("failed to register clocksource %s\n", clocksource_fh.name);
++
++	printk(KERN_INFO "timer mult: 0x%x, timer shift: 0x%x\n",
++			clocksource_fh.mult, clocksource_fh.shift);
++
++	/* force check the mult/shift of clocksource */
++	init_fixed_sched_clock(&cd, fh_update_sched_clock, 32, clock_tick_rate,
++			       clocksource_fh.mult, clocksource_fh.shift);
++}
++
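++/*
++ * The hardware timer counts down, so the raw value is bitwise inverted
++ * to present a monotonically increasing cycle count. The FH8810 path
++ * re-reads the counter when it appears to have run backwards, which
++ * guards against an unstable register read on that chip.
++ */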
++static cycle_t fh_clocksource_read(struct clocksource *cs)
++{
++#ifdef CONFIG_USE_PTS_AS_CLOCKSOURCE
++	return GET_REG(VPAE(REG_PAE_PTS_REG));
++#else
++	unsigned int cycle;
++	cycle = ~GET_REG(VTIMER(REG_TIMER_CUR_VAL(0)));
++#ifdef CONFIG_ARCH_FH8810
++	if (unlikely(prev_cycle > cycle))
++		cycle = ~GET_REG(VTIMER(REG_TIMER_CUR_VAL(0)));
++	prev_cycle = cycle;
++#endif
++	return cycle;
++#endif
++}
++
++static void fh_clockevent_init(void)
++{
++	setup_irq(TMR0_IRQ, &fh_eventtimer_irq);
++	clockevent_fh.mult = div_sc(timer1_clk->frequency,
++			NSEC_PER_SEC, clockevent_fh.shift);
++	clockevent_fh.max_delta_ns = clockevent_delta2ns(0xffffffff,
++				       &clockevent_fh);
++
++	clockevent_fh.min_delta_ns = clockevent_delta2ns(0xf, &clockevent_fh);
++
++	clockevent_fh.cpumask = cpumask_of(0);
++	clockevents_register_device(&clockevent_fh);
++}
++
++
++static void __init fh_timer_init(void)
++{
++	/* note: both timer lookups use "tmr0_clk" */
++	timer0_clk = clk_get(NULL, "tmr0_clk");
++	timer1_clk = clk_get(NULL, "tmr0_clk");
++	pts_clk = clk_get(NULL, "pts_clk");
++	if (IS_ERR(timer0_clk) || IS_ERR(timer1_clk) || IS_ERR(pts_clk))
++		pr_err("fh_timer: clock is not defined\n");
++
++#ifdef CONFIG_USE_PTS_AS_CLOCKSOURCE
++	clk_set_rate(pts_clk, PAE_PTS_CLK);
++	clk_enable(pts_clk);
++#endif
++
++	clk_set_rate(timer0_clk, TIMER0_CLK);
++	clk_enable(timer0_clk);
++
++	clk_set_rate(timer1_clk, TIMER1_CLK);
++	clk_enable(timer1_clk);
++
++	timer0_clk->frequency = 1000000;
++	timer1_clk->frequency = 1000000;
++
++	fh_timer_resources();
++	fh_clocksource_init();
++	fh_clockevent_init();
++#ifdef CONFIG_FH_SIMPLE_TIMER
++	fh_simple_timer_init();
++#endif
++}
++
++static struct clocksource clocksource_fh = {
++	.name		= "fh_clocksource",
++	.rating		= 300,
++	.read		= fh_clocksource_read,
++	.mask		= CLOCKSOURCE_MASK(32),
++	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
++};
++
++static struct clock_event_device clockevent_fh = {
++	.name			= "fh_clockevent",
++	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
++	.shift			= 32,
++	.set_next_event	= fh_set_next_event,
++	.set_mode		= fh_set_mode,
++};
++
++struct sys_timer fh_timer = {
++	.init   = fh_timer_init,
++};
+diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
+index 3b3776d0..96681cd9 100644
+--- a/arch/arm/tools/mach-types
++++ b/arch/arm/tools/mach-types
+@@ -1113,3 +1113,6 @@ blissc			MACH_BLISSC		BLISSC			3491
+ thales_adc		MACH_THALES_ADC		THALES_ADC		3492
+ ubisys_p9d_evp		MACH_UBISYS_P9D_EVP	UBISYS_P9D_EVP		3493
+ atdgp318		MACH_ATDGP318		ATDGP318		3494
++fh8810			MACH_FH8810		FH8810			9999
++fh8830			MACH_FH8830		FH8830			9999
++fh8833			MACH_FH8833		FH8833			9999
+\ No newline at end of file
+diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
+deleted file mode 120000
+index 7cb65789..00000000
+--- a/arch/microblaze/boot/dts/system.dts
++++ /dev/null
+@@ -1 +0,0 @@
+-../../platform/generic/system.dts
+\ No newline at end of file
+diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
+new file mode 100644
+index 00000000..3f85df2b
+--- /dev/null
++++ b/arch/microblaze/boot/dts/system.dts
+@@ -0,0 +1,367 @@
++/*
++ * Device Tree Generator version: 1.1
++ *
++ * (C) Copyright 2007-2008 Xilinx, Inc.
++ * (C) Copyright 2007-2009 Michal Simek
++ *
++ * Michal SIMEK <monstr@monstr.eu>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of
++ * the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
++ * MA 02111-1307 USA
++ *
++ * CAUTION: This file is automatically generated by libgen.
++ * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6
++ *
++ * XPS project directory: Xilinx-ML505-ll_temac-sgdma-MMU-FDT-edk101
++ */
++
++/dts-v1/;
++/ {
++	#address-cells = <1>;
++	#size-cells = <1>;
++	compatible = "xlnx,microblaze";
++	hard-reset-gpios = <&LEDs_8Bit 2 1>;
++	model = "testing";
++	DDR2_SDRAM: memory@90000000 {
++		device_type = "memory";
++		reg = < 0x90000000 0x10000000 >;
++	} ;
++	aliases {
++		ethernet0 = &Hard_Ethernet_MAC;
++		serial0 = &RS232_Uart_1;
++	} ;
++	chosen {
++		bootargs = "console=ttyUL0,115200 highres=on";
++		linux,stdout-path = "/plb@0/serial@84000000";
++	} ;
++	cpus {
++		#address-cells = <1>;
++		#cpus = <0x1>;
++		#size-cells = <0>;
++		microblaze_0: cpu@0 {
++			clock-frequency = <125000000>;
++			compatible = "xlnx,microblaze-7.10.d";
++			d-cache-baseaddr = <0x90000000>;
++			d-cache-highaddr = <0x9fffffff>;
++			d-cache-line-size = <0x10>;
++			d-cache-size = <0x2000>;
++			device_type = "cpu";
++			i-cache-baseaddr = <0x90000000>;
++			i-cache-highaddr = <0x9fffffff>;
++			i-cache-line-size = <0x10>;
++			i-cache-size = <0x2000>;
++			model = "microblaze,7.10.d";
++			reg = <0>;
++			timebase-frequency = <125000000>;
++			xlnx,addr-tag-bits = <0xf>;
++			xlnx,allow-dcache-wr = <0x1>;
++			xlnx,allow-icache-wr = <0x1>;
++			xlnx,area-optimized = <0x0>;
++			xlnx,cache-byte-size = <0x2000>;
++			xlnx,d-lmb = <0x1>;
++			xlnx,d-opb = <0x0>;
++			xlnx,d-plb = <0x1>;
++			xlnx,data-size = <0x20>;
++			xlnx,dcache-addr-tag = <0xf>;
++			xlnx,dcache-always-used = <0x1>;
++			xlnx,dcache-byte-size = <0x2000>;
++			xlnx,dcache-line-len = <0x4>;
++			xlnx,dcache-use-fsl = <0x1>;
++			xlnx,debug-enabled = <0x1>;
++			xlnx,div-zero-exception = <0x1>;
++			xlnx,dopb-bus-exception = <0x0>;
++			xlnx,dynamic-bus-sizing = <0x1>;
++			xlnx,edge-is-positive = <0x1>;
++			xlnx,family = "virtex5";
++			xlnx,endianness = <0x1>;
++			xlnx,fpu-exception = <0x1>;
++			xlnx,fsl-data-size = <0x20>;
++			xlnx,fsl-exception = <0x0>;
++			xlnx,fsl-links = <0x0>;
++			xlnx,i-lmb = <0x1>;
++			xlnx,i-opb = <0x0>;
++			xlnx,i-plb = <0x1>;
++			xlnx,icache-always-used = <0x1>;
++			xlnx,icache-line-len = <0x4>;
++			xlnx,icache-use-fsl = <0x1>;
++			xlnx,ill-opcode-exception = <0x1>;
++			xlnx,instance = "microblaze_0";
++			xlnx,interconnect = <0x1>;
++			xlnx,interrupt-is-edge = <0x0>;
++			xlnx,iopb-bus-exception = <0x0>;
++			xlnx,mmu-dtlb-size = <0x4>;
++			xlnx,mmu-itlb-size = <0x2>;
++			xlnx,mmu-tlb-access = <0x3>;
++			xlnx,mmu-zones = <0x10>;
++			xlnx,number-of-pc-brk = <0x1>;
++			xlnx,number-of-rd-addr-brk = <0x0>;
++			xlnx,number-of-wr-addr-brk = <0x0>;
++			xlnx,opcode-0x0-illegal = <0x1>;
++			xlnx,pvr = <0x2>;
++			xlnx,pvr-user1 = <0x0>;
++			xlnx,pvr-user2 = <0x0>;
++			xlnx,reset-msr = <0x0>;
++			xlnx,sco = <0x0>;
++			xlnx,unaligned-exceptions = <0x1>;
++			xlnx,use-barrel = <0x1>;
++			xlnx,use-dcache = <0x1>;
++			xlnx,use-div = <0x1>;
++			xlnx,use-ext-brk = <0x1>;
++			xlnx,use-ext-nm-brk = <0x1>;
++			xlnx,use-extended-fsl-instr = <0x0>;
++			xlnx,use-fpu = <0x2>;
++			xlnx,use-hw-mul = <0x2>;
++			xlnx,use-icache = <0x1>;
++			xlnx,use-interrupt = <0x1>;
++			xlnx,use-mmu = <0x3>;
++			xlnx,use-msr-instr = <0x1>;
++			xlnx,use-pcmp-instr = <0x1>;
++		} ;
++	} ;
++	mb_plb: plb@0 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus";
++		ranges ;
++		FLASH: flash@a0000000 {
++			bank-width = <2>;
++			compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
++			reg = < 0xa0000000 0x2000000 >;
++			xlnx,family = "virtex5";
++			xlnx,include-datawidth-matching-0 = <0x1>;
++			xlnx,include-datawidth-matching-1 = <0x0>;
++			xlnx,include-datawidth-matching-2 = <0x0>;
++			xlnx,include-datawidth-matching-3 = <0x0>;
++			xlnx,include-negedge-ioregs = <0x0>;
++			xlnx,include-plb-ipif = <0x1>;
++			xlnx,include-wrbuf = <0x1>;
++			xlnx,max-mem-width = <0x10>;
++			xlnx,mch-native-dwidth = <0x20>;
++			xlnx,mch-plb-clk-period-ps = <0x1f40>;
++			xlnx,mch-splb-awidth = <0x20>;
++			xlnx,mch0-accessbuf-depth = <0x10>;
++			xlnx,mch0-protocol = <0x0>;
++			xlnx,mch0-rddatabuf-depth = <0x10>;
++			xlnx,mch1-accessbuf-depth = <0x10>;
++			xlnx,mch1-protocol = <0x0>;
++			xlnx,mch1-rddatabuf-depth = <0x10>;
++			xlnx,mch2-accessbuf-depth = <0x10>;
++			xlnx,mch2-protocol = <0x0>;
++			xlnx,mch2-rddatabuf-depth = <0x10>;
++			xlnx,mch3-accessbuf-depth = <0x10>;
++			xlnx,mch3-protocol = <0x0>;
++			xlnx,mch3-rddatabuf-depth = <0x10>;
++			xlnx,mem0-width = <0x10>;
++			xlnx,mem1-width = <0x20>;
++			xlnx,mem2-width = <0x20>;
++			xlnx,mem3-width = <0x20>;
++			xlnx,num-banks-mem = <0x1>;
++			xlnx,num-channels = <0x0>;
++			xlnx,priority-mode = <0x0>;
++			xlnx,synch-mem-0 = <0x0>;
++			xlnx,synch-mem-1 = <0x0>;
++			xlnx,synch-mem-2 = <0x0>;
++			xlnx,synch-mem-3 = <0x0>;
++			xlnx,synch-pipedelay-0 = <0x2>;
++			xlnx,synch-pipedelay-1 = <0x2>;
++			xlnx,synch-pipedelay-2 = <0x2>;
++			xlnx,synch-pipedelay-3 = <0x2>;
++			xlnx,tavdv-ps-mem-0 = <0x1adb0>;
++			xlnx,tavdv-ps-mem-1 = <0x3a98>;
++			xlnx,tavdv-ps-mem-2 = <0x3a98>;
++			xlnx,tavdv-ps-mem-3 = <0x3a98>;
++			xlnx,tcedv-ps-mem-0 = <0x1adb0>;
++			xlnx,tcedv-ps-mem-1 = <0x3a98>;
++			xlnx,tcedv-ps-mem-2 = <0x3a98>;
++			xlnx,tcedv-ps-mem-3 = <0x3a98>;
++			xlnx,thzce-ps-mem-0 = <0x88b8>;
++			xlnx,thzce-ps-mem-1 = <0x1b58>;
++			xlnx,thzce-ps-mem-2 = <0x1b58>;
++			xlnx,thzce-ps-mem-3 = <0x1b58>;
++			xlnx,thzoe-ps-mem-0 = <0x1b58>;
++			xlnx,thzoe-ps-mem-1 = <0x1b58>;
++			xlnx,thzoe-ps-mem-2 = <0x1b58>;
++			xlnx,thzoe-ps-mem-3 = <0x1b58>;
++			xlnx,tlzwe-ps-mem-0 = <0x88b8>;
++			xlnx,tlzwe-ps-mem-1 = <0x0>;
++			xlnx,tlzwe-ps-mem-2 = <0x0>;
++			xlnx,tlzwe-ps-mem-3 = <0x0>;
++			xlnx,twc-ps-mem-0 = <0x2af8>;
++			xlnx,twc-ps-mem-1 = <0x3a98>;
++			xlnx,twc-ps-mem-2 = <0x3a98>;
++			xlnx,twc-ps-mem-3 = <0x3a98>;
++			xlnx,twp-ps-mem-0 = <0x11170>;
++			xlnx,twp-ps-mem-1 = <0x2ee0>;
++			xlnx,twp-ps-mem-2 = <0x2ee0>;
++			xlnx,twp-ps-mem-3 = <0x2ee0>;
++			xlnx,xcl0-linesize = <0x4>;
++			xlnx,xcl0-writexfer = <0x1>;
++			xlnx,xcl1-linesize = <0x4>;
++			xlnx,xcl1-writexfer = <0x1>;
++			xlnx,xcl2-linesize = <0x4>;
++			xlnx,xcl2-writexfer = <0x1>;
++			xlnx,xcl3-linesize = <0x4>;
++			xlnx,xcl3-writexfer = <0x1>;
++		} ;
++		Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "xlnx,compound";
++			ranges ;
++			ethernet@81c00000 {
++				compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
++				device_type = "network";
++				interrupt-parent = <&xps_intc_0>;
++				interrupts = < 5 2 >;
++				llink-connected = <&PIM3>;
++				local-mac-address = [ 00 0a 35 00 00 00 ];
++				reg = < 0x81c00000 0x40 >;
++				xlnx,bus2core-clk-ratio = <0x1>;
++				xlnx,phy-type = <0x1>;
++				xlnx,phyaddr = <0x1>;
++				xlnx,rxcsum = <0x0>;
++				xlnx,rxfifo = <0x1000>;
++				xlnx,temac-type = <0x0>;
++				xlnx,txcsum = <0x0>;
++				xlnx,txfifo = <0x1000>;
++			} ;
++		} ;
++		IIC_EEPROM: i2c@81600000 {
++			compatible = "xlnx,xps-iic-2.00.a";
++			interrupt-parent = <&xps_intc_0>;
++			interrupts = < 6 2 >;
++			reg = < 0x81600000 0x10000 >;
++			xlnx,clk-freq = <0x7735940>;
++			xlnx,family = "virtex5";
++			xlnx,gpo-width = <0x1>;
++			xlnx,iic-freq = <0x186a0>;
++			xlnx,scl-inertial-delay = <0x0>;
++			xlnx,sda-inertial-delay = <0x0>;
++			xlnx,ten-bit-adr = <0x0>;
++		} ;
++		LEDs_8Bit: gpio@81400000 {
++			compatible = "xlnx,xps-gpio-1.00.a";
++			interrupt-parent = <&xps_intc_0>;
++			interrupts = < 7 2 >;
++			reg = < 0x81400000 0x10000 >;
++			xlnx,all-inputs = <0x0>;
++			xlnx,all-inputs-2 = <0x0>;
++			xlnx,dout-default = <0x0>;
++			xlnx,dout-default-2 = <0x0>;
++			xlnx,family = "virtex5";
++			xlnx,gpio-width = <0x8>;
++			xlnx,interrupt-present = <0x1>;
++			xlnx,is-bidir = <0x1>;
++			xlnx,is-bidir-2 = <0x1>;
++			xlnx,is-dual = <0x0>;
++			xlnx,tri-default = <0xffffffff>;
++			xlnx,tri-default-2 = <0xffffffff>;
++			#gpio-cells = <2>;
++			gpio-controller;
++		} ;
++
++		gpio-leds {
++			compatible = "gpio-leds";
++
++			heartbeat {
++				label = "Heartbeat";
++				gpios = <&LEDs_8Bit 4 1>;
++				linux,default-trigger = "heartbeat";
++			};
++
++			yellow {
++				label = "Yellow";
++				gpios = <&LEDs_8Bit 5 1>;
++			};
++
++			red {
++				label = "Red";
++				gpios = <&LEDs_8Bit 6 1>;
++			};
++
++			green {
++				label = "Green";
++				gpios = <&LEDs_8Bit 7 1>;
++			};
++		} ;
++		RS232_Uart_1: serial@84000000 {
++			clock-frequency = <125000000>;
++			compatible = "xlnx,xps-uartlite-1.00.a";
++			current-speed = <115200>;
++			device_type = "serial";
++			interrupt-parent = <&xps_intc_0>;
++			interrupts = < 8 0 >;
++			port-number = <0>;
++			reg = < 0x84000000 0x10000 >;
++			xlnx,baudrate = <0x1c200>;
++			xlnx,data-bits = <0x8>;
++			xlnx,family = "virtex5";
++			xlnx,odd-parity = <0x0>;
++			xlnx,use-parity = <0x0>;
++		} ;
++		SysACE_CompactFlash: sysace@83600000 {
++			compatible = "xlnx,xps-sysace-1.00.a";
++			interrupt-parent = <&xps_intc_0>;
++			interrupts = < 4 2 >;
++			reg = < 0x83600000 0x10000 >;
++			xlnx,family = "virtex5";
++			xlnx,mem-width = <0x10>;
++		} ;
++		debug_module: debug@84400000 {
++			compatible = "xlnx,mdm-1.00.d";
++			reg = < 0x84400000 0x10000 >;
++			xlnx,family = "virtex5";
++			xlnx,interconnect = <0x1>;
++			xlnx,jtag-chain = <0x2>;
++			xlnx,mb-dbg-ports = <0x1>;
++			xlnx,uart-width = <0x8>;
++			xlnx,use-uart = <0x1>;
++			xlnx,write-fsl-ports = <0x0>;
++		} ;
++		mpmc@90000000 {
++			#address-cells = <1>;
++			#size-cells = <1>;
++			compatible = "xlnx,mpmc-4.02.a";
++			ranges ;
++			PIM3: sdma@84600180 {
++				compatible = "xlnx,ll-dma-1.00.a";
++				interrupt-parent = <&xps_intc_0>;
++				interrupts = < 2 2 1 2 >;
++				reg = < 0x84600180 0x80 >;
++			} ;
++		} ;
++		xps_intc_0: interrupt-controller@81800000 {
++			#interrupt-cells = <0x2>;
++			compatible = "xlnx,xps-intc-1.00.a";
++			interrupt-controller ;
++			reg = < 0x81800000 0x10000 >;
++			xlnx,kind-of-intr = <0x100>;
++			xlnx,num-intr-inputs = <0x9>;
++		} ;
++		xps_timer_1: timer@83c00000 {
++			compatible = "xlnx,xps-timer-1.00.a";
++			interrupt-parent = <&xps_intc_0>;
++			interrupts = < 3 2 >;
++			reg = < 0x83c00000 0x10000 >;
++			xlnx,count-width = <0x20>;
++			xlnx,family = "virtex5";
++			xlnx,gen0-assert = <0x1>;
++			xlnx,gen1-assert = <0x1>;
++			xlnx,one-timer-only = <0x0>;
++			xlnx,trig0-assert = <0x1>;
++			xlnx,trig1-assert = <0x1>;
++		} ;
++	} ;
++}  ;
+diff --git a/drivers/Kconfig b/drivers/Kconfig
+index 3bb154d8..67f5c27f 100644
+--- a/drivers/Kconfig
++++ b/drivers/Kconfig
+@@ -126,4 +126,6 @@ source "drivers/hwspinlock/Kconfig"
+ 
+ source "drivers/clocksource/Kconfig"
+ 
++source "drivers/pwm/Kconfig"
++
+ endmenu
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 09f3232b..c3217634 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -6,6 +6,7 @@
+ #
+ 
+ obj-y				+= gpio/
++obj-y				+= pwm/
+ obj-$(CONFIG_PCI)		+= pci/
+ obj-$(CONFIG_PARISC)		+= parisc/
+ obj-$(CONFIG_RAPIDIO)		+= rapidio/
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index e0b25de1..8624d851 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -292,4 +292,24 @@ config CRYPTO_DEV_S5P
+ 	  Select this to offload Samsung S5PV210 or S5PC110 from AES
+ 	  algorithms execution.
+ 
++config FH_AES
++	tristate "FH AES support"
++	select CRYPTO_BLKCIPHER
++	select CRYPTO_AES
++	select CRYPTO_DES
++#	select CRYPTO_AUTHENC
++#	select CRYPTO_ALGAPI
++	select CRYPTO_CBC
++	select CRYPTO_ECB
++	select CRYPTO_SEQIV
++	help
++	  To compile this driver as a module, choose M here: the module will
++	  be called fh_aes.
++
++config FH_AES_SELF_TEST
++	bool "fh aes self test"
++	depends on FH_AES
++
+ endif # CRYPTO_HW
+diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
+index 53ea5015..46f30b50 100644
+--- a/drivers/crypto/Makefile
++++ b/drivers/crypto/Makefile
+@@ -13,3 +13,6 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
+ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
++
++obj-$(CONFIG_FH_AES)		+= fh_aes.o
++obj-$(CONFIG_FH_AES_SELF_TEST)		+= fh_aes_test.o
+diff --git a/drivers/crypto/fh_aes.c b/drivers/crypto/fh_aes.c
+new file mode 100644
+index 00000000..bf9dbc2f
+--- /dev/null
++++ b/drivers/crypto/fh_aes.c
+@@ -0,0 +1,1548 @@
++/*****************************************************************************
++ *  Include Section
++ *  add all #include here
++ *****************************************************************************/
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/in.h>
++#include <linux/ioport.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/skbuff.h>
++#include <linux/highmem.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/version.h>
++#include <linux/spinlock.h>
++#include <linux/dma-mapping.h>
++#include <linux/clk.h>
++#include <linux/platform_device.h>
++#include <linux/bitops.h>
++#include <linux/io.h>
++#include <linux/irqreturn.h>
++#include <asm/irq.h>
++#include <asm/page.h>
++#include <crypto/hash.h>
++#include <linux/err.h>
++#include <linux/scatterlist.h>
++#include <crypto/rng.h>
++#include "fh_aes.h"
++
++/*****************************************************************************
++ * Define section
++ * add all #define here
++ *****************************************************************************/
++
++#define CRYPTO_QUEUE_LEN    (1)
++#define CRYPTION_POS		(0)
++#define METHOD_POS			(1)
++#define EMODE_POS			(4)
++
++#define aes_readl(aes, name) \
++	__raw_readl(&(((struct fh_aes_reg *)aes->regs)->name))
++
++#define aes_writel(aes, name, val) \
++	__raw_writel((val), &(((struct fh_aes_reg *)aes->regs)->name))
++
++#define aes_readw(aes, name) \
++	__raw_readw(&(((struct fh_aes_reg *)aes->regs)->name))
++
++#define aes_writew(aes, name, val) \
++	__raw_writew((val), &(((struct fh_aes_reg *)aes->regs)->name))
++
++#define aes_readb(aes, name) \
++	__raw_readb(&(((struct fh_aes_reg *)aes->regs)->name))
++
++#define aes_writeb(aes, name, val) \
++	__raw_writeb((val), &(((struct fh_aes_reg *)aes->regs)->name))
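++
++/*
++ * Register access goes through a struct fh_aes_reg overlay (assumed to
++ * be defined in fh_aes.h): each macro takes a register *name*, derives
++ * its address from the structure layout and uses the __raw_* MMIO
++ * accessors, e.g. aes_writel(aes, ctrl, val) for a hypothetical 'ctrl'
++ * field.
++ */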
++
++
++#ifdef CONFIG_FH_EFUSE
++#define FH_AESV2
++#else
++#undef FH_AESV2
++#endif
++
++#define FH_AES_ALLIGN_SIZE			64
++#define FH_AES_MALLOC_SIZE			4096
++#define FH_AES_CTL_MAX_PROCESS_SIZE		(FH_AES_MALLOC_SIZE - 1)
++
++#ifdef FH_AESV2
++#include <../drivers/misc/fh_efuse.h>
++extern struct wrap_efuse_obj s_efuse_obj;
++#endif
++/****************************************************************************
++ * ADT section
++ * add definition of user defined Data Type that only be used in this file  here
++ ***************************************************************************/
++
++enum {
++	ENCRYPT = 0 << CRYPTION_POS,
++	DECRYPT = 1 << CRYPTION_POS,
++};
++
++enum {
++	ECB_MODE = 0 << EMODE_POS,
++	CBC_MODE = 1 << EMODE_POS,
++	CTR_MODE = 2 << EMODE_POS,
++	CFB_MODE = 4 << EMODE_POS,
++	OFB_MODE = 5 << EMODE_POS,
++};
++
++enum {
++	DES_METHOD = 0 << METHOD_POS,
++	TRIPLE_DES_METHOD = 1 << METHOD_POS,
++	AES_128_METHOD = 4 << METHOD_POS,
++	AES_192_METHOD = 5 << METHOD_POS,
++	AES_256_METHOD = 6 << METHOD_POS,
++};
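++
++/*
++ * Together these enums assemble the control word passed to
++ * fh_aes_crypt(): bit CRYPTION_POS selects encrypt/decrypt, the bits at
++ * METHOD_POS select DES/3DES or the AES key size, and the bits at
++ * EMODE_POS select the chaining mode, so for example
++ * AES_128_METHOD | CBC_MODE | ENCRYPT fully describes one request.
++ */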
++
++/*****************************************************************************
++ *  static function declarations
++ *****************************************************************************/
++
++static int fh_aes_handle_req(struct fh_aes_dev *dev,
++			     struct ablkcipher_request *req);
++/*aes*/
++static int fh_aes_crypt(struct ablkcipher_request *req, unsigned long mode);
++static int fh_aes_ecb_encrypt(struct ablkcipher_request *req);
++static int fh_aes_ecb_decrypt(struct ablkcipher_request *req);
++static int fh_aes_cbc_encrypt(struct ablkcipher_request *req);
++static int fh_aes_cbc_decrypt(struct ablkcipher_request *req);
++static int fh_aes_ctr_encrypt(struct ablkcipher_request *req);
++static int fh_aes_ctr_decrypt(struct ablkcipher_request *req);
++static int fh_aes_ofb_encrypt(struct ablkcipher_request *req);
++static int fh_aes_ofb_decrypt(struct ablkcipher_request *req);
++static int fh_aes_cfb_encrypt(struct ablkcipher_request *req);
++static int fh_aes_cfb_decrypt(struct ablkcipher_request *req);
++
++/*des*/
++static int fh_des_ecb_encrypt(struct ablkcipher_request *req);
++static int fh_des_ecb_decrypt(struct ablkcipher_request *req);
++static int fh_des_cbc_encrypt(struct ablkcipher_request *req);
++static int fh_des_cbc_decrypt(struct ablkcipher_request *req);
++static int fh_des_ofb_encrypt(struct ablkcipher_request *req);
++static int fh_des_ofb_decrypt(struct ablkcipher_request *req);
++static int fh_des_cfb_encrypt(struct ablkcipher_request *req);
++static int fh_des_cfb_decrypt(struct ablkcipher_request *req);
++
++/*tri des*/
++static int fh_des_tri_ecb_encrypt(struct ablkcipher_request *req);
++static int fh_des_tri_ecb_decrypt(struct ablkcipher_request *req);
++static int fh_des_tri_cbc_encrypt(struct ablkcipher_request *req);
++static int fh_des_tri_cbc_decrypt(struct ablkcipher_request *req);
++static int fh_des_tri_ofb_encrypt(struct ablkcipher_request *req);
++static int fh_des_tri_ofb_decrypt(struct ablkcipher_request *req);
++static int fh_des_tri_cfb_encrypt(struct ablkcipher_request *req);
++static int fh_des_tri_cfb_decrypt(struct ablkcipher_request *req);
++static int fh_aes_setkey(struct crypto_ablkcipher *cipher, const uint8_t *key,
++			 unsigned int keylen);
++static int fh_aes_cra_init(struct crypto_tfm *tfm);
++static void fh_aes_tx(struct fh_aes_dev *dev);
++static void fh_aes_rx(struct fh_aes_dev *dev);
++static irqreturn_t fh_aes_interrupt(int irq, void *dev_id);
++static void aes_biglittle_swap(u8 *buf);
++static int fh_set_indata(struct fh_aes_dev *dev, struct scatterlist *sg);
++static int fh_set_outdata(struct fh_aes_dev *dev, struct scatterlist *sg);
++static void fh_set_aes_key_reg(struct fh_aes_dev *dev, uint8_t *key,
++			       uint8_t *iv, unsigned int keylen);
++static void fh_set_dma_indata(struct fh_aes_dev *dev,
++			      struct scatterlist *sg);
++static void fh_set_dma_outdata(struct fh_aes_dev *dev,
++			       struct scatterlist *sg);
++static void fh_unset_indata(struct fh_aes_dev *dev);
++static void fh_unset_outdata(struct fh_aes_dev *dev);
++static void fh_aes_complete(struct fh_aes_dev *dev, int err);
++static void fh_aes_crypt_start(struct fh_aes_dev *dev, unsigned long mode);
++static void fh_aes_tasklet_cb(unsigned long data);
++
++#define fh_des_setkey  fh_aes_setkey
++/*****************************************************************************
++ * Global variables section - Local
++ * define global variables(will be refered only in this file) here,
++ * static keyword should be used to limit scope of local variable to this file
++ * e.g.
++ *  static uint8_t ufoo;
++ *****************************************************************************/
++struct fh_aes_dev *pobj_aes_dev = NULL;
++static struct crypto_alg algs[] = {
++	{
++		.cra_name		= "ecb(aes)",
++		.cra_driver_name	= "ecb-aes-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= AES_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= AES_MIN_KEY_SIZE,
++			.max_keysize	= AES_MAX_KEY_SIZE,
++			.setkey		= fh_aes_setkey,
++			.encrypt	= fh_aes_ecb_encrypt,
++			.decrypt	= fh_aes_ecb_decrypt,
++		}
++	},
++	{
++		.cra_name		= "cbc(aes)",
++		.cra_driver_name	= "cbc-aes-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++
++		.cra_blocksize		= AES_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= AES_MIN_KEY_SIZE,
++			.max_keysize	= AES_MAX_KEY_SIZE,
++			.ivsize		= AES_BLOCK_SIZE,
++			.setkey		= fh_aes_setkey,
++			.encrypt	= fh_aes_cbc_encrypt,
++			.decrypt	= fh_aes_cbc_decrypt,
++		}
++	},
++	{
++		.cra_name		= "ctr(aes)",
++		.cra_driver_name	= "ctr-aes-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= AES_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= AES_MIN_KEY_SIZE,
++			.max_keysize	= AES_MAX_KEY_SIZE,
++			.ivsize		= AES_BLOCK_SIZE,
++			.setkey		= fh_aes_setkey,
++			.encrypt	= fh_aes_ctr_encrypt,
++			.decrypt	= fh_aes_ctr_decrypt,
++		}
++	},
++	{
++		.cra_name		= "ofb(aes)",
++		.cra_driver_name	= "ofb-aes-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= AES_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= AES_MIN_KEY_SIZE,
++			.max_keysize	= AES_MAX_KEY_SIZE,
++			.ivsize		= AES_BLOCK_SIZE,
++			.setkey		= fh_aes_setkey,
++			.encrypt	= fh_aes_ofb_encrypt,
++			.decrypt	= fh_aes_ofb_decrypt,
++		}
++	},
++	{
++		.cra_name		= "cfb(aes)",
++		.cra_driver_name	= "cfb-aes-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= AES_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= AES_MIN_KEY_SIZE,
++			.max_keysize	= AES_MAX_KEY_SIZE,
++			.ivsize		= AES_BLOCK_SIZE,
++			.setkey		= fh_aes_setkey,
++			.encrypt	= fh_aes_cfb_encrypt,
++			.decrypt	= fh_aes_cfb_decrypt,
++		}
++	},
++	{
++		.cra_name		= "ecb(des)",
++		.cra_driver_name	= "ecb-des-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++
++		.cra_blocksize		= DES_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= DES_KEY_SIZE,
++			.max_keysize	= DES_KEY_SIZE,
++			.setkey		= fh_des_setkey,
++			.encrypt	= fh_des_ecb_encrypt,
++			.decrypt	= fh_des_ecb_decrypt,
++		}
++	},
++	{
++		.cra_name		= "cbc(des)",
++		.cra_driver_name	= "cbc-des-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= DES_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= DES_KEY_SIZE,
++			.max_keysize	= DES_KEY_SIZE,
++			.ivsize		= DES_BLOCK_SIZE,
++			.setkey		= fh_des_setkey,
++			.encrypt	= fh_des_cbc_encrypt,
++			.decrypt	= fh_des_cbc_decrypt,
++		}
++	},
++	{
++		.cra_name		= "ofb(des)",
++		.cra_driver_name	= "ofb-des-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= DES_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= DES_KEY_SIZE,
++			.max_keysize	= DES_KEY_SIZE,
++			.ivsize		= DES_BLOCK_SIZE,
++			.setkey		= fh_des_setkey,
++			.encrypt	= fh_des_ofb_encrypt,
++			.decrypt	= fh_des_ofb_decrypt,
++		}
++	},
++	{
++		.cra_name		= "cfb(des)",
++		.cra_driver_name	= "cfb-des-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= DES_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= DES_KEY_SIZE,
++			.max_keysize	= DES_KEY_SIZE,
++			.ivsize		= DES_BLOCK_SIZE,
++			.setkey		= fh_des_setkey,
++			.encrypt	= fh_des_cfb_encrypt,
++			.decrypt	= fh_des_cfb_decrypt,
++		}
++	},
++	{
++		.cra_name			= "ecb(des3)",
++		.cra_driver_name	= "ecb-des3-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++
++		.cra_blocksize		= DES3_EDE_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= DES3_EDE_KEY_SIZE,
++			.max_keysize	= DES3_EDE_KEY_SIZE,
++			.setkey		= fh_des_setkey,
++			.encrypt	= fh_des_tri_ecb_encrypt,
++			.decrypt	= fh_des_tri_ecb_decrypt,
++		}
++	},
++	{
++		.cra_name			= "cbc(des3)",
++		.cra_driver_name	= "cbc-des3-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= DES3_EDE_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= DES3_EDE_KEY_SIZE,
++			.max_keysize	= DES3_EDE_KEY_SIZE,
++			.ivsize		= DES3_EDE_BLOCK_SIZE,
++			.setkey		= fh_des_setkey,
++			.encrypt	= fh_des_tri_cbc_encrypt,
++			.decrypt	= fh_des_tri_cbc_decrypt,
++		}
++	},
++	{
++		.cra_name			= "ofb(des3)",
++		.cra_driver_name	= "ofb-des3-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= DES3_EDE_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= DES3_EDE_KEY_SIZE,
++			.max_keysize	= DES3_EDE_KEY_SIZE,
++			.ivsize		= DES3_EDE_BLOCK_SIZE,
++			.setkey		= fh_des_setkey,
++			.encrypt	= fh_des_tri_ofb_encrypt,
++			.decrypt	= fh_des_tri_ofb_decrypt,
++		}
++	},
++	{
++		.cra_name			= "cfb(des3)",
++		.cra_driver_name	= "cfb-des3-fh",
++		.cra_priority		= 100,
++		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC,
++		.cra_blocksize		= DES3_EDE_BLOCK_SIZE,
++		.cra_ctxsize		= sizeof(struct fh_aes_ctx),
++		.cra_alignmask		= 0x0f,
++		.cra_type		= &crypto_ablkcipher_type,
++		.cra_module		= THIS_MODULE,
++		.cra_init		= fh_aes_cra_init,
++		.cra_u.ablkcipher = {
++			.min_keysize	= DES3_EDE_KEY_SIZE,
++			.max_keysize	= DES3_EDE_KEY_SIZE,
++			.ivsize		= DES3_EDE_BLOCK_SIZE,
++			.setkey		= fh_des_setkey,
++			.encrypt	= fh_des_tri_cfb_encrypt,
++			.decrypt	= fh_des_tri_cfb_decrypt,
++		}
++	},
++};
++
++#ifdef CONFIG_FH_AES_SELF_TEST
++extern void fh_aes_self_test_all(void);
++#endif
++
++/* function body */
++static int fh_aes_handle_req(struct fh_aes_dev *dev,
++			     struct ablkcipher_request *req)
++{
++	unsigned long flags;
++	int err;
++	spin_lock_irqsave(&dev->lock, flags);
++	if (dev->busy) {
++		err = -EAGAIN;
++		spin_unlock_irqrestore(&dev->lock, flags);
++		goto exit;
++	}
++	dev->busy = true;	/*true :1 ;false :0.*/
++	err = ablkcipher_enqueue_request(&dev->queue, req);
++	spin_unlock_irqrestore(&dev->lock, flags);
++	tasklet_schedule(&dev->tasklet);
++exit:
++	return err;
++}
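++
++/*
++ * The engine handles one request at a time: CRYPTO_QUEUE_LEN is 1 and
++ * dev->busy is tested under dev->lock, so a second submitter sees
++ * -EAGAIN instead of queueing behind the running request; the scheduled
++ * tasklet (fh_aes_tasklet_cb) then dequeues the request and starts the
++ * transfer.
++ */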
++
++static int fh_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
++{
++	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
++	struct fh_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
++	struct fh_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
++	struct fh_aes_dev *dev = ctx->dev;
++	AES_DBG("%s\n", __func__);
++	dev->reqctx = reqctx;
++	/*if (!(mode & CFB_MODE)) {*/
++		if ((!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
++		    && (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE))) {
++			pr_err("request size is not exact amount of AES blocks\n");
++			return -EINVAL;
++		}
++	/*}*/
++	AES_DBG("reqctx->mode value: %x\n", (unsigned int_t)mode);
++	reqctx->mode = mode;
++	return fh_aes_handle_req(dev, req);
++}
++
++static int fh_aes_ecb_encrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
++	struct fh_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
++	u32 method = 0;
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++	return fh_aes_crypt(req, method | ECB_MODE | ENCRYPT);
++}
++
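++/*
++ * Each encrypt/decrypt entry point below only maps the cached key
++ * length to the matching *_METHOD bits and ORs in its mode and
++ * direction before delegating to fh_aes_crypt().
++ */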
++
++static int fh_aes_ecb_decrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
++	struct fh_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
++	u32 method = 0;
++
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++	return fh_aes_crypt(req, method | ECB_MODE | DECRYPT);
++}
++
++static int fh_aes_cbc_encrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
++	struct fh_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
++	u32 method = 0;
++
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++	return fh_aes_crypt(req, method | CBC_MODE | ENCRYPT);
++}
++
++static int fh_aes_cbc_decrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm;
++	struct fh_aes_ctx *ctx;
++	u32 method;
++
++	tfm = crypto_ablkcipher_reqtfm(req);
++	ctx = crypto_ablkcipher_ctx(tfm);
++	method = 0;
++	AES_DBG("%s\n", __func__);
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++
++	return fh_aes_crypt(req, method | CBC_MODE | DECRYPT);
++}
++
++static int fh_aes_ctr_encrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm;
++	struct fh_aes_ctx *ctx;
++	u32 method;
++	tfm = crypto_ablkcipher_reqtfm(req);
++	ctx = crypto_ablkcipher_ctx(tfm);
++	method = 0;
++
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++
++	return fh_aes_crypt(req, method | CTR_MODE | ENCRYPT);
++}
++
++static int fh_aes_ctr_decrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm;
++	struct fh_aes_ctx *ctx;
++	u32 method;
++
++	tfm = crypto_ablkcipher_reqtfm(req);
++	ctx = crypto_ablkcipher_ctx(tfm);
++	method = 0;
++	AES_DBG("%s\n", __func__);
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++	return fh_aes_crypt(req, method | CTR_MODE | DECRYPT);
++}
++
++static int fh_aes_ofb_encrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm;
++	struct fh_aes_ctx *ctx;
++	u32 method;
++
++	tfm = crypto_ablkcipher_reqtfm(req);
++	ctx = crypto_ablkcipher_ctx(tfm);
++	method = 0;
++
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++	return fh_aes_crypt(req, method | OFB_MODE | ENCRYPT);
++}
++
++static int fh_aes_ofb_decrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm;
++	struct fh_aes_ctx *ctx;
++	u32 method;
++
++	tfm = crypto_ablkcipher_reqtfm(req);
++	ctx = crypto_ablkcipher_ctx(tfm);
++	method = 0;
++
++	AES_DBG("%s\n", __func__);
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++
++	return fh_aes_crypt(req, method | OFB_MODE | DECRYPT);
++}
++
++static int fh_aes_cfb_encrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm;
++	struct fh_aes_ctx *ctx;
++	u32 method;
++
++	tfm = crypto_ablkcipher_reqtfm(req);
++	ctx = crypto_ablkcipher_ctx(tfm);
++	method = 0;
++
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++	return fh_aes_crypt(req, method | CFB_MODE | ENCRYPT);
++}
++
++static int fh_aes_cfb_decrypt(struct ablkcipher_request *req)
++{
++	struct crypto_ablkcipher *tfm;
++	struct fh_aes_ctx *ctx;
++	u32 method;
++
++	tfm = crypto_ablkcipher_reqtfm(req);
++	ctx = crypto_ablkcipher_ctx(tfm);
++	method = 0;
++
++	AES_DBG("%s\n", __func__);
++	switch (ctx->keylen) {
++	case AES_KEYSIZE_128:
++		method = AES_128_METHOD;
++		break;
++	case AES_KEYSIZE_192:
++		method = AES_192_METHOD;
++		break;
++	case AES_KEYSIZE_256:
++		method = AES_256_METHOD;
++		break;
++	default:
++		break;
++	}
++
++	return fh_aes_crypt(req, method | CFB_MODE | DECRYPT);
++}
++static int fh_des_ecb_encrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = DES_METHOD;
++
++	return fh_aes_crypt(req, method | ECB_MODE | ENCRYPT);
++}
++
++static int fh_des_ecb_decrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = DES_METHOD;
++
++	return fh_aes_crypt(req, method | ECB_MODE | DECRYPT);
++}
++
++static int fh_des_cbc_encrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = DES_METHOD;
++
++	return fh_aes_crypt(req, method | CBC_MODE | ENCRYPT);
++}
++
++static int fh_des_cbc_decrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = DES_METHOD;
++
++	return fh_aes_crypt(req, method | CBC_MODE | DECRYPT);
++}
++
++static int fh_des_ofb_encrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = DES_METHOD;
++
++	return fh_aes_crypt(req, method | OFB_MODE | ENCRYPT);
++}
++
++static int fh_des_ofb_decrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = DES_METHOD;
++
++	return fh_aes_crypt(req, method | OFB_MODE | DECRYPT);
++}
++
++static int fh_des_cfb_encrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = DES_METHOD;
++
++	return fh_aes_crypt(req, method | CFB_MODE | ENCRYPT);
++}
++
++static int fh_des_cfb_decrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = DES_METHOD;
++
++	return fh_aes_crypt(req, method | CFB_MODE | DECRYPT);
++}
++static int fh_des_tri_ecb_encrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = TRIPLE_DES_METHOD;
++	return fh_aes_crypt(req, method | ECB_MODE | ENCRYPT);
++}
++
++static int fh_des_tri_ecb_decrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = TRIPLE_DES_METHOD;
++	return fh_aes_crypt(req, method | ECB_MODE | DECRYPT);
++}
++
++static int fh_des_tri_cbc_encrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = TRIPLE_DES_METHOD;
++	return fh_aes_crypt(req, method | CBC_MODE | ENCRYPT);
++}
++
++static int fh_des_tri_cbc_decrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = TRIPLE_DES_METHOD;
++	return fh_aes_crypt(req, method | CBC_MODE | DECRYPT);
++}
++
++static int fh_des_tri_ofb_encrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = TRIPLE_DES_METHOD;
++	return fh_aes_crypt(req, method | OFB_MODE | ENCRYPT);
++}
++
++static int fh_des_tri_ofb_decrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = TRIPLE_DES_METHOD;
++	return fh_aes_crypt(req, method | OFB_MODE | DECRYPT);
++}
++
++static int fh_des_tri_cfb_encrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = TRIPLE_DES_METHOD;
++	return fh_aes_crypt(req, method | CFB_MODE | ENCRYPT);
++}
++
++static int fh_des_tri_cfb_decrypt(struct ablkcipher_request *req)
++{
++	u32 method;
++	method = 0;
++	method = TRIPLE_DES_METHOD;
++	return fh_aes_crypt(req, method | CFB_MODE | DECRYPT);
++}
++static int fh_aes_setkey(struct crypto_ablkcipher *cipher, const uint8_t *key,
++			 unsigned int keylen)
++{
++	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
++	struct fh_aes_ctx *ctx = crypto_tfm_ctx(tfm);
++	int i = 0;
++	AES_DBG("%s\n", __func__);
++	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192
++	    && keylen != AES_KEYSIZE_256 && keylen != DES_KEY_SIZE
++	    && keylen != DES3_EDE_KEY_SIZE)
++		return -EINVAL;
++
++	for (; i < keylen; i++)
++		AES_DBG("%x", key[i]);
++	AES_DBG("\n");
++
++	memcpy(ctx->aes_key, key, keylen);
++	ctx->keylen = keylen;
++
++	return 0;
++}
++
++static int fh_aes_cra_init(struct crypto_tfm *tfm)
++{
++	struct fh_aes_ctx *ctx = crypto_tfm_ctx(tfm);
++	ctx->dev = pobj_aes_dev;
++	tfm->crt_ablkcipher.reqsize = sizeof(struct fh_aes_reqctx);
++	AES_DBG("%s\n", __func__);
++
++	return 0;
++}
++
++static void fh_aes_tx(struct fh_aes_dev *dev)
++{
++	int err = 0;
++	unsigned int i = 0;
++	struct ablkcipher_request *req = dev->req;
++	struct scatterlist *temp_sg = req->dst;
++	fh_unset_outdata(dev);
++	do {
++		sg_copy_from_buffer(temp_sg, 1, &dev->ctl_dst_xbuf[i],
++				    sg_dma_len(temp_sg));
++		i += sg_dma_len(temp_sg);
++		temp_sg = sg_next(temp_sg);
++	} while (temp_sg != NULL);
++
++	fh_aes_complete(dev, err);
++}
++
++static void fh_aes_rx(struct fh_aes_dev *dev)
++{
++	fh_unset_indata(dev);
++}
++
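++/*
++ * DMA-done ISR: stop the DMA, clear and mask the interrupt sources,
++ * then unmap the buffers and complete the request.  If another request
++ * has been queued meanwhile (dev->busy set again), the interrupt is
++ * re-enabled and the DMA restarted.
++ */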
++static irqreturn_t fh_aes_interrupt(int irq, void *dev_id)
++{
++
++	u32 isr_status;
++	unsigned long flags;
++	struct platform_device *pdev = (struct platform_device *) dev_id;
++	struct fh_aes_dev *dev = platform_get_drvdata(pdev);
++
++	u32 isr = dev->en_isr;
++	AES_DBG("%s\n", __func__);
++	spin_lock_irqsave(&dev->lock, flags);
++	aes_writel(dev, dma_control, 0);
++	isr_status = aes_readl(dev, intr_src);
++	aes_writel(dev, intr_clear_status, 0x07);
++	aes_writel(dev, intr_enable, 0);
++	if (isr_status & 0x02)
++		AES_DBG("dma rev hreap error...\n");
++	if (isr_status & 0x04)
++		AES_DBG("dma stop src ..\n");
++	if (isr_status & 0x01) {
++		AES_DBG("dma done..\n");
++		fh_aes_rx(dev);
++		fh_aes_tx(dev);
++		if (dev->busy == true) {
++			/*begin the next transfer...*/
++			aes_writel(dev, intr_enable, isr);
++			/*enable dma go..*/
++			aes_writel(dev, dma_control, 1);
++		}
++
++	}
++	spin_unlock_irqrestore(&dev->lock, flags);
++	return IRQ_HANDLED;
++}
++
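++/* Reverse the byte order of one 32-bit word in place; the engine
++ * expects key and IV words big-endian, so every word is swapped
++ * before being written to the key/IV registers. */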
++static void aes_biglittle_swap(u8 *buf)
++{
++	u8 tmp, tmp1;
++	tmp = buf[0];
++	tmp1 = buf[1];
++	buf[0] = buf[3];
++	buf[1] = buf[2];
++	buf[2] = tmp1;
++	buf[3] = tmp;
++}
++
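++/*
++ * Gather the request's source scatterlist into the aligned bounce
++ * buffer and map it as a single DMA segment.  fh_set_outdata() maps
++ * the whole destination bounce buffer; fh_aes_tx() copies the result
++ * back into the caller's destination scatterlist on completion.
++ */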
++static int fh_set_indata(struct fh_aes_dev *dev, struct scatterlist *sg)
++{
++	int err;
++	unsigned int i = 0;
++	unsigned int len = 0;
++	struct scatterlist *temp_sg = sg;
++	unsigned char *src_xbuf;
++	src_xbuf = &dev->ctl_src_xbuf[0];
++	do {
++		if (len + sg_dma_len(temp_sg) > FH_AES_CTL_MAX_PROCESS_SIZE) {
++			printk("%s: total size > driver size 0x%x\n", __func__, FH_AES_CTL_MAX_PROCESS_SIZE);
++			err = -ENOMEM;
++			goto exit;
++		}
++		sg_copy_to_buffer(temp_sg, 1, &src_xbuf[i], sg_dma_len(temp_sg));
++		len += sg_dma_len(temp_sg);
++		i += sg_dma_len(temp_sg);
++		temp_sg = sg_next(temp_sg);
++	} while (temp_sg != NULL);
++
++	sg_init_one(&dev->src_sg[0], &src_xbuf[0], len);
++	err = dma_map_sg(dev->dev, &dev->src_sg[0], 1, DMA_TO_DEVICE);
++	if (!err) {
++		err = -ENOMEM;
++		goto exit;
++	}
++	dev->sg_src = &dev->src_sg[0];
++	err = 0;
++exit:
++	return err;
++}
++
++static int fh_set_outdata(struct fh_aes_dev *dev, struct scatterlist *sg)
++{
++	int err;
++	sg_init_one(&dev->dst_sg[0], &dev->ctl_dst_xbuf[0], FH_AES_CTL_MAX_PROCESS_SIZE);
++	err = dma_map_sg(dev->dev, &dev->dst_sg[0], 1, DMA_FROM_DEVICE);
++	if (!err) {
++		err = -ENOMEM;
++		goto exit;
++	}
++	dev->sg_dst = &dev->dst_sg[0];
++	err = 0;
++exit:
++	return err;
++}
++
++static void fh_set_aes_key_reg(struct fh_aes_dev *dev, uint8_t *key,
++			       uint8_t *iv, unsigned int keylen)
++{
++
++	int i;
++	u32 method;
++	u32 temp_key_buf[32];
++	u32 temp_iv_buf[32];
++	u32 *p_dst = NULL;
++	u32 key_size = 0;
++	if (dev->iv_flag == true) {
++		/*set iv*/
++		/*AES modes load a 128-bit IV; DES/3DES modes load a 64-bit IV*/
++		AES_DBG("set iv reg\n");
++		if ((dev->control_reg & AES_128_METHOD)
++		    || ((dev->control_reg & AES_192_METHOD))
++		    || (dev->control_reg & AES_256_METHOD)) {
++			AES_DBG("aes iv mode...\n");
++
++			memcpy((u8 *)&temp_iv_buf[0], iv, 16);
++			p_dst = &temp_iv_buf[0];
++			for (i = 0; i < 16 / sizeof(u32); i++)
++				aes_biglittle_swap((u8 *)(p_dst + i));
++			memcpy((u8 *)&((struct fh_aes_reg *) dev->regs)->initial_vector0,
++			       temp_iv_buf, 16);
++		} else {
++			AES_DBG("des iv mode...\n");
++
++			memcpy((u8 *)&temp_iv_buf[0], iv, 8);
++			p_dst = &temp_iv_buf[0];
++			for (i = 0; i < 8 / sizeof(u32); i++)
++				aes_biglittle_swap((u8 *)(p_dst + i));
++
++			memcpy((u8 *)&((struct fh_aes_reg *) dev->regs)->initial_vector0,
++			       temp_iv_buf, 8);
++
++		}
++	}
++	/*set key...*/
++	method = dev->control_reg & 0x0e;
++	AES_DBG("set key reg\n");
++
++	switch (method) {
++	case AES_128_METHOD:
++		AES_DBG("set key aes 128 mode..\n");
++		key_size = 16;
++
++		break;
++	case AES_192_METHOD:
++		AES_DBG("set key aes 192 mode..\n");
++		key_size = 24;
++		break;
++
++	case AES_256_METHOD:
++		AES_DBG("set key aes 256 mode..\n");
++		key_size = 32;
++		break;
++
++	case DES_METHOD:
++		AES_DBG("set key des normal mode..\n");
++		key_size = 8;
++		break;
++
++	case TRIPLE_DES_METHOD:
++		AES_DBG("set key des triple mode..\n");
++		key_size = 24;
++		break;
++
++	default:
++		AES_DBG("error method!!\n");
++		break;
++
++	}
++#ifdef FH_AESV2
++
++	if (s_efuse_obj.open_flag == USE_CPU_SET_KEY) {
++		memcpy((u8 *)&temp_key_buf[0], key, key_size);
++		p_dst = &temp_key_buf[0];
++		for (i = 0; i < key_size / sizeof(u32); i++)
++			aes_biglittle_swap((u8 *)(p_dst + i));
++		memcpy((u8 *)&((struct fh_aes_reg *) dev->regs)->security_key0,
++		       (u8 *)&temp_key_buf[0],
++		       key_size);
++	} else {
++		s_efuse_obj.trans_key_start_no = 0;
++		s_efuse_obj.trans_key_size = key_size / 4;
++		efuse_trans_key(&s_efuse_obj,
++				s_efuse_obj.trans_key_start_no,
++				s_efuse_obj.trans_key_size);
++	}
++
++#else
++	memcpy((u8 *)&temp_key_buf[0], key, key_size);
++	p_dst = &temp_key_buf[0];
++	for (i = 0; i < key_size / sizeof(u32); i++)
++		aes_biglittle_swap((u8 *)(p_dst + i));
++
++	memcpy((u8 *)&((struct fh_aes_reg *) dev->regs)->security_key0,
++	       (u8 *)&temp_key_buf[0],
++	       key_size);
++
++#endif
++
++
++}
++
++static void fh_set_dma_indata(struct fh_aes_dev *dev,
++			      struct scatterlist *sg)
++{
++	aes_writel(dev, dma_src_add, sg_dma_address(sg));
++	AES_DBG("%s :dma trans size is :%x\n", __func__, sg_dma_len(sg));
++	aes_writel(dev, dma_trans_size, sg_dma_len(sg));
++}
++
++static void fh_set_dma_outdata(struct fh_aes_dev *dev,
++			       struct scatterlist *sg)
++{
++	aes_writel(dev, dma_dst_add, sg_dma_address(sg));
++}
++
++static void fh_unset_indata(struct fh_aes_dev *dev)
++{
++	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
++}
++
++static void fh_unset_outdata(struct fh_aes_dev *dev)
++{
++	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
++}
++
++static void fh_aes_complete(struct fh_aes_dev *dev, int err)
++{
++	if (dev->req->base.complete)
++		dev->req->base.complete(&dev->req->base, err);
++	dev->busy = false;
++}
++
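++/*
++ * Program the engine for one transfer: bit 7 of the control register
++ * enables IV loading (any chained mode); the key/IV registers, DMA
++ * addresses and FIFO thresholds are then set before DMA is started.
++ */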
++static void fh_aes_crypt_start(struct fh_aes_dev *dev, unsigned long mode)
++{
++
++	struct ablkcipher_request *req = dev->req;
++	u32 control_reg;
++	u32 outfifo_thold = 0;
++	u32 infifo_thold = 0;
++	u32 isr;
++	int err;
++	unsigned long flags;
++	control_reg = 0;
++
++	if ((mode & CBC_MODE) || (mode & CTR_MODE) || (mode & CFB_MODE)
++	    || (mode & OFB_MODE)) {
++		control_reg |= 1 << 7;
++		dev->iv_flag = true;
++	} else
++		dev->iv_flag = false;
++
++	/*emode & method*/
++	control_reg |= (unsigned int) mode;
++	dev->control_reg = control_reg;
++	outfifo_thold = 0;
++	infifo_thold = 8;
++	isr = dev->en_isr;
++	spin_lock_irqsave(&dev->lock, flags);
++	AES_DBG("control_reg:0x%x\n", control_reg);
++	aes_writel(dev, encrypt_control, control_reg);
++	/*set key...*/
++	fh_set_aes_key_reg(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
++	err = fh_set_indata(dev, req->src);
++	if (err)
++		goto indata_error;
++
++	err = fh_set_outdata(dev, req->dst);
++	if (err)
++		goto outdata_error;
++
++	fh_set_dma_indata(dev, dev->sg_src);
++	fh_set_dma_outdata(dev, dev->sg_dst);
++
++	/*set fifo..*/
++	AES_DBG("outfifo thold:%x\n", outfifo_thold);
++	AES_DBG("infifo thold:%x\n", infifo_thold);
++	aes_writel(dev, fifo_threshold, outfifo_thold << 8 | infifo_thold);
++	/*set isr..*/
++	AES_DBG("intr enable:%x\n", isr);
++	aes_writel(dev, intr_enable, isr);
++	/*enable dma go..*/
++	aes_writel(dev, dma_control, 1);
++	spin_unlock_irqrestore(&dev->lock, flags);
++
++	return;
++
++outdata_error:
++	AES_DBG("outdata_error ..\n");
++	fh_unset_indata(dev);
++
++indata_error:
++	AES_DBG("indata_error ..\n");
++	fh_aes_complete(dev, err);
++	spin_unlock_irqrestore(&dev->lock, flags);
++
++}
++
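++/*
++ * Tasklet: dequeue the next request, give any backlogged submitter its
++ * -EINPROGRESS notification, then start the hardware for this request.
++ */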
++static void fh_aes_tasklet_cb(unsigned long data)
++{
++
++	struct fh_aes_dev *dev = (struct fh_aes_dev *) data;
++	struct crypto_async_request *async_req, *backlog;
++	struct fh_aes_reqctx *reqctx;
++	unsigned long flags;
++
++	AES_DBG("%s\n", __func__);
++	spin_lock_irqsave(&dev->lock, flags);
++	backlog = crypto_get_backlog(&dev->queue);
++	AES_DBG("backlog add is :%x\n", (u32)backlog);
++	/*get the req need to handle*/
++	async_req = crypto_dequeue_request(&dev->queue);
++	spin_unlock_irqrestore(&dev->lock, flags);
++	if (!async_req)
++		return;
++	if (backlog) {
++		if (backlog->complete)
++			backlog->complete(backlog, -EINPROGRESS);
++	}
++
++	dev->req = ablkcipher_request_cast(async_req);
++	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
++	reqctx = ablkcipher_request_ctx(dev->req);
++	fh_aes_crypt_start(dev, reqctx->mode);
++}
++
++/* DSP helper interface (added by chenjn) */
++typedef struct {
++	unsigned int base;
++	void *vbase;
++	unsigned int size;
++} MEM_INFO;
++typedef struct {
++	MEM_INFO mem;
++	unsigned char *remap_base; /**< used size */
++} RW_MEM_INFO;
++
++struct tcrypt_result {
++	struct completion completion;
++	int err;
++};
++
++int aes_128_ecb_encrypt(char *key_128, RW_MEM_INFO in,
++			RW_MEM_INFO out, unsigned int data_len_align16);
++
++
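++/*
++ * Bounce-buffer setup: each buffer is over-allocated by
++ * FH_AES_ALLIGN_SIZE and the working pointer rounded up to the next
++ * alignment boundary; the raw kmalloc() pointers are kept so they can
++ * be kfree()d on remove.
++ */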
++int fh_aes_ctl_mem_init(struct fh_aes_dev *pdata)
++{
++	unsigned int t1;
++	unsigned int t2;
++	unsigned int t3;
++	unsigned int t4;
++
++	t1 = (unsigned int)kmalloc(FH_AES_MALLOC_SIZE + FH_AES_ALLIGN_SIZE, GFP_KERNEL);
++	if (!t1)
++		goto err1;
++
++	t2 = (unsigned int)kmalloc(FH_AES_MALLOC_SIZE + FH_AES_ALLIGN_SIZE, GFP_KERNEL);
++	if (!t2)
++		goto err2;
++
++
++	t3 = ((t1 + FH_AES_ALLIGN_SIZE - 1) & (~(FH_AES_ALLIGN_SIZE - 1)));
++	t4 = ((t2 + FH_AES_ALLIGN_SIZE - 1) & (~(FH_AES_ALLIGN_SIZE - 1)));
++
++	pdata->ctl_raw_src_xbuf = (unsigned char *)t1;
++	pdata->ctl_raw_dst_xbuf = (unsigned char *)t2;
++	pdata->ctl_src_xbuf = (unsigned char *)t3;
++	pdata->ctl_dst_xbuf = (unsigned char *)t4;
++	return 0;
++err2:
++	kfree((void *)t1);
++err1:
++	return -ENOMEM;
++
++}
++
++static int __devinit fh_aes_probe(struct platform_device *pdev)
++{
++
++	int i, j, err = -ENODEV;
++	struct fh_aes_dev *pdata;
++	struct device *dev = &pdev->dev;
++	struct resource *res;
++	struct resource *ioarea;
++
++	AES_DBG("aes probe get in..\n");
++	if (pobj_aes_dev) {
++		dev_err(&pdev->dev, "second crypto dev..\n");
++		return -EEXIST;
++	}
++
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res) {
++		dev_err(&pdev->dev, "get platform source error..\n");
++		return -ENODEV;
++	}
++
++	ioarea = request_mem_region(res->start, resource_size(res), pdev->name);
++	if (!ioarea) {
++		dev_err(&pdev->dev, "aes region already claimed\n");
++		/*BUG_ON(ioarea);*/
++		return -EBUSY;
++	}
++
++	pdata = kzalloc(sizeof(struct fh_aes_dev), GFP_KERNEL);
++	if (!pdata) {
++		err = -ENOMEM;
++		goto err_malloc;
++	}
++
++	spin_lock_init(&pdata->lock);
++	pdata->regs = ioremap(res->start, resource_size(res));
++
++	if (!pdata->regs) {
++		dev_err(&pdev->dev, "aes region already mapped\n");
++		err = -EINVAL;
++		goto err_iomap;
++	}
++	pdata->irq_no = platform_get_irq(pdev, 0);
++	if (pdata->irq_no < 0) {
++		err = pdata->irq_no;
++		dev_warn(dev, "aes interrupt is not available.\n");
++		goto err_irq;
++	}
++	/*only enable dma done isr..*/
++	pdata->en_isr = 1 << 0;
++	err = request_irq(pdata->irq_no, fh_aes_interrupt, 0,
++			  dev_name(&pdev->dev), pdev);
++
++	if (err) {
++		dev_dbg(&pdev->dev, "request_irq failed, %d\n", err);
++		goto err_irq;
++	}
++	/*bind to plat dev..*/
++	pdata->dev = dev;
++	/*bind to static pointer..only one aes controller in fh..*/
++	pobj_aes_dev = pdata;
++	platform_set_drvdata(pdev, pdata);
++
++	tasklet_init(&pdata->tasklet, fh_aes_tasklet_cb, (unsigned long) pdata);
++	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
++	for (i = 0; i < ARRAY_SIZE(algs); i++) {
++		INIT_LIST_HEAD(&algs[i].cra_list);
++		err = crypto_register_alg(&algs[i]);
++
++		if (err) {
++			dev_warn(dev, "register alg error...\n");
++			goto err_algs;
++		}
++	}
++
++	err = fh_aes_ctl_mem_init(pdata);
++	if (err) {
++		dev_err(&pdev->dev, "aes malloc mem error..\n");
++		goto err_algs;
++	}
++	pr_info("aes driver registered\n");
++
++#ifdef CONFIG_FH_AES_SELF_TEST
++
++	fh_aes_self_test_all();
++#endif
++
++	return 0;
++err_algs:
++	for (j = 0; j < i; j++)
++		crypto_unregister_alg(&algs[j]);
++	tasklet_kill(&pdata->tasklet);
++	platform_set_drvdata(pdev, NULL);
++	pobj_aes_dev = NULL;
++	free_irq(pdata->irq_no, pdev);
++
++err_irq:
++	iounmap(pdata->regs);
++
++err_iomap:
++	kfree(pdata);
++
++err_malloc:
++	release_mem_region(res->start, resource_size(res));
++	return err;
++}
++
++static int __devexit fh_aes_remove(struct platform_device *pdev)
++{
++
++	int i;
++	struct fh_aes_dev *pdata = platform_get_drvdata(pdev);
++	struct resource *res;
++
++	for (i = 0; i < ARRAY_SIZE(algs); i++)
++		crypto_unregister_alg(&algs[i]);
++
++	tasklet_kill(&pdata->tasklet);
++	platform_set_drvdata(pdev, NULL);
++	pobj_aes_dev = NULL;
++	free_irq(pdata->irq_no, pdev);
++	iounmap(pdata->regs);
++	kfree(pdata->ctl_raw_src_xbuf);
++	kfree(pdata->ctl_raw_dst_xbuf);
++	pdata->ctl_raw_src_xbuf = NULL;
++	pdata->ctl_raw_dst_xbuf = NULL;
++	pdata->ctl_src_xbuf = NULL;
++	pdata->ctl_dst_xbuf = NULL;
++	kfree(pdata);
++
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	release_mem_region(res->start, resource_size(res));
++
++	return 0;
++}
++
++static struct platform_driver fh_aes_driver = {
++	.driver = {
++		.name = "fh_aes",
++		.owner = THIS_MODULE,
++	},
++	.probe = fh_aes_probe,
++	.remove = __devexit_p(fh_aes_remove),
++};
++
++static int __init fh_aes_init(void)
++{
++	return platform_driver_register(&fh_aes_driver);
++}
++late_initcall(fh_aes_init);
++
++static void __exit fh_aes_exit(void)
++{
++	platform_driver_unregister(&fh_aes_driver);
++}
++module_exit(fh_aes_exit);
++
++static void tcrypt_complete(struct crypto_async_request *req, int err)
++{
++	struct tcrypt_result *res = req->data;
++	if (err == -EINPROGRESS)
++		return;
++	complete(&res->completion);
++}
++
++int aes_128_ecb_encrypt(char *key_128, RW_MEM_INFO in, RW_MEM_INFO out, unsigned int data_len_align16)
++{
++	static char *xbuf;
++	static char *dst_xbuf;
++	static struct crypto_ablkcipher *tfm;
++	static struct ablkcipher_request *req;
++	static int malloc_flag;
++	/*const char *algo = NULL;*/
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++	void *data;
++	void *dst_data;
++	struct tcrypt_result wait_result;
++
++	/*malloc buf...*/
++	if (malloc_flag != 0)
++		goto work_go;
++	xbuf = (void *)__get_free_page(GFP_KERNEL);
++	if (!xbuf) {
++		printk("no pages.\n");
++		return -1;
++	}
++
++	dst_xbuf = (void *)__get_free_page(GFP_KERNEL);
++	if (!dst_xbuf) {
++		free_page((unsigned long)xbuf);
++		printk("no pages.\n");
++		return -1;
++	}
++
++	tfm = crypto_alloc_ablkcipher("ecb-aes-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		printk("aes_test: failed to alloc cipher!\n");
++		free_page((unsigned long)xbuf);
++		free_page((unsigned long)dst_xbuf);
++		return -1;
++	}
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		printk(KERN_ERR "alg: skcipher: failed to allocate request\n");
++		crypto_free_ablkcipher(tfm);
++		free_page((unsigned long)xbuf);
++		free_page((unsigned long)dst_xbuf);
++		return -1;
++	}
++	/*mark buffers/tfm/req as allocated only once everything succeeded*/
++	malloc_flag = 1;
++
++work_go:
++	init_completion(&wait_result.completion);
++	crypto_ablkcipher_setkey(tfm, (u8 *)key_128, 16);
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &wait_result);
++	data = xbuf;
++	dst_data = dst_xbuf;
++	/*encrypt*/
++	memcpy(data, in.remap_base, data_len_align16);
++	sg_init_one(&sg[0], data, data_len_align16);
++	sg_init_one(&dst_sg[0], dst_data, data_len_align16);
++	ablkcipher_request_set_crypt(req, sg, dst_sg, data_len_align16, NULL);
++	crypto_ablkcipher_encrypt(req);
++	wait_for_completion(&wait_result.completion);
++	memcpy(out.remap_base, dst_data, data_len_align16);
++
++	return 0;
++
++}
++EXPORT_SYMBOL(aes_128_ecb_encrypt);
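++
++/*
++ * Minimal usage sketch for the exported helper (illustrative only;
++ * the buffer names are hypothetical and the MEM_INFO fields other
++ * than remap_base are left to the DSP-side caller):
++ *
++ *	char key[16] = { 0x2b, 0x7e, 0x15, 0x16, ... };
++ *	RW_MEM_INFO in, out;
++ *	in.remap_base  = plain_buf;	// length must be 16-byte aligned
++ *	out.remap_base = cipher_buf;
++ *	aes_128_ecb_encrypt(key, in, out, 64);
++ */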
++
++
++MODULE_AUTHOR("yu.zhang <zhangy@fullhan.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Fullhan AES driver support");
+diff --git a/drivers/crypto/fh_aes.h b/drivers/crypto/fh_aes.h
+new file mode 100644
+index 00000000..90eaa8a5
+--- /dev/null
++++ b/drivers/crypto/fh_aes.h
+@@ -0,0 +1,119 @@
++/*
++ * fh_aes.h
++ *
++ *  Created on: 3.12.2015
++ *      Author: duobao
++ */
++
++#ifndef FH_AES_H_
++#define FH_AES_H_
++
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/clk.h>
++#include <linux/platform_device.h>
++#include <linux/scatterlist.h>
++#include <linux/dma-mapping.h>
++#include <linux/io.h>
++#include <linux/crypto.h>
++#include <linux/interrupt.h>
++#include <crypto/algapi.h>
++#include <crypto/aes.h>
++#include <crypto/des.h>
++#include <crypto/ctr.h>
++
++struct fh_aes_reg {
++	u32 encrypt_control;		/*0*/
++	u32 reserved_4_8;			/*4*/
++	u32 fifo_status;			/*8*/
++	u32 parity_error;			/*c*/
++	u32 security_key0;			/*10*/
++	u32 security_key1;			/*14*/
++	u32 security_key2;			/*18*/
++	u32 security_key3;			/*1c*/
++	u32 security_key4;			/*20*/
++	u32 security_key5;			/*24*/
++	u32 security_key6;			/*28*/
++	u32 security_key7;			/*2c*/
++	u32 initial_vector0;		/*30*/
++	u32 initial_vector1;		/*34*/
++	u32 initial_vector2;		/*38*/
++	u32 initial_vector3;		/*3c*/
++	u32 reserved_40_44;			/*40*/
++	u32 reserved_44_48;			/*44*/
++	u32 dma_src_add;			/*48*/
++	u32 dma_dst_add;			/*4c*/
++	u32 dma_trans_size;			/*50*/
++	u32 dma_control;			/*54*/
++	u32 fifo_threshold;			/*58*/
++	u32 intr_enable;			/*5c*/
++	u32 intr_src;				/*60*/
++	u32 mask_intr_status;		/*64*/
++	u32 intr_clear_status;		/*68*/
++	u32 reserved_6c_70;			/*6c*/
++	u32 revision;				/*70*/
++	u32 feature;				/*74*/
++	u32 reserved_78_7c;			/*78*/
++	u32 reserved_7c_80;			/*7c*/
++	u32 last_initial_vector0;	/*80*/
++	u32 last_initial_vector1;	/*84*/
++	u32 last_initial_vector2;	/*88*/
++	u32 last_initial_vector3;	/*8c*/
++};
++
++/*request ctx.....*/
++struct fh_aes_reqctx {
++	unsigned long mode;
++};
++/*aes ctx....*/
++struct fh_aes_ctx {
++	struct fh_aes_dev *dev;              /*bind to aes dev..*/
++	uint8_t aes_key[AES_MAX_KEY_SIZE];		/*rec key value..*/
++	int keylen;		/*rec key len.*/
++};
++
++struct fh_aes_dev {
++	/*common driver paras..*/
++	void *regs;
++	struct device *dev;	/*bind to the platform dev...*/
++	struct clk *clk;
++	bool busy;		/*software sync the hardware....*/
++	spinlock_t lock;	/*just lock...*/
++	int irq_no;		/*board info; signed: platform_get_irq() may return -errno*/
++	u32 en_isr;		/*software rec the isr src*/
++	bool iv_flag;
++	u32 control_reg;
++	/*crypto need below...*/
++	struct fh_aes_ctx *ctx;		/*bind to the aes ctx...*/
++	struct fh_aes_reqctx *reqctx;	/*bind to the req ctx..*/
++	struct scatterlist *sg_src;	/*rec the src data need to be handled*/
++	struct scatterlist *sg_dst;	/*rec the dst data need to be handled*/
++	struct tasklet_struct tasklet;	/*async process the crypto*/
++	struct ablkcipher_request *req;	/*active req...*/
++	struct crypto_queue queue;
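++	/* bounce buffers: the *_raw_* pointers keep the original kmalloc()
++	 * addresses for kfree(); the aligned pointers are used for DMA */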
++	unsigned char *ctl_src_xbuf;
++	unsigned char *ctl_dst_xbuf;
++	unsigned char *ctl_raw_src_xbuf;
++	unsigned char *ctl_raw_dst_xbuf;
++	struct scatterlist src_sg[1];
++	struct scatterlist dst_sg[1];
++};
++
++
++/*#define FH_AES_SELF_TEST*/
++/*#define FH_AES_DEBUG*/
++#ifdef FH_AES_DEBUG
++#define AES_DBG(fmt, args...)  printk(fmt, ## args)
++#else
++#define AES_DBG(fmt, args...)  do { } while (0)
++#endif
++
++#define AES_PRINT_RESULT(fmt, args...)  printk(fmt, ## args)
++
++#endif /* FH_AES_H_ */
++
++
+diff --git a/drivers/crypto/fh_aes_test.c b/drivers/crypto/fh_aes_test.c
+new file mode 100644
+index 00000000..25dca9a6
+--- /dev/null
++++ b/drivers/crypto/fh_aes_test.c
+@@ -0,0 +1,1369 @@
++/*
++ * fh_aes_test.c
++ *
++ *  Created on: May 7, 2015
++ *      Author: yu.zhang
++ */
++#ifdef CONFIG_FH_AES_SELF_TEST
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/in.h>
++#include <linux/ioport.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/skbuff.h>
++#include <linux/highmem.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/version.h>
++#include <linux/spinlock.h>
++#include <linux/dma-mapping.h>
++#include <linux/clk.h>
++#include <linux/platform_device.h>
++#include <linux/bitops.h>
++#include <linux/io.h>
++#include <linux/irqreturn.h>
++#include <asm/irq.h>
++#include <asm/page.h>
++#include <crypto/hash.h>
++#include <linux/err.h>
++#include <linux/scatterlist.h>
++#include <crypto/rng.h>
++#include "fh_aes.h"
++//cbc aes 128
++#define AES_IV0			0x00010203
++#define AES_IV1			0x04050607
++#define AES_IV2			0x08090a0b
++#define AES_IV3			0x0c0d0e0f
++
++#define AES_KEY0		0x2b7e1516
++#define AES_KEY1		0x28aed2a6
++#define AES_KEY2		0xabf71588
++#define AES_KEY3		0x09cf4f3c
++
++
++static const unsigned char aes_cbc_iv_buf[] = {
++	0x00,0x01,0x02,0x03, 0x04,0x05,0x06,0x07, 0x08,0x09,0x0a,0x0b, 0x0c,0x0d,0x0e,0x0f,
++};
++
++static const unsigned char aes_cbc_key_buf[] = {
++	0x2b,0x7e,0x15,0x16, 0x28,0xae,0xd2,0xa6, 0xab,0xf7,0x15,0x88, 0x09,0xcf,0x4f,0x3c,
++};
++
++
++//ecb aes 256
++#define AES_ECB_KEY0	0x603deb10
++#define AES_ECB_KEY1	0x15ca71be
++#define AES_ECB_KEY2	0x2b73aef0
++#define AES_ECB_KEY3	0x857d7781
++#define AES_ECB_KEY4	0x1f352c07
++#define AES_ECB_KEY5	0x3b6108d7
++#define AES_ECB_KEY6	0x2d9810a3
++#define AES_ECB_KEY7	0x0914dff4
++
++
++static const unsigned char aes_ecb_key_buf[] = {
++	0x60,0x3d,0xeb,0x10, 0x15,0xca,0x71,0xbe, 0x2b,0x73,0xae,0xf0, 0x85,0x7d,0x77,0x81,
++	0x1f,0x35,0x2c,0x07, 0x3b,0x61,0x08,0xd7, 0x2d,0x98,0x10,0xa3, 0x09,0x14,0xdf,0xf4,
++};
++
++//ctr aes 192
++#define AES_CTR_KEY0 	0x8e73b0f7
++#define AES_CTR_KEY1 	0xda0e6452
++#define AES_CTR_KEY2 	0xc810f32b
++#define AES_CTR_KEY3 	0x809079e5
++#define AES_CTR_KEY4 	0x62f8ead2
++#define AES_CTR_KEY5 	0x522c6b7b
++
++#define AES_CTR_IV0		0xf0f1f2f3
++#define AES_CTR_IV1		0xf4f5f6f7
++#define AES_CTR_IV2		0xf8f9fafb
++#define AES_CTR_IV3		0xfcfdfeff
++
++
++static const unsigned char aes_ctr_iv_buf[] = {
++	0xf0,0xf1,0xf2,0xf3, 0xf4,0xf5,0xf6,0xf7, 0xf8,0xf9,0xfa,0xfb, 0xfc,0xfd,0xfe,0xff,
++};
++
++static const unsigned char aes_ctr_key_buf[] = {
++	0x8e,0x73,0xb0,0xf7, 0xda,0x0e,0x64,0x52, 0xc8,0x10,0xf3,0x2b, 0x80,0x90,0x79,0xe5,
++	0x62,0xf8,0xea,0xd2, 0x52,0x2c,0x6b,0x7b,
++};
++
++
++//cfb aes 192
++//#define AES_CFB_KEY0 	0x8e73b0f7
++//#define AES_CFB_KEY1 	0xda0e6452
++//#define AES_CFB_KEY2	0xc810f32b
++//#define AES_CFB_KEY3 	0x809079e5
++//#define AES_CFB_KEY4 	0x62f8ead2
++//#define AES_CFB_KEY5	0x522c6b7b
++//
++//#define AES_CFB_IV0		0x00010203
++//#define AES_CFB_IV1		0x04050607
++//#define AES_CFB_IV2		0x08090a0b
++//#define AES_CFB_IV3		0x0c0d0e0f
++
++//ofb aes 256
++#define AES_OFB_256_KEY0 0x603deb10
++#define AES_OFB_256_KEY1 0x15ca71be
++#define AES_OFB_256_KEY2 0x2b73aef0
++#define AES_OFB_256_KEY3 0x857d7781
++#define AES_OFB_256_KEY4 0x1f352c07
++#define AES_OFB_256_KEY5 0x3b6108d7
++#define AES_OFB_256_KEY6 0x2d9810a3
++#define AES_OFB_256_KEY7 0x0914dff4
++
++#define AES_OFB_IV0		0x00010203
++#define AES_OFB_IV1		0x04050607
++#define AES_OFB_IV2		0x08090a0b
++#define AES_OFB_IV3		0x0c0d0e0f
++
++static const unsigned char aes_ofb_iv_buf[] = {
++	0x00,0x01,0x02,0x03, 0x04,0x05,0x06,0x07, 0x08,0x09,0x0a,0x0b, 0x0c,0x0d,0x0e,0x0f,
++};
++
++static const unsigned char aes_ofb_key_buf[] = {
++	0x60,0x3d,0xeb,0x10, 0x15,0xca,0x71,0xbe, 0x2b,0x73,0xae,0xf0, 0x85,0x7d,0x77,0x81,
++	0x1f,0x35,0x2c,0x07, 0x3b,0x61,0x08,0xd7, 0x2d,0x98,0x10,0xa3, 0x09,0x14,0xdf,0xf4,
++};
++
++//des ecb
++#define DES_ECB_KEY0 	0x01010101
++#define DES_ECB_KEY1 	0x01010101
++
++static const unsigned char des_ecb_key_buf[] = {
++	0x01,0x01,0x01,0x01, 0x01,0x01,0x01,0x01,
++};
++//des cbc
++#define DES_CBC_KEY0 	0x01234567
++#define DES_CBC_KEY1 	0x89abcdef
++
++#define DES_CBC_IV0		0x12345678
++#define DES_CBC_IV1		0x90abcdef
++
++
++static const unsigned char des_cbc_key_buf[] = {
++	0x01,0x23,0x45,0x67, 0x89,0xab,0xcd,0xef,
++};
++
++static const unsigned char des_cbc_iv_buf[] = {
++	0x12,0x34,0x56,0x78, 0x90,0xab,0xcd,0xef,
++};
++
++
++//ofb cbc
++#define DES_OFB_KEY0 	0x01234567
++#define DES_OFB_KEY1 	0x89abcdef
++
++#define DES_OFB_IV0		0x12345678
++#define DES_OFB_IV1		0x90abcdef
++
++static const unsigned char des_ofb_key_buf[] = {
++	0x01,0x23,0x45,0x67, 0x89,0xab,0xcd,0xef,
++};
++
++static const unsigned char des_ofb_iv_buf[] = {
++	0x12,0x34,0x56,0x78, 0x90,0xab,0xcd,0xef,
++};
++
++//ecb tri-des
++#define DES_TRI_ECB_KEY0	 0x01234567
++#define DES_TRI_ECB_KEY1 	 0x89abcdef
++
++#define DES_TRI_ECB_KEY2	 0x23456789
++#define DES_TRI_ECB_KEY3	 0xabcdef01
++
++#define DES_TRI_ECB_KEY4	 0x456789ab
++#define DES_TRI_ECB_KEY5	 0xcdef0123
++
++static const unsigned char des3_ecb_key_buf[] = {
++	0x01,0x23,0x45,0x67, 0x89,0xab,0xcd,0xef, 0x23,0x45,0x67,0x89, 0xab,0xcd,0xef,0x01,
++	0x45,0x67,0x89,0xab, 0xcd,0xef,0x01,0x23,
++};
++
++//cbc tri-des
++#define DES_TRI_CBC_KEY0	 0x01234567
++#define DES_TRI_CBC_KEY1 	 0x89abcdef
++
++#define DES_TRI_CBC_KEY2	 0x23456789
++#define DES_TRI_CBC_KEY3	 0xabcdef01
++
++#define DES_TRI_CBC_KEY4	 0x456789ab
++#define DES_TRI_CBC_KEY5	 0xcdef0123
++
++#define DES_TRI_CBC_IV0		0x12345678
++#define DES_TRI_CBC_IV1		0x90abcdef
++
++static const unsigned char des3_cbc_key_buf[] = {
++	0x01,0x23,0x45,0x67, 0x89,0xab,0xcd,0xef, 0x23,0x45,0x67,0x89, 0xab,0xcd,0xef,0x01,
++	0x45,0x67,0x89,0xab, 0xcd,0xef,0x01,0x23,
++};
++static const unsigned char des3_cbc_iv_buf[] = {
++	0x12,0x34,0x56,0x78, 0x90,0xab,0xcd,0xef,
++};
++#define XBUFSIZE	128
++
++struct tcrypt_result {
++	struct completion completion;
++	int err;
++};
++
++static inline void hexdump(unsigned char *buf, unsigned int len);
++static void tcrypt_complete(struct crypto_async_request *req, int err);
++static int testmgr_alloc_buf(char *buf[XBUFSIZE]);
++static int fh_aes_cbc128_self_test(void);
++static int fh_aes_ecb256_self_test(void);
++
++static struct tcrypt_result result;
++static const unsigned char plain_text[]  = {
++		0x6b,0xc1,0xbe,0xe2, 0x2e,0x40,0x9f,0x96, 0xe9,0x3d,0x7e,0x11, 0x73,0x93,0x17,0x2a,
++		0xae,0x2d,0x8a,0x57, 0x1e,0x03,0xac,0x9c, 0x9e,0xb7,0x6f,0xac, 0x45,0xaf,0x8e,0x51,
++		0x30,0xc8,0x1c,0x46, 0xa3,0x5c,0xe4,0x11, 0xe5,0xfb,0xc1,0x19, 0x1a,0x0a,0x52,0xef,
++		0xf6,0x9f,0x24,0x45, 0xdf,0x4f,0x9b,0x17, 0xad,0x2b,0x41,0x7b, 0xe6,0x6c,0x37,0x10,
++};
++
++static const unsigned char cipher_text[] = {
++		0x76,0x49,0xab,0xac, 0x81,0x19,0xb2,0x46, 0xce,0xe9,0x8e,0x9b, 0x12,0xe9,0x19,0x7d,
++		0x50,0x86,0xcb,0x9b, 0x50,0x72,0x19,0xee, 0x95,0xdb,0x11,0x3a, 0x91,0x76,0x78,0xb2,
++		0x73,0xbe,0xd6,0xb8, 0xe3,0xc1,0x74,0x3b, 0x71,0x16,0xe6,0x9e, 0x22,0x22,0x95,0x16,
++		0x3f,0xf1,0xca,0xa1, 0x68,0x1f,0xac,0x09, 0x12,0x0e,0xca,0x30, 0x75,0x86,0xe1,0xa7,
++};
++
++static const unsigned char plain_ecb_256_text[] = {
++		0x6b,0xc1,0xbe,0xe2, 0x2e,0x40,0x9f,0x96, 0xe9,0x3d,0x7e,0x11, 0x73,0x93,0x17,0x2a,
++		0xae,0x2d,0x8a,0x57, 0x1e,0x03,0xac,0x9c, 0x9e,0xb7,0x6f,0xac, 0x45,0xaf,0x8e,0x51,
++		0x30,0xc8,0x1c,0x46, 0xa3,0x5c,0xe4,0x11, 0xe5,0xfb,0xc1,0x19, 0x1a,0x0a,0x52,0xef,
++		0xf6,0x9f,0x24,0x45, 0xdf,0x4f,0x9b,0x17, 0xad,0x2b,0x41,0x7b, 0xe6,0x6c,0x37,0x10,
++};
++
++static const unsigned char cipher_ecb_256_text[] = {
++		0xf3,0xee,0xd1,0xbd, 0xb5,0xd2,0xa0,0x3c, 0x06,0x4b,0x5a,0x7e, 0x3d,0xb1,0x81,0xf8,
++		0x59,0x1c,0xcb,0x10, 0xd4,0x10,0xed,0x26, 0xdc,0x5b,0xa7,0x4a, 0x31,0x36,0x28,0x70,
++		0xb6,0xed,0x21,0xb9, 0x9c,0xa6,0xf4,0xf9, 0xf1,0x53,0xe7,0xb1, 0xbe,0xaf,0xed,0x1d,
++		0x23,0x30,0x4b,0x7a, 0x39,0xf9,0xf3,0xff, 0x06,0x7d,0x8d,0x8f, 0x9e,0x24,0xec,0xc7,
++};
++
++static const unsigned char plain_ctr_192_text[] = {
++		0x6b,0xc1,0xbe,0xe2, 0x2e,0x40,0x9f,0x96, 0xe9,0x3d,0x7e,0x11, 0x73,0x93,0x17,0x2a,
++		0xae,0x2d,0x8a,0x57, 0x1e,0x03,0xac,0x9c, 0x9e,0xb7,0x6f,0xac, 0x45,0xaf,0x8e,0x51,
++		0x30,0xc8,0x1c,0x46, 0xa3,0x5c,0xe4,0x11, 0xe5,0xfb,0xc1,0x19, 0x1a,0x0a,0x52,0xef,
++		0xf6,0x9f,0x24,0x45, 0xdf,0x4f,0x9b,0x17, 0xad,0x2b,0x41,0x7b, 0xe6,0x6c,0x37,0x10,
++};
++
++static const unsigned char cipher_ctr_192_text[] = {
++		0x1a,0xbc,0x93,0x24,	0x17,0x52,0x1c,0xa2,	0x4f,0x2b,0x04,0x59,	0xfe,0x7e,0x6e,0x0b,
++		0x09,0x03,0x39,0xec,	0x0a,0xa6,0xfa,0xef,	0xd5,0xcc,0xc2,0xc6,	0xf4,0xce,0x8e,0x94,
++		0x1e,0x36,0xb2,0x6b,	0xd1,0xeb,0xc6,0x70,	0xd1,0xbd,0x1d,0x66,	0x56,0x20,0xab,0xf7,
++		0x4f,0x78,0xa7,0xf6,	0xd2,0x98,0x09,0x58,	0x5a,0x97,0xda,0xec,	0x58,0xc6,0xb0,0x50,
++};
++
++static const unsigned char plain_ofb_256_text[] = {
++		0x6b,0xc1,0xbe,0xe2, 0x2e,0x40,0x9f,0x96, 0xe9,0x3d,0x7e,0x11, 0x73,0x93,0x17,0x2a,
++		0xae,0x2d,0x8a,0x57, 0x1e,0x03,0xac,0x9c, 0x9e,0xb7,0x6f,0xac, 0x45,0xaf,0x8e,0x51,
++		0x30,0xc8,0x1c,0x46, 0xa3,0x5c,0xe4,0x11, 0xe5,0xfb,0xc1,0x19, 0x1a,0x0a,0x52,0xef,
++		0xf6,0x9f,0x24,0x45, 0xdf,0x4f,0x9b,0x17, 0xad,0x2b,0x41,0x7b, 0xe6,0x6c,0x37,0x10,
++};
++
++static const unsigned char cipher_ofb_256_text[] = {
++		0xdc,0x7e,0x84,0xbf,0xda,0x79,0x16,0x4b,0x7e,0xcd,0x84,0x86,0x98,0x5d,0x38,0x60,
++		0x4f,0xeb,0xdc,0x67,0x40,0xd2,0x0b,0x3a,0xc8,0x8f,0x6a,0xd8,0x2a,0x4f,0xb0,0x8d,
++		0x71,0xab,0x47,0xa0,0x86,0xe8,0x6e,0xed,0xf3,0x9d,0x1c,0x5b,0xba,0x97,0xc4,0x08,
++		0x01,0x26,0x14,0x1d,0x67,0xf3,0x7b,0xe8,0x53,0x8f,0x5a,0x8b,0xe7,0x40,0xe4,0x84,
++};
++
++static const unsigned char plain_des_ecb_text[] = {
++		0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++		0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++		0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++		0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++};
++
++static const unsigned char cipher_des_ecb_text[] = {
++		0x95,0xF8,0xA5,0xE5,0xDD,0x31,0xD9,0x00,0xDD,0x7F,0x12,0x1C,0xA5,0x01,0x56,0x19,
++		0x2E,0x86,0x53,0x10,0x4F,0x38,0x34,0xEA,0x4B,0xD3,0x88,0xFF,0x6C,0xD8,0x1D,0x4F,
++		0x20,0xB9,0xE7,0x67,0xB2,0xFB,0x14,0x56,0x55,0x57,0x93,0x80,0xD7,0x71,0x38,0xEF,
++		0x6C,0xC5,0xDE,0xFA,0xAF,0x04,0x51,0x2F,0x0D,0x9F,0x27,0x9B,0xA5,0xD8,0x72,0x60,
++};
++
++static const unsigned char plain_des_cbc_text[] = {
++		0x4e,0x6f,0x77,0x20,0x69,0x73,0x20,0x74,0x68,0x65,0x20,0x74,0x69,0x6d,0x65,0x20,
++		0x66,0x6f,0x72,0x20,0x61,0x6c,0x6c,0x20,
++};
++
++static const unsigned char cipher_des_cbc_text[] = {
++		0xe5,0xc7,0xcd,0xde,0x87,0x2b,0xf2,0x7c,0x43,0xe9,0x34,0x00,0x8c,0x38,0x9c,0x0f,
++		0x68,0x37,0x88,0x49,0x9a,0x7c,0x05,0xf6,
++};
++
++static const unsigned char  plain_des_ofb_text[] = {
++		0x4e,0x6f,0x77,0x20,0x69,0x73,0x20,0x74,0x43,0xe9,0x34,0x00,0x8c,0x38,0x9c,0x0f,
++		0x68,0x37,0x88,0x49,0x9a,0x7c,0x05,0xf6,
++};
++
++static const unsigned char cipher_des_ofb_text[] = {
++		0xf3,0x09,0x62,0x49,0xc7,0xf4,0x6e,0x51,0x1e,0x7e,0x5e,0x50,0xcb,0xbe,0xc4,0x10,
++		0x33,0x35,0xa1,0x8a,0xde,0x4a,0x91,0x15,
++};
++
++static const unsigned char plain_des_tri_ecb_text[] = {
++		0x4e,0x6f,0x77,0x20,0x69,0x73,0x20,0x74,0x43,0xe9,0x34,0x00,0x8c,0x38,0x9c,0x0f,
++		0x68,0x37,0x88,0x49,0x9a,0x7c,0x05,0xf6,
++};
++
++static const unsigned char cipher_des_tri_ecb_text[] = {
++		0x31,0x4f,0x83,0x27,0xfa,0x7a,0x09,0xa8,0xd5,0x89,0x5f,0xad,0xe9,0x8f,0xae,0xdf,
++		0x98,0xf4,0x70,0xeb,0x35,0x53,0xa5,0xda,
++};
++
++static const unsigned char plain_des_tri_cbc_text[] = {
++		0x4e,0x6f,0x77,0x20,0x69,0x73,0x20,0x74,0x43,0xe9,0x34,0x00,0x8c,0x38,0x9c,0x0f,
++		0x68,0x37,0x88,0x49,0x9a,0x7c,0x05,0xf6,
++};
++
++static const unsigned char cipher_des_tri_cbc_text[] = {
++		0xf3,0xc0,0xff,0x02,0x6c,0x02,0x30,0x89,0xc4,0x3a,0xdd,0x8f,0xd8,0xcd,0x5e,0x43,
++		0x2b,0xfd,0x41,0xd3,0x13,0x0b,0xcf,0x40,
++};
++
++static inline void hexdump(unsigned char *buf, unsigned int len)
++{
++	while (len--)
++		AES_DBG("%02x", *buf++);
++	AES_DBG("\n");
++}
++
++static void tcrypt_complete(struct crypto_async_request *req, int err)
++{
++	struct tcrypt_result *res = req->data;
++	if (err == -EINPROGRESS)
++		return;
++//      res->err = err;
++	AES_DBG("crypt all over....\n");
++	complete(&res->completion);
++
++}
++
++static int testmgr_alloc_buf(char *buf[XBUFSIZE])
++{
++	int i;
++	for (i = 0; i < XBUFSIZE; i++) {
++		buf[i] = (void *)__get_free_page(GFP_KERNEL);
++		if (!buf[i])
++			goto err_free_buf;
++	}
++
++	return 0;
++err_free_buf:
++	while (i-- > 0)
++		free_page((unsigned long)buf[i]);
++
++	return -ENOMEM;
++}
++
++static int fh_aes_cbc128_self_test(void)
++{
++	struct crypto_ablkcipher *tfm;
++	struct ablkcipher_request *req;
++	const char *algo;
++
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++
++	u32 key[4] = { AES_KEY0, AES_KEY1, AES_KEY2, AES_KEY3 };
++	u32 iv[4] = { AES_IV0, AES_IV1, AES_IV2, AES_IV3 };
++
++	char *xbuf[XBUFSIZE];
++	char *dst_xbuf[XBUFSIZE];
++	//int ret = -ENOMEM;
++	void *data;
++	void *dst_data;
++	memcpy(&key[0],&aes_cbc_key_buf[0],sizeof(aes_cbc_key_buf));
++	memcpy(&iv[0],&aes_cbc_iv_buf[0],sizeof(aes_cbc_iv_buf));
++	if (testmgr_alloc_buf(xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	if (testmgr_alloc_buf(dst_xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	AES_DBG("aes self test get in...\n");
++	AES_DBG(" *_* step 1\n");
++	tfm =
++		crypto_alloc_ablkcipher("cbc-aes-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		AES_DBG("aes_test: failed to alloc cipher!\n");
++		return -1;
++	}
++
++	AES_DBG(" *_* step 2\n");
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	init_completion(&result.completion);
++
++	AES_DBG(" *_* step 3\n");
++	crypto_ablkcipher_setkey(tfm, (u8 *) key, 16);
++
++	AES_DBG(" *_* step 4\n");
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		AES_DBG(KERN_ERR "alg: skcipher: Failed to allocate request "
++			"for %s\n", algo);
++		return -1;
++	}
++
++	AES_DBG(" *_* step 5\n");
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &result);
++
++	AES_DBG(" *_* step 6\n");
++	data = xbuf[0];
++	dst_data = dst_xbuf[0];
++
++	//encrypt
++	memcpy(data, plain_text, 64);
++	memset(dst_data, 0, 64);
++	sg_init_one(&sg[0], data, 64);
++	sg_init_one(&dst_sg[0], dst_data, 64);
++
++	AES_DBG(" *_* step 7\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 64, (void *)iv);
++
++	AES_DBG(" *_* step 8\n");
++	crypto_ablkcipher_encrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, cipher_text, 64))
++		AES_PRINT_RESULT(" encrypt error....\n");
++	else
++		AES_PRINT_RESULT(" encrypt ok....\n");
++
++	//decrypt
++	memcpy(data, cipher_text, 64);
++	memset(dst_data, 0, 64);
++	sg_init_one(&sg[0], data, 64);
++	sg_init_one(&dst_sg[0], dst_data, 64);
++	AES_DBG(" *_* step 8\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 64, (void *)iv);
++	AES_DBG(" *_* step 9\n");
++	crypto_ablkcipher_decrypt(req);
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, plain_text, 64))
++		AES_PRINT_RESULT(" decrypt error....\n");
++	else
++		AES_PRINT_RESULT(" decrypt ok....\n");
++
++	return 0;
++
++}
++
++static int fh_aes_ecb256_self_test(void)
++{
++	struct crypto_ablkcipher *tfm;
++	struct ablkcipher_request *req;
++	const char *algo;
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++	u32 key[8] = {
++		AES_ECB_KEY0, AES_ECB_KEY1, AES_ECB_KEY2, AES_ECB_KEY3,
++		AES_ECB_KEY4, AES_ECB_KEY5, AES_ECB_KEY6, AES_ECB_KEY7
++	};
++	//const u32 iv[4] = {AES_IV0,AES_IV1,AES_IV2,AES_IV3};
++
++
++	//memcpy(&iv[0],&aes_cbc_iv_buf[0],sizeof(aes_cbc_iv_buf));
++
++	char *xbuf[XBUFSIZE];
++	char *dst_xbuf[XBUFSIZE];
++
++	//int ret = -ENOMEM;
++	void *data;
++	void *dst_data;
++	memcpy(&key[0],&aes_ecb_key_buf[0],sizeof(aes_ecb_key_buf));
++	if (testmgr_alloc_buf(xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	if (testmgr_alloc_buf(dst_xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	AES_DBG("aes self test get in...\n");
++	AES_DBG(" *_* step 1\n");
++	tfm =
++		crypto_alloc_ablkcipher("ecb-aes-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		AES_DBG("aes_test: failed to alloc cipher!\n");
++		return -1;
++	}
++
++	AES_DBG(" *_* step 2\n");
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	init_completion(&result.completion);
++
++	AES_DBG(" *_* step 3\n");
++	crypto_ablkcipher_setkey(tfm, (u8 *) key, 32);
++
++	AES_DBG(" *_* step 4\n");
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		AES_DBG(KERN_ERR "alg: skcipher: Failed to allocate request "
++			"for %s\n", algo);
++		return -1;
++	}
++
++	AES_DBG(" *_* step 5\n");
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &result);
++
++	AES_DBG(" *_* step 6\n");
++	data = xbuf[0];
++	dst_data = dst_xbuf[0];
++
++	//encrypt
++	memcpy(data, plain_ecb_256_text, 64);
++	memset(dst_data, 0, 64);
++	sg_init_one(&sg[0], data, 64);
++	sg_init_one(&dst_sg[0], dst_data, 64);
++
++	AES_DBG(" *_* step 7\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 64, NULL);
++
++	AES_DBG(" *_* step 8\n");
++	crypto_ablkcipher_encrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, cipher_ecb_256_text, 64))
++		AES_PRINT_RESULT(" encrypt error....\n");
++	else
++		AES_PRINT_RESULT(" encrypt ok....\n");
++
++	//decrypt
++	memcpy(data, cipher_ecb_256_text, 64);
++	memset(dst_data, 0, 64);
++	sg_init_one(&sg[0], data, 64);
++	sg_init_one(&dst_sg[0], dst_data, 64);
++
++	AES_DBG(" *_* step 8\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 64, NULL);
++
++	AES_DBG(" *_* step 9\n");
++	crypto_ablkcipher_decrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, plain_ecb_256_text, 64))
++		AES_PRINT_RESULT(" decrypt error....\n");
++	else
++		AES_PRINT_RESULT(" decrypt ok....\n");
++
++	return 0;
++
++}
++
++static int fh_aes_ofb256_self_test(void)
++{
++	struct crypto_ablkcipher *tfm;
++	struct ablkcipher_request *req;
++	const char *algo;
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++	u32 key[8] = {
++		AES_OFB_256_KEY0, AES_OFB_256_KEY1, AES_OFB_256_KEY2,
++		AES_OFB_256_KEY3,
++		AES_OFB_256_KEY4, AES_OFB_256_KEY5, AES_OFB_256_KEY6,
++		AES_OFB_256_KEY7
++	};
++	u32 iv[4] =
++	{ AES_OFB_IV0, AES_OFB_IV1, AES_OFB_IV2, AES_OFB_IV3 };
++	char *xbuf[XBUFSIZE];
++	char *dst_xbuf[XBUFSIZE];
++	void *data;
++	void *dst_data;
++	memcpy(&key[0],&aes_ofb_key_buf[0],sizeof(aes_ofb_key_buf));
++	memcpy(&iv[0],&aes_ofb_iv_buf[0],sizeof(aes_ofb_iv_buf));
++	if (testmgr_alloc_buf(xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	if (testmgr_alloc_buf(dst_xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	AES_DBG("aes self test get in...\n");
++	AES_DBG(" *_* step 1\n");
++	tfm =
++		crypto_alloc_ablkcipher("ofb-aes-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		AES_DBG("aes_test: failed to alloc cipher!\n");
++		return -1;
++	}
++
++	AES_DBG(" *_* step 2\n");
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	init_completion(&result.completion);
++
++	AES_DBG(" *_* step 3\n");
++	crypto_ablkcipher_setkey(tfm, (u8 *) key, 32);
++
++	AES_DBG(" *_* step 4\n");
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		AES_DBG(KERN_ERR "alg: skcipher: Failed to allocate request "
++			"for %s\n", algo);
++		return -1;
++	}
++
++	AES_DBG(" *_* step 5\n");
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &result);
++
++	AES_DBG(" *_* step 6\n");
++	data = xbuf[0];
++	dst_data = dst_xbuf[0];
++	//encrypt
++	memcpy(data, plain_ofb_256_text, 64);
++	memset(dst_data, 0, 64);
++	sg_init_one(&sg[0], data, 64);
++	sg_init_one(&dst_sg[0], dst_data, 64);
++
++	AES_DBG(" *_* step 7\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 64, (void *)iv);
++
++	AES_DBG(" *_* step 8\n");
++	crypto_ablkcipher_encrypt(req);
++	wait_for_completion(&result.completion);
++	if (memcmp(dst_data, cipher_ofb_256_text, 64))
++		AES_PRINT_RESULT(" encrypt error....\n");
++	else
++		AES_PRINT_RESULT(" encrypt ok....\n");
++	//decrypt
++	memcpy(data, cipher_ofb_256_text, 64);
++	memset(dst_data, 0, 64);
++	sg_init_one(&sg[0], data, 64);
++	sg_init_one(&dst_sg[0], dst_data, 64);
++	AES_DBG(" *_* step 8\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 64, (void *)iv);
++	AES_DBG(" *_* step 9\n");
++	crypto_ablkcipher_decrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, plain_ofb_256_text, 64))
++		AES_PRINT_RESULT(" decrypt error....\n");
++	else
++		AES_PRINT_RESULT(" decrypt ok....\n");
++
++	return 0;
++}
++
++static int fh_des_ecb_self_test(void)
++{
++	struct crypto_ablkcipher *tfm;
++	struct ablkcipher_request *req;
++	const char *algo;
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++	u32 key[2] = { DES_ECB_KEY0, DES_ECB_KEY1 };
++	char *xbuf[XBUFSIZE];
++	char *dst_xbuf[XBUFSIZE];
++	void *data;
++	void *dst_data;
++
++
++	memcpy(&key[0],&des_ecb_key_buf[0],sizeof(des_ecb_key_buf));
++	//memcpy(&iv[0],&aes_ofb_iv_buf[0],sizeof(aes_ofb_iv_buf));
++
++	if (testmgr_alloc_buf(xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	if (testmgr_alloc_buf(dst_xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	AES_DBG("aes self test get in...\n");
++
++	AES_DBG(" *_* step 1\n");
++	tfm =
++		crypto_alloc_ablkcipher("ecb-des-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		AES_DBG("aes_test: failed to alloc cipher!\n");
++		return -1;
++	}
++
++	AES_DBG(" *_* step 2\n");
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	init_completion(&result.completion);
++
++	AES_DBG(" *_* step 3\n");
++	crypto_ablkcipher_setkey(tfm, (u8 *) key, 8);
++
++	AES_DBG(" *_* step 4\n");
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		AES_DBG(KERN_ERR "alg: skcipher: Failed to allocate request "
++			"for %s\n", algo);
++		return -1;
++	}
++
++	AES_DBG(" *_* step 5\n");
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &result);
++
++	AES_DBG(" *_* step 6\n");
++	data = xbuf[0];
++	dst_data = dst_xbuf[0];
++
++	//encrypt
++	memcpy(data, plain_des_ecb_text, 64);
++	memset(dst_data, 0, 64);
++	sg_init_one(&sg[0], data, 64);
++	sg_init_one(&dst_sg[0], dst_data, 64);
++
++	AES_DBG(" *_* step 7\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 64, NULL);
++
++	AES_DBG(" *_* step 8\n");
++	crypto_ablkcipher_encrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, cipher_des_ecb_text, 64))
++		AES_PRINT_RESULT(" encrypt error....\n");
++	else
++		AES_PRINT_RESULT(" encrypt ok....\n");
++
++	//decrypt
++	memcpy(data, cipher_des_ecb_text, 64);
++	memset(dst_data, 0, 64);
++	sg_init_one(&sg[0], data, 64);
++	sg_init_one(&dst_sg[0], dst_data, 64);
++
++	AES_DBG(" *_* step 8\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 64, NULL);
++
++	AES_DBG(" *_* step 9\n");
++	crypto_ablkcipher_decrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, plain_des_ecb_text, 64))
++		AES_PRINT_RESULT(" decrypt error....\n");
++	else
++		AES_PRINT_RESULT(" decrypt ok....\n");
++
++	return 0;
++
++}
++
++static int fh_des_cbc_self_test(void)
++{
++	struct crypto_ablkcipher *tfm;
++	struct ablkcipher_request *req;
++	const char *algo;
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++	u32 key[2] = { DES_CBC_KEY0, DES_CBC_KEY1 };
++	u32 iv[2] = { DES_CBC_IV0, DES_CBC_IV1 };
++	char *xbuf[XBUFSIZE];
++	char *dst_xbuf[XBUFSIZE];
++	void *data;
++	void *dst_data;
++
++
++	memcpy(&key[0],&des_cbc_key_buf[0],sizeof(des_cbc_key_buf));
++	memcpy(&iv[0],&des_cbc_iv_buf[0],sizeof(des_cbc_iv_buf));
++
++	if (testmgr_alloc_buf(xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	if (testmgr_alloc_buf(dst_xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	AES_DBG("aes self test get in...\n");
++
++	AES_DBG(" *_* step 1\n");
++	tfm =
++		crypto_alloc_ablkcipher("cbc-des-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		AES_DBG("aes_test: failed to alloc cipher!\n");
++		return -1;
++	}
++
++	AES_DBG(" *_* step 2\n");
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	init_completion(&result.completion);
++
++	AES_DBG(" *_* step 3\n");
++	crypto_ablkcipher_setkey(tfm, (u8 *) key, 8);
++
++	AES_DBG(" *_* step 4\n");
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		AES_DBG(KERN_ERR "alg: skcipher: Failed to allocate request "
++			"for %s\n", algo);
++		return -1;
++	}
++
++	AES_DBG(" *_* step 5\n");
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &result);
++
++	AES_DBG(" *_* step 6\n");
++	data = xbuf[0];
++	dst_data = dst_xbuf[0];
++
++	//encrypt
++	memcpy(data, plain_des_cbc_text, 24);
++	memset(dst_data, 0, 24);
++	sg_init_one(&sg[0], data, 24);
++	sg_init_one(&dst_sg[0], dst_data, 24);
++
++	AES_DBG(" *_* step 7\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 24, (void *)iv);
++
++	AES_DBG(" *_* step 8\n");
++	crypto_ablkcipher_encrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, cipher_des_cbc_text, 24))
++		AES_PRINT_RESULT(" encrypt error....\n");
++	else
++		AES_PRINT_RESULT(" encrypt ok....\n");
++
++	//decrypt
++	memcpy(data, cipher_des_cbc_text, 24);
++	memset(dst_data, 0, 24);
++	sg_init_one(&sg[0], data, 24);
++	sg_init_one(&dst_sg[0], dst_data, 24);
++
++	AES_DBG(" *_* step 8\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 24, (void *)iv);
++
++	AES_DBG(" *_* step 9\n");
++	crypto_ablkcipher_decrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, plain_des_cbc_text, 24))
++		AES_PRINT_RESULT(" decrypt error....\n");
++	else
++		AES_PRINT_RESULT(" decrypt ok....\n");
++
++	return 0;
++}
++
++static int fh_des_ofb_self_test(void)
++{
++	struct crypto_ablkcipher *tfm;
++	struct ablkcipher_request *req;
++	const char *algo;
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++	u32 key[2] = { DES_OFB_KEY0, DES_OFB_KEY1 };
++	u32 iv[2] = { DES_OFB_IV0, DES_OFB_IV1 };
++	char *xbuf[XBUFSIZE];
++	char *dst_xbuf[XBUFSIZE];
++	void *data;
++	void *dst_data;
++
++	memcpy(&key[0],&des_ofb_key_buf[0],sizeof(des_ofb_key_buf));
++	memcpy(&iv[0],&des_ofb_iv_buf[0],sizeof(des_ofb_iv_buf));
++
++	if (testmgr_alloc_buf(xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	if (testmgr_alloc_buf(dst_xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++	AES_DBG("aes self test get in...\n");
++	AES_DBG(" *_* step 1\n");
++	tfm =
++		crypto_alloc_ablkcipher("ofb-des-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		AES_DBG("aes_test: failed to alloc cipher!\n");
++		return -1;
++	}
++
++	AES_DBG(" *_* step 2\n");
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	init_completion(&result.completion);
++
++	AES_DBG(" *_* step 3\n");
++	crypto_ablkcipher_setkey(tfm, (u8 *) key, 8);
++
++	AES_DBG(" *_* step 4\n");
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		AES_DBG(KERN_ERR "alg: skcipher: Failed to allocate request "
++			"for %s\n", algo);
++		return -1;
++	}
++
++	AES_DBG(" *_* step 5\n");
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &result);
++
++	AES_DBG(" *_* step 6\n");
++	data = xbuf[0];
++	dst_data = dst_xbuf[0];
++
++	//encrypt
++	memcpy(data, plain_des_ofb_text, 24);
++	memset(dst_data, 0, 24);
++	sg_init_one(&sg[0], data, 24);
++	sg_init_one(&dst_sg[0], dst_data, 24);
++
++	AES_DBG(" *_* step 7\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 24, (void *)iv);
++	AES_DBG(" *_* step 8\n");
++	crypto_ablkcipher_encrypt(req);
++	wait_for_completion(&result.completion);
++	if (memcmp(dst_data, cipher_des_ofb_text, 24))
++		AES_PRINT_RESULT(" encrypt error....\n");
++	else
++		AES_PRINT_RESULT(" encrypt ok....\n");
++
++	//decrypt
++	memcpy(data, cipher_des_ofb_text, 24);
++	memset(dst_data, 0, 24);
++	sg_init_one(&sg[0], data, 24);
++	sg_init_one(&dst_sg[0], dst_data, 24);
++	AES_DBG(" *_* step 8\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 24, (void *)iv);
++
++	AES_DBG(" *_* step 9\n");
++	crypto_ablkcipher_decrypt(req);
++	wait_for_completion(&result.completion);
++	if (memcmp(dst_data, plain_des_ofb_text, 24))
++		AES_PRINT_RESULT(" decrypt error....\n");
++	else
++		AES_PRINT_RESULT(" decrypt ok....\n");
++
++	return 0;
++}
++
++static int fh_des_tri_ecb_self_test(void)
++{
++	struct crypto_ablkcipher *tfm;
++	struct ablkcipher_request *req;
++	const char *algo;
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++	u32 key[6] = {
++		DES_TRI_ECB_KEY0, DES_TRI_ECB_KEY1, DES_TRI_ECB_KEY2,
++		DES_TRI_ECB_KEY3, DES_TRI_ECB_KEY4, DES_TRI_ECB_KEY5
++	};
++	char *xbuf[XBUFSIZE];
++	char *dst_xbuf[XBUFSIZE];
++	void *data;
++	void *dst_data;
++
++	memcpy(&key[0], &des3_ecb_key_buf[0], sizeof(des3_ecb_key_buf));
++	if (testmgr_alloc_buf(xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	if (testmgr_alloc_buf(dst_xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	AES_DBG("des tri ecb self test get in...\n");
++
++	AES_DBG(" *_* step 1\n");
++	tfm =
++		crypto_alloc_ablkcipher("ecb-des3-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		AES_DBG("des_test: failed to alloc cipher!\n");
++		return -1;
++	}
++
++	AES_DBG(" *_* step 2\n");
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	init_completion(&result.completion);
++
++	AES_DBG(" *_* step 3\n");
++	crypto_ablkcipher_setkey(tfm, (u8 *) key, 24);
++
++	AES_DBG(" *_* step 4\n");
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		AES_DBG(KERN_ERR "alg: skcipher: Failed to allocate request "
++			"for %s\n", algo);
++		return -1;
++	}
++
++	AES_DBG(" *_* step 5\n");
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &result);
++
++	AES_DBG(" *_* step 6\n");
++	data = xbuf[0];
++	dst_data = dst_xbuf[0];
++
++	//encrypt
++	memcpy(data, plain_des_tri_ecb_text, 24);
++	memset(dst_data, 0, 24);
++	sg_init_one(&sg[0], data, 24);
++	sg_init_one(&dst_sg[0], dst_data, 24);
++
++	AES_DBG(" *_* step 7\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 24, (void *)NULL);
++
++	AES_DBG(" *_* step 8\n");
++	crypto_ablkcipher_encrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, cipher_des_tri_ecb_text, 24))
++		AES_PRINT_RESULT(" encrypt error....\n");
++	else
++		AES_PRINT_RESULT(" encrypt ok....\n");
++
++	//decrypt
++	memcpy(data, cipher_des_tri_ecb_text, 24);
++	memset(dst_data, 0, 24);
++	sg_init_one(&sg[0], data, 24);
++	sg_init_one(&dst_sg[0], dst_data, 24);
++
++	AES_DBG(" *_* step 8\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 24, (void *)NULL);
++
++	AES_DBG(" *_* step 9\n");
++	crypto_ablkcipher_decrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, plain_des_tri_ecb_text, 24))
++		AES_PRINT_RESULT(" decrypt error....\n");
++	else
++		AES_PRINT_RESULT(" decrypt ok....\n");
++
++	return 0;
++}
++
++static int fh_des_tri_cbc_self_test(void)
++{
++	struct crypto_ablkcipher *tfm;
++	struct ablkcipher_request *req;
++	const char *algo;
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++	u32 key[6] = {
++		DES_TRI_CBC_KEY0, DES_TRI_CBC_KEY1, DES_TRI_CBC_KEY2,
++		DES_TRI_CBC_KEY3, DES_TRI_CBC_KEY4, DES_TRI_CBC_KEY5
++	};
++	u32 iv[2] = { DES_TRI_CBC_IV0, DES_TRI_CBC_IV1 };
++	char *xbuf[XBUFSIZE];
++	char *dst_xbuf[XBUFSIZE];
++	void *data;
++	void *dst_data;
++
++	memcpy(&key[0], &des3_cbc_key_buf[0], sizeof(des3_cbc_key_buf));
++	memcpy(&iv[0], &des3_cbc_iv_buf[0], sizeof(des3_cbc_iv_buf));
++
++	if (testmgr_alloc_buf(xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	if (testmgr_alloc_buf(dst_xbuf)) {
++		AES_DBG("no pages.\n");
++		return -1;
++	}
++
++	AES_DBG("des tri cbc self test get in...\n");
++
++	AES_DBG(" *_* step 1\n");
++	tfm =
++		crypto_alloc_ablkcipher("cbc-des3-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		AES_DBG("des_test: failed to alloc cipher!\n");
++		return -1;
++	}
++
++	AES_DBG(" *_* step 2\n");
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	init_completion(&result.completion);
++
++	AES_DBG(" *_* step 3\n");
++	crypto_ablkcipher_setkey(tfm, (u8 *) key, 24);
++
++	AES_DBG(" *_* step 4\n");
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		AES_DBG(KERN_ERR "alg: skcipher: Failed to allocate request "
++			"for %s\n", algo);
++		return -1;
++	}
++
++	AES_DBG(" *_* step 5\n");
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &result);
++
++	AES_DBG(" *_* step 6\n");
++	data = xbuf[0];
++	dst_data = dst_xbuf[0];
++
++	//encrypt
++	memcpy(data, plain_des_tri_cbc_text, 24);
++	memset(dst_data, 0, 24);
++	sg_init_one(&sg[0], data, 24);
++	sg_init_one(&dst_sg[0], dst_data, 24);
++
++	AES_DBG(" *_* step 7\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 24, (void *)iv);
++
++	AES_DBG(" *_* step 8\n");
++	crypto_ablkcipher_encrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, cipher_des_tri_cbc_text, 24))
++		AES_PRINT_RESULT(" encrypt error....\n");
++	else
++		AES_PRINT_RESULT(" encrypt ok....\n");
++
++	//decrypt
++	memcpy(data, cipher_des_tri_cbc_text, 24);
++	memset(dst_data, 0, 24);
++	sg_init_one(&sg[0], data, 24);
++	sg_init_one(&dst_sg[0], dst_data, 24);
++
++	AES_DBG(" *_* step 8\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, 24, (void *)iv);
++
++	AES_DBG(" *_* step 9\n");
++	crypto_ablkcipher_decrypt(req);
++
++	wait_for_completion(&result.completion);
++
++	if (memcmp(dst_data, plain_des_tri_cbc_text, 24))
++		AES_PRINT_RESULT(" decrypt error....\n");
++	else
++		AES_PRINT_RESULT(" decrypt ok....\n");
++
++	return 0;
++}
++
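++/*
++ * All of the DES/3DES self tests above follow the same five-step
++ * ablkcipher flow; a condensed sketch, with error handling and buffer
++ * management omitted (src_buf/dst_buf/len stand in for the page
++ * buffers and 24-byte vectors used above):
++ *
++ *	tfm = crypto_alloc_ablkcipher("cbc-des3-fh",
++ *			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
++ *	crypto_ablkcipher_setkey(tfm, (u8 *)key, keylen);
++ *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++ *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++ *					tcrypt_complete, &result);
++ *	sg_init_one(&sg[0], src_buf, len);
++ *	sg_init_one(&dst_sg[0], dst_buf, len);
++ *	ablkcipher_request_set_crypt(req, sg, dst_sg, len, iv);
++ *	crypto_ablkcipher_encrypt(req);   (or crypto_ablkcipher_decrypt())
++ *	wait_for_completion(&result.completion);
++ *	memcmp(dst_buf, expected, len);
++ */
++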
++#if 0
++
++typedef struct {
++	unsigned int base;
++	void *vbase;
++	unsigned int size;
++} MEM_INFO;
++
++typedef struct {
++	MEM_INFO mem;
++	unsigned char *remap_base; /**< used size */
++} RW_MEM_INFO;
++
++static unsigned char aes_128_key_buf[] = {
++	0x2b,0x7e,0x15,0x16, 0x28,0xae,0xd2,0xa6, 0xab,0xf7,0x15,0x88, 0x09,0xcf,0x4f,0x3c,
++};
++static unsigned char plain_aes_128_text[] = {
++		0x6b,0xc1,0xbe,0xe2, 0x2e,0x40,0x9f,0x96, 0xe9,0x3d,0x7e,0x11, 0x73,0x93,0x17,0x2a,
++		0xae,0x2d,0x8a,0x57, 0x1e,0x03,0xac,0x9c, 0x9e,0xb7,0x6f,0xac, 0x45,0xaf,0x8e,0x51,
++		0x30,0xc8,0x1c,0x46, 0xa3,0x5c,0xe4,0x11, 0xe5,0xfb,0xc1,0x19, 0x1a,0x0a,0x52,0xef,
++		0xf6,0x9f,0x24,0x45, 0xdf,0x4f,0x9b,0x17, 0xad,0x2b,0x41,0x7b, 0xe6,0x6c,0x37,0x10,
++};
++
++static unsigned char cipher_aes_128_text[] = {
++		0x3A,0xD7,0x7B,0xB4, 0x0D,0x7A,0x36,0x60, 0xA8,0x9E,0xCA,0xF3, 0x24,0x66,0xEF,0x97,
++		0xf5,0xd3,0xd5,0x85, 0x03,0xb9,0x69,0x9d, 0xe7,0x85,0x89,0x5a, 0x96,0xfd,0xba,0xaf,
++		0x43,0xb1,0xcd,0x7f, 0x59,0x8e,0xce,0x23, 0x88,0x1b,0x00,0xe3, 0xed,0x03,0x06,0x88,
++		0x7b,0x0c,0x78,0x5e, 0x27,0xe8,0xad,0x3f, 0x82,0x23,0x20,0x71, 0x04,0x72,0x5d,0xd4,
++};
++
++int aes_128_ecb_encrypt(char *key_128, RW_MEM_INFO in,
++			RW_MEM_INFO out, unsigned int data_len_align16)
++{
++
++	static char *xbuf;
++	static char *dst_xbuf;
++	static struct crypto_ablkcipher *tfm;
++	static struct ablkcipher_request *req;
++	static int malloc_flag;
++	const char *algo;
++	struct scatterlist sg[8];
++	struct scatterlist dst_sg[8];
++	void *data;
++	void *dst_data;
++	struct tcrypt_result wait_result;
++
++	/* Allocate the buffers and the transform only on the first call. */
++	if (malloc_flag != 0)
++		goto work_go;
++	xbuf = (void *)__get_free_page(GFP_KERNEL);
++	if (!xbuf) {
++		printk("no pages.\n");
++		return -1;
++	}
++
++	dst_xbuf = (void *)__get_free_page(GFP_KERNEL);
++	if (!dst_xbuf) {
++		free_page((unsigned long)xbuf);
++		printk("no pages.\n");
++		return -1;
++	}
++
++	tfm =
++		crypto_alloc_ablkcipher("ecb-aes-fh",
++					CRYPTO_ALG_TYPE_ABLKCIPHER |
++					CRYPTO_ALG_ASYNC, 0);
++	if (IS_ERR(tfm)) {
++		printk("aes_test: failed to alloc cipher!\n");
++		free_page((unsigned long)xbuf);
++		free_page((unsigned long)dst_xbuf);
++		return -1;
++	}
++
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		printk(KERN_ERR "alg: skcipher: Failed to allocate request "
++			"for %s\n", algo);
++		crypto_free_ablkcipher(tfm);
++		free_page((unsigned long)xbuf);
++		free_page((unsigned long)dst_xbuf);
++		return -1;
++	}
++	malloc_flag = 1;
++
++work_go:
++	printk("aes self test get in...\n");
++	printk(" *_* step 1\n");
++
++	printk(" *_* step 2\n");
++	algo = crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
++	init_completion(&wait_result.completion);
++
++	printk(" *_* step 3\n");
++	crypto_ablkcipher_setkey(tfm, (u8 *)key_128, 16);
++
++	printk(" *_* step 4\n");
++
++
++	printk(" *_* step 5\n");
++	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++					tcrypt_complete, &wait_result);
++
++	printk(" *_* step 6\n");
++	data = xbuf;
++	dst_data = dst_xbuf;
++
++	//encrypt
++	memcpy(data, in.remap_base, data_len_align16);
++	sg_init_one(&sg[0], data, data_len_align16);
++	sg_init_one(&dst_sg[0], dst_data, data_len_align16);
++
++	printk(" *_* step 7\n");
++	ablkcipher_request_set_crypt(req, sg, dst_sg, data_len_align16, NULL);
++
++	printk(" *_* step 8\n");
++	crypto_ablkcipher_encrypt(req);
++
++	wait_for_completion(&wait_result.completion);
++
++	memcpy(out.remap_base, dst_data, data_len_align16);
++
++	return 0;
++
++}
++#endif
++
++
++
++void fh_aes_self_test_all(void)
++{
++	pr_info("aes cbc128 self test go...\n");
++	fh_aes_cbc128_self_test();
++	pr_info("aes ecb256 self test go...\n");
++	fh_aes_ecb256_self_test();
++	pr_info("aes ofb 256 self test go...\n");
++	fh_aes_ofb256_self_test();
++	pr_info("des ecb self test go...\n");
++	fh_des_ecb_self_test();
++	pr_info("des cbc self test go...\n");
++	fh_des_cbc_self_test();
++	pr_info("des ofb self test go...\n");
++	fh_des_ofb_self_test();
++	pr_info("des tri ecb self test go...\n");
++	fh_des_tri_ecb_self_test();
++	pr_info("des tri cbc self test go...\n");
++	fh_des_tri_cbc_self_test();
++#if 0
++	{
++		unsigned char temp_buf[64] = {0};
++		RW_MEM_INFO in;
++		RW_MEM_INFO out;
++		int i;
++
++		in.remap_base = &plain_aes_128_text[0];
++		out.remap_base = &temp_buf[0];
++
++		pr_info("chenjn self test go.....\n");
++
++		aes_128_ecb_encrypt(&aes_128_key_buf[0], in, out, 64);
++		for (i = 0; i < sizeof(temp_buf); i++)
++			printk("cipher data[%d]:0x%x\n", i, temp_buf[i]);
++	}
++#endif
++
++}
++
++#endif
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 25cf327c..76f36670 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -89,6 +89,25 @@ config DW_DMAC
+ 	  Support the Synopsys DesignWare AHB DMA controller.  This
+ 	  can be integrated in chips such as the Atmel AT32ap7000.
+ 
++config FH_DMAC
++	tristate "FH DesignWare AHB DMA support"
++	depends on HAVE_CLK
++	select DMA_ENGINE
++	help
++	  Support the Synopsys DesignWare AHB DMA controller as
++	  integrated in FullHan chips.
++
++if FH_DMAC
++
++config FH_DMAC_MISC
++	bool "FH DMAC misc device support"
++	default y
++	help
++	  Enable the FH DMAC misc device interface.
++
++endif
++
+ config AT_HDMAC
+ 	tristate "Atmel AHB DMA support"
+ 	depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+@@ -271,5 +290,7 @@ config DMATEST
+ 	help
+ 	  Simple DMA test client. Say N unless you're debugging a
+ 	  DMA Device driver.
+-
+ endif
+diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
+index 836095ab..252d297a 100644
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+ obj-$(CONFIG_PL330_DMA) += pl330.o
+ obj-$(CONFIG_PCH_DMA) += pch_dma.o
+ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
++obj-$(CONFIG_FH_DMAC) += fh_dmac.o
+\ No newline at end of file
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index 8bcb15fb..ce1de9b4 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -45,6 +45,9 @@
+  * See Documentation/dmaengine.txt for more details
+  */
+ 
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/dma-mapping.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
+@@ -61,9 +64,9 @@
+ #include <linux/slab.h>
+ 
+ static DEFINE_MUTEX(dma_list_mutex);
++static DEFINE_IDR(dma_idr);
+ static LIST_HEAD(dma_device_list);
+ static long dmaengine_ref_count;
+-static struct idr dma_idr;
+ 
+ /* --- sysfs implementation --- */
+ 
+@@ -170,7 +173,8 @@ static struct class dma_devclass = {
+ #define dma_device_satisfies_mask(device, mask) \
+ 	__dma_device_satisfies_mask((device), &(mask))
+ static int
+-__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
++__dma_device_satisfies_mask(struct dma_device *device,
++			    const dma_cap_mask_t *want)
+ {
+ 	dma_cap_mask_t has;
+ 
+@@ -260,10 +264,13 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+ 	do {
+ 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+-			printk(KERN_ERR "dma_sync_wait_timeout!\n");
++			pr_err("%s: timeout!\n", __func__);
+ 			return DMA_ERROR;
+ 		}
+-	} while (status == DMA_IN_PROGRESS);
++		if (status != DMA_IN_PROGRESS)
++			break;
++		cpu_relax();
++	} while (1);
+ 
+ 	return status;
+ }
+@@ -311,7 +318,7 @@ static int __init dma_channel_table_init(void)
+ 	}
+ 
+ 	if (err) {
+-		pr_err("dmaengine: initialization failure\n");
++		pr_err("initialization failure\n");
+ 		for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ 			if (channel_table[cap])
+ 				free_percpu(channel_table[cap]);
+@@ -331,6 +338,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+ }
+ EXPORT_SYMBOL(dma_find_channel);
+ 
++/*
++ * net_dma_find_channel - find a channel for net_dma
++ * net_dma has alignment requirements
++ */
++struct dma_chan *net_dma_find_channel(void)
++{
++	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
++	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
++		return NULL;
++
++	return chan;
++}
++EXPORT_SYMBOL(net_dma_find_channel);
++
+ /**
+  * dma_issue_pending_all - flush all pending operations across all channels
+  */
+@@ -442,7 +463,8 @@ static void dma_channel_rebalance(void)
+ 		}
+ }
+ 
+-static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
++static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
++					  struct dma_device *dev,
+ 					  dma_filter_fn fn, void *fn_param)
+ {
+ 	struct dma_chan *chan;
+@@ -484,7 +506,8 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic
+  * @fn: optional callback to disposition available channels
+  * @fn_param: opaque parameter to pass to dma_filter_fn
+  */
+-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
++struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
++				       dma_filter_fn fn, void *fn_param)
+ {
+ 	struct dma_device *device, *_d;
+ 	struct dma_chan *chan = NULL;
+@@ -505,12 +528,12 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
+ 			err = dma_chan_get(chan);
+ 
+ 			if (err == -ENODEV) {
+-				pr_debug("%s: %s module removed\n", __func__,
+-					 dma_chan_name(chan));
++				pr_debug("%s: %s module removed\n",
++					 __func__, dma_chan_name(chan));
+ 				list_del_rcu(&device->global_node);
+ 			} else if (err)
+-				pr_err("dmaengine: failed to get %s: (%d)\n",
+-				       dma_chan_name(chan), err);
++				pr_debug("%s: failed to get %s: (%d)\n",
++					 __func__, dma_chan_name(chan), err);
+ 			else
+ 				break;
+ 			if (--device->privatecnt == 0)
+@@ -520,13 +543,34 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
+ 	}
+ 	mutex_unlock(&dma_list_mutex);
+ 
+-	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
++	pr_debug("%s: %s (%s)\n",
++		 __func__,
++		 chan ? "success" : "fail",
+ 		 chan ? dma_chan_name(chan) : NULL);
+ 
+ 	return chan;
+ }
+ EXPORT_SYMBOL_GPL(__dma_request_channel);
++#if 0
++/**
++ * dma_request_slave_channel - try to allocate an exclusive slave channel
++ * @dev:	pointer to client device structure
++ * @name:	slave channel name
++ */
++struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
++{
++	/* If device-tree is present get slave info from here */
++	if (dev->of_node)
++		return of_dma_request_slave_channel(dev->of_node, name);
++
++	/* If device was enumerated by ACPI get slave info from here */
++	if (ACPI_HANDLE(dev))
++		return acpi_dma_request_slave_chan_by_name(dev, name);
+ 
++	return NULL;
++}
++EXPORT_SYMBOL_GPL(dma_request_slave_channel);
++#endif
+ void dma_release_channel(struct dma_chan *chan)
+ {
+ 	mutex_lock(&dma_list_mutex);
+@@ -563,8 +607,8 @@ void dmaengine_get(void)
+ 				list_del_rcu(&device->global_node);
+ 				break;
+ 			} else if (err)
+-				pr_err("dmaengine: failed to get %s: (%d)\n",
+-				       dma_chan_name(chan), err);
++				pr_debug("%s: failed to get %s: (%d)\n",
++				       __func__, dma_chan_name(chan), err);
+ 		}
+ 	}
+ 
+@@ -647,19 +691,19 @@ static bool device_has_all_tx_types(struct dma_device *device)
+ static int get_dma_id(struct dma_device *device)
+ {
+ 	int rc;
++	int dma_id;
+ 
+- idr_retry:
+-	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+-		return -ENOMEM;
+ 	mutex_lock(&dma_list_mutex);
+-	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
+-	mutex_unlock(&dma_list_mutex);
+-	if (rc == -EAGAIN)
+-		goto idr_retry;
+-	else if (rc != 0)
+-		return rc;
+ 
+-	return 0;
++	if (!idr_pre_get(&dma_idr, GFP_KERNEL)) {
++		mutex_unlock(&dma_list_mutex);
++		return -ENOMEM;
++	}
++
++	rc = idr_get_new(&dma_idr, NULL, &dma_id);
++	if (rc >= 0)
++		device->dev_id = dma_id;
++
++	mutex_unlock(&dma_list_mutex);
++	return rc < 0 ? rc : 0;
+ }
+ 
+ /**
+@@ -692,12 +736,12 @@ int dma_async_device_register(struct dma_device *device)
+ 		!device->device_prep_dma_interrupt);
+ 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
+ 		!device->device_prep_dma_sg);
+-	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+-		!device->device_prep_slave_sg);
+ 	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
+ 		!device->device_prep_dma_cyclic);
+ 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+ 		!device->device_control);
++	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
++		!device->device_prep_interleaved_dma);
+ 
+ 	BUG_ON(!device->device_alloc_chan_resources);
+ 	BUG_ON(!device->device_free_chan_resources);
+@@ -1000,7 +1044,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+ 	while (tx->cookie == -EBUSY) {
+ 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+ 			pr_err("%s timeout waiting for descriptor submission\n",
+-				__func__);
++			       __func__);
+ 			return DMA_ERROR;
+ 		}
+ 		cpu_relax();
+@@ -1049,8 +1093,6 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
+ 
+ static int __init dma_bus_init(void)
+ {
+-	idr_init(&dma_idr);
+-	mutex_init(&dma_list_mutex);
+ 	return class_register(&dma_devclass);
+ }
+ arch_initcall(dma_bus_init);
+diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
+new file mode 100644
+index 00000000..17f983a4
+--- /dev/null
++++ b/drivers/dma/dmaengine.h
+@@ -0,0 +1,89 @@
++/*
++ * The contents of this file are private to DMA engine drivers, and is not
++ * part of the API to be used by DMA engine users.
++ */
++#ifndef DMAENGINE_H
++#define DMAENGINE_H
++
++#include <linux/bug.h>
++#include <linux/dmaengine.h>
++
++/**
++ * dma_cookie_init - initialize the cookies for a DMA channel
++ * @chan: dma channel to initialize
++ */
++static inline void dma_cookie_init(struct dma_chan *chan)
++{
++	chan->cookie = DMA_MIN_COOKIE;
++	chan->completed_cookie = DMA_MIN_COOKIE;
++}
++
++/**
++ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
++ * @tx: descriptor needing cookie
++ *
++ * Assign a unique non-zero per-channel cookie to the descriptor.
++ * Note: caller is expected to hold a lock to prevent concurrency.
++ */
++static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
++{
++	struct dma_chan *chan = tx->chan;
++	dma_cookie_t cookie;
++
++	cookie = chan->cookie + 1;
++	if (cookie < DMA_MIN_COOKIE)
++		cookie = DMA_MIN_COOKIE;
++	tx->cookie = chan->cookie = cookie;
++
++	return cookie;
++}
++
++/**
++ * dma_cookie_complete - complete a descriptor
++ * @tx: descriptor to complete
++ *
++ * Mark this descriptor complete by updating the channels completed
++ * cookie marker.  Zero the descriptors cookie to prevent accidental
++ * repeated completions.
++ *
++ * Note: caller is expected to hold a lock to prevent concurrency.
++ */
++static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
++{
++	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
++	tx->chan->completed_cookie = tx->cookie;
++	tx->cookie = 0;
++}
++
++/**
++ * dma_cookie_status - report cookie status
++ * @chan: dma channel
++ * @cookie: cookie we are interested in
++ * @state: dma_tx_state structure to return last/used cookies
++ *
++ * Report the status of the cookie, filling in the state structure if
++ * non-NULL.  No locking is required.
++ */
++static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
++	dma_cookie_t cookie, struct dma_tx_state *state)
++{
++	dma_cookie_t used, complete;
++
++	used = chan->cookie;
++	complete = chan->completed_cookie;
++	barrier();
++	if (state) {
++		state->last = complete;
++		state->used = used;
++		state->residue = 0;
++	}
++	return dma_async_is_complete(cookie, complete, used);
++}
++
++static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
++{
++	if (state)
++		state->residue = residue;
++}
++
++#endif
+diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
+deleted file mode 100644
+index c3419518..00000000
+--- a/drivers/dma/dw_dmac_regs.h
++++ /dev/null
+@@ -1,235 +0,0 @@
+-/*
+- * Driver for the Synopsys DesignWare AHB DMA Controller
+- *
+- * Copyright (C) 2005-2007 Atmel Corporation
+- * Copyright (C) 2010-2011 ST Microelectronics
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#include <linux/dw_dmac.h>
+-
+-#define DW_DMA_MAX_NR_CHANNELS	8
+-
+-/*
+- * Redefine this macro to handle differences between 32- and 64-bit
+- * addressing, big vs. little endian, etc.
+- */
+-#define DW_REG(name)		u32 name; u32 __pad_##name
+-
+-/* Hardware register definitions. */
+-struct dw_dma_chan_regs {
+-	DW_REG(SAR);		/* Source Address Register */
+-	DW_REG(DAR);		/* Destination Address Register */
+-	DW_REG(LLP);		/* Linked List Pointer */
+-	u32	CTL_LO;		/* Control Register Low */
+-	u32	CTL_HI;		/* Control Register High */
+-	DW_REG(SSTAT);
+-	DW_REG(DSTAT);
+-	DW_REG(SSTATAR);
+-	DW_REG(DSTATAR);
+-	u32	CFG_LO;		/* Configuration Register Low */
+-	u32	CFG_HI;		/* Configuration Register High */
+-	DW_REG(SGR);
+-	DW_REG(DSR);
+-};
+-
+-struct dw_dma_irq_regs {
+-	DW_REG(XFER);
+-	DW_REG(BLOCK);
+-	DW_REG(SRC_TRAN);
+-	DW_REG(DST_TRAN);
+-	DW_REG(ERROR);
+-};
+-
+-struct dw_dma_regs {
+-	/* per-channel registers */
+-	struct dw_dma_chan_regs	CHAN[DW_DMA_MAX_NR_CHANNELS];
+-
+-	/* irq handling */
+-	struct dw_dma_irq_regs	RAW;		/* r */
+-	struct dw_dma_irq_regs	STATUS;		/* r (raw & mask) */
+-	struct dw_dma_irq_regs	MASK;		/* rw (set = irq enabled) */
+-	struct dw_dma_irq_regs	CLEAR;		/* w (ack, affects "raw") */
+-
+-	DW_REG(STATUS_INT);			/* r */
+-
+-	/* software handshaking */
+-	DW_REG(REQ_SRC);
+-	DW_REG(REQ_DST);
+-	DW_REG(SGL_REQ_SRC);
+-	DW_REG(SGL_REQ_DST);
+-	DW_REG(LAST_SRC);
+-	DW_REG(LAST_DST);
+-
+-	/* miscellaneous */
+-	DW_REG(CFG);
+-	DW_REG(CH_EN);
+-	DW_REG(ID);
+-	DW_REG(TEST);
+-
+-	/* optional encoded params, 0x3c8..0x3f7 */
+-};
+-
+-/* Bitfields in CTL_LO */
+-#define DWC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
+-#define DWC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
+-#define DWC_CTLL_SRC_WIDTH(n)	((n)<<4)
+-#define DWC_CTLL_DST_INC	(0<<7)		/* DAR update/not */
+-#define DWC_CTLL_DST_DEC	(1<<7)
+-#define DWC_CTLL_DST_FIX	(2<<7)
+-#define DWC_CTLL_SRC_INC	(0<<9)		/* SAR update/not */
+-#define DWC_CTLL_SRC_DEC	(1<<9)
+-#define DWC_CTLL_SRC_FIX	(2<<9)
+-#define DWC_CTLL_DST_MSIZE(n)	((n)<<11)	/* burst, #elements */
+-#define DWC_CTLL_SRC_MSIZE(n)	((n)<<14)
+-#define DWC_CTLL_S_GATH_EN	(1 << 17)	/* src gather, !FIX */
+-#define DWC_CTLL_D_SCAT_EN	(1 << 18)	/* dst scatter, !FIX */
+-#define DWC_CTLL_FC(n)		((n) << 20)
+-#define DWC_CTLL_FC_M2M		(0 << 20)	/* mem-to-mem */
+-#define DWC_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
+-#define DWC_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
+-#define DWC_CTLL_FC_P2P		(3 << 20)	/* periph-to-periph */
+-/* plus 4 transfer types for peripheral-as-flow-controller */
+-#define DWC_CTLL_DMS(n)		((n)<<23)	/* dst master select */
+-#define DWC_CTLL_SMS(n)		((n)<<25)	/* src master select */
+-#define DWC_CTLL_LLP_D_EN	(1 << 27)	/* dest block chain */
+-#define DWC_CTLL_LLP_S_EN	(1 << 28)	/* src block chain */
+-
+-/* Bitfields in CTL_HI */
+-#define DWC_CTLH_DONE		0x00001000
+-#define DWC_CTLH_BLOCK_TS_MASK	0x00000fff
+-
+-/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+-#define DWC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
+-#define DWC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */
+-#define DWC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
+-#define DWC_CFGL_FIFO_EMPTY	(1 << 9)	/* pause xfer */
+-#define DWC_CFGL_HS_DST		(1 << 10)	/* handshake w/dst */
+-#define DWC_CFGL_HS_SRC		(1 << 11)	/* handshake w/src */
+-#define DWC_CFGL_MAX_BURST(x)	((x) << 20)
+-#define DWC_CFGL_RELOAD_SAR	(1 << 30)
+-#define DWC_CFGL_RELOAD_DAR	(1 << 31)
+-
+-/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
+-#define DWC_CFGH_DS_UPD_EN	(1 << 5)
+-#define DWC_CFGH_SS_UPD_EN	(1 << 6)
+-
+-/* Bitfields in SGR */
+-#define DWC_SGR_SGI(x)		((x) << 0)
+-#define DWC_SGR_SGC(x)		((x) << 20)
+-
+-/* Bitfields in DSR */
+-#define DWC_DSR_DSI(x)		((x) << 0)
+-#define DWC_DSR_DSC(x)		((x) << 20)
+-
+-/* Bitfields in CFG */
+-#define DW_CFG_DMA_EN		(1 << 0)
+-
+-#define DW_REGLEN		0x400
+-
+-enum dw_dmac_flags {
+-	DW_DMA_IS_CYCLIC = 0,
+-};
+-
+-struct dw_dma_chan {
+-	struct dma_chan		chan;
+-	void __iomem		*ch_regs;
+-	u8			mask;
+-	u8			priority;
+-	bool			paused;
+-
+-	spinlock_t		lock;
+-
+-	/* these other elements are all protected by lock */
+-	unsigned long		flags;
+-	dma_cookie_t		completed;
+-	struct list_head	active_list;
+-	struct list_head	queue;
+-	struct list_head	free_list;
+-	struct dw_cyclic_desc	*cdesc;
+-
+-	unsigned int		descs_allocated;
+-};
+-
+-static inline struct dw_dma_chan_regs __iomem *
+-__dwc_regs(struct dw_dma_chan *dwc)
+-{
+-	return dwc->ch_regs;
+-}
+-
+-#define channel_readl(dwc, name) \
+-	readl(&(__dwc_regs(dwc)->name))
+-#define channel_writel(dwc, name, val) \
+-	writel((val), &(__dwc_regs(dwc)->name))
+-
+-static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
+-{
+-	return container_of(chan, struct dw_dma_chan, chan);
+-}
+-
+-struct dw_dma {
+-	struct dma_device	dma;
+-	void __iomem		*regs;
+-	struct tasklet_struct	tasklet;
+-	struct clk		*clk;
+-
+-	u8			all_chan_mask;
+-
+-	struct dw_dma_chan	chan[0];
+-};
+-
+-static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
+-{
+-	return dw->regs;
+-}
+-
+-#define dma_readl(dw, name) \
+-	readl(&(__dw_regs(dw)->name))
+-#define dma_writel(dw, name, val) \
+-	writel((val), &(__dw_regs(dw)->name))
+-
+-#define channel_set_bit(dw, reg, mask) \
+-	dma_writel(dw, reg, ((mask) << 8) | (mask))
+-#define channel_clear_bit(dw, reg, mask) \
+-	dma_writel(dw, reg, ((mask) << 8) | 0)
+-
+-static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
+-{
+-	return container_of(ddev, struct dw_dma, dma);
+-}
+-
+-/* LLI == Linked List Item; a.k.a. DMA block descriptor */
+-struct dw_lli {
+-	/* values that are not changed by hardware */
+-	dma_addr_t	sar;
+-	dma_addr_t	dar;
+-	dma_addr_t	llp;		/* chain to next lli */
+-	u32		ctllo;
+-	/* values that may get written back: */
+-	u32		ctlhi;
+-	/* sstat and dstat can snapshot peripheral register state.
+-	 * silicon config may discard either or both...
+-	 */
+-	u32		sstat;
+-	u32		dstat;
+-};
+-
+-struct dw_desc {
+-	/* FIRST values the hardware uses */
+-	struct dw_lli			lli;
+-
+-	/* THEN values for driver housekeeping */
+-	struct list_head		desc_node;
+-	struct list_head		tx_list;
+-	struct dma_async_tx_descriptor	txd;
+-	size_t				len;
+-};
+-
+-static inline struct dw_desc *
+-txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
+-{
+-	return container_of(txd, struct dw_desc, txd);
+-}
+diff --git a/drivers/dma/fh_dmac.c b/drivers/dma/fh_dmac.c
+new file mode 100644
+index 00000000..314b33d6
+--- /dev/null
++++ b/drivers/dma/fh_dmac.c
+@@ -0,0 +1,1846 @@
++/*
++ * Core driver for the Synopsys DesignWare DMA Controller
++ *
++ * Copyright (C) 2007-2008 Atmel Corporation
++ * Copyright (C) 2010-2011 ST Microelectronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/bitops.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/dmaengine.h>
++#include "dmaengine.h"
++#include <mach/fh_dmac_regs.h>
++
++/*
++ * This supports the Synopsys "DesignWare AHB Central DMA Controller",
++ * (FH_ahb_dmac) which is used with various AMBA 2.0 systems (not all
++ * of which use ARM any more).  See the "Databook" from Synopsys for
++ * information beyond what licensees probably provide.
++ *
++ * The driver has currently been tested only with the Atmel AT32AP7000,
++ * which does not support descriptor writeback.
++ */
++
++static inline unsigned int fhc_get_dms(struct fh_dma_slave *slave)
++{
++	return slave ? slave->dst_master : 0;
++}
++
++static inline unsigned int fhc_get_sms(struct fh_dma_slave *slave)
++{
++	return slave ? slave->src_master : 1;
++}
++
++static inline void fhc_set_masters(struct fh_dma_chan *fhc)
++{
++	struct fh_dma *fhd = to_fh_dma(fhc->chan.device);
++	struct fh_dma_slave *dms = fhc->chan.private;
++	unsigned char mmax = fhd->nr_masters - 1;
++
++	if (fhc->request_line == ~0) {
++		fhc->src_master = min_t(unsigned char, mmax, fhc_get_sms(dms));
++		fhc->dst_master = min_t(unsigned char, mmax, fhc_get_dms(dms));
++	}
++}
++
++#define FHC_DEFAULT_CTLLO(_chan) ({				\
++		struct fh_dma_chan *_fhc = to_fh_dma_chan(_chan);	\
++		struct dma_slave_config	*_sconfig = &_fhc->dma_sconfig;	\
++		bool _is_slave = is_slave_direction(_fhc->direction);	\
++		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
++			FH_DMA_MSIZE_16;			\
++		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
++			FH_DMA_MSIZE_16;			\
++								\
++		(FHC_CTLL_DST_MSIZE(_dmsize)			\
++		 | FHC_CTLL_SRC_MSIZE(_smsize)			\
++		 | FHC_CTLL_LLP_D_EN				\
++		 | FHC_CTLL_LLP_S_EN				\
++		 | FHC_CTLL_DMS(_fhc->dst_master)		\
++		 | FHC_CTLL_SMS(_fhc->src_master));		\
++	})
++
++#define FHC_DEFAULT_CTLLO_OLD(private) ({				\
++		struct fh_dma_slave *__slave = (private);	\
++		int dms = __slave ? __slave->dst_master : 0;	\
++		int sms = __slave ? __slave->src_master : 1;	\
++		u8 smsize = __slave ? __slave->src_msize : FH_DMA_MSIZE_16; \
++		u8 dmsize = __slave ? __slave->dst_msize : FH_DMA_MSIZE_16; \
++								\
++		(FHC_CTLL_DST_MSIZE(dmsize)			\
++		 | FHC_CTLL_SRC_MSIZE(smsize)			\
++		 | FHC_CTLL_LLP_D_EN				\
++		 | FHC_CTLL_LLP_S_EN				\
++		 | FHC_CTLL_DMS(dms)				\
++		 | FHC_CTLL_SMS(sms));				\
++	})
++
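++/*
++ * Illustration: for a mem-to-mem channel (no slave config) the default
++ * control word is combined with per-transfer bits exactly as
++ * fhc_prep_dma_memcpy() does below:
++ *
++ *	ctllo = FHC_DEFAULT_CTLLO(chan)
++ *		| FHC_CTLL_DST_WIDTH(dst_width)
++ *		| FHC_CTLL_SRC_WIDTH(src_width)
++ *		| FHC_CTLL_DST_INC
++ *		| FHC_CTLL_SRC_INC
++ *		| FHC_CTLL_FC_M2M;
++ */
++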
++/*
++ * Number of descriptors to allocate for each channel. This should be
++ * made configurable somehow; preferably, the clients (at least the
++ * ones using slave transfers) should be able to give us a hint.
++ */
++#define NR_DESCS_PER_CHANNEL	4096
++
++/*----------------------------------------------------------------------*/
++
++static struct device *chan2dev(struct dma_chan *chan)
++{
++	return &chan->dev->device;
++}
++
++static struct device *chan2parent(struct dma_chan *chan)
++{
++	return chan->dev->device.parent;
++}
++
++static struct fh_desc *fhc_first_active(struct fh_dma_chan *fhc)
++{
++	return to_fh_desc(fhc->active_list.next);
++}
++
++static struct fh_desc *fhc_desc_get(struct fh_dma_chan *fhc)
++{
++	struct fh_desc *desc, *_desc;
++	struct fh_desc *ret = NULL;
++	unsigned int i = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&fhc->lock, flags);
++	list_for_each_entry_safe(desc, _desc, &fhc->free_list, desc_node) {
++		i++;
++		if (async_tx_test_ack(&desc->txd)) {
++			list_del(&desc->desc_node);
++			ret = desc;
++			break;
++		}
++		dev_dbg(chan2dev(&fhc->chan), "desc %p not ACKed\n", desc);
++	}
++	spin_unlock_irqrestore(&fhc->lock, flags);
++
++	dev_vdbg(chan2dev(&fhc->chan), "scanned %u descriptors on freelist\n", i);
++
++	return ret;
++}
++
++/*
++ * Move a descriptor, including any children, to the free list.
++ * `desc' must not be on any lists.
++ */
++static void fhc_desc_put(struct fh_dma_chan *fhc, struct fh_desc *desc)
++{
++	unsigned long flags;
++
++	if (desc) {
++		struct fh_desc *child;
++
++		spin_lock_irqsave(&fhc->lock, flags);
++		list_for_each_entry(child, &desc->tx_list, desc_node)
++			dev_vdbg(chan2dev(&fhc->chan),
++					"moving child desc %p to freelist\n",
++					child);
++		list_splice_init(&desc->tx_list, &fhc->free_list);
++		dev_vdbg(chan2dev(&fhc->chan), "moving desc %p to freelist\n", desc);
++		list_add(&desc->desc_node, &fhc->free_list);
++		spin_unlock_irqrestore(&fhc->lock, flags);
++	}
++}
++
++static void fhc_initialize(struct fh_dma_chan *fhc)
++{
++	struct fh_dma *fhd = to_fh_dma(fhc->chan.device);
++	struct fh_dma_slave *dms = fhc->chan.private;
++	u32 cfghi = FHC_CFGH_FIFO_MODE;
++	u32 cfglo = FHC_CFGL_CH_PRIOR(fhc->priority);
++
++	if (fhc->initialized == true)
++		return;
++
++	if (dms) {
++		cfghi = dms->cfg_hi;
++		cfglo |= dms->cfg_lo & ~FHC_CFGL_CH_PRIOR_MASK;
++	} else {
++		if (fhc->direction == DMA_MEM_TO_DEV)
++			cfghi = FHC_CFGH_DST_PER(fhc->request_line);
++		else if (fhc->direction == DMA_DEV_TO_MEM)
++			cfghi = FHC_CFGH_SRC_PER(fhc->request_line);
++	}
++
++	channel_writel(fhc, CFG_LO, cfglo);
++	channel_writel(fhc, CFG_HI, cfghi);
++
++	/* Enable interrupts */
++	channel_set_bit(fhd, MASK.XFER, fhc->mask);
++	channel_set_bit(fhd, MASK.BLOCK, fhc->mask);
++	channel_set_bit(fhd, MASK.ERROR, fhc->mask);
++
++	fhc->initialized = true;
++}
++
++/*----------------------------------------------------------------------*/
++
++static inline unsigned int fhc_fast_fls(unsigned long long v)
++{
++	/*
++	 * We can be a lot more clever here, but this should take care
++	 * of the most common optimization.
++	 */
++	if (!(v & 7))
++		return 3;
++	else if (!(v & 3))
++		return 2;
++	else if (!(v & 1))
++		return 1;
++	return 0;
++}
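++
++/*
++ * e.g. fhc_fast_fls(0x1000) == 3, fhc_fast_fls(0x1004) == 2 and
++ * fhc_fast_fls(0x1001) == 0.  The callers below clamp the result against
++ * the bus data width and feed it to FHC_CTLL_SRC_WIDTH()/DST_WIDTH(),
++ * where a value n means 2^n-byte transfers.
++ */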
++
++static inline void fhc_dump_chan_regs(struct fh_dma_chan *fhc)
++{
++	dev_err(chan2dev(&fhc->chan),
++		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
++		channel_readl(fhc, SAR),
++		channel_readl(fhc, DAR),
++		channel_readl(fhc, LLP),
++		channel_readl(fhc, CTL_HI),
++		channel_readl(fhc, CTL_LO));
++}
++
++static inline void fhc_chan_disable(struct fh_dma *fhd, struct fh_dma_chan *fhc)
++{
++	channel_clear_bit(fhd, CH_EN, fhc->mask);
++	while (dma_readl(fhd, CH_EN) & fhc->mask)
++		cpu_relax();
++}
++
++/*----------------------------------------------------------------------*/
++
++/* Perform single block transfer */
++static inline void fhc_do_single_block(struct fh_dma_chan *fhc,
++				       struct fh_desc *desc)
++{
++	struct fh_dma	*fhd = to_fh_dma(fhc->chan.device);
++	u32		ctllo;
++
++	/* Software emulation of LLP mode relies on interrupts to continue
++	 * multi block transfer. */
++	ctllo = desc->lli.ctllo | FHC_CTLL_INT_EN;
++
++	channel_writel(fhc, SAR, desc->lli.sar);
++	channel_writel(fhc, DAR, desc->lli.dar);
++	channel_writel(fhc, CTL_LO, ctllo);
++	channel_writel(fhc, CTL_HI, desc->lli.ctlhi);
++	channel_set_bit(fhd, CH_EN, fhc->mask);
++
++	/* Move pointer to next descriptor */
++	fhc->tx_node_active = fhc->tx_node_active->next;
++}
++
++/* Called with fhc->lock held and bh disabled */
++static void fhc_dostart(struct fh_dma_chan *fhc, struct fh_desc *first)
++{
++	struct fh_dma	*fhd = to_fh_dma(fhc->chan.device);
++	unsigned long	was_soft_llp;
++
++	/* ASSERT:  channel is idle */
++	if (dma_readl(fhd, CH_EN) & fhc->mask) {
++		dev_err(chan2dev(&fhc->chan),
++			"BUG: Attempted to start non-idle channel\n");
++		fhc_dump_chan_regs(fhc);
++
++		/* The tasklet will hopefully advance the queue... */
++		return;
++	}
++
++	if (fhc->nollp) {
++		was_soft_llp = test_and_set_bit(FH_DMA_IS_SOFT_LLP,
++						&fhc->flags);
++		if (was_soft_llp) {
++			dev_err(chan2dev(&fhc->chan),
++				"BUG: Attempted to start new LLP transfer "
++				"inside ongoing one\n");
++			return;
++		}
++
++		fhc_initialize(fhc);
++
++		fhc->residue = first->total_len;
++		fhc->tx_node_active = &first->tx_list;
++
++		/* Submit first block */
++		fhc_do_single_block(fhc, first);
++
++		return;
++	}
++
++	fhc_initialize(fhc);
++
++	channel_writel(fhc, LLP, first->txd.phys);
++	channel_writel(fhc, CTL_LO,
++			FHC_CTLL_LLP_D_EN | FHC_CTLL_LLP_S_EN);
++	channel_writel(fhc, CTL_HI, 0);
++	channel_set_bit(fhd, CH_EN, fhc->mask);
++}
++
++/*----------------------------------------------------------------------*/
++
++static void
++fhc_descriptor_complete(struct fh_dma_chan *fhc, struct fh_desc *desc,
++		bool callback_required)
++{
++	dma_async_tx_callback		callback = NULL;
++	void				*param = NULL;
++	struct dma_async_tx_descriptor	*txd = &desc->txd;
++	struct fh_desc			*child;
++	unsigned long			flags;
++
++	dev_vdbg(chan2dev(&fhc->chan), "descriptor %u complete\n", txd->cookie);
++
++	spin_lock_irqsave(&fhc->lock, flags);
++	dma_cookie_complete(txd);
++	if (callback_required) {
++		callback = txd->callback;
++		param = txd->callback_param;
++	}
++
++	/* async_tx_ack */
++	list_for_each_entry(child, &desc->tx_list, desc_node)
++		async_tx_ack(&child->txd);
++	async_tx_ack(&desc->txd);
++
++	list_splice_init(&desc->tx_list, &fhc->free_list);
++	list_move(&desc->desc_node, &fhc->free_list);
++
++	if (!is_slave_direction(fhc->direction)) {
++		struct device *parent = chan2parent(&fhc->chan);
++		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
++			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
++				dma_unmap_single(parent, desc->lli.dar,
++					desc->total_len, DMA_FROM_DEVICE);
++			else
++				dma_unmap_page(parent, desc->lli.dar,
++					desc->total_len, DMA_FROM_DEVICE);
++		}
++		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
++			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
++				dma_unmap_single(parent, desc->lli.sar,
++					desc->total_len, DMA_TO_DEVICE);
++			else
++				dma_unmap_page(parent, desc->lli.sar,
++					desc->total_len, DMA_TO_DEVICE);
++		}
++	}
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++
++	if (callback)
++		callback(param);
++}
++
++static void fhc_complete_all(struct fh_dma *fhd, struct fh_dma_chan *fhc)
++{
++	struct fh_desc *desc, *_desc;
++	LIST_HEAD(list);
++	unsigned long flags;
++
++	spin_lock_irqsave(&fhc->lock, flags);
++	if (dma_readl(fhd, CH_EN) & fhc->mask) {
++		dev_err(chan2dev(&fhc->chan),
++			"BUG: XFER bit set, but channel not idle!\n");
++
++		/* Try to continue after resetting the channel... */
++		fhc_chan_disable(fhd, fhc);
++	}
++
++	/*
++	 * Submit queued descriptors ASAP, i.e. before we go through
++	 * the completed ones.
++	 */
++	list_splice_init(&fhc->active_list, &list);
++	if (!list_empty(&fhc->queue)) {
++		list_move(fhc->queue.next, &fhc->active_list);
++		fhc_dostart(fhc, fhc_first_active(fhc));
++	}
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++
++	list_for_each_entry_safe(desc, _desc, &list, desc_node)
++		fhc_descriptor_complete(fhc, desc, true);
++}
++
++/* Returns how many bytes were already received from source */
++static inline u32 fhc_get_sent(struct fh_dma_chan *fhc)
++{
++	u32 ctlhi = channel_readl(fhc, CTL_HI);
++	u32 ctllo = channel_readl(fhc, CTL_LO);
++
++	return (ctlhi & FHC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
++}
++
++static void fhc_scan_descriptors(struct fh_dma *fhd, struct fh_dma_chan *fhc)
++{
++	dma_addr_t llp;
++	struct fh_desc *desc, *_desc;
++	struct fh_desc *child;
++	u32 status_xfer;
++	unsigned long flags;
++
++	spin_lock_irqsave(&fhc->lock, flags);
++	/*
++	 * Clear block interrupt flag before scanning so that we don't
++	 * miss any, and read LLP before RAW_XFER to ensure it is
++	 * valid if we decide to scan the list.
++	 */
++	dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
++	llp = channel_readl(fhc, LLP);
++	status_xfer = dma_readl(fhd, RAW.XFER);
++
++	if (status_xfer & fhc->mask) {
++		/* Everything we've submitted is done */
++		dma_writel(fhd, CLEAR.XFER, fhc->mask);
++		if (test_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags)) {
++			struct list_head *head, *active = fhc->tx_node_active;
++
++			/*
++			 * We are inside first active descriptor.
++			 * Otherwise something is really wrong.
++			 */
++			desc = fhc_first_active(fhc);
++
++			head = &desc->tx_list;
++			if (active != head) {
++				/* Update desc to reflect last sent one */
++				if (active != head->next)
++					desc = to_fh_desc(active->prev);
++
++				fhc->residue -= desc->len;
++
++				child = to_fh_desc(active);
++
++				/* Submit next block */
++				fhc_do_single_block(fhc, child);
++
++				spin_unlock_irqrestore(&fhc->lock, flags);
++				return;
++			}
++
++			/* We are done here */
++			clear_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags);
++		}
++		fhc->residue = 0;
++
++		spin_unlock_irqrestore(&fhc->lock, flags);
++
++		fhc_complete_all(fhd, fhc);
++		return;
++	}
++
++	if (list_empty(&fhc->active_list)) {
++		fhc->residue = 0;
++		spin_unlock_irqrestore(&fhc->lock, flags);
++		return;
++	}
++
++	if (test_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags)) {
++		dev_vdbg(chan2dev(&fhc->chan), "%s: soft LLP mode\n", __func__);
++		spin_unlock_irqrestore(&fhc->lock, flags);
++		return;
++	}
++
++	dev_vdbg(chan2dev(&fhc->chan), "%s: llp=0x%llx\n", __func__,
++			(unsigned long long)llp);
++
++	list_for_each_entry_safe(desc, _desc, &fhc->active_list, desc_node) {
++		/* Initial residue value */
++		fhc->residue = desc->total_len;
++
++		/* Check first descriptors addr */
++		if (desc->txd.phys == llp) {
++			spin_unlock_irqrestore(&fhc->lock, flags);
++			return;
++		}
++
++		/* Check first descriptors llp */
++		if (desc->lli.llp == llp) {
++			/* This one is currently in progress */
++			fhc->residue -= fhc_get_sent(fhc);
++			spin_unlock_irqrestore(&fhc->lock, flags);
++			return;
++		}
++
++		fhc->residue -= desc->len;
++		list_for_each_entry(child, &desc->tx_list, desc_node) {
++			if (child->lli.llp == llp) {
++				/* Currently in progress */
++				fhc->residue -= fhc_get_sent(fhc);
++				spin_unlock_irqrestore(&fhc->lock, flags);
++				return;
++			}
++			fhc->residue -= child->len;
++		}
++
++		/*
++		 * No descriptors so far seem to be in progress, i.e.
++		 * this one must be done.
++		 */
++		spin_unlock_irqrestore(&fhc->lock, flags);
++		fhc_descriptor_complete(fhc, desc, true);
++		spin_lock_irqsave(&fhc->lock, flags);
++	}
++
++	dev_err(chan2dev(&fhc->chan),
++		"BUG: All descriptors done, but channel not idle!\n");
++
++	/* Try to continue after resetting the channel... */
++	fhc_chan_disable(fhd, fhc);
++
++	if (!list_empty(&fhc->queue)) {
++		list_move(fhc->queue.next, &fhc->active_list);
++		fhc_dostart(fhc, fhc_first_active(fhc));
++	}
++	spin_unlock_irqrestore(&fhc->lock, flags);
++}
++
++static inline void fhc_dump_lli(struct fh_dma_chan *fhc, struct fh_lli *lli)
++{
++	dev_crit(chan2dev(&fhc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
++		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
++}
++
++static void fhc_handle_error(struct fh_dma *fhd, struct fh_dma_chan *fhc)
++{
++	struct fh_desc *bad_desc;
++	struct fh_desc *child;
++	unsigned long flags;
++
++	fhc_scan_descriptors(fhd, fhc);
++
++	spin_lock_irqsave(&fhc->lock, flags);
++
++	/*
++	 * The descriptor currently at the head of the active list is
++	 * borked. Since we don't have any way to report errors, we'll
++	 * just have to scream loudly and try to carry on.
++	 */
++	bad_desc = fhc_first_active(fhc);
++	list_del_init(&bad_desc->desc_node);
++	list_move(fhc->queue.next, fhc->active_list.prev);
++
++	/* Clear the error flag and try to restart the controller */
++	dma_writel(fhd, CLEAR.ERROR, fhc->mask);
++	if (!list_empty(&fhc->active_list))
++		fhc_dostart(fhc, fhc_first_active(fhc));
++
++	/*
++	 * WARN may seem harsh, but since this only happens
++	 * when someone submits a bad physical address in a
++	 * descriptor, we should consider ourselves lucky that the
++	 * controller flagged an error instead of scribbling over
++	 * random memory locations.
++	 */
++	dev_WARN(chan2dev(&fhc->chan), "Bad descriptor submitted for DMA!\n"
++				       "  cookie: %d\n", bad_desc->txd.cookie);
++	fhc_dump_lli(fhc, &bad_desc->lli);
++	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
++		fhc_dump_lli(fhc, &child->lli);
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++
++	/* Pretend the descriptor completed successfully */
++	fhc_descriptor_complete(fhc, bad_desc, true);
++}
++
++/* --------------------- Cyclic DMA API extensions -------------------- */
++
++inline dma_addr_t fh_dma_get_src_addr(struct dma_chan *chan)
++{
++	struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
++	return channel_readl(fhc, SAR);
++}
++EXPORT_SYMBOL(fh_dma_get_src_addr);
++
++inline dma_addr_t fh_dma_get_dst_addr(struct dma_chan *chan)
++{
++	struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
++	return channel_readl(fhc, DAR);
++}
++EXPORT_SYMBOL(fh_dma_get_dst_addr);
++
++/* Called with fhc->lock held and all DMAC interrupts disabled */
++static void fhc_handle_cyclic(struct fh_dma *fhd, struct fh_dma_chan *fhc,
++		u32 status_err, u32 status_xfer, u32 status_block)
++{
++	unsigned long flags;
++
++	if (status_block & fhc->mask) {
++		void (*callback) (void *param);
++		void *callback_param;
++
++		dev_vdbg(chan2dev(&fhc->chan), "new cyclic period llp 0x%08x\n",
++			 channel_readl(fhc, LLP));
++		dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
++
++		callback = fhc->cdesc->period_callback;
++		callback_param = fhc->cdesc->period_callback_param;
++
++		if (callback)
++			callback(callback_param);
++	}
++
++	/*
++	 * Error and transfer complete are highly unlikely, and will most
++	 * likely be due to a configuration error by the user.
++	 */
++	if (unlikely(status_err & fhc->mask) ||
++			unlikely(status_xfer & fhc->mask)) {
++		int i;
++
++		dev_err(chan2dev(&fhc->chan), "cyclic DMA unexpected %s "
++				"interrupt, stopping DMA transfer\n",
++				status_xfer ? "xfer" : "error");
++
++		spin_lock_irqsave(&fhc->lock, flags);
++
++		fhc_dump_chan_regs(fhc);
++
++		fhc_chan_disable(fhd, fhc);
++
++		/* Make sure DMA does not restart by loading a new list */
++		channel_writel(fhc, LLP, 0);
++		channel_writel(fhc, CTL_LO, 0);
++		channel_writel(fhc, CTL_HI, 0);
++
++		dma_writel(fhd, CLEAR.ERROR, fhc->mask);
++		dma_writel(fhd, CLEAR.XFER, fhc->mask);
++		dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
++
++		for (i = 0; i < fhc->cdesc->periods; i++)
++			fhc_dump_lli(fhc, &fhc->cdesc->desc[i]->lli);
++
++		spin_unlock_irqrestore(&fhc->lock, flags);
++	}
++}
++
++/* ------------------------------------------------------------------------- */
++
++static void fh_dma_tasklet(unsigned long data)
++{
++	struct fh_dma *fhd = (struct fh_dma *)data;
++	struct fh_dma_chan *fhc;
++	u32 status_xfer;
++	u32 status_err;
++	u32 status_block;
++	int i;
++
++	status_xfer = dma_readl(fhd, RAW.XFER);
++	status_block = dma_readl(fhd, RAW.BLOCK);
++	status_err = dma_readl(fhd, RAW.ERROR);
++
++	dev_vdbg(fhd->dma.dev, "%s: status_err=%x\n", __func__, status_err);
++
++	for (i = 0; i < fhd->dma.chancnt; i++) {
++		fhc = &fhd->chan[i];
++		if (test_bit(FH_DMA_IS_CYCLIC, &fhc->flags))
++			fhc_handle_cyclic(fhd, fhc, status_err,
++					status_xfer, status_block);
++		else if (status_err & (1 << i))
++			fhc_handle_error(fhd, fhc);
++		else if (status_xfer & (1 << i))
++			fhc_scan_descriptors(fhd, fhc);
++	}
++
++	/*
++	 * Re-enable interrupts.
++	 */
++	channel_set_bit(fhd, MASK.XFER, fhd->all_chan_mask);
++	channel_set_bit(fhd, MASK.BLOCK, fhd->all_chan_mask);
++	channel_set_bit(fhd, MASK.ERROR, fhd->all_chan_mask);
++}
++
++static irqreturn_t fh_dma_interrupt(int irq, void *dev_id)
++{
++	struct fh_dma *fhd = dev_id;
++	u32 status;
++
++	dev_vdbg(fhd->dma.dev, "%s: status=0x%x\n", __func__,
++			dma_readl(fhd, STATUS_INT));
++
++	/*
++	 * Just disable the interrupts. We'll turn them back on in the
++	 * softirq handler.
++	 */
++	channel_clear_bit(fhd, MASK.XFER, fhd->all_chan_mask);
++	channel_clear_bit(fhd, MASK.BLOCK, fhd->all_chan_mask);
++	channel_clear_bit(fhd, MASK.ERROR, fhd->all_chan_mask);
++
++	status = dma_readl(fhd, STATUS_INT);
++	if (status) {
++		dev_err(fhd->dma.dev,
++			"BUG: Unexpected interrupts pending: 0x%x\n",
++			status);
++
++		/* Try to recover */
++		channel_clear_bit(fhd, MASK.XFER, (1 << 8) - 1);
++		channel_clear_bit(fhd, MASK.BLOCK, (1 << 8) - 1);
++		channel_clear_bit(fhd, MASK.SRC_TRAN, (1 << 8) - 1);
++		channel_clear_bit(fhd, MASK.DST_TRAN, (1 << 8) - 1);
++		channel_clear_bit(fhd, MASK.ERROR, (1 << 8) - 1);
++	}
++
++	tasklet_schedule(&fhd->tasklet);
++
++	return IRQ_HANDLED;
++}
++
++/*----------------------------------------------------------------------*/
++
++static dma_cookie_t fhc_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++	struct fh_desc		*desc = txd_to_fh_desc(tx);
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(tx->chan);
++	dma_cookie_t		cookie;
++	unsigned long		flags;
++
++	spin_lock_irqsave(&fhc->lock, flags);
++	cookie = dma_cookie_assign(tx);
++
++	/*
++	 * REVISIT: We should attempt to chain as many descriptors as
++	 * possible, perhaps even appending to those already submitted
++	 * for DMA. But this is hard to do in a race-free manner.
++	 */
++	if (list_empty(&fhc->active_list)) {
++		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
++				desc->txd.cookie);
++		list_add_tail(&desc->desc_node, &fhc->active_list);
++		fhc_dostart(fhc, fhc_first_active(fhc));
++	} else {
++		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
++				desc->txd.cookie);
++
++		list_add_tail(&desc->desc_node, &fhc->queue);
++	}
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++
++	return cookie;
++}
++
++static struct dma_async_tx_descriptor *
++fhc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
++		size_t len, unsigned long flags)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++	struct fh_dma		*fhd = to_fh_dma(chan->device);
++	struct fh_desc		*desc;
++	struct fh_desc		*first;
++	struct fh_desc		*prev;
++	size_t			xfer_count;
++	size_t			offset;
++	unsigned int		src_width;
++	unsigned int		dst_width;
++	unsigned int		data_width;
++	u32			ctllo;
++
++	dev_vdbg(chan2dev(chan),
++			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
++			(unsigned long long)dest, (unsigned long long)src,
++			len, flags);
++
++	if (unlikely(!len)) {
++		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
++		return NULL;
++	}
++
++	fhc->direction = DMA_MEM_TO_MEM;
++
++	data_width = min_t(unsigned int, fhd->data_width[fhc->src_master],
++			   fhd->data_width[fhc->dst_master]);
++
++	src_width = dst_width = min_t(unsigned int, data_width,
++				      fhc_fast_fls(src | dest | len));
++
++	ctllo = FHC_DEFAULT_CTLLO(chan)
++			| FHC_CTLL_DST_WIDTH(dst_width)
++			| FHC_CTLL_SRC_WIDTH(src_width)
++			| FHC_CTLL_DST_INC
++			| FHC_CTLL_SRC_INC
++			| FHC_CTLL_FC_M2M;
++	prev = first = NULL;
++
++	for (offset = 0; offset < len; offset += xfer_count << src_width) {
++		xfer_count = min_t(size_t, (len - offset) >> src_width,
++					   fhc->block_size);
++
++		desc = fhc_desc_get(fhc);
++		if (!desc)
++			goto err_desc_get;
++
++		desc->lli.sar = src + offset;
++		desc->lli.dar = dest + offset;
++		desc->lli.ctllo = ctllo;
++		desc->lli.ctlhi = xfer_count;
++		desc->len = xfer_count << src_width;
++
++		if (!first) {
++			first = desc;
++		} else {
++			prev->lli.llp = desc->txd.phys;
++			list_add_tail(&desc->desc_node,
++					&first->tx_list);
++		}
++		prev = desc;
++	}
++
++	if (flags & DMA_PREP_INTERRUPT)
++		/* Trigger interrupt after last block */
++		prev->lli.ctllo |= FHC_CTLL_INT_EN;
++
++	prev->lli.llp = 0;
++	first->txd.flags = flags;
++	first->total_len = len;
++
++	return &first->txd;
++
++err_desc_get:
++	fhc_desc_put(fhc, first);
++	return NULL;
++}
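++
++/*
++ * Client-side sketch (assumes a channel obtained via dma_request_channel()
++ * with DMA_MEMCPY in the capability mask):
++ *
++ *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
++ *						   DMA_PREP_INTERRUPT);
++ *	cookie = txd->tx_submit(txd);
++ *	dma_async_issue_pending(chan);
++ *	dma_sync_wait(chan, cookie);
++ */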
++
++static struct dma_async_tx_descriptor *
++fhc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
++		unsigned int sg_len, enum dma_transfer_direction direction,
++		unsigned long flags, void *context)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++	struct fh_dma		*fhd = to_fh_dma(chan->device);
++	struct dma_slave_config	*sconfig = &fhc->dma_sconfig;
++	struct fh_desc		*prev;
++	struct fh_desc		*first;
++	u32			ctllo;
++	dma_addr_t		reg;
++	unsigned int		reg_width;
++	unsigned int		mem_width;
++	unsigned int		data_width;
++	unsigned int		i;
++	struct scatterlist	*sg;
++	size_t			total_len = 0;
++	struct fh_dma_pri *fh_pri = (struct fh_dma_pri *)context;
++
++	dev_vdbg(chan2dev(chan), "%s\n", __func__);
++
++	if (unlikely(!is_slave_direction(direction) || !sg_len))
++		return NULL;
++
++	fhc->direction = direction;
++
++	prev = first = NULL;
++
++	switch (direction) {
++	case DMA_MEM_TO_DEV:
++		reg_width = __fls(sconfig->dst_addr_width);
++		reg = sconfig->dst_addr;
++		if (!fh_pri) {
++			ctllo = (FHC_DEFAULT_CTLLO(chan)
++				 | FHC_CTLL_DST_WIDTH(reg_width)
++				 | FHC_CTLL_DST_FIX
++				 | FHC_CTLL_SRC_INC);
++		} else {
++			ctllo = (FHC_DEFAULT_CTLLO(chan)
++				 | FHC_CTLL_DST_WIDTH(reg_width));
++			ctllo |= fh_pri->sinc << 9;
++			ctllo |= fh_pri->dinc << 7;
++		}
++
++		ctllo |= sconfig->device_fc ? FHC_CTLL_FC(FH_DMA_FC_P_M2P) :
++			FHC_CTLL_FC(FH_DMA_FC_D_M2P);
++
++		data_width = fhd->data_width[fhc->src_master];
++
++		for_each_sg(sgl, sg, sg_len, i) {
++			struct fh_desc	*desc;
++			u32		len, dlen, mem;
++
++			mem = sg_dma_address(sg);
++			len = sg_dma_len(sg);
++
++			mem_width = min_t(unsigned int,
++					  data_width, fhc_fast_fls(mem | len));
++
++slave_sg_todev_fill_desc:
++			desc = fhc_desc_get(fhc);
++			if (!desc) {
++				dev_err(chan2dev(chan),
++					"not enough descriptors available\n");
++				goto err_desc_get;
++			}
++
++			desc->lli.sar = mem;
++			desc->lli.dar = reg;
++			desc->lli.ctllo = ctllo | FHC_CTLL_SRC_WIDTH(mem_width);
++			if ((len >> mem_width) > fhc->block_size) {
++				dlen = fhc->block_size << mem_width;
++				mem += dlen;
++				len -= dlen;
++			} else {
++				dlen = len;
++				len = 0;
++			}
++
++			desc->lli.ctlhi = dlen >> mem_width;
++			desc->len = dlen;
++
++			if (!first) {
++				first = desc;
++			} else {
++				prev->lli.llp = desc->txd.phys;
++				list_add_tail(&desc->desc_node,
++						&first->tx_list);
++			}
++			prev = desc;
++			total_len += dlen;
++
++			if (len)
++				goto slave_sg_todev_fill_desc;
++		}
++		break;
++	case DMA_DEV_TO_MEM:
++		reg_width = __fls(sconfig->src_addr_width);
++		reg = sconfig->src_addr;
++
++		if (!fh_pri) {
++			ctllo = (FHC_DEFAULT_CTLLO(chan)
++				 | FHC_CTLL_SRC_WIDTH(reg_width)
++				 | FHC_CTLL_DST_INC
++				 | FHC_CTLL_SRC_FIX);
++		} else {
++			ctllo = (FHC_DEFAULT_CTLLO(chan)
++				 | FHC_CTLL_SRC_WIDTH(reg_width));
++			ctllo |= fh_pri->sinc << 9;
++			ctllo |= fh_pri->dinc << 7;
++		}
++
++		ctllo |= sconfig->device_fc ? FHC_CTLL_FC(FH_DMA_FC_P_P2M) :
++			FHC_CTLL_FC(FH_DMA_FC_D_P2M);
++
++		data_width = fhd->data_width[fhc->dst_master];
++
++		for_each_sg(sgl, sg, sg_len, i) {
++			struct fh_desc	*desc;
++			u32		len, dlen, mem;
++
++			mem = sg_dma_address(sg);
++			len = sg_dma_len(sg);
++
++			mem_width = min_t(unsigned int,
++					  data_width, fhc_fast_fls(mem | len));
++
++slave_sg_fromdev_fill_desc:
++			desc = fhc_desc_get(fhc);
++			if (!desc) {
++				dev_err(chan2dev(chan),
++						"not enough descriptors available\n");
++				goto err_desc_get;
++			}
++
++			desc->lli.sar = reg;
++			desc->lli.dar = mem;
++			desc->lli.ctllo = ctllo | FHC_CTLL_DST_WIDTH(mem_width);
++			if ((len >> reg_width) > fhc->block_size) {
++				dlen = fhc->block_size << reg_width;
++				mem += dlen;
++				len -= dlen;
++			} else {
++				dlen = len;
++				len = 0;
++			}
++			desc->lli.ctlhi = dlen >> reg_width;
++			desc->len = dlen;
++
++			if (!first) {
++				first = desc;
++			} else {
++				prev->lli.llp = desc->txd.phys;
++				list_add_tail(&desc->desc_node,
++						&first->tx_list);
++			}
++			prev = desc;
++			total_len += dlen;
++
++			if (len)
++				goto slave_sg_fromdev_fill_desc;
++		}
++		break;
++	default:
++		return NULL;
++	}
++
++	if (flags & DMA_PREP_INTERRUPT)
++		/* Trigger interrupt after last block */
++		prev->lli.ctllo |= FHC_CTLL_INT_EN;
++
++	prev->lli.llp = 0;
++	first->total_len = total_len;
++
++	return &first->txd;
++
++err_desc_get:
++	fhc_desc_put(fhc, first);
++	return NULL;
++}
++
++/*
++ * Fix sconfig's burst size according to fh_dmac. We need to convert them as:
++ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
++ *
++ * NOTE: burst size 2 is not supported by the controller.
++ *
++ * The conversion is done by taking the position of the most significant
++ * set bit, fls(n), and subtracting 2.
++ */
++static inline void convert_burst(u32 *maxburst)
++{
++	if (*maxburst > 1)
++		*maxburst = fls(*maxburst) - 2;
++	else
++		*maxburst = 0;
++}
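++
++/*
++ * Worked example (editor's illustration) of convert_burst():
++ *   maxburst = 1  -> 0  (handled by the else branch)
++ *   maxburst = 4  -> fls(4)  - 2 = 1
++ *   maxburst = 8  -> fls(8)  - 2 = 2
++ *   maxburst = 16 -> fls(16) - 2 = 3
++ */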
++
++static int
++set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
++{
++	struct fh_dma_chan *fhc = to_fh_dma_chan(chan);
++
++	/* Check if chan will be configured for slave transfers */
++	if (!is_slave_direction(sconfig->direction))
++		return -EINVAL;
++
++	memcpy(&fhc->dma_sconfig, sconfig, sizeof(*sconfig));
++	fhc->direction = sconfig->direction;
++
++	/* Take the request line from slave_id member */
++	if (fhc->request_line == ~0)
++		fhc->request_line = sconfig->slave_id;
++
++	convert_burst(&fhc->dma_sconfig.src_maxburst);
++	convert_burst(&fhc->dma_sconfig.dst_maxburst);
++
++	return 0;
++}
++
++static inline void fhc_chan_pause(struct fh_dma_chan *fhc)
++{
++	u32 cfglo = channel_readl(fhc, CFG_LO);
++	unsigned int count = 20;	/* timeout iterations */
++
++	channel_writel(fhc, CFG_LO, cfglo | FHC_CFGL_CH_SUSP);
++	while (!(channel_readl(fhc, CFG_LO) & FHC_CFGL_FIFO_EMPTY) && count--)
++		udelay(2);
++
++	fhc->paused = true;
++}
++
++static inline void fhc_chan_resume(struct fh_dma_chan *fhc)
++{
++	u32 cfglo = channel_readl(fhc, CFG_LO);
++
++	channel_writel(fhc, CFG_LO, cfglo & ~FHC_CFGL_CH_SUSP);
++
++	fhc->paused = false;
++}
++
++static int fhc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
++		       unsigned long arg)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++	struct fh_dma		*fhd = to_fh_dma(chan->device);
++	struct fh_desc		*desc, *_desc;
++	unsigned long		flags;
++	LIST_HEAD(list);
++
++	if (cmd == DMA_PAUSE) {
++		spin_lock_irqsave(&fhc->lock, flags);
++
++		fhc_chan_pause(fhc);
++
++		spin_unlock_irqrestore(&fhc->lock, flags);
++	} else if (cmd == DMA_RESUME) {
++		if (!fhc->paused)
++			return 0;
++
++		spin_lock_irqsave(&fhc->lock, flags);
++
++		fhc_chan_resume(fhc);
++
++		spin_unlock_irqrestore(&fhc->lock, flags);
++	} else if (cmd == DMA_TERMINATE_ALL) {
++		spin_lock_irqsave(&fhc->lock, flags);
++
++		clear_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags);
++
++		fhc_chan_disable(fhd, fhc);
++
++		fhc_chan_resume(fhc);
++
++		/* active_list entries will end up before queued entries */
++		list_splice_init(&fhc->queue, &list);
++		list_splice_init(&fhc->active_list, &list);
++
++		spin_unlock_irqrestore(&fhc->lock, flags);
++
++		/* Flush all pending and queued descriptors */
++		list_for_each_entry_safe(desc, _desc, &list, desc_node)
++			fhc_descriptor_complete(fhc, desc, false);
++	} else if (cmd == DMA_SLAVE_CONFIG) {
++		return set_runtime_config(chan, (struct dma_slave_config *)arg);
++	} else {
++		return -ENXIO;
++	}
++
++	return 0;
++}
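++
++/*
++ * Client-side sketch (editor's illustration, not part of this patch):
++ * with this era of the dmaengine API, peripheral drivers reach
++ * fhc_control() through the generic wrappers. "fifo_phys" below is a
++ * hypothetical FIFO address.
++ *
++ *	struct dma_slave_config cfg = {
++ *		.direction	= DMA_MEM_TO_DEV,
++ *		.dst_addr	= fifo_phys,
++ *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
++ *		.dst_maxburst	= 8,
++ *	};
++ *	dmaengine_slave_config(chan, &cfg);	// -> DMA_SLAVE_CONFIG
++ *	dmaengine_terminate_all(chan);		// -> DMA_TERMINATE_ALL
++ */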
++
++static inline u32 fhc_get_residue(struct fh_dma_chan *fhc)
++{
++	unsigned long flags;
++	u32 residue;
++
++	spin_lock_irqsave(&fhc->lock, flags);
++
++	residue = fhc->residue;
++	if (test_bit(FH_DMA_IS_SOFT_LLP, &fhc->flags) && residue)
++		residue -= fhc_get_sent(fhc);
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++	return residue;
++}
++
++static enum dma_status
++fhc_tx_status(struct dma_chan *chan,
++	      dma_cookie_t cookie,
++	      struct dma_tx_state *txstate)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++	enum dma_status		ret;
++
++	ret = dma_cookie_status(chan, cookie, txstate);
++	if (ret != DMA_SUCCESS) {
++		fhc_scan_descriptors(to_fh_dma(chan->device), fhc);
++
++		ret = dma_cookie_status(chan, cookie, txstate);
++	}
++
++	if (ret != DMA_SUCCESS)
++		dma_set_residue(txstate, fhc_get_residue(fhc));
++
++	if (fhc->paused)
++		return DMA_PAUSED;
++
++	return ret;
++}
++
++static void fhc_issue_pending(struct dma_chan *chan)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++
++	if (!list_empty(&fhc->queue))
++		fhc_scan_descriptors(to_fh_dma(chan->device), fhc);
++}
++
++static int fhc_alloc_chan_resources(struct dma_chan *chan)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++	struct fh_dma		*fhd = to_fh_dma(chan->device);
++	struct fh_desc		*desc;
++	int			i;
++	unsigned long		flags;
++
++	dev_vdbg(chan2dev(chan), "%s\n", __func__);
++
++	/* ASSERT:  channel is idle */
++	if (dma_readl(fhd, CH_EN) & fhc->mask) {
++		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
++		return -EIO;
++	}
++
++	dma_cookie_init(chan);
++
++	/*
++	 * NOTE: some controllers may have additional features that we
++	 * need to initialize here, like "scatter-gather" (which
++	 * doesn't mean what you think it means), and status writeback.
++	 */
++
++	fhc_set_masters(fhc);
++
++	spin_lock_irqsave(&fhc->lock, flags);
++	i = fhc->descs_allocated;
++	while (fhc->descs_allocated < NR_DESCS_PER_CHANNEL) {
++		dma_addr_t phys;
++
++		spin_unlock_irqrestore(&fhc->lock, flags);
++
++		desc = dma_pool_alloc(fhd->desc_pool, GFP_ATOMIC, &phys);
++		if (!desc)
++			goto err_desc_alloc;
++
++		memset(desc, 0, sizeof(struct fh_desc));
++
++		INIT_LIST_HEAD(&desc->tx_list);
++		dma_async_tx_descriptor_init(&desc->txd, chan);
++		desc->txd.tx_submit = fhc_tx_submit;
++		desc->txd.flags = DMA_CTRL_ACK;
++		desc->txd.phys = phys;
++
++		fhc_desc_put(fhc, desc);
++
++		spin_lock_irqsave(&fhc->lock, flags);
++		i = ++fhc->descs_allocated;
++	}
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++
++	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
++
++	return i;
++
++err_desc_alloc:
++	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
++
++	return i;
++}
++
++static void fhc_free_chan_resources(struct dma_chan *chan)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++	struct fh_dma		*fhd = to_fh_dma(chan->device);
++	struct fh_desc		*desc, *_desc;
++	unsigned long		flags;
++	LIST_HEAD(list);
++
++	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
++			fhc->descs_allocated);
++
++	/* ASSERT:  channel is idle */
++	BUG_ON(!list_empty(&fhc->active_list));
++	BUG_ON(!list_empty(&fhc->queue));
++	BUG_ON(dma_readl(to_fh_dma(chan->device), CH_EN) & fhc->mask);
++
++	spin_lock_irqsave(&fhc->lock, flags);
++	list_splice_init(&fhc->free_list, &list);
++	fhc->descs_allocated = 0;
++	fhc->initialized = false;
++	fhc->request_line = ~0;
++
++	/* Disable interrupts */
++	channel_clear_bit(fhd, MASK.XFER, fhc->mask);
++	channel_clear_bit(fhd, MASK.BLOCK, fhc->mask);
++	channel_clear_bit(fhd, MASK.ERROR, fhc->mask);
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++
++	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
++		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
++		dma_pool_free(fhd->desc_pool, desc, desc->txd.phys);
++	}
++
++	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
++}
++
++
++/* --------------------- Cyclic DMA API extensions -------------------- */
++
++/**
++ * fh_dma_cyclic_start - start the cyclic DMA transfer
++ * @chan: the DMA channel to start
++ *
++ * Must be called with soft interrupts disabled. Returns zero on success or
++ * -errno on failure.
++ */
++int fh_dma_cyclic_start(struct dma_chan *chan)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++	struct fh_dma		*fhd = to_fh_dma(fhc->chan.device);
++	unsigned long		flags;
++
++	if (!test_bit(FH_DMA_IS_CYCLIC, &fhc->flags)) {
++		dev_err(chan2dev(&fhc->chan), "missing prep for cyclic DMA\n");
++		return -ENODEV;
++	}
++
++	spin_lock_irqsave(&fhc->lock, flags);
++
++	/* Assert channel is idle */
++	if (dma_readl(fhd, CH_EN) & fhc->mask) {
++		dev_err(chan2dev(&fhc->chan),
++			"BUG: Attempted to start non-idle channel\n");
++		fhc_dump_chan_regs(fhc);
++		spin_unlock_irqrestore(&fhc->lock, flags);
++		return -EBUSY;
++	}
++
++	dma_writel(fhd, CLEAR.ERROR, fhc->mask);
++	dma_writel(fhd, CLEAR.XFER, fhc->mask);
++	dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
++
++	fhc_initialize(fhc);
++
++	/* Setup DMAC channel registers */
++	channel_writel(fhc, LLP, fhc->cdesc->desc[0]->txd.phys);
++	channel_writel(fhc, CTL_LO, FHC_CTLL_LLP_D_EN | FHC_CTLL_LLP_S_EN);
++	channel_writel(fhc, CTL_HI, 0);
++
++	channel_set_bit(fhd, CH_EN, fhc->mask);
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++
++	return 0;
++}
++EXPORT_SYMBOL(fh_dma_cyclic_start);
++
++/**
++ * fh_dma_cyclic_stop - stop the cyclic DMA transfer
++ * @chan: the DMA channel to stop
++ *
++ * Must be called with soft interrupts disabled.
++ */
++void fh_dma_cyclic_stop(struct dma_chan *chan)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++	struct fh_dma		*fhd = to_fh_dma(fhc->chan.device);
++	unsigned long		flags;
++
++	spin_lock_irqsave(&fhc->lock, flags);
++
++	fhc_chan_disable(fhd, fhc);
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++}
++EXPORT_SYMBOL(fh_dma_cyclic_stop);
++
++/**
++ * fh_dma_cyclic_prep - prepare the cyclic DMA transfer
++ * @chan: the DMA channel to prepare
++ * @buf_addr: physical DMA address where the buffer starts
++ * @buf_len: total number of bytes for the entire buffer
++ * @period_len: number of bytes for each period
++ * @direction: transfer direction, to or from device
++ *
++ * Must be called before trying to start the transfer. Returns a valid struct
++ * fh_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
++ */
++struct fh_cyclic_desc *fh_dma_cyclic_prep(struct dma_chan *chan,
++		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
++		enum dma_transfer_direction direction)
++{
++	struct fh_dma_chan		*fhc = to_fh_dma_chan(chan);
++	struct fh_dma_slave 		*fhs = chan->private;
++	struct fh_cyclic_desc		*cdesc;
++	struct fh_cyclic_desc		*retval = NULL;
++	struct fh_desc			*desc;
++	struct fh_desc			*last = NULL;
++	unsigned long			was_cyclic;
++	unsigned int			reg_width;
++	unsigned int			periods;
++	unsigned int			i;
++	unsigned long			flags;
++
++	spin_lock_irqsave(&fhc->lock, flags);
++	if (fhc->nollp) {
++		spin_unlock_irqrestore(&fhc->lock, flags);
++		dev_dbg(chan2dev(&fhc->chan),
++				"channel doesn't support LLP transfers\n");
++		return ERR_PTR(-EINVAL);
++	}
++
++	if (!list_empty(&fhc->queue) || !list_empty(&fhc->active_list)) {
++		spin_unlock_irqrestore(&fhc->lock, flags);
++		dev_dbg(chan2dev(&fhc->chan),
++				"queue and/or active list are not empty\n");
++		return ERR_PTR(-EBUSY);
++	}
++
++	was_cyclic = test_and_set_bit(FH_DMA_IS_CYCLIC, &fhc->flags);
++	spin_unlock_irqrestore(&fhc->lock, flags);
++	if (was_cyclic) {
++		dev_dbg(chan2dev(&fhc->chan),
++				"channel already prepared for cyclic DMA\n");
++		return ERR_PTR(-EBUSY);
++	}
++
++	retval = ERR_PTR(-EINVAL);
++
++	reg_width = fhs->reg_width;
++
++	if (unlikely(!is_slave_direction(direction)))
++		goto out_err;
++
++	fhc->direction = direction;
++
++	periods = buf_len / period_len;
++
++	/* Check for too big/unaligned periods and unaligned DMA buffer. */
++	if (period_len > (fhc->block_size << reg_width))
++		goto out_err;
++	if (unlikely(period_len & ((1 << reg_width) - 1)))
++		goto out_err;
++	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
++		goto out_err;
++
++	retval = ERR_PTR(-ENOMEM);
++
++	if (periods > NR_DESCS_PER_CHANNEL)
++		goto out_err;
++
++	cdesc = kzalloc(sizeof(struct fh_cyclic_desc), GFP_KERNEL);
++	if (!cdesc)
++		goto out_err;
++
++	cdesc->desc = kzalloc(sizeof(struct fh_desc *) * periods, GFP_KERNEL);
++	if (!cdesc->desc)
++		goto out_err_alloc;
++
++	for (i = 0; i < periods; i++) {
++		desc = fhc_desc_get(fhc);
++		if (!desc)
++			goto out_err_desc_get;
++
++		switch (direction) {
++		case DMA_MEM_TO_DEV:
++			desc->lli.dar = fhs->tx_reg;
++			desc->lli.sar = buf_addr + (period_len * i);
++			desc->lli.ctllo = (FHC_DEFAULT_CTLLO_OLD(chan->private)
++					| FHC_CTLL_DST_WIDTH(reg_width)
++					| FHC_CTLL_SRC_WIDTH(reg_width)
++					| FHC_CTLL_DST_FIX
++					| FHC_CTLL_SRC_INC
++					| FHC_CTLL_FC(fhs->fc)
++					| FHC_CTLL_INT_EN);
++
++			break;
++		case DMA_DEV_TO_MEM:
++			desc->lli.dar = buf_addr + (period_len * i);
++			desc->lli.sar = fhs->rx_reg;
++			desc->lli.ctllo = (FHC_DEFAULT_CTLLO_OLD(chan->private)
++					| FHC_CTLL_SRC_WIDTH(reg_width)
++					| FHC_CTLL_DST_WIDTH(reg_width)
++					| FHC_CTLL_DST_INC
++					| FHC_CTLL_SRC_FIX
++					| FHC_CTLL_FC(fhs->fc)
++					| FHC_CTLL_INT_EN);
++
++			break;
++		default:
++			break;
++		}
++
++		desc->lli.ctlhi = (period_len >> reg_width);
++		cdesc->desc[i] = desc;
++
++		if (last) {
++			last->lli.llp = desc->txd.phys;
++			dma_sync_single_for_device(chan2parent(chan),
++						   last->txd.phys,
++						   sizeof(last->lli),
++						   DMA_TO_DEVICE);
++		}
++
++		last = desc;
++	}
++
++	/* Let's make a cyclic list */
++	last->lli.llp = cdesc->desc[0]->txd.phys;
++	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
++				   sizeof(last->lli), DMA_TO_DEVICE);
++
++	dev_dbg(chan2dev(&fhc->chan), "cyclic prepared buf 0x%llx len %zu "
++			"period %zu periods %d\n", (unsigned long long)buf_addr,
++			buf_len, period_len, periods);
++
++	cdesc->periods = periods;
++	fhc->cdesc = cdesc;
++
++	return cdesc;
++
++out_err_desc_get:
++	while (i--)
++		fhc_desc_put(fhc, cdesc->desc[i]);
++out_err_alloc:
++	kfree(cdesc);
++out_err:
++	clear_bit(FH_DMA_IS_CYCLIC, &fhc->flags);
++	return (struct fh_cyclic_desc *)retval;
++}
++EXPORT_SYMBOL(fh_dma_cyclic_prep);
++
++/**
++ * fh_dma_cyclic_free - free a prepared cyclic DMA transfer
++ * @chan: the DMA channel to free
++ */
++void fh_dma_cyclic_free(struct dma_chan *chan)
++{
++	struct fh_dma_chan	*fhc = to_fh_dma_chan(chan);
++	struct fh_dma		*fhd = to_fh_dma(fhc->chan.device);
++	struct fh_cyclic_desc	*cdesc = fhc->cdesc;
++	int			i;
++	unsigned long		flags;
++
++	dev_dbg(chan2dev(&fhc->chan), "%s\n", __func__);
++
++	if (!cdesc)
++		return;
++
++	spin_lock_irqsave(&fhc->lock, flags);
++
++	fhc_chan_disable(fhd, fhc);
++
++	dma_writel(fhd, CLEAR.ERROR, fhc->mask);
++	dma_writel(fhd, CLEAR.XFER, fhc->mask);
++	dma_writel(fhd, CLEAR.BLOCK, fhc->mask);
++
++	spin_unlock_irqrestore(&fhc->lock, flags);
++
++	for (i = 0; i < cdesc->periods; i++)
++		fhc_desc_put(fhc, cdesc->desc[i]);
++
++	kfree(cdesc->desc);
++	kfree(cdesc);
++
++	clear_bit(FH_DMA_IS_CYCLIC, &fhc->flags);
++}
++EXPORT_SYMBOL(fh_dma_cyclic_free);
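++
++/*
++ * Usage sketch (editor's illustration, not part of this patch): a client
++ * is expected to drive the cyclic API as prep -> start -> stop -> free.
++ * "buf_phys" and "PERIOD_LEN" below are hypothetical.
++ *
++ *	struct fh_cyclic_desc *cdesc;
++ *
++ *	cdesc = fh_dma_cyclic_prep(chan, buf_phys, 4 * PERIOD_LEN,
++ *				   PERIOD_LEN, DMA_MEM_TO_DEV);
++ *	if (IS_ERR(cdesc))
++ *		return PTR_ERR(cdesc);
++ *	fh_dma_cyclic_start(chan);
++ *	...
++ *	fh_dma_cyclic_stop(chan);
++ *	fh_dma_cyclic_free(chan);
++ */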
++
++/*----------------------------------------------------------------------*/
++
++static void fh_dma_off(struct fh_dma *fhd)
++{
++	int i;
++
++	dma_writel(fhd, CFG, 0);
++
++	channel_clear_bit(fhd, MASK.XFER, fhd->all_chan_mask);
++	channel_clear_bit(fhd, MASK.BLOCK, fhd->all_chan_mask);
++	channel_clear_bit(fhd, MASK.SRC_TRAN, fhd->all_chan_mask);
++	channel_clear_bit(fhd, MASK.DST_TRAN, fhd->all_chan_mask);
++	channel_clear_bit(fhd, MASK.ERROR, fhd->all_chan_mask);
++
++	while (dma_readl(fhd, CFG) & FH_CFG_DMA_EN)
++		cpu_relax();
++
++	for (i = 0; i < fhd->dma.chancnt; i++)
++		fhd->chan[i].initialized = false;
++}
++
++static int fh_dma_probe(struct platform_device *pdev)
++{
++	struct fh_dma_platform_data *pdata;
++	struct resource		*io;
++	struct fh_dma		*fhd;
++	size_t			size;
++	void __iomem		*regs;
++	bool			autocfg;
++	unsigned int		fh_params;
++	unsigned int		nr_channels;
++	unsigned int		max_blk_size = 0;
++	int			irq;
++	int			err;
++	int			i;
++
++	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!io)
++		return -EINVAL;
++
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0)
++		return irq;
++
++	if (!request_mem_region(io->start, FH_REGLEN, pdev->dev.driver->name))
++		return -EBUSY;
++
++	regs = ioremap(io->start, FH_REGLEN);
++	if (!regs) {
++		err = -ENOMEM;
++		goto err_release_r;
++	}
++
++	/* Apply default dma_mask if needed */
++	if (!pdev->dev.dma_mask) {
++		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
++		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
++	}
++
++	fh_params = dma_read_byaddr(regs, FH_PARAMS);
++	autocfg = fh_params >> FH_PARAMS_EN & 0x1;
++
++	dev_dbg(&pdev->dev, "FH_PARAMS: 0x%08x\n", fh_params);
++
++	pdata = dev_get_platdata(&pdev->dev);
++
++	if (!pdata && autocfg) {
++		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
++		if (!pdata)
++			return -ENOMEM;
++
++		/* Fill platform data with the default values */
++		pdata->is_private = true;
++		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
++		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
++	} else if (!pdata || pdata->nr_channels > FH_DMA_MAX_NR_CHANNELS)
++		return -EINVAL;
++
++	if (autocfg)
++		nr_channels = (fh_params >> FH_PARAMS_NR_CHAN & 0x7) + 1;
++	else
++		nr_channels = pdata->nr_channels;
++
++	size = sizeof(struct fh_dma) + nr_channels * sizeof(struct fh_dma_chan);
++	fhd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
++	if (!fhd)
++		return -ENOMEM;
++
++	fhd->clk = clk_get(&pdev->dev, "ahb_clk");
++	if (IS_ERR(fhd->clk))
++		return PTR_ERR(fhd->clk);
++	clk_enable(fhd->clk);
++
++	fhd->regs = regs;
++
++	/* Get hardware configuration parameters */
++	if (autocfg) {
++		max_blk_size = dma_readl(fhd, MAX_BLK_SIZE);
++
++		fhd->nr_masters = (fh_params >> FH_PARAMS_NR_MASTER & 3) + 1;
++		for (i = 0; i < fhd->nr_masters; i++) {
++			fhd->data_width[i] =
++				(fh_params >> FH_PARAMS_DATA_WIDTH(i) & 3) + 2;
++		}
++	} else {
++		fhd->nr_masters = pdata->nr_masters;
++		memcpy(fhd->data_width, pdata->data_width, 4);
++	}
++
++	/* Calculate all channel mask before DMA setup */
++	fhd->all_chan_mask = (1 << nr_channels) - 1;
++
++	/* Force dma off, just in case */
++	fh_dma_off(fhd);
++
++	/* Disable BLOCK interrupts as well */
++	channel_clear_bit(fhd, MASK.BLOCK, fhd->all_chan_mask);
++
++	err = devm_request_irq(&pdev->dev, irq, fh_dma_interrupt, 0,
++			       "fh_dmac", fhd);
++	if (err)
++		return err;
++
++	platform_set_drvdata(pdev, fhd);
++
++	/* Create a pool of consistent memory blocks for hardware descriptors */
++	fhd->desc_pool = dmam_pool_create("fh_dmac_desc_pool", &pdev->dev,
++					 sizeof(struct fh_desc), 4, 0);
++	if (!fhd->desc_pool) {
++		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
++		return -ENOMEM;
++	}
++
++	tasklet_init(&fhd->tasklet, fh_dma_tasklet, (unsigned long)fhd);
++
++	INIT_LIST_HEAD(&fhd->dma.channels);
++	for (i = 0; i < nr_channels; i++) {
++		struct fh_dma_chan	*fhc = &fhd->chan[i];
++		int			r = nr_channels - i - 1;
++
++		fhc->chan.device = &fhd->dma;
++		dma_cookie_init(&fhc->chan);
++		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
++			list_add_tail(&fhc->chan.device_node,
++					&fhd->dma.channels);
++		else
++			list_add(&fhc->chan.device_node, &fhd->dma.channels);
++
++		/* 7 is highest priority & 0 is lowest. */
++		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
++			fhc->priority = r;
++		else
++			fhc->priority = i;
++
++		fhc->ch_regs = &__fh_regs(fhd)->CHAN[i];
++		spin_lock_init(&fhc->lock);
++		fhc->mask = 1 << i;
++
++		INIT_LIST_HEAD(&fhc->active_list);
++		INIT_LIST_HEAD(&fhc->queue);
++		INIT_LIST_HEAD(&fhc->free_list);
++
++		channel_clear_bit(fhd, CH_EN, fhc->mask);
++
++		fhc->direction = DMA_TRANS_NONE;
++		fhc->request_line = ~0;
++
++		/* Hardware configuration */
++		if (autocfg) {
++			unsigned int fhc_params;
++
++			fhc_params = dma_read_byaddr(regs + r * sizeof(u32),
++						     FHC_PARAMS);
++
++			dev_dbg(&pdev->dev, "FHC_PARAMS[%d]: 0x%08x\n", i,
++					    fhc_params);
++
++			/* Decode maximum block size for given channel. The
++			 * stored 4 bit value represents blocks from 0x00 for 3
++			 * up to 0x0a for 4095. */
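++			/* e.g. an encoded value of 0x0 gives (4 << 0) - 1 = 3
++			 * and 0xa gives (4 << 10) - 1 = 4095 elements
++			 * (editor's illustration of the formula below). */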
++			fhc->block_size =
++				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
++			fhc->nollp =
++				(fhc_params >> FHC_PARAMS_MBLK_EN & 0x1) == 0;
++		} else {
++			fhc->block_size = pdata->block_size;
++
++			/* Check if channel supports multi block transfer */
++			channel_writel(fhc, LLP, 0xfffffffc);
++			fhc->nollp =
++				(channel_readl(fhc, LLP) & 0xfffffffc) == 0;
++			channel_writel(fhc, LLP, 0);
++		}
++	}
++
++	/* Clear all interrupts on all channels. */
++	dma_writel(fhd, CLEAR.XFER, fhd->all_chan_mask);
++	dma_writel(fhd, CLEAR.BLOCK, fhd->all_chan_mask);
++	dma_writel(fhd, CLEAR.SRC_TRAN, fhd->all_chan_mask);
++	dma_writel(fhd, CLEAR.DST_TRAN, fhd->all_chan_mask);
++	dma_writel(fhd, CLEAR.ERROR, fhd->all_chan_mask);
++
++	dma_cap_set(DMA_MEMCPY, fhd->dma.cap_mask);
++	dma_cap_set(DMA_SLAVE, fhd->dma.cap_mask);
++	if (pdata->is_private)
++		dma_cap_set(DMA_PRIVATE, fhd->dma.cap_mask);
++	fhd->dma.dev = &pdev->dev;
++	fhd->dma.device_alloc_chan_resources = fhc_alloc_chan_resources;
++	fhd->dma.device_free_chan_resources = fhc_free_chan_resources;
++
++	fhd->dma.device_prep_dma_memcpy = fhc_prep_dma_memcpy;
++
++	fhd->dma.device_prep_slave_sg = fhc_prep_slave_sg;
++	fhd->dma.device_control = fhc_control;
++
++	fhd->dma.device_tx_status = fhc_tx_status;
++	fhd->dma.device_issue_pending = fhc_issue_pending;
++
++	dma_writel(fhd, CFG, FH_CFG_DMA_EN);
++
++	err = dma_async_device_register(&fhd->dma);
++	if (err) {
++		dev_err(&pdev->dev, "DMA device registration failed: %d\n",
++			err);
++		return err;
++	}
++
++	dev_info(&pdev->dev, "FH DMA Controller, %d channels\n",
++		 nr_channels);
++
++	return 0;
++
++err_release_r:
++	release_mem_region(io->start, FH_REGLEN);
++	return err;
++}
++
++static int fh_dma_remove(struct platform_device *pdev)
++{
++	struct fh_dma		*fhd = platform_get_drvdata(pdev);
++	struct fh_dma_chan	*fhc, *_fhc;
++
++	fh_dma_off(fhd);
++	dma_async_device_unregister(&fhd->dma);
++
++	tasklet_kill(&fhd->tasklet);
++
++	list_for_each_entry_safe(fhc, _fhc, &fhd->dma.channels,
++			chan.device_node) {
++		list_del(&fhc->chan.device_node);
++		channel_clear_bit(fhd, CH_EN, fhc->mask);
++	}
++
++	return 0;
++}
++
++static void fh_dma_shutdown(struct platform_device *pdev)
++{
++	struct fh_dma	*fhd = platform_get_drvdata(pdev);
++
++	fh_dma_off(fhd);
++	clk_disable(fhd->clk);
++}
++
++static int fh_dma_suspend_noirq(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct fh_dma	*fhd = platform_get_drvdata(pdev);
++
++	fh_dma_off(fhd);
++	clk_disable(fhd->clk);
++
++	return 0;
++}
++
++static int fh_dma_resume_noirq(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct fh_dma	*fhd = platform_get_drvdata(pdev);
++
++	clk_enable(fhd->clk);
++	dma_writel(fhd, CFG, FH_CFG_DMA_EN);
++
++	return 0;
++}
++
++static const struct dev_pm_ops fh_dma_dev_pm_ops = {
++	.suspend_noirq = fh_dma_suspend_noirq,
++	.resume_noirq = fh_dma_resume_noirq,
++	.freeze_noirq = fh_dma_suspend_noirq,
++	.thaw_noirq = fh_dma_resume_noirq,
++	.restore_noirq = fh_dma_resume_noirq,
++	.poweroff_noirq = fh_dma_suspend_noirq,
++};
++
++static struct platform_driver fh_dma_driver = {
++	.probe		= fh_dma_probe,
++	.remove		= fh_dma_remove,
++	.shutdown	= fh_dma_shutdown,
++	.driver = {
++		.name	= "fh_dmac",
++		.pm	= &fh_dma_dev_pm_ops,
++	},
++};
++
++static int __init fh_dma_init(void)
++{
++	return platform_driver_register(&fh_dma_driver);
++}
++subsys_initcall(fh_dma_init);
++
++static void __exit fh_dma_exit(void)
++{
++	platform_driver_unregister(&fh_dma_driver);
++}
++module_exit(fh_dma_exit);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("FH DMA Controller driver");
+diff --git a/drivers/dma/fh_dmac_regs.h b/drivers/dma/fh_dmac_regs.h
+new file mode 100644
+index 00000000..8ca1589f
+--- /dev/null
++++ b/drivers/dma/fh_dmac_regs.h
+@@ -0,0 +1,236 @@
++/*
++ * Driver for the Synopsys DesignWare AHB DMA Controller
++ *
++ * Copyright (C) 2005-2007 Atmel Corporation
++ * Copyright (C) 2010-2011 ST Microelectronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <mach/fh_dmac.h>
++
++#define FH_DMA_MAX_NR_CHANNELS	8
++
++/*
++ * Redefine this macro to handle differences between 32- and 64-bit
++ * addressing, big vs. little endian, etc.
++ */
++#define FH_REG(name)		u32 name; u32 __pad_##name
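++
++/* Editor's note: each FH_REG(name) thus occupies 8 bytes (the live 32-bit
++ * register plus a 32-bit pad), matching the controller's 64-bit register
++ * stride assumed throughout the layout below. */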
++
++/* Hardware register definitions. */
++struct fh_dma_chan_regs {
++	FH_REG(SAR);		/* Source Address Register */
++	FH_REG(DAR);		/* Destination Address Register */
++	FH_REG(LLP);		/* Linked List Pointer */
++	u32	CTL_LO;		/* Control Register Low */
++	u32	CTL_HI;		/* Control Register High */
++	FH_REG(SSTAT);
++	FH_REG(DSTAT);
++	FH_REG(SSTATAR);
++	FH_REG(DSTATAR);
++	u32	CFG_LO;		/* Configuration Register Low */
++	u32	CFG_HI;		/* Configuration Register High */
++	FH_REG(SGR);
++	FH_REG(DSR);
++};
++
++struct fh_dma_irq_regs {
++	FH_REG(XFER);
++	FH_REG(BLOCK);
++	FH_REG(SRC_TRAN);
++	FH_REG(DST_TRAN);
++	FH_REG(ERROR);
++};
++
++struct fh_dma_regs {
++	/* per-channel registers */
++	struct fh_dma_chan_regs	CHAN[FH_DMA_MAX_NR_CHANNELS];
++
++	/* irq handling */
++	struct fh_dma_irq_regs	RAW;		/* r */
++	struct fh_dma_irq_regs	STATUS;		/* r (raw & mask) */
++	struct fh_dma_irq_regs	MASK;		/* rw (set = irq enabled) */
++	struct fh_dma_irq_regs	CLEAR;		/* w (ack, affects "raw") */
++
++	FH_REG(STATUS_INT);			/* r */
++
++	/* software handshaking */
++	FH_REG(REQ_SRC);
++	FH_REG(REQ_DST);
++	FH_REG(SGL_REQ_SRC);
++	FH_REG(SGL_REQ_DST);
++	FH_REG(LAST_SRC);
++	FH_REG(LAST_DST);
++
++	/* miscellaneous */
++	FH_REG(CFG);
++	FH_REG(CH_EN);
++	FH_REG(ID);
++	FH_REG(TEST);
++
++	/* optional encoded params, 0x3c8..0x3f7 */
++};
++
++/* Bitfields in CTL_LO */
++#define FHC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
++#define FHC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
++#define FHC_CTLL_SRC_WIDTH(n)	((n)<<4)
++#define FHC_CTLL_DST_INC	(0<<7)		/* DAR update/not */
++#define FHC_CTLL_DST_DEC	(1<<7)
++#define FHC_CTLL_DST_FIX	(2<<7)
++#define FHC_CTLL_SRC_INC	(0<<9)		/* SAR update/not */
++#define FHC_CTLL_SRC_DEC	(1<<9)
++#define FHC_CTLL_SRC_FIX	(2<<9)
++#define FHC_CTLL_DST_MSIZE(n)	((n)<<11)	/* burst, #elements */
++#define FHC_CTLL_SRC_MSIZE(n)	((n)<<14)
++#define FHC_CTLL_S_GATH_EN	(1 << 17)	/* src gather, !FIX */
++#define FHC_CTLL_D_SCAT_EN	(1 << 18)	/* dst scatter, !FIX */
++#define FHC_CTLL_FC(n)		((n) << 20)
++#define FHC_CTLL_FC_M2M		(0 << 20)	/* mem-to-mem */
++#define FHC_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
++#define FHC_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
++#define FHC_CTLL_FC_P2P		(3 << 20)	/* periph-to-periph */
++/* plus 4 transfer types for peripheral-as-flow-controller */
++#define FHC_CTLL_DMS(n)		((n)<<23)	/* dst master select */
++#define FHC_CTLL_SMS(n)		((n)<<25)	/* src master select */
++#define FHC_CTLL_LLP_D_EN	(1 << 27)	/* dest block chain */
++#define FHC_CTLL_LLP_S_EN	(1 << 28)	/* src block chain */
++
++/* Bitfields in CTL_HI */
++#define FHC_CTLH_DONE		0x00001000
++#define FHC_CTLH_BLOCK_TS_MASK	0x00000fff
++
++/* Bitfields in CFG_LO. Platform-configurable bits are in <mach/fh_dmac.h> */
++#define FHC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
++#define FHC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */
++#define FHC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
++#define FHC_CFGL_FIFO_EMPTY	(1 << 9)	/* pause done */
++#define FHC_CFGL_HS_DST		(1 << 10)	/* handshake w/dst */
++#define FHC_CFGL_HS_SRC		(1 << 11)	/* handshake w/src */
++#define FHC_CFGL_MAX_BURST(x)	((x) << 20)
++#define FHC_CFGL_RELOAD_SAR	(1 << 30)
++#define FHC_CFGL_RELOAD_DAR	(1 << 31)
++
++/* Bitfields in CFG_HI. Platform-configurable bits are in <mach/fh_dmac.h> */
++#define FHC_CFGH_DS_UPD_EN	(1 << 5)
++#define FHC_CFGH_SS_UPD_EN	(1 << 6)
++
++/* Bitfields in SGR */
++#define FHC_SGR_SGI(x)		((x) << 0)
++#define FHC_SGR_SGC(x)		((x) << 20)
++
++/* Bitfields in DSR */
++#define FHC_DSR_DSI(x)		((x) << 0)
++#define FHC_DSR_DSC(x)		((x) << 20)
++
++/* Bitfields in CFG */
++#define FH_CFG_DMA_EN		(1 << 0)
++
++#define FH_REGLEN		0x400
++
++enum fh_dmac_flags {
++	FH_DMA_IS_CYCLIC = 0,
++};
++
++struct fh_dma_chan {
++	struct dma_chan		chan;
++	void __iomem		*ch_regs;
++	u8			mask;
++	u8			priority;
++	bool			paused;
++	bool			initialized;
++	spinlock_t		lock;
++
++	/* these other elements are all protected by lock */
++	unsigned long		flags;
++	dma_cookie_t		completed;
++	struct list_head	active_list;
++	struct list_head	queue;
++	struct list_head	free_list;
++	struct fh_cyclic_desc	*cdesc;
++
++	unsigned int		descs_allocated;
++};
++
++static inline struct fh_dma_chan_regs __iomem *
++__fhc_regs(struct fh_dma_chan *fhc)
++{
++	return fhc->ch_regs;
++}
++
++#define channel_readl(fhc, name) \
++	readl(&(__fhc_regs(fhc)->name))
++#define channel_writel(fhc, name, val) \
++	writel((val), &(__fhc_regs(fhc)->name))
++
++static inline struct fh_dma_chan *to_fh_dma_chan(struct dma_chan *chan)
++{
++	return container_of(chan, struct fh_dma_chan, chan);
++}
++
++struct fh_dma {
++	struct dma_device	dma;
++	void __iomem		*regs;
++	struct tasklet_struct	tasklet;
++	struct clk		*clk;
++
++	u8			all_chan_mask;
++
++	struct fh_dma_chan	chan[0];
++};
++
++static inline struct fh_dma_regs __iomem *__fh_regs(struct fh_dma *fh)
++{
++	return fh->regs;
++}
++
++#define dma_readl(fh, name) \
++	readl(&(__fh_regs(fh)->name))
++#define dma_writel(fh, name, val) \
++	writel((val), &(__fh_regs(fh)->name))
++
++#define channel_set_bit(fh, reg, mask) \
++	dma_writel(fh, reg, ((mask) << 8) | (mask))
++#define channel_clear_bit(fh, reg, mask) \
++	dma_writel(fh, reg, ((mask) << 8) | 0)
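++
++/* Worked example (editor's illustration): for channel 2, mask = 0x04;
++ * channel_set_bit() writes 0x0404 (write-enable bit plus value bit) and
++ * channel_clear_bit() writes 0x0400 (write-enable only, value 0). */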
++
++static inline struct fh_dma *to_fh_dma(struct dma_device *ddev)
++{
++	return container_of(ddev, struct fh_dma, dma);
++}
++
++/* LLI == Linked List Item; a.k.a. DMA block descriptor */
++struct fh_lli {
++	/* values that are not changed by hardware */
++	u32	sar;
++	u32	dar;
++	u32	llp;		/* chain to next lli */
++	u32	ctllo;
++	/* values that may get written back: */
++	u32	ctlhi;
++	/* sstat and dstat can snapshot peripheral register state.
++	 * silicon config may discard either or both...
++	 */
++	u32	sstat;
++	u32	dstat;
++};
++
++struct fh_desc {
++	/* FIRST values the hardware uses */
++	struct fh_lli			lli;
++
++	/* THEN values for driver housekeeping */
++	struct list_head		desc_node;
++	struct list_head		tx_list;
++	struct dma_async_tx_descriptor	txd;
++	size_t				len;
++};
++
++static inline struct fh_desc *
++txd_to_fh_desc(struct dma_async_tx_descriptor *txd)
++{
++	return container_of(txd, struct fh_desc, txd);
++}
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 2967002a..3780557d 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -85,6 +85,11 @@ config GPIO_IT8761E
+ 	tristate "IT8761E GPIO support"
+ 	help
+ 	  Say yes here to support GPIO functionality of IT8761E super I/O chip.
++
++config GPIO_FH
++	tristate "FH GPIO support"
++	help
++	  Say yes here to support GPIO functionality of FH.
+ 
+ config GPIO_EXYNOS4
+ 	def_bool y
+diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
+index b605f8ec..3562c0f9 100644
+--- a/drivers/gpio/Makefile
++++ b/drivers/gpio/Makefile
+@@ -34,6 +34,7 @@ obj-$(CONFIG_GPIO_XILINX)	+= xilinx_gpio.o
+ obj-$(CONFIG_GPIO_CS5535)	+= cs5535-gpio.o
+ obj-$(CONFIG_GPIO_BT8XX)	+= bt8xxgpio.o
+ obj-$(CONFIG_GPIO_IT8761E)	+= it8761e_gpio.o
++obj-$(CONFIG_GPIO_FH)		+= fh_gpio.o
+ obj-$(CONFIG_GPIO_VR41XX)	+= vr41xx_giu.o
+ obj-$(CONFIG_GPIO_WM831X)	+= wm831x-gpio.o
+ obj-$(CONFIG_GPIO_WM8350)	+= wm8350-gpiolib.o
+diff --git a/drivers/gpio/fh_gpio.c b/drivers/gpio/fh_gpio.c
+new file mode 100644
+index 00000000..b3257211
+--- /dev/null
++++ b/drivers/gpio/fh_gpio.c
+@@ -0,0 +1,507 @@
++#include <linux/kernel.h>
++#include <linux/spinlock.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/gpio.h>
++#include <linux/io.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <asm-generic/gpio.h>
++#include <mach/gpio.h>
++
++struct fh_gpio_chip *fh_gpio0, *fh_gpio1;
++
++static inline void __iomem *gpio_to_base(unsigned int gpio)
++{
++    if (gpio >= 32 && gpio < 64)
++        return fh_gpio1->base;
++    else if (gpio < 32)
++        return fh_gpio0->base;
++
++    pr_err("ERROR: incorrect GPIO num\n");
++    return NULL;
++}
++
++static int _set_gpio_irq_type(unsigned int gpio, unsigned int type)
++{
++    u32 int_type, int_polarity;
++    u32 bit = gpio % 32;
++    void __iomem* base;
++    base = gpio_to_base(gpio);
++
++    switch (type & IRQF_TRIGGER_MASK) {
++    case IRQ_TYPE_EDGE_BOTH:
++        int_type = GPIO_INT_TYPE_EDGE;
++        // toggle trigger
++        if (FH_GPIO_GetValue((u32)base, bit))
++            int_polarity = GPIO_INT_POL_LOW;
++        else
++            int_polarity = GPIO_INT_POL_HIGH;
++        break;
++    case IRQ_TYPE_EDGE_RISING:
++        int_type = GPIO_INT_TYPE_EDGE;
++        int_polarity = GPIO_INT_POL_HIGH;
++        break;
++    case IRQ_TYPE_EDGE_FALLING:
++        int_type = GPIO_INT_TYPE_EDGE;
++        int_polarity = GPIO_INT_POL_LOW;
++        break;
++    case IRQ_TYPE_LEVEL_HIGH:
++        int_type = GPIO_INT_TYPE_LEVEL;
++        int_polarity = GPIO_INT_POL_HIGH;
++        break;
++    case IRQ_TYPE_LEVEL_LOW:
++        int_type = GPIO_INT_TYPE_LEVEL;
++        int_polarity = GPIO_INT_POL_LOW;
++        break;
++    case IRQ_TYPE_NONE:
++        return 0;
++    default:
++        return -EINVAL;
++    }
++    FH_GPIO_SetInterruptType((u32)base, bit, int_type);
++    FH_GPIO_SetInterruptPolarity((u32)base, bit, int_polarity);
++    return 0;
++}
++
++int fh_set_gpio_irq(struct gpio_irq_info *info)
++{
++    /* _set_gpio_irq_type() resolves the controller base itself */
++    return _set_gpio_irq_type(info->irq_gpio, info->irq_type);
++}
++EXPORT_SYMBOL(fh_set_gpio_irq);
++
++void fh_irq_enable(unsigned int gpio)
++{
++    void __iomem* base;
++    int gpio_num = gpio % 32;
++    base = gpio_to_base(gpio);
++
++    FH_GPIO_EnableInterrupt((u32)base, gpio_num, TRUE);
++}
++EXPORT_SYMBOL(fh_irq_enable);
++
++void fh_irq_disable(unsigned int gpio)
++{
++    void __iomem* base;
++    int gpio_num = gpio % 32;
++    base = gpio_to_base(gpio);
++
++    FH_GPIO_EnableInterrupt((u32)base, gpio_num, FALSE);
++}
++EXPORT_SYMBOL(fh_irq_disable);
++
++void fh_clear_gpio_irq(int gpio_id)
++{
++    void __iomem* base;
++    int gpio_num = gpio_id % 32;
++    base = gpio_to_base(gpio_id);
++
++    FH_GPIO_ClearInterrupt((u32)base, gpio_num);
++}
++EXPORT_SYMBOL(fh_clear_gpio_irq);
++
++
++static inline void __iomem *irq_to_controller(struct irq_data *d)
++{
++    struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
++
++    if (likely(d->irq >= NR_INTERNAL_IRQS))
++        return fh_gpio->base;
++
++    pr_err("irq num: %d is not a gpio irq!\n", d->irq);
++    return NULL;
++}
++
++static void gpio_irq_ack(struct irq_data* d)
++{
++    void __iomem* base;
++    struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
++    base = irq_to_controller(d);
++
++    FH_GPIO_ClearInterrupt((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base);
++}
++
++static void gpio_irq_enable(struct irq_data *d)
++{
++    void __iomem* base;
++    struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
++    base = irq_to_controller(d);
++
++    FH_GPIO_EnableInterrupt((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base, TRUE);
++}
++
++static void gpio_irq_disable(struct irq_data *d)
++{
++    void __iomem* base;
++    struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
++    base = irq_to_controller(d);
++
++    FH_GPIO_EnableInterrupt((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base, FALSE);
++}
++
++static void gpio_irq_mask(struct irq_data *d)
++{
++    void __iomem* base;
++    struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
++    base = irq_to_controller(d);
++
++    FH_GPIO_EnableInterruptMask((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base, TRUE);
++}
++
++static void gpio_irq_unmask(struct irq_data *d)
++{
++    void __iomem* base;
++    struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
++    base = irq_to_controller(d);
++
++    FH_GPIO_EnableInterruptMask((u32)base, d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base, FALSE);
++}
++
++static int gpio_irq_type(struct irq_data *d, unsigned int type)
++{
++    /* _set_gpio_irq_type() resolves the controller base itself */
++    return _set_gpio_irq_type(d->irq - NR_INTERNAL_IRQS, type);
++}
++
++#ifdef CONFIG_PM
++
++static int gpio_irq_set_wake(struct irq_data *d, unsigned value)
++{
++    struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(d);
++
++    if (unlikely(d->irq >= NR_IRQS))
++        return -EINVAL;
++
++    if (value)
++        fh_gpio->gpio_wakeups |= (1 << (d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base));
++    else
++        fh_gpio->gpio_wakeups &= ~(1 << (d->irq - NR_INTERNAL_IRQS - fh_gpio->chip.base));
++
++    return 0;
++}
++
++void fh_gpio_irq_suspend(void)
++{
++    fh_gpio0->gpio_backups = FH_GPIO_GetEnableInterrupts((u32)fh_gpio0->base);
++    fh_gpio1->gpio_backups = FH_GPIO_GetEnableInterrupts((u32)fh_gpio1->base);
++
++    FH_GPIO_SetEnableInterrupts((u32)fh_gpio0->base, fh_gpio0->gpio_wakeups);
++    FH_GPIO_SetEnableInterrupts((u32)fh_gpio1->base, fh_gpio1->gpio_wakeups);
++}
++
++void fh_gpio_irq_resume(void)
++{
++    FH_GPIO_SetEnableInterrupts((u32)fh_gpio0->base, fh_gpio0->gpio_backups);
++    FH_GPIO_SetEnableInterrupts((u32)fh_gpio1->base, fh_gpio1->gpio_backups);
++}
++
++#else
++#define gpio_irq_set_wake   NULL
++#endif
++
++static struct irq_chip gpio_irqchip = {
++    .name           = "FH_GPIO_INTC",
++    .irq_ack        = gpio_irq_ack,
++    .irq_enable     = gpio_irq_enable,
++    .irq_disable    = gpio_irq_disable,
++    .irq_mask       = gpio_irq_mask,
++    .irq_unmask     = gpio_irq_unmask,
++    .irq_set_type   = gpio_irq_type,
++    .irq_set_wake   = gpio_irq_set_wake,
++};
++
++static void gpio_toggle_trigger(unsigned int gpio, unsigned int offs)
++{
++    u32 int_polarity;
++    int gpio_num = gpio % 32;
++    void __iomem *base = gpio_to_base(gpio);
++
++    if (FH_GPIO_GetValue((u32)base, gpio_num))
++        int_polarity = GPIO_INT_POL_LOW;
++    else
++        int_polarity = GPIO_INT_POL_HIGH;
++
++    pr_debug("gpio_toggle_trigger: gpio=%u, set polarity=%x\n",
++             gpio, int_polarity);
++    FH_GPIO_SetInterruptPolarity((u32)base, gpio_num, int_polarity);
++}
++
++static inline u32 irq_get_trigger_type(unsigned int irq)
++{
++    struct irq_data *d = irq_get_irq_data(irq);
++    return d ? irqd_get_trigger_type(d) : 0;
++}
++
++static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
++{
++    struct irq_data *irqdata = irq_desc_get_irq_data(desc);
++    struct irq_chip *irqchip = irq_data_get_irq_chip(irqdata);
++    struct fh_gpio_chip *fh_gpio = irq_data_get_irq_chip_data(irqdata);
++    u32 irq_status;
++    int gpio_num, gpio;
++
++    irq_status = FH_GPIO_GetInterruptStatus((u32)fh_gpio->base);
++
++    if (unlikely(irq_status == 0)) {
++        pr_err("gpio irq status is zero.\n");
++        return;
++    }
++
++    /* temporarily mask (level sensitive) parent IRQ */
++    irqchip->irq_mask(irqdata);
++
++    gpio_num = fls(irq_status) - 1;
++
++    FH_GPIO_ClearInterrupt((u32)fh_gpio->base, gpio_num);
++
++    gpio = gpio_num + fh_gpio->chip.base;
++
++    generic_handle_irq(gpio_to_irq(gpio));
++
++    if ((irq_get_trigger_type(gpio_to_irq(gpio)) & IRQ_TYPE_SENSE_MASK)
++            == IRQ_TYPE_EDGE_BOTH)
++        gpio_toggle_trigger(gpio, gpio_num);
++
++    irqchip->irq_unmask(irqdata);
++    /* now it may re-trigger */
++}
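++
++/* Editor's note: the controller has no native both-edge trigger.
++ * IRQ_TYPE_EDGE_BOTH is emulated in software: _set_gpio_irq_type() arms
++ * the polarity opposite to the pin's current level, and the handler
++ * above re-arms the other edge via gpio_toggle_trigger(). */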
++
++/*
++ * This lock class tells lockdep that GPIO irqs are in a different
++ * category than their parents, so it won't report false recursion.
++ */
++static struct lock_class_key gpio_lock_class;
++
++static void fh_gpio_irq_init(struct platform_device *pdev)
++{
++    int i, gpio_irq;
++    struct fh_gpio_chip *plat_data;
++
++    plat_data = pdev->dev.platform_data;
++
++    for (i = 0; i < 32; i++) {
++        gpio_irq = i + NR_INTERNAL_IRQS + 32 * pdev->id;
++        irq_set_lockdep_class(gpio_irq, &gpio_lock_class);
++        irq_set_chip_and_handler(gpio_irq, &gpio_irqchip, handle_simple_irq);
++        set_irq_flags(gpio_irq, IRQF_VALID);
++        irq_set_chip_data(gpio_irq, plat_data);
++    }
++
++    irq_set_chip_data(plat_data->irq, plat_data);
++    irq_set_chained_handler(plat_data->irq, gpio_irq_handler);
++    enable_irq_wake(plat_data->irq);
++}
++
++static int chip_to_irq(struct gpio_chip *c, unsigned offset)
++{
++    struct fh_gpio_chip* chip;
++    chip = container_of(c, struct fh_gpio_chip, chip);
++    return offset + NR_INTERNAL_IRQS + chip->chip.base;
++}
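++
++/* Example (editor's illustration): with bank 1 registered at
++ * chip.base = 32, GPIO offset 1 on that bank maps to
++ * irq = 1 + NR_INTERNAL_IRQS + 32, matching the numbering set up in
++ * fh_gpio_irq_init(). */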
++
++static int chip_gpio_get(struct gpio_chip *c, unsigned offset)
++{
++    u32 bit = offset % 32;
++    struct fh_gpio_chip *chip;
++
++    chip = container_of(c, struct fh_gpio_chip, chip);
++
++    if (offset / 32)
++        return FH_GPIOB_GetValue((u32)chip->base, bit);
++    else
++        return FH_GPIO_GetValue((u32)chip->base, bit);
++}
++
++static void chip_gpio_set(struct gpio_chip *c, unsigned offset, int val)
++{
++    u32 bit = offset % 32;
++    struct fh_gpio_chip *chip;
++
++    chip = container_of(c, struct fh_gpio_chip, chip);
++
++    if (offset / 32)
++        FH_GPIOB_SetValue((u32)chip->base, bit, val);
++    else
++        FH_GPIO_SetValue((u32)chip->base, bit, val);
++}
++
++static int chip_direction_input(struct gpio_chip *c, unsigned offset)
++{
++    u32 bit = offset % 32;
++    unsigned long flags;
++    struct fh_gpio_chip *chip;
++
++    chip = container_of(c, struct fh_gpio_chip, chip);
++
++    spin_lock_irqsave(&chip->lock, flags);
++    if (offset / 32)
++        FH_GPIOB_SetDirection((u32)chip->base, bit, GPIO_DIR_INPUT);
++    else
++        FH_GPIO_SetDirection((u32)chip->base, bit, GPIO_DIR_INPUT);
++    spin_unlock_irqrestore(&chip->lock, flags);
++
++    return 0;
++}
++
++static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
++{
++    u32 bit = offset % 32;
++    unsigned long flags;
++    struct fh_gpio_chip *chip;
++
++    chip = container_of(c, struct fh_gpio_chip, chip);
++
++    spin_lock_irqsave(&chip->lock, flags);
++    if (offset / 32) {
++        FH_GPIOB_SetDirection((u32)chip->base, bit, GPIO_DIR_OUTPUT);
++        FH_GPIOB_SetValue((u32)chip->base, bit, val);
++    } else {
++        FH_GPIO_SetDirection((u32)chip->base, bit, GPIO_DIR_OUTPUT);
++        FH_GPIO_SetValue((u32)chip->base, bit, val);
++    }
++    spin_unlock_irqrestore(&chip->lock, flags);
++
++    return 0;
++}
++
++void fh_gpio_set(int gpio_id, int value)
++{
++    __gpio_set_value(gpio_id, value);
++}
++EXPORT_SYMBOL(fh_gpio_set);
++
++int fh_gpio_get(int gpio_id, int* value)
++{
++    *value = __gpio_get_value(gpio_id);
++    return 0;
++}
++EXPORT_SYMBOL(fh_gpio_get);
++
++int fh_gpio_reset(int gpio_id)
++{
++    return 0;
++}
++EXPORT_SYMBOL(fh_gpio_reset);
++
++static int __devinit fh_gpio_probe(struct platform_device *pdev)
++{
++    struct resource *res, *irq_res;
++    int err = -EIO;
++    struct fh_gpio_chip *plat_data;
++
++    /* Fetch the GPIO controller register base from the platform
++     * device's memory resource and map it below. */
++
++    plat_data = pdev->dev.platform_data;
++    plat_data->chip.get = chip_gpio_get;
++    plat_data->chip.set = chip_gpio_set;
++    plat_data->chip.direction_input = chip_direction_input;
++    plat_data->chip.direction_output = chip_direction_output;
++    plat_data->chip.to_irq = chip_to_irq;
++
++    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++    if (!res) {
++        dev_err(&pdev->dev, "can't fetch device resource info\n");
++        goto done;
++    }
++
++    if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
++        dev_err(&pdev->dev, "can't request region\n");
++        goto done;
++    }
++
++    /* set up the driver-specific struct */
++    plat_data->base = ioremap(res->start, resource_size(res));
++
++    if (pdev->id)
++        fh_gpio1 = plat_data;
++    else
++        fh_gpio0 = plat_data;
++
++    plat_data->pdev = pdev;
++    spin_lock_init(&plat_data->lock);
++    irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++    if (!irq_res) {
++        pr_err("%s: ERROR: getting resource failed, "
++               "cannot get IORESOURCE_IRQ\n", __func__);
++        goto release_region;
++    }
++    plat_data->irq = irq_res->start;
++
++    /* finally, register with the generic GPIO API */
++    err = gpiochip_add(&plat_data->chip);
++    if (err) {
++        pr_err("GPIO support load fail.\n");
++        goto release_region;
++    }
++
++    fh_gpio_irq_init(pdev);
++    pr_debug("GPIO support successfully loaded.\n\tBase Addr: 0x%p\n",
++         plat_data->base);
++
++    return 0;
++
++release_region:
++    release_mem_region(res->start, resource_size(res));
++done:
++    return err;
++}
++
++static int __devexit fh_gpio_remove(struct platform_device *pdev)
++{
++    struct resource *r;
++    int err;
++    struct fh_gpio_chip *plat_data;
++
++    plat_data = pdev->dev.platform_data;
++    err = gpiochip_remove(&plat_data->chip);
++    if (err) {
++        dev_err(&pdev->dev, "unable to remove gpio_chip\n");
++        return err;
++    }
++
++    iounmap(plat_data->base);
++
++    r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++    release_mem_region(r->start, resource_size(r));
++    return 0;
++}
++
++static struct platform_driver fh_gpio_driver = {
++    .driver = {
++        .name = GPIO_NAME,
++        .owner = THIS_MODULE,
++    },
++    .probe = fh_gpio_probe,
++    .remove = __devexit_p(fh_gpio_remove),
++};
++
++static int __init fh_gpio_init(void)
++{
++    return platform_driver_register(&fh_gpio_driver);
++}
++
++static void __exit fh_gpio_exit(void)
++{
++    platform_driver_unregister(&fh_gpio_driver);
++}
++
++module_init(fh_gpio_init);
++module_exit(fh_gpio_exit);
++
++MODULE_AUTHOR("QIN");
++MODULE_DESCRIPTION("FH GPIO Driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform: FH");
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 646068e5..82347f92 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -298,7 +298,18 @@ config I2C_AT91
+ 	  documented way to issue repeated START conditions, as needed
+ 	  to support combined I2C messages.  Use the i2c-gpio driver
+ 	  unless your system can cope with those limitations.
++
++config I2C_FH_INTERRUPT
++	tristate "FH I2C Driver with Interrupt"
++	help
++	  This supports the use of the I2C interface on Fullhan
++	  processors.
++
++	  Only master mode is supported.
+ 
++	  This driver can also be built as a module.  If so, the module
++	  will be called i2c_fh_interrupt.
++
+ config I2C_AU1550
+ 	tristate "Au1550/Au1200 SMBus interface"
+ 	depends on SOC_AU1550 || SOC_AU1200
+diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
+index e6cf294d..93dbee32 100644
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -82,5 +82,6 @@ obj-$(CONFIG_I2C_SIBYTE)	+= i2c-sibyte.o
+ obj-$(CONFIG_I2C_STUB)		+= i2c-stub.o
+ obj-$(CONFIG_SCx200_ACB)	+= scx200_acb.o
+ obj-$(CONFIG_SCx200_I2C)	+= scx200_i2c.o
++obj-$(CONFIG_I2C_FH_INTERRUPT)	+= i2c_fh_interrupt.o
+ 
+ ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
+diff --git a/drivers/i2c/busses/i2c_fh_interrupt.c b/drivers/i2c/busses/i2c_fh_interrupt.c
+new file mode 100644
+index 00000000..0c027f09
+--- /dev/null
++++ b/drivers/i2c/busses/i2c_fh_interrupt.c
+@@ -0,0 +1,928 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/clk.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <mach/i2c.h>
++//#define FH_I2C_DEBUG
++
++#ifdef FH_I2C_DEBUG
++#define PRINT_DBG(fmt, args...)  printk(fmt, ## args)
++#else
++#define PRINT_DBG(fmt, args...)  do { } while (0)
++#endif
++
++/*
++ * Registers offset
++ */
++#define DW_IC_CON		0x0
++#define DW_IC_TAR		0x4
++#define DW_IC_DATA_CMD		0x10
++#define DW_IC_SS_SCL_HCNT	0x14
++#define DW_IC_SS_SCL_LCNT	0x18
++#define DW_IC_FS_SCL_HCNT	0x1c
++#define DW_IC_FS_SCL_LCNT	0x20
++#define DW_IC_INTR_STAT		0x2c
++#define DW_IC_INTR_MASK		0x30
++#define DW_IC_RAW_INTR_STAT	0x34
++#define DW_IC_RX_TL		0x38
++#define DW_IC_TX_TL		0x3c
++#define DW_IC_CLR_INTR		0x40
++#define DW_IC_CLR_RX_UNDER	0x44
++#define DW_IC_CLR_RX_OVER	0x48
++#define DW_IC_CLR_TX_OVER	0x4c
++#define DW_IC_CLR_RD_REQ	0x50
++#define DW_IC_CLR_TX_ABRT	0x54
++#define DW_IC_CLR_RX_DONE	0x58
++#define DW_IC_CLR_ACTIVITY	0x5c
++#define DW_IC_CLR_STOP_DET	0x60
++#define DW_IC_CLR_START_DET	0x64
++#define DW_IC_CLR_GEN_CALL	0x68
++#define DW_IC_ENABLE		0x6c
++#define DW_IC_STATUS		0x70
++#define DW_IC_TXFLR		0x74
++#define DW_IC_RXFLR		0x78
++#define DW_IC_COMP_PARAM_1	0xf4
++#define DW_IC_TX_ABRT_SOURCE	0x80
++
++#define DW_IC_CON_MASTER		0x1
++#define DW_IC_CON_SPEED_STD		0x2
++#define DW_IC_CON_SPEED_FAST		0x4
++#define DW_IC_CON_10BITADDR_MASTER	0x10
++#define DW_IC_CON_RESTART_EN		0x20
++#define DW_IC_CON_SLAVE_DISABLE		0x40
++
++#define DW_IC_INTR_RX_UNDER	0x001
++#define DW_IC_INTR_RX_OVER	0x002
++#define DW_IC_INTR_RX_FULL	0x004
++#define DW_IC_INTR_TX_OVER	0x008
++#define DW_IC_INTR_TX_EMPTY	0x010
++#define DW_IC_INTR_RD_REQ	0x020
++#define DW_IC_INTR_TX_ABRT	0x040
++#define DW_IC_INTR_RX_DONE	0x080
++#define DW_IC_INTR_ACTIVITY	0x100
++#define DW_IC_INTR_STOP_DET	0x200
++#define DW_IC_INTR_START_DET	0x400
++#define DW_IC_INTR_GEN_CALL	0x800
++
++#define DW_IC_INTR_DEFAULT_MASK		(DW_IC_INTR_RX_FULL | \
++					 DW_IC_INTR_TX_EMPTY | \
++					 DW_IC_INTR_TX_ABRT | \
++					 DW_IC_INTR_STOP_DET)
++
++#define DW_IC_STATUS_ACTIVITY	0x1
++#define DW_IC_STATUS_MASTER_ACTIVITY   0x20
++
++#define DW_IC_ERR_TX_ABRT	0x1
++
++/*
++ * status codes
++ */
++#define STATUS_IDLE			0x0
++#define STATUS_WRITE_IN_PROGRESS	0x1
++#define STATUS_READ_IN_PROGRESS		0x2
++
++#define TIMEOUT			20 /* ms */
++
++/*
++ * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
++ *
++ * only expected abort codes are listed here
++ * refer to the datasheet for the full list
++ */
++#define ABRT_7B_ADDR_NOACK	0
++#define ABRT_10ADDR1_NOACK	1
++#define ABRT_10ADDR2_NOACK	2
++#define ABRT_TXDATA_NOACK	3
++#define ABRT_GCALL_NOACK	4
++#define ABRT_GCALL_READ		5
++#define ABRT_SBYTE_ACKDET	7
++#define ABRT_SBYTE_NORSTRT	9
++#define ABRT_10B_RD_NORSTRT	10
++#define ABRT_MASTER_DIS		11
++#define ARB_LOST		12
++
++#define DW_IC_TX_ABRT_7B_ADDR_NOACK	(1UL << ABRT_7B_ADDR_NOACK)
++#define DW_IC_TX_ABRT_10ADDR1_NOACK	(1UL << ABRT_10ADDR1_NOACK)
++#define DW_IC_TX_ABRT_10ADDR2_NOACK	(1UL << ABRT_10ADDR2_NOACK)
++#define DW_IC_TX_ABRT_TXDATA_NOACK	(1UL << ABRT_TXDATA_NOACK)
++#define DW_IC_TX_ABRT_GCALL_NOACK	(1UL << ABRT_GCALL_NOACK)
++#define DW_IC_TX_ABRT_GCALL_READ	(1UL << ABRT_GCALL_READ)
++#define DW_IC_TX_ABRT_SBYTE_ACKDET	(1UL << ABRT_SBYTE_ACKDET)
++#define DW_IC_TX_ABRT_SBYTE_NORSTRT	(1UL << ABRT_SBYTE_NORSTRT)
++#define DW_IC_TX_ABRT_10B_RD_NORSTRT	(1UL << ABRT_10B_RD_NORSTRT)
++#define DW_IC_TX_ABRT_MASTER_DIS	(1UL << ABRT_MASTER_DIS)
++#define DW_IC_TX_ARB_LOST		(1UL << ARB_LOST)
++
++#define DW_IC_TX_ABRT_NOACK		(DW_IC_TX_ABRT_7B_ADDR_NOACK | \
++					 DW_IC_TX_ABRT_10ADDR1_NOACK | \
++					 DW_IC_TX_ABRT_10ADDR2_NOACK | \
++					 DW_IC_TX_ABRT_TXDATA_NOACK | \
++					 DW_IC_TX_ABRT_GCALL_NOACK)
++
++static char *abort_sources[] = {
++	[ABRT_7B_ADDR_NOACK] =
++		"slave address not acknowledged (7bit mode)",
++	[ABRT_10ADDR1_NOACK] =
++		"first address byte not acknowledged (10bit mode)",
++	[ABRT_10ADDR2_NOACK] =
++		"second address byte not acknowledged (10bit mode)",
++	[ABRT_TXDATA_NOACK] =
++		"data not acknowledged",
++	[ABRT_GCALL_NOACK] =
++		"no acknowledgement for a general call",
++	[ABRT_GCALL_READ] =
++		"read after general call",
++	[ABRT_SBYTE_ACKDET] =
++		"start byte acknowledged",
++	[ABRT_SBYTE_NORSTRT] =
++		"trying to send start byte when restart is disabled",
++	[ABRT_10B_RD_NORSTRT] =
++		"trying to read when restart is disabled (10bit mode)",
++	[ABRT_MASTER_DIS] =
++		"trying to use disabled adapter",
++	[ARB_LOST] =
++		"lost arbitration",
++};
++
++/**
++ * struct fh_i2c_dev - private i2c-designware data
++ * @dev: driver model device node
++ * @base: IO registers pointer
++ * @cmd_complete: tx completion indicator
++ * @lock: protect this struct and IO registers
++ * @clk: input reference clock
++ * @cmd_err: run time hardware error code
++ * @msgs: points to an array of messages currently being transferred
++ * @msgs_num: the number of elements in msgs
++ * @msg_write_idx: the element index of the current tx message in the msgs
++ *	array
++ * @tx_buf_len: the length of the current tx buffer
++ * @tx_buf: the current tx buffer
++ * @msg_read_idx: the element index of the current rx message in the msgs
++ *	array
++ * @rx_buf_len: the length of the current rx buffer
++ * @rx_buf: the current rx buffer
++ * @msg_err: error status of the current transfer
++ * @status: i2c master status, one of STATUS_*
++ * @abort_source: copy of the TX_ABRT_SOURCE register
++ * @irq: interrupt number for the i2c master
++ * @adapter: i2c subsystem adapter node
++ * @tx_fifo_depth: depth of the hardware tx fifo
++ * @rx_fifo_depth: depth of the hardware rx fifo
++ */
++struct fh_i2c_dev {
++	struct device		*dev;
++	void __iomem		*base;
++	struct completion	cmd_complete;
++	struct mutex		lock;
++	struct clk		*clk;
++	int			cmd_err;
++	struct i2c_msg		*msgs;
++	int			msgs_num;
++	int			msg_write_idx;
++	u32			tx_buf_len;
++	u8			*tx_buf;
++	int			msg_read_idx;
++	u32			rx_buf_len;
++	u8			*rx_buf;
++	int			msg_err;
++	unsigned int		status;
++	u32			abort_source;
++	int			irq;
++	struct i2c_adapter	adapter;
++	unsigned int		tx_fifo_depth;
++	unsigned int		rx_fifo_depth;
++};
++
++
++static int i2c_fh_wait_master_not_active(struct fh_i2c_dev *dev)
++{
++	int timeout = 200;	/* 200 * 10 us = 2000 us total */
++
++	while (I2c_IsActiveMst(dev->base)) {
++		if (timeout <= 0) {
++			dev_warn(dev->dev,
++				 "timeout waiting for master not active\n");
++			return -ETIMEDOUT;
++		}
++		timeout--;
++		udelay(10);
++	}
++
++	return 0;
++}
++
++static u32
++i2c_fh_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
++{
++	/*
++	 * DesignWare I2C core doesn't seem to have a solid strategy to meet
++	 * the tHD;STA timing spec.  Configuring _HCNT based on tHIGH spec
++	 * will result in violation of the tHD;STA spec.
++	 */
++	if (cond)
++		/*
++		 * Conditional expression:
++		 *
++		 *   IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH
++		 *
++		 * This is based on the DW manuals, and represents an ideal
++		 * configuration.  The resulting I2C bus speed will be
++		 * faster than any of the others.
++		 *
++		 * If your hardware is free from tHD;STA issue, try this one.
++		 */
++		return (ic_clk * tSYMBOL + 5000) / 10000 - 8 + offset;
++	else
++		/*
++		 * Conditional expression:
++		 *
++		 *   IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf)
++		 *
++		 * This is just an experimental rule; the tHD;STA period
++		 * turned out to be proportional to (_HCNT + 3).  With this
++		 * setting, we could meet both tHIGH and tHD;STA timing specs.
++		 *
++		 * If unsure, you'd better take this alternative.
++		 *
++		 * The reason why we need to take "tf" into account here is
++		 * the same as described in i2c_fh_scl_lcnt().
++		 */
++		return (ic_clk * (tSYMBOL + tf) + 5000) / 10000 - 3 + offset;
++}
++
++static u32 i2c_fh_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
++{
++	/*
++	 * Conditional expression:
++	 *
++	 *   IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf)
++	 *
++	 * DW I2C core starts counting the SCL CNTs for the LOW period
++	 * of the SCL clock (tLOW) as soon as it pulls the SCL line.
++	 * In order to meet the tLOW timing spec, we need to take into
++	 * account the fall time of SCL signal (tf).  Default tf value
++	 * should be 0.3 us, for safety.
++	 */
++	return ((ic_clk * (tLOW + tf) + 5000) / 10000) - 1 + offset;
++}
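++
++/*
++ * Worked example (illustrative numbers only): with a 50 MHz reference
++ * clock, ic_clk = 50000 (kHz) and the timing arguments are in units of
++ * 100 ns.  For standard mode with the DW default condition (cond == 0,
++ * offset == 0):
++ *
++ *   hcnt = (50000 * (40 + 3) + 5000) / 10000 - 3 = 212
++ *   lcnt = (50000 * (47 + 3) + 5000) / 10000 - 1 = 249
++ *
++ * i.e. roughly 461 input clock cycles (about 9.2 us) per SCL period
++ * before internal overhead, in line with the 100 kHz standard-mode rate.
++ */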
++
++/**
++ * i2c_fh_init() - initialize the designware i2c master hardware
++ * @dev: device private data
++ *
++ * This function configures and enables the I2C master.
++ * It is called from the I2C init path, and again at run time after a
++ * timeout.
++ */
++static void i2c_fh_init(struct fh_i2c_dev *dev)
++{
++	u32 input_clock_khz = clk_get_rate(dev->clk) / 1000;
++	u32 ic_con, hcnt, lcnt;
++
++	/* Disable the adapter */
++	i2c_fh_wait_master_not_active(dev);
++	I2c_DisEnable((unsigned int)dev->base);
++
++	/* set standard and fast speed dividers for the high/low periods */
++
++	/* Standard-mode */
++	hcnt = i2c_fh_scl_hcnt(input_clock_khz,
++				40,	/* tHD;STA = tHIGH = 4.0 us */
++				3,	/* tf = 0.3 us */
++				0,	/* 0: DW default, 1: Ideal */
++				0);	/* No offset */
++	lcnt = i2c_fh_scl_lcnt(input_clock_khz,
++				47,	/* tLOW = 4.7 us */
++				3,	/* tf = 0.3 us */
++				0);	/* No offset */
++	I2c_SetSsHcnt(dev->base, hcnt);
++	I2c_SetSsLcnt(dev->base, lcnt);
++	pr_info("\tClock: %dkHz, Standard-mode HCNT:LCNT = %d:%d\n", input_clock_khz, hcnt, lcnt);
++
++	/* Fast-mode */
++	hcnt = i2c_fh_scl_hcnt(input_clock_khz,
++				6,	/* tHD;STA = tHIGH = 0.6 us */
++				3,	/* tf = 0.3 us */
++				0,	/* 0: DW default, 1: Ideal */
++				0);	/* No offset */
++	lcnt = i2c_fh_scl_lcnt(input_clock_khz,
++				13,	/* tLOW = 1.3 us */
++				3,	/* tf = 0.3 us */
++				0);	/* No offset */
++	I2c_SetFsHcnt(dev->base, hcnt);
++	I2c_SetFsLcnt(dev->base, lcnt);
++	/* dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt); */
++
++	/* Configure Tx/Rx FIFO threshold levels */
++
++	I2c_SetTxRxTl(dev->base, dev->tx_fifo_depth - 1, 0);
++	/* configure the i2c master */
++	ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
++		/* DW_IC_CON_RESTART_EN | */ DW_IC_CON_SPEED_FAST; /* or DW_IC_CON_SPEED_STD */
++	I2c_SetCon(dev->base, ic_con);
++
++}
++
++/*
++ * Waiting for bus not busy
++ */
++static int i2c_fh_wait_bus_not_busy(struct fh_i2c_dev *dev)
++{
++	int timeout = TIMEOUT;
++
++	while (I2c_IsActiveMst(dev->base)) {
++		if (timeout <= 0) {
++			dev_warn(dev->dev, "timeout waiting for bus ready\n");
++			return -ETIMEDOUT;
++		}
++		timeout--;
++		msleep(1);
++	}
++
++	return 0;
++}
++
++static void i2c_fh_xfer_init(struct fh_i2c_dev *dev)
++{
++	struct i2c_msg *msgs = dev->msgs;
++	u32 ic_con;
++
++	/* Disable the adapter */
++	i2c_fh_wait_master_not_active(dev);
++	I2c_DisEnable((unsigned int)dev->base);
++
++	/* set the slave (target) address */
++	I2c_SetDeviceId(dev->base, msgs[dev->msg_write_idx].addr);
++
++	/* if the slave address is ten bit address, enable 10BITADDR */
++	ic_con = I2c_GetCon(dev->base);
++	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
++		ic_con |= DW_IC_CON_10BITADDR_MASTER;
++	else
++		ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
++	I2c_SetCon(dev->base, ic_con);
++
++	/* Enable the adapter */
++	I2c_Enable(dev->base);
++
++	/* Enable interrupts */
++	I2c_SetIntrMask(dev->base, DW_IC_INTR_DEFAULT_MASK);
++
++}
++
++/*
++ * Initiate (and continue) low level master read/write transaction.
++ * This function is only called from i2c_fh_isr; it pumps i2c_msg
++ * messages into the tx buffer.  Even if the i2c_msg data is longer
++ * than the tx buffer, it handles everything.
++ */
++static void
++i2c_fh_xfer_msg(struct fh_i2c_dev *dev)
++{
++	struct i2c_msg *msgs = dev->msgs;
++	u32 intr_mask, cmd;
++	int tx_limit, rx_limit;
++	u32 addr = msgs[dev->msg_write_idx].addr;
++	u32 buf_len = dev->tx_buf_len;
++	u8 *buf = dev->tx_buf;
++
++	PRINT_DBG("i2c_fh_xfer_msg start, dev->msgs_num: %d\n", dev->msgs_num);
++
++	intr_mask = DW_IC_INTR_DEFAULT_MASK;
++
++	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++)
++	{
++		/*
++		 * if target address has changed, we need to
++		 * reprogram the target address in the i2c
++		 * adapter when we are done with this transfer
++		 */
++		if (msgs[dev->msg_write_idx].addr != addr) {
++			dev_err(dev->dev,
++				"%s: invalid target address\n", __func__);
++			dev->msg_err = -EINVAL;
++			break;
++		}
++
++		if (msgs[dev->msg_write_idx].len == 0) {
++			dev_err(dev->dev,
++				"%s: invalid message length\n", __func__);
++			dev->msg_err = -EINVAL;
++			break;
++		}
++
++		if (!(dev->status & STATUS_WRITE_IN_PROGRESS))
++		{
++			/* new i2c_msg */
++			buf = msgs[dev->msg_write_idx].buf;
++			buf_len = msgs[dev->msg_write_idx].len;
++
++			PRINT_DBG("new msg: len: %d, buf: 0x%x\n", buf_len, buf[0]);
++		}
++
++		tx_limit = dev->tx_fifo_depth - I2c_GetTxTl(dev->base);
++		rx_limit = dev->rx_fifo_depth - I2c_GetRxTl(dev->base);
++
++		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0)
++		{
++			if (msgs[dev->msg_write_idx].flags & I2C_M_RD)
++			{
++				cmd = 0x100;	/* read command bit in IC_DATA_CMD */
++				rx_limit--;
++			}
++			else
++			{
++				cmd = *buf++;
++			}
++
++			tx_limit--; buf_len--;
++
++			if (!buf_len)
++				cmd |= 0x200;	/* issue a STOP after the last byte */
++
++			I2c_Write(dev->base, cmd);
++		}
++		PRINT_DBG("\n");
++
++		dev->tx_buf = buf;
++		dev->tx_buf_len = buf_len;
++
++		if (buf_len > 0)
++		{
++			/* more bytes to be written */
++			dev->status |= STATUS_WRITE_IN_PROGRESS;
++			break;
++		}
++		else
++		{
++			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
++		}
++	}
++
++	/*
++	 * If i2c_msg index search is completed, we don't need TX_EMPTY
++	 * interrupt any more.
++	 */
++
++	if (dev->msg_write_idx == dev->msgs_num)
++		intr_mask &= ~DW_IC_INTR_TX_EMPTY;
++
++	if (dev->msg_err)
++		intr_mask = 0;
++
++	I2c_SetIntrMask(dev->base, intr_mask);
++
++}
++
++static void
++i2c_fh_read(struct fh_i2c_dev *dev)
++{
++	struct i2c_msg *msgs = dev->msgs;
++	int rx_valid;
++
++	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++)
++	{
++		u32 len;
++		u8 *buf;
++
++		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
++			continue;
++
++		if (!(dev->status & STATUS_READ_IN_PROGRESS))
++		{
++			len = msgs[dev->msg_read_idx].len;
++			buf = msgs[dev->msg_read_idx].buf;
++		}
++		else
++		{
++			PRINT_DBG("STATUS_READ_IN_PROGRESS\n");
++			len = dev->rx_buf_len;
++			buf = dev->rx_buf;
++		}
++
++		rx_valid = I2c_GetRxFLR(dev->base);
++
++		if(rx_valid == 0)
++		{
++			PRINT_DBG("rx_valid == 0\n");
++		}
++
++		for (; len > 0 && rx_valid > 0; len--, rx_valid--)
++		{
++			*buf++ = I2c_Read(dev->base);
++		}
++
++		PRINT_DBG("i2c_fh_read, len: %d, buf[0]: 0x%x\n", msgs[dev->msg_read_idx].len, msgs[dev->msg_read_idx].buf[0]);
++
++		if (len > 0)
++		{
++			PRINT_DBG("len > 0\n");
++			dev->status |= STATUS_READ_IN_PROGRESS;
++			dev->rx_buf_len = len;
++			dev->rx_buf = buf;
++			return;
++		} else
++			dev->status &= ~STATUS_READ_IN_PROGRESS;
++	}
++}
++
++static int i2c_fh_handle_tx_abort(struct fh_i2c_dev *dev)
++{
++	unsigned long abort_source = dev->abort_source;
++	int i;
++
++	if (abort_source & DW_IC_TX_ABRT_NOACK) {
++		for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
++		{
++			PRINT_DBG(
++				"%s: %s\n", __func__, abort_sources[i]);
++		}
++		return -EREMOTEIO;
++	}
++
++	for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
++		dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
++
++	if (abort_source & DW_IC_TX_ARB_LOST)
++		return -EAGAIN;
++	else if (abort_source & DW_IC_TX_ABRT_GCALL_READ)
++		return -EINVAL; /* wrong msgs[] data */
++	else
++		return -EIO;
++}
++
++/*
++ * Prepare controller for a transaction and call i2c_fh_xfer_msg
++ */
++static int
++i2c_fh_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
++{
++	struct fh_i2c_dev *dev = i2c_get_adapdata(adap);
++	int ret;
++
++	PRINT_DBG("-------i2c, %s: msgs: %d\n", __func__, num);
++
++	mutex_lock(&dev->lock);
++
++	INIT_COMPLETION(dev->cmd_complete);
++	dev->msgs = msgs;
++	dev->msgs_num = num;
++	dev->cmd_err = 0;
++	dev->msg_write_idx = 0;
++	dev->msg_read_idx = 0;
++	dev->msg_err = 0;
++	dev->status = STATUS_IDLE;
++	dev->abort_source = 0;
++
++	ret = i2c_fh_wait_bus_not_busy(dev);
++	if (ret < 0)
++	{
++		goto done;
++	}
++
++	/* start the transfers */
++	i2c_fh_xfer_init(dev);
++
++	/* wait for tx to complete */
++	ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ);
++	if (ret == 0) {
++		dev_err(dev->dev, "controller timed out\n");
++		i2c_fh_init(dev);
++		ret = -ETIMEDOUT;
++		goto done;
++	} else if (ret < 0)
++		goto done;
++
++	if (dev->msg_err)
++	{
++		PRINT_DBG("dev->msg_err\n");
++		ret = dev->msg_err;
++		goto done;
++	}
++
++	/* no error */
++	if (likely(!dev->cmd_err)) {
++		/* Disable the adapter */
++		i2c_fh_wait_master_not_active(dev);
++		I2c_DisEnable(dev->base);
++		ret = num;
++		goto done;
++	}
++
++	/* We have an error */
++	if (dev->cmd_err == DW_IC_ERR_TX_ABRT)
++	{
++		PRINT_DBG("dev->cmd_err == DW_IC_ERR_TX_ABRT\n");
++		ret = i2c_fh_handle_tx_abort(dev);
++		goto done;
++	}
++
++	ret = -EIO;
++
++done:
++	PRINT_DBG("buf: 0x%x\n", dev->msgs[num - 1].buf[0]);
++	mutex_unlock(&dev->lock);
++
++	return ret;
++}
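++
++/*
++ * Usage sketch (illustrative, standard i2c core API; "client_addr" and
++ * "adapter" are placeholder names): a typical register read reaches
++ * i2c_fh_xfer() as two i2c_msg entries sharing one slave address -- a
++ * write of the register offset followed by a read:
++ *
++ *   u8 reg = 0x10, val;
++ *   struct i2c_msg msgs[2] = {
++ *       { .addr = client_addr, .flags = 0,        .len = 1, .buf = &reg },
++ *       { .addr = client_addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
++ *   };
++ *   i2c_transfer(adapter, msgs, 2);
++ *
++ * Both messages must carry the same address: i2c_fh_xfer_msg() fails a
++ * mid-transfer address change with -EINVAL.
++ */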
++
++static u32 i2c_fh_func(struct i2c_adapter *adap)
++{
++	return	I2C_FUNC_I2C |
++		I2C_FUNC_SMBUS_BYTE |
++		I2C_FUNC_SMBUS_BYTE_DATA |
++		I2C_FUNC_SMBUS_WORD_DATA |
++		I2C_FUNC_SMBUS_I2C_BLOCK;
++}
++
++static u32 i2c_fh_read_clear_intrbits(struct fh_i2c_dev *dev)
++{
++	u32 stat;
++
++	/*
++	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
++	 * The unmasked raw version of the interrupt status bits is available
++	 * in the IC_RAW_INTR_STAT register.
++	 *
++	 * That is,
++	 *   stat = readl(IC_INTR_STAT);
++	 * is equivalent to
++	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
++	 *
++	 * The raw version might be useful for debugging purposes.
++	 */
++	stat = readl(dev->base + DW_IC_INTR_STAT);
++
++	/*
++	 * Do not use the IC_CLR_INTR register to clear interrupts, or
++	 * you'll miss some interrupts, triggered during the period from
++	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
++	 *
++	 * Instead, use the separately-prepared IC_CLR_* registers.
++	 */
++	if (stat & DW_IC_INTR_RX_UNDER)
++		I2c_ClrIntr(dev->base, DW_IC_CLR_RX_UNDER);
++	if (stat & DW_IC_INTR_RX_OVER)
++		I2c_ClrIntr(dev->base, DW_IC_CLR_RX_OVER);
++	if (stat & DW_IC_INTR_TX_OVER)
++		I2c_ClrIntr(dev->base, DW_IC_CLR_TX_OVER);
++	if (stat & DW_IC_INTR_RD_REQ)
++		I2c_ClrIntr(dev->base, DW_IC_CLR_RD_REQ);
++	if (stat & DW_IC_INTR_TX_ABRT) {
++		/*
++		 * The IC_TX_ABRT_SOURCE register is cleared whenever
++		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
++		 */
++		dev->abort_source = readl(dev->base + DW_IC_TX_ABRT_SOURCE);
++		I2c_ClrIntr(dev->base, DW_IC_CLR_TX_ABRT);
++	}
++	if (stat & DW_IC_INTR_RX_DONE)
++		I2c_ClrIntr(dev->base, DW_IC_CLR_RX_DONE);
++	if (stat & DW_IC_INTR_ACTIVITY)
++		I2c_ClrIntr(dev->base, DW_IC_CLR_ACTIVITY);
++	if (stat & DW_IC_INTR_STOP_DET)
++		I2c_ClrIntr(dev->base, DW_IC_CLR_STOP_DET);
++	if (stat & DW_IC_INTR_START_DET)
++		I2c_ClrIntr(dev->base, DW_IC_CLR_START_DET);
++	if (stat & DW_IC_INTR_GEN_CALL)
++		I2c_ClrIntr(dev->base, DW_IC_CLR_GEN_CALL);
++
++	return stat;
++}
++
++/*
++ * Interrupt service routine. This gets called whenever an I2C interrupt
++ * occurs.
++ */
++static irqreturn_t i2c_fh_isr(int this_irq, void *dev_id)
++{
++	struct fh_i2c_dev *dev = dev_id;
++	u32 stat;
++
++	stat = i2c_fh_read_clear_intrbits(dev);
++	PRINT_DBG("-----------i2c, %s: stat=0x%x\n", __func__, stat);
++
++	if (stat & DW_IC_INTR_TX_ABRT)
++	{
++		PRINT_DBG("DW_IC_INTR_TX_ABRT\n");
++		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
++		dev->status = STATUS_IDLE;
++
++		/*
++		 * Anytime TX_ABRT is set, the contents of the tx/rx
++		 * buffers are flushed.  Make sure to skip them.
++		 */
++		I2c_SetIntrMask(dev->base, DW_IC_INTR_NONE);
++		goto tx_aborted;
++	}
++
++	if (stat & DW_IC_INTR_RX_FULL)
++	{
++		PRINT_DBG("i2c_fh_read\n");
++		i2c_fh_read(dev);
++	}
++
++	if (stat & DW_IC_INTR_TX_EMPTY)
++	{
++		PRINT_DBG("i2c_fh_xfer_msg\n");
++		i2c_fh_xfer_msg(dev);
++	}
++
++	/*
++	 * No need to modify or disable the interrupt mask here.
++	 * i2c_fh_xfer_msg() will take care of it according to
++	 * the current transmit status.
++	 */
++
++tx_aborted:
++	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
++		complete(&dev->cmd_complete);
++
++	return IRQ_HANDLED;
++}
++
++static struct i2c_algorithm i2c_fh_algo =
++{
++	.master_xfer	= i2c_fh_xfer,
++	.functionality	= i2c_fh_func,
++};
++
++static int __devinit fh_i2c_probe(struct platform_device *pdev)
++{
++	struct fh_i2c_dev *dev;
++	struct i2c_adapter *adap;
++	struct resource *mem, *ioarea;
++	int irq, r;
++
++	pr_info("I2C driver:\n\tplatform registration... ");
++
++	/* NOTE: driver uses the static register mapping */
++	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!mem)
++	{
++		dev_err(&pdev->dev, "no mem resource?\n");
++		return -EINVAL;
++	}
++
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0)
++	{
++		dev_err(&pdev->dev, "no irq resource?\n");
++		return irq; /* -ENXIO */
++	}
++
++	ioarea = request_mem_region(mem->start, resource_size(mem),
++			pdev->name);
++	if (!ioarea)
++	{
++		dev_err(&pdev->dev, "I2C region already claimed\n");
++		return -EBUSY;
++	}
++
++	dev = kzalloc(sizeof(struct fh_i2c_dev), GFP_KERNEL);
++	if (!dev)
++	{
++		r = -ENOMEM;
++		goto err_release_region;
++	}
++
++	init_completion(&dev->cmd_complete);
++	mutex_init(&dev->lock);
++	dev->dev = get_device(&pdev->dev);
++	dev->irq = irq;
++	platform_set_drvdata(pdev, dev);
++
++	if (pdev->id)
++		dev->clk = clk_get(NULL, "i2c1_clk");
++	else
++		dev->clk = clk_get(NULL, "i2c0_clk");
++
++	if (IS_ERR(dev->clk))
++	{
++		r = -ENODEV;
++		goto err_free_mem;
++	}
++	clk_enable(dev->clk);
++
++	dev->base = ioremap(mem->start, resource_size(mem));
++	if (dev->base == NULL)
++	{
++		dev_err(&pdev->dev, "failure mapping io resources\n");
++		r = -ENOMEM;
++		goto err_unuse_clocks;
++	}
++	dev->tx_fifo_depth = I2c_GetTxFifoDepth(dev->base);
++	dev->rx_fifo_depth = I2c_GetRxFifoDepth(dev->base);
++	i2c_fh_init(dev);
++
++	pr_info("\ttx fifo depth: %d, rx fifo depth: %d\n", dev->tx_fifo_depth, dev->rx_fifo_depth);
++
++	I2c_SetIntrMask(dev->base, DW_IC_INTR_NONE); /* disable IRQ */
++	r = request_irq(dev->irq, i2c_fh_isr, IRQF_DISABLED, pdev->name, dev);
++	if (r)
++	{
++		dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
++		goto err_iounmap;
++	}
++
++	adap = &dev->adapter;
++	i2c_set_adapdata(adap, dev);
++	adap->owner = THIS_MODULE;
++	adap->class = I2C_CLASS_HWMON;
++	strlcpy(adap->name, "FH I2C adapter",
++			sizeof(adap->name));
++	adap->algo = &i2c_fh_algo;
++	adap->dev.parent = &pdev->dev;
++
++	adap->nr = pdev->id;
++	r = i2c_add_numbered_adapter(adap);
++	if (r) {
++		dev_err(&pdev->dev, "failure adding adapter\n");
++		goto err_free_irq;
++	}
++
++	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
++		   "\t\tIO base addr: 0x%p)\n", "I2C", pdev->name,
++		   pdev->id, dev->irq, dev->base);
++
++	return 0;
++
++err_free_irq:
++	free_irq(dev->irq, dev);
++err_iounmap:
++	iounmap(dev->base);
++err_unuse_clocks:
++	clk_disable(dev->clk);
++	clk_put(dev->clk);
++	dev->clk = NULL;
++err_free_mem:
++	platform_set_drvdata(pdev, NULL);
++	put_device(&pdev->dev);
++	kfree(dev);
++err_release_region:
++	release_mem_region(mem->start, resource_size(mem));
++
++	return r;
++}
++
++static int __devexit fh_i2c_remove(struct platform_device *pdev)
++{
++	struct fh_i2c_dev *dev = platform_get_drvdata(pdev);
++	struct resource *mem;
++
++	platform_set_drvdata(pdev, NULL);
++	i2c_del_adapter(&dev->adapter);
++	put_device(&pdev->dev);
++
++	clk_disable(dev->clk);
++	clk_put(dev->clk);
++	dev->clk = NULL;
++	i2c_fh_wait_master_not_active(dev);
++	writel(0, dev->base + DW_IC_ENABLE);
++	free_irq(dev->irq, dev);
++	kfree(dev);
++
++	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	release_mem_region(mem->start, resource_size(mem));
++	return 0;
++}
++
++static struct platform_driver fh_i2c_driver =
++{
++	.remove		= __devexit_p(fh_i2c_remove),
++	.driver		=
++	{
++		.name	= "fh_i2c",
++		.owner	= THIS_MODULE,
++	},
++};
++
++static int __init fh_i2c_init_driver(void)
++{
++	return platform_driver_probe(&fh_i2c_driver, fh_i2c_probe);
++}
++module_init(fh_i2c_init_driver);
++
++static void __exit fh_i2c_exit_driver(void)
++{
++	platform_driver_unregister(&fh_i2c_driver);
++}
++module_exit(fh_i2c_exit_driver);
++
++MODULE_AUTHOR("QIN");
++MODULE_ALIAS("platform:fh");
++MODULE_DESCRIPTION("FH I2C bus adapter");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 35464744..9d4eb336 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -157,6 +157,8 @@ config INTEL_MID_PTI
+ 	  an Intel Atom (non-netbook) mobile device containing a MIPI
+ 	  P1149.7 standard implementation.
+ 
++
++
+ config SGI_IOC4
+ 	tristate "SGI IOC4 Base IO support"
+ 	depends on PCI
+@@ -404,6 +406,60 @@ config EP93XX_PWM
+ 	  To compile this driver as a module, choose M here: the module will
+ 	  be called ep93xx_pwm.
+ 
++config FH_I2S_SLAVE
++	depends on ARCH_FH8830 || ARCH_FH8833
++	tristate "FH I2S SLAVE MODE support"
++	default n
++
++config FH_I2S_MASTER
++	depends on ARCH_FH8833
++	tristate "FH I2S MASTER MODE support"
++	default n
++
++
++
++
++config FH_PINCTRL
++	tristate "FH Pinctrl support"
++	default n
++	help
++	  To compile this driver as a module, choose M here: the module will
++	  be called fh_pinctrl_dev.
++
++
++config FH_SADC
++	depends on ARCH_FULLHAN
++	tristate "FH SADC support"
++	help
++	  To compile this driver as a module, choose M here: the module will
++	  be called fh_sadc.
++
++	  HW parameters: 10-bit precision, 8 channels, 5 MHz input clock.
++	  One conversion takes about 12/5M s, i.e. roughly 2.4 us.
++
++
++config FH_FIRMWARE_LOADER
++	tristate "Enable FH firmware loader"
++	default m
++	help
++	  Enable the FH firmware loader.
++
++config FH_EFUSE
++	tristate "FH EFUSE support"
++	help
++	  To compile this driver as a module, choose M here: the module will
++	  be called fh_efuse.
++
++	  HW parameters: 60 bytes can be programmed.  The "efuse2aes" map is
++	  fixed by hardware, e.g. bytes 0~4 hold aes key0, bytes 5~8 aes key1.
++
++config FH_CLK_MISC
++	tristate "FH clk miscdev support"
++	default n
++	help
++	  To compile this driver as a module, choose M here: the module will
++	  be called fh_clk_miscdev.
++
+ config DS1682
+ 	tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
+ 	depends on I2C && EXPERIMENTAL
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index 5f03172c..f12fef3c 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -9,6 +9,7 @@ obj-$(CONFIG_AD525X_DPOT_SPI)	+= ad525x_dpot-spi.o
+ obj-$(CONFIG_INTEL_MID_PTI)	+= pti.o
+ obj-$(CONFIG_ATMEL_PWM)		+= atmel_pwm.o
+ obj-$(CONFIG_ATMEL_SSC)		+= atmel-ssc.o
++obj-$(CONFIG_ATMEL_ACW)		+= atmel-acw.o
+ obj-$(CONFIG_ATMEL_TCLIB)	+= atmel_tclib.o
+ obj-$(CONFIG_BMP085)		+= bmp085.o
+ obj-$(CONFIG_ICS932S401)	+= ics932s401.o
+@@ -31,6 +32,15 @@ obj-$(CONFIG_ISL29003)		+= isl29003.o
+ obj-$(CONFIG_ISL29020)		+= isl29020.o
+ obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
+ obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
++obj-$(CONFIG_FH_PINCTRL)		+= fh_pinctrl_dev.o
++obj-$(CONFIG_FH_I2S_MASTER)		+= fh_dw_i2s.o
++obj-$(CONFIG_FH_I2S_SLAVE)		+= fh_i2s.o
++obj-$(CONFIG_FH_SADC)		+= fh_sadc.o
++
++obj-$(CONFIG_FH_FIRMWARE_LOADER) += fh_fw_loader.o
++
++obj-$(CONFIG_FH_EFUSE)		+= fh_efuse.o
++
+ obj-$(CONFIG_DS1682)		+= ds1682.o
+ obj-$(CONFIG_TI_DAC7512)	+= ti_dac7512.o
+ obj-$(CONFIG_C2PORT)		+= c2port/
+@@ -46,3 +56,5 @@ obj-y				+= ti-st/
+ obj-$(CONFIG_AB8500_PWM)	+= ab8500-pwm.o
+ obj-y				+= lis3lv02d/
+ obj-y				+= carma/
++obj-$(CONFIG_FH_DMAC_MISC) += fh_dma_miscdev.o
++obj-$(CONFIG_FH_CLK_MISC) += fh_clk_miscdev.o
+diff --git a/drivers/misc/fh_clk_miscdev.c b/drivers/misc/fh_clk_miscdev.c
+new file mode 100644
+index 00000000..a6a7ca90
+--- /dev/null
++++ b/drivers/misc/fh_clk_miscdev.c
+@@ -0,0 +1,170 @@
++#include <linux/miscdevice.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/errno.h>
++#include <linux/printk.h>
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++#include <linux/clk.h>
++
++#include "fh_clk_miscdev.h"
++
++//#define FH_CLK_DEBUG
++
++#if defined(FH_CLK_DEBUG)
++#define PRINT_CLK_DBG(fmt, args...)   \
++	do                                \
++	{                                 \
++		printk("FH_CLK_DEBUG: "); \
++		printk(fmt, ##args);      \
++	} while (0)
++#else
++#define PRINT_CLK_DBG(fmt, args...) \
++	do                              \
++	{                               \
++	} while (0)
++#endif
++
++static int fh_clk_miscdev_open(struct inode *inode, struct file *file)
++{
++	return 0;
++}
++
++static int fh_clk_miscdev_release(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++static long fh_clk_miscdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++	int ret = -ENODEV;
++	struct clk *clk;
++	struct clk_usr uclk;
++
++	if (unlikely(_IOC_TYPE(cmd) != CLK_IOCTL_MAGIC))
++	{
++		pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
++			   __func__, _IOC_TYPE(cmd), -ENOTTY);
++		return -ENOTTY;
++	}
++
++	if (unlikely(_IOC_NR(cmd) > CLK_IOCTL_MAXNR))
++	{
++		pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
++			   __func__, _IOC_NR(cmd), -ENOTTY);
++		return -ENOTTY;
++	}
++
++	if (_IOC_DIR(cmd) & _IOC_READ)
++	{
++		ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
++	}
++	else if(_IOC_DIR(cmd) & _IOC_WRITE)
++	{
++		ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
++	}
++
++	if(ret)
++	{
++		pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
++			   __func__, _IOC_NR(cmd), -EACCES);
++		return -EACCES;
++	}
++
++	if (copy_from_user((void *)&uclk, (void __user *)arg,
++			sizeof(struct clk_usr)))
++		return -EFAULT;
++
++	switch(cmd)
++	{
++	case ENABLE_CLK:
++		clk = clk_get(NULL, uclk.name);
++		ret = PTR_ERR(clk);
++		if (!IS_ERR(clk))
++		{
++			clk_enable(clk);
++			ret = 0;
++		}
++		break;
++	case DISABLE_CLK:
++		clk = clk_get(NULL, uclk.name);
++		ret = PTR_ERR(clk);
++		if (!IS_ERR(clk))
++		{
++			clk_disable(clk);
++			ret = 0;
++		}
++		break;
++	case SET_CLK_RATE:
++		clk = clk_get(NULL, uclk.name);
++		ret = PTR_ERR(clk);
++		if(!IS_ERR(clk))
++		{
++			ret = clk_set_rate(clk, uclk.frequency);
++		}
++		PRINT_CLK_DBG("%s, set clk: %s, rate: %lu\n",
++				__func__, uclk.name, uclk.frequency);
++		break;
++	case GET_CLK_RATE:
++		clk = clk_get(NULL, uclk.name);
++		ret = PTR_ERR(clk);
++		if(!IS_ERR(clk))
++		{
++			uclk.frequency = clk_get_rate(clk);
++			ret = 0;
++		}
++		PRINT_CLK_DBG("%s, get clk: %s, rate: %lu\n",__func__,
++			uclk.name, uclk.frequency);
++		if (copy_to_user((void __user *)arg, (void *)&uclk,
++				sizeof(struct clk_usr)))
++			return -EFAULT;
++		break;
++	default:
++		ret = -ENOTTY;
++	}
++
++	return ret;
++}
++
++static const struct file_operations fh_clk_fops =
++{
++	.owner 			= THIS_MODULE,
++	.open 			= fh_clk_miscdev_open,
++	.release 		= fh_clk_miscdev_release,
++	.unlocked_ioctl 	= fh_clk_miscdev_ioctl,
++};
++
++static struct miscdevice fh_clk_miscdev =
++{
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = DEVICE_NAME,
++	.fops = &fh_clk_fops,
++};
++
++int __init fh_clk_miscdev_init(void)
++{
++	int err;
++
++	err = misc_register(&fh_clk_miscdev);
++
++	if (err < 0)
++	{
++		pr_err("%s: ERROR: %s registration failed, ret=%d\n",
++			   __func__, DEVICE_NAME, err);
++		return -ENXIO;
++	}
++
++	pr_info("CLK misc driver init successfully\n");
++	return 0;
++}
++
++
++static void __exit fh_clk_miscdev_exit(void)
++{
++	misc_deregister(&fh_clk_miscdev);
++}
++module_init(fh_clk_miscdev_init);
++module_exit(fh_clk_miscdev_exit);
++
++MODULE_AUTHOR("QIN");
++MODULE_DESCRIPTION("Misc Driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform: FH");
+diff --git a/drivers/misc/fh_clk_miscdev.h b/drivers/misc/fh_clk_miscdev.h
+new file mode 100644
+index 00000000..ce405554
+--- /dev/null
++++ b/drivers/misc/fh_clk_miscdev.h
+@@ -0,0 +1,10 @@
++
++#ifndef FH_CLK_MISCDEV_H_
++#define FH_CLK_MISCDEV_H_
++
++#include <mach/clock.h>
++
++#define DEVICE_NAME		"fh_clk_miscdev"
++
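++/*
++ * Userspace usage sketch (illustrative; it assumes the ioctl codes and
++ * struct clk_usr come from <mach/clock.h>, and that the misc device node
++ * appears as /dev/fh_clk_miscdev):
++ *
++ *   struct clk_usr uclk;
++ *   int fd = open("/dev/fh_clk_miscdev", O_RDWR);
++ *   strcpy(uclk.name, "i2c0_clk");
++ *   ioctl(fd, GET_CLK_RATE, &uclk);  // uclk.frequency now holds the rate
++ *   uclk.frequency = 24000000;
++ *   ioctl(fd, SET_CLK_RATE, &uclk);
++ *   close(fd);
++ */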
++
++#endif /* FH_CLK_MISCDEV_H_ */
+diff --git a/drivers/misc/fh_dma_miscdev.c b/drivers/misc/fh_dma_miscdev.c
+new file mode 100644
+index 00000000..7fad61de
+--- /dev/null
++++ b/drivers/misc/fh_dma_miscdev.c
+@@ -0,0 +1,363 @@
++#include <linux/miscdevice.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/errno.h>
++#include <linux/printk.h>
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++
++
++#include "fh_dma_miscdev.h"
++
++#define MEMCPY_UNIT     (4095 * 4 * 64)         /* 4095 xfers * 4 bytes * 64 descs = 1048320 bytes */
++
++//#define FH_DMA_DEBUG
++
++#ifdef FH_DMA_DEBUG
++#define PRINT_DMA_DBG(fmt, args...)     \
++    do                              \
++    {                               \
++        printk("FH_DMA_DEBUG: ");   \
++        printk(fmt, ## args);       \
++    }                               \
++    while(0)
++#else
++#define PRINT_DMA_DBG(fmt, args...)  do { } while (0)
++#endif
++
++
++static void fh_dma_callback(void *data)
++{
++    PRINT_DMA_DBG("dma transfer done, end=%lu\n", jiffies);
++    complete(data);
++}
++
++static int kick_off_dma(struct dma_chan *channel, unsigned int src_offset, unsigned int dst_offset, unsigned int size)
++{
++    int ret;
++    struct completion cmp;
++    struct dma_async_tx_descriptor *dma_tx_desc = NULL;
++    struct dma_device *dma_dev = channel->device;
++    dma_cookie_t cookie;
++    unsigned long timeout;
++    unsigned long flag;
++
++    flag = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
++
++    PRINT_DMA_DBG("try to copy 0x%x bytes: 0x%x --> 0x%x\n", MEMCPY_UNIT, src_offset, dst_offset);
++
++    dma_tx_desc = dma_dev->device_prep_dma_memcpy(channel, dst_offset, src_offset, size, flag);
++
++    PRINT_DMA_DBG("device_prep_dma_memcpy end\n");
++
++    if(!dma_tx_desc)
++    {
++        pr_err("ERROR: %s, device_prep_dma_memcpy fail\n", __func__);
++        ret = -ENODEV;
++        return ret;
++    }
++
++    init_completion(&cmp);
++    dma_tx_desc->callback = fh_dma_callback;
++    dma_tx_desc->callback_param = &cmp;
++    PRINT_DMA_DBG("tx_submit start\n");
++    cookie = dma_tx_desc->tx_submit(dma_tx_desc);
++    PRINT_DMA_DBG("tx_submit end\n");
++    if (dma_submit_error(cookie))
++    {
++        pr_err("ERROR: %s, tx_submit fail\n", __func__);
++        ret = -ENODEV;
++        return ret;
++    }
++    PRINT_DMA_DBG("dma_async_issue_pending start\n");
++    dma_async_issue_pending(channel);
++    PRINT_DMA_DBG("dma_async_issue_pending end, %d\n", DMA_MEMCPY_TIMEOUT);
++
++    timeout = wait_for_completion_timeout(&cmp, msecs_to_jiffies(DMA_MEMCPY_TIMEOUT));
++
++    PRINT_DMA_DBG("wait_for_completion_timeout end, timeout: %lu\n", timeout);
++
++    if(!timeout)
++    {
++        pr_err("ERROR: %s, dma transfer fail, timeout\n", __func__);
++        ret = -ENODEV;
++        return ret;
++    }
++
++    ret = dma_async_is_tx_complete(channel, cookie, NULL, NULL);
++
++    if(ret)
++    {
++        pr_err("ERROR: %s, dma transfer fail, incorrect status: %d\n", __func__, ret);
++        ret = -ENODEV;
++        return ret;
++    }
++
++    return 0;
++}
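++
++/*
++ * Design note: kick_off_dma() is synchronous by construction -- it submits
++ * a single memcpy descriptor, then blocks for up to DMA_MEMCPY_TIMEOUT
++ * (5000 ms, see fh_dma_miscdev.h) on the completion signalled from
++ * fh_dma_callback().
++ */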
++
++
++static int fh_dma_start_transfer(struct dma_chan *channel, struct dma_memcpy* memcpy)
++{
++    int ret = 0;
++    unsigned int i;
++
++    for (i = 0; i < memcpy->size / MEMCPY_UNIT; i++)
++    {
++        ret = kick_off_dma(channel, memcpy->src_addr_phy + MEMCPY_UNIT*i, memcpy->dst_addr_phy + MEMCPY_UNIT*i, MEMCPY_UNIT);
++        if(ret)
++        {
++            return ret;
++        }
++    }
++
++    if (memcpy->size % MEMCPY_UNIT)
++        ret = kick_off_dma(channel, memcpy->src_addr_phy + MEMCPY_UNIT*i, memcpy->dst_addr_phy + MEMCPY_UNIT*i, memcpy->size % MEMCPY_UNIT);
++    return ret;
++}
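++
++/*
++ * Chunking arithmetic: MEMCPY_UNIT is 4095 * 4 * 64 = 1048320 bytes, the
++ * most one cyclic descriptor chain moves here.  A 2 MiB request
++ * (2097152 bytes), for example, becomes two full-unit transfers
++ * (2 * 1048320 = 2096640 bytes) plus one 512-byte tail transfer.
++ */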
++
++
++static bool chan_filter(struct dma_chan *chan, void *param)
++{
++    struct dma_memcpy *memcpy = param;
++
++    PRINT_DMA_DBG("chan_filter, channel id: %d\n", memcpy->chan_id);
++
++    /* a negative chan_id matches no channel */
++    if (memcpy->chan_id < 0)
++        return false;
++
++    return memcpy->chan_id == chan->chan_id;
++}
++
++static int fh_dma_memcpy(struct dma_memcpy* memcpy)
++{
++    /* FIXME: the ioctl should be atomic, otherwise the channel may change under us */
++    struct dma_chan *dma_channel;
++    dma_cap_mask_t mask;
++    int ret;
++
++    PRINT_DMA_DBG("fh_dma_memcpy start\n");
++    PRINT_DMA_DBG("ioctl, memcpy->size: 0x%x\n", memcpy->size);
++
++
++    PRINT_DMA_DBG("fh_dma_request_channel start\n");
++    dma_cap_zero(mask);
++    PRINT_DMA_DBG("dma_cap_zero end\n");
++    dma_cap_set(DMA_MEMCPY, mask);
++    PRINT_DMA_DBG("dma_cap_set end\n");
++
++    dma_channel = dma_request_channel(mask, chan_filter, memcpy);
++
++    PRINT_DMA_DBG("dma_request_channel finished, channel_addr: 0x%x\n", (u32)dma_channel);
++
++    if(!dma_channel)
++    {
++        pr_err("ERROR: %s, No Channel Available, channel: %d\n", __func__, memcpy->chan_id);
++        return -EBUSY;
++    }
++    memcpy->chan_id = dma_channel->chan_id;
++    PRINT_DMA_DBG("dma channel name: %s\n", dma_chan_name(dma_channel));
++
++    ret = fh_dma_start_transfer(dma_channel, memcpy);
++
++    if(ret)
++    {
++        pr_err("ERROR: %s, DMA Xfer Failed\n", __func__);
++    }
++
++    dma_channel->device->device_free_chan_resources(dma_channel);
++    dma_release_channel(dma_channel);
++
++    return ret;
++}
++
++static long fh_dma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++    int ret = 0;
++    struct dma_memcpy memcpy;
++
++
++    if (unlikely(_IOC_TYPE(cmd) != DMA_IOCTL_MAGIC))
++    {
++        pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
++               __func__, _IOC_TYPE(cmd), -ENOTTY);
++        return -ENOTTY;
++    }
++
++    if (unlikely(_IOC_NR(cmd) > DMA_IOCTL_MAXNR))
++    {
++        pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
++               __func__, _IOC_NR(cmd), -ENOTTY);
++        return -ENOTTY;
++    }
++
++    if (_IOC_DIR(cmd) & _IOC_READ)
++    {
++        ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
++    }
++    else if(_IOC_DIR(cmd) & _IOC_WRITE)
++    {
++        ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
++    }
++
++    if(ret)
++    {
++        pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
++               __func__, _IOC_NR(cmd), -EACCES);
++        return -EACCES;
++    }
++
++    switch(cmd)
++    {
++
++    case DMA_MEMCOPY:
++		if(copy_from_user((void *)&memcpy,
++							(void __user *)arg,
++							sizeof(struct dma_memcpy)))
++		{
++			return -EFAULT;
++		}
++        ret = fh_dma_memcpy(&memcpy);
++        break;
++    }
++
++    return ret;
++}
++
++static int fh_dma_open(struct inode *inode, struct file *file)
++{
++    PRINT_DMA_DBG("fh_dma_open\n");
++    return 0;
++}
++
++static int fh_dma_release(struct inode *inode, struct file *filp)
++{
++    PRINT_DMA_DBG("fh_dma_release\n");
++    return 0;
++}
++
++
++static void *v_seq_start(struct seq_file *s, loff_t *pos)
++{
++    static unsigned long counter = 0;
++    if (*pos == 0)
++        return &counter;
++    else
++    {
++        *pos = 0;
++        return NULL;
++    }
++}
++
++static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++    (*pos)++;
++    return NULL;
++}
++
++static void v_seq_stop(struct seq_file *s, void *v)
++{
++
++}
++
++static int v_seq_show(struct seq_file *sfile, void *v)
++{
++
++    seq_printf(sfile, "\nISP Status\n");
++    seq_printf(sfile, "\nCTRL: \n");
++
++#if 0
++    int i;
++    u32 data;
++    seq_printf(sfile, "ipf reg:\n");
++    for(i=0; i<10; i++)
++    {
++        data = GET_IPF_REG_V(i*4);
++        seq_printf(sfile, "0x%05x, 0x%08x\n", i*4, data);
++    }
++#endif
++
++    return 0;
++}
++
++static const struct seq_operations fh_dma_seq_ops =
++{
++    .start = v_seq_start,
++    .next = v_seq_next,
++    .stop = v_seq_stop,
++    .show = v_seq_show
++};
++
++static int fh_dma_proc_open(struct inode *inode, struct file *file)
++{
++    return seq_open(file, &fh_dma_seq_ops);
++}
++
++static const struct file_operations fh_dma_proc_ops =
++{
++    .owner = THIS_MODULE,
++    .open = fh_dma_proc_open,
++    .read = seq_read,
++    .llseek = seq_lseek,
++    .release = seq_release,
++};
++
++static const struct file_operations fh_dma_fops =
++{
++    .owner                  = THIS_MODULE,
++    .open                   = fh_dma_open,
++    .release                = fh_dma_release,
++    .unlocked_ioctl         = fh_dma_ioctl,
++};
++
++static struct miscdevice fh_dma_device =
++{
++    .minor = MISC_DYNAMIC_MINOR,
++    .name = DEVICE_NAME,
++    .fops = &fh_dma_fops,
++};
++
++static int __init fh_dma_init(void)
++{
++    int ret;
++    struct proc_dir_entry *proc_file;
++    ret = misc_register(&fh_dma_device);
++
++    if(ret < 0)
++    {
++        pr_err("%s: ERROR: %s registration failed",
++            __func__, DEVICE_NAME);
++        return -ENXIO;
++    }
++
++    proc_file = create_proc_entry(PROC_FILE, 0644, NULL);
++
++    if (proc_file)
++        proc_file->proc_fops = &fh_dma_proc_ops;
++    else
++        pr_err("%s: ERROR: %s proc file create failed",
++               __func__, DEVICE_NAME);
++
++
++    return ret;
++}
++
++static void __exit fh_dma_exit(void)
++{
++    remove_proc_entry(PROC_FILE, NULL);
++    misc_deregister(&fh_dma_device);
++}
++module_init(fh_dma_init);
++module_exit(fh_dma_exit);
++
++MODULE_AUTHOR("QIN");
++MODULE_DESCRIPTION("Misc Driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform: FH");
+diff --git a/drivers/misc/fh_dma_miscdev.h b/drivers/misc/fh_dma_miscdev.h
+new file mode 100644
+index 00000000..c294c331
+--- /dev/null
++++ b/drivers/misc/fh_dma_miscdev.h
+@@ -0,0 +1,32 @@
++
++#ifndef FH_DMA_MISCDEV_H_
++#define FH_DMA_MISCDEV_H_
++
++#include <linux/dmaengine.h>
++
++
++#define DEVICE_NAME                 "fh_dma_misc"
++#define PROC_FILE                   "driver/dma_misc"
++
++#define DMA_IOCTL_MAGIC             'd'
++#define RESERVED                    _IO(DMA_IOCTL_MAGIC, 0)
++#define REQUEST_CHANNEL             _IOWR(DMA_IOCTL_MAGIC, 1, __u32)
++#define DMA_MEMCOPY                 _IOWR(DMA_IOCTL_MAGIC, 2, __u32)
++
++#define DMA_IOCTL_MAXNR             14
++
++#define DMA_MEMCPY_TIMEOUT          5000 //msec
++
++struct dma_memcpy
++{
++    int chan_id;
++    void *src_addr_vir;
++    void *dst_addr_vir;
++    unsigned int size;
++    unsigned int src_addr_phy;
++    unsigned int dst_addr_phy;
++};
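++
++/*
++ * Usage sketch (illustrative): DMA_MEMCOPY takes *physical* addresses, so
++ * the caller must already know the physical location of both buffers
++ * (e.g. a reserved memory region).  The misc node typically appears as
++ * /dev/fh_dma_misc:
++ *
++ *   struct dma_memcpy req = {
++ *       .chan_id      = 2,         // a negative id matches no channel
++ *       .src_addr_phy = src_phys,  // caller-provided physical addresses
++ *       .dst_addr_phy = dst_phys,
++ *       .size         = len,
++ *   };
++ *   int fd = open("/dev/fh_dma_misc", O_RDWR);
++ *   ioctl(fd, DMA_MEMCOPY, &req);
++ *   close(fd);
++ */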
++
++
++
++#endif /* FH_DMA_MISCDEV_H_ */
+diff --git a/drivers/misc/fh_dw_i2s.c b/drivers/misc/fh_dw_i2s.c
+new file mode 100644
+index 00000000..e9d4689f
+--- /dev/null
++++ b/drivers/misc/fh_dw_i2s.c
+@@ -0,0 +1,1624 @@
++/**@file
++ * @Copyright (c) 2016 Shanghai Fullhan Microelectronics Co., Ltd.
++ * @brief
++ *
++ * @author      fullhan
++ * @date        2016-7-15
++ * @version     V1.0
++ * @version     V1.1  modify code style
++ * @note: misc audio driver for fh8830 embedded audio codec.
++ * @note History:
++ * @note     <author>   <time>    <version >   <desc>
++ * @note
++ * @warning: the codec is fixed at 24 bit, so remember to convert the 24-bit data to 16 bit in
++ *   the application layer; the next CPU version will solve this bug.
++ */
++
++
++#include <linux/irqreturn.h>
++#include <linux/clk.h>
++#include <linux/device.h>
++#include <linux/file.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/spinlock.h>
++#include <linux/uaccess.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/types.h>
++#include <linux/poll.h>
++#include <linux/ioctl.h>
++#include <linux/i2c.h>
++#include <linux/workqueue.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <mach/fh_dmac.h>
++#include <mach/fh_predefined.h>
++
++#include "ac.h"
++struct i2c_adapter *codec_i2c_adapter;
++#define NR_DESCS_PER_CHANNEL 64
++#define DW_I2S_FH8833 0xf0900000
++#define FIX_SAMPLE_BIT       32
++
++#define ACW_HW_NUM_RX  0
++#define ACW_HW_NUM_TX  1
++#define ACW_DMA_CAP_CHANNEL 3
++#define ACW_DMA_PAY_CHANNEL 2
++
++#define ACW_CTRL 						0x0
++#define ACW_TXFIFO_CTRL 				0x4
++#define ACW_RXFIFO_CTRL 				0x8
++#define ACW_STATUS             			0x0c
++#define ACW_DAT_CTL				  		0x10
++#define ACW_DBG_CTL					  	0x14
++#define ACW_STATUS1					  	0x18
++#define ACW_STATUS2						0x1c
++
++#define ACW_DACL_FIFO 					0xf0a00100
++#define ACW_DACR_FIFO 					0xf0a00100
++#define ACW_ADC_FIFO					0xf0a00200
++
++#define AUDIO_DMA_PREALLOC_SIZE (128 * 1024)
++
++#define ACW_INTR_RX_UNDERFLOW   0x10000
++#define ACW_INTR_RX_OVERFLOW    0x20000
++#define ACW_INTR_TX_UNDERFLOW   0x40000
++#define ACW_INTR_TX_OVERFLOW    0x80000
++
++//#define FH_AUDIO_DEBUG
++#ifdef FH_AUDIO_DEBUG
++#define PRINT_AUDIO_DBG(fmt, args...)     \
++    do                              \
++    {                               \
++        printk("FH_AUDIO_DEBUG: ");   \
++        printk(fmt, ## args);       \
++    }                               \
++    while(0)
++#else
++#define PRINT_AUDIO_DBG(fmt, args...)  do { } while (0)
++#endif
++
++enum audio_type
++{
++    capture = 0,
++    playback,
++};
++
++int i2c_write_codec(u8 addr, u8 data)
++{
++	struct i2c_msg msgs[1];
++	u8 send[2];
++
++	send[0] = addr;
++	send[1] = data;
++	msgs[0].addr = 0x1b;	/* codec i2c slave address */
++	msgs[0].flags = 0;
++	msgs[0].len = 2;
++	msgs[0].buf = send;
++
++	return i2c_transfer(codec_i2c_adapter, msgs, 1);
++}
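++
++/*
++ * Example (illustrative; the register map is codec-specific):
++ * i2c_write_codec(0x00, 0x12) writes the value 0x12 to codec register
++ * 0x00 at the fixed 7-bit slave address 0x1b.
++ */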
++
++enum audio_state
++{
++    STATE_NORMAL = 0,
++    STATE_XRUN,
++    STATE_STOP,
++    STATE_RUN,
++    STATE_PAUSE
++};
++
++struct infor_record_t
++{
++	int record_pid;
++	int play_pid;
++} infor_record;
++
++struct audio_config {
++	int rate;
++	int volume;
++	enum io_select io_type;
++    int frame_bit;
++    int channels;
++    int buffer_size;
++    int period_size;
++    int buffer_bytes;
++    int period_bytes;
++    int start_threshold;
++    int stop_threshold;
++};
++
++struct audio_ptr_t
++{
++	struct audio_config cfg;
++    enum audio_state state;
++    long size;
++    int hw_ptr;
++    int appl_ptr;
++    spinlock_t lock;
++    struct device dev;
++    u8 *area; /*virtual pointer*/
++    dma_addr_t addr; /*physical address*/
++    u8 * mmap_addr;
++};
++
++struct fh_audio_cfg
++{
++    struct audio_ptr_t capture;
++    struct audio_ptr_t playback;
++    wait_queue_head_t readqueue;
++    wait_queue_head_t writequeue;
++	struct semaphore sem_capture;
++	struct semaphore sem_playback;
++};
++
++struct fh_dma_chan
++{
++    struct dma_chan     *chan;
++    void __iomem        *ch_regs;
++    u8          mask;
++    u8          priority;
++    bool            paused;
++    bool                initialized;
++    spinlock_t      lock;
++    /* these other elements are all protected by lock */
++    unsigned long       flags;
++    dma_cookie_t        completed;
++    struct list_head    active_list;
++    struct list_head    queue;
++    struct list_head    free_list;
++    struct fh_cyclic_desc   *cdesc;
++    unsigned int        descs_allocated;
++};
++
++struct fh_acw_dma_transfer
++{
++    struct dma_chan *chan;
++    struct fh_dma_slave cfg;
++    struct scatterlist sgl;
++    struct dma_async_tx_descriptor *desc;
++};
++
++struct channel_assign
++{
++	int capture_channel;
++	int playback_channel;
++};
++
++struct audio_dev
++{
++	struct channel_assign channel_assign;
++    struct fh_audio_cfg audio_config;
++    struct miscdevice fh_audio_miscdev;
++};
++
++static const struct file_operations acw_fops;
++
++static struct audio_dev fh_audio_dev =
++{
++    .channel_assign = {
++        .capture_channel = ACW_DMA_CAP_CHANNEL,
++        .playback_channel = ACW_DMA_PAY_CHANNEL,
++    },
++    .fh_audio_miscdev = {
++        .fops       = &acw_fops,
++        .name       = "fh_audio",
++        .minor      = MISC_DYNAMIC_MINOR,
++    }
++
++};
++
++static struct
++{
++    spinlock_t      lock;
++    void __iomem        *regs;
++    struct clk      *clk;
++    unsigned long       in_use;
++    unsigned long       next_heartbeat;
++    struct timer_list   timer;
++    int         expect_close;
++    int         irq;
++} fh_audio_module;
++//#define STERO
++static struct fh_dma_chan *dma_rx_transfer = NULL;
++static struct fh_dma_chan *dma_tx_transfer = NULL;
++#ifdef 	STERO
++static struct fh_dma_chan *dma_tx_right_transfer = NULL;
++#endif
++static struct work_struct playback_wq;
++
++static struct audio_param_store
++{
++    int input_volume;
++    enum io_select input_io_type;
++} audio_param_store;
++
++#ifdef 	STERO
++static void fh_acw_tx_right_dma_done(void *arg);
++#define TX_RIGHT_CHANNEL_DMA_CHANNEL 0
++#endif
++static void fh_acw_tx_dma_done(void *arg);
++static void fh_acw_rx_dma_done(void *arg);
++static bool  fh_acw_dma_chan_filter(struct dma_chan *chan, void *filter_param);
++static void create_proc(void);
++static void remove_proc(void);
++
++void fh_acw_stop_playback(struct fh_audio_cfg *audio_config)
++{
++	unsigned int status;
++    if(audio_config->playback.state == STATE_STOP)
++    {
++        return;
++    }
++    audio_config->playback.state = STATE_STOP;
++    status = readl(fh_audio_module.regs + 0x8);
++    status &= ~(1 << 0);
++    writel(status, fh_audio_module.regs + 0x8); /* tx fifo disable */
++    fh_dma_cyclic_stop(dma_tx_transfer->chan);
++    fh_dma_cyclic_free(dma_tx_transfer->chan);
++#ifdef 	STERO
++    fh_dma_cyclic_stop(dma_tx_right_transfer->chan);
++    fh_dma_cyclic_free(dma_tx_right_transfer->chan);
++#endif
++    up(&audio_config->sem_playback);
++}
++
++void fh_acw_stop_capture(struct fh_audio_cfg *audio_config)
++{
++
++    u32 status;
++
++    if(audio_config->capture.state == STATE_STOP)
++    {
++        return;
++    }
++//    rx_status = readl( fh_audio_module.regs + ACW_RXFIFO_CTRL);//clear rx fifo
++//    rx_status =  rx_status|(1<<4);
++//    writel(rx_status,fh_audio_module.regs + ACW_RXFIFO_CTRL);
++
++    audio_config->capture.state = STATE_STOP;
++
++    status = readl(fh_audio_module.regs + 0x4);
++    status &= ~(1 << 0);
++    writel(status, fh_audio_module.regs + 0x4); /* rx fifo disable */
++
++    fh_dma_cyclic_stop(dma_rx_transfer->chan);
++    fh_dma_cyclic_free(dma_rx_transfer->chan);
++    up(&audio_config->sem_capture);
++}
++
++void switch_io_type(enum audio_type type, enum io_select io_type)
++{
++#if 0
++#ifndef CONFIG_MACH_FH8830_FPGA
++	int reg;
++    if (capture == type)
++    {
++        reg = readl(fh_audio_module.regs + ACW_ADC_PATH_CTRL);
++        if (mic_in == io_type)
++        {
++            printk(KERN_INFO"audio input changed to mic_in\n");
++            writel( reg & (~(1<<1)),fh_audio_module.regs + ACW_ADC_PATH_CTRL);
++        }
++        else if (line_in == io_type)
++        {
++            printk(KERN_INFO"audio input changed to line_in\n");
++            writel(reg | (1<<1), fh_audio_module.regs + ACW_ADC_PATH_CTRL);
++        }
++    }
++    else
++    {
++        reg = readl(fh_audio_module.regs + ACW_DAC_PATH_CTRL);
++        if (speaker_out == io_type)
++        {
++            printk(KERN_INFO"audio output changed to speaker_out\n");
++            reg = reg & (~(3<<21));
++            writel(reg, fh_audio_module.regs + ACW_DAC_PATH_CTRL);
++            reg = reg | (1<<21);
++            writel(reg,fh_audio_module.regs + ACW_DAC_PATH_CTRL);
++            reg = reg | (1<<18);
++            writel(reg, fh_audio_module.regs + ACW_DAC_PATH_CTRL);/*unmute speaker*/
++            reg = reg | (3<<30);
++            writel(reg,fh_audio_module.regs + ACW_DAC_PATH_CTRL);/*mute line out*/
++        }
++        else if (line_out == io_type)
++        {
++            printk(KERN_INFO"audio output changed to line_out\n");
++            reg = reg & (~(3<<21));
++            writel(reg, fh_audio_module.regs + ACW_DAC_PATH_CTRL);/*mute speaker*/
++            reg = reg & (~(3<<30));
++            writel(reg, fh_audio_module.regs + ACW_DAC_PATH_CTRL);/*unmute line out*/
++        }
++    }
++#endif
++#endif
++}
++
++int get_factor_from_table(int rate)
++{
++    int factor;
++    switch(rate)
++    {
++        case 8000:
++            factor = 4;
++            break;
++        case 16000:
++            factor = 1;
++            break;
++        case 32000:
++            factor = 0;
++            break;
++        case 44100:
++            factor = 13;
++            break;
++        case 48000:
++            factor = 6;
++            break;
++        default:
++            factor = -EFAULT;
++            break;
++    }
++    return factor;
++}
++
++void switch_rate(enum audio_type type, int rate)
++{
++#if 0 
++#ifndef CONFIG_MACH_FH8830_FPGA
++	int reg, factor;
++    factor = get_factor_from_table(rate);
++    if (factor < 0)
++    {
++        printk(KERN_ERR "unsupported sample rate\n");
++        return;
++    }
++    reg = readl(fh_audio_module.regs + ACW_DIG_IF_CTRL);
++    if (capture == type)
++    {
++        printk(KERN_INFO"capture rate set to %d\n", rate);
++        reg = reg & (~(0xf<<0));
++        writel(reg, fh_audio_module.regs + ACW_DIG_IF_CTRL);/*adc and dac sample rate*/
++        reg = reg | (factor<<0);
++        writel(reg,fh_audio_module.regs + ACW_DIG_IF_CTRL);
++    }
++    else
++    {
++        printk(KERN_INFO"playback rate set to %d\n", rate);
++        reg = reg & (~(0xf<<8));
++        writel(reg, fh_audio_module.regs + ACW_DIG_IF_CTRL);/*adc and dac sample rate*/
++        reg = reg | (factor<<8);
++        writel(reg, fh_audio_module.regs + ACW_DIG_IF_CTRL);
++    }
++#endif
++#endif
++}
++
++int get_param_from_volume(int volume)
++{
++    int param, max_param, min_param, max_volume;
++
++    max_volume = 100;
++    if (volume < 0 || volume > max_volume)
++    {
++        printk(KERN_ERR "unsupported input volume\n");
++        return -EINVAL;
++    }
++    max_param = 63;
++    min_param = 0;
++    param = max_param - (max_volume - volume);
++    if (param <= min_param)
++    {
++        param = min_param;
++    }
++    return param;
++}
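++
++/*
++ * Worked example: with max_volume = 100 and max_param = 63 the mapping is
++ * param = volume - 37, clamped at min_param.  So volume 100 -> param 63,
++ * volume 50 -> param 13, and any volume <= 37 -> param 0.
++ */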
++
++void switch_input_volume(int volume)
++{
++#if 0
++#ifndef CONFIG_MACH_FH8830_FPGA
++    int reg, param;
++    param = get_param_from_volume(volume);
++    if (param < 0)
++    {
++        return;
++    }
++    printk(KERN_INFO"capture volume set to %d\n", volume);
++	reg = readl(fh_audio_module.regs + ACW_ADC_PATH_CTRL);
++    reg = reg & (~(0x3f<<8));
++    writel(reg, fh_audio_module.regs + ACW_ADC_PATH_CTRL);
++    reg = reg | (param<<8);
++    writel(reg,fh_audio_module.regs + ACW_ADC_PATH_CTRL);
++#endif
++#endif
++}
++
++void init_audio(enum audio_type type, struct fh_audio_cfg *audio_config)
++{
++	writel(0x1, fh_audio_module.regs + 0x0);
++#ifndef CONFIG_MACH_FH8830_FPGA
++	/* int reg; -- only needed by the register setup kept commented out below */
++//    reg = readl(fh_audio_module.regs + ACW_CTRL);
++//    if ((reg & 0x80000000) == 0)
++//    {
++//        writel(0x80000000, fh_audio_module.regs + ACW_CTRL);/*enable audio*/
++//    }
++//    reg = readl(fh_audio_module.regs + ACW_MISC_CTRL);
++//    if (0x40400 != reg)
++//    {
++//        writel(0x40400,fh_audio_module.regs + ACW_MISC_CTRL);/*misc ctl*/
++//    }
++//    if (capture == type)
++//    {
++//        writel(0x61141b06,fh_audio_module.regs + ACW_ADC_PATH_CTRL);/*adc cfg*/
++//        writel(0x167f2307, fh_audio_module.regs + ACW_ADC_ALC_CTRL);/*adc alc*/
++//        writel(0, fh_audio_module.regs + ACW_RXFIFO_CTRL);/*rx fifo disable*/
++//        switch_input_volume(audio_config->capture.cfg.volume);
++//        switch_rate(capture, audio_config->capture.cfg.rate);
++//        switch_io_type(capture, audio_config->capture.cfg.io_type);
++//    }
++//    else
++//    {
++//        writel(0x3b403f09, fh_audio_module.regs + ACW_DAC_PATH_CTRL);/*dac cfg*/
++//        writel(0, fh_audio_module.regs + ACW_TXFIFO_CTRL);/*tx fifo disable*/
++//        switch_rate(playback, audio_config->playback.cfg.rate);
++//        switch_io_type(playback, audio_config->playback.cfg.io_type);
++//    }
++#endif
++
++}
++
++static inline long bytes_to_frames(int frame_bit, int bytes)
++{
++    return bytes * 8 / frame_bit;
++}
++
++static inline long frames_to_bytes(int frame_bit, int frames)
++{
++    return frames * frame_bit / 8;
++}
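++
++/*
++ * Example: with frame_bit = 32 (the fixed 32-bit frame, FIX_SAMPLE_BIT),
++ * a 4096-byte period is 4096 * 8 / 32 = 1024 frames, and 1024 frames map
++ * back to 1024 * 32 / 8 = 4096 bytes.
++ */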
++
++int avail_data_len(enum audio_type type,struct fh_audio_cfg *stream)
++{
++    int delta;
++    if (capture == type)
++    {
++        spin_lock(&stream->capture.lock);
++        delta = stream->capture.hw_ptr - stream->capture.appl_ptr;
++        spin_unlock(&stream->capture.lock);
++        if (delta < 0)
++        {
++            delta += stream->capture.size;
++        }
++        return delta;
++    }
++    else
++    {
++        spin_lock(&stream->playback.lock);
++        delta = stream->playback.appl_ptr - stream->playback.hw_ptr;
++        spin_unlock(&stream->playback.lock);
++        if (delta < 0)
++        {
++            delta += stream->playback.size;
++        }
++        return stream->playback.size - delta;
++    }
++}
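++
++/*
++ * Ring-buffer example: for capture with size = 8192, hw_ptr = 1024 and
++ * appl_ptr = 7168, delta = 1024 - 7168 = -6144, wrapped to -6144 + 8192 =
++ * 2048 bytes of captured data ready for the reader.  For playback the
++ * same wrap yields the free space left for the application to write.
++ */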
++
++static int fh_audio_close(struct inode *ip, struct file *fp)
++{
++	struct miscdevice *miscdev = fp->private_data;
++	struct audio_dev	*dev = container_of(miscdev, struct audio_dev, fh_audio_miscdev);
++	struct fh_audio_cfg *audio_config = &dev->audio_config;
++	int pid;
++
++	pid = current->tgid;
++//#ifndef CONFIG_MACH_FH8830_FPGA
++//	//disable interrupts
++//	u32 reg;
++//    reg = readl(fh_audio_module.regs + ACW_CTRL);
++//    reg &= ~(0x3ff);
++//    writel(reg, fh_audio_module.regs + ACW_CTRL);
++//#endif
++	if (infor_record.play_pid == pid)
++	{
++		fh_acw_stop_playback(audio_config);
++	}
++	if (infor_record.record_pid == pid)
++	{
++		fh_acw_stop_capture(audio_config);
++	}
++	return 0;
++}
++
++int register_tx_dma(struct fh_audio_cfg  *audio_config)
++{
++	int ret;
++	struct fh_dma_slave *tx_config;
++
++	tx_config =  kzalloc(sizeof(struct fh_dma_slave), GFP_KERNEL);
++	if (!tx_config)
++	{
++		return -ENOMEM;
++	}
++#ifdef STERO
++	//right channel
++	struct dma_slave_config *tx_config_right;
++	tx_config_right =  kzalloc(sizeof(struct dma_slave_config), GFP_KERNEL);
++	if (!tx_config_right)
++	{
++		kfree(tx_config);
++		return -ENOMEM;
++	}
++	tx_config_right->slave_id = ACW_HW_NUM_TX;
++	tx_config_right->src_maxburst = FH_DMA_MSIZE_8;
++	tx_config_right->dst_maxburst = FH_DMA_MSIZE_8;
++	tx_config_right->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++	tx_config_right->device_fc = TRUE;
++	tx_config_right->dst_addr = ACW_DACR_FIFO;
++
++	dma_tx_right_transfer->cdesc = fh_dma_cyclic_prep(dma_tx_right_transfer->chan, audio_config->playback.addr,
++				audio_config->playback.cfg.buffer_bytes, audio_config->playback.cfg.period_bytes, DMA_TO_DEVICE);
++
++	/* assumed: fh_dma_cyclic_prep() returns NULL or an ERR_PTR on failure */
++	if (IS_ERR_OR_NULL(dma_tx_right_transfer->cdesc))
++	{
++	    printk(KERN_ERR "cyclic desc err\n");
++	    ret = -ENOMEM;
++	    kfree(tx_config_right);
++	    goto fail;
++	}
++	dma_tx_right_transfer->cdesc->period_callback = fh_acw_tx_right_dma_done;
++	dma_tx_right_transfer->cdesc->period_callback_param = audio_config;
++	fh_dma_cyclic_start(dma_tx_right_transfer->chan);
++
++	kfree(tx_config_right);
++
++	/*must set NULL to tell DMA driver that we free the DMA slave*/
++	dma_tx_right_transfer->chan->private = NULL;
++#endif
++
++
++	tx_config->cfg_hi = FHC_CFGH_DST_PER(11);
++	tx_config->dst_msize = FH_DMA_MSIZE_8;
++	tx_config->src_msize = FH_DMA_MSIZE_8;
++	tx_config->reg_width = FH_DMA_SLAVE_WIDTH_32BIT;
++	tx_config->fc = FH_DMA_FC_D_M2P;
++	tx_config->tx_reg = 0xf09001c8;
++
++	dma_tx_transfer->chan->private =  tx_config;
++	if ((audio_config->playback.cfg.buffer_bytes < audio_config->playback.cfg.period_bytes) ||
++	(audio_config->playback.cfg.buffer_bytes <= 0) || (audio_config->playback.cfg.period_bytes <= 0) ||
++	(audio_config->playback.cfg.buffer_bytes/audio_config->playback.cfg.period_bytes > NR_DESCS_PER_CHANNEL))
++	{
++		printk(KERN_ERR "buffer_size and period_size are invalid\n");
++		ret = -EINVAL;
++		goto fail;
++	}
++
++	dma_tx_transfer->cdesc = fh_dma_cyclic_prep(dma_tx_transfer->chan, audio_config->playback.addr,
++			       audio_config->playback.cfg.buffer_bytes, audio_config->playback.cfg.period_bytes, DMA_MEM_TO_DEV);
++	if (IS_ERR_OR_NULL(dma_tx_transfer->cdesc))
++	{
++		printk(KERN_ERR "cyclic desc err\n");
++		ret = -ENOMEM;
++		goto fail;
++	}
++	dma_tx_transfer->cdesc->period_callback = fh_acw_tx_dma_done;
++	dma_tx_transfer->cdesc->period_callback_param = audio_config;
++	fh_dma_cyclic_start(dma_tx_transfer->chan);
++
++	kfree(tx_config);
++
++	/*must set NULL to tell DMA driver that we free the DMA slave*/
++	dma_tx_transfer->chan->private = NULL;
++	return 0;
++fail:
++	kfree(tx_config);
++	return ret;
++}
++
++int register_rx_dma( struct fh_audio_cfg  *audio_config)
++{
++    int ret;
++
++    struct fh_dma_slave *rx_config;
++    rx_config =  kzalloc(sizeof(struct fh_dma_slave), GFP_KERNEL);
++    if (!rx_config)
++    {
++        return -ENOMEM;
++    }
++
++    rx_config->cfg_hi = FHC_CFGH_SRC_PER(10);
++    rx_config->dst_msize = FH_DMA_MSIZE_8;
++    rx_config->src_msize = FH_DMA_MSIZE_8;
++    rx_config->reg_width = FH_DMA_SLAVE_WIDTH_32BIT;
++    rx_config->fc = FH_DMA_FC_D_P2M;
++    rx_config->rx_reg = 0xf09001c0;
++    dma_rx_transfer->chan->private =  rx_config;
++    if ((audio_config->capture.cfg.buffer_bytes < audio_config->capture.cfg.period_bytes) ||
++        (audio_config->capture.cfg.buffer_bytes <= 0) ||(audio_config->capture.cfg.period_bytes <= 0) ||
++        (audio_config->capture.cfg.buffer_bytes/audio_config->capture.cfg.period_bytes > NR_DESCS_PER_CHANNEL))
++    {
++        printk(KERN_ERR "buffer_size and period_size are invalid\n");
++        ret = -EINVAL;
++        goto fail;
++    }
++    dma_rx_transfer->cdesc = fh_dma_cyclic_prep(dma_rx_transfer->chan, audio_config->capture.addr,
++                               audio_config->capture.cfg.buffer_bytes, audio_config->capture.cfg.period_bytes, DMA_DEV_TO_MEM);
++    if (IS_ERR_OR_NULL(dma_rx_transfer->cdesc))
++    {
++        printk(KERN_ERR" cyclic desc err\n");
++        ret = -ENOMEM;
++        goto fail;
++    }
++    dma_rx_transfer->cdesc->period_callback = fh_acw_rx_dma_done;
++    dma_rx_transfer->cdesc->period_callback_param = audio_config;
++    fh_dma_cyclic_start(dma_rx_transfer->chan);
++
++    writel(0x1, fh_audio_module.regs + 0x4);   /* enable rx fifo */
++    writel(0x1, fh_audio_module.regs + 0x1c4); /* reset dma */
++    writel(0x1, fh_audio_module.regs + 0x14);  /* reset dma */
++    writel(0x1, fh_audio_module.regs + 0x28);  /* reset dma */
++
++    kfree(rx_config);
++    /*must set NULL to tell DMA driver that we free the DMA slave*/
++    dma_rx_transfer->chan->private = NULL;
++    return 0;
++fail:
++    kfree(rx_config);
++    return ret;
++}
++
++
++void playback_start_wq_handler(struct work_struct *work)
++{
++    int avail;
++    while(1)
++    {
++        if (STATE_STOP == fh_audio_dev.audio_config.playback.state)
++        {
++            return;
++        }
++        avail = avail_data_len(playback, &fh_audio_dev.audio_config);
++        if (avail > fh_audio_dev.audio_config.playback.cfg.period_bytes)
++        {
++            msleep(0);
++        }
++        else
++        {
++            writel(0x1, fh_audio_module.regs + 0x8);   /* enable tx fifo */
++            writel(0x1, fh_audio_module.regs + 0x1cc); /* reset dma */
++            writel(0x1, fh_audio_module.regs + 0x18);  /* reset dma */
++            writel(0x1, fh_audio_module.regs + 0x2c);  /* reset dma */
++            break;
++        }
++    }
++}
++
++int fh_acw_start_playback(struct fh_audio_cfg *audio_config)
++{
++    int ret;
++
++    if(audio_config->playback.state == STATE_RUN)
++    {
++        return 0;
++    }
++
++    if (audio_config->playback.cfg.buffer_bytes >= AUDIO_DMA_PREALLOC_SIZE)
++    {
++        printk("DMA prealloc buffer is smaller than  audio_config->buffer_bytes\n");
++        return -ENOMEM;
++    }
++    memset(audio_config->playback.area, 0, audio_config->playback.cfg.buffer_bytes);
++    audio_config->playback.size = audio_config->playback.cfg.buffer_bytes;
++    audio_config->playback.state = STATE_RUN;
++    ret = register_tx_dma(audio_config);
++    if (ret < 0)
++    {
++        return ret;
++    }
++    INIT_WORK(&playback_wq, playback_start_wq_handler);
++    schedule_work(&playback_wq);
++    return 0;
++}
++
++int fh_acw_start_capture(struct fh_audio_cfg *audio_config)
++{
++    if(audio_config->capture.state == STATE_RUN)
++    {
++        return 0;
++    }
++
++    if (audio_config->capture.cfg.buffer_bytes >= AUDIO_DMA_PREALLOC_SIZE)
++    {
++        printk(KERN_ERR "capture buffer_bytes exceeds the DMA prealloc buffer\n");
++        return -ENOMEM;
++    }
++    memset(audio_config->capture.area, 0, audio_config->capture.cfg.buffer_bytes);
++    audio_config->capture.size = audio_config->capture.cfg.buffer_bytes;
++
++    audio_config->capture.state = STATE_RUN;
++    return register_rx_dma(audio_config);
++}
++
++
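++/*
++ * Per-period cyclic DMA callbacks: advance hw_ptr around the ring buffer
++ * and wake any reader/writer sleeping in poll() once a full period of
++ * data (capture) or space (playback) is available.
++ */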
++static void fh_acw_rx_dma_done(void *arg)
++{
++    struct fh_audio_cfg *audio_config;
++
++    audio_config = (struct fh_audio_cfg *)arg;
++    printk(KERN_DEBUG "%s\n", __func__);
++    spin_lock(&audio_config->capture.lock);
++    audio_config->capture.hw_ptr += audio_config->capture.cfg.period_bytes;
++    if (audio_config->capture.hw_ptr > audio_config->capture.size )
++    {
++        audio_config->capture.hw_ptr = audio_config->capture.hw_ptr - audio_config->capture.size;
++    }
++    spin_unlock(&audio_config->capture.lock);
++    if (waitqueue_active(&audio_config->readqueue))
++    {
++        int avail = avail_data_len(capture,audio_config);
++        if (avail > audio_config->capture.cfg.period_bytes)
++        {
++            wake_up_interruptible(&audio_config->readqueue);
++        }
++    }
++
++}
++#ifdef STERO
++static void fh_acw_tx_right_dma_done(void *arg)
++{
++	printk(KERN_DEBUG "tx_right_dma_done\n");
++
++}
++#endif
++
++static void fh_acw_tx_dma_done(void *arg)
++{
++    struct fh_audio_cfg *audio_config;
++
++    audio_config = (struct fh_audio_cfg *)arg;
++    printk(KERN_DEBUG "%s\n", __func__);
++    spin_lock(&audio_config->playback.lock);
++    audio_config->playback.hw_ptr +=  audio_config->playback.cfg.period_bytes;
++    if (audio_config->playback.hw_ptr > audio_config->playback.size )
++    {
++        audio_config->playback.hw_ptr = audio_config->playback.hw_ptr - audio_config->playback.size;
++    }
++    spin_unlock(&audio_config->playback.lock);
++    if (waitqueue_active(&audio_config->writequeue))
++    {
++        int avail = avail_data_len(playback,audio_config);
++        if (avail > audio_config->playback.cfg.period_bytes)
++        {
++            wake_up_interruptible(&audio_config->writequeue);
++        }
++    }
++}
++
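++/*
++ * Filter passed to dma_request_channel(): accept only the DMA channel whose
++ * id matches the one assigned to this stream in channel_assign.
++ */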
++bool  fh_acw_dma_chan_filter(struct dma_chan *chan, void *filter_param)
++{
++    int dma_channel = *(int *)filter_param;
++    bool ret = false;
++
++    if (chan->chan_id == dma_channel)
++    {
++        ret = true;
++    }
++    return ret;
++}
++
++int arg_config_support(struct fh_audio_cfg_arg * cfg)
++{
++	int ret;
++
++	ret = get_param_from_volume(cfg->volume);
++	if (ret < 0) {
++		return -EINVAL;
++	}
++	ret = get_factor_from_table(cfg->rate);
++	if (ret < 0) {
++		return -EINVAL;
++	}
++    return 0;
++}
++
++void reset_dma_buff(enum audio_type type, struct fh_audio_cfg *audio_config)
++{
++    if (capture == type)
++    {
++        audio_config->capture.appl_ptr = 0;
++        audio_config->capture.hw_ptr = 0;
++    }
++    else
++    {
++        audio_config->playback.appl_ptr = 0;
++        audio_config->playback.hw_ptr = 0;
++    }
++}
++
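++/* one-shot guard: the codec PLL and wrapper format are programmed once,
++ * by whichever of capture/playback init runs first */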
++static unsigned int config_flag;
++static long fh_audio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++    struct fh_audio_cfg_arg cfg;
++    struct miscdevice *miscdev = filp->private_data;
++    struct audio_dev  *dev = container_of(miscdev, struct audio_dev, fh_audio_miscdev);
++    struct fh_audio_cfg  *audio_config = &dev->audio_config;
++    int ret;
++    int value,pid;
++    int __user *p = (int __user *)arg;
++    int rx_status;
++
++    pid = current->tgid;
++    switch (cmd)
++    {
++        case AC_INIT_CAPTURE_MEM:
++
++            if (copy_from_user((void *)&cfg, (void __user *)arg, sizeof(struct fh_audio_cfg_arg)))
++            {
++                printk(KERN_ERR "copy err\n");
++                return -EIO;
++            }
++            if (0 == arg_config_support(&cfg))
++            {
++                if (down_trylock(&audio_config->sem_capture))
++                {
++                    printk(KERN_ERR "another thread is running capture.\n");
++                    return -EBUSY;
++                }
++                infor_record.record_pid = pid;
++                audio_config->capture.cfg.io_type = cfg.io_type;
++                audio_config->capture.cfg.volume = cfg.volume;
++                audio_config->capture.cfg.rate = cfg.rate;
++                audio_config->capture.cfg.channels = cfg.channels;
++                audio_config->capture.cfg.buffer_size = cfg.buffer_size;
++                audio_config->capture.cfg.frame_bit = FIX_SAMPLE_BIT;
++                audio_config->capture.cfg.period_size = cfg.period_size;
++                audio_config->capture.cfg.buffer_bytes = frames_to_bytes(audio_config->capture.cfg.frame_bit,audio_config->capture.cfg.buffer_size);
++                audio_config->capture.cfg.period_bytes = frames_to_bytes(audio_config->capture.cfg.frame_bit,audio_config->capture.cfg.period_size);
++                audio_config->capture.cfg.start_threshold = audio_config->capture.cfg.buffer_bytes;
++                audio_config->capture.cfg.stop_threshold = audio_config->capture.cfg.buffer_bytes;
++                reset_dma_buff(capture, audio_config);
++                init_waitqueue_head(&audio_config->readqueue);
++                spin_lock_init(&audio_config->capture.lock);
++                init_audio(capture, audio_config);
++                audio_param_store.input_io_type = audio_config->capture.cfg.io_type;
++                audio_param_store.input_volume = audio_config->capture.cfg.volume;
++                /* configure the codec sample rate via the PLL (8 kHz here) */
++                codec_i2c_adapter = i2c_get_adapter(0);
++                i2c_write_codec(0x20, 0x00);
++                i2c_write_codec(0x22, 0x14);
++                i2c_write_codec(0x23, 0x55);
++                i2c_write_codec(0x24, 0x0);
++                msleep(1000);
++                i2c_write_codec(0x20, 0x01);
++
++                i2c_write_codec(0x10, 0x11);
++                i2c_write_codec(0x17, 0xf0);
++                i2c_write_codec(0x0, 0x01);
++                i2c_write_codec(0x1, 0x5);
++                i2c_write_codec(0x1f, 0x1);
++                i2c_write_codec(0x2, 0x10);
++                /* configure the audio wrapper work format */
++                writel(0x08, fh_audio_module.regs + 0x10);
++                writel(0x3, fh_audio_module.regs + 0x180);
++                writel(0x1f, fh_audio_module.regs + 0x188);
++                writel(0x1f, fh_audio_module.regs + 0x184);
++                writel(0x1, fh_audio_module.regs + 0x8);
++                writel(0x1, fh_audio_module.regs + 0xc);
++                config_flag = 1;
++            }
++            else
++            {
++                return -EINVAL;
++            }
++
++            break;
++        case AC_INIT_PLAYBACK_MEM:
++            if (copy_from_user((void *)&cfg, (void __user *)arg, sizeof(struct fh_audio_cfg_arg)))
++            {
++                printk(KERN_ERR "copy err\n");
++                return -EIO;
++            }
++            
++            if (0 == arg_config_support(&cfg))
++            {
++                if (down_trylock(&audio_config->sem_playback))
++                {
++                    printk(KERN_ERR "another thread is running playback.\n");
++                    return -EBUSY;
++                }
++                infor_record.play_pid = pid;
++                audio_config->playback.cfg.io_type = cfg.io_type;
++                audio_config->playback.cfg.volume = cfg.volume;
++                audio_config->playback.cfg.rate = cfg.rate;
++                audio_config->playback.cfg.channels = cfg.channels;
++                audio_config->playback.cfg.buffer_size = cfg.buffer_size;
++                audio_config->playback.cfg.frame_bit = FIX_SAMPLE_BIT;
++                audio_config->playback.cfg.period_size = cfg.period_size;
++                audio_config->playback.cfg.buffer_bytes = frames_to_bytes(audio_config->playback.cfg.frame_bit,audio_config->playback.cfg.buffer_size);
++                audio_config->playback.cfg.period_bytes = frames_to_bytes(audio_config->playback.cfg.frame_bit,audio_config->playback.cfg.period_size);
++                audio_config->playback.cfg.start_threshold = audio_config->playback.cfg.buffer_bytes;
++                audio_config->playback.cfg.stop_threshold = audio_config->playback.cfg.buffer_bytes;
++                reset_dma_buff(playback, audio_config);
++                codec_i2c_adapter = i2c_get_adapter(0);
++                if (!config_flag) {
++                    /* configure the codec sample rate via the PLL */
++                    i2c_write_codec(0x20, 0x0);
++                    i2c_write_codec(0x22, 0x14);
++                    i2c_write_codec(0x23, 0x55);
++                    i2c_write_codec(0x24, 0x0);
++                    msleep(1000);
++                    i2c_write_codec(0x20, 0x1);
++
++                    i2c_write_codec(0x10, 0x11);
++                    i2c_write_codec(0x17, 0xf0);
++                    i2c_write_codec(0x0, 0x01);
++                    i2c_write_codec(0x1, 0x5);
++                    i2c_write_codec(0x1f, 0x1);
++
++                    i2c_write_codec(0x2, 0x10);
++                    /* configure the audio wrapper work format */
++                    writel(0x08, fh_audio_module.regs + 0x10);
++                    writel(0x3, fh_audio_module.regs + 0x180);
++                    writel(0x1f, fh_audio_module.regs + 0x188);
++                    writel(0x1f, fh_audio_module.regs + 0x184);
++                    writel(0x1, fh_audio_module.regs + 0x8);
++                    writel(0x1, fh_audio_module.regs + 0xc);
++                    /* don't configure again */
++                    config_flag = 1;
++                }
++                init_waitqueue_head(&audio_config->writequeue);
++                spin_lock_init(&audio_config->playback.lock);
++                init_audio(playback, audio_config);
++            }
++            else
++            {
++                return -EINVAL;
++            }
++            break;
++        case AC_AI_EN:
++            if (infor_record.record_pid != pid){
++            	return -EBUSY;
++            }
++            return fh_acw_start_capture(audio_config);
++        case AC_AO_EN:
++			if (infor_record.play_pid != pid) {
++				return -EBUSY;
++			}
++			return fh_acw_start_playback(audio_config);
++        case AC_SET_VOL:
++			if (infor_record.record_pid != pid) {
++				return -EBUSY;
++			}
++            if (get_user(value, p))
++            {
++                return -EFAULT;
++            }
++			ret = get_param_from_volume(value);
++			if (ret < 0) {
++				return -EINVAL;
++			}
++            audio_param_store.input_volume = value;
++            switch_input_volume(audio_param_store.input_volume);
++            break;
++        case AC_SET_INPUT_MODE:
++			if (infor_record.record_pid != pid) {
++				return -EBUSY;
++			}
++            if (get_user(value, p))
++            {
++                return -EFAULT;
++            }
++			if (value != mic_in && value != line_in) {
++				return -EINVAL;
++			}
++            audio_param_store.input_io_type = value;
++            switch_io_type(capture, audio_param_store.input_io_type);
++            break;
++        case AC_SET_OUTPUT_MODE:
++			if (infor_record.play_pid != pid) {
++				return -EBUSY;
++			}
++            if (get_user(value, p))
++            {
++                return -EFAULT;
++            }
++			if (value != speaker_out && value != line_out) {
++				return -EINVAL;
++			}
++            switch_io_type(playback, value);
++            break;
++        case AC_AI_DISABLE:
++            printk("[ac_driver]AC_AI_DISABLE\n");
++			if (infor_record.record_pid != pid) {
++				return -EBUSY;
++			}
++			fh_acw_stop_capture(audio_config);
++            printk(" AC_AI_DISABLE\n");
++            break;
++        case AC_AO_DISABLE:
++            printk("[ac_driver]AC_AO_DISABLE\n");
++			if (infor_record.play_pid != pid) {
++				return -EBUSY;
++			}
++            fh_acw_stop_playback(audio_config);
++            printk(" AC_AO_DISABLE\n");
++            break;
++        case AC_AI_PAUSE:
++			if (infor_record.record_pid != pid) {
++				return -EBUSY;
++			}
++            printk(KERN_INFO "capture pause\n");
++            audio_config->capture.state = STATE_PAUSE;
++            rx_status = readl(fh_audio_module.regs + ACW_RXFIFO_CTRL);
++            rx_status = rx_status & (~(1 << 0));
++            writel(rx_status, fh_audio_module.regs + ACW_RXFIFO_CTRL); /* disable RX FIFO */
++            break;
++        case AC_AI_RESUME:
++			if (infor_record.record_pid != pid) {
++				return -EBUSY;
++			}
++            printk(KERN_INFO "capture resume\n");
++            audio_config->capture.state = STATE_RUN;
++#ifndef CONFIG_MACH_FH8830_FPGA
++            rx_status = readl(fh_audio_module.regs + ACW_RXFIFO_CTRL);
++            rx_status = rx_status | (1 << 4);
++            writel(rx_status, fh_audio_module.regs + ACW_RXFIFO_CTRL); /* clear RX FIFO */
++            rx_status = rx_status & (~(1 << 4));
++            rx_status = rx_status | (1 << 0);
++            writel(rx_status, fh_audio_module.regs + ACW_RXFIFO_CTRL); /* re-enable RX FIFO */
++#endif
++            break;
++        case AC_AO_PAUSE:
++			if (infor_record.play_pid != pid) {
++				return -EBUSY;
++			}
++            audio_config->playback.state = STATE_PAUSE;
++            printk(KERN_INFO "playback pause\n");
++//#ifndef CONFIG_MACH_FH8830_FPGA
++//            tx_status = readl(fh_audio_module.regs + ACW_TXFIFO_CTRL);/*rx fifo disable*/
++//            tx_status =  tx_status&(~(1<<0));
++//            writel(tx_status, fh_audio_module.regs + ACW_TXFIFO_CTRL);/*tx fifo disable*/
++//#endif
++            break;
++        case AC_AO_RESUME:
++			if (infor_record.play_pid != pid) {
++				return -EBUSY;
++			}
++            printk(KERN_INFO "playback resume\n");
++            audio_config->playback.state = STATE_RUN;
++//#ifndef CONFIG_MACH_FH8830_FPGA
++//            tx_status = readl( fh_audio_module.regs + ACW_TXFIFO_CTRL);//clear rx fifo
++//            tx_status =  tx_status|(1<<0);
++//            writel(tx_status,fh_audio_module.regs + ACW_TXFIFO_CTRL); //enable tx fifo read enable
++//#endif
++            break;
++        default:
++            return -ENOTTY;
++    }
++    return 0;
++}
++
++static int fh_audio_open(struct inode *ip, struct file *fp)
++{
++    fp->private_data = &fh_audio_dev.fh_audio_miscdev;
++    return 0;
++}
++
++static u32 fh_audio_poll(struct file *filp, poll_table *wait)
++{
++    struct miscdevice *miscdev = filp->private_data;
++    struct audio_dev  *dev = container_of(miscdev, struct audio_dev, fh_audio_miscdev);
++    struct fh_audio_cfg  *audio_config = &dev->audio_config;
++    u32 mask = 0;
++    long avail;
++    if (STATE_RUN == audio_config->capture.state)
++    {
++        poll_wait(filp,&audio_config->readqueue,wait);
++        avail = avail_data_len(capture, audio_config);
++        if (avail >  audio_config->capture.cfg.period_bytes)
++        {
++            mask |=  POLLIN | POLLRDNORM;
++        }
++    }
++    if (STATE_RUN == audio_config->playback.state)
++    {
++        poll_wait(filp,&audio_config->writequeue,wait);
++        avail = avail_data_len(playback, audio_config);
++        if (avail >  audio_config->playback.cfg.period_bytes)
++        {
++            mask |=  POLLOUT | POLLWRNORM;
++        }
++    }
++    return mask;
++}
++
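++/*
++ * read()/write() copy between the user buffer and the DMA ring buffer,
++ * handling wrap-around at the end of the ring; appl_ptr tracks the
++ * application's position, hw_ptr (updated in the DMA callbacks) the
++ * hardware's, and avail_data_len() derives the usable span from the two.
++ */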
++static ssize_t fh_audio_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
++{
++
++    int ret;
++    struct miscdevice *miscdev = filp->private_data;
++    struct audio_dev  *dev = container_of(miscdev, struct audio_dev, fh_audio_miscdev);
++    struct fh_audio_cfg  *audio_config = &dev->audio_config;
++    int after,left;
++    int pid,avail;
++    pid = current->tgid;
++    if (infor_record.record_pid != pid){
++    	return -EBUSY;
++    }
++
++    avail = avail_data_len(capture, audio_config);
++    if (avail > len)
++    {
++        avail = len;
++    }
++    after = avail + audio_config->capture.appl_ptr;
++    if (after > audio_config->capture.size)
++    {
++        left = avail - (audio_config->capture.size - audio_config->capture.appl_ptr);
++        ret = copy_to_user(buf, audio_config->capture.area + audio_config->capture.appl_ptr,
++                           audio_config->capture.size - audio_config->capture.appl_ptr);
++        ret |= copy_to_user(buf + audio_config->capture.size - audio_config->capture.appl_ptr,
++                            audio_config->capture.area, left);
++        if (ret)
++            return -EFAULT;
++        spin_lock(&audio_config->capture.lock);
++        audio_config->capture.appl_ptr = left;
++        spin_unlock(&audio_config->capture.lock);
++    }
++    else
++    {
++        if (copy_to_user(buf, audio_config->capture.area + audio_config->capture.appl_ptr, avail))
++            return -EFAULT;
++        spin_lock(&audio_config->capture.lock);
++        audio_config->capture.appl_ptr += avail;
++        spin_unlock(&audio_config->capture.lock);
++    }
++
++    return avail;
++
++}
++
++static ssize_t fh_audio_write(struct file *filp, const char __user *buf,
++                              size_t len, loff_t *off)
++{
++
++    struct miscdevice *miscdev = filp->private_data;
++    struct audio_dev  *dev = container_of(miscdev, struct audio_dev, fh_audio_miscdev);
++    struct fh_audio_cfg  *audio_config = &dev->audio_config;
++    int  ret;
++    int after,left;
++    int pid,avail;
++    pid = current->tgid;
++    if (infor_record.play_pid != pid) {
++        return -EBUSY;
++    }
++    avail = avail_data_len(playback,audio_config);
++    if (0 == avail)
++    {
++        return 0;
++    }
++    if (avail > len)
++    {
++        avail = len;
++    }
++    after = avail + audio_config->playback.appl_ptr;
++    if (after > audio_config->playback.size)
++    {
++        left = avail - (audio_config->playback.size - audio_config->playback.appl_ptr);
++        ret = copy_from_user(audio_config->playback.area + audio_config->playback.appl_ptr, buf,
++                             audio_config->playback.size - audio_config->playback.appl_ptr);
++        ret |= copy_from_user(audio_config->playback.area,
++                              buf + audio_config->playback.size - audio_config->playback.appl_ptr, left);
++        if (ret)
++            return -EFAULT;
++        spin_lock(&audio_config->playback.lock);
++        audio_config->playback.appl_ptr = left;
++        spin_unlock(&audio_config->playback.lock);
++    }
++    else
++    {
++        if (copy_from_user(audio_config->playback.area + audio_config->playback.appl_ptr, buf, avail))
++            return -EFAULT;
++        spin_lock(&audio_config->playback.lock);
++        audio_config->playback.appl_ptr += avail;
++        spin_unlock(&audio_config->playback.lock);
++    }
++
++    return avail;
++}
++
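++/*
++ * FIFO error interrupt: on an RX/TX under- or overflow the recovery is to
++ * stop and restart the affected stream, which re-arms its cyclic DMA.
++ */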
++static irqreturn_t fh_audio_interrupt(int irq, void *dev_id)
++{
++#ifndef CONFIG_MACH_FH8830_FPGA
++	u32 interrupts, rx_status;
++    struct fh_audio_cfg  *audio_config = &fh_audio_dev.audio_config;
++
++    interrupts = readl(fh_audio_module.regs + ACW_CTRL);
++    //interrupts &= ~(0x3ff) << 16;
++    writel(interrupts, fh_audio_module.regs + ACW_CTRL);
++
++    if(interrupts & ACW_INTR_RX_UNDERFLOW)
++    {
++        fh_acw_stop_capture(audio_config);
++        fh_acw_start_capture(audio_config);
++        PRINT_AUDIO_DBG("ACW_INTR_RX_UNDERFLOW\n");
++    }
++
++    if(interrupts & ACW_INTR_RX_OVERFLOW)
++    {
++        if (audio_config->capture.state == STATE_RUN) {
++            fh_acw_stop_capture(audio_config);
++            fh_acw_start_capture(audio_config);
++        } else {
++            rx_status = readl( fh_audio_module.regs + ACW_RXFIFO_CTRL);//clear rx fifo
++            rx_status =  rx_status|(1<<4);
++            writel(rx_status,fh_audio_module.regs + ACW_RXFIFO_CTRL);
++        }
++        PRINT_AUDIO_DBG("ACW_INTR_RX_OVERFLOW\n");
++    }
++
++    if(interrupts & ACW_INTR_TX_UNDERFLOW)
++    {
++        fh_acw_stop_playback(audio_config);
++        fh_acw_start_playback(audio_config);
++        PRINT_AUDIO_DBG("ACW_INTR_TX_UNDERFLOW\n");
++    }
++
++    if(interrupts & ACW_INTR_TX_OVERFLOW)
++    {
++        fh_acw_stop_playback(audio_config);
++        fh_acw_start_playback(audio_config);
++        PRINT_AUDIO_DBG("ACW_INTR_TX_OVERFLOW\n");
++    }
++
++    PRINT_AUDIO_DBG("interrupts: 0x%x\n", interrupts);
++#endif
++    return IRQ_HANDLED;
++}
++
++static const struct file_operations acw_fops =
++{
++    .owner      = THIS_MODULE,
++    .llseek     = no_llseek,
++    .unlocked_ioctl = fh_audio_ioctl,
++    .release = fh_audio_close,
++    .open = fh_audio_open,
++    .poll = fh_audio_poll,
++    .read = fh_audio_read,
++    .write = fh_audio_write,
++
++};
++
++static int __devinit fh_audio_drv_probe(struct platform_device *pdev)
++{
++    int ret;
++    struct resource *irq_res, *mem;
++
++    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++    if (!mem)
++        return -EINVAL;
++    printk("acw probe\n");
++    if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
++                                 "fh_audio_module"))
++        return -ENOMEM;
++
++    /* map the MMIO region claimed above */
++    fh_audio_module.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
++    if (!fh_audio_module.regs) {
++        ret = -ENOMEM;
++        goto remap_fail;
++    }
++
++    fh_audio_module.clk = clk_get(NULL, "ac_clk");
++    if (IS_ERR(fh_audio_module.clk)) {
++        ret = PTR_ERR(fh_audio_module.clk);
++        goto clk_fail;
++    }
++    clk_enable(fh_audio_module.clk);
++
++    spin_lock_init(&fh_audio_module.lock);
++
++    ret = misc_register(&fh_audio_dev.fh_audio_miscdev);
++    if (ret)
++        goto out_disable_clk;
++
++    irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++    if (!irq_res)
++    {
++        pr_err("%s: ERROR: getting resource failed, "
++               "cannot get IORESOURCE_IRQ\n", __func__);
++        ret = -ENXIO;
++        goto out_deregister;
++    }
++
++    fh_audio_module.irq = irq_res->start;
++
++    ret = request_irq(fh_audio_module.irq, fh_audio_interrupt, IRQF_SHARED, "audio", &fh_audio_module);
++    if (ret)
++        goto out_deregister;
++
++    return 0;
++
++out_deregister:
++    misc_deregister(&fh_audio_dev.fh_audio_miscdev);
++
++out_disable_clk:
++    clk_disable(fh_audio_module.clk);
++    clk_put(fh_audio_module.clk);
++    fh_audio_module.clk = NULL;
++clk_fail:
++    devm_iounmap(&pdev->dev, fh_audio_module.regs);
++    fh_audio_module.regs = NULL;
++remap_fail:
++    devm_release_mem_region(&pdev->dev, mem->start, resource_size(mem));
++    return ret;
++}
++
++static int __devexit fh_acw_drv_remove(struct platform_device *pdev)
++{
++    struct resource *mem;
++
++    misc_deregister(&fh_audio_dev.fh_audio_miscdev);
++    free_irq(fh_audio_module.irq, &fh_audio_module);
++
++    if (fh_audio_module.clk) {
++        clk_disable(fh_audio_module.clk);
++        clk_put(fh_audio_module.clk);
++    }
++    if (fh_audio_module.regs) {
++        devm_iounmap(&pdev->dev, fh_audio_module.regs);
++    }
++    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++    if (mem) {
++        devm_release_mem_region(&pdev->dev, mem->start, resource_size(mem));
++    }
++    printk("acw remove ok\n");
++    return 0;
++}
++
++static struct platform_driver fh_audio_driver =
++{
++    .probe      = fh_audio_drv_probe,
++    .remove     = __devexit_p(fh_acw_drv_remove),
++    .driver     = {
++        .name   = "fh_acw",
++        .owner  = THIS_MODULE,
++    }
++};
++
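++/*
++ * Allocate one coherent DMA buffer per direction at module init; the
++ * capture and playback rings must then fit inside these preallocations
++ * (see the AUDIO_DMA_PREALLOC_SIZE checks in the start functions).
++ */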
++void audio_prealloc_dma_buffer(struct fh_audio_cfg  *audio_config)
++{
++    int pg;
++    gfp_t gfp_flags;
++    pg = get_order(AUDIO_DMA_PREALLOC_SIZE);
++    gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
++    audio_config->capture.dev.coherent_dma_mask = DMA_BIT_MASK(32);
++    audio_config->capture.area = dma_alloc_coherent(&audio_config->capture.dev, PAGE_SIZE << pg, &audio_config->capture.addr, gfp_flags);
++    if (!audio_config->capture.area)
++    {
++        printk(KERN_ERR "not enough memory for the capture buffer\n");
++        return;
++    }
++    audio_config->playback.dev.coherent_dma_mask = DMA_BIT_MASK(32);
++    audio_config->playback.area = dma_alloc_coherent(&audio_config->playback.dev, PAGE_SIZE << pg, &audio_config->playback.addr, gfp_flags);
++    if (!audio_config->playback.area)
++    {
++        printk(KERN_ERR "not enough memory for the playback buffer\n");
++        return;
++    }
++}
++
++void audio_free_prealloc_dma_buffer(struct fh_audio_cfg  *audio_config)
++{
++    int pg;
++    pg = get_order(AUDIO_DMA_PREALLOC_SIZE);
++    dma_free_coherent(&audio_config->capture.dev, PAGE_SIZE<<pg, audio_config->capture.area, audio_config->capture.addr);
++    dma_free_coherent(&audio_config->playback.dev, PAGE_SIZE<<pg, audio_config->playback.area, audio_config->playback.addr);
++}
++
++static void init_audio_mutex(struct fh_audio_cfg  *audio_config)
++{
++    sema_init(&audio_config->sem_capture, 1);
++    sema_init(&audio_config->sem_playback, 1);
++}
++
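++/*
++ * Reserve the RX and TX DMA channels up front so the start functions only
++ * have to prepare and launch the cyclic transfers.
++ */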
++int audio_request_dma_channel(void)
++{
++    dma_cap_mask_t mask;
++    /*request audio rx dma channel*/
++    dma_rx_transfer = kzalloc(sizeof(struct fh_dma_chan), GFP_KERNEL);
++    if (!dma_rx_transfer)
++    {
++        printk(KERN_ERR "alloc dma_rx_transfer failed\n");
++        goto mem_fail;
++    }
++
++    dma_cap_zero(mask);
++    dma_cap_set(DMA_SLAVE, mask);
++    dma_rx_transfer->chan = dma_request_channel(mask, fh_acw_dma_chan_filter, &fh_audio_dev.channel_assign.capture_channel);
++    if (!dma_rx_transfer->chan)
++    {
++        printk(KERN_ERR"request audio rx dma channel failed \n");
++        goto channel_fail;
++    }
++
++    /*request audio tx dma channel*/
++    dma_tx_transfer = kzalloc(sizeof(struct fh_dma_chan), GFP_KERNEL);
++    if (!dma_tx_transfer)
++    {
++        printk(KERN_ERR "alloc dma_tx_transfer failed\n");
++        goto mem_fail;
++    }
++
++    dma_cap_zero(mask);
++    dma_cap_set(DMA_SLAVE, mask);
++    dma_tx_transfer->chan = dma_request_channel(mask, fh_acw_dma_chan_filter, &fh_audio_dev.channel_assign.playback_channel);
++    if (!dma_tx_transfer->chan)
++    {
++        printk(KERN_ERR "request audio tx dma channel failed\n");
++        goto channel_fail;
++    }
++#ifdef STERO
++    /*request audio tx dma channel*/
++    dma_tx_right_transfer = kzalloc(sizeof(struct fh_dma_chan), GFP_KERNEL);
++    if (!dma_tx_right_transfer)
++    {
++        printk(KERN_ERR "alloc dma_tx_right_transfer failed\n");
++        goto mem_fail;
++    }
++
++    dma_cap_zero(mask);
++    dma_cap_set(DMA_SLAVE, mask);
++    /* assumes the standard channel filter; the original passed a channel-id macro here */
++    dma_tx_right_transfer->chan = dma_request_channel(mask, fh_acw_dma_chan_filter, &fh_audio_dev.channel_assign.playback_channel);
++    if (!dma_tx_right_transfer->chan)
++    {
++        printk(KERN_ERR "request dma channel dma_tx_right_transfer failed\n");
++        goto channel_fail;
++    }
++#endif
++    return 0;
++channel_fail:
++    if (dma_rx_transfer && dma_rx_transfer->chan)
++    {
++        dma_release_channel(dma_rx_transfer->chan);
++        dma_rx_transfer->chan = NULL;
++    }
++    if (dma_tx_transfer && dma_tx_transfer->chan)
++    {
++        dma_release_channel(dma_tx_transfer->chan);
++        dma_tx_transfer->chan = NULL;
++    }
++#ifdef STERO
++    if (dma_tx_right_transfer && dma_tx_right_transfer->chan)
++    {
++        dma_release_channel(dma_tx_right_transfer->chan);
++        dma_tx_right_transfer->chan = NULL;
++    }
++#endif
++mem_fail:
++    if (dma_rx_transfer != NULL)
++    {
++        kfree(dma_rx_transfer);
++        dma_rx_transfer = NULL;
++    }
++    if (dma_tx_transfer != NULL)
++    {
++        kfree(dma_tx_transfer);
++        dma_tx_transfer = NULL;
++    }
++#ifdef 	STERO
++    if (dma_tx_right_transfer != NULL)
++    {
++        kfree(dma_tx_right_transfer);
++        dma_tx_right_transfer = NULL;
++    }
++#endif
++    return -EFAULT;
++}
++
++void audio_release_dma_channel(void)
++{
++    /*release audio tx dma channel*/
++    if (dma_tx_transfer != NULL)
++    {
++    	if (dma_tx_transfer->chan) {
++			dma_release_channel(dma_tx_transfer->chan);
++			dma_tx_transfer->chan = NULL;
++    	}
++        kfree(dma_tx_transfer);
++        dma_tx_transfer = NULL;
++    }
++
++    /*release audio rx dma channel*/
++    if (dma_rx_transfer != NULL)
++    {
++    	if (dma_rx_transfer->chan) {
++			dma_release_channel(dma_rx_transfer->chan);
++			dma_rx_transfer->chan = NULL;
++    	}
++        
++        kfree(dma_rx_transfer);
++        dma_rx_transfer = NULL;
++    }
++#ifdef STERO
++    /*release audio tx dma channel*/
++    if (dma_tx_right_transfer != NULL)
++    {
++    	if (dma_tx_right_transfer->chan) {
++			dma_release_channel(dma_tx_right_transfer->chan);
++			dma_tx_right_transfer->chan = NULL;
++    	}
++        kfree(dma_tx_right_transfer);
++        dma_tx_right_transfer = NULL;
++    }
++
++#endif
++
++}
++
++static int __init fh_audio_init(void)
++{
++    int ret;
++
++    create_proc();
++    init_audio_mutex(&fh_audio_dev.audio_config);
++    audio_prealloc_dma_buffer(&fh_audio_dev.audio_config);
++    ret = audio_request_dma_channel();
++    if (ret)
++        return ret;
++    return platform_driver_register(&fh_audio_driver);
++}
++module_init(fh_audio_init);
++
++static void __exit fh_audio_exit(void)
++{
++
++    remove_proc();
++    audio_release_dma_channel();
++    audio_free_prealloc_dma_buffer(&fh_audio_dev.audio_config);
++    platform_driver_unregister(&fh_audio_driver);
++}
++module_exit(fh_audio_exit);
++
++MODULE_AUTHOR("FH_AUDIO");
++MODULE_DESCRIPTION("FH_AUDIO");
++MODULE_LICENSE("GPL");
++
++/****************************debug proc*****************************/
++#include <linux/proc_fs.h>
++
++struct proc_dir_entry *proc_ac_entry;
++#define proc_name "fh_audio"
++
++ssize_t proc_ac_read(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++    ssize_t len = 0;
++    int i;
++
++    for (i = 0; i <= 0x20; i += 4) {
++        printk(KERN_INFO "0x%x reg = 0x%x\n", i, readl(fh_audio_module.regs + i));
++    }
++    return len;
++}
++
++static void create_proc(void)
++{
++    proc_ac_entry = create_proc_entry(proc_name, S_IRUGO, NULL);
++    if (!proc_ac_entry)
++    {
++        printk(KERN_ERR"create proc failed\n");
++    }
++    else
++    {
++        proc_ac_entry->read_proc = proc_ac_read;
++    }
++}
++
++static void remove_proc(void)
++{
++    remove_proc_entry(proc_name, NULL);
++}
++
+diff --git a/drivers/misc/fh_efuse.c b/drivers/misc/fh_efuse.c
+new file mode 100644
+index 00000000..6d90b79f
+--- /dev/null
++++ b/drivers/misc/fh_efuse.c
+@@ -0,0 +1,551 @@
++/*
++ * fh_efuse.c
++ *
++ *  Created on: Mar 13, 2015
++ *      Author: duobao
++ */
++#include <linux/uaccess.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++/*****************************************************************************
++ *  Include Section
++ *  add all #include here
++ *****************************************************************************/
++#include   "fh_efuse.h"
++/*****************************************************************************
++ * Define section
++ * add all #define here
++ *****************************************************************************/
++
++#define wrap_readl(wrap, name) \
++	__raw_readl(&(((struct wrap_efuse_reg *)wrap->regs)->name))
++
++#define wrap_writel(wrap, name, val) \
++	__raw_writel((val), &(((struct wrap_efuse_reg *)wrap->regs)->name))
++
++#define wrap_readw(wrap, name) \
++	__raw_readw(&(((struct wrap_efuse_reg *)wrap->regs)->name))
++
++#define wrap_writew(wrap, name, val) \
++	__raw_writew((val), &(((struct wrap_efuse_reg *)wrap->regs)->name))
++
++#define wrap_readb(wrap, name) \
++	__raw_readb(&(((struct wrap_efuse_reg *)wrap->regs)->name))
++
++#define wrap_writeb(wrap, name, val) \
++	__raw_writeb((val), &(((struct wrap_efuse_reg *)wrap->regs)->name))
++
++#define FH_EFUSE_PLAT_DEVICE_NAME			"fh_efuse"
++#define FH_EFUSE_MISC_DEVICE_NAME			"fh_efuse"
++#define FH_EFUSE_MISC_DEVICE_NODE_NAME		"fh_efuse_node"
++/****************************************************************************
++ * ADT section
++ *  add definition of user defined Data Type
++ *  that only be used in this file  here
++ ***************************************************************************/
++
++
++struct wrap_efuse_reg {
++	u32 efuse_cmd;				/*0x0*/
++	u32 efuse_config;			/*0x4*/
++	u32 efuse_match_key;		/*0x8*/
++	u32 efuse_timing0;			/*0xc*/
++	u32 efuse_timing1;			/*0x10*/
++	u32 efuse_timing2;			/*0x14*/
++	u32 efuse_timing3;			/*0x18*/
++	u32 efuse_timing4;			/*0x1c*/
++	u32 efuse_timing5;			/*0x20*/
++	u32 efuse_timing6;			/*0x24*/
++	u32 efuse_dout;				/*0x28*/
++	u32 efuse_status0;			/*0x2c*/
++	u32 efuse_status1;			/*0x30*/
++	u32 efuse_status2;			/*0x34*/
++	u32 efuse_status3;			/*0x38*/
++	u32 efuse_status4;			/*0x3c*/
++	u32 efuse_mem_info;
++};
++
++
++enum {
++	CMD_TRANS_AESKEY = 4,
++	CMD_WFLGA_AUTO = 8,
++};
++
++struct wrap_efuse_obj s_efuse_obj;
++
++#define EFUSE_MAX_ENTRY			60
++
++
++/******************************************************************************
++ * Function prototype section
++ * add prototypes for all functions called by this file,execepting those
++ * declared in header file
++ *****************************************************************************/
++
++/*****************************************************************************
++ * Global variables section - Exported
++ * add declaration of global variables that will be exported here
++ * e.g.
++ *  int8_t foo;
++ ****************************************************************************/
++
++/*****************************************************************************
++ *  static function declarations
++ *****************************************************************************/
++static int fh_efuse_open(struct inode *inode, struct file *file);
++static int fh_efuse_release(struct inode *inode, struct file *filp);
++static long fh_efuse_ioctl(struct file *filp, unsigned int cmd,
++		unsigned long arg);
++/*****************************************************************************
++ * Global variables section - Local
++ * define global variables(will be refered only in this file) here,
++ * static keyword should be used to limit scope of local variable to this file
++ * e.g.
++ *  static uint8_t ufoo;
++ *****************************************************************************/
++
++static const struct file_operations fh_efuse_fops = {
++		.owner = THIS_MODULE,
++		.open = fh_efuse_open,
++		.release = fh_efuse_release,
++		.unlocked_ioctl = fh_efuse_ioctl,
++};
++
++static struct miscdevice fh_efuse_misc = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = FH_EFUSE_MISC_DEVICE_NAME,
++	/*.nodename = FH_EFUSE_MISC_DEVICE_NODE_NAME,*/
++	.fops = &fh_efuse_fops,
++};
++
++static int fh_efuse_open(struct inode *inode, struct file *file)
++{
++	return 0;
++}
++
++static int fh_efuse_release(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
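++/* poll efuse_status0 until bit 'pos' is set; gives up after roughly 10 ms */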
++void efuse_detect_complete(struct wrap_efuse_obj *obj, int pos)
++{
++	unsigned int rdata;
++	unsigned int time = 0;
++
++	do {
++		time++;
++		rdata = wrap_readl(obj, efuse_status0);
++		if (time > 1000) {
++			printk("[efuse]:detect time out...pos: 0x%x\n", pos);
++			return;
++		}
++		udelay(10);
++	} while ((rdata & (1 << pos)) != (1 << pos));
++}
++
++void auto_check_efuse_pro_bits(struct wrap_efuse_obj *obj, u32 *buff)
++{
++	/* issue the auto-check command, then read the protect bits from the
++	 * hardware status registers (a set bit means the entry is writable) */
++	wrap_writel(obj, efuse_cmd, CMD_WFLGA_AUTO);
++	efuse_detect_complete(obj, 8);
++	buff[0] = wrap_readl(obj, efuse_status1);
++	buff[1] = wrap_readl(obj, efuse_status2);
++}
++
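++/* command word layout: data[7:0] << 12 | entry[5:0] << 4 | opcode 0x2 (write byte) */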
++void efuse_write_key_byte(struct wrap_efuse_obj *obj, u32 entry, u8 data)
++{
++	u32 temp = 0;
++	temp = (u32)data;
++	/*0~255*/
++	temp &= ~0xffffff00;
++	temp <<= 12;
++	/*0~63*/
++	entry &= 0x3f;
++	temp |= (entry<<4) | (0x02);
++	wrap_writel(obj, efuse_cmd, temp);
++	efuse_detect_complete(obj, 2);
++}
++
++void efuse_load_usrcmd(struct wrap_efuse_obj *obj)
++{
++	wrap_writel(obj, efuse_cmd, 1);
++	efuse_detect_complete(obj, 1);
++}
++
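++/*
++ * Copy key words from efuse entries into the AES key registers. Without a
++ * map, key word i comes from entry group (start_no + i); with a
++ * 4-entries-to-1-key map the source group is taken from the user map.
++ */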
++void efuse_trans_key(struct wrap_efuse_obj *obj, u32 start_no, u32 size)
++{
++	int i;
++
++	if (obj->map_para.open_flag == 0) {
++		printk("Default mode...\n");
++		for (i = 0; i < size; i++) {
++			wrap_writel(obj, efuse_config, ((start_no + i) << 28));
++			wrap_writel(obj, efuse_cmd, (i << 20) + 0x04);
++			efuse_detect_complete(obj, 4);
++		}
++		return;
++	}
++
++	printk("parse efuse map...\n");
++	switch (obj->map_para.open_flag) {
++	case OPEN_4_ENTRY_TO_1_KEY:
++		for (i = 0; i < obj->map_para.map_size; i++) {
++			/* aes key numbers start from 0; only the efuse entry is remapped */
++			printk("times:%d,efuse entry : %d\n", i, obj->map_para.map[i].efuse_entry);
++			wrap_writel(obj, efuse_config, ((obj->map_para.map[i].efuse_entry / 4) << 28));
++			wrap_writel(obj, efuse_cmd, (i << 20) + 0x04);
++			efuse_detect_complete(obj, 4);
++		}
++		break;
++	case OPEN_1_ENTRY_TO_1_KEY:
++		printk("not support here now..\n");
++		break;
++	default:
++		printk("parse error efuse map para..\n");
++		break;
++	}
++}
++
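++/* reverse the byte order of one 32-bit word (AES key endianness fix-up) */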
++static void aes_biglittle_swap(u8 *buf)
++{
++	u8 tmp, tmp1;
++	tmp = buf[0];
++	tmp1 = buf[1];
++	buf[0] = buf[3];
++	buf[1] = buf[2];
++	buf[2] = tmp1;
++	buf[3] = tmp;
++}
++
++void efuse_get_lock_status(struct wrap_efuse_obj *obj, struct efuse_status *status)
++{
++	status->efuse_apb_lock = (wrap_readl(obj, efuse_status0)>>20) & 0x0f;
++	status->aes_ahb_lock = (wrap_readl(obj, efuse_status0)>>24) & 0x0f;
++}
++
++void efuse_read_entry(struct wrap_efuse_obj *obj, u32 key, u32 start_entry, u8 *buff, u32 size)
++{
++	u32 data, i;
++
++	/* one byte per command: present the match key, then read the entry */
++	for (i = 0; i < size; i++) {
++		wrap_writel(obj, efuse_match_key, key);
++		wrap_writel(obj, efuse_cmd, ((start_entry + i) << 4) + 0x03);
++		efuse_detect_complete(obj, 3);
++		data = wrap_readl(obj, efuse_dout);
++		*buff++ = (u8)data;
++	}
++}
++
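++/*
++ * All efuse operations funnel through this ioctl: the EFUSE_INFO block is
++ * copied in, the requested command runs against s_efuse_obj, and the
++ * resulting status block is copied back to user space.
++ */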
++long fh_efuse_ioctl(struct file *filp, unsigned int cmd,
++		unsigned long arg)
++{
++	int i;
++
++	EFUSE_INFO efuse_info = {0};
++	u32 *p_dst = NULL;
++	u8 *p_dst_8 = NULL;
++	unsigned int data;
++	u32 temp_swap_data[32] = {0};
++
++	if (copy_from_user((void *) &efuse_info, (void __user*) arg,
++			sizeof(EFUSE_INFO))) {
++		return -EFAULT;
++	}
++/*
++	printk(
++			"app before raw info is: entry: %x,,key size: %x,
++			trans_key_start: %x, no is : %x, src_key_add:%x\n",
++			efuse_info.efuse_entry_no,
++			efuse_info.key_size,
++			efuse_info.trans_key_start_no,
++			efuse_info.trans_key_size,
++			(u32)efuse_info.key_buff);
++*/
++	switch (cmd) {
++	case IOCTL_EFUSE_CHECK_PRO:
++		/*printk("efuse check pro...\n");*/
++		efuse_load_usrcmd(&s_efuse_obj);
++		auto_check_efuse_pro_bits(&s_efuse_obj, efuse_info.status.protect_bits);
++		break;
++	case IOCTL_EFUSE_WRITE_KEY:
++		if (copy_from_user((void *) &temp_swap_data[0],
++			(void __user *) efuse_info.key_buff,
++			efuse_info.key_size)) {
++			return -EFAULT;
++		}
++
++		p_dst = &temp_swap_data[0];
++		for (i = 0; i < efuse_info.key_size / sizeof(u32); i++) {
++			aes_biglittle_swap((u8 *) (p_dst + i));
++			/*printk("swap data is %x\n",*(p_dst + i));*/
++		}
++		p_dst_8 = (u8 *)&temp_swap_data[0];
++
++		for (i = 0; i < efuse_info.key_size; i++) {
++			efuse_write_key_byte(&s_efuse_obj, efuse_info.efuse_entry_no + i, *(p_dst_8 + i));
++			/*mdelay(300);*/
++		}
++		s_efuse_obj.efuse_entry_no = efuse_info.efuse_entry_no;
++		s_efuse_obj.key_buff = efuse_info.key_buff;
++		s_efuse_obj.key_size = efuse_info.key_size;
++		efuse_load_usrcmd(&s_efuse_obj);
++		break;
++	case IOCTL_EFUSE_CHECK_LOCK:
++		/*printk("efuse check lock\n");*/
++		efuse_load_usrcmd(&s_efuse_obj);
++		efuse_get_lock_status(&s_efuse_obj, &efuse_info.status);
++		break;
++	case IOCTL_EFUSE_TRANS_KEY:
++		/*printk("efuse trans key\n");*/
++		efuse_trans_key(&s_efuse_obj, efuse_info.trans_key_start_no, efuse_info.trans_key_size);
++		/*
++		s_efuse_obj.trans_key_start_no = efuse_info.trans_key_start_no;
++		s_efuse_obj.trans_key_size = efuse_info.trans_key_size;
++	    efuse_info.status.open_flag = USE_EFUSE_SET_KEY;
++		*/
++		break;
++	case IOCTL_EFUSE_SWITCH_CPU_KEY_MODE:
++		s_efuse_obj.open_flag = USE_CPU_SET_KEY;
++		break;
++	case IOCTL_EFUSE_SWITCH_EFUSE_KEY_MODE:
++		s_efuse_obj.open_flag = USE_EFUSE_SET_KEY;
++		break;
++	case IOCTL_EFUSE_CHECK_ERROR:
++		break;
++	case IOCTL_EFUSE_READ_KEY:
++		/*printf("match is  : %x..\n",match_key);*/
++		efuse_read_entry(&s_efuse_obj, efuse_info.status.error,
++		efuse_info.efuse_entry_no,
++		(u8 *)&temp_swap_data[0], efuse_info.key_size);
++		p_dst = (u32 *)temp_swap_data;
++
++		for (i = 0; i < efuse_info.key_size / sizeof(u32); i++) {
++			aes_biglittle_swap((u8 *) (p_dst + i));
++			/*printk("swap data is %x\n",*(p_dst + i));*/
++		}
++		if (copy_to_user((void __user *) (efuse_info.key_buff),
++			(void *) &temp_swap_data[0],
++			efuse_info.key_size)) {
++			return -EFAULT;
++		}
++		/*memcpy(efuse_user_info,&efuse_info,sizeof(EFUSE_INFO));*/
++		break;
++	case IOCTL_EFUSE_SET_LOCK:
++		/*parse lock data...*/
++		data = efuse_info.status.aes_ahb_lock;
++		data <<= 4;
++		data &= 0xf0;
++		efuse_info.status.efuse_apb_lock &= 0x0f;
++		data |= efuse_info.status.efuse_apb_lock;
++		efuse_write_key_byte(&s_efuse_obj, 63, (u8)data);
++		break;
++	case IOCTL_EFUSE_SET_MAP_PARA_4_TO_1:
++		/*
++		check data correction. efuse entry must be %4 ==0
++		array key no from 0 to 8.
++		*/
++		if (efuse_info.map_para.map_size > MAX_EFUSE_MAP_SIZE) {
++			printk("error map size:0x%x..\n", efuse_info.map_para.map_size);
++			return -EFAULT;
++		}
++		for (i = 0; i < efuse_info.map_para.map_size; i++) {
++			if ((efuse_info.map_para.map[i].efuse_entry % 4 != 0)
++					|| (efuse_info.map_para.map[i].aes_key_no != i)) {
++				printk("map[%d]:entry[0x%x]:aes key[0x%x] para error..\n", i,
++						efuse_info.map_para.map[i].efuse_entry,
++						efuse_info.map_para.map[i].aes_key_no);
++				return -EFAULT;
++			}
++		}
++		/*cpy usr data to driver pri data...*/
++		memcpy(&s_efuse_obj.map_para,
++				&efuse_info.map_para, sizeof(efuse_info.map_para));
++		s_efuse_obj.map_para.open_flag = OPEN_4_ENTRY_TO_1_KEY;
++		break;
++	case IOCTL_EFUSE_SET_MAP_PARA_1_TO_1:
++		printk("not support this func now..\n");
++		break;
++	case IOCTL_EFUSE_CLR_MAP_PARA:
++		s_efuse_obj.map_para.open_flag = 0;
++		break;
++	default:
++		break;
++	}
++/*
++	printk(
++			"app after raw info is: entry: %x,,key size: %x,
++			trans_key_start: %x, no is : %x, src_key_add:%x\n",
++			efuse_info.efuse_entry_no,
++			efuse_info.key_size,
++			efuse_info.trans_key_start_no,
++			efuse_info.trans_key_size,
++			(u32)efuse_info.key_buff);
++*/
++
++	memcpy((void *)&s_efuse_obj.status, (void *)&efuse_info.status,
++			sizeof(struct efuse_status));
++
++	if (copy_to_user((void __user *) (&((EFUSE_INFO *)arg)->status),
++			(void *) &efuse_info.status, sizeof(struct efuse_status))) {
++			return -EFAULT;
++	}
++/*
++	copy_from_user((void *) &t_efuse_info, (void __user*) arg,
++				sizeof(EFUSE_INFO));
++
++	printk(
++			"########raw info is: entry: %x,,key size: %x,
++			trans_key_start: %x, no is : %x, src_key_add:%x,
++			pro_data0:%x,pro_data1:%x\n",
++			t_efuse_info.efuse_entry_no,
++			t_efuse_info.key_size,
++			t_efuse_info.trans_key_start_no,
++			t_efuse_info.trans_key_size,
++			(u32)t_efuse_info.key_buff,
++			t_efuse_info.status.protect_bits[0],
++			t_efuse_info.status.protect_bits[1]);
++*/
++	return 0;
++}
++/*****************************************************************************
++ *
++ *
++ *		function body
++ *
++ *
++ *****************************************************************************/
++static int __devinit fh_efuse_probe(struct platform_device *pdev)
++{
++	int err;
++	struct resource *res;
++
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res) {
++		dev_err(&pdev->dev, "efuse get platform source error..\n");
++		return -ENODEV;
++	}
++
++	res = request_mem_region(res->start, resource_size(res), pdev->name);
++	if (res == NULL) {
++		dev_err(&pdev->dev, "efuse region already claimed\n");
++		return -EBUSY;
++	}
++
++	s_efuse_obj.regs = ioremap(res->start, resource_size(res));
++	if (s_efuse_obj.regs == NULL) {
++		err = -ENXIO;
++		goto fail_no_ioremap;
++	}
++
++	err = misc_register(&fh_efuse_misc);
++	if (err != 0) {
++		dev_err(&pdev->dev, "efuse register misc error\n");
++		goto fail_no_misc;
++	}
++
++	platform_set_drvdata(pdev, &fh_efuse_misc);
++
++	return 0;
++
++fail_no_misc:
++	iounmap(s_efuse_obj.regs);
++
++fail_no_ioremap:
++	release_mem_region(res->start, resource_size(res));
++
++	return err;
++}
++
++static int __exit fh_efuse_remove(struct platform_device *pdev)
++{
++	struct resource *res;
++	struct miscdevice *misc;
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++	misc = (struct miscdevice *)platform_get_drvdata(pdev);
++	misc_deregister(misc);
++	iounmap(s_efuse_obj.regs);
++	release_mem_region(res->start, resource_size(res));
++
++	return 0;
++}
++
++/*******************
++ *
++ *
++ * add platform cause of i need the board info...
++ * in the probe function. i will register the sadc misc drive...
++ * then the app can open the sadc misc device..
++ *
++ ******************/
++static struct platform_driver fh_efuse_driver = {
++	.driver	= {
++		.name	= FH_EFUSE_PLAT_DEVICE_NAME,
++		.owner	= THIS_MODULE,
++	},
++	.probe = fh_efuse_probe,
++	.remove = __exit_p(fh_efuse_remove),
++};
++
++static int __init fh_efuse_init(void)
++{
++	return platform_driver_register(&fh_efuse_driver);
++}
++
++static void __exit fh_efuse_exit(void)
++{
++	platform_driver_unregister(&fh_efuse_driver);
++}
++
++module_init(fh_efuse_init);
++module_exit(fh_efuse_exit);
++
++MODULE_DESCRIPTION("fh efuse driver");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("zhangy@fullhan.com");
++MODULE_ALIAS("platform:FH_efuse");
+diff --git a/drivers/misc/fh_efuse.h b/drivers/misc/fh_efuse.h
+new file mode 100644
+index 00000000..35d0c10f
+--- /dev/null
++++ b/drivers/misc/fh_efuse.h
+@@ -0,0 +1,111 @@
++/*
++ * fh_efuse.h
++ *
++ *  Created on: Aug 9, 2016
++ *      Author: duobao
++ */
++
++#ifndef FH_EFUSE_H_
++#define FH_EFUSE_H_
++
++#include <linux/io.h>
++#include <linux/scatterlist.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/scatterlist.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/workqueue.h>
++#include <linux/bug.h>
++#include <linux/completion.h>
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++
++/****************************************************************************
++ * #define section
++ *	add constant #define here if any
++ ***************************************************************************/
++/*#define FH_EFUSE_PROC_FILE    "driver/efuse"*/
++#define MAX_EFUSE_MAP_SIZE		8
++
++#define IOCTL_EFUSE_CHECK_PRO						0
++#define IOCTL_EFUSE_WRITE_KEY						1
++#define IOCTL_EFUSE_CHECK_LOCK						2
++#define IOCTL_EFUSE_TRANS_KEY						3
++#define IOCTL_EFUSE_SWITCH_CPU_KEY_MODE				4
++#define IOCTL_EFUSE_SWITCH_EFUSE_KEY_MODE			5
++#define IOCTL_EFUSE_CHECK_ERROR						6
++#define IOCTL_EFUSE_READ_KEY						7
++#define IOCTL_EFUSE_SET_LOCK						8
++#define IOCTL_EFUSE_SET_MAP_PARA_4_TO_1					9
++#define IOCTL_EFUSE_SET_MAP_PARA_1_TO_1					10
++#define IOCTL_EFUSE_CLR_MAP_PARA					11
++
++#define OPEN_4_ENTRY_TO_1_KEY			0x55
++#define OPEN_1_ENTRY_TO_1_KEY			0xaa
++/****************************************************************************
++ * ADT section
++ *	add Abstract Data Type definition here
++ ***************************************************************************/
++
++struct efuse_status {
++	/*bit 1 means could write..0 not write*/
++	u32 protect_bits[2];
++	/*bit 1 means cpu couldn't read efuse entry data...*/
++	u32 efuse_apb_lock;
++	u32 aes_ahb_lock;
++	u32 error;
++};
++
++/*add efuse_aes_map struct*/
++struct efuse_aes_map {
++	u32 aes_key_no;
++	u32 efuse_entry;
++};
++
++struct efuse_aes_map_para {
++	u32 open_flag;
++	u32 map_size;
++	struct efuse_aes_map map[MAX_EFUSE_MAP_SIZE];
++};
++
++typedef struct {
++	u32 efuse_entry_no; /*from 0 ~ 31*/
++	u8 *key_buff;
++	u32 key_size;
++	u32 trans_key_start_no; /*from 0 ~ 7*/
++	u32 trans_key_size; /*max 8*/
++	struct efuse_aes_map_para map_para;
++	struct efuse_status status;
++} EFUSE_INFO;
++
++struct wrap_efuse_obj {
++	void *regs;
++	/*write key*/
++	u32 efuse_entry_no; /*from 0 ~ 31*/
++	u8 *key_buff;
++	u32 key_size;
++	/*trans key*/
++	u32 trans_key_start_no; /*from 0 ~ 7*/
++	u32 trans_key_size; /*max 8*/
++	struct efuse_aes_map_para map_para;
++	/*status*/
++	struct efuse_status status;
++
++#define USE_CPU_SET_KEY				0
++#define USE_EFUSE_SET_KEY			1
++	u32 open_flag;
++};
++/****************************************************************************
++ *  extern variable declaration section
++ ***************************************************************************/
++
++/****************************************************************************
++ *  section
++ *	add function prototype here if any
++ ***************************************************************************/
++void efuse_trans_key(struct wrap_efuse_obj *obj, u32 start_no, u32 size);
++#endif /* FH_EFUSE_H_ */
+diff --git a/drivers/misc/fh_fw_loader.c b/drivers/misc/fh_fw_loader.c
+new file mode 100644
+index 00000000..2897ebad
+--- /dev/null
++++ b/drivers/misc/fh_fw_loader.c
+@@ -0,0 +1,259 @@
++/*
++ * fw_loader.c
++ *
++ *  Created on: Aug 9, 2016
++ *      Author: duobao
++ */
++
++#include <linux/io.h>
++#include <linux/miscdevice.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/errno.h>
++#include <linux/printk.h>
++#include <linux/module.h>
++#include <linux/firmware.h>
++#include <linux/slab.h>
++#include <mach/fh8830.h>
++#include <mach/pmu.h>
++#include <asm/delay.h>
++#include <asm/uaccess.h>
++
++#define DEVICE_NAME   "fw_loader"
++
++struct fwl_parameter
++{
++    unsigned int addr;
++    unsigned int size;
++};
++
++struct fwl_description
++{
++    char* name;
++    void *virt_addr;
++    unsigned int phy_addr;
++    const struct firmware *fw_entry;
++    struct fwl_parameter fwl_param;
++};
++
++//#define  FW_LOADER_DEBUG
++#ifdef FW_LOADER_DEBUG
++#define PRINT_DBG(fmt,args...)  printk(fmt,##args)
++#else
++#define PRINT_DBG(fmt,args...)  do{} while(0)
++#endif
++
++
++#define FWL_LOADER_IOCTL_MAGIC             'f'
++
++#define FWL_LOAD_FW          _IOWR(FWL_LOADER_IOCTL_MAGIC, 0, unsigned int)
++#define FWL_SET_PARAM        _IOWR(FWL_LOADER_IOCTL_MAGIC, 1, struct fwl_parameter)
++#define FWL_START_FW         _IOWR(FWL_LOADER_IOCTL_MAGIC, 2, unsigned int)
++
++#define FWL_LOADER_IOCTL_MAXNR             8
++
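++/*
++ * Expected user-space sequence (illustrative sketch; the device node name
++ * comes from the misc core, i.e. /dev/fw_loader):
++ *
++ *   fd = open("/dev/fw_loader", O_RDWR);   // open() runs request_firmware()
++ *   ioctl(fd, FWL_SET_PARAM, &param);      // parameter block addr/size
++ *   ioctl(fd, FWL_LOAD_FW, &load_addr);    // copy the image to load_addr
++ *   ioctl(fd, FWL_START_FW, &dummy);       // boot the ARC core
++ */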
++
++/* Module parameters */
++#define FIRMWARE_NAME   "rtthread_arc.bin"
++static char* fw_name = FIRMWARE_NAME;
++module_param(fw_name, charp, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(fw_name, "firmware name");
++
++#define FIRMWARE_PHY_ADDRESS   0xa7800000
++static unsigned int fw_phy_addr = FIRMWARE_PHY_ADDRESS;
++module_param(fw_phy_addr, uint, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(fw_phy_addr, "firmware address");
++
++static struct miscdevice fwl_loader_device;
++static struct fwl_description *fwl_desc;
++
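++/*
++ * Map the firmware's physical load window, copy the requested image into
++ * it, then patch the parameter block address/size into the image's two
++ * trailing words where the firmware expects to find them.
++ */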
++static int fwl_load_firmware(void)
++{
++    if(!fwl_desc->fw_entry)
++    {
++        pr_err("ERROR: SET_FW_ADDRESS, fw_entry is NULL\n");
++        return -EINVAL;
++    }
++
++    fwl_desc->virt_addr = ioremap(fwl_desc->phy_addr, fwl_desc->fw_entry->size);
++    if(!fwl_desc->virt_addr)
++    {
++        pr_err("ERROR: SET_FW_ADDRESS, ioremap failed\n");
++        return -ENOMEM;
++    }
++
++    memcpy(fwl_desc->virt_addr, fwl_desc->fw_entry->data, fwl_desc->fw_entry->size);
++
++    writel(fwl_desc->fwl_param.addr, fwl_desc->virt_addr + fwl_desc->fw_entry->size - 8);
++    writel(fwl_desc->fwl_param.size, fwl_desc->virt_addr + fwl_desc->fw_entry->size - 4);
++
++    iounmap(fwl_desc->virt_addr);
++    fwl_desc->virt_addr = NULL;
++
++    printk("firmware: %s loaded\n", fwl_desc->name);
++
++    return 0;
++}
++
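++/*
++ * ARC625 boot sequence: hold the core in reset, program the PMU boot
++ * vector registers with the (half-word-swapped) load address, release the
++ * reset, wait for the on-chip ramloader, then set the start bit.
++ */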
++static int fwl_start_firmware(void)
++{
++    unsigned int arc_addr;
++    unsigned int reg;
++
++    arc_addr = ((fwl_desc->phy_addr & 0xffff) << 16) | (fwl_desc->phy_addr >> 16);
++
++    // ARC Reset
++    fh_pmu_set_reg(REG_PMU_SWRSTN_NSR, 0xFFBFFFFF);
++
++    fh_pmu_set_reg(REG_PMU_A625BOOT0 , 0x7940266B);
++    fh_pmu_set_reg(REG_PMU_A625BOOT1 , arc_addr);  // Configure ARC Bootcode start address
++    fh_pmu_set_reg(REG_PMU_A625BOOT2 , 0x0F802020);
++    fh_pmu_set_reg(REG_PMU_A625BOOT3 , arc_addr);
++
++    fh_pmu_set_reg(REG_PMU_REMAP , 0 );  // Disable A2X BUS Remap and Resize
++
++    // ARC reset released
++    fh_pmu_set_reg( REG_PMU_SWRSTN_NSR, 0xFFFFFFFF);
++
++    // wait ramloader done, about 1024 ARC CPU cycle
++    udelay(100);
++
++    // start ARC625
++    reg = fh_pmu_get_reg(REG_PMU_A625_START_CTRL);
++    reg &= ~(0xff);
++    reg |= 0x10;
++    fh_pmu_set_reg(REG_PMU_A625_START_CTRL, reg);
++
++    printk("firmware: %s started\n", fwl_desc->name);
++
++    return 0;
++}
++
++
++static long fwl_loader_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++    int ret = 0;
++
++    if (unlikely(_IOC_TYPE(cmd) != FWL_LOADER_IOCTL_MAGIC))
++    {
++        pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
++               __func__, _IOC_TYPE(cmd), -ENOTTY);
++        return -ENOTTY;
++    }
++
++    if (unlikely(_IOC_NR(cmd) > FWL_LOADER_IOCTL_MAXNR))
++    {
++        pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
++               __func__, _IOC_NR(cmd), -ENOTTY);
++        return -ENOTTY;
++    }
++
++    if (_IOC_DIR(cmd) & _IOC_READ)
++    {
++        ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
++    }
++    else if(_IOC_DIR(cmd) & _IOC_WRITE)
++    {
++        ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
++    }
++
++    if(ret)
++    {
++        pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
++               __func__, _IOC_NR(cmd), -EACCES);
++        return -EACCES;
++    }
++
++    switch(cmd)
++    {
++    case FWL_SET_PARAM:
++        PRINT_DBG("%s, FWL_SET_PARAM start\n", __func__);
++        ret = __copy_from_user(&fwl_desc->fwl_param,
++                (struct fwl_parameter __user *)arg,
++                sizeof(struct fwl_parameter));
++        break;
++    case FWL_LOAD_FW:
++        PRINT_DBG("%s, FWL_LOAD_FW start\n", __func__);
++        __get_user(fwl_desc->phy_addr, (unsigned int __user *)arg);
++        ret = fwl_load_firmware();
++        break;
++    case FWL_START_FW:
++        PRINT_DBG("%s, FWL_START_FW start\n", __func__);
++        fwl_start_firmware();
++        break;
++    default:
++        ret = -ENOTTY;
++        break;
++    }
++
++    return ret;
++}
++
++static int fwl_loader_open(struct inode *inode, struct file *file)
++{
++
++    int ret;
++    PRINT_DBG("%s, start\n", __func__);
++
++    ret = request_firmware(&fwl_desc->fw_entry, fwl_desc->name, fwl_loader_device.this_device);
++
++    if(ret)
++    {
++        pr_err("ERROR: %s, request firmware failed, ret: %d\n", __func__, ret);
++    }
++
++    return ret;
++}
++
++static int fwl_loader_release(struct inode *inode, struct file *filp)
++{
++    release_firmware(fwl_desc->fw_entry);
++    return 0;
++}
++
++static const struct file_operations fwl_loader_fops =
++{
++    .owner                  = THIS_MODULE,
++    .open                   = fwl_loader_open,
++    .release                = fwl_loader_release,
++    .unlocked_ioctl         = fwl_loader_ioctl,
++};
++
++static struct miscdevice fwl_loader_device =
++{
++    .minor = MISC_DYNAMIC_MINOR,
++    .name = DEVICE_NAME,
++    .fops = &fwl_loader_fops,
++};
++
++static int __init fwl_loader_init(void)
++{
++    int ret;
++
++    fwl_desc = kzalloc(sizeof(struct fwl_description), GFP_KERNEL);
++    if (!fwl_desc)
++        return -ENOMEM;
++    fwl_desc->name = fw_name;
++    fwl_desc->phy_addr = fw_phy_addr;
++
++    ret = misc_register(&fwl_loader_device);
++    if (ret < 0)
++    {
++        pr_err("%s: ERROR: %s registration failed\n",
++               __func__, DEVICE_NAME);
++        kfree(fwl_desc);
++        fwl_desc = NULL;
++        return -ENXIO;
++    }
++
++    return ret;
++}
++
++static void __exit fwl_loader_exit(void)
++{
++    misc_deregister(&fwl_loader_device);
++    kfree(fwl_desc);
++    fwl_desc = NULL;
++}
++module_init(fwl_loader_init);
++module_exit(fwl_loader_exit);
++
++MODULE_AUTHOR("QIN");
++MODULE_DESCRIPTION("Misc Driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform: FH");
+diff --git a/drivers/misc/fh_i2s.c b/drivers/misc/fh_i2s.c
+new file mode 100644
+index 00000000..84e6292c
+--- /dev/null
++++ b/drivers/misc/fh_i2s.c
+@@ -0,0 +1,1346 @@
++/**@file
++ * @Copyright (c) 2016 Shanghai Fullhan Microelectronics Co., Ltd.
++ * @brief
++ *
++ * @author      fullhan
++ * @date        2016-7-15
++ * @version     V1.0
++ * @version     V1.1  modify code style
++ * @note: misc i2s driver for fh8830 embedded i2s codec.
++ * @note History:
++ * @note     <author>   <time>    <version >   <desc>
++ * @note
++ * @warning: the codec is fixed to 24 bit, so remember to convert the 24-bit data
++ *   to 16-bit in the application layer; the next CPU revision will solve this bug.
++ */
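++
++/*
++ * Application-layer sketch of the 24-bit -> 16-bit conversion mentioned
++ * above.  The shift amount assumes the 24 valid bits sit in the low part
++ * of each 32-bit frame; adjust it if the hardware justifies differently:
++ *
++ *   int32_t *frames = (int32_t *)buf;
++ *   int16_t *pcm = out;
++ *   for (i = 0; i < n_frames; i++)
++ *       pcm[i] = (int16_t)(frames[i] >> 8);   // drop the low 8 of 24 valid bits
++ */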
++
++
++#include <linux/irqreturn.h>
++#include <linux/clk.h>
++#include <linux/device.h>
++#include <linux/file.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/spinlock.h>
++#include <linux/uaccess.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/types.h>
++#include <linux/poll.h>
++#include <linux/ioctl.h>
++#include <linux/i2c.h>
++#include <linux/workqueue.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <mach/fh_dmac.h>
++#include <mach/fh_predefined.h>
++
++#include "fh_i2s.h"
++
++
++#define NR_DESCS_PER_CHANNEL 64
++
++#define FIX_SAMPLE_BIT       32
++
++#define I2S_HW_NUM_RX  0
++#define I2S_HW_NUM_TX  1
++#define I2S_DMA_CAP_CHANNEL 2
++#define I2S_DMA_PAY_CHANNEL 3
++
++#define I2S_CTRL         0x00
++#define I2S_TXFIFO_CTRL  0x04
++#define I2S_RXFIFO_CTRL  0x08
++#define I2S_STATUS       0x0c
++#define I2S_DAT_CTL      0x10
++#define I2S_DBG_CTL      0x14
++#define I2S_STATUS1      0x18
++#define I2S_STATUS2      0x1c
++
++#define I2S_DACL_FIFO    0xf0a00100
++#define I2S_DACR_FIFO    0xf0a00300
++#define I2S_ADC_FIFO     0xf0a00200
++
++#define I2S_DMA_PREALLOC_SIZE (128 * 1024)
++
++#define I2S_INTR_RX_UNDERFLOW   0x10000
++#define I2S_INTR_RX_OVERFLOW    0x20000
++#define I2S_INTR_TX_UNDERFLOW   0x40000
++#define I2S_INTR_TX_OVERFLOW    0x80000
++
++#define I2S_EXT_EN          (1 << 12)
++#define I2S_EN              (1 << 0)
++#define I2S_DACL_CHEN_EN    (1 << 30)
++#define I2S_DACR_CHEN_EN    (1 << 31)
++#define I2S_ADC_CHEN_EN     (1 << 29)
++#define I2S_SHIFT_BIT       6
++#define I2S_DAC_FIFO_CLEAR  (1 << 4)
++#define I2S_ADC_FIFO_CLEAR  (1 << 4)
++#define I2S_ADC_FIFO_EN     (1 << 0)
++#define I2S_DAC_FIFO_EN     (1 << 0)
++#define FH_i2s_DEBUG
++#ifdef FH_i2s_DEBUG
++#define PRINT_i2s_DBG(fmt, args...)     \
++    do                              \
++    {                               \
++        printk("FH_i2s_DEBUG: ");   \
++        printk(fmt, ## args);       \
++    }                               \
++    while(0)
++#else
++#define PRINT_i2s_DBG(fmt, args...)  do { } while (0)
++#endif
++
++enum i2s_type
++{
++    capture = 0,
++    playback,
++};
++
++
++enum i2s_state
++{
++    STATE_NORMAL = 0,
++    STATE_XRUN,
++    STATE_STOP,
++    STATE_RUN,
++    STATE_PAUSE
++};
++
++struct i2s_infor_record_t
++{
++	int record_pid;
++	int play_pid;
++} i2s_infor_record;
++
++
++struct fh_dma_chan
++{
++    struct dma_chan     *chan;
++    void __iomem        *ch_regs;
++    u8          mask;
++    u8          priority;
++    bool            paused;
++    bool                initialized;
++    spinlock_t      lock;
++    /* these other elements are all protected by lock */
++    unsigned long       flags;
++    dma_cookie_t        completed;
++    struct list_head    active_list;
++    struct list_head    queue;
++    struct list_head    free_list;
++    struct fh_cyclic_desc   *cdesc;
++    unsigned int        descs_allocated;
++};
++
++struct i2s_config {
++	int rate;
++	int volume;
++    int frame_bit;
++    int channels;
++    int buffer_size;
++    int period_size;
++    int buffer_bytes;
++    int period_bytes;
++    int start_threshold;
++    int stop_threshold;
++};
++
++struct i2s_ptr_t
++{
++	struct i2s_config cfg;
++    enum i2s_state state;
++    long size;
++    int hw_ptr;
++    int appl_ptr;
++    spinlock_t lock;
++    struct device dev;
++    u8 *area; /*virtual pointer*/
++    dma_addr_t addr; /*physical address*/
++    u8 * mmap_addr;
++};
++
++struct fh_i2s_cfg
++{
++    struct i2s_ptr_t capture;
++    struct i2s_ptr_t playback;
++    wait_queue_head_t readqueue;
++    wait_queue_head_t writequeue;
++	struct semaphore sem_capture;
++	struct semaphore sem_playback;
++};
++
++struct fh_i2s_dma_chan
++{
++    struct dma_chan     *chan;
++    void __iomem        *ch_regs;
++    u8          mask;
++    u8          priority;
++    bool            paused;
++    bool                initialized;
++    spinlock_t      lock;
++    /* these other elements are all protected by lock */
++    unsigned long       flags;
++    dma_cookie_t        completed;
++    struct list_head    active_list;
++    struct list_head    queue;
++    struct list_head    free_list;
++    struct dma_async_tx_descriptor   *cdesc;
++    unsigned int        descs_allocated;
++};
++
++struct fh_I2S_dma_transfer
++{
++    struct dma_chan *chan;
++    struct fh_dma_slave cfg;
++    struct scatterlist sgl;
++    struct fh_cyclic_desc *desc;
++};
++
++struct channel_assign
++{
++	int capture_channel;
++	int playback_channel;
++};
++
++struct i2s_dev
++{
++	struct channel_assign channel_assign;
++    struct fh_i2s_cfg i2s_config;
++    struct miscdevice fh_i2s_miscdev;
++};
++
++static const struct file_operations I2S_fops;
++
++static struct i2s_dev fh_i2s_dev =
++{
++    .channel_assign = {
++        .capture_channel = I2S_DMA_CAP_CHANNEL,
++        .playback_channel = I2S_DMA_PAY_CHANNEL,
++    },
++    .fh_i2s_miscdev = {
++        .fops       = &I2S_fops,
++        .name       = "fh_fh8830_i2s",
++        .minor      = MISC_DYNAMIC_MINOR,
++    }
++
++};
++
++static struct
++{
++    spinlock_t      lock;
++    void __iomem        *regs;
++    struct clk      *clk;
++    unsigned long       in_use;
++    unsigned long       next_heartbeat;
++    struct timer_list   timer;
++    int         irq;
++} fh_i2s_module;
++static struct fh_dma_chan *dma_rx_transfer = NULL;
++static struct fh_dma_chan *dma_tx_transfer = NULL;
++static struct work_struct playback_wq;
++
++static struct i2s_param_store
++{
++    int input_volume;
++} i2s_param_store;
++
++
++static void fh_I2S_tx_dma_done(void *arg);
++static void fh_I2S_rx_dma_done(void *arg);
++static bool  fh_I2S_dma_chan_filter(struct dma_chan *chan, void *filter_param);
++
++void fh_I2S_stop_playback(struct fh_i2s_cfg *i2s_config)
++{
++
++    if(i2s_config->playback.state == STATE_STOP)
++    {
++        return;
++    }
++    i2s_config->playback.state = STATE_STOP;
++    writel(0, fh_i2s_module.regs + I2S_TXFIFO_CTRL);//tx fifo disable
++    fh_dma_cyclic_stop(dma_tx_transfer->chan);
++    fh_dma_cyclic_free(dma_tx_transfer->chan);
++    up(&i2s_config->sem_playback);
++}
++
++void fh_I2S_stop_capture(struct fh_i2s_cfg *i2s_config)
++{
++
++    u32 rx_status;
++    if(i2s_config->capture.state == STATE_STOP)
++    {
++        return;
++    }
++    rx_status = readl( fh_i2s_module.regs + I2S_RXFIFO_CTRL);//clear rx fifo
++    rx_status =  rx_status|I2S_ADC_FIFO_CLEAR;
++    writel(rx_status,fh_i2s_module.regs + I2S_RXFIFO_CTRL);
++
++    i2s_config->capture.state = STATE_STOP;
++
++    writel(0, fh_i2s_module.regs + I2S_RXFIFO_CTRL);//rx fifo disable
++
++    fh_dma_cyclic_stop(dma_rx_transfer->chan);
++    fh_dma_cyclic_free(dma_rx_transfer->chan);
++    up(&i2s_config->sem_capture);
++}
++
++
++int fh_i2s_get_factor_from_table(int rate)
++{
++	return 0;
++}
++
++void fh_switch_input_volume(int volume)
++{
++
++}
++
++void init_i2s(enum i2s_type type,struct fh_i2s_cfg  *i2s_config)
++{
++
++}
++
++static inline long bytes_to_frames(int frame_bit, int bytes)
++{
++    return bytes * 8 / frame_bit;
++}
++
++static inline long fh_i2s_frames_to_bytes(int frame_bit, int frames)
++{
++    return frames * frame_bit / 8;
++}
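++
++/*
++ * With FIX_SAMPLE_BIT == 32 each frame is 4 bytes, so for example
++ * fh_i2s_frames_to_bytes(32, 1024) == 4096 and
++ * bytes_to_frames(32, 4096) == 1024.
++ */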
++
++int i2s_avail_data_len(enum i2s_type type,struct fh_i2s_cfg *stream)
++{
++    int delta;
++    if (capture == type)
++    {
++        spin_lock(&stream->capture.lock);
++        delta = stream->capture.hw_ptr - stream->capture.appl_ptr;
++        spin_unlock(&stream->capture.lock);
++        if (delta < 0)
++        {
++            delta += stream->capture.size;
++        }
++        return delta;
++    }
++    else
++    {
++        spin_lock(&stream->playback.lock);
++        delta = stream->playback.appl_ptr - stream->playback.hw_ptr;
++        spin_unlock(&stream->playback.lock);
++        if (delta < 0)
++        {
++            delta += stream->playback.size;
++        }
++        return stream->playback.size - delta;
++    }
++}
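++
++/*
++ * Worked example of the ring-buffer accounting above (a sketch): with
++ * size = 4096, hw_ptr = 512 and appl_ptr = 3584, capture has
++ * (512 - 3584) + 4096 = 1024 bytes ready to read, while playback has
++ * 3584 - 512 = 3072 bytes queued, i.e. 4096 - 3072 = 1024 bytes free
++ * for the writer.
++ */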
++
++static int fh_i2s_close(struct inode *ip, struct file *fp)
++{
++	struct miscdevice *miscdev = fp->private_data;
++	struct i2s_dev	*dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
++	struct fh_i2s_cfg *i2s_config = &dev->i2s_config;
++	int pid;
++
++	pid = current->tgid;
++
++	if (i2s_infor_record.play_pid == pid)
++	{
++		fh_I2S_stop_playback(i2s_config);
++	}
++	if (i2s_infor_record.record_pid == pid)
++	{
++		fh_I2S_stop_capture(i2s_config);
++	}
++	return 0;
++}
++
++int fh_i2s_register_tx_dma(struct fh_i2s_cfg  *i2s_config)
++{
++    int ret;
++    unsigned int reg;
++    struct fh_dma_slave *tx_slave;
++    tx_slave =  kzalloc(sizeof(struct fh_dma_slave), GFP_KERNEL);
++    if (!tx_slave)
++    {
++        return -ENOMEM;
++    }
++    tx_slave->cfg_hi = FHC_CFGH_DST_PER(I2S_HW_NUM_TX);
++    tx_slave->dst_msize = FH_DMA_MSIZE_8;
++    tx_slave->src_msize = FH_DMA_MSIZE_8;
++    tx_slave->reg_width = FH_DMA_SLAVE_WIDTH_32BIT;
++    tx_slave->fc = FH_DMA_FC_D_M2P;
++    tx_slave->tx_reg = I2S_DACL_FIFO;
++    dma_tx_transfer->chan->private =  tx_slave;
++
++    if ((i2s_config->playback.cfg.buffer_bytes < i2s_config->playback.cfg.period_bytes) ||
++        (i2s_config->playback.cfg.buffer_bytes <= 0) || (i2s_config->playback.cfg.period_bytes <= 0) ||
++        (i2s_config->playback.cfg.buffer_bytes/i2s_config->playback.cfg.period_bytes > NR_DESCS_PER_CHANNEL))
++    {
++        printk(KERN_ERR "buffer_size and period_size are invalid\n");
++        ret = -EINVAL;
++        goto fail;
++    }
++
++    dma_tx_transfer->cdesc =
++    	    fh_dma_cyclic_prep(dma_tx_transfer->chan,i2s_config->playback.addr,
++    	    		i2s_config->playback.cfg.buffer_bytes,i2s_config->playback.cfg.period_bytes, DMA_MEM_TO_DEV);
++    if (IS_ERR_OR_NULL(dma_tx_transfer->cdesc))
++    {
++        printk(KERN_ERR "cyclic desc err\n");
++        ret = -ENOMEM;
++        goto fail;
++    }
++    dma_tx_transfer->cdesc->period_callback = fh_I2S_tx_dma_done;
++    dma_tx_transfer->cdesc->period_callback_param = i2s_config;
++    fh_dma_cyclic_start(dma_tx_transfer->chan);
++    kfree(tx_slave);
++
++    reg = readl(fh_i2s_module.regs + I2S_CTRL);
++    reg = reg << I2S_SHIFT_BIT;
++    reg |= I2S_DACL_CHEN_EN;
++    writel(reg, fh_i2s_module.regs + I2S_CTRL);              /* enable left DAC channel */
++
++    writel(0x1 << 4, fh_i2s_module.regs + I2S_TXFIFO_CTRL);  /* clear tx fifo */
++    writel(0x20027, fh_i2s_module.regs + I2S_TXFIFO_CTRL);   /* enable tx fifo */
++
++    /*must set NULL to tell DMA driver that we free the DMA slave*/
++    dma_tx_transfer->chan->private = NULL;
++
++    return 0;
++fail:
++    kfree(tx_slave);
++    return ret;
++}
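++
++/*
++ * Design note: the cyclic descriptor above splits the playback buffer
++ * into buffer_bytes / period_bytes periods (at most NR_DESCS_PER_CHANNEL),
++ * and fh_I2S_tx_dma_done() advances hw_ptr by one period per callback;
++ * that per-period callback is what drives the poll()/read()/write()
++ * wakeups further down in this file.
++ */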
++
++int fh_i2s_register_rx_dma( struct fh_i2s_cfg  *i2s_config)
++{
++    int ret;
++    unsigned int reg;
++    struct fh_dma_slave *rx_slave;
++    rx_slave =  kzalloc(sizeof(struct fh_dma_slave), GFP_KERNEL);
++    if (!rx_slave)
++    {
++        return -ENOMEM;
++    }
++
++    rx_slave->cfg_hi = FHC_CFGH_SRC_PER(I2S_HW_NUM_RX);
++    rx_slave->dst_msize = FH_DMA_MSIZE_8;
++    rx_slave->src_msize = FH_DMA_MSIZE_8;
++    rx_slave->reg_width = FH_DMA_SLAVE_WIDTH_32BIT;
++    rx_slave->fc = FH_DMA_FC_D_P2M;
++    rx_slave->rx_reg = I2S_ADC_FIFO;
++	dma_rx_transfer->chan->private =  rx_slave;
++
++    if ((i2s_config->capture.cfg.buffer_bytes < i2s_config->capture.cfg.period_bytes) ||
++        (i2s_config->capture.cfg.buffer_bytes <= 0) ||(i2s_config->capture.cfg.period_bytes <= 0) ||
++        (i2s_config->capture.cfg.buffer_bytes/i2s_config->capture.cfg.period_bytes > NR_DESCS_PER_CHANNEL))
++    {
++        printk(KERN_ERR "buffer_size and period_size are invalid\n");
++        ret = -EINVAL;
++        goto fail;
++    }
++
++    dma_rx_transfer->cdesc=
++    fh_dma_cyclic_prep(dma_rx_transfer->chan,i2s_config->capture.addr,
++    		i2s_config->capture.cfg.buffer_bytes,i2s_config->capture.cfg.period_bytes, DMA_DEV_TO_MEM);
++    if (IS_ERR_OR_NULL(dma_rx_transfer->cdesc))
++    {
++        printk(KERN_ERR" cyclic desc err\n");
++        ret = -ENOMEM;
++        goto fail;
++    }
++
++    dma_rx_transfer->cdesc->period_callback = fh_I2S_rx_dma_done;
++    dma_rx_transfer->cdesc->period_callback_param = i2s_config;
++    fh_dma_cyclic_start(dma_rx_transfer->chan);
++
++    reg = readl(fh_i2s_module.regs + I2S_CTRL);
++    reg = reg << I2S_SHIFT_BIT;
++    reg |= I2S_ADC_CHEN_EN;
++    writel(reg, fh_i2s_module.regs + I2S_CTRL);              /* enable ADC channel */
++    writel(0x1 << 4, fh_i2s_module.regs + I2S_RXFIFO_CTRL);  /* clear rx fifo */
++    writel(0x20027, fh_i2s_module.regs + I2S_RXFIFO_CTRL);   /* enable rx fifo */
++
++
++    kfree(rx_slave);
++    /*must set NULL to tell DMA driver that we free the DMA slave*/
++    dma_rx_transfer->chan->private = NULL;
++    return 0;
++fail:
++    kfree(rx_slave);
++    return ret;
++}
++
++
++void fh_i2s_playback_start_wq_handler(struct work_struct *work)
++{
++    int avail;
++    while(1)
++    {
++        if (STATE_STOP == fh_i2s_dev.i2s_config.playback.state)
++        {
++            return;
++        }
++        avail = i2s_avail_data_len(playback, &fh_i2s_dev.i2s_config);
++        if (avail > fh_i2s_dev.i2s_config.playback.cfg.period_bytes)
++        {
++            msleep(0);
++        }
++        else
++        {
++            break;
++        }
++
++    }
++}
++
++int fh_I2S_start_playback(struct fh_i2s_cfg *i2s_config)
++{
++    int ret;
++
++    if(i2s_config->playback.state == STATE_RUN)
++    {
++        return 0;
++    }
++	if (i2s_config->playback.cfg.buffer_bytes >= I2S_DMA_PREALLOC_SIZE)
++    {
++        printk("DMA prealloc buffer is smaller than  i2s_config->buffer_bytes\n");
++        return -ENOMEM;
++    }
++    memset(i2s_config->playback.area, 0x0, i2s_config->playback.cfg.buffer_bytes);
++    i2s_config->playback.size = i2s_config->playback.cfg.buffer_bytes;
++    i2s_config->playback.state = STATE_RUN;
++    ret = fh_i2s_register_tx_dma(i2s_config);
++    if (ret < 0)
++    {
++        return ret;
++    }
++    INIT_WORK(&playback_wq, fh_i2s_playback_start_wq_handler);
++    schedule_work(&playback_wq);
++
++    return 0;
++}
++
++int fh_I2S_start_capture(struct fh_i2s_cfg *i2s_config)
++{
++    if(i2s_config->capture.state == STATE_RUN)
++    {
++        return 0;
++    }
++
++    if (i2s_config->capture.cfg.buffer_bytes >= I2S_DMA_PREALLOC_SIZE)
++    {
++        printk("DMA prealloc buffer is smaller than  i2s_config->buffer_bytes\n");
++        return -ENOMEM;
++    }
++    memset(i2s_config->capture.area, 0, i2s_config->capture.cfg.buffer_bytes);
++    i2s_config->capture.size = i2s_config->capture.cfg.buffer_bytes;
++
++    i2s_config->capture.state = STATE_RUN;
++    return fh_i2s_register_rx_dma(i2s_config);
++}
++
++
++static void fh_I2S_rx_dma_done(void *arg)
++{
++    struct fh_i2s_cfg *i2s_config;
++    i2s_config = ( struct fh_i2s_cfg *)arg;
++    spin_lock(&i2s_config->capture.lock);
++    i2s_config->capture.hw_ptr += i2s_config->capture.cfg.period_bytes;
++    if (i2s_config->capture.hw_ptr > i2s_config->capture.size )
++    {
++        i2s_config->capture.hw_ptr = i2s_config->capture.hw_ptr - i2s_config->capture.size;
++    }
++    spin_unlock(&i2s_config->capture.lock);
++    if (waitqueue_active(&i2s_config->readqueue))
++    {
++        int avail = i2s_avail_data_len(capture,i2s_config);
++        if (avail > i2s_config->capture.cfg.period_bytes)
++        {
++            wake_up_interruptible(&i2s_config->readqueue);
++        }
++    }
++
++}
++
++
++static void fh_I2S_tx_dma_done(void *arg)
++{
++
++	struct fh_i2s_cfg *i2s_config;
++    i2s_config = ( struct fh_i2s_cfg *)arg;
++	spin_lock(&i2s_config->playback.lock);
++    i2s_config->playback.hw_ptr +=  i2s_config->playback.cfg.period_bytes;
++    if (i2s_config->playback.hw_ptr > i2s_config->playback.size )
++    {
++        i2s_config->playback.hw_ptr = i2s_config->playback.hw_ptr - i2s_config->playback.size;
++    }
++    spin_unlock(&i2s_config->playback.lock);
++    if (waitqueue_active(&i2s_config->writequeue))
++    {
++        int avail = i2s_avail_data_len(playback,i2s_config);
++        if (avail > i2s_config->playback.cfg.period_bytes)
++        {
++            wake_up_interruptible(&i2s_config->writequeue);
++        }
++    }
++}
++
++bool  fh_I2S_dma_chan_filter(struct dma_chan *chan, void *filter_param)
++{
++    int dma_channel = *(int *)filter_param;
++    bool ret = false;
++
++    if (chan->chan_id == dma_channel)
++    {
++        ret = true;
++    }
++    return ret;
++}
++
++int fh_i2s_arg_config_support(struct fh_i2s_cfg_arg * cfg)
++{
++    return 0;
++}
++
++void fh_i2s_reset_dma_buff(enum i2s_type type, struct fh_i2s_cfg *i2s_config)
++{
++    if (capture == type)
++    {
++        i2s_config->capture.appl_ptr = 0;
++        i2s_config->capture.hw_ptr = 0;
++    }
++    else
++    {
++        i2s_config->playback.appl_ptr = 0;
++        i2s_config->playback.hw_ptr = 0;
++    }
++}
++
++static long fh_i2s_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++    struct fh_i2s_cfg_arg cfg;
++    struct miscdevice *miscdev = filp->private_data;
++    struct i2s_dev  *dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
++    struct fh_i2s_cfg  *i2s_config = &dev->i2s_config;
++    int pid;
++    int rx_status;
++    int tx_status;
++
++    pid = current->tgid;
++    switch (cmd)
++    {
++        case I2S_INIT_CAPTURE_MEM:
++
++            if (copy_from_user((void *)&cfg, (void __user *)arg, sizeof(struct fh_i2s_cfg_arg)))
++            {
++                printk(KERN_ERR "copy err\n");
++                return -EIO;
++            }
++            if (0 == fh_i2s_arg_config_support(&cfg))
++            {
++            	if (down_trylock(&i2s_config->sem_capture))
++	            {
++	            	printk(KERN_ERR "another thread is running capture.\n");
++	                return -EBUSY;
++	            }
++               	i2s_infor_record.record_pid =  pid;
++                i2s_config->capture.cfg.volume = cfg.volume;
++                i2s_config->capture.cfg.rate = cfg.rate;
++                i2s_config->capture.cfg.channels = cfg.channels;
++                i2s_config->capture.cfg.buffer_size = cfg.buffer_size;
++                i2s_config->capture.cfg.frame_bit = FIX_SAMPLE_BIT;
++                i2s_config->capture.cfg.period_size = cfg.period_size;
++                i2s_config->capture.cfg.buffer_bytes = fh_i2s_frames_to_bytes(i2s_config->capture.cfg.frame_bit,i2s_config->capture.cfg.buffer_size);
++                i2s_config->capture.cfg.period_bytes = fh_i2s_frames_to_bytes(i2s_config->capture.cfg.frame_bit,i2s_config->capture.cfg.period_size);
++                i2s_config->capture.cfg.start_threshold =i2s_config->capture.cfg.buffer_bytes;
++                i2s_config->capture.cfg.stop_threshold = i2s_config->capture.cfg.buffer_bytes;
++                fh_i2s_reset_dma_buff(capture, i2s_config);
++                init_waitqueue_head(&i2s_config->readqueue);
++                spin_lock_init(&i2s_config->capture.lock);
++                init_i2s(capture, i2s_config);
++                i2s_param_store.input_volume = i2s_config->capture.cfg.volume;
++                /* *  config sample  *  */
++
++            }
++            else
++            {
++                return -EINVAL;
++            }
++
++            break;
++        case I2S_INIT_PLAYBACK_MEM:
++            if (copy_from_user((void *)&cfg, (void __user *)arg, sizeof(struct fh_i2s_cfg_arg)))
++            {
++                printk(KERN_ERR "copy err\n");
++                return -EIO;
++            }
++            
++            if (0 == fh_i2s_arg_config_support(&cfg))
++            {
++            	if (down_trylock(&i2s_config->sem_playback))
++	            {
++	            	printk(KERN_ERR "another thread is running playback.\n");
++	                return -EBUSY;
++	            }
++            	i2s_infor_record.play_pid = pid;
++                i2s_config->playback.cfg.volume = cfg.volume;
++                i2s_config->playback.cfg.rate = cfg.rate;
++                i2s_config->playback.cfg.channels = cfg.channels;
++                i2s_config->playback.cfg.buffer_size = cfg.buffer_size;
++                i2s_config->playback.cfg.frame_bit = FIX_SAMPLE_BIT;
++                i2s_config->playback.cfg.period_size = cfg.period_size;
++                i2s_config->playback.cfg.buffer_bytes = fh_i2s_frames_to_bytes(i2s_config->playback.cfg.frame_bit,i2s_config->playback.cfg.buffer_size);
++                i2s_config->playback.cfg.period_bytes = fh_i2s_frames_to_bytes(i2s_config->playback.cfg.frame_bit,i2s_config->playback.cfg.period_size);
++                i2s_config->playback.cfg.start_threshold =i2s_config->playback.cfg.buffer_bytes;
++                i2s_config->playback.cfg.stop_threshold = i2s_config->playback.cfg.buffer_bytes;
++                fh_i2s_reset_dma_buff(playback, i2s_config);
++                init_waitqueue_head(&i2s_config->writequeue);
++                spin_lock_init(&i2s_config->playback.lock);
++                init_i2s(playback, i2s_config);
++
++            }
++            else
++            {
++                return -EINVAL;
++            }
++            break;
++        case I2S_AI_EN:
++            if (i2s_infor_record.record_pid != pid){
++            	return -EBUSY;
++            }
++            return fh_I2S_start_capture(i2s_config);
++        case I2S_AO_EN:
++			if (i2s_infor_record.play_pid != pid) {
++				return -EBUSY;
++			}
++			return fh_I2S_start_playback(i2s_config);
++
++        case I2S_AI_DISABLE:
++            printk(KERN_INFO "I2S_AI_DISABLE\n");
++            if (i2s_infor_record.record_pid != pid) {
++                return -EBUSY;
++            }
++            fh_I2S_stop_capture(i2s_config);
++            break;
++        case I2S_AO_DISABLE:
++            printk(KERN_INFO "I2S_AO_DISABLE\n");
++            if (i2s_infor_record.play_pid != pid) {
++                return -EBUSY;
++            }
++            fh_I2S_stop_playback(i2s_config);
++            break;
++        case I2S_AI_PAUSE:
++			if (i2s_infor_record.record_pid != pid) {
++				return -EBUSY;
++			}
++            printk(KERN_INFO "capture pause\n");
++            i2s_config->capture.state = STATE_PAUSE;
++            rx_status = readl(fh_i2s_module.regs + I2S_RXFIFO_CTRL);/*rx fifo disable*/
++            rx_status =  rx_status&(~I2S_ADC_FIFO_EN);
++            writel(rx_status, fh_i2s_module.regs + I2S_RXFIFO_CTRL);/*rx fifo disable*/
++            break;
++        case I2S_AI_RESUME:
++			if (i2s_infor_record.record_pid != pid) {
++				return -EBUSY;
++			}
++            printk(KERN_INFO "capture resume\n");
++            i2s_config->capture.state = STATE_RUN;
++            rx_status = readl( fh_i2s_module.regs + I2S_RXFIFO_CTRL);//clear rx fifo
++            rx_status =  rx_status|I2S_ADC_FIFO_CLEAR;
++            writel(rx_status,fh_i2s_module.regs + I2S_RXFIFO_CTRL);/*enable rx fifo*/
++            rx_status =  rx_status&(~I2S_ADC_FIFO_CLEAR);
++            rx_status =  rx_status|I2S_ADC_FIFO_EN;
++            writel(rx_status,fh_i2s_module.regs + I2S_RXFIFO_CTRL);/*enable rx fifo*/
++            break;
++        case I2S_AO_PAUSE:
++			if (i2s_infor_record.play_pid != pid) {
++				return -EBUSY;
++			}
++            i2s_config->playback.state = STATE_PAUSE;
++            printk(KERN_INFO "playback pause\n");
++            tx_status = readl(fh_i2s_module.regs + I2S_TXFIFO_CTRL);/*rx fifo disable*/
++            tx_status =  tx_status&(~I2S_DAC_FIFO_EN);
++            writel(tx_status, fh_i2s_module.regs + I2S_TXFIFO_CTRL);/*tx fifo disable*/
++            break;
++        case I2S_AO_RESUME:
++			if (i2s_infor_record.play_pid != pid) {
++				return -EBUSY;
++			}
++            printk(KERN_INFO "playback resume\n");
++            i2s_config->playback.state = STATE_RUN;
++            tx_status = readl( fh_i2s_module.regs + I2S_TXFIFO_CTRL);//clear rx fifo
++            tx_status =  tx_status|I2S_DAC_FIFO_EN;
++            writel(tx_status,fh_i2s_module.regs + I2S_TXFIFO_CTRL); //enable tx fifo read enable
++            break;
++        default:
++            return -ENOTTY;
++    }
++    return 0;
++}
++
++static int fh_i2s_open(struct inode *ip, struct file *fp)
++{
++
++    fp->private_data = &fh_i2s_dev.fh_i2s_miscdev;
++
++    return 0;
++}
++
++static u32 fh_i2s_poll(struct file *filp, poll_table *wait)
++{
++    struct miscdevice *miscdev = filp->private_data;
++    struct i2s_dev  *dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
++    struct fh_i2s_cfg  *i2s_config = &dev->i2s_config;
++    u32 mask = 0;
++    long avail;
++    if (STATE_RUN == i2s_config->capture.state)
++    {
++        poll_wait(filp,&i2s_config->readqueue,wait);
++        avail = i2s_avail_data_len(capture, i2s_config);
++        if (avail >  i2s_config->capture.cfg.period_bytes)
++        {
++            mask |=  POLLIN | POLLRDNORM;
++        }
++    }
++    if (STATE_RUN == i2s_config->playback.state)
++    {
++        poll_wait(filp,&i2s_config->writequeue,wait);
++        avail = i2s_avail_data_len(playback, i2s_config);
++        if (avail >  i2s_config->playback.cfg.period_bytes)
++        {
++            mask |=  POLLOUT | POLLWRNORM;
++        }
++    }
++    return mask;
++}
++
++static ssize_t fh_i2s_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
++{
++
++    int ret;
++    struct miscdevice *miscdev = filp->private_data;
++    struct i2s_dev  *dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
++    struct fh_i2s_cfg  *i2s_config = &dev->i2s_config;
++    int after,left;
++    int pid,avail;
++    pid = current->tgid;
++    if (i2s_infor_record.record_pid != pid){
++    	return -EBUSY;
++    }
++
++    avail = i2s_avail_data_len(capture, i2s_config);
++    if (avail > len)
++    {
++        avail = len;
++    }
++    after = avail + i2s_config->capture.appl_ptr;
++    if(after  > i2s_config->capture.size)
++    {
++        left = avail - (i2s_config->capture.size - i2s_config->capture.appl_ptr);
++        ret = copy_to_user(buf, i2s_config->capture.area + i2s_config->capture.appl_ptr, i2s_config->capture.size - i2s_config->capture.appl_ptr);
++        ret |= copy_to_user(buf + i2s_config->capture.size - i2s_config->capture.appl_ptr, i2s_config->capture.area, left);
++        if (ret)
++            return -EFAULT;
++        spin_lock(&i2s_config->capture.lock);
++        i2s_config->capture.appl_ptr = left;
++        spin_unlock(&i2s_config->capture.lock);
++    }
++    else
++    {
++        ret = copy_to_user(buf, i2s_config->capture.area + i2s_config->capture.appl_ptr, avail);
++        if (ret)
++            return -EFAULT;
++        spin_lock(&i2s_config->capture.lock);
++        i2s_config->capture.appl_ptr += avail;
++        spin_unlock(&i2s_config->capture.lock);
++    }
++
++    return avail;
++
++}
++
++static ssize_t fh_i2s_write(struct file *filp, const char __user *buf,
++                            size_t len, loff_t *off)
++{
++
++    struct miscdevice *miscdev = filp->private_data;
++    struct i2s_dev  *dev = container_of(miscdev, struct i2s_dev, fh_i2s_miscdev);
++    struct fh_i2s_cfg  *i2s_config = &dev->i2s_config;
++    int  ret;
++    int after,left;
++    int pid,avail;
++    pid = current->tgid;
++    if (i2s_infor_record.play_pid != pid) {
++        return -EBUSY;
++    }
++    avail = i2s_avail_data_len(playback,i2s_config);
++    if (0 == avail)
++    {
++        return 0;
++    }
++    if (avail > len)
++    {
++        avail = len;
++    }
++    after = avail+i2s_config->playback.appl_ptr;
++    if(after  > i2s_config->playback.size)
++    {
++        left = avail - (i2s_config->playback.size-i2s_config->playback.appl_ptr);
++        ret = copy_from_user(i2s_config->playback.area + i2s_config->playback.appl_ptr, buf, i2s_config->playback.size - i2s_config->playback.appl_ptr);
++        ret |= copy_from_user(i2s_config->playback.area, buf + i2s_config->playback.size - i2s_config->playback.appl_ptr, left);
++        if (ret)
++            return -EFAULT;
++        spin_lock(&i2s_config->playback.lock);
++        i2s_config->playback.appl_ptr = left;
++        spin_unlock(&i2s_config->playback.lock);
++    }
++    else
++    {
++        ret = copy_from_user(i2s_config->playback.area + i2s_config->playback.appl_ptr, buf, avail);
++        if (ret)
++            return -EFAULT;
++        spin_lock(&i2s_config->playback.lock);
++        i2s_config->playback.appl_ptr += avail;
++        spin_unlock(&i2s_config->playback.lock);
++    }
++
++    return avail;
++}
++
++static irqreturn_t fh_i2s_interrupt(int irq, void *dev_id)
++{
++/* The handler body below is compiled out; the interrupt is simply acked as handled. */
++#if 0
++#ifndef CONFIG_MACH_FH8830_FPGA
++	u32 interrupts, rx_status;
++    struct fh_i2s_cfg  *i2s_config = &fh_i2s_dev.i2s_config;
++
++    interrupts = readl(fh_i2s_module.regs + I2S_CTRL);
++    //interrupts &= ~(0x3ff) << 16;
++    writel(interrupts, fh_i2s_module.regs + I2S_CTRL);
++
++    if(interrupts & I2S_INTR_RX_UNDERFLOW)
++    {
++        fh_I2S_stop_capture(i2s_config);
++        fh_I2S_start_capture(i2s_config);
++        PRINT_i2s_DBG("I2S_INTR_RX_UNDERFLOW\n");
++    }
++
++    if(interrupts & I2S_INTR_RX_OVERFLOW)
++    {
++        if (i2s_config->capture.state == STATE_RUN) {
++            fh_I2S_stop_capture(i2s_config);
++            fh_I2S_start_capture(i2s_config);
++        } else {
++            rx_status = readl( fh_i2s_module.regs + I2S_RXFIFO_CTRL);//clear rx fifo
++            rx_status =  rx_status|(1<<4);
++            writel(rx_status,fh_i2s_module.regs + I2S_RXFIFO_CTRL);
++        }
++        PRINT_i2s_DBG("I2S_INTR_RX_OVERFLOW\n");
++    }
++
++    if(interrupts & I2S_INTR_TX_UNDERFLOW)
++    {
++        fh_I2S_stop_playback(i2s_config);
++        fh_I2S_start_playback(i2s_config);
++        PRINT_i2s_DBG("I2S_INTR_TX_UNDERFLOW\n");
++    }
++
++    if(interrupts & I2S_INTR_TX_OVERFLOW)
++    {
++        fh_I2S_stop_playback(i2s_config);
++        fh_I2S_start_playback(i2s_config);
++        PRINT_i2s_DBG("I2S_INTR_TX_OVERFLOW\n");
++    }
++
++    PRINT_i2s_DBG("interrupts: 0x%x\n", interrupts);
++#endif
++#endif
++    return IRQ_HANDLED;
++}
++
++static const struct file_operations I2S_fops =
++{
++    .owner      = THIS_MODULE,
++    .llseek     = no_llseek,
++    .unlocked_ioctl = fh_i2s_ioctl,
++    .release = fh_i2s_close,
++    .open = fh_i2s_open,
++    .poll = fh_i2s_poll,
++    .read = fh_i2s_read,
++    .write = fh_i2s_write,
++
++};
++
++static int __devinit fh_i2s_drv_probe(struct platform_device *pdev)
++{
++    int ret;
++    struct resource *irq_res, *mem;
++
++    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++    if (!mem)
++        return -EINVAL;
++    printk("I2S probe\n");
++    if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
++                                 "fh_fh8830_i2s_module"))
++        return -ENOMEM;
++    printk("I2S :%d\n",__LINE__);
++    PRINT_i2s_DBG("%d\n",__LINE__);
++    fh_i2s_module.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
++    PRINT_i2s_DBG("probe: regs %p\n",fh_i2s_module.regs);
++    if (!fh_i2s_module.regs) {
++        ret = -ENOMEM;
++        goto remap_fail;
++    }
++
++    fh_i2s_module.clk = clk_get(NULL, "ac_clk");
++    if (IS_ERR(fh_i2s_module.clk)) {   /* clk_get() returns ERR_PTR on failure */
++        ret = PTR_ERR(fh_i2s_module.clk);
++        goto clk_fail;
++    }
++    clk_enable(fh_i2s_module.clk);
++    PRINT_i2s_DBG("%d\n",__LINE__);
++    spin_lock_init(&fh_i2s_module.lock);
++
++    ret = misc_register(&fh_i2s_dev.fh_i2s_miscdev);
++
++    if (ret)
++        goto out_disable_clk;
++
++    irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++    if (!irq_res)
++    {
++        pr_err("%s: ERROR: getting resource failed"
++               "cannot get IORESOURCE_IRQ\n", __func__);
++        ret = -ENXIO;
++        goto out_disable_clk;
++    }
++    fh_i2s_module.irq = irq_res->start;
++    ret = request_irq(fh_i2s_module.irq, fh_i2s_interrupt, IRQF_SHARED, "i2s", &fh_i2s_module);
++    if (ret) {
++        pr_err("%s: ERROR: cannot request IRQ %d\n", __func__, fh_i2s_module.irq);
++        goto out_deregister;
++    }
++    ret = readl(fh_i2s_module.regs + I2S_DBG_CTL);
++    ret = ret | I2S_EXT_EN | I2S_EN;
++    writel(ret, fh_i2s_module.regs + I2S_DBG_CTL);
++    return 0;
++
++out_deregister:
++    misc_deregister(&fh_i2s_dev.fh_i2s_miscdev);
++out_disable_clk:
++    clk_disable(fh_i2s_module.clk);
++    fh_i2s_module.clk = NULL;
++clk_fail:
++    devm_iounmap(&pdev->dev, fh_i2s_module.regs);
++    fh_i2s_module.regs = NULL;
++remap_fail:
++    devm_release_mem_region(&pdev->dev, mem->start, resource_size(mem));
++    return ret;
++}
++
++static int __devexit fh_I2S_drv_remove(struct platform_device *pdev)
++{
++	struct resource *mem;
++    misc_deregister(&fh_i2s_dev.fh_i2s_miscdev);
++
++    free_irq(fh_i2s_module.irq, &fh_i2s_module);
++
++    if (fh_i2s_module.clk) {
++        clk_disable(fh_i2s_module.clk);
++        clk_put(fh_i2s_module.clk);
++    }
++    if (fh_i2s_module.regs) {
++        devm_iounmap(&pdev->dev, fh_i2s_module.regs);
++    }
++    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++    if (mem) {
++        devm_release_mem_region(&pdev->dev, mem->start, resource_size(mem));
++    }
++    printk("I2S remove ok\n");
++    return 0;
++}
++
++static struct platform_driver fh_i2s_driver =
++{
++    .probe      = fh_i2s_drv_probe,
++    .remove     = __devexit_p(fh_I2S_drv_remove),
++    .driver     = {
++        .name   = "fh_fh8830_i2s",
++        .owner  = THIS_MODULE,
++    }
++};
++
++void i2s_prealloc_dma_buffer(struct fh_i2s_cfg  *i2s_config)
++{
++    int pg;
++    gfp_t gfp_flags;
++    pg = get_order(I2S_DMA_PREALLOC_SIZE);
++    gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
++    i2s_config->capture.dev.coherent_dma_mask = DMA_BIT_MASK(32);
++    i2s_config->capture.area  = dma_alloc_coherent(&i2s_config->capture.dev, PAGE_SIZE << pg, &i2s_config->capture.addr, gfp_flags );
++    if (!i2s_config->capture.area)
++    {
++        printk(KERN_ERR"no enough mem for capture  buffer alloc\n");
++        return ;
++    }
++    i2s_config->playback.dev.coherent_dma_mask = DMA_BIT_MASK(32);
++    i2s_config->playback.area  = dma_alloc_coherent(&i2s_config->playback.dev, PAGE_SIZE << pg, &i2s_config->playback.addr, gfp_flags );
++    if (!i2s_config->playback.area)
++    {
++        printk(KERN_ERR"no enough mem for  playback buffer alloc\n");
++        return ;
++    }
++}
++
++void i2s_free_prealloc_dma_buffer(struct fh_i2s_cfg  *i2s_config)
++{
++    int pg;
++    pg = get_order(I2S_DMA_PREALLOC_SIZE);
++    dma_free_coherent(&i2s_config->capture.dev, PAGE_SIZE<<pg, i2s_config->capture.area, i2s_config->capture.addr);
++    dma_free_coherent(&i2s_config->playback.dev, PAGE_SIZE<<pg, i2s_config->playback.area, i2s_config->playback.addr);
++}
++
++static void init_i2s_mutex(struct fh_i2s_cfg  *i2s_config)
++{
++    sema_init(&i2s_config->sem_capture, 1);
++    sema_init(&i2s_config->sem_playback, 1);
++}
++
++int i2s_request_dma_channel(void)
++{
++    dma_cap_mask_t mask;
++    /*request i2s rx dma channel*/
++    dma_rx_transfer = kzalloc(sizeof(struct fh_dma_chan), GFP_KERNEL);
++    if (!dma_rx_transfer)
++    {
++        printk(KERN_ERR "alloc dma_rx_transfer failed\n");
++        goto mem_fail;
++    }
++
++    dma_cap_zero(mask);
++    dma_cap_set(DMA_SLAVE, mask);
++    dma_rx_transfer->chan = dma_request_channel(mask, fh_I2S_dma_chan_filter, &fh_i2s_dev.channel_assign.capture_channel);
++    if (!dma_rx_transfer->chan)
++    {
++        printk(KERN_ERR"request i2s rx dma channel failed \n");
++        goto channel_fail;
++    }
++
++    /*request i2s tx dma channel*/
++    dma_tx_transfer = kzalloc(sizeof(struct fh_dma_chan), GFP_KERNEL);
++    if (!dma_tx_transfer)
++    {
++        printk(KERN_ERR "alloc dma_tx_transfer failed\n");
++        goto channel_fail;
++    }
++
++    dma_cap_zero(mask);
++    dma_cap_set(DMA_SLAVE, mask);
++    dma_tx_transfer->chan = dma_request_channel(mask, fh_I2S_dma_chan_filter, &fh_i2s_dev.channel_assign.playback_channel);
++    if (!dma_tx_transfer->chan)
++    {
++        printk(KERN_ERR "request i2s tx dma channel failed\n");
++        goto channel_fail;
++    }
++
++    return 0;
++channel_fail:
++    if (dma_rx_transfer->chan)
++    {
++        dma_release_channel(dma_rx_transfer->chan);
++        dma_rx_transfer->chan = NULL;
++    }
++    if (dma_tx_transfer && dma_tx_transfer->chan)
++    {
++        dma_release_channel(dma_tx_transfer->chan);
++        dma_tx_transfer->chan = NULL;
++    }
++
++mem_fail:
++    if (dma_rx_transfer != NULL)
++    {
++        kfree(dma_rx_transfer);
++        dma_rx_transfer = NULL;
++    }
++    if (dma_tx_transfer != NULL)
++    {
++        kfree(dma_tx_transfer);
++        dma_tx_transfer = NULL;
++    }
++
++    return -EFAULT;
++}
++
++void i2s_release_dma_channel(void)
++{
++    /*release i2s tx dma channel*/
++    if (dma_tx_transfer != NULL)
++    {
++    	if (dma_tx_transfer->chan) {
++			dma_release_channel(dma_tx_transfer->chan);
++			dma_tx_transfer->chan = NULL;
++    	}
++        kfree(dma_tx_transfer);
++        dma_tx_transfer = NULL;
++    }
++
++    /*release i2s rx dma channel*/
++    if (dma_rx_transfer != NULL)
++    {
++    	if (dma_rx_transfer->chan) {
++			dma_release_channel(dma_rx_transfer->chan);
++			dma_rx_transfer->chan = NULL;
++    	}
++        
++        kfree(dma_rx_transfer);
++        dma_rx_transfer = NULL;
++    }
++
++
++}
++static void create_fh8830_i2s_proc(void);
++static void remove_fh8830_i2s_proc(void);
++static int __init fh_i2s_init(void)
++{
++    int status;
++	init_i2s_mutex(&fh_i2s_dev.i2s_config);
++
++    i2s_prealloc_dma_buffer(&fh_i2s_dev.i2s_config);
++
++    status = i2s_request_dma_channel();
++    if (status)
++        printk(KERN_ERR "fh i2s init failed, status=0x%x\n", status);
++    create_fh8830_i2s_proc();
++    return platform_driver_register(&fh_i2s_driver);
++}
++module_init(fh_i2s_init);
++
++static void __exit fh_i2s_exit(void)
++{
++
++    remove_fh8830_i2s_proc();
++    i2s_release_dma_channel();
++    i2s_free_prealloc_dma_buffer(&fh_i2s_dev.i2s_config);
++    platform_driver_unregister(&fh_i2s_driver);
++}
++module_exit(fh_i2s_exit);
++
++MODULE_AUTHOR("FH_i2s");
++MODULE_DESCRIPTION("FH_i2s");
++MODULE_LICENSE("GPL");
++
++/****************************debug proc*****************************/
++#include <linux/proc_fs.h>
++#include <asm/unistd.h>
++struct proc_dir_entry *proc_ac_entry;
++#define proc_name "fh_fh8830_i2s"
++#define I2S_TEST_LOOP 1
++#define I2S_TEST_OUT  0
++ssize_t proc_ac_read(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++    ssize_t len = 0;
++	int i;
++	unsigned int reg;
++	unsigned int *data_addr;
++	unsigned int file_len;
++	struct file *fp;
++	loff_t pos;
++	mm_segment_t old_fs;
++	for (i = 0; i <= 0x1C; i += 4) {
++		printk(KERN_INFO "0x%x reg = 0x%x\n", i, readl(fh_i2s_module.regs + i));
++	}
++	for (i = 80; i <= 0xd0; i += 4) {
++		printk(KERN_INFO "0x%x reg = 0x%x\n", i, readl(fh_i2s_module.regs + i));
++	}
++	printk("loop test start __________\n");
++	writel(0xe0000000,fh_i2s_module.regs + I2S_CTRL);// ctrl
++	writel(0x1,fh_i2s_module.regs + I2S_TXFIFO_CTRL);// tx
++	writel(0x1,fh_i2s_module.regs + I2S_RXFIFO_CTRL);// rx
++	writel(0x1 <<12 |0x3,fh_i2s_module.regs + I2S_DBG_CTL);// loop
++
++	writel(0x1000000,0xfe0901ac);
++	i = 0;
++	data_addr = kzalloc(4096 * 4, GFP_KERNEL);
++	if (!data_addr) {
++		printk(KERN_ERR "alloc data_addr failed\n");
++		return 0;
++	}
++	mdelay(1000);
++	while (1) {
++#if I2S_TEST_LOOP
++		reg = 0xff & readl(fh_i2s_module.regs + 0x0c);
++
++		if (reg > 0) {
++			reg = readl(fh_i2s_module.regs + 0x200);
++			writel(reg, fh_i2s_module.regs + 0x100);
++		}
++
++#endif
++
++
++#if I2S_TEST_OUT
++		reg =0xff00 & readl(fh_i2s_module.regs + 0x0c);
++		reg = reg>>8;
++	//	printk("write dac date reg = %x \n ",reg);
++		if (reg < 0x40) {
++			writel(i, fh_i2s_module.regs + 0x100);
++			writel(i, fh_i2s_module.regs + 0x300);
++
++
++		}
++		i = i+0x200;
++		if(i >= 0xffffff)
++			i = 0;
++	//	printk("water level 0x%x\n",readl(fh_i2s_module.regs + 0x0c));
++
++#endif
++	}
++//TEST I2S_INPUT TO FILE
++	pos = 0;
++
++	old_fs = get_fs();
++	set_fs(KERNEL_DS);
++	fp = filp_open("/home/test", O_RDWR | O_CREAT, 0644);
++	if (IS_ERR(fp)) {
++		printk(KERN_ERR "opening /home/test failed\n");
++		kfree(data_addr);
++		return 1;
++	}
++	file_len = fp->f_op->write(fp, (void *)data_addr, 4096, &pos);
++	printk("write len is %d\n", file_len);
++	set_fs(old_fs);
++	filp_close(fp, NULL);
++	kfree(data_addr);
++	printk("loop test stop ___________\n");
++	for (i = 0; i <= 0x1C; i += 4) {
++		printk(KERN_INFO "0x%x reg = 0x%x\n", i, readl(fh_i2s_module.regs + i));
++	}
++	for (i = 80; i <= 0xd0; i += 4) {
++		printk(KERN_INFO "0x%x reg = 0x%x\n", i, readl(fh_i2s_module.regs + i));
++	}
++    return len;
++}
++
++static void create_fh8830_i2s_proc(void)
++{
++    proc_ac_entry = create_proc_entry(proc_name, S_IRUGO, NULL);
++    if (!proc_ac_entry)
++    {
++        printk(KERN_ERR"create proc failed\n");
++    }
++    else
++    {
++        proc_ac_entry->read_proc = proc_ac_read;
++    }
++}
++
++static void remove_fh8830_i2s_proc(void)
++{
++    remove_proc_entry(proc_name, NULL);
++}
+diff --git a/drivers/misc/fh_i2s.h b/drivers/misc/fh_i2s.h
+new file mode 100644
+index 00000000..7f067a76
+--- /dev/null
++++ b/drivers/misc/fh_i2s.h
+@@ -0,0 +1,35 @@
++#ifndef __FH_LINBA0_I2S_H
++#define __FH_LINBA0_I2S_H
++
++#define FH_I2S_IOCTL_MEM_BASE   'M'
++#define I2S_INIT_CAPTURE_MEM    _IOWR(FH_I2S_IOCTL_MEM_BASE, 0, int)
++#define I2S_INIT_PLAYBACK_MEM   _IOWR(FH_I2S_IOCTL_MEM_BASE, 1, int)
++
++#define FH_I2S_IOCTL_PARAM_BASE 'P'
++#define I2S_SET_VOL             _IOWR(FH_I2S_IOCTL_PARAM_BASE, 0, int)
++#define I2S_SET_INPUT_MODE      _IOWR(FH_I2S_IOCTL_PARAM_BASE, 1, int)
++#define I2S_SET_OUTPUT_MODE     _IOWR(FH_I2S_IOCTL_PARAM_BASE, 2, int)
++
++#define FH_I2S_IOCTL_ENA_BASE   'E'
++#define I2S_AI_EN               _IOWR(FH_I2S_IOCTL_ENA_BASE, 0, int)
++#define I2S_AO_EN               _IOWR(FH_I2S_IOCTL_ENA_BASE, 1, int)
++#define I2S_AI_DISABLE          _IOWR(FH_I2S_IOCTL_ENA_BASE, 2, int)
++#define I2S_AO_DISABLE          _IOWR(FH_I2S_IOCTL_ENA_BASE, 3, int)
++#define I2S_AI_PAUSE            _IOWR(FH_I2S_IOCTL_ENA_BASE, 4, int)
++#define I2S_AI_RESUME           _IOWR(FH_I2S_IOCTL_ENA_BASE, 5, int)
++#define I2S_AO_PAUSE            _IOWR(FH_I2S_IOCTL_ENA_BASE, 6, int)
++#define I2S_AO_RESUME           _IOWR(FH_I2S_IOCTL_ENA_BASE, 7, int)
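++
++/*
++ * Typical call order from user space (a sketch, inferred from the driver,
++ * not a documented ABI):
++ *   ioctl(fd, I2S_INIT_CAPTURE_MEM, &cfg);   // cfg is a struct fh_i2s_cfg_arg
++ *   ioctl(fd, I2S_AI_EN, 0);                 // start capture DMA
++ *   read(fd, buf, n);                        // drain the capture ring buffer
++ *   ioctl(fd, I2S_AI_DISABLE, 0);            // stop capture
++ */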
++
++
++struct fh_i2s_cfg_arg {
++	int volume;
++	int rate;
++	int frame_bit;
++	int channels;
++	int buffer_size;
++	int period_size;
++};
++
++#endif
++
+diff --git a/drivers/misc/fh_pinctrl_dev.c b/drivers/misc/fh_pinctrl_dev.c
+new file mode 100644
+index 00000000..c8790f40
+--- /dev/null
++++ b/drivers/misc/fh_pinctrl_dev.c
+@@ -0,0 +1,279 @@
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/pwm.h>
++#include <linux/printk.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include <asm/uaccess.h>
++#include "fh_pinctrl_dev.h"
++
++#define FH_PINCTRL_PROC_FILE "driver/pinctrl"
++
++#undef  FH_PINCTRL_DEBUG
++#ifdef FH_PINCTRL_DEBUG
++#define PRINT_DBG(fmt,args...)  printk(fmt,##args)
++#else
++#define PRINT_DBG(fmt,args...)  do{} while(0)
++#endif
++
++struct proc_dir_entry *pinctrl_proc_file;
++
++static int fh_pinctrl_open(struct inode *inode, struct file *file)
++{
++	return 0;
++}
++
++static int fh_pinctrl_release(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++
++static long fh_pinctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++	int ret = 0;
++
++	if (unlikely(_IOC_TYPE(cmd) != PINCTRL_IOCTL_MAGIC))
++	{
++		pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
++			   __func__, _IOC_TYPE(cmd), -ENOTTY);
++		return -ENOTTY;
++	}
++
++	if (unlikely(_IOC_NR(cmd) > PINCTRL_IOCTL_MAXNR))
++	{
++		pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
++			   __func__, _IOC_NR(cmd), -ENOTTY);
++		return -ENOTTY;
++	}
++
++	if (_IOC_DIR(cmd) & _IOC_READ)
++	{
++		ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
++	}
++	else if(_IOC_DIR(cmd) & _IOC_WRITE)
++	{
++		ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
++	}
++
++	if(ret)
++	{
++		pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
++			   __func__, _IOC_NR(cmd), -EACCES);
++		return -EACCES;
++	}
++
++	switch(cmd)
++	{
++
++	}
++
++	return ret;
++}
++
++static const struct file_operations fh_pinctrl_fops =
++{
++	.owner 			= THIS_MODULE,
++	.open 			= fh_pinctrl_open,
++	.release 		= fh_pinctrl_release,
++	.unlocked_ioctl 	= fh_pinctrl_ioctl,
++};
++
++static struct miscdevice fh_pinctrl_misc =
++{
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = DEVICE_NAME,
++	.fops = &fh_pinctrl_fops,
++};
++
++static void del_char(char* str,char ch)
++{
++	char *p = str;
++	char *q = str;
++	while(*q)
++	{
++		if (*q !=ch)
++		{
++			*p++ = *q;
++		}
++		q++;
++	}
++	*p='\0';
++}
++
++static ssize_t fh_pinctrl_proc_write(struct file *filp, const char *buf, size_t len, loff_t *off)
++{
++	int i;
++	char message[32] = {0};
++	char * const delim = ",";
++	char *cur = message;
++	char* param_str[4];
++	unsigned int param[4];
++
++	len = (len > sizeof(message) - 1) ? sizeof(message) - 1 : len;
++
++	if (copy_from_user(message, buf, len))
++		return -EFAULT;
++
++	for(i=0; i<4; i++)
++	{
++		param_str[i] = strsep(&cur, delim);
++		if(!param_str[i])
++		{
++			pr_err("%s: ERROR: parameter[%d] is empty\n", __func__, i);
++			pr_err("[dev/mux], [dev name], [mux name], [func sel]\n");
++			return -EINVAL;
++		}
++		else
++		{
++			del_char(param_str[i], ' ');
++			del_char(param_str[i], '\n');
++		}
++	}
++
++        if(!strcmp(param_str[0], "dev"))
++        {
++        	fh_pinctrl_sdev(param_str[1], 0);
++        }
++        else if(!strcmp(param_str[0], "mux"))
++        {
++		param[3] = (u32)simple_strtoul(param_str[3], NULL, 10);
++		if(param[3] < 0)
++		{
++			pr_err("ERROR: parameter[3] is incorrect\n");
++			return -EINVAL;
++		}
++        	fh_pinctrl_smux(param_str[1], param_str[2], param[3], 0);
++        }
++        else
++        {
++		pr_err("ERROR: parameter[0] is incorrect\n"
++			"[dev/mux], [dev name], [mux name], [func sel]\n");
++		return -EINVAL;
++        }
++
++	return len;
++}
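++
++/*
++ * Example (a sketch): driving the mux from user space through
++ * /proc/driver/pinctrl.  All four comma-separated fields are required by
++ * the parser above; "sd0" and "GPIO0" are placeholder names here, the
++ * real ones come from the mach-level pinctrl tables:
++ *
++ *   echo "mux, sd0, GPIO0, 1" > /proc/driver/pinctrl
++ *   echo "dev, sd0, 0, 0"     > /proc/driver/pinctrl
++ */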
++
++static void *v_seq_start(struct seq_file *s, loff_t *pos)
++{
++	static unsigned long counter = 0;
++	if (*pos == 0)
++		return &counter;
++	else
++	{
++		*pos = 0;
++		return NULL;
++	}
++}
++
++static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++	(*pos)++;
++	return NULL;
++}
++
++static void v_seq_stop(struct seq_file *s, void *v)
++{
++
++}
++
++static int v_seq_show(struct seq_file *sfile, void *v)
++{
++
++	fh_pinctrl_prt(sfile);
++	return 0;
++}
++
++static const struct seq_operations isp_seq_ops =
++{
++	.start = v_seq_start,
++	.next = v_seq_next,
++	.stop = v_seq_stop,
++	.show = v_seq_show
++};
++
++static int fh_pinctrl_proc_open(struct inode *inode, struct file *file)
++{
++	return seq_open(file, &isp_seq_ops);
++}
++
++static struct file_operations fh_pinctrl_proc_ops =
++{
++	.owner = THIS_MODULE,
++	.open = fh_pinctrl_proc_open,
++	.read = seq_read,
++	.write = fh_pinctrl_proc_write,
++	.release = seq_release,
++};
++
++
++static int __devinit fh_pinctrl_probe(struct platform_device *pdev)
++{
++	int err;
++
++	err = misc_register(&fh_pinctrl_misc);
++
++	if(err < 0)
++	{
++		pr_err("%s: ERROR: %s registration failed",
++			   __func__, DEVICE_NAME);
++		return -ENXIO;
++	}
++
++	pinctrl_proc_file = create_proc_entry(FH_PINCTRL_PROC_FILE, 0644, NULL);
++
++	if (pinctrl_proc_file)
++		pinctrl_proc_file->proc_fops = &fh_pinctrl_proc_ops;
++	else
++		pr_err("%s: ERROR: %s proc file create failed",
++				__func__, DEVICE_NAME);
++
++	return 0;
++}
++
++static int __exit fh_pinctrl_remove(struct platform_device *pdev)
++{
++	remove_proc_entry(FH_PINCTRL_PROC_FILE, NULL);
++	misc_deregister(&fh_pinctrl_misc);
++	return 0;
++}
++
++static struct platform_driver fh_pinctrl_driver =
++{
++	.driver	=
++	{
++		.name	= DEVICE_NAME,
++		.owner	= THIS_MODULE,
++	},
++	.probe 		= fh_pinctrl_probe,
++	.remove		= __exit_p(fh_pinctrl_remove),
++};
++
++static int __init fh_pinctrl_dev_init(void)
++{
++	return platform_driver_register(&fh_pinctrl_driver);
++}
++
++static void __exit fh_pinctrl_dev_exit(void)
++{
++
++	platform_driver_unregister(&fh_pinctrl_driver);
++
++}
++
++module_init(fh_pinctrl_dev_init);
++module_exit(fh_pinctrl_dev_exit);
++
++
++MODULE_AUTHOR("fullhan");
++
++MODULE_DESCRIPTION("FH PINCTRL driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
+diff --git a/drivers/misc/fh_pinctrl_dev.h b/drivers/misc/fh_pinctrl_dev.h
+new file mode 100644
+index 00000000..8fbf6c0e
+--- /dev/null
++++ b/drivers/misc/fh_pinctrl_dev.h
+@@ -0,0 +1,18 @@
++
++#ifndef FH_PINCTRL_DEV_H_
++#define FH_PINCTRL_DEV_H_
++
++#include <linux/slab.h>
++#include <linux/ioctl.h>
++#include <mach/pinctrl.h>
++
++
++#define DEVICE_NAME					"fh_pinctrl"
++
++
++#define PINCTRL_IOCTL_MAGIC 				'p'
++
++#define PINCTRL_IOCTL_MAXNR 				8
++
++
++#endif /* FH_PINCTRL_DEV_H_ */
+diff --git a/drivers/misc/fh_sadc.c b/drivers/misc/fh_sadc.c
+new file mode 100644
+index 00000000..b9fc3243
+--- /dev/null
++++ b/drivers/misc/fh_sadc.c
+@@ -0,0 +1,537 @@
++/*
++ * fh_sadc.c
++ *
++ *  Created on: Mar 13, 2015
++ *      Author: duobao
++ */
++#include <linux/uaccess.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++/*****************************************************************************
++ *  Include Section
++ *  add all #include here
++ *****************************************************************************/
++#include   <mach/fh_sadc.h>
++/*****************************************************************************
++ * Define section
++ * add all #define here
++ *****************************************************************************/
++#define wrap_readl(wrap, name) \
++	__raw_readl(&(((struct wrap_sadc_reg *)wrap->regs)->name))
++
++#define wrap_writel(wrap, name, val) \
++	__raw_writel((val), &(((struct wrap_sadc_reg *)wrap->regs)->name))
++
++#define wrap_readw(wrap, name) \
++	__raw_readw(&(((struct wrap_sadc_reg *)wrap->regs)->name))
++
++#define wrap_writew(wrap, name, val) \
++	__raw_writew((val), &(((struct wrap_sadc_reg *)wrap->regs)->name))
++
++#define wrap_readb(wrap, name) \
++	__raw_readb(&(((struct wrap_sadc_reg *)wrap->regs)->name))
++
++#define wrap_writeb(wrap, name, val) \
++	__raw_writeb((val), &(((struct wrap_sadc_reg *)wrap->regs)->name))
++
++#define IOCTL_GET_SADC_DATA	1
++#define IOCTL_SADC_POWER_DOWN	0xff
++#define SADC_WRAP_BASE			(0xf1200000)
++#define SADC_IRQn			(23)
++#define SADC_MAX_CONTROLLER		(1)
++#define SADC_STATUS_COLESD		(0)
++#define SADC_STATUS_OPEN		(1)
++#define FH_SADC_PLAT_DEVICE_NAME		"fh_sadc"
++#define FH_SADC_MISC_DEVICE_NAME		"fh_sadc"
++/****************************************************************************
++ * ADT section
++ *  add definition of user defined Data Type that only be used in this file  here
++ ***************************************************************************/
++typedef struct{
++	int channel;
++	int sadc_data;
++} SADC_INFO;
++/******************************************************************************
++ * Function prototype section
++ * add prototypes for all functions called by this file,execepting those
++ * declared in header file
++ *****************************************************************************/
++
++/*****************************************************************************
++ * Global variables section - Exported
++ * add declaration of global variables that will be exported here
++ * e.g.
++ *  int8_t foo;
++ ****************************************************************************/
++
++/*****************************************************************************
++
++ *  static fun;
++ *****************************************************************************/
++u32 fh_sadc_isr_read_data(struct wrap_sadc_obj *sadc, u32 channel, u16 *buf);
++static int fh_sadc_open(struct inode *inode, struct file *file);
++static int fh_sadc_release(struct inode *inode, struct file *filp);
++static long fh_sadc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
++/*****************************************************************************
++ * Global variables section - Local
++ * define global variables(will be refered only in this file) here,
++ * static keyword should be used to limit scope of local variable to this file
++ * e.g.
++ *  static uint8_t ufoo;
++ *****************************************************************************/
++static struct wrap_sadc_obj fh_sadc_obj;
++
++static const struct file_operations fh_sadc_fops = {
++	.owner 			= THIS_MODULE,
++	.open 			= fh_sadc_open,
++	.release 		= fh_sadc_release,
++	.unlocked_ioctl = fh_sadc_ioctl,
++};
++
++static struct miscdevice fh_sadc_misc = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = FH_SADC_MISC_DEVICE_NAME,
++	.fops = &fh_sadc_fops,
++};
++
++/*****************************************************************************
++ *
++ *
++ *		function body
++ *
++ *
++ *****************************************************************************/
++u32 fh_sadc_isr_read_data(struct wrap_sadc_obj *sadc, u32 channel,
++		u16 *buf) {
++	u32 xainsel = 1 << channel;
++	u32 xversel = 0;
++	u32 xpwdb = 1;
++	/* sample timing counters */
++	u32 sel2sam_pre_cnt = 2;
++	u32 sam_cnt = 2;
++	u32 sam2sel_pos_cnt = 2;
++	/* timeout thresholds */
++	u32 eoc_tos = 0xff;
++	u32 eoc_toe = 0xff;
++	u32 time_out = 0xffff;
++	/* interrupt enable */
++	u32 sadc_isr = 0x01;
++	/* start command */
++	u32 sadc_cmd = 0x01;
++	/* raw conversion result */
++	u32 temp_data = 0;
++	u32 ret_time;
++
++	//control...
++	wrap_writel(sadc, sadc_control, xainsel | (xversel << 8) | (xpwdb << 12));
++
++	wrap_writel(sadc, sadc_cnt,
++			sel2sam_pre_cnt | (sam_cnt << 8) | (sam2sel_pos_cnt << 16));
++
++	wrap_writel(sadc, sadc_timeout,
++			eoc_tos | (eoc_toe << 8) | (time_out << 16));
++
++	wrap_writel(sadc, sadc_ier, sadc_isr);
++
++	wrap_writel(sadc, sadc_cmd, sadc_cmd);
++
++	ret_time = wait_for_completion_timeout(&sadc->done, 5000);
++	if (ret_time == 0) {
++		printk(KERN_ERR "sadc timeout..\n");
++		return SADC_TIMEOUT;
++	}
++
++	switch (channel) {
++	case 0:
++	case 1:
++		//read channel 0 1
++		temp_data = wrap_readl(sadc, sadc_dout0);
++		break;
++
++	case 2:
++	case 3:
++		//read channel 2 3
++		temp_data = wrap_readl(sadc, sadc_dout1);
++		break;
++
++	case 4:
++	case 5:
++		//read channel 4 5
++		temp_data = wrap_readl(sadc, sadc_dout2);
++		break;
++
++	case 6:
++	case 7:
++		//read channel 6 7
++		temp_data = wrap_readl(sadc, sadc_dout3);
++		break;
++	default:
++		break;
++	}
++	if (channel % 2) {
++		//read low 16bit
++		*buf = (u16) (temp_data & 0xffff);
++	} else {
++		//read high 16bit
++		*buf = (u16) (temp_data >> 16);
++	}
++	return 0;
++
++}
++EXPORT_SYMBOL(fh_sadc_isr_read_data);
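++
++/*
++ * Register packing note: each sadc_doutN register carries two channels,
++ * e.g. channel 0 in the high 16 bits of sadc_dout0 and channel 1 in the
++ * low 16 bits, which is why the code above selects on (channel % 2).
++ */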
++
++int fh_sadc_enable(void)
++{
++    u32 control_reg;
++    struct wrap_sadc_obj *sadc = &fh_sadc_obj;
++
++    control_reg = wrap_readl(sadc, sadc_control);
++    control_reg |= 1 << 12;
++    wrap_writel(sadc, sadc_control, control_reg);
++    return 0;
++}
++EXPORT_SYMBOL(fh_sadc_enable);
++
++int fh_sadc_disable(void)
++{
++    u32 control_reg;
++    struct wrap_sadc_obj *sadc = &fh_sadc_obj;
++
++    control_reg = wrap_readl(sadc, sadc_control);
++    control_reg &= ~(1 << 12);
++    wrap_writel(sadc, sadc_control, control_reg);
++    return 0;
++}
++EXPORT_SYMBOL(fh_sadc_disable);
++
++static irqreturn_t fh_sadc_isr(int irq, void *dev_id) {
++
++	u32 isr_status;
++	struct wrap_sadc_obj *sadc = (struct wrap_sadc_obj *) dev_id;
++
++	isr_status = wrap_readl(sadc, sadc_int_status);
++	if (isr_status & 0x01) {
++
++		u32 sadc_isr = 0x00;
++		wrap_writel(sadc, sadc_ier, sadc_isr);
++
++		wrap_writel(sadc, sadc_int_status, isr_status);
++		complete(&(sadc->done));
++	} else {
++		printk(KERN_WARNING "sadc: spurious interrupt, status 0x%x\n", isr_status);
++	}
++
++	return IRQ_HANDLED;
++}
++
++long fh_sadc_get_value(int channel) {
++	unsigned int ret;
++	long w = 0;
++	u16 ad_raw_data;
++
++	if (channel < 0) {
++		printk(KERN_ERR "%s: invalid sadc channel %d\n",
++				__func__, channel);
++		return 0;
++	}
++
++	fh_sadc_enable();
++
++	ret = fh_sadc_isr_read_data(&fh_sadc_obj, channel, &ad_raw_data);
++
++	if (ret != 0) {
++		printk(KERN_ERR "sadc: read failed, error code 0x%x\n", ret);
++	} else {
++		w = ad_raw_data * SADC_REF / SADC_MAX_AD_VALUE;
++		printk(KERN_INFO "sadc value: %ld\n", w);
++	}
++
++	return w;
++}
++EXPORT_SYMBOL(fh_sadc_get_value);
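++
++/*
++ * Scaling sketch: the raw sample is converted with
++ *   value = raw * SADC_REF / SADC_MAX_AD_VALUE.
++ * Assuming, for illustration only, SADC_REF = 3300 (mV) and a 10-bit
++ * converter (SADC_MAX_AD_VALUE = 1023), a raw reading of 512 yields
++ * 512 * 3300 / 1023 = 1651 mV. The actual constants live in the driver
++ * header.
++ */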
++
++static void del_char(char *str, char ch)
++{
++	char *p = str;
++	char *q = str;
++
++	while (*q) {
++		if (*q != ch)
++			*p++ = *q;
++		q++;
++	}
++	*p = '\0';
++}
++
++static ssize_t fh_sadc_proc_write(struct file *filp, const char *buf, size_t len, loff_t *off)
++{
++    char message[32] = {0};
++    char * const delim = ",";
++    char *cur = message, *power_str;
++    int power;
++
++    /* leave room for the NUL terminator */
++    len = (len > sizeof(message) - 1) ? sizeof(message) - 1 : len;
++
++    if (copy_from_user(message, buf, len))
++        return -EFAULT;
++
++    power_str = strsep(&cur, delim);
++    if(!power_str)
++    {
++        pr_err("%s: ERROR: parameter is empty\n", __func__);
++        return -EINVAL;
++    }
++    else
++    {
++        del_char(power_str, ' ');
++        del_char(power_str, '\n');
++        power = (u32)simple_strtoul(power_str, NULL, 10);
++        if(power < 0)
++        {
++            pr_err("%s: ERROR: parameter is incorrect\n", __func__);
++            return -EINVAL;
++        }
++    }
++
++    power ? fh_sadc_enable() : fh_sadc_disable();
++
++    return len;
++}
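++
++/*
++ * Usage sketch (illustrative; the actual node name comes from the
++ * FH_SADC_PROC_FILE macro):
++ *   echo 1 > /proc/<FH_SADC_PROC_FILE>   # power the SADC up
++ *   echo 0 > /proc/<FH_SADC_PROC_FILE>   # power it down
++ * A nonzero value enables the converter; spaces and the trailing
++ * newline are stripped before parsing.
++ */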
++
++static void *v_seq_start(struct seq_file *s, loff_t *pos)
++{
++    static unsigned long counter = 0;
++    if (*pos == 0)
++        return &counter;
++    else
++    {
++        *pos = 0;
++        return NULL;
++    }
++}
++
++static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++    (*pos)++;
++    return NULL;
++}
++
++static void v_seq_stop(struct seq_file *s, void *v)
++{
++}
++
++static int v_seq_show(struct seq_file *sfile, void *v)
++{
++    int i;
++    u32 reg;
++    struct wrap_sadc_obj *sadc = &fh_sadc_obj;
++    u32 ret;
++
++    reg = wrap_readl(sadc, sadc_control) & 0x1000;
++
++    seq_printf(sfile, "\nSADC Status:\n");
++    seq_printf(sfile, "Power %s\n\n", reg ? "up" : "down");
++
++    for (i = 0; i < 8; i++)
++    {
++        u16 ad_raw_data;
++        ret = fh_sadc_isr_read_data(&fh_sadc_obj, i, &ad_raw_data);
++        if (ret != 0)
++            seq_printf(sfile, "sadc error code: 0x%x\n", ret);
++        else
++            seq_printf(sfile, "channel: %d \tvalue: %u\n", i,
++                       ad_raw_data * SADC_REF / SADC_MAX_AD_VALUE);
++    }
++    return 0;
++}
++
++static const struct seq_operations isp_seq_ops =
++{
++    .start = v_seq_start,
++    .next = v_seq_next,
++    .stop = v_seq_stop,
++    .show = v_seq_show
++};
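++
++/*
++ * These seq_ops implement the common "single shot" seq_file idiom:
++ * v_seq_start() hands back a dummy token only at position 0 and
++ * v_seq_next() always returns NULL, so v_seq_show() runs exactly once
++ * per read of the proc file.
++ */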
++
++static int fh_sadc_proc_open(struct inode *inode, struct file *file)
++{
++    return seq_open(file, &isp_seq_ops);
++}
++
++static struct file_operations fh_sadc_proc_ops =
++{
++	.owner = THIS_MODULE,
++	.open = fh_sadc_proc_open,
++	.read = seq_read,
++	.write = fh_sadc_proc_write,
++	.release = seq_release,
++};
++
++static int __devinit fh_sadc_probe(struct platform_device *pdev) {
++	int err;
++	struct resource *res;
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res) {
++		dev_err(&pdev->dev, "sadc get platform source error..\n");
++		return -ENODEV;
++	}
++
++	fh_sadc_obj.irq_no = platform_get_irq(pdev, 0);
++	if (fh_sadc_obj.irq_no < 0) {
++		dev_warn(&pdev->dev, "sadc interrupt is not available.\n");
++		return fh_sadc_obj.irq_no;
++	}
++
++	res = request_mem_region(res->start, resource_size(res), pdev->name);
++	if (res == NULL) {
++		dev_err(&pdev->dev, "sadc region already claimed\n");
++		return -EBUSY;
++	}
++
++	fh_sadc_obj.regs = ioremap(res->start, resource_size(res));
++	if (fh_sadc_obj.regs == NULL) {
++		err = -ENXIO;
++		goto fail_no_ioremap;
++	}
++
++	init_completion(&fh_sadc_obj.done);
++	fh_sadc_obj.active_channel_no = 0;
++
++	err = request_irq(fh_sadc_obj.irq_no, fh_sadc_isr, 0, dev_name(&pdev->dev),
++			&fh_sadc_obj);
++	if (err) {
++		dev_dbg(&pdev->dev, "request_irq failed, %d\n", err);
++		err = -ENXIO;
++		goto err_irq;
++	}
++
++	err = misc_register(&fh_sadc_misc);
++
++	if (err < 0) {
++		pr_err("%s: ERROR: %s registration failed", __func__,
++				FH_SADC_MISC_DEVICE_NAME);
++		err = -ENXIO;
++		goto misc_error;
++	}
++
++	fh_sadc_obj.proc_file = create_proc_entry(FH_SADC_PROC_FILE, 0644, NULL);
++
++	if (fh_sadc_obj.proc_file)
++		fh_sadc_obj.proc_file->proc_fops = &fh_sadc_proc_ops;
++	else
++		pr_err("%s: ERROR: %s proc file create failed\n",
++		       __func__, "SADC");
++
++	return 0;
++
++misc_error:
++	free_irq(fh_sadc_obj.irq_no, &fh_sadc_obj);
++
++err_irq:
++	iounmap(fh_sadc_obj.regs);
++
++fail_no_ioremap:
++	release_mem_region(res->start, resource_size(res));
++
++	return err;
++}
++
++static int __exit fh_sadc_remove(struct platform_device *pdev) {
++	struct resource *res;
++
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	misc_deregister(&fh_sadc_misc);
++	free_irq(fh_sadc_obj.irq_no, &fh_sadc_obj);
++	iounmap(fh_sadc_obj.regs);
++	release_mem_region(res->start, resource_size(res));
++	return 0;
++}
++
++
++static int fh_sadc_open(struct inode *inode, struct file *file) {
++	return 0;
++}
++
++static int fh_sadc_release(struct inode *inode, struct file *filp) {
++	return 0;
++}
++
++static long fh_sadc_ioctl(struct file *filp, unsigned int cmd,
++		unsigned long arg) {
++
++	u32 ad_data;
++	u32 control_reg;
++	u16 ad_raw_data;
++	SADC_INFO sadc_info;
++	struct wrap_sadc_obj *sadc = &fh_sadc_obj;
++
++	if (cmd == IOCTL_GET_SADC_DATA) {
++		if (copy_from_user((void *) &sadc_info, (void __user*) arg,
++				sizeof(sadc_info))) {
++			return -EFAULT;
++		}
++		/* only channels 0-7 map onto the sadc_dout registers */
++		if ((u32)sadc_info.channel > 7)
++			return -EINVAL;
++
++		fh_sadc_isr_read_data(&fh_sadc_obj, sadc_info.channel, &ad_raw_data);
++		ad_data = ad_raw_data * SADC_REF;
++		ad_data /= SADC_MAX_AD_VALUE;
++		sadc_info.sadc_data = ad_data;
++		if (put_user(sadc_info.sadc_data,
++				(int __user* )(&((SADC_INFO* )arg)->sadc_data))) {
++			return -EFAULT;
++		}
++	}
++
++	else if (cmd == IOCTL_SADC_POWER_DOWN) {
++		control_reg = wrap_readl(sadc, sadc_control);
++		control_reg &= ~(1 << 12);
++		wrap_writel(sadc, sadc_control, control_reg);
++	}
++	return 0;
++}
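++
++/*
++ * Userspace sketch (illustrative only; the device node name follows
++ * from the FH_SADC_MISC_DEVICE_NAME macro above):
++ *
++ *   int fd = open("/dev/" FH_SADC_MISC_DEVICE_NAME, O_RDWR);
++ *   SADC_INFO info = { .channel = 2 };
++ *   if (ioctl(fd, IOCTL_GET_SADC_DATA, &info) == 0)
++ *       printf("channel 2: %u\n", info.sadc_data);
++ */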
++
++
++/*******************
++ *
++ * Registered as a platform driver because probe() needs the board
++ * resources; probe() then registers the SADC misc device so that
++ * applications can open it.
++ *
++ ******************/
++static struct platform_driver fh_sadc_driver =
++{
++	.driver	=
++	{
++		.name	= FH_SADC_PLAT_DEVICE_NAME,
++		.owner	= THIS_MODULE,
++	},
++	.probe 		= fh_sadc_probe,
++	.remove		= __exit_p(fh_sadc_remove),
++};
++
++
++
++static int __init fh_sadc_init(void) {
++
++	return platform_driver_register(&fh_sadc_driver);
++}
++
++static void __exit fh_sadc_exit(void) {
++
++	platform_driver_unregister(&fh_sadc_driver);
++
++}
++
++module_init(fh_sadc_init);
++module_exit(fh_sadc_exit);
++
++MODULE_DESCRIPTION("fh sadc driver");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("zhangy@fullhan.com");
++MODULE_ALIAS("platform:FH_sadc");
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 38089b25..2b645fbe 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -1557,6 +1557,9 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
+ }
+ EXPORT_SYMBOL(mmc_set_blocklen);
+ 
++extern struct mmc_host *mmc_sd1;
++extern struct mmc_host *mmc_sd0;
++
+ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
+ {
+ 	host->f_init = freq;
+@@ -1631,6 +1634,14 @@ void mmc_rescan(struct work_struct *work)
+ 	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
+ 		goto out;
+ 
++	/* for removable media, give up after 5 unsuccessful rescans */
++	if (!(host->caps & MMC_CAP_NONREMOVABLE)) {
++		if (host->rescan_count > 5)
++			goto out;
++		else
++			host->rescan_count++;
++	}
++
+ 	mmc_claim_host(host);
+ 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
+index d9411ed2..01b3b3d3 100644
+--- a/drivers/mmc/core/core.h
++++ b/drivers/mmc/core/core.h
+@@ -13,7 +13,7 @@
+ 
+ #include <linux/delay.h>
+ 
+-#define MMC_CMD_RETRIES        3
++#define MMC_CMD_RETRIES        0 /* FIXME (PeterJiang): do not retry commands */
+ 
+ struct mmc_bus_ops {
+ 	int (*awake)(struct mmc_host *);
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index ff277412..c1649228 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -305,6 +305,8 @@ static int mmc_read_switch(struct mmc_card *card)
+ 
+ 		goto out;
+ 	}
++	if (status[13] & 0x02) /* hoisted from the disabled branch below (kernel 3.0.101) */
++		card->sw_caps.hs_max_dtr = 50000000;
+ 
+ 	if (card->scr.sda_spec3) {
+ 		card->sw_caps.sd3_bus_mode = status[13];
+@@ -348,11 +350,11 @@ static int mmc_read_switch(struct mmc_card *card)
+ 		}
+ 
+ 		card->sw_caps.sd3_curr_limit = status[7];
+-	} else {
++	} /*else {//modify by kernel 3.0.101
+ 		if (status[13] & 0x02)
+ 			card->sw_caps.hs_max_dtr = 50000000;
+ 	}
+-
++	 */
+ out:
+ 	kfree(status);
+ 
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 262fff01..06dbfdde 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -111,7 +111,7 @@ static int sdio_read_cccr(struct mmc_card *card)
+ 
+ 	cccr_vsn = data & 0x0f;
+ 
+-	if (cccr_vsn > SDIO_CCCR_REV_1_20) {
++	if (cccr_vsn > SDIO_CCCR_REV_3_00) { /* to support SDIO 3.0 (luoc) */
+ 		printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n",
+ 			mmc_hostname(card->host), cccr_vsn);
+ 		return -EINVAL;
+@@ -881,3 +881,73 @@ err:
+ 	return err;
+ }
+ 
++int sdio_reset_comm(struct mmc_card *card)
++{
++	struct mmc_host *host = card->host;
++	u32 ocr;
++	int err;
++	printk("%s():\n", __func__);
++	printk("%s line %d.\n", __FILE__, __LINE__);
++	mmc_claim_host(host);
++	mmc_go_idle(host);
++	mmc_set_clock(host, host->f_min);
++	printk("%s line %d.\n", __FILE__, __LINE__);
++	err = mmc_send_io_op_cond(host, 0, &ocr);
++	if (err)
++		goto err;
++	printk("%s line %d.\n", __FILE__, __LINE__);
++	host->ocr = mmc_select_voltage(host, ocr);
++	if (!host->ocr) {
++		err = -EINVAL;
++		goto err;
++	}
++	printk("%s line %d.\n", __FILE__, __LINE__);
++	err = mmc_send_io_op_cond(host, host->ocr, &ocr);
++	if (err)
++		goto err;
++	if (mmc_host_is_spi(host)) {
++		err = mmc_spi_set_crc(host, use_spi_crc);
++		if (err)
++			goto err;
++	}
++	printk("%s line %d.\n", __FILE__, __LINE__);
++	if (!mmc_host_is_spi(host)) {
++		err = mmc_send_relative_addr(host, &card->rca);
++		if (err)
++			goto err;
++		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
++	}
++	printk("%s line %d.\n", __FILE__, __LINE__);
++	if (!mmc_host_is_spi(host)) {
++		err = mmc_select_card(card);
++		if (err)
++			goto err;
++	}
++	/*
++	 * Switch to high-speed (if supported).
++	 */
++	printk("%s line %d.\n", __FILE__, __LINE__);
++	err = sdio_enable_hs(card);
++	if (err > 0)
++		mmc_sd_go_highspeed(card);
++	else if (err)
++		goto err;
++	/*
++	 * Change to the card's maximum speed.
++	 */
++	printk("%s line %d.\n", __FILE__, __LINE__);
++	mmc_set_clock(host, mmc_sdio_get_max_clock(card));
++	err = sdio_enable_4bit_bus(card);
++	if (err > 0)
++		mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
++	else if (err)
++		goto err;
++	mmc_release_host(host);
++	return 0;
++err:
++	printk("%s: Error resetting SDIO communications (%d)\n",
++	       mmc_hostname(host), err);
++	mmc_release_host(host);
++	return err;
++}
++EXPORT_SYMBOL(sdio_reset_comm);
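++
++/*
++ * Note: sdio_reset_comm() replays the SDIO init sequence (go idle,
++ * IO_SEND_OP_COND, voltage selection, relative address, card select,
++ * then the high-speed and 4-bit bus switches) on an already-enumerated
++ * card. It is typically called by WLAN drivers after resetting their
++ * SDIO function card.
++ */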
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 56dbf3f6..96361a63 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -518,6 +518,23 @@ config MMC_DW_IDMAC
+ 	  Designware Mobile Storage IP block. This disables the external DMA
+ 	  interface.
+ 
++config MMC_FH
++	tristate "FH Memory Card Interface"
++	depends on ARM
++	help
++	  This selects support for the Fullhan SoC memory card interface, a
++	  Synopsys DesignWare Mobile Storage IP block. It provides host
++	  support for SD and MMC interfaces, in both PIO and DMA modes.
++
++config MMC_FH_IDMAC
++	bool "Internal DMAC interface"
++	depends on MMC_FH
++	help
++	  This selects support for the internal DMAC block within the Synopsys
++	  Designware Mobile Storage IP block. This disables the external DMA
++	  interface.
++
+ config MMC_SH_MMCIF
+ 	tristate "SuperH Internal MMCIF support"
+ 	depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
+diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
+index 58a5cf73..e56cfa32 100644
+--- a/drivers/mmc/host/Makefile
++++ b/drivers/mmc/host/Makefile
+@@ -39,6 +39,7 @@ obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o
+ obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
+ obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
+ obj-$(CONFIG_MMC_DW)		+= dw_mmc.o
++obj-$(CONFIG_MMC_FH)		+= fhmci/
+ obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o
+ obj-$(CONFIG_MMC_JZ4740)	+= jz4740_mmc.o
+ obj-$(CONFIG_MMC_VUB300)	+= vub300.o
+diff --git a/drivers/mmc/host/fh_mmc.c b/drivers/mmc/host/fh_mmc.c
+new file mode 100644
+index 00000000..151b5278
+--- /dev/null
++++ b/drivers/mmc/host/fh_mmc.c
+@@ -0,0 +1,2150 @@
++/*
++ * Synopsys DesignWare Multimedia Card Interface driver
++ *  (Based on NXP driver for lpc 31xx)
++ *
++ * Copyright (C) 2009 NXP Semiconductors
++ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/blkdev.h>
++#include <linux/clk.h>
++#include <linux/debugfs.h>
++#define DEBUG /* enable dev_dbg() output in this file */
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/ioport.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/scatterlist.h>
++#include <linux/seq_file.h>
++#include <linux/slab.h>
++#include <linux/stat.h>
++#include <linux/delay.h>
++#include <linux/irq.h>
++#include <linux/mmc/host.h>
++#include <linux/mmc/mmc.h>
++#include <linux/bitops.h>
++#include <linux/regulator/consumer.h>
++#include <linux/proc_fs.h>
++#include <linux/gpio.h>
++
++#include "fh_mmc.h"
++#include "fh_mmc_reg.h"
++
++#define TEST_GPIO		4
++
++#define SDC_DESC_SIZE		(PAGE_SIZE * 2)
++#define T_END 10
++
++/* Common flag combinations */
++#define FH_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DTO | SDMMC_INT_DCRC | \
++				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
++				 SDMMC_INT_EBE)
++#define FH_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
++				 SDMMC_INT_RESP_ERR)
++#define FH_MCI_ERROR_FLAGS	(FH_MCI_DATA_ERROR_FLAGS | \
++				 FH_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
++#define FH_MCI_SEND_STATUS	1
++#define FH_MCI_RECV_STATUS	2
++#define FH_MCI_DMA_THRESHOLD	16
++
++#ifdef CONFIG_MMC_FH_IDMAC
++struct idmac_desc {
++	u32		des0;	/* Control Descriptor */
++#define IDMAC_DES0_DIC	BIT(1)
++#define IDMAC_DES0_LD	BIT(2)
++#define IDMAC_DES0_FD	BIT(3)
++#define IDMAC_DES0_CH	BIT(4)
++#define IDMAC_DES0_ER	BIT(5)
++#define IDMAC_DES0_CES	BIT(30)
++#define IDMAC_DES0_OWN	BIT(31)
++
++	u32		des1;	/* Buffer sizes */
++#define IDMAC_SET_BUFFER1_SIZE(d, s) \
++	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
++
++	u32		des2;	/* buffer 1 physical address */
++
++	u32		des3;	/* buffer 2 physical address */
++};
++#endif /* CONFIG_MMC_FH_IDMAC */
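++
++/*
++ * Descriptor sketch (chained mode, as set up by fh_mci_translate_sglist()
++ * below): with IDMAC_DES0_CH set, des3 points at the next descriptor
++ * rather than at a second buffer, so a two-segment transfer looks like
++ *
++ *   desc[0]: des0 = OWN|FD|CH|DIC, des1 = len0, des2 = buf0, des3 -> desc[1]
++ *   desc[1]: des0 = OWN|LD|CH,     des1 = len1, des2 = buf1, des3 -> desc[2]
++ *
++ * Only the last descriptor carries LD and keeps its completion interrupt.
++ */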
++
++/**
++ * struct fh_mci_slot - MMC slot state
++ * @mmc: The mmc_host representing this slot.
++ * @host: The MMC controller this slot is using.
++ * @ctype: Card type for this slot.
++ * @mrq: mmc_request currently being processed or waiting to be
++ *	processed, or NULL when the slot is idle.
++ * @queue_node: List node for placing this node in the @queue list of
++ *	&struct fh_mci.
++ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
++ * @flags: Random state bits associated with the slot.
++ * @id: Number of this slot.
++ * @last_detect_state: Most recently observed card detect state.
++ */
++struct fh_mci_slot {
++	struct mmc_host		*mmc;
++	struct fh_mci		*host;
++
++	u32			ctype;
++
++	struct mmc_request	*mrq;
++	struct list_head	queue_node;
++
++	unsigned int		clock;
++	unsigned long		flags;
++#define FH_MMC_CARD_PRESENT	0
++#define FH_MMC_CARD_NEED_INIT	1
++	int			id;
++	int			last_detect_state;
++};
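++
++/*
++ * The slot state is embedded in the mmc_host private area, so the usual
++ * conversion inside the mmc_host_ops callbacks below is simply:
++ *
++ *   struct fh_mci_slot *slot = mmc_priv(mmc);
++ */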
++
++#if defined(CONFIG_DEBUG_FS)
++static int fh_mci_req_show(struct seq_file *s, void *v)
++{
++	struct fh_mci_slot *slot = s->private;
++	struct mmc_request *mrq;
++	struct mmc_command *cmd;
++	struct mmc_command *stop;
++	struct mmc_data	*data;
++
++	/* Make sure we get a consistent snapshot */
++	spin_lock_bh(&slot->host->lock);
++	mrq = slot->mrq;
++
++	if (mrq) {
++		cmd = mrq->cmd;
++		data = mrq->data;
++		stop = mrq->stop;
++
++		if (cmd)
++			seq_printf(s,
++				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
++				   cmd->opcode, cmd->arg, cmd->flags,
++				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
++				   cmd->resp[3], cmd->error);
++		if (data)
++			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
++				   data->bytes_xfered, data->blocks,
++				   data->blksz, data->flags, data->error);
++		if (stop)
++			seq_printf(s,
++				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
++				   stop->opcode, stop->arg, stop->flags,
++				   stop->resp[0], stop->resp[1], stop->resp[2],
++				   stop->resp[3], stop->error);
++	}
++
++	spin_unlock_bh(&slot->host->lock);
++
++	return 0;
++}
++
++static int fh_mci_req_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, fh_mci_req_show, inode->i_private);
++}
++
++static const struct file_operations fh_mci_req_fops = {
++	.owner		= THIS_MODULE,
++	.open		= fh_mci_req_open,
++	.read		= seq_read,
++	.llseek		= seq_lseek,
++	.release	= single_release,
++};
++
++static int fh_mci_regs_show(struct seq_file *s, void *v)
++{
++	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
++	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
++	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
++	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
++	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
++	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
++
++	return 0;
++}
++
++static int fh_mci_regs_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, fh_mci_regs_show, inode->i_private);
++}
++
++static const struct file_operations fh_mci_regs_fops = {
++	.owner		= THIS_MODULE,
++	.open		= fh_mci_regs_open,
++	.read		= seq_read,
++	.llseek		= seq_lseek,
++	.release	= single_release,
++};
++
++static void fh_mci_init_debugfs(struct fh_mci_slot *slot)
++{
++	struct mmc_host	*mmc = slot->mmc;
++	struct fh_mci *host = slot->host;
++	struct dentry *root;
++	struct dentry *node;
++
++	root = mmc->debugfs_root;
++	if (!root)
++		return;
++
++	node = debugfs_create_file("regs", S_IRUSR, root, host,
++				   &fh_mci_regs_fops);
++	if (!node)
++		goto err;
++
++	node = debugfs_create_file("req", S_IRUSR, root, slot,
++				   &fh_mci_req_fops);
++	if (!node)
++		goto err;
++
++	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
++	if (!node)
++		goto err;
++
++	node = debugfs_create_x32("pending_events", S_IRUSR, root,
++				  (u32 *)&host->pending_events);
++	if (!node)
++		goto err;
++
++	node = debugfs_create_x32("completed_events", S_IRUSR, root,
++				  (u32 *)&host->completed_events);
++	if (!node)
++		goto err;
++
++	return;
++
++err:
++	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
++}
++#endif /* defined(CONFIG_DEBUG_FS) */
++
++
++
++static void fh_mci_set_timeout(struct fh_mci *host)
++{
++	/* timeout (maximum) */
++	mci_writel(host, TMOUT, 0xffffffff);
++}
++
++static u32 fh_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
++{
++	struct mmc_data	*data;
++	u32 cmdr;
++	cmd->error = -EINPROGRESS;
++
++	cmdr = cmd->opcode;
++
++	if (cmdr == MMC_STOP_TRANSMISSION)
++		cmdr |= SDMMC_CMD_STOP;
++	else
++		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
++
++	if (cmd->flags & MMC_RSP_PRESENT) {
++		/* We expect a response, so set this bit */
++		cmdr |= SDMMC_CMD_RESP_EXP;
++		if (cmd->flags & MMC_RSP_136)
++			cmdr |= SDMMC_CMD_RESP_LONG;
++	}
++
++	if (cmd->flags & MMC_RSP_CRC)
++		cmdr |= SDMMC_CMD_RESP_CRC;
++
++	data = cmd->data;
++	if (data) {
++		cmdr |= SDMMC_CMD_DAT_EXP;
++		if (data->flags & MMC_DATA_STREAM)
++			cmdr |= SDMMC_CMD_STRM_MODE;
++		if (data->flags & MMC_DATA_WRITE)
++			cmdr |= SDMMC_CMD_DAT_WR;
++	}
++	cmdr |= SDMMC_CMD_USE_HOLD_REG;
++	return cmdr;
++}
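++
++/*
++ * Worked example: a single-block read (CMD17, R1 response, data expected)
++ * is encoded by the function above as
++ *   17 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
++ *   SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP | SDMMC_CMD_USE_HOLD_REG
++ * (no SDMMC_CMD_DAT_WR, since the data flags lack MMC_DATA_WRITE).
++ */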
++
++
++static void fh_mci_start_command(struct fh_mci *host,
++				 struct mmc_command *cmd, u32 cmd_flags)
++{
++	host->cmd = cmd;
++	dev_vdbg(&host->pdev->dev,
++		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
++		 cmd->arg, cmd_flags);
++	mci_writel(host, CMDARG, cmd->arg);
++	wmb();
++
++	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
++}
++
++static void send_stop_cmd(struct fh_mci *host, struct mmc_data *data)
++{
++	fh_mci_start_command(host, data->stop, host->stop_cmdr);
++}
++
++/* DMA interface functions */
++static void fh_mci_stop_dma(struct fh_mci *host)
++{
++	if (host->use_dma) {
++		host->dma_ops->stop(host);
++		host->dma_ops->cleanup(host);
++	}
++
++	/* Data transfer was stopped by the interrupt handler */
++	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
++
++}
++
++#ifdef CONFIG_MMC_FH_IDMAC
++
++
++
++static void fh_mci_idmac_reset(struct fh_mci *host)
++{
++	u32 bmod = mci_readl(host, BMOD);
++	/* Software reset of DMA */
++	bmod |= SDMMC_IDMAC_SWRESET;
++	mci_writel(host, BMOD, bmod);
++
++}
++
++static void fh_mci_dma_cleanup(struct fh_mci *host)
++{
++	struct mmc_data *data = host->data;
++
++	if (data && host->dma_data_mapped)
++		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
++			     ((data->flags & MMC_DATA_WRITE)
++			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
++
++	host->dma_data_mapped = 0;
++}
++
++static void fh_mci_idmac_stop_dma(struct fh_mci *host)
++{
++	u32 temp;
++
++	/* Disable and reset the IDMAC interface */
++	temp = mci_readl(host, CTRL);
++	temp &= ~SDMMC_CTRL_USE_IDMAC;
++	temp |= SDMMC_CTRL_DMA_RESET;
++	mci_writel(host, CTRL, temp);
++
++	/* Stop the IDMAC running */
++	temp = mci_readl(host, BMOD);
++	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
++	temp |= SDMMC_IDMAC_SWRESET;
++	mci_writel(host, BMOD, temp);
++}
++
++static void fh_mci_idmac_complete_dma(struct fh_mci *host)
++{
++	struct mmc_data *data = host->data;
++
++	dev_vdbg(&host->pdev->dev, "DMA complete\n");
++
++	host->dma_ops->cleanup(host);
++
++	/*
++	 * If the card was removed, data will be NULL. No point in trying to
++	 * send the stop command or waiting for NBUSY in this case.
++	 */
++	if (data) {
++		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
++		tasklet_schedule(&host->tasklet);
++	}
++}
++
++static void fh_mci_translate_sglist(struct fh_mci *host, struct mmc_data *data,
++				    unsigned int sg_len)
++{
++	#define DMA_ONE_BUF_SIZE_MAX (0x2000 - 16)
++
++	int i;
++	int num = 0;
++	u32 seglen;
++	struct idmac_desc *desc = host->sg_cpu;
++	struct idmac_desc *ldesc = NULL;
++
++	for (i = 0; i < sg_len; i++) {
++		unsigned int length = sg_dma_len(&data->sg[i]);
++		u32 mem_addr = sg_dma_address(&data->sg[i]);
++
++		while (length > 0) {
++			/* FIXME: should never happen */
++			if (++num > host->ring_size) {
++				panic("%s, line %d, DMA transfer too long!\n",
++					__FILE__, __LINE__);
++			}
++
++			seglen = length;
++			if (seglen > DMA_ONE_BUF_SIZE_MAX) {
++				seglen = DMA_ONE_BUF_SIZE_MAX;
++			}
++
++			/* Set the OWN bit and disable interrupts for this descriptor */
++			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
++			/* Buffer length */
++			IDMAC_SET_BUFFER1_SIZE(desc, seglen);
++			/* Physical address to DMA to/from */
++			desc->des2 = mem_addr;
++
++			mem_addr += seglen;
++			length -= seglen;
++
++			ldesc = desc++;
++		}
++	}
++
++	/* Set first descriptor */
++	desc = host->sg_cpu;
++	desc->des0 |= IDMAC_DES0_FD;
++
++	/* Set last descriptor */
++	if (ldesc) {
++		ldesc->des0 |= IDMAC_DES0_LD;
++		ldesc->des0 &= ~IDMAC_DES0_DIC;
++	}
++
++	wmb();
++}
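++
++/*
++ * Splitting arithmetic: des1 holds a 13-bit buffer size (mask 0x1fff,
++ * i.e. at most 8191 bytes), so DMA_ONE_BUF_SIZE_MAX caps each descriptor
++ * at 0x2000 - 16 = 8176 bytes. A 10000-byte scatterlist entry therefore
++ * becomes two descriptors of 8176 and 1824 bytes.
++ */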
++
++static void fh_mci_idmac_start_dma(struct fh_mci *host, unsigned int sg_len)
++{
++	u32 temp;
++
++	fh_mci_translate_sglist(host, host->data, sg_len);
++
++	/* Select IDMAC interface */
++	temp = mci_readl(host, CTRL);
++	temp |= SDMMC_CTRL_USE_IDMAC;
++	mci_writel(host, CTRL, temp);
++
++	wmb();
++
++	/* Enable the IDMAC */
++	temp = mci_readl(host, BMOD);
++	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
++	mci_writel(host, BMOD, temp);
++	/* Start it running */
++	mci_writel(host, PLDMND, 1);
++}
++
++static int fh_mci_idmac_init(struct fh_mci *host)
++{
++	struct idmac_desc *p;
++	int i;
++
++	/* Number of descriptors in the ring buffer */
++	//host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
++	host->ring_size = SDC_DESC_SIZE / sizeof(struct idmac_desc);
++
++	/* Forward link the descriptor list */
++	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
++		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
++
++	/* Set the last descriptor as the end-of-ring descriptor */
++	p->des3 = host->sg_dma;
++	p->des0 = IDMAC_DES0_ER;
++	fh_mci_idmac_reset(host);
++	/* Mask out interrupts - get Tx & Rx complete only */
++	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
++		   SDMMC_IDMAC_INT_TI);
++
++	/* Set the descriptor base address */
++	mci_writel(host, DBADDR, host->sg_dma);
++	return 0;
++}
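++
++/*
++ * Ring size sketch: sizeof(struct idmac_desc) is 16 bytes, so with
++ * SDC_DESC_SIZE = PAGE_SIZE * 2 (8 KiB on a 4 KiB-page system) the ring
++ * holds 8192 / 16 = 512 descriptors, the last of which is turned into
++ * the end-of-ring marker above.
++ */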
++
++static struct fh_mci_dma_ops fh_mci_idmac_ops = {
++	.init = fh_mci_idmac_init,
++	.start = fh_mci_idmac_start_dma,
++	.stop = fh_mci_idmac_stop_dma,
++	.complete = fh_mci_idmac_complete_dma,
++	.cleanup = fh_mci_dma_cleanup,
++};
++#endif /* CONFIG_MMC_FH_IDMAC */
++
++static int fh_mci_pre_dma_transfer(struct fh_mci *host,
++				   struct mmc_data *data,
++				   bool next)
++{
++	struct scatterlist *sg;
++	unsigned int i, direction, sg_len;
++
++//#define SDIO_DMA
++//#define SDIO_PIO
++
++#ifdef SDIO_PIO
++	return -EINVAL;
++#else
++#ifdef SDIO_DMA
++
++#else
++	/*
++	 * We don't do DMA on "complex" transfers, i.e. with
++	 * non-word-aligned buffers or lengths. Also, we don't bother
++	 * with all the DMA setup overhead for short transfers.
++	 */
++	if (data->blocks * data->blksz < FH_MCI_DMA_THRESHOLD)
++		return -EINVAL;
++	if (data->blksz & 3)
++		return -EINVAL;
++	for_each_sg(data->sg, sg, data->sg_len, i) {
++		if (sg->offset & 3 || sg->length & 3)
++			return -EINVAL;
++	}
++#endif
++#endif
++	if (data->flags & MMC_DATA_READ)
++		direction = DMA_FROM_DEVICE;
++	else
++		direction = DMA_TO_DEVICE;
++
++	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
++			    direction);
++
++	host->dma_data_mapped = 1;
++	return sg_len;
++}
++
++
++
++static int fh_mci_submit_data_dma(struct fh_mci *host, struct mmc_data *data)
++{
++	int sg_len;
++	u32 temp;
++
++	host->using_dma = 0;
++
++	/* If we don't have a channel, we can't do DMA */
++	if (!host->use_dma)
++		return -ENODEV;
++
++	sg_len = fh_mci_pre_dma_transfer(host, data, 0);
++	if (sg_len < 0) {
++		host->dma_ops->stop(host);
++		return sg_len;
++	}
++
++	host->using_dma = 1;
++
++	dev_vdbg(&host->pdev->dev,
++		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
++		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
++		 sg_len);
++
++#if 0
++	//test data blocksize
++	WARN((host->prev_blksz && (host->prev_blksz != data->blksz)),
++			"Block size changed, from %d to %d",
++			host->prev_blksz,
++			data->blksz);
++#endif
++
++	/* Enable the DMA interface */
++	temp = mci_readl(host, CTRL);
++	temp |= SDMMC_CTRL_DMA_ENABLE;
++	mci_writel(host, CTRL, temp);
++
++	/* Disable RX/TX IRQs, let DMA handle it */
++	temp = mci_readl(host, INTMASK);
++	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
++	mci_writel(host, INTMASK, temp);
++	host->dma_ops->start(host, sg_len);
++	return 0;
++}
++
++static void fh_mci_submit_data(struct fh_mci *host, struct mmc_data *data)
++{
++	u32 temp;
++	int ret;
++	data->error = -EINPROGRESS;
++
++	WARN_ON(host->data);
++	host->sg = NULL;
++	host->data = data;
++
++	if (data->flags & MMC_DATA_READ)
++		host->dir_status = FH_MCI_RECV_STATUS;
++	else
++		host->dir_status = FH_MCI_SEND_STATUS;
++
++	ret = fh_mci_submit_data_dma(host, data);
++	if (ret) {
++		host->sg = data->sg;
++		host->pio_offset = 0;
++
++		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
++
++		temp = mci_readl(host, INTMASK);
++		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
++		mci_writel(host, INTMASK, temp);
++		temp = mci_readl(host, CTRL);
++		temp &= ~SDMMC_CTRL_DMA_ENABLE;
++		mci_writel(host, CTRL, temp);
++		host->prev_blksz = 0;
++	} else {
++		/*
++		 * Keep the current block size.
++		 * It will be used to decide whether to update
++		 * fifoth register next time.
++		 */
++		host->prev_blksz = data->blksz;
++	}
++}
++
++static void mci_send_cmd(struct fh_mci_slot *slot, u32 cmd, u32 arg)
++{
++	struct fh_mci *host = slot->host;
++	unsigned long timeout = jiffies + msecs_to_jiffies(500);
++	unsigned int cmd_status = 0;
++
++	mci_writel(host, CMDARG, arg);
++	wmb();
++	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
++
++	while (time_before(jiffies, timeout)) {
++		cmd_status = mci_readl(host, CMD);
++		if (!(cmd_status & SDMMC_CMD_START))
++			return;
++	}
++	dev_err(&slot->mmc->class_dev,
++		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
++		cmd, arg, cmd_status);
++}
++
++static void fh_mci_setup_bus(struct fh_mci_slot *slot)
++{
++	struct fh_mci *host = slot->host;
++	u32 div;
++
++	if (slot->clock != host->current_speed) {
++		if (host->bus_hz % slot->clock)
++			/*
++			 * move the + 1 after the divide to prevent
++			 * over-clocking the card.
++			 */
++			div = ((host->bus_hz / slot->clock) >> 1) + 1;
++		else
++			div = (host->bus_hz / slot->clock) >> 1;
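++
++		/*
++		 * Worked example: with an illustrative bus_hz of 50 MHz and a
++		 * requested 20 MHz, the remainder is non-zero, so
++		 * div = (50000000 / 20000000) / 2 + 1 = 2 and the card clock
++		 * becomes bus_hz / (2 * div) = 12.5 MHz, safely at or below
++		 * the request; div = 0 means the divider is bypassed.
++		 */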
++
++		dev_info(&slot->mmc->class_dev,
++			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz"
++			 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
++			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
++
++		/* disable clock */
++		mci_writel(host, CLKENA, 0);
++		mci_writel(host, CLKSRC, 0);
++
++		/* inform CIU */
++		mci_send_cmd(slot,
++			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
++
++		/* set clock to desired speed */
++		mci_writel(host, CLKDIV, div);
++
++		/* inform CIU */
++		mci_send_cmd(slot,
++			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
++
++		/* enable clock */
++		mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE |
++			   SDMMC_CLKEN_LOW_PWR);
++
++		/* inform CIU */
++		mci_send_cmd(slot,
++			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
++
++		host->current_speed = slot->clock;
++	}
++
++	/* Set the current slot bus width */
++	mci_writel(host, CTYPE, slot->ctype);
++}
++
++
++static void fh_mci_start_request(struct fh_mci *host,
++				 struct fh_mci_slot *slot)
++{
++	struct mmc_request *mrq;
++	struct mmc_command *cmd;
++	struct mmc_data	*data;
++	u32 cmdflags;
++
++	mrq = slot->mrq;
++	if (host->pdata->select_slot)
++		host->pdata->select_slot(slot->id);
++	/* Slot specific timing and width adjustment */
++	// do_gettimeofday(&mrq->rq_times[3]);
++	fh_mci_setup_bus(slot);
++	host->cur_slot = slot;
++	host->mrq = mrq;
++
++	host->pending_events = 0;
++	host->completed_events = 0;
++	host->data_status = 0;
++	// do_gettimeofday(&mrq->rq_times[4]);
++	host->data_error_flag = 0;
++	data = mrq->data;
++	if (data) {
++		fh_mci_set_timeout(host);
++		mci_writel(host, BYTCNT, data->blksz*data->blocks);
++		mci_writel(host, BLKSIZ, data->blksz);
++	}
++	// do_gettimeofday(&mrq->rq_times[5]);
++	cmd = mrq->cmd;
++	cmdflags = fh_mci_prepare_command(slot->mmc, cmd);
++	/* this is the first command, send the initialization clock */
++	if (test_and_clear_bit(FH_MMC_CARD_NEED_INIT, &slot->flags))
++		cmdflags |= SDMMC_CMD_INIT;
++	// do_gettimeofday(&mrq->rq_times[6]);
++	if (data) {
++		fh_mci_submit_data(host, data);
++		wmb();
++	}
++	fh_mci_start_command(host, cmd, cmdflags);
++	if (mrq->stop) {
++		host->stop_cmdr = fh_mci_prepare_command(slot->mmc, mrq->stop);
++	}
++}
++
++
++/* must be called with host->lock held */
++static void fh_mci_queue_request(struct fh_mci *host, struct fh_mci_slot *slot,
++				 struct mmc_request *mrq)
++{
++	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
++		 host->state);
++	slot->mrq = mrq;
++
++	if (host->state == STATE_IDLE) {
++		host->state = STATE_SENDING_CMD;
++		fh_mci_start_request(host, slot);
++	} else {
++		list_add_tail(&slot->queue_node, &host->queue);
++	}
++}
++
++static void fh_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
++{
++	struct fh_mci_slot *slot = mmc_priv(mmc);
++	struct fh_mci *host = slot->host;
++
++	WARN_ON(slot->mrq);
++	/*
++	 * The check for card presence and queueing of the request must be
++	 * atomic, otherwise the card could be removed in between and the
++	 * request wouldn't fail until another card was inserted.
++	 */
++	spin_lock_bh(&host->lock);
++	if (!test_bit(FH_MMC_CARD_PRESENT, &slot->flags)) {
++		spin_unlock_bh(&host->lock);
++		mrq->cmd->error = -ENOMEDIUM;
++		mmc_request_done(mmc, mrq);
++		return;
++	}
++
++	/* We don't support multiple blocks of weird lengths. */
++	fh_mci_queue_request(host, slot, mrq);
++	spin_unlock_bh(&host->lock);
++}
++
++static void fh_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
++{
++	u32 regs;
++	struct fh_mci_slot *slot = mmc_priv(mmc);
++#if 0
++	struct clk* sdc_clk;
++	int sdc_id = slot->host->pdev->id;
++
++
++	if(sdc_id)
++	{
++		printk("fh_mci_set_ios, clk: %lu\n", ios->clock);
++		sdc_clk = clk_get(NULL, "sdc1_clk");
++		clk_set_rate(sdc_clk,ios->clock);
++	}
++	else
++	{
++		sdc_clk = clk_get(NULL, "sdc0_clk");
++		clk_set_rate(sdc_clk,ios->clock);
++	}
++#endif
++
++	switch (ios->bus_width) {
++	case MMC_BUS_WIDTH_4:
++		slot->ctype = SDMMC_CTYPE_4BIT;
++		break;
++	case MMC_BUS_WIDTH_8:
++		slot->ctype = SDMMC_CTYPE_8BIT;
++		break;
++	default:
++		/* set default 1 bit mode */
++		slot->ctype = SDMMC_CTYPE_1BIT;
++	}
++	/* DDR mode set */
++	if (ios->ddr) {
++		regs = mci_readl(slot->host, UHS_REG);
++		regs |= (0x1 << slot->id) << 16;
++		mci_writel(slot->host, UHS_REG, regs);
++	}
++
++	if (ios->clock) {
++		/*
++		 * Use mirror of ios->clock to prevent race with mmc
++		 * core ios update when finding the minimum.
++		 */
++		slot->clock = ios->clock;
++	}
++
++	switch (ios->power_mode) {
++	case MMC_POWER_UP:
++		set_bit(FH_MMC_CARD_NEED_INIT, &slot->flags);
++		break;
++	default:
++		break;
++	}
++}
++
++static int fh_mci_get_ro(struct mmc_host *mmc)
++{
++	int read_only;
++	struct fh_mci_slot *slot = mmc_priv(mmc);
++	struct fh_mci_board *brd = slot->host->pdata;
++
++	/* Use platform get_ro function, else try on board write protect */
++	if (brd->get_ro)
++		read_only = brd->get_ro(slot->id);
++	else
++		read_only =
++			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
++
++	dev_dbg(&mmc->class_dev, "card is %s\n",
++		read_only ? "read-only" : "read-write");
++
++	return read_only;
++}
++
++static int fh_mci_get_cd(struct mmc_host *mmc)
++{
++	int present;
++	struct fh_mci_slot *slot = mmc_priv(mmc);
++	struct fh_mci_board *brd = slot->host->pdata;
++	struct fh_mci *host = slot->host;
++
++	/* Use platform get_cd function, else try onboard card detect */
++	if (brd->quirks & FH_MCI_QUIRK_BROKEN_CARD_DETECTION)
++		present = 1;
++	else if (brd->get_cd)
++		present = !brd->get_cd(slot->id);
++	else
++		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
++			== 0 ? 1 : 0;
++
++	spin_lock_bh(&host->lock);
++	if (present)
++		dev_dbg(&mmc->class_dev, "card is present\n");
++	else
++		dev_dbg(&mmc->class_dev, "card is not present\n");
++	spin_unlock_bh(&host->lock);
++
++	return present;
++}
++
++/*
++ * Disable lower power mode.
++ *
++ * Low power mode will stop the card clock when idle.  According to the
++ * description of the CLKENA register we should disable low power mode
++ * for SDIO cards if we need SDIO interrupts to work.
++ *
++ * This function is fast if low power mode is already disabled.
++ */
++static void fh_mci_disable_low_power(struct fh_mci_slot *slot)
++{
++	struct fh_mci *host = slot->host;
++	u32 clk_en_a;
++	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
++
++	clk_en_a = mci_readl(host, CLKENA);
++
++	if (clk_en_a & clken_low_pwr) {
++		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
++		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
++			     SDMMC_CMD_PRV_DAT_WAIT, 0);
++	}
++}
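++
++/*
++ * CLKENA keeps one low-power bit per slot (SDMMC_CLKEN_LOW_PWR << id),
++ * so this clears just the current slot's bit and then tells the CIU to
++ * latch the change with an update-clock command, the same handshake
++ * used by fh_mci_setup_bus().
++ */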
++
++static void fh_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
++{
++	struct fh_mci_slot *slot = mmc_priv(mmc);
++	struct fh_mci *host = slot->host;
++	u32 int_mask;
++
++	/* Enable/disable Slot Specific SDIO interrupt */
++	int_mask = mci_readl(host, INTMASK);
++	if (enb) {
++		/*
++		 * Turn off low power mode if it was enabled.  This is a bit of
++		 * a heavy operation and we disable / enable IRQs a lot, so
++		 * we'll leave low power mode disabled and it will get
++		 * re-enabled again in fh_mci_setup_bus().
++		 */
++		fh_mci_disable_low_power(slot);
++
++		mci_writel(host, INTMASK,
++			   (int_mask | SDMMC_INT_SDIO(slot->id)));
++	} else {
++		mci_writel(host, INTMASK,
++			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
++	}
++}
++
++
++static const struct mmc_host_ops fh_mci_ops = {
++	.request	= fh_mci_request,
++	.set_ios	= fh_mci_set_ios,
++	.get_ro		= fh_mci_get_ro,
++	.get_cd		= fh_mci_get_cd,
++	.enable_sdio_irq	= fh_mci_enable_sdio_irq,
++};
++
++static void fh_mci_request_end(struct fh_mci *host, struct mmc_request *mrq)
++	__releases(&host->lock)
++	__acquires(&host->lock)
++{
++	struct fh_mci_slot *slot;
++	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
++
++	if (host->data && host->data->error)
++		printk(KERN_ERR "fh SDC: request ended with data error\n");
++
++	WARN_ON(host->cmd || host->data);
++
++	host->cur_slot->mrq = NULL;
++	host->mrq = NULL;
++	if (!list_empty(&host->queue)) {
++		slot = list_entry(host->queue.next,
++				  struct fh_mci_slot, queue_node);
++		list_del(&slot->queue_node);
++		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
++			 mmc_hostname(slot->mmc));
++		host->state = STATE_SENDING_CMD;
++		fh_mci_start_request(host, slot);
++	} else {
++		dev_vdbg(&host->pdev->dev, "list empty\n");
++		host->state = STATE_IDLE;
++	}
++
++	spin_unlock(&host->lock);
++	mmc_request_done(prev_mmc, mrq);
++	spin_lock(&host->lock);
++}
++
++static void fh_mci_command_complete(struct fh_mci *host, struct mmc_command *cmd)
++{
++	u32 status = host->cmd_status;
++
++	host->cmd_status = 0;
++
++	/* Read the response from the card (up to 16 bytes) */
++	if (cmd->flags & MMC_RSP_PRESENT) {
++		if (cmd->flags & MMC_RSP_136) {
++			cmd->resp[3] = mci_readl(host, RESP0);
++			cmd->resp[2] = mci_readl(host, RESP1);
++			cmd->resp[1] = mci_readl(host, RESP2);
++			cmd->resp[0] = mci_readl(host, RESP3);
++		} else {
++			cmd->resp[0] = mci_readl(host, RESP0);
++			cmd->resp[1] = 0;
++			cmd->resp[2] = 0;
++			cmd->resp[3] = 0;
++		}
++
++	}
++
++	if (status & SDMMC_INT_RTO)
++		cmd->error = -ETIMEDOUT;
++	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
++		cmd->error = -EILSEQ;
++	else if (status & SDMMC_INT_RESP_ERR)
++		cmd->error = -EIO;
++	else
++		cmd->error = 0;
++
++	if (cmd->error) {
++		/* newer ip versions need a delay between retries */
++		if (host->quirks & FH_MCI_QUIRK_RETRY_DELAY)
++			mdelay(20);
++
++		if (cmd->data) {
++			host->data = NULL;
++			fh_mci_stop_dma(host);
++		}
++	}
++}
++
++static void fh_mci_tasklet_func(unsigned long priv)
++{
++	struct fh_mci *host = (struct fh_mci *)priv;
++	struct mmc_data	*data;
++	struct mmc_command *cmd;
++	enum fh_mci_state state;
++	enum fh_mci_state prev_state;
++	u32 status;
++
++	spin_lock(&host->lock);
++
++	state = host->state;
++	data = host->data;
++
++	do {
++		prev_state = state;
++
++		switch (state) {
++		case STATE_IDLE:
++			break;
++
++		case STATE_SENDING_CMD:
++			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
++						&host->pending_events))
++				break;
++
++			cmd = host->cmd;
++			host->cmd = NULL;
++			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
++			fh_mci_command_complete(host, host->mrq->cmd);
++
++
++			if (!host->mrq->data || cmd->error) {
++				fh_mci_request_end(host, host->mrq);
++				goto unlock;
++			}
++
++			prev_state = state = STATE_SENDING_DATA;
++			/* fall through */
++
++		case STATE_SENDING_DATA:
++			if (test_and_clear_bit(EVENT_DATA_ERROR,
++					       &host->pending_events)) {
++				printk(KERN_ERR "fh SDC : STATE_SENDING_DATA EVENT_DATA_ERROR\n");
++				fh_mci_stop_dma(host);
++				if (data->stop)
++					send_stop_cmd(host, data);
++				state = STATE_DATA_ERROR;
++				break;
++			}
++
++			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
++						&host->pending_events))
++				break;
++
++			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
++			prev_state = state = STATE_DATA_BUSY;
++			/* fall through */
++
++		case STATE_DATA_BUSY:
++			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
++						&host->pending_events))
++				break;
++
++			host->data = NULL;
++			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
++			status = host->data_status;
++
++			if (status & FH_MCI_DATA_ERROR_FLAGS) {
++				printk(KERN_ERR "fh SDC : STATE_DATA_BUSY\n");
++				if (status & SDMMC_INT_DTO) {
++					dev_err(&host->pdev->dev,
++						"data timeout error\n");
++					data->error = -ETIMEDOUT;
++				} else if (status & SDMMC_INT_DCRC) {
++					dev_err(&host->pdev->dev,
++						"data CRC error\n");
++					data->error = -EILSEQ;
++				} else {
++					dev_err(&host->pdev->dev,
++						"data FIFO error "
++						"(status=%08x)\n",
++						status);
++					data->error = -EIO;
++				}
++			} else {
++				data->bytes_xfered = data->blocks * data->blksz;
++				data->error = 0;
++			}
++
++			if (!data->stop) {
++				fh_mci_request_end(host, host->mrq);
++				goto unlock;
++			}
++
++			prev_state = state = STATE_SENDING_STOP;
++			if (!data->error)
++				send_stop_cmd(host, data);
++			/* fall through */
++
++		case STATE_SENDING_STOP:
++			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
++						&host->pending_events))
++				break;
++
++			host->cmd = NULL;
++			fh_mci_command_complete(host, host->mrq->stop);
++			fh_mci_request_end(host, host->mrq);
++			goto unlock;
++
++		case STATE_DATA_ERROR:
++			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
++						&host->pending_events))
++				break;
++			printk(KERN_ERR "fh SDC : STATE_DATA_ERROR\n");
++
++			state = STATE_DATA_BUSY;
++			break;
++		}
++	} while (state != prev_state);
++	host->state = state;
++unlock:
++	spin_unlock(&host->lock);
++}
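++
++/*
++ * State machine sketch, as driven by the tasklet above:
++ *
++ *   IDLE -> SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP -> IDLE
++ *
++ * A command without data returns to IDLE straight from SENDING_CMD; a
++ * data error detours through STATE_DATA_ERROR, which waits for the
++ * transfer to wind down before re-entering DATA_BUSY for the final
++ * status handling.
++ */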
++
++static void fh_mci_push_data16(struct fh_mci *host, void *buf, int cnt)
++{
++	u16 *pdata = (u16 *)buf;
++
++	WARN_ON(cnt % 2 != 0);
++
++	cnt = cnt >> 1;
++	while (cnt > 0) {
++		mci_writew(host, DATA, *pdata++);
++		cnt--;
++	}
++}
++
++static void fh_mci_pull_data16(struct fh_mci *host, void *buf, int cnt)
++{
++	u16 *pdata = (u16 *)buf;
++
++	WARN_ON(cnt % 2 != 0);
++
++	cnt = cnt >> 1;
++	while (cnt > 0) {
++		*pdata++ = mci_readw(host, DATA);
++		cnt--;
++	}
++}
++
++static void fh_mci_push_data32(struct fh_mci *host, void *buf, int cnt)
++{
++	u32 *pdata = (u32 *)buf;
++
++	WARN_ON(cnt % 4 != 0);
++	WARN_ON((unsigned long)pdata & 0x3);
++
++	cnt = cnt >> 2;
++	while (cnt > 0) {
++		mci_writel(host, DATA, *pdata++);
++		cnt--;
++	}
++}
++
++static void fh_mci_pull_data32(struct fh_mci *host, void *buf, int cnt)
++{
++	u32 *pdata = (u32 *)buf;
++
++	WARN_ON(cnt % 4 != 0);
++	WARN_ON((unsigned long)pdata & 0x3);
++
++	cnt = cnt >> 2;
++	while (cnt > 0) {
++		*pdata++ = mci_readl(host, DATA);
++		cnt--;
++	}
++}
++
++static void fh_mci_push_data64(struct fh_mci *host, void *buf, int cnt)
++{
++	u64 *pdata = (u64 *)buf;
++
++	WARN_ON(cnt % 8 != 0);
++
++	cnt = cnt >> 3;
++	while (cnt > 0) {
++		mci_writeq(host, DATA, *pdata++);
++		cnt--;
++	}
++}
++
++static void fh_mci_pull_data64(struct fh_mci *host, void *buf, int cnt)
++{
++	u64 *pdata = (u64 *)buf;
++
++	WARN_ON(cnt % 8 != 0);
++
++	cnt = cnt >> 3;
++	while (cnt > 0) {
++		*pdata++ = mci_readq(host, DATA);
++		cnt--;
++	}
++}
++
++static void fh_mci_read_data_pio(struct fh_mci *host)
++{
++	struct scatterlist *sg = host->sg;
++	void *buf = sg_virt(sg);
++	unsigned int offset = host->pio_offset;
++	struct mmc_data	*data = host->data;
++	int shift = host->data_shift;
++	u32 status;
++	unsigned int nbytes = 0, len = 0;
++
++	do {
++		len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
++		if (offset + len <= sg->length) {
++
++			host->pull_data(host, (void *)(buf + offset), len);
++			offset += len;
++			nbytes += len;
++
++			if (offset == sg->length) {
++				flush_dcache_page(sg_page(sg));
++				host->sg = sg = sg_next(sg);
++				if (!sg)
++					goto done;
++
++				offset = 0;
++				buf = sg_virt(sg);
++			}
++		} else {
++			unsigned int remaining = sg->length - offset;
++			host->pull_data(host, (void *)(buf + offset),
++					remaining);
++			nbytes += remaining;
++
++			flush_dcache_page(sg_page(sg));
++			host->sg = sg = sg_next(sg);
++			if (!sg)
++				goto done;
++
++			offset = len - remaining;
++			buf = sg_virt(sg);
++			host->pull_data(host, buf, offset);
++			nbytes += offset;
++		}
++
++		status = mci_readl(host, MINTSTS);
++		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
++		if (status & FH_MCI_DATA_ERROR_FLAGS) {
++			host->data_status = status;
++			data->bytes_xfered += nbytes;
++			smp_wmb();
++			printk("data error in read pio\n");
++			set_bit(EVENT_DATA_ERROR, &host->pending_events);
++
++			tasklet_schedule(&host->tasklet);
++			return;
++		}
++	} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
++	len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
++	host->pio_offset = offset;
++	data->bytes_xfered += nbytes;
++	return;
++
++done:
++	data->bytes_xfered += nbytes;
++	smp_wmb();
++	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
++}
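++
++/*
++ * FIFO arithmetic used above: SDMMC_GET_FCNT() reports the fill level in
++ * FIFO entries, and host->data_shift converts entries to bytes (e.g. a
++ * 32-bit wide FIFO gives data_shift = 2, so 8 entries means 32 bytes are
++ * ready to pull).
++ */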
++
++static void fh_mci_write_data_pio(struct fh_mci *host)
++{
++	struct scatterlist *sg = host->sg;
++	void *buf = sg_virt(sg);
++	unsigned int offset = host->pio_offset;
++	struct mmc_data	*data = host->data;
++	int shift = host->data_shift;
++	u32 status;
++	unsigned int nbytes = 0, len;
++
++	do {
++		len = SDMMC_FIFO_SZ -
++			(SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
++		if (offset + len <= sg->length) {
++			host->push_data(host, (void *)(buf + offset), len);
++
++			offset += len;
++			nbytes += len;
++			if (offset == sg->length) {
++				host->sg = sg = sg_next(sg);
++				if (!sg)
++					goto done;
++
++				offset = 0;
++				buf = sg_virt(sg);
++			}
++		} else {
++			unsigned int remaining = sg->length - offset;
++
++			host->push_data(host, (void *)(buf + offset),
++					remaining);
++			nbytes += remaining;
++			host->sg = sg = sg_next(sg);
++			if (!sg)
++				goto done;
++
++			offset = len - remaining;
++			buf = sg_virt(sg);
++			host->push_data(host, (void *)buf, offset);
++			nbytes += offset;
++		}
++
++		status = mci_readl(host, MINTSTS);
++		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
++		if (status & FH_MCI_DATA_ERROR_FLAGS) {
++			host->data_status = status;
++			data->bytes_xfered += nbytes;
++
++			smp_wmb();
++			printk("data error in write pio\n");
++			set_bit(EVENT_DATA_ERROR, &host->pending_events);
++
++			tasklet_schedule(&host->tasklet);
++			return;
++		}
++	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
++
++	host->pio_offset = offset;
++	data->bytes_xfered += nbytes;
++	//flag_int = mci_readl(host, INTMASK);
++	//mci_writel(host, INTMASK, flag_int|0x4);
++	return;
++
++done:
++	data->bytes_xfered += nbytes;
++	smp_wmb();
++	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
++}
++
++static void fh_mci_cmd_interrupt(struct fh_mci *host, u32 status)
++{
++	if (!host->cmd_status)
++		host->cmd_status = status;
++
++	smp_wmb();
++
++	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
++	tasklet_schedule(&host->tasklet);
++}
++
++
++static irqreturn_t fh_mci_interrupt(int irq, void *dev_id)
++{
++	struct fh_mci *host = dev_id;
++	u32 pending;
++	int i;
++	u32 cmd, arg, rint, resp0, resp1, resp2, resp3;
++	#ifdef SDC_CRC_TEST
++	struct clk *sdc_clk;
++	#endif
++
++	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
++
++	if (pending) {
++		if (pending & FH_MCI_CMD_ERROR_FLAGS) {
++			mci_writel(host, RINTSTS, FH_MCI_CMD_ERROR_FLAGS);
++			host->cmd_status = pending;
++			smp_wmb();
++			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
++		}
++
++		if (pending & FH_MCI_DATA_ERROR_FLAGS) {
++#ifdef SDC_CRC_TEST
++			gpio_direction_output(TEST_GPIO, 1);
++			__gpio_set_value(TEST_GPIO, 1);
++#endif
++			host->data_error_flag = 1;
++			rint = mci_readl(host, RINTSTS);
++			/* if there is an error report DATA_ERROR */
++			mci_writel(host, RINTSTS, FH_MCI_DATA_ERROR_FLAGS);
++			host->data_status = pending;
++			smp_wmb();
++			cmd = mci_readl(host, CMD);
++			arg = mci_readl(host, CMDARG);
++			printk("data error in interrupt, cmd=0x%x, args=0x%x, rintsts=0x%x\n",
++					cmd, arg, rint);
++
++			resp0 = mci_readl(host, RESP0);
++			resp1 = mci_readl(host, RESP1);
++			resp2 = mci_readl(host, RESP2);
++			resp3 = mci_readl(host, RESP3);
++
++			printk("resp0=0x%x, resp1=0x%x, resp2=0x%x, resp3=0x%x\n",
++					resp0, resp1, resp2, resp3);
++			set_bit(EVENT_DATA_ERROR, &host->pending_events);
++			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
++#ifdef SDC_CRC_TEST
++			sdc_clk = clk_get(NULL, "sdc0_clk");
++			clk_disable(sdc_clk);
++
++			printk("!!!!!!!!!!!sdc stopped!!!!!!!!!!!!\n");
++			panic("really terrible\n");
++#endif
++			tasklet_schedule(&host->tasklet);
++
++		}
++
++		if (likely(pending & SDMMC_INT_DATA_OVER)) {
++			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
++			if (!host->data_status)
++				host->data_status = pending;
++			smp_wmb();
++			if (host->dir_status == FH_MCI_RECV_STATUS) {
++				if (host->sg != NULL)
++					fh_mci_read_data_pio(host);
++			}
++			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
++			tasklet_schedule(&host->tasklet);
++		}
++
++		if (pending & SDMMC_INT_RXDR) {
++			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
++			if (host->dir_status == FH_MCI_RECV_STATUS && host->sg)
++				fh_mci_read_data_pio(host);
++		}
++
++		if (pending & SDMMC_INT_TXDR) {
++			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
++			if (host->dir_status == FH_MCI_SEND_STATUS && host->sg)
++				fh_mci_write_data_pio(host);
++		}
++
++		if (likely(pending & SDMMC_INT_CMD_DONE)) {
++			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
++			fh_mci_cmd_interrupt(host, pending);
++		}
++
++		if (pending & SDMMC_INT_CD) {
++			mci_writel(host, RINTSTS, SDMMC_INT_CD);
++			tasklet_schedule(&host->card_tasklet);
++		}
++
++		/* Handle SDIO Interrupts */
++		for (i = 0; i < host->num_slots; i++) {
++			struct fh_mci_slot *slot = host->slot[i];
++			if (pending & SDMMC_INT_SDIO(i)) {
++				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
++				mmc_signal_sdio_irq(slot->mmc);
++			}
++		}
++
++	}
++#ifdef CONFIG_MMC_FH_IDMAC
++	/* Handle DMA interrupts */
++	pending = mci_readl(host, IDSTS);
++	if (likely(pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))) {
++		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_NI);
++		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
++		host->dma_ops->complete(host);
++	}
++#endif
++
++	return IRQ_HANDLED;
++}
++
++static void fh_mci_tasklet_card(unsigned long data)
++{
++	struct fh_mci *host = (struct fh_mci *)data;
++	int i;
++
++	for (i = 0; i < host->num_slots; i++) {
++		struct fh_mci_slot *slot = host->slot[i];
++		struct mmc_host *mmc = slot->mmc;
++		struct mmc_request *mrq;
++		int present;
++		u32 ctrl;
++		present = fh_mci_get_cd(mmc);
++		while (present != slot->last_detect_state) {
++			dev_dbg(&slot->mmc->class_dev, "card %s\n",
++				present ? "inserted" : "removed");
++
++			spin_lock(&host->lock);
++
++			/* Card change detected */
++			slot->last_detect_state = present;
++
++			/* Power up slot */
++			if (present != 0) {
++				if (host->pdata->setpower)
++					host->pdata->setpower(slot->id,
++							      mmc->ocr_avail);
++
++				set_bit(FH_MMC_CARD_PRESENT, &slot->flags);
++			}
++
++			/* Clean up queue if present */
++			mrq = slot->mrq;
++			if (mrq) {
++				if (mrq == host->mrq) {
++					host->data = NULL;
++					host->cmd = NULL;
++
++					switch (host->state) {
++					case STATE_IDLE:
++						break;
++					case STATE_SENDING_CMD:
++						mrq->cmd->error = -ENOMEDIUM;
++						if (!mrq->data)
++							break;
++						/* fall through */
++					case STATE_SENDING_DATA:
++						mrq->data->error = -ENOMEDIUM;
++						fh_mci_stop_dma(host);
++						break;
++					case STATE_DATA_BUSY:
++					case STATE_DATA_ERROR:
++						printk("STATE_DATA_ERROR in tasklet card\n");
++						if (mrq->data->error == -EINPROGRESS)
++							mrq->data->error = -ENOMEDIUM;
++						if (!mrq->stop)
++							break;
++						/* fall through */
++					case STATE_SENDING_STOP:
++						mrq->stop->error = -ENOMEDIUM;
++						break;
++					}
++
++					fh_mci_request_end(host, mrq);
++				} else {
++					list_del(&slot->queue_node);
++					mrq->cmd->error = -ENOMEDIUM;
++					if (mrq->data)
++						mrq->data->error = -ENOMEDIUM;
++					if (mrq->stop)
++						mrq->stop->error = -ENOMEDIUM;
++
++					spin_unlock(&host->lock);
++					mmc_request_done(slot->mmc, mrq);
++					spin_lock(&host->lock);
++				}
++			}
++
++			/* Power down slot */
++			if (present == 0) {
++				if (host->pdata->setpower)
++					host->pdata->setpower(slot->id, 0);
++				clear_bit(FH_MMC_CARD_PRESENT, &slot->flags);
++
++				/*
++				 * Clear down the FIFO - doing so generates a
++				 * block interrupt, hence setting the
++				 * scatter-gather pointer to NULL.
++				 */
++				host->sg = NULL;
++
++				ctrl = mci_readl(host, CTRL);
++				ctrl |= SDMMC_CTRL_FIFO_RESET;
++				mci_writel(host, CTRL, ctrl);
++
++#ifdef CONFIG_MMC_FH_IDMAC
++				ctrl = mci_readl(host, BMOD);
++				ctrl |= SDMMC_IDMAC_SWRESET; /* Software reset of DMA */
++				mci_writel(host, BMOD, ctrl);
++#endif
++
++			}
++
++			spin_unlock(&host->lock);
++			present = fh_mci_get_cd(mmc);
++		}
++
++		mmc_detect_change(slot->mmc,
++			msecs_to_jiffies(host->pdata->detect_delay_ms));
++	}
++}
++
++static int __init fh_mci_init_slot(struct fh_mci *host, unsigned int id)
++{
++	struct mmc_host *mmc;
++	struct fh_mci_slot *slot;
++
++	mmc = mmc_alloc_host(sizeof(struct fh_mci_slot), &host->pdev->dev);
++	if (!mmc)
++		return -ENOMEM;
++
++	slot = mmc_priv(mmc);
++	slot->id = id;
++	slot->mmc = mmc;
++	slot->host = host;
++
++	mmc->ops = &fh_mci_ops;
++	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
++	mmc->f_max = 50000000; /* was 12500000 */
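++
++	/*
++	 * f_min sketch: assuming an 8-bit CLKDIV (maximum divider 255), the
++	 * card clock bottoms out at bus_hz / (2 * 255) = bus_hz / 510, hence
++	 * the DIV_ROUND_UP above; an illustrative bus_hz of 50 MHz gives an
++	 * f_min of roughly 98 kHz.
++	 */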
++
++	if (host->pdata->get_ocr)
++		mmc->ocr_avail = host->pdata->get_ocr(id);
++	else
++		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
++
++	/*
++	 * Start with slot power disabled, it will be enabled when a card
++	 * is detected.
++	 */
++	if (host->pdata->setpower)
++		host->pdata->setpower(id, 0);
++
++	if (host->pdata->caps)
++		mmc->caps = host->pdata->caps;
++	else
++		mmc->caps = 0;
++
++	if (host->pdata->get_bus_wd)
++		if (host->pdata->get_bus_wd(slot->id) >= 4)
++			mmc->caps |= MMC_CAP_4_BIT_DATA;
++
++	if (host->pdata->quirks & FH_MCI_QUIRK_HIGHSPEED)
++		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
++
++#ifdef CONFIG_MMC_FH_IDMAC
++	/* Useful defaults if platform data is unset. */
++	mmc->max_segs = 64;
++	mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
++	mmc->max_blk_count = 512;
++	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
++	mmc->max_seg_size = mmc->max_req_size;
++#else
++	if (host->pdata->blk_settings) {
++		mmc->max_segs = host->pdata->blk_settings->max_segs;
++		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
++		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
++		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
++		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
++	} else {
++		/* Useful defaults if platform data is unset. */
++		mmc->max_segs = 64;
++		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
++		mmc->max_blk_count = 512;
++		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
++		mmc->max_seg_size = mmc->max_req_size;
++	}
++#endif /* CONFIG_MMC_FH_IDMAC */
++
++	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
++	if (IS_ERR(host->vmmc))
++		host->vmmc = NULL;
++	else
++		regulator_enable(host->vmmc);
++
++	if (fh_mci_get_cd(mmc))
++		set_bit(FH_MMC_CARD_PRESENT, &slot->flags);
++	else
++		clear_bit(FH_MMC_CARD_PRESENT, &slot->flags);
++
++	host->slot[id] = slot;
++	mmc_add_host(mmc);
++
++#if defined(CONFIG_DEBUG_FS)
++	fh_mci_init_debugfs(slot);
++#endif
++
++	/* Card initially undetected */
++	slot->last_detect_state = 0;
++
++	/*
++	 * Card may have been plugged in prior to boot so we
++	 * need to run the detect tasklet
++	 */
++	tasklet_schedule(&host->card_tasklet);
++
++	return 0;
++}
++
++static void fh_mci_cleanup_slot(struct fh_mci_slot *slot, unsigned int id)
++{
++	/* Shutdown detect IRQ */
++	if (slot->host->pdata->exit)
++		slot->host->pdata->exit(id);
++
++	/* Debugfs stuff is cleaned up by mmc core */
++	mmc_remove_host(slot->mmc);
++	slot->host->slot[id] = NULL;
++	mmc_free_host(slot->mmc);
++}
++
++static void fh_mci_init_dma(struct fh_mci *host)
++{
++	/* Alloc memory for sg translation */
++	host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, SDC_DESC_SIZE,
++					  &host->sg_dma, GFP_KERNEL);
++	if (!host->sg_cpu) {
++		dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
++			__func__);
++		goto no_dma;
++	}
++
++	/* Determine which DMA interface to use */
++#ifdef CONFIG_MMC_FH_IDMAC
++	host->dma_ops = &fh_mci_idmac_ops;
++	dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
++#endif
++
++	if (!host->dma_ops)
++		goto no_dma;
++
++	if (host->dma_ops->init) {
++		if (host->dma_ops->init(host)) {
++			dev_err(&host->pdev->dev, "%s: Unable to initialize "
++				"DMA Controller.\n", __func__);
++			goto no_dma;
++		}
++	} else {
++		dev_err(&host->pdev->dev, "DMA initialization not found.\n");
++		goto no_dma;
++	}
++
++	host->use_dma = 1;
++	return;
++
++no_dma:
++	dev_info(&host->pdev->dev, "Using PIO mode.\n");
++	host->use_dma = 0;
++	return;
++}
++
++static bool mci_wait_reset(struct device *dev, struct fh_mci *host)
++{
++	unsigned long timeout = jiffies + msecs_to_jiffies(500);
++	unsigned int ctrl;
++
++	mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
++				SDMMC_CTRL_DMA_RESET));
++
++	/* wait till resets clear */
++	do {
++		ctrl = mci_readl(host, CTRL);
++		if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
++			      SDMMC_CTRL_DMA_RESET)))
++			return true;
++	} while (time_before(jiffies, timeout));
++
++	dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
++
++	return false;
++}
++
++static int fh_mci_probe(struct platform_device *pdev)
++{
++	struct fh_mci *host;
++	struct resource	*regs;
++	struct fh_mci_board *pdata;
++	int irq, ret, i, width;
++	u32 fifo_size;
++	u32 reg;
++	struct clk *sdc_clk;
++
++	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!regs)
++		return -ENXIO;
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0)
++		return irq;
++
++	host = kzalloc(sizeof(struct fh_mci), GFP_KERNEL);
++	if (!host)
++		return -ENOMEM;
++
++	host->pdev = pdev;
++	host->pdata = pdata = pdev->dev.platform_data;
++	if (!pdata || !pdata->init) {
++		dev_err(&pdev->dev,
++			"Platform data must supply init function\n");
++		ret = -ENODEV;
++		goto err_freehost;
++	}
++
++	if (!pdata->select_slot && pdata->num_slots > 1) {
++		dev_err(&pdev->dev,
++			"Platform data must supply select_slot function\n");
++		ret = -ENODEV;
++		goto err_freehost;
++	}
++
++	if (!pdata->bus_hz) {
++		dev_err(&pdev->dev,
++			"Platform data must supply bus speed\n");
++		ret = -ENODEV;
++		goto err_freehost;
++	}
++
++	host->bus_hz = pdata->bus_hz;
++	host->quirks = pdata->quirks;
++
++	spin_lock_init(&host->lock);
++	INIT_LIST_HEAD(&host->queue);
++
++	pdata->init(pdev->id, NULL, NULL);
++
++	ret = -ENOMEM;
++	/* board-level pin, power and clock setup for the selected slot */
++
++	if (pdev->id) {
++		ret = gpio_request(6, NULL);
++		if (ret) {
++			printk(KERN_ERR "gpio request err\n");
++			ret = -ENODEV;
++			goto err_freehost;
++		}
++		gpio_direction_output(6, 0);	/* power on */
++		sdc_clk = clk_get(NULL, "sdc1_clk");
++		clk_enable(sdc_clk);
++		/* TODO: *(int *)0xfe900020 = 0x100000 -- needs review */
++
++		clk_set_rate(sdc_clk, 50000000);
++		reg = clk_get_clk_sel();
++		reg |= 1 << 12;
++		reg &= ~(1 << 13);
++		clk_set_clk_sel(reg);
++	} else {
++		ret = gpio_request(5, NULL);
++		if (ret) {
++			printk(KERN_ERR "gpio request err\n");
++			ret = -ENODEV;
++			goto err_freehost;
++		}
++		gpio_direction_output(5, 0);	/* power on */
++		sdc_clk = clk_get(NULL, "sdc0_clk");
++		clk_enable(sdc_clk);
++
++		clk_set_rate(sdc_clk, 50000000);
++		reg = clk_get_clk_sel();
++		reg |= 1 << 20;
++		reg &= ~(1 << 21);
++
++/* sampling-phase select on bits 17:16 -- 0, 90, 180 or 270 degrees */
++#define SAMPLE_0
++/* #define SAMPLE_90 */
++/* #define SAMPLE_180 */
++/* #define SAMPLE_270 */
++
++#ifdef SAMPLE_0
++		reg &= ~(1 << 17);
++		reg &= ~(1 << 16);
++#endif
++#ifdef SAMPLE_90
++		reg |= (1 << 16);
++		reg &= ~(1 << 17);
++#endif
++#ifdef SAMPLE_180
++		reg &= ~(1 << 16);
++		reg |= (1 << 17);
++#endif
++#ifdef SAMPLE_270
++		reg |= (1 << 17);
++		reg |= (1 << 16);
++#endif
++		clk_set_clk_sel(reg);
++	}
++
++	host->regs = ioremap(regs->start, resource_size(regs));
++	if (!host->regs)
++		goto err_freehost;
++
++	fh_mci_init_dma(host);
++
++	/*
++	 * Get the host data width - this assumes that HCON has been set with
++	 * the correct values.
++	 */
++	i = (mci_readl(host, HCON) >> 7) & 0x7;
++	if (!i) {
++		host->push_data = fh_mci_push_data16;
++		host->pull_data = fh_mci_pull_data16;
++		width = 16;
++		host->data_shift = 1;
++	} else if (i == 2) {
++		host->push_data = fh_mci_push_data64;
++		host->pull_data = fh_mci_pull_data64;
++		width = 64;
++		host->data_shift = 3;
++	} else {
++		/* Check for a reserved value, and warn if it is */
++		WARN((i != 1),
++		     "HCON reports a reserved host data width!\n"
++		     "Defaulting to 32-bit access.\n");
++		host->push_data = fh_mci_push_data32;
++		host->pull_data = fh_mci_pull_data32;
++		width = 32;
++		host->data_shift = 2;
++	}
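++	/*
++	 * Summary (from the checks above): HCON[9:7] = 0 selects 16-bit,
++	 * 2 selects 64-bit, anything else falls back to 32-bit FIFO
++	 * accesses; data_shift is log2 of the FIFO item size in bytes.
++	 */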
++
++	/* Reset all blocks */
++	if (!mci_wait_reset(&pdev->dev, host)) {
++		ret = -ENODEV;
++		goto err_dmaunmap;
++	}
++
++	/* Clear the interrupts for the host controller */
++	mci_writel(host, RINTSTS, 0xFFFFFFFF);
++	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
++
++	/* Put in max timeout */
++	mci_writel(host, TMOUT, 0xFFFFFFFF);
++
++	/*
++	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
++	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
++	 */
++	fifo_size = mci_readl(host, FIFOTH);
++	fifo_size = 1 + ((fifo_size >> 16) & 0x7ff);
++	host->fifoth_val =
++			SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
++	mci_writel(host, FIFOTH, host->fifoth_val);
++
++	/* disable clock to CIU */
++	mci_writel(host, CLKENA, 0);
++	mci_writel(host, CLKSRC, 0);
++
++	tasklet_init(&host->tasklet, fh_mci_tasklet_func, (unsigned long)host);
++	tasklet_init(&host->card_tasklet,
++		     fh_mci_tasklet_card, (unsigned long)host);
++
++	ret = request_irq(irq, fh_mci_interrupt, 0, "fh-mci", host);
++	if (ret)
++		goto err_dmaunmap;
++
++	platform_set_drvdata(pdev, host);
++
++	if (host->pdata->num_slots)
++		host->num_slots = host->pdata->num_slots;
++	else
++		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
++
++	/* We need at least one slot to succeed */
++	for (i = 0; i < host->num_slots; i++) {
++		ret = fh_mci_init_slot(host, i);
++		if (ret) {
++			ret = -ENODEV;
++			goto err_init_slot;
++		}
++	}
++
++	/*
++	 * Enable interrupts for command done, data over, data empty, card det,
++	 * receive ready and error such as transmit, receive timeout, crc error
++	 */
++	mci_writel(host, RINTSTS, 0xFFFFFFFF);
++	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
++		   SDMMC_INT_RTO | SDMMC_INT_DTO |
++		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
++		   FH_MCI_ERROR_FLAGS | SDMMC_INT_CD);
++	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
++
++	dev_info(&pdev->dev, "FH MMC controller at irq %d, "
++		 "%d bit host data width\n", irq, width);
++	if (host->quirks & FH_MCI_QUIRK_IDMAC_DTO)
++		dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
++#ifdef SDC_CRC_TEST
++	ret = gpio_request(TEST_GPIO, "SDC_TEST");
++
++	if (ret)
++		printk(KERN_ERR "SDC gpio_request failed\n");
++
++	gpio_direction_output(TEST_GPIO, 1);
++	__gpio_set_value(TEST_GPIO, 0);
++#endif
++	return 0;
++
++err_init_slot:
++	/* De-init any initialized slots, including slot 0 */
++	while (i-- > 0)
++		if (host->slot[i])
++			fh_mci_cleanup_slot(host->slot[i], i);
++	free_irq(irq, host);
++
++err_dmaunmap:
++	if (host->use_dma && host->dma_ops->exit)
++		host->dma_ops->exit(host);
++	dma_free_coherent(&host->pdev->dev, SDC_DESC_SIZE,
++			  host->sg_cpu, host->sg_dma);
++	iounmap(host->regs);
++
++	if (host->vmmc) {
++		regulator_disable(host->vmmc);
++		regulator_put(host->vmmc);
++	}
++
++
++err_freehost:
++	kfree(host);
++	return ret;
++}
++
++static int __exit fh_mci_remove(struct platform_device *pdev)
++{
++	struct fh_mci *host = platform_get_drvdata(pdev);
++	int i;
++
++	mci_writel(host, RINTSTS, 0xFFFFFFFF);
++	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
++
++	platform_set_drvdata(pdev, NULL);
++
++	for (i = 0; i < host->num_slots; i++) {
++		dev_dbg(&pdev->dev, "remove slot %d\n", i);
++		if (host->slot[i])
++			fh_mci_cleanup_slot(host->slot[i], i);
++	}
++
++	/* disable clock to CIU */
++	mci_writel(host, CLKENA, 0);
++	mci_writel(host, CLKSRC, 0);
++
++	free_irq(platform_get_irq(pdev, 0), host);
++	dma_free_coherent(&pdev->dev, SDC_DESC_SIZE, host->sg_cpu, host->sg_dma);
++
++	if (host->use_dma && host->dma_ops->exit)
++		host->dma_ops->exit(host);
++
++	if (host->vmmc) {
++		regulator_disable(host->vmmc);
++		regulator_put(host->vmmc);
++	}
++
++	iounmap(host->regs);
++#ifdef SDC_CRC_TEST
++	gpio_free(TEST_GPIO);
++#endif
++
++	kfree(host);
++	return 0;
++}
++
++#ifdef CONFIG_PM
++/*
++ * TODO: we should probably disable the clock to the card in the suspend path.
++ */
++static int fh_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
++{
++	int i, ret;
++	struct fh_mci *host = platform_get_drvdata(pdev);
++
++	for (i = 0; i < host->num_slots; i++) {
++		struct fh_mci_slot *slot = host->slot[i];
++		if (!slot)
++			continue;
++		ret = mmc_suspend_host(slot->mmc);
++		if (ret < 0) {
++			while (--i >= 0) {
++				slot = host->slot[i];
++				if (slot)
++					mmc_resume_host(host->slot[i]->mmc);
++			}
++			return ret;
++		}
++	}
++
++	if (host->vmmc)
++		regulator_disable(host->vmmc);
++
++	return 0;
++}
++
++static int fh_mci_resume(struct platform_device *pdev)
++{
++	int i, ret;
++	struct fh_mci *host = platform_get_drvdata(pdev);
++
++	if (host->vmmc)
++		regulator_enable(host->vmmc);
++
++	if (host->dma_ops->init)
++		host->dma_ops->init(host);
++
++	if (!mci_wait_reset(&pdev->dev, host)) {
++		ret = -ENODEV;
++		return ret;
++	}
++
++	/* Restore the old value at FIFOTH register */
++	mci_writel(host, FIFOTH, host->fifoth_val);
++
++	mci_writel(host, RINTSTS, 0xFFFFFFFF);
++	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
++		   SDMMC_INT_RTO | SDMMC_INT_DTO |
++		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
++		   FH_MCI_ERROR_FLAGS | SDMMC_INT_CD);
++	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
++
++	for (i = 0; i < host->num_slots; i++) {
++		struct fh_mci_slot *slot = host->slot[i];
++		if (!slot)
++			continue;
++		ret = mmc_resume_host(host->slot[i]->mmc);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
++}
++#else
++#define fh_mci_suspend	NULL
++#define fh_mci_resume	NULL
++#endif /* CONFIG_PM */
++
++static struct platform_driver fh_mci_driver = {
++	.remove		= __exit_p(fh_mci_remove),
++	.suspend	= fh_mci_suspend,
++	.resume		= fh_mci_resume,
++	.driver		= {
++		.name		= "fh_mmc",
++	},
++};
++
++static int __init fh_mci_init(void)
++{
++	return platform_driver_probe(&fh_mci_driver, fh_mci_probe);
++}
++
++static void __exit fh_mci_exit(void)
++{
++	platform_driver_unregister(&fh_mci_driver);
++}
++
++module_init(fh_mci_init);
++module_exit(fh_mci_exit);
++
++MODULE_DESCRIPTION("FH Multimedia Card Interface driver");
++MODULE_AUTHOR("NXP Semiconductor VietNam");
++MODULE_AUTHOR("Imagination Technologies Ltd");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/mmc/host/fh_mmc.h b/drivers/mmc/host/fh_mmc.h
+new file mode 100644
+index 00000000..ddb9a63d
+--- /dev/null
++++ b/drivers/mmc/host/fh_mmc.h
+@@ -0,0 +1,233 @@
++/*
++ * Synopsys DesignWare Multimedia Card Interface driver
++ *  (Based on NXP driver for lpc 31xx)
++ *
++ * Copyright (C) 2009 NXP Semiconductors
++ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef _LINUX_MMC_FH_MMC_H_
++#define _LINUX_MMC_FH_MMC_H_
++
++#include <linux/scatterlist.h>
++#include <linux/compiler.h>
++#include <linux/types.h>
++#include <linux/io.h>
++#include <linux/mmc/host.h>
++
++#define MAX_MCI_SLOTS	2
++
++enum fh_mci_state {
++	STATE_IDLE = 0,
++	STATE_SENDING_CMD,
++	STATE_SENDING_DATA,
++	STATE_DATA_BUSY,
++	STATE_SENDING_STOP,
++	STATE_DATA_ERROR,
++};
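++
++/*
++ * Typical request flow (a reading of the state machine, not a formal
++ * spec): IDLE -> SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
++ * SENDING_STOP, with DATA_ERROR entered from the data phases; the
++ * card-detect tasklet walks whichever state is current to fail an
++ * in-flight mrq with -ENOMEDIUM when the card disappears.
++ */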
++
++enum {
++	EVENT_CMD_COMPLETE = 0,
++	EVENT_XFER_COMPLETE,
++	EVENT_DATA_COMPLETE,
++	EVENT_DATA_ERROR,
++	EVENT_XFER_ERROR
++};
++
++struct mmc_data;
++
++/**
++ * struct fh_mci - MMC controller state shared between all slots
++ * @lock: Spinlock protecting the queue and associated data.
++ * @regs: Pointer to MMIO registers.
++ * @sg: Scatterlist entry currently being processed by PIO code, if any.
++ * @pio_offset: Offset into the current scatterlist entry.
++ * @cur_slot: The slot which is currently using the controller.
++ * @mrq: The request currently being processed on @cur_slot,
++ *	or NULL if the controller is idle.
++ * @cmd: The command currently being sent to the card, or NULL.
++ * @data: The data currently being transferred, or NULL if no data
++ *	transfer is in progress.
++ * @use_dma: Whether DMA channel is initialized or not.
++ * @sg_dma: Bus address of DMA buffer.
++ * @sg_cpu: Virtual address of DMA buffer.
++ * @dma_ops: Pointer to platform-specific DMA callbacks.
++ * @cmd_status: Snapshot of SR taken upon completion of the current
++ *	command. Only valid when EVENT_CMD_COMPLETE is pending.
++ * @data_status: Snapshot of SR taken upon completion of the current
++ *	data transfer. Only valid when EVENT_DATA_COMPLETE or
++ *	EVENT_DATA_ERROR is pending.
++ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
++ *	to be sent.
++ * @dir_status: Direction of current transfer.
++ * @tasklet: Tasklet running the request state machine.
++ * @card_tasklet: Tasklet handling card detect.
++ * @pending_events: Bitmask of events flagged by the interrupt handler
++ *	to be processed by the tasklet.
++ * @completed_events: Bitmask of events which the state machine has
++ *	processed.
++ * @state: Tasklet state.
++ * @queue: List of slots waiting for access to the controller.
++ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
++ *	rate and timeout calculations.
++ * @current_speed: Configured rate of the controller.
++ * @num_slots: Number of slots available.
++ * @pdev: Platform device associated with the MMC controller.
++ * @pdata: Platform data associated with the MMC controller.
++ * @slot: Slots sharing this MMC controller.
++ * @data_shift: log2 of FIFO item size.
++ * @push_data: Pointer to FIFO push function.
++ * @pull_data: Pointer to FIFO pull function.
++ * @quirks: Set of quirks that apply to specific versions of the IP.
++ *
++ * Locking
++ * =======
++ *
++ * @lock is a softirq-safe spinlock protecting @queue as well as
++ * @cur_slot, @mrq and @state. These must always be updated
++ * at the same time while holding @lock.
++ *
++ * The @mrq field of struct fh_mci_slot is also protected by @lock,
++ * and must always be written at the same time as the slot is added to
++ * @queue.
++ *
++ * @pending_events and @completed_events are accessed using atomic bit
++ * operations, so they don't need any locking.
++ *
++ * None of the fields touched by the interrupt handler need any
++ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
++ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
++ * interrupts must be disabled and @data_status updated with a
++ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
++ * CMDRDY interrupt must be disabled and @cmd_status updated with a
++ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
++ * bytes_xfered field of @data must be written. This is ensured by
++ * using barriers.
++ */
++struct fh_mci {
++	spinlock_t		lock;
++	void __iomem		*regs;
++
++	struct scatterlist	*sg;
++	unsigned int		pio_offset;
++
++	struct fh_mci_slot	*cur_slot;
++	struct mmc_request	*mrq;
++	struct mmc_command	*cmd;
++	struct mmc_data		*data;
++
++	/* DMA interface members */
++	int			use_dma;
++	int			using_dma;
++	unsigned int		prev_blksz;
++
++	dma_addr_t		sg_dma;
++	void			*sg_cpu;
++	struct fh_mci_dma_ops	*dma_ops;
++#ifdef CONFIG_MMC_FH_IDMAC
++	unsigned int		ring_size;
++#else
++	struct fh_mci_dma_data	*dma_data;
++#endif
++	u32			cmd_status;
++	u32			data_status;
++	u32			stop_cmdr;
++	u32			dir_status;
++	struct tasklet_struct	tasklet;
++	struct tasklet_struct	card_tasklet;
++	unsigned long		pending_events;
++	unsigned long		completed_events;
++	enum fh_mci_state	state;
++	struct list_head	queue;
++
++	u32			bus_hz;
++	u32			current_speed;
++	u32			num_slots;
++	u32			fifoth_val;
++	struct platform_device	*pdev;
++	struct fh_mci_board	*pdata;
++	struct fh_mci_slot	*slot[MAX_MCI_SLOTS];
++
++	/* FIFO push and pull */
++	int			data_shift;
++	void (*push_data)(struct fh_mci *host, void *buf, int cnt);
++	void (*pull_data)(struct fh_mci *host, void *buf, int cnt);
++
++	/* Workaround flags */
++	u32			quirks;
++
++	struct regulator	*vmmc;	/* Power regulator */
++
++	int			dma_data_mapped;
++	int			data_error_flag;
++};
++
++/* DMA ops for Internal/External DMAC interface */
++struct fh_mci_dma_ops {
++	/* DMA Ops */
++	int (*init)(struct fh_mci *host);
++	void (*start)(struct fh_mci *host, unsigned int sg_len);
++	void (*complete)(struct fh_mci *host);
++	void (*stop)(struct fh_mci *host);
++	void (*cleanup)(struct fh_mci *host);
++	void (*exit)(struct fh_mci *host);
++};
++
++/* IP Quirks/flags. */
++/* DTO fix for command transmission with IDMAC configured */
++#define FH_MCI_QUIRK_IDMAC_DTO			BIT(0)
++/* delay needed between retries on some 2.11a implementations */
++#define FH_MCI_QUIRK_RETRY_DELAY		BIT(1)
++/* High Speed Capable - Supports HS cards (up to 50MHz) */
++#define FH_MCI_QUIRK_HIGHSPEED			BIT(2)
++/* Unreliable card detection */
++#define FH_MCI_QUIRK_BROKEN_CARD_DETECTION	BIT(3)
++
++
++struct dma_pdata;
++
++struct block_settings {
++	unsigned short	max_segs;	/* see blk_queue_max_segments */
++	unsigned int	max_blk_size;	/* maximum size of one mmc block */
++	unsigned int	max_blk_count;	/* maximum number of blocks in one req*/
++	unsigned int	max_req_size;	/* maximum number of bytes in one req*/
++	unsigned int	max_seg_size;	/* see blk_queue_max_segment_size */
++};
++
++/* Board platform data */
++struct fh_mci_board {
++	u32 num_slots;
++
++	u32 quirks; /* Workaround / Quirk flags */
++	unsigned int bus_hz; /* Bus speed */
++
++	unsigned int caps;	/* Capabilities */
++
++	/* delay in ms before detecting cards after interrupt */
++	u32 detect_delay_ms;
++
++	int (*init)(u32 slot_id, void *irq_handler_t, void *);
++	int (*get_ro)(u32 slot_id);
++	int (*get_cd)(u32 slot_id);
++	int (*get_ocr)(u32 slot_id);
++	int (*get_bus_wd)(u32 slot_id);
++	/*
++	 * Enable power to selected slot and set voltage to desired level.
++	 * Voltage levels are specified using MMC_VDD_xxx defines defined
++	 * in linux/mmc/host.h file.
++	 */
++	void (*setpower)(u32 slot_id, u32 volt);
++	void (*exit)(u32 slot_id);
++	void (*select_slot)(u32 slot_id);
++
++	struct fh_mci_dma_ops *dma_ops;
++	struct dma_pdata *data;
++	struct block_settings *blk_settings;
++};
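++
++/*
++ * Illustrative board wiring (field names are from this header; the
++ * values and the init callback are placeholders, not taken from any
++ * real board file):
++ *
++ *	static struct fh_mci_board example_mci_pdata = {
++ *		.num_slots	 = 1,
++ *		.bus_hz		 = 50000000,
++ *		.detect_delay_ms = 200,
++ *		.quirks		 = FH_MCI_QUIRK_HIGHSPEED,
++ *		.init		 = example_mci_init,
++ *	};
++ */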
++
++#endif /* _LINUX_MMC_FH_MMC_H_ */
+diff --git a/drivers/mmc/host/fh_mmc_reg.h b/drivers/mmc/host/fh_mmc_reg.h
+new file mode 100644
+index 00000000..8153d4d6
+--- /dev/null
++++ b/drivers/mmc/host/fh_mmc_reg.h
+@@ -0,0 +1,174 @@
++/*
++ * Synopsys DesignWare Multimedia Card Interface driver
++ *  (Based on NXP driver for lpc 31xx)
++ *
++ * Copyright (C) 2009 NXP Semiconductors
++ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef _FH_MMC_REG_H_
++#define _FH_MMC_REG_H_
++
++#define SDMMC_CTRL		0x000
++#define SDMMC_PWREN		0x004
++#define SDMMC_CLKDIV		0x008
++#define SDMMC_CLKSRC		0x00c
++#define SDMMC_CLKENA		0x010
++#define SDMMC_TMOUT		0x014
++#define SDMMC_CTYPE		0x018
++#define SDMMC_BLKSIZ		0x01c
++#define SDMMC_BYTCNT		0x020
++#define SDMMC_INTMASK		0x024
++#define SDMMC_CMDARG		0x028
++#define SDMMC_CMD		0x02c
++#define SDMMC_RESP0		0x030
++#define SDMMC_RESP1		0x034
++#define SDMMC_RESP2		0x038
++#define SDMMC_RESP3		0x03c
++#define SDMMC_MINTSTS		0x040
++#define SDMMC_RINTSTS		0x044
++#define SDMMC_STATUS		0x048
++#define SDMMC_FIFOTH		0x04c
++#define SDMMC_CDETECT		0x050
++#define SDMMC_WRTPRT		0x054
++#define SDMMC_GPIO		0x058
++#define SDMMC_TCBCNT		0x05c
++#define SDMMC_TBBCNT		0x060
++#define SDMMC_DEBNCE		0x064
++#define SDMMC_USRID		0x068
++#define SDMMC_VERID		0x06c
++#define SDMMC_HCON		0x070
++#define SDMMC_UHS_REG		0x074
++#define SDMMC_RST_n		0x078
++#define SDMMC_BMOD		0x080
++#define SDMMC_PLDMND		0x084
++#define SDMMC_DBADDR		0x088
++#define SDMMC_IDSTS		0x08c
++#define SDMMC_IDINTEN		0x090
++#define SDMMC_DSCADDR		0x094
++#define SDMMC_BUFADDR		0x098
++#define SDMMC_DATA		0x200
++
++/* shift bit field */
++#define _SBF(f, v)		((v) << (f))
++
++/* Control register defines */
++#define SDMMC_CTRL_USE_IDMAC		BIT(25)
++#define SDMMC_CTRL_CEATA_INT_EN		BIT(11)
++#define SDMMC_CTRL_SEND_AS_CCSD		BIT(10)
++#define SDMMC_CTRL_SEND_CCSD		BIT(9)
++#define SDMMC_CTRL_ABRT_READ_DATA	BIT(8)
++#define SDMMC_CTRL_SEND_IRQ_RESP	BIT(7)
++#define SDMMC_CTRL_READ_WAIT		BIT(6)
++#define SDMMC_CTRL_DMA_ENABLE		BIT(5)
++#define SDMMC_CTRL_INT_ENABLE		BIT(4)
++#define SDMMC_CTRL_DMA_RESET		BIT(2)
++#define SDMMC_CTRL_FIFO_RESET		BIT(1)
++#define SDMMC_CTRL_RESET		BIT(0)
++/* Clock Enable register defines */
++#define SDMMC_CLKEN_LOW_PWR		BIT(16)
++#define SDMMC_CLKEN_ENABLE		BIT(0)
++/* time-out register defines */
++#define SDMMC_TMOUT_DATA(n)		_SBF(8, (n))
++#define SDMMC_TMOUT_DATA_MSK		0xFFFFFF00
++#define SDMMC_TMOUT_RESP(n)		((n) & 0xFF)
++#define SDMMC_TMOUT_RESP_MSK		0xFF
++/* card-type register defines */
++#define SDMMC_CTYPE_8BIT		BIT(16)
++#define SDMMC_CTYPE_4BIT		BIT(0)
++#define SDMMC_CTYPE_1BIT		0
++/* Interrupt status & mask register defines */
++//#define SDMMC_INT_SDIO			BIT(16)
++#define SDMMC_INT_SDIO(n)		BIT(16 + (n))
++#define SDMMC_INT_EBE			BIT(15)
++#define SDMMC_INT_ACD			BIT(14)
++#define SDMMC_INT_SBE			BIT(13)
++#define SDMMC_INT_HLE			BIT(12)
++#define SDMMC_INT_FRUN			BIT(11)
++#define SDMMC_INT_HTO			BIT(10)
++#define SDMMC_INT_DTO			BIT(9)
++#define SDMMC_INT_RTO			BIT(8)
++#define SDMMC_INT_DCRC			BIT(7)
++#define SDMMC_INT_RCRC			BIT(6)
++#define SDMMC_INT_RXDR			BIT(5)
++#define SDMMC_INT_TXDR			BIT(4)
++#define SDMMC_INT_DATA_OVER		BIT(3)
++#define SDMMC_INT_CMD_DONE		BIT(2)
++#define SDMMC_INT_RESP_ERR		BIT(1)
++#define SDMMC_INT_CD			BIT(0)
++#define SDMMC_INT_ERROR			0xbfc2
++/* Command register defines */
++#define SDMMC_CMD_START			BIT(31)
++#define SDMMC_CMD_USE_HOLD_REG	BIT(29)
++#define SDMMC_CMD_CCS_EXP		BIT(23)
++#define SDMMC_CMD_CEATA_RD		BIT(22)
++#define SDMMC_CMD_UPD_CLK		BIT(21)
++#define SDMMC_CMD_INIT			BIT(15)
++#define SDMMC_CMD_STOP			BIT(14)
++#define SDMMC_CMD_PRV_DAT_WAIT		BIT(13)
++#define SDMMC_CMD_SEND_STOP		BIT(12)
++#define SDMMC_CMD_STRM_MODE		BIT(11)
++#define SDMMC_CMD_DAT_WR		BIT(10)
++#define SDMMC_CMD_DAT_EXP		BIT(9)
++#define SDMMC_CMD_RESP_CRC		BIT(8)
++#define SDMMC_CMD_RESP_LONG		BIT(7)
++#define SDMMC_CMD_RESP_EXP		BIT(6)
++#define SDMMC_CMD_INDX(n)		((n) & 0x1F)
++/* Status register defines */
++#define SDMMC_GET_FCNT(x)		(((x)>>17) & 0x1FF)
++#define SDMMC_FIFO_SZ			32
++/* Internal DMAC interrupt defines */
++#define SDMMC_IDMAC_INT_AI		BIT(9)
++#define SDMMC_IDMAC_INT_NI		BIT(8)
++#define SDMMC_IDMAC_INT_CES		BIT(5)
++#define SDMMC_IDMAC_INT_DU		BIT(4)
++#define SDMMC_IDMAC_INT_FBE		BIT(2)
++#define SDMMC_IDMAC_INT_RI		BIT(1)
++#define SDMMC_IDMAC_INT_TI		BIT(0)
++/* Internal DMAC bus mode bits */
++#define SDMMC_IDMAC_ENABLE		BIT(7)
++#define SDMMC_IDMAC_FB			BIT(1)
++#define SDMMC_IDMAC_SWRESET		BIT(0)
++#define SDMMC_SET_FIFOTH(m, r, t)	(((m) & 0x7) << 28 | \
++					 ((r) & 0xFFF) << 16 | \
++					 ((t) & 0xFFF))
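++/*
++ * Example: SDMMC_SET_FIFOTH(0x2, 15, 16) selects a DMA multiple
++ * transaction size of 8, RX_WMark 15 and TX_WMark 16 -- the values
++ * the probe code programs for a 32-entry FIFO.
++ */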
++
++/* Register access macros */
++#define mci_readl(dev, reg)			\
++	__raw_readl(dev->regs + SDMMC_##reg)
++#define mci_writel(dev, reg, value)			\
++	__raw_writel((value), dev->regs + SDMMC_##reg)
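++
++/*
++ * The SDMMC_##reg paste lets callers use bare register names, e.g.
++ * mci_writel(host, INTMASK, 0) expands to
++ * __raw_writel(0, host->regs + SDMMC_INTMASK).
++ */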
++
++/* 16-bit FIFO access macros */
++#define mci_readw(dev, reg)			\
++	__raw_readw(dev->regs + SDMMC_##reg)
++#define mci_writew(dev, reg, value)			\
++	__raw_writew((value), dev->regs + SDMMC_##reg)
++
++/* 64-bit FIFO access macros */
++#ifdef readq
++#define mci_readq(dev, reg)			\
++	__raw_readq(dev->regs + SDMMC_##reg)
++#define mci_writeq(dev, reg, value)			\
++	__raw_writeq((value), dev->regs + SDMMC_##reg)
++#else
++/*
++ * Dummy readq implementation for architectures that don't define it.
++ *
++ * We assume none of these architectures configures the IP block with
++ * a 64-bit FIFO width, so this code should never execute on those
++ * machines. Defining the macros here keeps the rest of the code free
++ * of ifdefs.
++ */
++#define mci_readq(dev, reg)			\
++	(*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
++#define mci_writeq(dev, reg, value)			\
++	(*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
++#endif
++
++#endif /* _FH_MMC_REG_H_ */
+diff --git a/drivers/mmc/host/fhmci/Makefile b/drivers/mmc/host/fhmci/Makefile
+new file mode 100644
+index 00000000..b017f98e
+--- /dev/null
++++ b/drivers/mmc/host/fhmci/Makefile
+@@ -0,0 +1,3 @@
++
++obj-y += fh_mci.o
++fh_mci-y := fhmci.o
+diff --git a/drivers/mmc/host/fhmci/fhmci.c b/drivers/mmc/host/fhmci/fhmci.c
+new file mode 100644
+index 00000000..6a19e951
+--- /dev/null
++++ b/drivers/mmc/host/fhmci/fhmci.c
+@@ -0,0 +1,1539 @@
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++#include <linux/regulator/consumer.h>
++#include <linux/mmc/host.h>
++#include <linux/mmc/mmc.h>
++#include <linux/mmc/sd.h>
++#include <linux/mmc/card.h>
++#include <linux/slab.h>
++
++#include <linux/ioport.h>
++#include <linux/device.h>
++#include <linux/gpio.h>
++
++#include <linux/delay.h>
++#include <linux/kthread.h>
++#include <linux/workqueue.h>
++#include <linux/freezer.h>
++#include <asm/dma.h>
++#include <linux/io.h>
++#include <asm/irq.h>
++#include <asm/sizes.h>
++#include <linux/uaccess.h>
++#include <mach/hardware.h>
++#include <linux/clk.h>
++#include "fhmci_reg.h"
++#include <mach/fhmci.h>
++
++#include <mach/pmu.h>
++
++
++#ifndef FALSE
++#define FALSE   (0)
++#endif
++
++#ifndef TRUE
++#define TRUE    (!(FALSE))
++#endif
++
++#define SD_POWER_ON   1
++#define SD_POWER_OFF  0
++#define DRIVER_NAME	"fh_mci"
++
++static unsigned int retry_count = MAX_RETRY_COUNT;
++static unsigned int request_timeout = FH_MCI_REQUEST_TIMEOUT;
++int trace_level = FHMCI_TRACE_LEVEL;
++struct mmc_host *mmc_sd1 = NULL;
++struct mmc_host *mmc_sd0 = NULL;
++
++#ifdef MODULE
++
++MODULE_PARM_DESC(detect_timer, "card detect time (default: 500ms)");
++
++module_param(retry_count, uint, 0600);
++MODULE_PARM_DESC(retry_count, "retry count (default: 100)");
++
++module_param(request_timeout, uint, 0600);
++MODULE_PARM_DESC(request_timeout, "request timeout (default: 3s)");
++
++module_param(trace_level, int, 0600);
++MODULE_PARM_DESC(trace_level, "FHMCI_TRACE_LEVEL");
++
++#endif
++
++#include "fhmci_io.c"
++
++/* reset the MMC host controller */
++static void fh_mci_sys_reset(struct fhmci_host *host)
++{
++	unsigned int reg_value;
++	unsigned long flags;
++
++	fhmci_trace(2, "reset");
++
++	local_irq_save(flags);
++
++	reg_value = fhmci_readl(host->base + MCI_BMOD);
++	reg_value |= BMOD_SWR;
++	fhmci_writel(reg_value, host->base + MCI_BMOD);
++	udelay(50);
++
++	reg_value = fhmci_readl(host->base + MCI_BMOD);
++	reg_value |= BURST_INCR;
++	fhmci_writel(reg_value, host->base + MCI_BMOD);
++
++	reg_value = fhmci_readl(host->base + MCI_CTRL);
++	reg_value |=  CTRL_RESET | FIFO_RESET | DMA_RESET;
++	fhmci_writel(reg_value, host->base + MCI_CTRL);
++
++	local_irq_restore(flags);
++}
++
++static void fh_mci_sys_undo_reset(struct fhmci_host *host)
++{
++	unsigned long flags;
++
++	fhmci_trace(2, "undo reset");
++
++	local_irq_save(flags);
++	local_irq_restore(flags);
++}
++
++static void fh_mci_ctrl_power(struct fhmci_host *host, unsigned int flag)
++{
++	fhmci_trace(2, "begin");
++
++}
++
++/* returns 1 when no card is present, 0 when a card is inserted */
++static unsigned int fh_mci_sys_card_detect(struct fhmci_host *host)
++{
++	unsigned int card_status = readl(host->base + MCI_CDETECT);
++	return card_status & FHMCI_CARD0;
++}
++
++/* returns 1 when the card is read-only, 0 when it is read/write */
++static unsigned int fh_mci_ctrl_card_readonly(struct fhmci_host *host)
++{
++	unsigned int card_value = fhmci_readl(host->base + MCI_WRTPRT);
++	return card_value & FHMCI_CARD0;
++}
++
++static int fh_mci_wait_cmd(struct fhmci_host *host)
++{
++	int wait_retry_count = 0;
++	unsigned int reg_data = 0;
++	unsigned long flags;
++
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++
++	while (1) {
++		/*
++		 * Check if CMD::start_cmd bit is clear.
++		 * start_cmd = 0 means MMC Host controller has loaded registers
++		 * and next command can be loaded in.
++		 */
++		reg_data = readl(host->base + MCI_CMD);
++		if ((reg_data & START_CMD) == 0)
++			return 0;
++
++		/* Check if Raw_Intr_Status::HLE bit is set. */
++		spin_lock_irqsave(&host->lock, flags);
++		reg_data = readl(host->base + MCI_RINTSTS);
++		if (reg_data & HLE_INT_STATUS) {
++			reg_data &= (~SDIO_INT_STATUS);
++			fhmci_writel(reg_data, host->base + MCI_RINTSTS);
++			spin_unlock_irqrestore(&host->lock, flags);
++
++			fhmci_trace(3, "another command is running, "
++					"please retry this command!");
++			return 1;
++		}
++
++		spin_unlock_irqrestore(&host->lock, flags);
++		udelay(100);
++
++		/* Check if number of retries for this are over. */
++		wait_retry_count++;
++		if (wait_retry_count >= retry_count) {
++			fhmci_trace(3, "send cmd is timeout!");
++			return -1;
++		}
++	}
++}
++
++static void fh_mci_control_cclk(struct fhmci_host *host, unsigned int flag)
++{
++	unsigned int reg;
++	union cmd_arg_s cmd_reg;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++
++	reg = fhmci_readl(host->base + MCI_CLKENA);
++	if (flag == ENABLE)
++		reg |= CCLK_ENABLE;
++	else
++		reg &= 0xffff0000;
++	fhmci_writel(reg, host->base + MCI_CLKENA);
++
++	cmd_reg.cmd_arg = fhmci_readl(host->base + MCI_CMD);
++	cmd_reg.bits.start_cmd = 1;
++	cmd_reg.bits.update_clk_reg_only = 1;
++	fhmci_writel(cmd_reg.cmd_arg, host->base + MCI_CMD);
++	if (fh_mci_wait_cmd(host) != 0)
++		fhmci_trace(3, "disable or enable clk is timeout!");
++}
++
++static void fh_mci_set_cclk(struct fhmci_host *host, unsigned int cclk)
++{
++	unsigned int reg_value;
++	union cmd_arg_s clk_cmd;
++	struct fh_mci_board *pdata;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	fhmci_assert(cclk);
++
++	pdata = host->pdata;
++
++	/*
++	 * set card clk divider value,
++	 * clk_divider = Fmmcclk/(Fmmc_cclk * 2)
++	 */
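++	/*
++	 * Worked example (assuming bus_hz = 50 MHz): cclk = 400 kHz gives
++	 * 50000000 / 800000 = 62.5, rounded up to 63, i.e. an actual card
++	 * clock of 50 MHz / (2 * 63) ~= 397 kHz, just under the target.
++	 */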
++
++	if (host->id != 0 && host->id != 1) {
++		fhmci_error("fhmci host id error!");
++		return;
++	}
++
++	if (pdata->bus_hz <= cclk)
++		reg_value = 0;
++	else
++		reg_value = DIV_ROUND_UP(pdata->bus_hz, cclk * 2);
++
++	fhmci_writel(reg_value, host->base + MCI_CLKDIV);
++
++
++	clk_cmd.cmd_arg = fhmci_readl(host->base + MCI_CMD);
++	clk_cmd.bits.start_cmd = 1;
++	clk_cmd.bits.update_clk_reg_only = 1;
++	fhmci_writel(clk_cmd.cmd_arg, host->base + MCI_CMD);
++
++	if (fh_mci_wait_cmd(host) != 0)
++		fhmci_trace(3, "set card clk divider is failed!");
++}
++
++static void fh_mci_init_card(struct fhmci_host *host)
++{
++	unsigned int tmp_reg, tmp;
++	unsigned long flags;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	tmp = fhmci_readl(host->base + MCI_PWREN);
++	fh_mci_sys_reset(host);
++	fh_mci_ctrl_power(host, POWER_OFF);
++	udelay(500);
++	/* card power on */
++	fh_mci_ctrl_power(host, POWER_ON);
++	udelay(200);
++
++	fh_mci_sys_undo_reset(host);
++
++	/* set phase shift */
++	/* set card read threshold */
++
++	/* clear MMC host intr */
++	fhmci_writel(ALL_INT_CLR, host->base + MCI_RINTSTS);
++
++	spin_lock_irqsave(&host->lock, flags);
++	host->pending_events = 0;
++	spin_unlock_irqrestore(&host->lock, flags);
++
++	/* MASK MMC host intr */
++	tmp_reg = fhmci_readl(host->base + MCI_INTMASK);
++	tmp_reg &= ~ALL_INT_MASK;
++	tmp_reg |= DATA_INT_MASK;
++	fhmci_writel(tmp_reg, host->base + MCI_INTMASK);
++
++	/* enable internal DMA mode and the controller interrupt */
++	tmp_reg = fhmci_readl(host->base + MCI_CTRL);
++	tmp_reg &= ~INTR_EN;
++	tmp_reg |= USE_INTERNAL_DMA | INTR_EN;
++	fhmci_writel(tmp_reg, host->base + MCI_CTRL);
++
++	/* set timeout param */
++	fhmci_writel(DATA_TIMEOUT | RESPONSE_TIMEOUT, host->base + MCI_TIMEOUT);
++
++	/* set FIFO param */
++	if (host->pdata->fifo_depth > 15)
++		tmp = 0x5;
++	else
++		tmp = 0x2;
++
++	tmp_reg = ((tmp << 28) | ((host->pdata->fifo_depth / 2) << 16)
++			| (((host->pdata->fifo_depth / 2) + 1) << 0));
++	fhmci_writel(tmp_reg, host->base + MCI_FIFOTH);
++}
++
++int read_mci_ctrl_states(int id_mmc_sd)
++{
++	if ((id_mmc_sd == ID_SD0) && (mmc_sd0 != NULL))
++		return mmc_sd0->rescan_disable;
++	else if ((id_mmc_sd == ID_SD1) && (mmc_sd1 != NULL))
++		return mmc_sd1->rescan_disable;
++
++	return -1;
++}
++
++int storage_dev_set_mmc_rescan(struct mmc_ctrl *m_ctrl)
++{
++	unsigned int tmp;
++	struct mmc_host *mmc_sd = NULL;
++	tmp = m_ctrl->mmc_ctrl_state;
++
++	if (m_ctrl->slot_idx == 1) {
++		if (mmc_sd1 != NULL)
++			mmc_sd = mmc_sd1;
++	} else if (m_ctrl->slot_idx == 0) {
++		if (mmc_sd0 != NULL)
++			mmc_sd = mmc_sd0;
++	}
++	if ((tmp != TRUE) && (tmp != FALSE))
++		return -1;
++
++	if (mmc_sd != NULL) {
++		mmc_sd->rescan_disable = tmp;
++		mmc_detect_change(mmc_sd, 0);
++	}
++	return 0;
++}
++
++static void fh_mci_idma_start(struct fhmci_host *host)
++{
++	unsigned int tmp;
++
++	fhmci_trace(2, "begin");
++	fhmci_writel(host->dma_paddr, host->base + MCI_DBADDR);
++	tmp = fhmci_readl(host->base + MCI_BMOD);
++	tmp |= BMOD_DMA_EN;
++	tmp |= BURST_INCR;
++	fhmci_writel(tmp, host->base + MCI_BMOD);
++}
++
++static void fh_mci_idma_stop(struct fhmci_host *host)
++{
++	unsigned int tmp_reg;
++
++	fhmci_trace(2, "begin");
++	tmp_reg = fhmci_readl(host->base + MCI_BMOD);
++	tmp_reg &= ~BMOD_DMA_EN;
++	tmp_reg |= BMOD_SWR;
++	fhmci_writel(tmp_reg, host->base + MCI_BMOD);
++}
++
++static int fh_mci_setup_data(struct fhmci_host *host, struct mmc_data *data)
++{
++	unsigned int sg_phyaddr, sg_length;
++	unsigned int i, ret = 0;
++	unsigned int data_size;
++	unsigned int max_des, des_cnt;
++	struct fhmci_des *des;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	fhmci_assert(data);
++
++	host->data = data;
++
++	if (data->flags & MMC_DATA_READ)
++		host->dma_dir = DMA_FROM_DEVICE;
++	else
++		host->dma_dir = DMA_TO_DEVICE;
++
++	host->dma_sg = data->sg;
++	host->dma_sg_num = dma_map_sg(mmc_dev(host->mmc),
++			data->sg, data->sg_len, host->dma_dir);
++	fhmci_assert(host->dma_sg_num);
++	fhmci_trace(2, "host->dma_sg_num is %d\n", host->dma_sg_num);
++	data_size = data->blksz * data->blocks;
++
++	if (data_size > (DMA_BUFFER * MAX_DMA_DES)) {
++		fhmci_error("mci request data_size is too big!\n");
++		ret = -1;
++		goto out;
++	}
++
++	fhmci_trace(2, "host->dma_paddr is 0x%08X,host->dma_vaddr is 0x%08X\n",
++			(unsigned int)host->dma_paddr,
++			(unsigned int)host->dma_vaddr);
++
++	max_des = (PAGE_SIZE/sizeof(struct fhmci_des));
++	des = (struct fhmci_des *)host->dma_vaddr;
++	des_cnt = 0;
++
++	for (i = 0; i < host->dma_sg_num; i++) {
++		sg_length = sg_dma_len(&data->sg[i]);
++		sg_phyaddr = sg_dma_address(&data->sg[i]);
++		fhmci_trace(2, "sg[%d] sg_length is 0x%08X, " \
++				"sg_phyaddr is 0x%08X\n", \
++				i, (unsigned int)sg_length, \
++				(unsigned int)sg_phyaddr);
++		while (sg_length) {
++			des[des_cnt].idmac_des_ctrl = DMA_DES_OWN
++				| DMA_DES_NEXT_DES;
++			des[des_cnt].idmac_des_buf_addr = sg_phyaddr;
++			/* idmac_des_next_addr is paddr for dma */
++			des[des_cnt].idmac_des_next_addr = host->dma_paddr
++				+ (des_cnt + 1) * sizeof(struct fhmci_des);
++
++			if (sg_length >= 0x1F00) {
++				des[des_cnt].idmac_des_buf_size = 0x1F00;
++				sg_length -= 0x1F00;
++				sg_phyaddr += 0x1F00;
++			} else {
++				/* FIXME:data alignment */
++				des[des_cnt].idmac_des_buf_size = sg_length;
++				sg_length = 0;
++			}
++
++			fhmci_trace(2, "des[%d] vaddr is 0x%08X", des_cnt,
++					(unsigned int)&des[des_cnt]);
++			fhmci_trace(2, "des[%d].idmac_des_ctrl is 0x%08X",
++				des_cnt, (unsigned int)des[des_cnt].idmac_des_ctrl);
++			fhmci_trace(2, "des[%d].idmac_des_buf_size is 0x%08X",
++				des_cnt, (unsigned int)des[des_cnt].idmac_des_buf_size);
++			fhmci_trace(2, "des[%d].idmac_des_buf_addr 0x%08X",
++				des_cnt, (unsigned int)des[des_cnt].idmac_des_buf_addr);
++			fhmci_trace(2, "des[%d].idmac_des_next_addr is 0x%08X",
++				des_cnt, (unsigned int)des[des_cnt].idmac_des_next_addr);
++			des_cnt++;
++		}
++
++		fhmci_assert(des_cnt < max_des);
++	}
++	des[0].idmac_des_ctrl |= DMA_DES_FIRST_DES;
++	des[des_cnt - 1].idmac_des_ctrl |= DMA_DES_LAST_DES;
++	des[des_cnt - 1].idmac_des_next_addr = 0;
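++	/*
++	 * The result is a singly linked descriptor chain: the first entry
++	 * carries DMA_DES_FIRST_DES, the last carries DMA_DES_LAST_DES
++	 * with a NULL next pointer, and every entry maps at most 0x1F00
++	 * bytes of one scatterlist segment.
++	 */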
++out:
++	return ret;
++}
++
++static int fh_mci_exec_cmd(struct fhmci_host *host, struct mmc_command *cmd,
++		struct mmc_data *data)
++{
++	volatile union cmd_arg_s cmd_regs;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	fhmci_assert(cmd);
++
++	host->cmd = cmd;
++
++	fhmci_writel(cmd->arg, host->base + MCI_CMDARG);
++	fhmci_trace(2, "arg_reg 0x%x, val 0x%x\n", MCI_CMDARG, cmd->arg);
++	cmd_regs.cmd_arg = fhmci_readl(host->base + MCI_CMD);
++	if (data) {
++		cmd_regs.bits.data_transfer_expected = 1;
++		if (data->flags & (MMC_DATA_WRITE | MMC_DATA_READ))
++			cmd_regs.bits.transfer_mode = 0;
++
++		if (data->flags & MMC_DATA_STREAM)
++			cmd_regs.bits.transfer_mode = 1;
++
++		if (data->flags & MMC_DATA_WRITE)
++			cmd_regs.bits.read_write = 1;
++		else if (data->flags & MMC_DATA_READ)
++			cmd_regs.bits.read_write = 0;
++	} else {
++		cmd_regs.bits.data_transfer_expected = 0;
++		cmd_regs.bits.transfer_mode = 0;
++		cmd_regs.bits.read_write = 0;
++	}
++	cmd_regs.bits.send_auto_stop = 0;
++#ifdef CONFIG_SEND_AUTO_STOP
++	if ((host->mrq->stop) && (!(host->is_tuning)))
++		cmd_regs.bits.send_auto_stop = 1;
++#endif
++
++	if (cmd == host->mrq->stop) {
++		cmd_regs.bits.stop_abort_cmd = 1;
++		cmd_regs.bits.wait_prvdata_complete = 0;
++	} else {
++		cmd_regs.bits.stop_abort_cmd = 0;
++		cmd_regs.bits.wait_prvdata_complete = 1;
++	}
++
++	switch (mmc_resp_type(cmd)) {
++	case MMC_RSP_NONE:
++		cmd_regs.bits.response_expect = 0;
++		cmd_regs.bits.response_length = 0;
++		cmd_regs.bits.check_response_crc = 0;
++		break;
++	case MMC_RSP_R1:
++	case MMC_RSP_R1B:
++		cmd_regs.bits.response_expect = 1;
++		cmd_regs.bits.response_length = 0;
++		cmd_regs.bits.check_response_crc = 1;
++		break;
++	case MMC_RSP_R2:
++		cmd_regs.bits.response_expect = 1;
++		cmd_regs.bits.response_length = 1;
++		cmd_regs.bits.check_response_crc = 1;
++		break;
++	case MMC_RSP_R3:
++		cmd_regs.bits.response_expect = 1;
++		cmd_regs.bits.response_length = 0;
++		cmd_regs.bits.check_response_crc = 0;
++		break;
++	default:
++		fhmci_error("fh_mci: unhandled response type %02x\n",
++				mmc_resp_type(cmd));
++		return -EINVAL;
++	}
++
++	fhmci_trace(2, "send cmd of card is cmd->opcode = %d ", cmd->opcode);
++	if (cmd->opcode == MMC_GO_IDLE_STATE)
++		cmd_regs.bits.send_initialization = 1;
++	else
++		cmd_regs.bits.send_initialization = 0;
++	/* CMD 11 check switch voltage */
++	if (cmd->opcode == SD_SWITCH_VOLTAGE)
++		cmd_regs.bits.volt_switch = 1;
++	else
++		cmd_regs.bits.volt_switch = 0;
++
++
++	cmd_regs.bits.card_number = 0;
++	cmd_regs.bits.cmd_index = cmd->opcode;
++	cmd_regs.bits.start_cmd = 1;
++	cmd_regs.bits.update_clk_reg_only = 0;
++	fhmci_writel(DATA_INT_MASK, host->base + MCI_RINTSTS);
++	fhmci_writel(cmd_regs.cmd_arg, host->base + MCI_CMD);
++	fhmci_trace(2, "cmd_reg 0x%x, val 0x%x\n", MCI_CMD, cmd_regs.cmd_arg);
++
++	if (fh_mci_wait_cmd(host) != 0) {
++		fhmci_trace(3, "send card cmd is failed!");
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static void fh_mci_finish_request(struct fhmci_host *host,
++		struct mmc_request *mrq)
++{
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	fhmci_assert(mrq);
++
++	host->mrq = NULL;
++	host->cmd = NULL;
++	host->data = NULL;
++
++	mmc_request_done(host->mmc, mrq);
++}
++
++static void fh_mci_cmd_done(struct fhmci_host *host, unsigned int stat)
++{
++	unsigned int i;
++	struct mmc_command *cmd = host->cmd;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	fhmci_assert(cmd);
++
++
++	for (i = 0; i < 4; i++) {
++		if (mmc_resp_type(cmd) == MMC_RSP_R2) {
++			cmd->resp[i] = fhmci_readl(host->base +
++					MCI_RESP3 - i * 0x4);
++			/* R2 responses need a short delay here when a UHS
++			   card is used; root cause still unknown */
++			udelay(1000);
++		} else
++			cmd->resp[i] = fhmci_readl(host->base +
++					MCI_RESP0 + i * 0x4);
++	}
++
++	if (stat & RTO_INT_STATUS) {
++		cmd->error = -ETIMEDOUT;
++		fhmci_trace(3, "irq cmd status stat = 0x%x is timeout error!",
++				stat);
++	} else if (stat & (RCRC_INT_STATUS | RE_INT_STATUS)) {
++		cmd->error = -EILSEQ;
++		fhmci_trace(3, "irq cmd status stat = 0x%x is response error!",
++				stat);
++	}
++	host->cmd = NULL;
++}
++
++
++static void fh_mci_data_done(struct fhmci_host *host, unsigned int stat)
++{
++	struct mmc_data *data = host->data;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	fhmci_assert(data);
++
++
++	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
++
++	if (stat & (HTO_INT_STATUS | DRTO_INT_STATUS)) {
++		data->error = -ETIMEDOUT;
++		fhmci_trace(3, "irq data status stat = 0x%x is timeout error!",
++				stat);
++	} else if (stat & (EBE_INT_STATUS | SBE_INT_STATUS | FRUN_INT_STATUS
++				| DCRC_INT_STATUS)) {
++#ifndef CONFIG_MACH_FH8830_FPGA
++		data->error = -EILSEQ;
++#endif
++		fhmci_trace(3, "irq data status stat = 0x%x is data error!",
++				stat);
++	}
++
++	if (!data->error)
++		data->bytes_xfered = data->blocks * data->blksz;
++	else
++		data->bytes_xfered = 0;
++
++	host->data = NULL;
++}
++
++
++static int fh_mci_wait_cmd_complete(struct fhmci_host *host)
++{
++	unsigned int cmd_retry_count = 0;
++	unsigned long cmd_jiffies_timeout;
++	unsigned int cmd_irq_reg = 0;
++	struct mmc_command *cmd = host->cmd;
++	unsigned long flags;
++	unsigned int cmd_done = 0;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	fhmci_assert(cmd);
++
++	cmd_jiffies_timeout = jiffies + request_timeout;
++	while (1) {
++
++		do {
++			spin_lock_irqsave(&host->lock, flags);
++			cmd_irq_reg = readl(host->base + MCI_RINTSTS);
++
++			if (cmd_irq_reg & CD_INT_STATUS) {
++				fhmci_writel((CD_INT_STATUS | RTO_INT_STATUS
++					| RCRC_INT_STATUS | RE_INT_STATUS),
++					host->base + MCI_RINTSTS);
++				spin_unlock_irqrestore(&host->lock, flags);
++				cmd_done = 1;
++				break;
++			} else if (cmd_irq_reg & VOLT_SWITCH_INT_STATUS) {
++				fhmci_writel(VOLT_SWITCH_INT_STATUS, \
++						host->base + MCI_RINTSTS);
++				spin_unlock_irqrestore(&host->lock, flags);
++				cmd_done = 1;
++				break;
++			}
++			spin_unlock_irqrestore(&host->lock, flags);
++			cmd_retry_count++;
++		} while (cmd_retry_count < retry_count &&
++				host->get_cd(host) != CARD_UNPLUGED);
++
++		cmd_retry_count = 0;
++
++		if ((host->card_status == CARD_UNPLUGED)
++			|| (host->get_cd(host) == CARD_UNPLUGED)) {
++			cmd->error = -ETIMEDOUT;
++			return -1;
++		}
++		if (cmd_done) {
++			fh_mci_cmd_done(host, cmd_irq_reg);
++			return 0;
++		}
++
++		if (!time_before(jiffies, cmd_jiffies_timeout)) {
++			unsigned int i = 0;
++			for (i = 0; i < 4; i++) {
++				cmd->resp[i] = fhmci_readl(host->base \
++						+ MCI_RESP0 + i * 0x4);
++				printk(KERN_ERR "voltage switch read MCI_RESP");
++				printk(KERN_ERR "%d : 0x%x\n", i, cmd->resp[i]);
++			}
++			cmd->error = -ETIMEDOUT;
++			fhmci_trace(3, "wait cmd request complete is timeout!");
++			return -1;
++		}
++
++		schedule();
++	}
++}
++/*
++ * The DesignWare core can send the stop command automatically when
++ * reading or writing multiple blocks.
++ */
++#ifdef CONFIG_SEND_AUTO_STOP
++static int fh_mci_wait_auto_stop_complete(struct fhmci_host *host)
++{
++	unsigned int cmd_retry_count = 0;
++	unsigned long cmd_jiffies_timeout;
++	unsigned int cmd_irq_reg = 0;
++	unsigned long flags;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++
++	cmd_jiffies_timeout = jiffies + request_timeout;
++	while (1) {
++
++		do {
++			spin_lock_irqsave(&host->lock, flags);
++			cmd_irq_reg = readl(host->base + MCI_RINTSTS);
++			if (cmd_irq_reg & ACD_INT_STATUS) {
++				fhmci_writel((ACD_INT_STATUS | RTO_INT_STATUS
++					| RCRC_INT_STATUS | RE_INT_STATUS),
++					host->base + MCI_RINTSTS);
++				spin_unlock_irqrestore(&host->lock, flags);
++				return 0;
++			}
++			spin_unlock_irqrestore(&host->lock, flags);
++			cmd_retry_count++;
++		} while (cmd_retry_count < retry_count);
++
++		cmd_retry_count = 0;
++		if (host->card_status == CARD_UNPLUGED)
++			return -1;
++
++		if (!time_before(jiffies, cmd_jiffies_timeout)) {
++			fhmci_trace(3, "wait auto stop complete is timeout!");
++			return -1;
++		}
++
++		schedule();
++	}
++
++}
++#endif
++static int fh_mci_wait_data_complete(struct fhmci_host *host)
++{
++	unsigned int tmp_reg;
++	struct mmc_data *data = host->data;
++	long time = request_timeout;
++	unsigned long flags;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	fhmci_assert(data);
++
++	time = wait_event_timeout(host->intr_wait,
++		test_bit(FHMCI_PEND_DTO_b, &host->pending_events),
++		time);
++
++	/* Mask MMC host data intr */
++	spin_lock_irqsave(&host->lock, flags);
++	tmp_reg = fhmci_readl(host->base + MCI_INTMASK);
++	tmp_reg &= ~DATA_INT_MASK;
++	fhmci_writel(tmp_reg, host->base + MCI_INTMASK);
++	host->pending_events &= ~FHMCI_PEND_DTO_m;
++	spin_unlock_irqrestore(&host->lock, flags);
++
++	if (((time <= 0)
++		&& (!test_bit(FHMCI_PEND_DTO_b, &host->pending_events)))
++		|| (host->card_status == CARD_UNPLUGED)) {
++
++		data->error = -ETIMEDOUT;
++		fhmci_trace(3, "wait data request complete is timeout! 0x%08X",
++				host->irq_status);
++		fh_mci_idma_stop(host);
++		fh_mci_data_done(host, host->irq_status);
++		return -1;
++	}
++
++	fh_mci_idma_stop(host);
++	fh_mci_data_done(host, host->irq_status);
++	return 0;
++}
++
++
++static int fh_mci_wait_card_complete(struct fhmci_host *host,
++		struct mmc_data *data)
++{
++	unsigned int card_retry_count = 0;
++	unsigned long card_jiffies_timeout;
++	unsigned int card_status_reg = 0;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(host);
++	/* fhmci_assert(data); */
++
++	card_jiffies_timeout = jiffies + FH_MCI_DETECT_TIMEOUT;
++	while (1) {
++
++		do {
++			card_status_reg = readl(host->base + MCI_STATUS);
++			if (!(card_status_reg & DATA_BUSY)) {
++				fhmci_trace(2, "end");
++				return 0;
++			}
++			card_retry_count++;
++		} while (card_retry_count < retry_count);
++		card_retry_count = 0;
++
++		if (host->card_status == CARD_UNPLUGED) {
++			data->error = -ETIMEDOUT;
++			return -1;
++		}
++
++		if (!time_before(jiffies, card_jiffies_timeout)) {
++			if (data != NULL)
++				data->error = -ETIMEDOUT;
++			fhmci_trace(3, "wait card ready complete is timeout!");
++			return -1;
++		}
++
++		schedule();
++	}
++}
++
++static unsigned long t = 0;
++static unsigned long cmds = 0;
++static unsigned long long send_byte_count = 0;
++static struct timeval in_cmd, out_cmd;
++static struct timeval *x = &out_cmd, *y = &in_cmd;
++static unsigned long max = 0, sum = 0;
++static unsigned long called = 0, ended = 0;
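++
++/*
++ * Ad-hoc profiling state for host 1 (the SDIO slot, judging by the
++ * commented-out report below): bytes and commands per second plus
++ * worst-case and cumulative request latency.
++ */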
++
++static void fh_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
++{
++	struct fhmci_host *host = mmc_priv(mmc);
++	int byte_cnt = 0;
++	#ifdef CONFIG_SEND_AUTO_STOP
++	int trans_cnt;
++	#endif
++	int fifo_count = 0, tmp_reg;
++	int ret = 0;
++	unsigned long flags;
++
++	if (host->id == 1) {
++		called++;
++		memset(x, 0, sizeof(struct timeval));
++		memset(y, 0, sizeof(struct timeval));
++		do_gettimeofday(y);
++	}
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(mmc);
++	fhmci_assert(mrq);
++	fhmci_assert(host);
++
++	host->mrq = mrq;
++	host->irq_status = 0;
++
++
++	if (host->card_status == CARD_UNPLUGED) {
++		mrq->cmd->error = -ENODEV;
++		goto  request_end;
++	}
++	ret = fh_mci_wait_card_complete(host, mrq->data);
++
++	if (ret) {
++		mrq->cmd->error = ret;
++		goto request_end;
++	}
++	/* prepare data */
++	if (mrq->data) {
++		ret = fh_mci_setup_data(host, mrq->data);
++		if (ret) {
++			mrq->data->error = ret;
++			fhmci_trace(3, "data setup is error!");
++			goto request_end;
++		}
++
++		byte_cnt = mrq->data->blksz * mrq->data->blocks;
++		fhmci_writel(byte_cnt, host->base + MCI_BYTCNT);
++		fhmci_writel(mrq->data->blksz, host->base + MCI_BLKSIZ);
++
++		tmp_reg = fhmci_readl(host->base + MCI_CTRL);
++		tmp_reg |= FIFO_RESET;
++		fhmci_writel(tmp_reg, host->base + MCI_CTRL);
++
++		do {
++			tmp_reg = fhmci_readl(host->base + MCI_CTRL);
++			fifo_count++;
++			if (fifo_count >= retry_count) {
++				printk(KERN_INFO "fifo reset is timeout!");
++				return;
++			}
++		} while (tmp_reg & FIFO_RESET);
++
++		/* start DMA */
++		fh_mci_idma_start(host);
++	} else {
++		fhmci_writel(0, host->base + MCI_BYTCNT);
++		fhmci_writel(0, host->base + MCI_BLKSIZ);
++	}
++
++	/* send command */
++	ret = fh_mci_exec_cmd(host, mrq->cmd, mrq->data);
++	if (ret) {
++		mrq->cmd->error = ret;
++		fh_mci_idma_stop(host);
++		fhmci_trace(3, "can't send card cmd! ret = %d", ret);
++		goto request_end;
++	}
++
++	/* wait command send complete */
++	ret = fh_mci_wait_cmd_complete(host);
++
++	/* start data transfer */
++	if (mrq->data) {
++		if (!(mrq->cmd->error)) {
++			/* unmask MMC host data interrupts */
++			spin_lock_irqsave(&host->lock, flags);
++			tmp_reg = fhmci_readl(host->base + MCI_INTMASK);
++			tmp_reg |= DATA_INT_MASK;
++			fhmci_writel(tmp_reg, host->base + MCI_INTMASK);
++			spin_unlock_irqrestore(&host->lock, flags);
++
++			/* wait for the data transfer to complete */
++			ret = fh_mci_wait_data_complete(host);
++		} else {
++			/* command error in a data command */
++			fh_mci_idma_stop(host);
++		}
++
++		if (mrq->stop) {
++#ifdef CONFIG_SEND_AUTO_STOP
++			trans_cnt = fhmci_readl(host->base + MCI_TCBCNT);
++			/* send auto stop */
++			if ((trans_cnt == byte_cnt) && (!(host->is_tuning))) {
++				fhmci_trace(3, "byte_cnt = %d, trans_cnt = %d",
++						byte_cnt, trans_cnt);
++				ret = fh_mci_wait_auto_stop_complete(host);
++				if (ret) {
++					mrq->stop->error = -ETIMEDOUT;
++					goto request_end;
++				}
++			} else {
++#endif
++				/* send soft stop command */
++				fhmci_trace(3, "this time, send soft stop");
++				ret = fh_mci_exec_cmd(host, host->mrq->stop,
++						host->data);
++				if (ret) {
++					mrq->stop->error = ret;
++					goto request_end;
++				}
++				ret = fh_mci_wait_cmd_complete(host);
++				if (ret)
++					goto request_end;
++#ifdef CONFIG_SEND_AUTO_STOP
++			}
++#endif
++		}
++	}
++
++request_end:
++	/* clear MMC host intr */
++	spin_lock_irqsave(&host->lock, flags);
++	fhmci_writel(ALL_INT_CLR & (~SDIO_INT_STATUS),
++			host->base + MCI_RINTSTS);
++	spin_unlock_irqrestore(&host->lock, flags);
++
++	fh_mci_finish_request(host, mrq);
++
++
++	if (host->id == 1) {
++		ended++;
++		do_gettimeofday(x);
++
++		/* Perform the carry for the later subtraction by updating y. */
++		if (x->tv_usec < y->tv_usec) {
++			int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
++			y->tv_usec -= 1000000 * nsec;
++			y->tv_sec += nsec;
++		}
++		if (x->tv_usec - y->tv_usec > 1000000) {
++			int nsec = (x->tv_usec - y->tv_usec) / 1000000;
++			y->tv_usec += 1000000 * nsec;
++			y->tv_sec -= nsec;
++		}
++		/* elapsed time for this request, in microseconds */
++		if (((x->tv_sec - y->tv_sec) * 1000000
++				+ x->tv_usec - y->tv_usec) > max) {
++			max = (x->tv_sec - y->tv_sec)
++					* 1000000 + x->tv_usec - y->tv_usec;
++		}
++
++		sum += (x->tv_sec - y->tv_sec) * 1000000
++				+ x->tv_usec - y->tv_usec;
++
++		send_byte_count += byte_cnt;
++		cmds++;
++
++		if (jiffies - t > HZ) {
++			/*
++			 * pr_info("SDIO HOST send_byte_count:
++			 * %llu in %u cmds, max cost time: %lu,
++			 * sum: %lu, ave: %lu\ncalled: %lu, ended: %lu\n",
++			 * send_byte_count, cmds, max, sum,
++			 * sum / cmds, called, ended);
++			 */
++			t = jiffies;
++			send_byte_count = 0;
++			cmds = 0;
++			max = 0;
++			sum = 0;
++			called = 0;
++			ended = 0;
++		}
++	}
++}
++
++static void fh_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
++{
++	struct fhmci_host *host = mmc_priv(mmc);
++	unsigned int tmp_reg;
++	u32 ctrl;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(mmc);
++	fhmci_assert(ios);
++	fhmci_assert(host);
++
++	fhmci_trace(3, "ios->power_mode = %d ", ios->power_mode);
++	switch (ios->power_mode) {
++	case MMC_POWER_OFF:
++		/*
++		 * Set controller working voltage as 3.3V before power off.
++		 */
++		ctrl = fhmci_readl(host->base + MCI_UHS_REG);
++		ctrl &= ~FH_SDXC_CTRL_VDD_180;
++		fhmci_trace(3, "set voltage %d[addr 0x%x]", ctrl, MCI_UHS_REG);
++		fhmci_writel(ctrl, host->base + MCI_UHS_REG);
++
++		fh_mci_ctrl_power(host, POWER_OFF);
++		break;
++	case MMC_POWER_UP:
++	case MMC_POWER_ON:
++		fh_mci_ctrl_power(host, POWER_ON);
++		break;
++	}
++	fhmci_trace(3, "ios->clock = %d ", ios->clock);
++	if (ios->clock) {
++
++		fh_mci_control_cclk(host, DISABLE);
++		fh_mci_set_cclk(host, ios->clock);
++		fh_mci_control_cclk(host, ENABLE);
++
++		/* speed mode check: if DDR50, enable DDR mode */
++		if (ios->timing == MMC_TIMING_UHS_DDR50) {
++			ctrl = fhmci_readl(host->base + MCI_UHS_REG);
++			if (!(FH_SDXC_CTRL_DDR_REG & ctrl)) {
++				ctrl |= FH_SDXC_CTRL_DDR_REG;
++				fhmci_writel(ctrl, host->base + MCI_UHS_REG);
++			}
++		}
++	} else {
++		fh_mci_control_cclk(host, DISABLE);
++		if (ios->timing != MMC_TIMING_UHS_DDR50) {
++			ctrl = fhmci_readl(host->base + MCI_UHS_REG);
++			if (FH_SDXC_CTRL_DDR_REG & ctrl) {
++				ctrl &= ~FH_SDXC_CTRL_DDR_REG;
++				fhmci_writel(ctrl, host->base + MCI_UHS_REG);
++			}
++		}
++	}
++
++	/* set bus_width */
++	fhmci_trace(3, "ios->bus_width = %d ", ios->bus_width);
++	if (ios->bus_width == MMC_BUS_WIDTH_4) {
++		tmp_reg = fhmci_readl(host->base + MCI_CTYPE);
++		tmp_reg |= CARD_WIDTH;
++		fhmci_writel(tmp_reg, host->base + MCI_CTYPE);
++	} else {
++		tmp_reg = fhmci_readl(host->base + MCI_CTYPE);
++		tmp_reg &= ~CARD_WIDTH;
++		fhmci_writel(tmp_reg, host->base + MCI_CTYPE);
++	}
++}
++
++static void fhmci_enable_sdio_irq(struct mmc_host *host, int enable)
++{
++	struct fhmci_host *fh_host = mmc_priv(host);
++	unsigned int reg_value;
++	unsigned long flags;
++
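++	/*
++	 * Bit 16 of MCI_INTMASK gates the SDIO card interrupt (it matches
++	 * the SDIO_INT_STATUS bit of the raw interrupt status register);
++	 * the disable path clears the whole upper half to be safe.
++	 */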
++	if (enable) {
++		local_irq_save(flags);
++
++		reg_value = fhmci_readl(fh_host->base + MCI_INTMASK);
++		reg_value |= 0x10000;
++		fhmci_writel(reg_value, fh_host->base + MCI_INTMASK);
++		local_irq_restore(flags);
++	} else {
++		reg_value = fhmci_readl(fh_host->base + MCI_INTMASK);
++		reg_value &= ~0xffff0000;
++		fhmci_writel(reg_value, fh_host->base + MCI_INTMASK);
++	}
++
++}
++
++
++static int fh_mci_get_ro(struct mmc_host *mmc)
++{
++	unsigned ret;
++	struct fhmci_host *host = mmc_priv(mmc);
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(mmc);
++
++	ret = host->get_ro(host);
++
++	return ret;
++}
++
++/**
++ * @brief get the status of the SD card's CD pin
++ *
++ * @param [in] mmc host struct
++ *
++ * @return "1": SD card is in the slot, "0": SD card is not in the slot,
++ *         "2": SD card status has not changed
++ */
++static int fh_mci_get_cd(struct mmc_host *mmc)
++{
++	unsigned int i, curr_status, status[3] = {0}, detect_retry_count = 0;
++	struct fhmci_host *host = mmc_priv(mmc);
++
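++	/*
++	 * Debounce the card-detect line: sample it three times, 10 us apart,
++	 * until three consecutive reads agree; give up after retry_count
++	 * rounds of disagreement.
++	 */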
++	while (1) {
++		for (i = 0; i < 3; i++) {
++			status[i] = host->get_cd(host);
++			udelay(10);
++		}
++		if ((status[0] == status[1]) && (status[0] == status[2]))
++			break;
++
++		detect_retry_count++;
++		if (detect_retry_count >= retry_count) {
++			fhmci_error("card detect signal dithering, card detect error!");
++			goto err;
++		}
++	}
++	curr_status = status[0];
++	if (curr_status != host->card_status) {
++		host->card_status = curr_status;
++		if (curr_status != CARD_UNPLUGED) {
++			fh_mci_init_card(host);
++			printk(KERN_INFO "card%d connected!\n", host->id);
++			mmc->rescan_count = 0;
++			return 1;
++		} else {
++			printk(KERN_INFO "card%d disconnected!\n", host->id);
++			return 0;
++		}
++	}
++	if (mmc->card == NULL)
++		fh_mci_init_card(host);
++err:
++	return 2;
++}
++
++static const struct mmc_host_ops fh_mci_ops = {
++	.request = fh_mci_request,
++	.set_ios = fh_mci_set_ios,
++	.get_ro	 = fh_mci_get_ro,
++	.enable_sdio_irq = fhmci_enable_sdio_irq,
++	.get_cd  = fh_mci_get_cd,
++};
++
++static irqreturn_t hisd_irq(int irq, void *dev_id)
++{
++	struct fhmci_host *host = dev_id;
++	u32 state = 0;
++	int handle = 0;
++
++	state = fhmci_readl(host->base + MCI_RINTSTS);
++
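++	/*
++	 * Two interrupt sources are handled here: card SDIO interrupts are
++	 * forwarded to the MMC core via mmc_signal_sdio_irq(), and data
++	 * transfer completion/errors are latched into host->irq_status and
++	 * wake the request thread sleeping on host->intr_wait.
++	 */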
++#ifndef CONFIG_SEND_AUTO_STOP
++	/* bugfix: when a soft stop is sent to the SD card, the host
++	   reports an SDIO interrupt; this situation must be avoided */
++	if ((host->mmc->card != NULL)
++			&& (host->mmc->card->type == MMC_TYPE_SDIO)) {
++#endif
++		if (state & SDIO_INT_STATUS) {
++			if (fhmci_readl(host->base + MCI_INTMASK) & SDIO_INT_STATUS) {
++				fhmci_writel(SDIO_INT_STATUS, host->base + MCI_RINTSTS);
++				mmc_signal_sdio_irq(host->mmc);
++				handle = 1;
++			}
++		}
++#ifndef CONFIG_SEND_AUTO_STOP
++	}
++#endif
++
++	if (state & DATA_INT_MASK) {
++		handle = 1;
++		host->pending_events |= FHMCI_PEND_DTO_m;
++
++		host->irq_status = fhmci_readl(host->base + MCI_RINTSTS);
++		if (host->irq_status & (DCRC_INT_STATUS|SBE_INT_STATUS|EBE_INT_STATUS)) {
++#ifndef CONFIG_MACH_FH8830_FPGA
++			printk(KERN_ERR "SDC CRC error:%08x,.\n",
++				host->irq_status);
++#endif
++		}
++		fhmci_writel(DATA_INT_MASK, host->base + MCI_RINTSTS);
++
++		wake_up(&host->intr_wait);
++	}
++
++	/*if (state & 0x10000) {
++		handle = 1;
++		fhmci_writel(0x10000, host->base + MCI_RINTSTS);
++		mmc_signal_sdio_irq(host->mmc);
++	}*/
++
++	if (handle)
++		return IRQ_HANDLED;
++
++	return IRQ_NONE;
++}
++
++static int __devinit fh_mci_probe(struct platform_device *pdev)
++{
++	struct resource *regs;
++	struct mmc_host *mmc;
++	struct fhmci_host *host = NULL;
++	int ret = 0, irq;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(pdev);
++
++	mmc = mmc_alloc_host(sizeof(struct fhmci_host), &pdev->dev);
++	if (!mmc) {
++		fhmci_error("no mem for fhmci host controller!\n");
++		ret = -ENOMEM;
++		goto out;
++	}
++	host = mmc_priv(mmc);
++	host->pdata = pdev->dev.platform_data;
++	mmc->ops = &fh_mci_ops;
++	mmc->rescan_disable = FALSE;
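++	/*
++	 * The clock divider looks DW-MMC-style (an assumption from the
++	 * register layout): CLKDIV holds an 8-bit value N dividing by 2*N,
++	 * so the lowest card clock is bus_hz / 510.
++	 */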
++	mmc->f_min = DIV_ROUND_UP(host->pdata->bus_hz, 510);
++	mmc->f_max = host->pdata->bus_hz;
++
++	if (host->pdata->caps)
++		mmc->caps = host->pdata->caps;
++	else
++		mmc->caps = 0;
++
++	mmc->caps |= MMC_CAP_SDIO_IRQ;
++
++	if (pdev->id == 0) {
++		mmc_sd0 = mmc;
++	} else if (pdev->id == 1) {
++		mmc_sd1 = mmc;
++	} else {
++		fhmci_error("fhmci host id error!");
++		ret = -EINVAL;
++		goto out;
++	}
++	/* reload by this controller */
++	mmc->max_blk_count = 2048;
++	mmc->max_segs = 1024;
++	mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
++	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
++
++	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
++	mmc->ocr = mmc->ocr_avail;
++
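++	/* One coherent page shared with the controller, presumably holding
++	 * the internal DMA (IDMAC) descriptor chain. */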
++	host->dma_vaddr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
++			&host->dma_paddr, GFP_KERNEL);
++	if (!host->dma_vaddr) {
++		fhmci_error("no mem for fhmci dma!\n");
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	host->mmc = mmc;
++	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!regs) {
++		fhmci_error("request resource error!\n");
++		ret = -ENXIO;
++		goto out;
++	}
++
++	host->id = pdev->id;
++	host->base = ioremap_nocache(regs->start, regs->end - regs->start + 1);
++	if (!host->base) {
++		fhmci_error("no mem for fhmci base!\n");
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	if (host->pdata->init)
++		host->pdata->init(pdev->id, NULL, NULL);
++
++	if (host->pdata->get_cd)
++		host->get_cd = host->pdata->get_cd;
++	else
++		host->get_cd = fh_mci_sys_card_detect;
++
++	if (host->pdata->get_ro)
++		host->get_ro = host->pdata->get_ro;
++	else
++		host->get_ro = fh_mci_ctrl_card_readonly;
++
++	/* enable mmc clk */
++	fh_mci_sys_ctrl_init(host);
++
++	/* enable card */
++	spin_lock_init(&host->lock);
++	platform_set_drvdata(pdev, mmc);
++	mmc_add_host(mmc);
++
++	fhmci_writel(SD_POWER_ON, host->base + MCI_PWREN);
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0) {
++		printk(KERN_ERR "no IRQ defined!\n");
++		ret = -ENXIO;
++		goto out;
++	}
++
++	init_waitqueue_head(&host->intr_wait);
++
++	host->irq = irq;
++	ret = request_irq(irq, hisd_irq, 0, DRIVER_NAME, host);
++	if (ret) {
++		printk(KERN_ERR "request_irq error!\n");
++		goto out;
++	}
++
++
++	return 0;
++out:
++	if (host) {
++
++		if (host->base)
++			iounmap(host->base);
++
++		if (host->dma_vaddr)
++			dma_free_coherent(&pdev->dev, PAGE_SIZE,
++					host->dma_vaddr, host->dma_paddr);
++	}
++	if (mmc)
++		mmc_free_host(mmc);
++
++	return ret;
++}
++
++/* For Cypress 43438/43455 Wi-Fi modules.
++ * (Note: sd_id is the SDIO host index the Wi-Fi device is attached to.)
++ */
++void fh_sdio_card_scan(int sd_id)
++{
++	printk(KERN_ERR "%s-%d mmc_sd0->caps 0x%x mmc_sd1->caps 0x%x\n",
++		__func__, __LINE__,
++		mmc_sd0 ? mmc_sd0->caps : 0,
++		mmc_sd1 ? mmc_sd1->caps : 0);
++	if (sd_id == 1) {
++		if (mmc_sd1 == NULL) {
++			printk(KERN_ERR "%s-%d, mmc_sd1 not ready!\n",
++				__func__, __LINE__);
++			return;
++		}
++		mmc_sd1->caps &= ~MMC_CAP_NEEDS_POLL;
++		mmc_sd1->caps &= ~MMC_CAP_NONREMOVABLE;
++		printk(KERN_ERR "%s-%d, enter\n", __func__, __LINE__);
++		mmc_detect_change(mmc_sd1, 0);
++		msleep(100);
++		mmc_sd1->caps |= MMC_CAP_NONREMOVABLE;
++	} else if (sd_id == 0) {
++		if (mmc_sd0 == NULL) {
++			printk(KERN_ERR "%s-%d, mmc_sd0 not ready!\n",
++				__func__, __LINE__);
++			return;
++		}
++		mmc_sd0->caps &= ~MMC_CAP_NEEDS_POLL;
++		mmc_sd0->caps &= ~MMC_CAP_NONREMOVABLE;
++		printk(KERN_ERR "%s-%d, enter\n", __func__, __LINE__);
++		mmc_detect_change(mmc_sd0, 0);
++		msleep(100);
++		mmc_sd0->caps |= MMC_CAP_NONREMOVABLE;
++	} else {
++		printk(KERN_ERR "%s-%d, sd_id invalid!\n", __func__, __LINE__);
++	}
++	printk(KERN_ERR "%s-%d mmc_sd0->caps 0x%x mmc_sd1->caps 0x%x\n",
++		__func__, __LINE__,
++		mmc_sd0 ? mmc_sd0->caps : 0,
++		mmc_sd1 ? mmc_sd1->caps : 0);
++
++	return;
++}
++EXPORT_SYMBOL_GPL(fh_sdio_card_scan);
++
++static int __devexit fh_mci_remove(struct platform_device *pdev)
++{
++	struct mmc_host *mmc = platform_get_drvdata(pdev);
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(pdev);
++
++	platform_set_drvdata(pdev, NULL);
++
++	if (mmc) {
++		struct fhmci_host *host = mmc_priv(mmc);
++
++		free_irq(host->irq, host);
++		mmc_remove_host(mmc);
++		fh_mci_ctrl_power(host, POWER_OFF);
++		fh_mci_control_cclk(host, DISABLE);
++		iounmap(host->base);
++		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->dma_vaddr,
++				host->dma_paddr);
++		mmc_free_host(mmc);
++	}
++	return 0;
++}
++
++#ifdef CONFIG_PM
++static int fh_mci_suspend(struct platform_device *dev, pm_message_t state)
++{
++	struct mmc_host *mmc = platform_get_drvdata(dev);
++	struct fhmci_host *host;
++	int ret = 0;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(dev);
++
++	if (mmc) {
++		ret = mmc_suspend_host(mmc);
++
++		host = mmc_priv(mmc);
++	}
++
++	fhmci_trace(2, "end");
++
++	return ret;
++}
++
++static int fh_mci_resume(struct platform_device *dev)
++{
++	struct mmc_host *mmc = platform_get_drvdata(dev);
++	struct fhmci_host *host;
++	int ret = 0;
++
++	fhmci_trace(2, "begin");
++	fhmci_assert(dev);
++
++	if (mmc) {
++		host = mmc_priv(mmc);
++		/* enable mmc clk */
++		fh_mci_sys_ctrl_init(host);
++		/* enable card */
++		fh_mci_init_card(host);
++
++		ret = mmc_resume_host(mmc);
++	}
++
++	fhmci_trace(2, "end");
++
++	return ret;
++}
++#else
++#define fh_mci_suspend	NULL
++#define fh_mci_resume	NULL
++#endif
++
++
++static struct platform_driver fh_mci_driver = {
++	.probe         = fh_mci_probe,
++	.remove        = fh_mci_remove,
++	.suspend       = fh_mci_suspend,
++	.resume        = fh_mci_resume,
++	.driver        = {
++		.name          = DRIVER_NAME,
++	},
++};
++
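++/*
++ * Writing a non-zero value to /sys/class/fhmci/mmc_rescan clears the
++ * rescan counters of both slots so the MMC core will probe them again.
++ */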
++static ssize_t fh_mci_rescan_control(struct class *cls,
++		struct class_attribute *attr, const char *_buf, size_t _count)
++{
++	int cmd = 0;
++	int err = 0;
++
++	err = kstrtoint(_buf, 10, &cmd);
++	if (err)
++		return _count;
++
++	if (cmd) {
++		if (mmc_sd0)
++			mmc_sd0->rescan_count = 0;
++		if (mmc_sd1)
++			mmc_sd1->rescan_count = 0;
++	}
++	return _count;
++}
++
++static struct class *fhmci_rescan_class;
++
++static CLASS_ATTR(mmc_rescan, 0666, NULL, fh_mci_rescan_control);
++
++static void fh_mci_rescan_init(void)
++{
++	int err = 0;
++
++	fhmci_rescan_class = class_create(THIS_MODULE, "fhmci");
++	if (IS_ERR(fhmci_rescan_class)) {
++		fhmci_error("fhmci_rescan_class: create class failed!");
++		return;
++	}
++	err = class_create_file(fhmci_rescan_class, &class_attr_mmc_rescan);
++	if (err)
++		fhmci_error("fhmci_rescan_class: create class file failed!");
++}
++
++static int __init fh_mci_init(void)
++{
++	int ret = 0;
++
++	fhmci_trace(2, "mci init begin");
++	fh_mci_rescan_init();
++	ret = platform_driver_register(&fh_mci_driver);
++	if (ret)
++		fhmci_error("Platform driver registration failed!");
++
++	return ret;
++}
++
++static void __exit fh_mci_exit(void)
++{
++	fhmci_trace(2, "begin");
++	platform_driver_unregister(&fh_mci_driver);
++}
++module_init(fh_mci_init);
++module_exit(fh_mci_exit);
++MODULE_LICENSE("GPL");
+diff --git a/drivers/mmc/host/fhmci/fhmci_io.c b/drivers/mmc/host/fhmci/fhmci_io.c
+new file mode 100644
+index 00000000..6c99aa7a
+--- /dev/null
++++ b/drivers/mmc/host/fhmci/fhmci_io.c
+@@ -0,0 +1,63 @@
++
++#include <mach/clock.h>
++
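++/*
++ * Compile-time selection of the SDC sampling clock phase ("SIMPLE" is
++ * presumably a misspelling of "sample"); exactly one of the four macros
++ * below should be defined.  The drive phase is fixed to 180 degrees in
++ * fh_mci_sys_ctrl_init().
++ */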
++#define SIMPLE_0
++//#define SIMPLE_90
++//#define SIMPLE_180
++//#define SIMPLE_270
++
++static int fh_mci_sys_ctrl_init(struct fhmci_host *host)
++{
++	struct clk *sdc_clk;
++	u32 pctrl_gpio;
++	u32 drv_shift;
++	u32 sam_shift;
++	int ret;
++	u32 reg;
++	host->pdata->fifo_depth =
++		(fhmci_readl(host->base + MCI_FIFOTH) & 0xfff0000) >> 16;
++	if (host->id == 0) {
++		pctrl_gpio = 5;
++		drv_shift = 20;
++		sam_shift = 16;
++		sdc_clk = clk_get(NULL, "sdc0_clk");
++
++	} else {
++		pctrl_gpio = 6;
++		drv_shift = 12;
++		sam_shift = 8;
++		sdc_clk = clk_get(NULL, "sdc1_clk");
++
++	}
++
++	/* Power on. */
++	ret = gpio_request(pctrl_gpio, NULL);
++	if (ret) {
++		fhmci_error("gpio request err!");
++		return ret;
++	}
++	gpio_direction_output(pctrl_gpio, 0);
++	gpio_free(pctrl_gpio);
++
++	/* Adjust clock phase. */
++	clk_enable(sdc_clk);
++	clk_set_rate(sdc_clk, 50000000);
++	reg = clk_get_clk_sel();
++	reg &= ~(3 << drv_shift);
++	reg &= ~(3 << sam_shift);
++	reg |= (2 << drv_shift); /* drv phase fixed to 180 degrees */
++#ifdef SIMPLE_0
++	reg |= (0 << sam_shift);
++#endif
++#ifdef SIMPLE_90
++	reg |= (1 << sam_shift);
++#endif
++#ifdef SIMPLE_180
++	reg |= (2 << sam_shift);
++#endif
++#ifdef SIMPLE_270
++	reg |= (3 << sam_shift);
++#endif
++	clk_set_clk_sel(reg);
++	return 0;
++}
+diff --git a/drivers/mmc/host/fhmci/fhmci_reg.h b/drivers/mmc/host/fhmci/fhmci_reg.h
+new file mode 100644
+index 00000000..b4a215cf
+--- /dev/null
++++ b/drivers/mmc/host/fhmci/fhmci_reg.h
+@@ -0,0 +1,182 @@
++#ifndef _FH_MCI_REG_H_
++#define _FH_MCI_REG_H_
++
++#define MCI_CTRL		0x00
++#define MCI_PWREN		0x04
++#define MCI_CLKDIV		0x08
++#define MCI_CLKSRC		0x0C
++#define MCI_CLKENA		0x10
++#define MCI_TIMEOUT		0x14
++#define MCI_CTYPE		0x18
++#define MCI_BLKSIZ		0x1c
++#define MCI_BYTCNT		0x20
++#define MCI_INTMASK		0x24
++#define MCI_CMDARG		0x28
++#define MCI_CMD			0x2C
++#define MCI_RESP0		0x30
++#define MCI_RESP1		0x34
++#define MCI_RESP2		0x38
++#define MCI_RESP3		0x3C
++#define MCI_MINTSTS		0x40
++#define MCI_RINTSTS		0x44
++#define MCI_STATUS		0x48
++#define MCI_FIFOTH		0x4C
++#define MCI_CDETECT		0x50
++#define MCI_WRTPRT		0x54
++#define MCI_GPIO		0x58
++#define MCI_TCBCNT		0x5C
++#define MCI_TBBCNT		0x60
++#define MCI_DEBNCE		0x64
++#define MCI_USRID		0x68
++#define MCI_VERID		0x6C
++#define MCI_HCON		0x70
++#define MCI_UHS_REG		0x74
++#define MCI_BMOD		0x80
++#define MCI_DBADDR		0x88
++#define MCI_IDSTS		0x8C
++#define MCI_IDINTEN		0x90
++#define MCI_DSCADDR		0x94
++#define MCI_BUFADDR		0x98
++#define MCI_READ_THRESHOLD_SIZE		0x100
++#define MCI_UHS_EXT		0x108
++/* MCI_UHS_REG(0x74) details */
++#define FH_SDXC_CTRL_VDD_180	(1<<0)
++#define FH_SDXC_CTRL_DDR_REG	(1<<16)
++
++/* MCI_BMOD(0x80) details */
++#define BMOD_SWR		(1<<0)
++#define BURST_INCR		(1<<1)
++#define BURST_8			(0x2<<8)
++
++/* MCI_CTRL(0x00) details */
++#define CTRL_RESET             (1<<0)
++#define FIFO_RESET             (1<<1)
++#define DMA_RESET              (1<<2)
++#define INTR_EN                (1<<4)
++#define USE_INTERNAL_DMA       (1<<25)
++
++/* IDMAC DEST1 details */
++#define DMA_BUFFER		0x2000
++#define MAX_DMA_DES		(20480)
++
++/* MCI_CDETECT(0x50) details */
++#define FHMCI_CARD0		(1<<0)
++
++/* MCI_TIMEOUT(0x14) details: */
++/* bit 31-8: data read timeout param */
++#define DATA_TIMEOUT		(0xffffff<<8)
++
++/* bit 7-0: response timeout param */
++#define RESPONSE_TIMEOUT	0xff
++
++/* bit 0: enable of card clk*/
++#define CCLK_ENABLE		(1<<0)
++
++/* IDMAC DEST0 details */
++#define DMA_DES_OWN		(1<<31)
++#define DMA_DES_NEXT_DES	(1<<4)
++#define DMA_DES_FIRST_DES	(1<<3)
++#define DMA_DES_LAST_DES	(1<<2)
++
++/* MCI_BMOD(0x80) details */
++#define BMOD_DMA_EN		(1<<7)
++
++/* MCI_CTYPE(0x18) details */
++#define CARD_WIDTH		(0x1<<0)
++
++/* MCI_INTMASK(0x24) details:
++   bit 16-1: mask MMC host controller each interrupt
++*/
++#define ALL_INT_MASK				0x1ffff
++#define DTO_INT_MASK				(1<<3)
++
++/* bit[18:16] sampling phase */
++#define CLK_SMPL_PHS_MASK			(7<<16)
++
++/* MCI_CMD(0x2c) details:
++   bit 31: cmd execute or load start param of interface clk bit
++*/
++#define START_CMD		(1<<31)
++
++
++/* MCI_INTSTS(0x44) details */
++/***************************************************************/
++/* bit 16: sdio interrupt status */
++#define SDIO_INT_STATUS		(0x1<<16)
++
++/* bit 15: end-bit error (read)/write no CRC interrupt status */
++#define EBE_INT_STATUS		(0x1<<15)
++
++/* bit 14: auto command done interrupt status */
++#define ACD_INT_STATUS		(0x1<<14)
++
++/* bit 13: start bit error interrupt status */
++#define SBE_INT_STATUS		(0x1<<13)
++
++/* bit 12: hardware locked write error interrupt status */
++#define HLE_INT_STATUS		(0x1<<12)
++
++/* bit 11: FIFO underrun/overrun error interrupt status */
++#define FRUN_INT_STATUS		(0x1<<11)
++
++/* bit 10: data starvation-by-host timeout interrupt status */
++#define HTO_INT_STATUS		(0x1<<10)
++
++/* bit 10: volt_switch to 1.8v for sdxc */
++#define VOLT_SWITCH_INT_STATUS		(0x1<<10)
++
++/* bit 9: data read timeout interrupt status */
++#define DRTO_INT_STATUS		(0x1<<9)
++
++/* bit 8: response timeout interrupt status */
++#define RTO_INT_STATUS		(0x1<<8)
++
++/* bit 7: data CRC error interrupt status */
++#define DCRC_INT_STATUS		(0x1<<7)
++
++/* bit 6: response CRC error interrupt status */
++#define RCRC_INT_STATUS		(0x1<<6)
++
++/* bit 5: receive FIFO data request interrupt status */
++#define RXDR_INT_STATUS		(0x1<<5)
++
++/* bit 4: transmit FIFO data request interrupt status */
++#define TXDR_INT_STATUS		(0x1<<4)
++
++/* bit 3: data transfer Over interrupt status */
++#define DTO_INT_STATUS		(0x1<<3)
++
++/* bit 2: command done interrupt status */
++#define CD_INT_STATUS		(0x1<<2)
++
++/* bit 1: response error interrupt status */
++#define RE_INT_STATUS		(0x1<<1)
++#define DATA_INT_MASK	(DTO_INT_STATUS | DCRC_INT_STATUS \
++		| SBE_INT_STATUS | EBE_INT_STATUS)
++/***************************************************************/
++
++/* MCI_RINTSTS(0x44) details:bit 16-1: clear
++   MMC host controller each interrupt but
++   hardware locked write error interrupt
++*/
++#define ALL_INT_CLR       0x1efff
++
++#define PHASE_SHIFT		0x1030000
++#define READ_THRESHOLD_SIZE	0x2000001
++
++/* MCI_STATUS(0x48) details */
++#define DATA_BUSY		(0x1<<9)
++
++/* MCI_FIFOTH(0x4c) details */
++
++#define BURST_SIZE		(0x2<<28)
++#define RX_WMARK		(0x7<<16)
++#define TX_WMARK		0x8
++
++/*
++#define BURST_SIZE		(0x6<<28)
++#define RX_WMARK		(0x7f<<16)
++#define TX_WMARK		0x80
++*/
++
++#endif
+diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
+index 4be8373d..98f2b8b0 100644
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -330,6 +330,8 @@ source "drivers/mtd/onenand/Kconfig"
+ 
+ source "drivers/mtd/lpddr/Kconfig"
+ 
++source "drivers/mtd/spi-nand/Kconfig"
++
+ source "drivers/mtd/ubi/Kconfig"
+ 
+ endif # MTD
+diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
+index 39664c42..baf5b647 100644
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -31,4 +31,5 @@ inftl-objs		:= inftlcore.o inftlmount.o
+ 
+ obj-y		+= chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
+ 
++obj-$(CONFIG_MTD_SPI_NAND)	+= spi-nand/
+ obj-$(CONFIG_MTD_UBI)		+= ubi/
+diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
+index e790f388..398cd61a 100644
+--- a/drivers/mtd/cmdlinepart.c
++++ b/drivers/mtd/cmdlinepart.c
+@@ -48,7 +48,7 @@
+ #define ERRP "mtd: "
+ 
+ /* debug macro */
+-#if 0
++#if 1
+ #define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
+ #else
+ #define dbg(x)
+diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
+index 35180e47..0cf8b67b 100644
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -166,10 +166,26 @@ static inline int write_disable(struct m25p *flash)
+  */
+ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
+ {
++	int ret;
+ 	switch (JEDEC_MFR(jedec_id)) {
+ 	case CFI_MFR_MACRONIX:
++	case CFI_MFR_ST: /* Micron, actually */
++	case 0xC8: /* GD */
+ 		flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
+-		return spi_write(flash->spi, flash->command, 1);
++		ret = spi_write(flash->spi, flash->command, 1);
++		return ret;
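++	/*
++	 * Winbond parts also latch an extended address byte: when leaving
++	 * 4-byte mode, issue WREN (0x06) and clear the Extended Address
++	 * Register (opcode 0xC5) so subsequent 3-byte reads start from the
++	 * low 16 MiB again.
++	 */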
++	case 0xEF: /* Winbond */
++		flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
++		ret = spi_write(flash->spi, flash->command, 1);
++		if (!enable) {
++			flash->command[0] = 0x06;
++			spi_write(flash->spi, flash->command, 1);
++			flash->command[0] = 0xc5;
++			flash->command[1] = 0x00;
++			ret = spi_write(flash->spi, flash->command, 2);
++		}
++		return ret;
+ 	default:
+ 		/* Spansion style */
+ 		flash->command[0] = OPCODE_BRWR;
+@@ -178,6 +194,7 @@ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
+ 	}
+ }
+ 
++
+ /*
+  * Service routine to read status register until ready, or timeout occurs.
+  * Returns non-zero if error.
+@@ -192,7 +209,7 @@ static int wait_till_ready(struct m25p *flash)
+ 	do {
+ 		if ((sr = read_sr(flash)) < 0)
+ 			break;
+-		else if (!(sr & SR_WIP))
++		else if (!(sr & (SR_WIP | SR_WEL)))
+ 			return 0;
+ 
+ 		cond_resched();
+@@ -202,6 +219,43 @@ static int wait_till_ready(struct m25p *flash)
+ 	return 1;
+ }
+ 
++
++static int reset_chip(struct m25p *flash, u32 jedec_id)
++{
++	int ret;
++	mutex_lock(&flash->lock);
++
++	/* Wait till previous write/erase is done. */
++	if (wait_till_ready(flash)) {
++		mutex_unlock(&flash->lock);
++		return 1;
++	}
++
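++	/*
++	 * Per-vendor reset sequences: Spansion uses mode-bit reset (0xFF)
++	 * followed by the legacy software reset (0xF0); Winbond and
++	 * GigaDevice use enable-reset (0x66) + reset (0x99).  Micron and
++	 * Macronix parts are left untouched.
++	 */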
++	switch (JEDEC_MFR(jedec_id)) {
++	case 0x9F: /* S25FL128/256S Spansion */
++		flash->command[0] = 0xFF; /* MBR: mode bit reset */
++		ret = spi_write(flash->spi, flash->command, 1);
++		flash->command[0] = 0xF0; /* legacy software reset */
++		ret = spi_write(flash->spi, flash->command, 1);
++		mutex_unlock(&flash->lock);
++		return ret;
++	case 0xef:	/* Winbond */
++	case 0xc8:	/* GD */
++		flash->command[0] = 0x66;
++		ret = spi_write(flash->spi, flash->command, 1);
++		flash->command[0] = 0x99;
++		ret = spi_write(flash->spi, flash->command, 1);
++		udelay(100);
++		mutex_unlock(&flash->lock);
++		return ret;
++	case CFI_MFR_MACRONIX:
++	case CFI_MFR_ST: /* Micron, actually */
++	default:
++		mutex_unlock(&flash->lock);
++		return 0;
++	}
++}
++
+ /*
+  * Erase the whole flash memory
+  *
+@@ -707,7 +761,7 @@ static const struct spi_device_id m25p_ids[] = {
+ 	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, 0) },
+ 	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, 0) },
+ 	{ "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32, SECT_4K) },
+-	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
++	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, 0) },
+ 
+ 	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
+ 	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K) },
+@@ -760,15 +814,29 @@ static const struct spi_device_id m25p_ids[] = {
+ 	{ "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
+ 	{ "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
+ 	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
++	{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64, SECT_4K) },
+ 	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+-	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
++	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, 0) },
++	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
++	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
++	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, 0) }, /* SECT_4K intentionally dropped */
++	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, 0) },
+ 
+ 	/* Catalyst / On Semiconductor -- non-JEDEC */
+-	{ "cat25c11", CAT25_INFO(  16, 8, 16, 1) },
+-	{ "cat25c03", CAT25_INFO(  32, 8, 16, 2) },
+-	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2) },
+-	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2) },
++	{ "cat25c11", CAT25_INFO(16, 8, 16, 1) },
++	{ "cat25c03", CAT25_INFO(32, 8, 16, 2) },
++	{ "cat25c09", CAT25_INFO(128, 8, 32, 2) },
++	{ "cat25c17", CAT25_INFO(256, 8, 32, 2) },
+ 	{ "cat25128", CAT25_INFO(2048, 8, 64, 2) },
++
++	/*for GD flash..*/
++	/* GD flash */
++	{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, 0) },
++	{ "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32, 0) },
++	/* XMC flash */
++	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, 0) },
++	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, 0) },
++
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(spi, m25p_ids);
+@@ -909,6 +977,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
+ 	flash->mtd.size = info->sector_size * info->n_sectors;
+ 	flash->mtd.erase = m25p80_erase;
+ 	flash->mtd.read = m25p80_read;
++	flash->mtd.priv = (void *)info->jedec_id;
+ 
+ 	/* sst flash chips use AAI word program */
+ 	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
+@@ -1005,6 +1074,22 @@ static int __devinit m25p_probe(struct spi_device *spi)
+ 		-ENODEV : 0;
+ }
+ 
++static void m25p_shutdown(struct spi_device *spi)
++{
++	struct m25p	*flash = dev_get_drvdata(&spi->dev);
++	u32 jedec = (u32)flash->mtd.priv;
++
++	dev_err(&spi->dev, "[m25] shutdown: resetting flash\n");
++
++	if (flash->addr_width == 4) {
++		set_4byte(flash, jedec, 0);
++		flash->addr_width = 3;
++	}
++
++	if (reset_chip(flash, jedec))
++		dev_err(&spi->dev, "[m25] reset chip error...\n");
++}
++
+ 
+ static int __devexit m25p_remove(struct spi_device *spi)
+ {
+@@ -1030,7 +1115,8 @@ static struct spi_driver m25p80_driver = {
+ 	.id_table	= m25p_ids,
+ 	.probe	= m25p_probe,
+ 	.remove	= __devexit_p(m25p_remove),
+-
++	/* shutdown method resets the SPI flash before reboot */
++	.shutdown	= m25p_shutdown,
+ 	/* REVISIT: many of these chips have deep power-down modes, which
+ 	 * should clearly be entered on suspend() to minimize power use.
+ 	 * And also when they're otherwise idle...
+diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
+index 3326615a..757cb7a9 100644
+--- a/drivers/mtd/mtdblock.c
++++ b/drivers/mtd/mtdblock.c
+@@ -268,7 +268,8 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
+ {
+ 	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+ 	if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
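++		/* kmalloc (rather than vmalloc) keeps the erase-block cache
++		 * physically contiguous -- presumably required by the
++		 * underlying flash driver on this platform. */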
+-		mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
++		mtdblk->cache_data = kmalloc(mtdblk->mbd.mtd->erasesize,
++					     GFP_KERNEL);
+ 		if (!mtdblk->cache_data)
+ 			return -EINTR;
+ 		/* -EINTR is not really correct, but it is the best match
+@@ -324,7 +325,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
+ 		/* It was the last usage. Free the cache */
+ 		if (mbd->mtd->sync)
+ 			mbd->mtd->sync(mbd->mtd);
+-		vfree(mtdblk->cache_data);
++		kfree(mtdblk->cache_data);
+ 	}
+ 
+ 	mutex_unlock(&mtdblks_lock);
+diff --git a/drivers/mtd/spi-nand/Kconfig b/drivers/mtd/spi-nand/Kconfig
+new file mode 100644
+index 00000000..b4da8f5b
+--- /dev/null
++++ b/drivers/mtd/spi-nand/Kconfig
+@@ -0,0 +1,7 @@
++menuconfig MTD_SPI_NAND
++	tristate "SPI-NAND device Support"
++	depends on MTD_NAND && SPI
++	help
++	  This is the framework for the SPI NAND which can be used by the SPI
++	  device drivers and the SPI-NAND device drivers.
++
+diff --git a/drivers/mtd/spi-nand/Makefile b/drivers/mtd/spi-nand/Makefile
+new file mode 100644
+index 00000000..971e7a9d
+--- /dev/null
++++ b/drivers/mtd/spi-nand/Makefile
+@@ -0,0 +1,2 @@
++
++obj-$(CONFIG_MTD_SPI_NAND) += spi-nand-base.o spi-nand-bbt.o spi-nand-device.o spi-nand-ids.o
+diff --git a/drivers/mtd/spi-nand/spi-nand-base.c b/drivers/mtd/spi-nand/spi-nand-base.c
+new file mode 100644
+index 00000000..0c34a5d9
+--- /dev/null
++++ b/drivers/mtd/spi-nand/spi-nand-base.c
+@@ -0,0 +1,2073 @@
++/**
++* spi-nand-base.c
++*
++* Copyright (c) 2009-2014 Micron Technology, Inc.
++*
++* Derived from nand_base.c
++*
++* This program is free software; you can redistribute it and/or
++* modify it under the terms of the GNU General Public License
++* as published by the Free Software Foundation; either version 2
++* of the License, or (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++* GNU General Public License for more details.
++*/
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/jiffies.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++#include <linux/mtd/spi-nand.h>
++#include <linux/mtd/bbm.h>
++#include <linux/spi/spi.h>
++#include <linux/slab.h>
++#include "spi-nand-ids.h"
++
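++/* Set to non-zero to trace every command issued by spi_nand_send_cmd(). */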
++int fh_start_debug;
++//#define SPINAND_BBT_DEBUG
++#ifdef SPINAND_BBT_DEBUG
++#define fh_dev_debug	dev_err
++#else
++#define fh_dev_debug(...)
++#endif
++
++static int spi_nand_erase(struct mtd_info *mtd, struct erase_info *einfo);
++
++/**
++ * spi_nand_get_device - [GENERIC] Get chip for selected access
++ * @mtd: MTD device structure
++ * @new_state: the state which is requested
++ *
++ * Get the device and lock it for exclusive access
++ */
++static int spi_nand_get_device(struct mtd_info *mtd, int new_state)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	DECLARE_WAITQUEUE(wait, current);
++
++	/*
++	 * Grab the lock and see if the device is available
++	 */
++	while (1) {
++		spin_lock(&this->chip_lock);
++		if (this->state == FL_READY) {
++			this->state = new_state;
++			spin_unlock(&this->chip_lock);
++			break;
++		}
++		if (new_state == FL_PM_SUSPENDED) {
++			spin_unlock(&this->chip_lock);
++			return (this->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
++		}
++		set_current_state(TASK_UNINTERRUPTIBLE);
++		add_wait_queue(&this->wq, &wait);
++		spin_unlock(&this->chip_lock);
++		schedule();
++		remove_wait_queue(&this->wq, &wait);
++	}
++	return 0;
++}
++
++/**
++ * spi_nand_release_device - [GENERIC] release chip
++ * @mtd: MTD device structure
++ *
++ * Deselect, release chip lock and wake up anyone waiting on the device
++ */
++static void spi_nand_release_device(struct mtd_info *mtd)
++{
++	struct spi_nand_chip *this = mtd->priv;
++
++	/* Release the chip */
++	spin_lock(&this->chip_lock);
++	this->state = FL_READY;
++	wake_up(&this->wq);
++	spin_unlock(&this->chip_lock);
++}
++
++/**
++ * __spi_nand_do_read_page - [INTERN] read data from flash to buffer
++ * @mtd: MTD device structure
++ * @page_addr: page address/raw address
++ * @column: column address
++ * @raw: without ecc or not
++ * @corrected: how many bit error corrected
++ *
++ * read a page to buffer pointed by chip->buf
++ */
++static int __spi_nand_do_read_page(struct mtd_info *mtd, u32 page_addr,
++				   u32 column, bool raw, int *corrected)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	int ret, ecc_error;
++	u8 status;
++
++	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
++
++	/*read data from chip*/
++	memset(chip->buf, 0, chip->page_size + chip->page_spare_size);
++	if (raw) {
++		ret = chip->disable_ecc(chip);
++		if (ret < 0) {
++			pr_debug("disable ecc failed\n");
++			return ret;
++		}
++	}
++	ret = chip->load_page(chip, page_addr);
++	if (ret < 0) {
++		pr_debug("error %d loading page 0x%x to cache\n",
++			 ret, page_addr);
++		return ret;
++	}
++	ret = chip->waitfunc(chip, &status);
++	if (ret < 0) {
++		pr_debug("error %d waiting page 0x%x to cache\n",
++			 ret, page_addr);
++		return ret;
++	}
++	chip->get_ecc_status(chip, status, corrected, &ecc_error);
++	/*
++	 * If there's an ECC error, print a message and notify MTD
++	 * about it. Then complete the read, to load actual data on
++	 * the buffer (instead of the status result).
++	 */
++	if (ecc_error) {
++		pr_warn("internal ECC error reading page 0x%x with status 0x%02x\n",
++			 page_addr, status);
++		mtd->ecc_stats.failed++;
++	} else if (*corrected)
++		mtd->ecc_stats.corrected += *corrected;
++	/* Get page from the device cache into our internal buffer */
++	ret = chip->read_cache(chip, page_addr, column,
++			       chip->page_size + chip->page_spare_size - column,
++			       chip->buf + column);
++	if (ret < 0) {
++		pr_debug("error %d reading page 0x%x from cache\n",
++			 ret, page_addr);
++		return ret;
++	}
++	if (raw) {
++		ret = chip->enable_ecc(chip);
++		if (ret < 0) {
++			pr_debug("enable ecc failed\n");
++			return ret;
++		}
++	}
++
++	return 0;
++}
++
++/**
++ * spi_nand_do_read_page - [INTERN] read a page from flash to buffer
++ * @mtd: MTD device structure
++ * @page_addr: page address/raw address
++ * @raw: without ecc or not
++ * @corrected: how many bit error corrected
++ *
++ * read a page to buffer pointed by chip->buf
++ */
++static int spi_nand_do_read_page(struct mtd_info *mtd, u32 page_addr,
++				 bool raw, int *corrected)
++{
++	return __spi_nand_do_read_page(mtd, page_addr, 0, raw, corrected);
++}
++
++/**
++ * spi_nand_do_read_page_oob - [INTERN] read page oob from flash to buffer
++ * @mtd: MTD device structure
++ * @page_addr: page address/raw address
++ * @raw: without ecc or not
++ * @corrected: how many bit error corrected
++ *
++ * read page oob to buffer pointed by chip->oobbuf
++ */
++static int spi_nand_do_read_page_oob(struct mtd_info *mtd, u32 page_addr,
++				     bool raw, int *corrected)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++
++	return __spi_nand_do_read_page(mtd, page_addr, chip->page_size,
++				       raw, corrected);
++}
++
++
++/**
++ * __spi_nand_do_write_page - [INTERN] write data from buffer to flash
++ * @mtd: MTD device structure
++ * @page_addr: page address/raw address
++ * @column :column address
++ * @raw: without ecc or not
++ *
++ * write data from buffer pointed by chip->buf to flash
++ */
++static int __spi_nand_do_write_page(struct mtd_info *mtd, u32 page_addr,
++				    u32 column, bool raw)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	u8 status;
++	bool p_fail = false;
++	bool p_timeout = false;
++	int ret = 0;
++
++	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
++
++	if (raw) {
++		ret = chip->disable_ecc(chip);
++		if (ret < 0) {
++			pr_debug("disable ecc failed\n");
++			return ret;
++		}
++	}
++	ret = chip->write_enable(chip);
++	if (ret < 0) {
++		pr_debug("write enable command failed\n");
++		return ret;
++	}
++	/* Store the page to cache */
++	ret = chip->store_cache(chip, page_addr, column,
++				chip->page_size + chip->page_spare_size - column,
++				chip->buf + column);
++	if (ret < 0) {
++		pr_debug("error %d storing page 0x%x to cache\n",
++			 ret, page_addr);
++		return ret;
++	}
++	/* Get page from the device cache into our internal buffer */
++	ret = chip->write_page(chip, page_addr);
++	if (ret < 0) {
++		pr_debug("error %d reading page 0x%x from cache\n",
++			 ret, page_addr);
++		return ret;
++	}
++	ret = chip->waitfunc(chip, &status);
++	if (ret < 0) {
++		pr_info("error %d write page 0x%x timeout\n",
++			 ret, page_addr);
++		return ret;
++	}
++	if ((status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL) {
++		pr_debug("program page 0x%x failed\n", page_addr);
++		p_fail = true;
++	}
++
++	if ((status & STATUS_OIP_MASK) == STATUS_BUSY) {
++		pr_debug("program page 0x%x timeout\n", page_addr);
++		p_timeout = true;
++	}
++	if (raw) {
++		ret = chip->enable_ecc(chip);
++		if (ret < 0) {
++			pr_debug("enable ecc failed\n");
++			return ret;
++		}
++	}
++	if (p_fail || p_timeout)
++		ret = -EIO;
++
++	return ret;
++}
++
++/**
++ * spi_nand_do_write_page - [INTERN] write page from buffer to flash
++ * @mtd: MTD device structure
++ * @page_addr: page address/raw address
++ * @raw: without ecc or not
++ *
++ * write page from buffer pointed by chip->buf to flash
++ */
++static int spi_nand_do_write_page(struct mtd_info *mtd, u32 page_addr,
++				  bool raw)
++{
++	return __spi_nand_do_write_page(mtd, page_addr, 0, raw);
++}
++
++/**
++ * spi_nand_do_write_page_oob - [INTERN] write oob from buffer to flash
++ * @mtd: MTD device structure
++ * @page_addr: page address/raw address
++ * @raw: without ecc or not
++ *
++ * write oob from buffer pointed by chip->oobbuf to flash
++ */
++static int spi_nand_do_write_page_oob(struct mtd_info *mtd, u32 page_addr,
++				      bool raw)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++
++	return __spi_nand_do_write_page(mtd, page_addr, chip->page_size, raw);
++}
++
++
++/**
++ * spi_nand_transfer_oob - [INTERN] Transfer oob to client buffer
++ * @chip: SPI-NAND device structure
++ * @oob: oob destination address
++ * @ops: oob ops structure
++ * @len: size of oob to transfer
++ */
++static void spi_nand_transfer_oob(struct spi_nand_chip *chip, u8 *oob,
++				  struct mtd_oob_ops *ops, size_t len)
++{
++	switch (ops->mode) {
++
++	case MTD_OOB_PLACE: /*MTD_OPS_PLACE_OOB:*/
++	case MTD_OOB_RAW: /*MTD_OPS_RAW:*/
++		memcpy(oob, chip->oobbuf + ops->ooboffs, len);
++		return;
++
++	case MTD_OOB_AUTO: { /*MTD_OPS_AUTO_OOB:*/
++		struct nand_oobfree *free = chip->ecclayout->oobfree;
++		uint32_t boffs = 0, roffs = ops->ooboffs;
++		size_t bytes = 0;
++
++		for (; free->length && len; free++, len -= bytes) {
++			/* Read request not from offset 0? */
++			if (unlikely(roffs)) {
++				if (roffs >= free->length) {
++					roffs -= free->length;
++					continue;
++				}
++				boffs = free->offset + roffs;
++				bytes = min_t(size_t, len,
++					      (free->length - roffs));
++				roffs = 0;
++			} else {
++				bytes = min_t(size_t, len, free->length);
++				boffs = free->offset;
++			}
++			memcpy(oob, chip->oobbuf + boffs, bytes);
++			oob += bytes;
++		}
++		return;
++	}
++	default:
++		BUG();
++	}
++}
++
++/**
++ * spi_nand_fill_oob - [INTERN] Transfer client buffer to oob
++ * @chip: SPI-NAND device structure
++ * @oob: oob data buffer
++ * @len: oob data write length
++ * @ops: oob ops structure
++ */
++static void spi_nand_fill_oob(struct spi_nand_chip *chip, uint8_t *oob,
++			      size_t len, struct mtd_oob_ops *ops)
++{
++	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
++	memset(chip->oobbuf, 0xff, chip->page_spare_size);
++
++	switch (ops->mode) {
++
++	case MTD_OOB_PLACE:
++	case MTD_OOB_RAW:
++		memcpy(chip->oobbuf + ops->ooboffs, oob, len);
++		return;
++
++	case MTD_OOB_AUTO: {
++		struct nand_oobfree *free = chip->ecclayout->oobfree;
++		uint32_t boffs = 0, woffs = ops->ooboffs;
++		size_t bytes = 0;
++
++		for (; free->length && len; free++, len -= bytes) {
++			/* Write request not from offset 0? */
++			if (unlikely(woffs)) {
++				if (woffs >= free->length) {
++					woffs -= free->length;
++					continue;
++				}
++				boffs = free->offset + woffs;
++				bytes = min_t(size_t, len,
++					      (free->length - woffs));
++				woffs = 0;
++			} else {
++				bytes = min_t(size_t, len, free->length);
++				boffs = free->offset;
++			}
++			memcpy(chip->oobbuf + boffs, oob, bytes);
++			oob += bytes;
++		}
++		return;
++	}
++	default:
++		BUG();
++	}
++}
++
++/**
++ * spi_nand_do_read_ops - [INTERN] Read data with ECC
++ * @mtd: MTD device structure
++ * @from: offset to read from
++ * @ops: oob ops structure
++ *
++ * Internal function. Called with chip held.
++ */
++static int spi_nand_do_read_ops(struct mtd_info *mtd, loff_t from,
++				struct mtd_oob_ops *ops)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	int page_addr, page_offset, size;
++	int ret;
++	unsigned int corrected = 0;
++	struct mtd_ecc_stats stats;
++	unsigned int max_bitflips = 0;
++	int readlen = ops->len;
++	int oobreadlen = ops->ooblen;
++	int ooblen = ops->mode == MTD_OOB_AUTO ?
++		     mtd->oobavail : mtd->oobsize;
++
++	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
++
++	/* Do not allow reads past end of device */
++	if (unlikely(from >= mtd->size)) {
++		pr_debug("%s: attempt to read beyond end of device\n",
++			 __func__);
++		return -EINVAL;
++	}
++	stats = mtd->ecc_stats;
++
++	page_addr = from >> chip->page_shift;
++
++	/* for main data */
++	page_offset = from & chip->page_mask;
++	ops->retlen = 0;
++
++	/* for oob */
++	if (oobreadlen > 0) {
++		if (unlikely(ops->ooboffs >= ooblen)) {
++			pr_debug("%s: attempt to start read outside oob\n",
++				 __func__);
++			return -EINVAL;
++		}
++
++		if (unlikely(ops->ooboffs + oobreadlen >
++			     ((mtd->size >> chip->page_shift) - (from >> chip->page_shift))
++			     * ooblen)) {
++			pr_debug("%s: attempt to read beyond end of device\n",
++				 __func__);
++			return -EINVAL;
++		}
++		ooblen -= ops->ooboffs;
++		ops->oobretlen = 0;
++	}
++
++	while (1) {
++		if (page_addr != chip->pagebuf || oobreadlen > 0) {
++			ret = spi_nand_do_read_page(mtd, page_addr,
++						    ops->mode == MTD_OOB_RAW, &corrected);
++			if (ret) {
++				pr_debug("error %d reading page 0x%x\n",
++					 ret, page_addr);
++				return ret;
++			}
++			chip->pagebuf_bitflips = corrected;
++			chip->pagebuf = page_addr;
++		}
++		max_bitflips = max(max_bitflips, chip->pagebuf_bitflips);
++		size = min(readlen, chip->page_size - page_offset);
++		memcpy(ops->datbuf + ops->retlen,
++		       chip->buf + page_offset, size);
++
++		ops->retlen += size;
++		readlen -= size;
++		page_offset = 0;
++
++		if (unlikely(ops->oobbuf)) {
++			size = min(oobreadlen, ooblen);
++			spi_nand_transfer_oob(chip,
++					      ops->oobbuf + ops->oobretlen, ops, size);
++
++			ops->oobretlen += size;
++			oobreadlen -= size;
++		}
++		if (!readlen)
++			break;
++
++		page_addr++;
++	}
++
++	if (mtd->ecc_stats.failed - stats.failed)
++		return -EBADMSG;
++
++	return max_bitflips;
++}
++
++/**
++ * spi_nand_do_write_ops - [INTERN] SPI-NAND write with ECC
++ * @mtd: MTD device structure
++ * @to: offset to write to
++ * @ops: oob operations description structure
++ *
++ */
++static int spi_nand_do_write_ops(struct mtd_info *mtd, loff_t to,
++				 struct mtd_oob_ops *ops)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	int page_addr, page_offset, size;
++	int writelen = ops->len;
++	int oobwritelen = ops->ooblen;
++	int ret;
++	int ooblen = ops->mode == MTD_OOB_AUTO ?
++		     mtd->oobavail : mtd->oobsize;
++
++	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
++
++	/* Do not allow reads past end of device */
++	if (unlikely(to >= mtd->size)) {
++		pr_debug("%s: attempt to write beyond end of device\n",
++			 __func__);
++		return -EINVAL;
++	}
++
++	page_addr = to >> chip->page_shift;
++
++	/* for main data */
++	page_offset = to & chip->page_mask;
++	ops->retlen = 0;
++
++	/* for oob */
++	if (oobwritelen > 0) {
++		/* Do not allow write past end of page */
++		if ((ops->ooboffs + oobwritelen) > ooblen) {
++			pr_debug("%s: attempt to write past end of page\n",
++				 __func__);
++			return -EINVAL;
++		}
++
++		if (unlikely(ops->ooboffs >= ooblen)) {
++			pr_debug("%s: attempt to start write outside oob\n",
++				 __func__);
++			return -EINVAL;
++		}
++
++		if (unlikely(ops->ooboffs + oobwritelen >
++			     ((mtd->size >> chip->page_shift) - (to >> chip->page_shift))
++			     * ooblen)) {
++			pr_debug("%s: attempt to write beyond end of device\n",
++				 __func__);
++			return -EINVAL;
++		}
++		ooblen -= ops->ooboffs;
++		ops->oobretlen = 0;
++	}
++
++	chip->pagebuf = -1;
++
++	while (1) {
++		memset(chip->buf, 0xFF,
++		       chip->page_size + chip->page_spare_size);
++
++		size = min(writelen, chip->page_size - page_offset);
++		memcpy(chip->buf + page_offset,
++		       ops->datbuf + ops->retlen, size);
++
++		ops->retlen += size;
++		writelen -= size;
++		page_offset = 0;
++
++		if (unlikely(ops->oobbuf)) {
++			size = min(oobwritelen, ooblen);
++
++			spi_nand_fill_oob(chip, ops->oobbuf + ops->oobretlen,
++					  size, ops);
++
++			ops->oobretlen += size;
++			oobwritelen -= size;
++		}
++		ret = spi_nand_do_write_page(mtd, page_addr,
++					     ops->mode == MTD_OOB_RAW);
++		if (ret) {
++			pr_debug("error %d writing page 0x%x\n",
++				 ret, page_addr);
++			return ret;
++		}
++		if (!writelen)
++			break;
++		page_addr++;
++	}
++	return 0;
++}
++
++/**
++ * spi_nand_read - [MTD Interface] SPI-NAND read
++ * @mtd: MTD device structure
++ * @from: offset to read from
++ * @len: number of bytes to read
++ * @retlen: pointer to variable to store the number of read bytes
++ * @buf: the databuffer to put data
++ *
++ */
++static int spi_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
++			 size_t *retlen, u_char *buf)
++{
++	struct mtd_oob_ops ops = { 0 };
++	int ret;
++
++	spi_nand_get_device(mtd, FL_READING);
++
++	ops.len = len;
++	ops.datbuf = buf;
++
++	ret = spi_nand_do_read_ops(mtd, from, &ops);
++
++	*retlen = ops.retlen;
++
++	spi_nand_release_device(mtd);
++
++	return ret;
++}
++
++/**
++ * spi_nand_write - [MTD Interface] SPI-NAND write
++ * @mtd: MTD device structure
++ * @to: offset to write to
++ * @len: number of bytes to write
++ * @retlen: pointer to variable to store the number of written bytes
++ * @buf: the data to write
++ *
++ */
++static int spi_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
++			  size_t *retlen, const u_char *buf)
++{
++	struct mtd_oob_ops ops = {0};
++	int ret;
++
++	spi_nand_get_device(mtd, FL_WRITING);
++
++	ops.len = len;
++	ops.datbuf = (uint8_t *)buf;
++
++	ret = spi_nand_do_write_ops(mtd, to, &ops);
++
++	*retlen = ops.retlen;
++
++	spi_nand_release_device(mtd);
++
++	return ret;
++}
++
++/**
++ * spi_nand_do_read_oob - [INTERN] SPI-NAND read out-of-band
++ * @mtd: MTD device structure
++ * @from: offset to read from
++ * @ops: oob operations description structure
++ *
++ * SPI-NAND read out-of-band data from the spare area.
++ */
++static int spi_nand_do_read_oob(struct mtd_info *mtd, loff_t from,
++				struct mtd_oob_ops *ops)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	int page_addr;
++	int corrected = 0;
++	struct mtd_ecc_stats stats;
++	int readlen = ops->ooblen;
++	int len;
++	int ret = 0;
++
++	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
++
++	pr_debug("%s: from = 0x%08Lx, len = %i\n",
++		 __func__, (unsigned long long)from, readlen);
++
++	stats = mtd->ecc_stats;
++
++	len = ops->mode == MTD_OOB_AUTO ? mtd->oobavail : mtd->oobsize;
++
++	if (unlikely(ops->ooboffs >= len)) {
++		pr_debug("%s: attempt to start read outside oob\n",
++			 __func__);
++		return -EINVAL;
++	}
++
++	/* Do not allow reads past end of device */
++	if (unlikely(from >= mtd->size ||
++		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
++					       (from >> chip->page_shift)) * len)) {
++		pr_debug("%s: attempt to read beyond end of device\n",
++			 __func__);
++		return -EINVAL;
++	}
++
++	/* Shift to get page */
++	page_addr = (from >> chip->page_shift);
++	len -= ops->ooboffs;
++	ops->oobretlen = 0;
++
++	while (1) {
++		/*read data from chip*/
++		ret = spi_nand_do_read_page_oob(mtd, page_addr,
++						ops->mode == MTD_OOB_RAW, &corrected);
++		if (ret) {
++			pr_debug("error %d reading page 0x%x\n",
++				 ret, page_addr);
++			return ret;
++		}
++		if (page_addr == chip->pagebuf)
++			chip->pagebuf = -1;
++
++		len = min(len, readlen);
++		spi_nand_transfer_oob(chip, ops->oobbuf + ops->oobretlen,
++				      ops, len);
++
++		readlen -= len;
++		ops->oobretlen += len;
++		if (!readlen)
++			break;
++
++		page_addr++;
++	}
++
++	if (ret < 0)
++		return ret;
++
++	if (mtd->ecc_stats.failed - stats.failed)
++		return -EBADMSG;
++
++	return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
++}
++
++/**
++ * spi_nand_do_write_oob - [MTD Interface] SPI-NAND write out-of-band
++ * @mtd: MTD device structure
++ * @to: offset to write to
++ * @ops: oob operation description structure
++ *
++ * SPI-NAND write out-of-band.
++ */
++static int spi_nand_do_write_oob(struct mtd_info *mtd, loff_t to,
++				 struct mtd_oob_ops *ops)
++{
++	int page_addr, len, ret;
++	struct spi_nand_chip *chip = mtd->priv;
++	int writelen = ops->ooblen;
++
++	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
++
++	pr_debug("%s: to = 0x%08x, len = %i\n",
++		 __func__, (unsigned int)to, (int)writelen);
++
++	len = ops->mode == MTD_OOB_AUTO ? mtd->oobavail : mtd->oobsize;
++
++	/* Do not allow write past end of page */
++	if ((ops->ooboffs + writelen) > len) {
++		pr_debug("%s: attempt to write past end of page\n",
++			 __func__);
++		return -EINVAL;
++	}
++
++	if (unlikely(ops->ooboffs >= len)) {
++		pr_debug("%s: attempt to start write outside oob\n",
++			 __func__);
++		return -EINVAL;
++	}
++
++	/* Do not allow write past end of device */
++	if (unlikely(to >= mtd->size ||
++		     ops->ooboffs + writelen >
++		     ((mtd->size >> chip->page_shift) -
++		      (to >> chip->page_shift)) * len)) {
++		pr_debug("%s: attempt to write beyond end of device\n",
++			 __func__);
++		return -EINVAL;
++	}
++
++	/* Shift to get page */
++	page_addr = to >> chip->page_shift;
++	/* Invalidate the page cache, if we write to the cached page */
++	if (page_addr == chip->pagebuf)
++		chip->pagebuf = -1;
++
++	spi_nand_fill_oob(chip, ops->oobbuf, writelen, ops);
++
++	ret = spi_nand_do_write_page_oob(mtd, page_addr,
++					 ops->mode == MTD_OOB_RAW);
++	if (ret) {
++		pr_debug("error %d writing page 0x%x\n",
++			 ret, page_addr);
++		return ret;
++	}
++	ops->oobretlen = writelen;
++
++	return 0;
++}
++
++/**
++ * spi_nand_read_oob - [MTD Interface] SPI-NAND read data and/or out-of-band
++ * @mtd: MTD device structure
++ * @from: offset to read from
++ * @ops: oob operation description structure
++ *
++ * SPI-NAND read data and/or out-of-band data.
++ */
++static int spi_nand_read_oob(struct mtd_info *mtd, loff_t from,
++			     struct mtd_oob_ops *ops)
++{
++	int ret = -ENOTSUPP;
++
++	struct spi_nand_chip *this = mtd->priv;
++	fh_dev_debug(&this->spi->dev, "Enter %s, from 0x%08llx\n", __func__, from);
++	ops->retlen = 0;
++
++	/* Do not allow reads past end of device */
++	if (ops->datbuf && (from + ops->len) > mtd->size) {
++		pr_debug("%s: attempt to read beyond end of device\n",
++			 __func__);
++		return -EINVAL;
++	}
++
++	spi_nand_get_device(mtd, FL_READING);
++
++	switch (ops->mode) {
++	case MTD_OOB_PLACE:
++	case MTD_OOB_AUTO:
++	case MTD_OOB_RAW:
++		break;
++
++	default:
++		goto out;
++	}
++
++	if (!ops->datbuf)
++		ret = spi_nand_do_read_oob(mtd, from, ops);
++	else
++		ret = spi_nand_do_read_ops(mtd, from, ops);
++
++out:
++	spi_nand_release_device(mtd);
++
++	return ret;
++}
++
++/**
++ * spi_nand_write_oob - [MTD Interface] SPI-NAND write data and/or out-of-band
++ * @mtd: MTD device structure
++ * @to: offset to write to
++ * @ops: oob operation description structure
++ */
++static int spi_nand_write_oob(struct mtd_info *mtd, loff_t to,
++			      struct mtd_oob_ops *ops)
++{
++	int ret = -ENOTSUPP;
++	struct spi_nand_chip *this = mtd->priv;
++	fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
++
++	ops->retlen = 0;
++
++	/* Do not allow writes past end of device */
++	if (ops->datbuf && (to + ops->len) > mtd->size) {
++		pr_debug("%s: attempt to write beyond end of device\n",
++			 __func__);
++		return -EINVAL;
++	}
++
++	spi_nand_get_device(mtd, FL_WRITING);
++
++	switch (ops->mode) {
++	case MTD_OOB_PLACE:
++	case MTD_OOB_AUTO:
++	case MTD_OOB_RAW:
++		break;
++
++	default:
++		goto out;
++	}
++
++	if (!ops->datbuf)
++		ret = spi_nand_do_write_oob(mtd, to, ops);
++	else
++		ret = spi_nand_do_write_ops(mtd, to, ops);
++
++out:
++	spi_nand_release_device(mtd);
++
++	return ret;
++}
++
++/**
++ * spi_nand_block_bad - [INTERN] Check if block at offset is bad
++ * @mtd: MTD device structure
++ * @ofs: offset relative to mtd start
++ */
++static int spi_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	struct mtd_oob_ops ops = {0};
++	u32 block_addr;
++	u8 bad[2] = {0, 0};
++	int ret = 0;
++
++	block_addr = ofs >> chip->block_shift;
++	ops.mode = MTD_OOB_PLACE;
++	ops.ooblen = 2;
++	ops.oobbuf = bad;
++
++	ret = spi_nand_do_read_oob(mtd, block_addr << chip->block_shift, &ops);
++	if (bad[0] != 0xFF || bad[1] != 0xFF)
++		ret = 1;
++
++	return ret;
++}
++
++/**
++ * spi_nand_block_checkbad - [GENERIC] Check if a block is marked bad
++ * @mtd: MTD device structure
++ * @ofs: offset from device start
++ * @allowbbt: 1, if its allowed to access the bbt area
++ *
++ * Check, if the block is bad. Either by reading the bad block table or
++ * calling of the scan function.
++ */
++static int spi_nand_block_checkbad(struct mtd_info *mtd, loff_t ofs,
++				   int allowbbt)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++
++	if (!chip->bbt)
++		return spi_nand_block_bad(mtd, ofs);
++
++	/* Return info from the table */
++	return spi_nand_isbad_bbt(mtd, ofs, allowbbt);
++}
++
++/**
++ * spi_nand_block_isbad - [MTD Interface] Check if block at offset is bad
++ * @mtd: MTD device structure
++ * @offs: offset relative to mtd start
++ */
++static int spi_nand_block_isbad(struct mtd_info *mtd, loff_t offs)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	return chip->block_bad(mtd, offs, 0);
++}
++
++/**
++ * spi_nand_block_markbad_lowlevel - mark a block bad
++ * @mtd: MTD device structure
++ * @ofs: offset from device start
++ *
++ * This function performs the generic bad block marking steps (i.e., bad
++ * block table(s) and/or marker(s)). We only allow the hardware driver to
++ * specify how to write bad block markers to OOB (chip->block_markbad).
++ *
++ * We try operations in the following order:
++ *  (1) erase the affected block, to allow OOB marker to be written cleanly
++ *  (2) write bad block marker to OOB area of affected block (unless flag
++ *      NAND_BBT_NO_OOB_BBM is present)
++ *  (3) update the BBT
++ * Note that we retain the first error encountered in (2) or (3), finish the
++ * procedures, and dump the error in the end.
++*/
++static int spi_nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	struct mtd_oob_ops ops = {0};
++	struct erase_info einfo = {0};
++	u32 block_addr;
++	u8 buf[2] = {0, 0};
++	int res, ret = 0;
++
++	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
++		/*erase bad block before mark bad block*/
++		einfo.mtd = mtd;
++		einfo.addr = ofs;
++		einfo.len = 1UL << chip->block_shift;
++		spi_nand_erase(mtd, &einfo);
++
++		block_addr = ofs >> chip->block_shift;
++		ops.mode = MTD_OOB_PLACE;
++		ops.ooblen = 2;
++		ops.oobbuf = buf;
++
++		ret = spi_nand_do_write_oob(mtd,
++					    block_addr << chip->block_shift, &ops);
++	}
++
++	/* Mark block bad in BBT */
++	if (chip->bbt) {
++		res = spi_nand_markbad_bbt(mtd, ofs);
++		if (!ret)
++			ret = res;
++	}
++
++	if (!ret)
++		mtd->ecc_stats.badblocks++;
++
++	return ret;
++}
++
++/**
++ * spi_nand_block_markbad - [MTD Interface] Mark block at the given offset
++ * as bad
++ * @mtd: MTD device structure
++ * @ofs: offset relative to mtd start
++ */
++static int spi_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
++{
++	int ret;
++
++	ret = spi_nand_block_isbad(mtd, ofs);
++	if (ret) {
++		/* If it was bad already, return success and do nothing */
++		if (ret > 0)
++			return 0;
++		return ret;
++	}
++
++	return spi_nand_block_markbad_lowlevel(mtd, ofs);
++}
++
++/**
++ * __spi_nand_erase - [INTERN] erase block(s)
++ * @mtd: MTD device structure
++ * @einfo: erase instruction
++ * @allowbbt: allow to access bbt
++ *
++ * Erase one or more blocks
++ */
++int __spi_nand_erase(struct mtd_info *mtd, struct erase_info *einfo,
++		     int allowbbt)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	int page_addr, pages_per_block;
++	loff_t len;
++	u8 status;
++	int ret = 0;
++
++	/* check address align on block boundary */
++	if (einfo->addr & (chip->block_size - 1)) {
++		pr_debug("%s: Unaligned address\n", __func__);
++		return -EINVAL;
++	}
++
++	if (einfo->len & (chip->block_size - 1)) {
++		pr_debug("%s: Length not block aligned\n", __func__);
++		return -EINVAL;
++	}
++
++	/* Do not allow erase past end of device */
++	if ((einfo->len + einfo->addr) > chip->size) {
++		pr_debug("%s: Erase past end of device\n", __func__);
++		return -EINVAL;
++	}
++
++	einfo->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
++
++	/* Grab the lock and see if the device is available */
++	spi_nand_get_device(mtd, FL_ERASING);
++
++	pages_per_block = 1 << (chip->block_shift - chip->page_shift);
++	page_addr = einfo->addr >> chip->page_shift;
++	len = einfo->len;
++
++	einfo->state = MTD_ERASING;
++
++	while (len) {
++		/* Check if we have a bad block, we do not erase bad blocks! */
++		if (chip->block_bad(mtd, ((loff_t) page_addr) <<
++					    chip->page_shift, allowbbt)) {
++			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
++				__func__, page_addr);
++			einfo->state = MTD_ERASE_FAILED;
++			goto erase_exit;
++		}
++		/*
++		 * Invalidate the page cache, if we erase the block which
++		 * contains the current cached page.
++		 */
++		if (page_addr <= chip->pagebuf && chip->pagebuf <
++		    (page_addr + pages_per_block))
++			chip->pagebuf = -1;
++
++		ret = chip->write_enable(chip);
++		if (ret < 0) {
++			pr_debug("write enable command failed\n");
++			einfo->state = MTD_ERASE_FAILED;
++			goto erase_exit;
++		}
++
++		ret = chip->erase_block(chip, page_addr);
++		if (ret < 0) {
++			pr_debug("block erase command failed\n");
++			einfo->state = MTD_ERASE_FAILED;
++			einfo->fail_addr = (loff_t)page_addr
++					   << chip->page_shift;
++			goto erase_exit;
++		}
++		ret = chip->waitfunc(chip, &status);
++		if (ret < 0) {
++			pr_debug("block erase command wait failed\n");
++			einfo->state = MTD_ERASE_FAILED;
++			goto erase_exit;
++		}
++		if ((status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL) {
++			pr_debug("erase block 0x%012llx failed\n",
++				 ((loff_t) page_addr) << chip->page_shift);
++			einfo->state = MTD_ERASE_FAILED;
++			einfo->fail_addr = (loff_t)page_addr
++					   << chip->page_shift;
++			goto erase_exit;
++		}
++
++		/* Increment page address and decrement length */
++		len -= (1ULL << chip->block_shift);
++		page_addr += pages_per_block;
++	}
++
++	einfo->state = MTD_ERASE_DONE;
++
++erase_exit:
++
++	ret = einfo->state == MTD_ERASE_DONE ? 0 : -EIO;
++
++	spi_nand_release_device(mtd);
++
++	/* Do call back function */
++	if (!ret)
++		mtd_erase_callback(einfo);
++
++	/* Return more or less happy */
++	return ret;
++}
++EXPORT_SYMBOL(__spi_nand_erase);
++
++/**
++ * spi_nand_erase - [MTD Interface] erase block(s)
++ * @mtd: MTD device structure
++ * @einfo: erase instruction
++ *
++ * Erase one or more blocks.
++ */
++static int spi_nand_erase(struct mtd_info *mtd, struct erase_info *einfo)
++{
++	return __spi_nand_erase(mtd, einfo, 0);
++}
++
++/**
++ * spi_nand_sync - [MTD Interface] sync
++ * @mtd: MTD device structure
++ *
++ * Sync is actually just a wait for the chip to become ready
++ */
++static void spi_nand_sync(struct mtd_info *mtd)
++{
++	pr_debug("spi_nand_sync: called\n");
++
++	/* Grab the lock and see if the device is available */
++	spi_nand_get_device(mtd, FL_SYNCING);
++
++	/* Release it and go back */
++	spi_nand_release_device(mtd);
++}
++
++/**
++ * spi_nand_suspend - [MTD Interface] Suspend the SPI-NAND flash
++ * @mtd: MTD device structure
++ */
++static int spi_nand_suspend(struct mtd_info *mtd)
++{
++	return spi_nand_get_device(mtd, FL_PM_SUSPENDED);
++}
++
++/**
++ * spi_nand_resume - [MTD Interface] Resume the SPI-NAND flash
++ * @mtd: MTD device structure
++ */
++static void spi_nand_resume(struct mtd_info *mtd)
++{
++	struct spi_nand_chip *this = mtd->priv;
++
++	if (this->state == FL_PM_SUSPENDED)
++		spi_nand_release_device(mtd);
++	else
++		pr_err("%s is not called in suspended state\n:", __func__);
++}
++
++/*
++ * spi_nand_send_cmd - build and send a command to the SPI-NAND
++ * @spi: spi device structure
++ * @cmd: command structure
++ *
++ *    Set up the command buffer to send to the SPI controller.
++ *    The command buffer has to be initialized to 0.
++ */
++int spi_nand_send_cmd(struct spi_device *spi, struct spi_nand_cmd *cmd)
++{
++	struct spi_message message;
++	struct spi_transfer x[4];
++	u8 buf[8], i;
++	u32 buflen = 0;
++
++	spi_message_init(&message);
++	memset(x, 0, sizeof(x));
++	x[0].len = 1;
++	x[0].tx_buf = &cmd->cmd;
++	spi_message_add_tail(&x[0], &message);
++
++	buf[buflen++] = cmd->cmd;
++
++	if (cmd->n_addr) {
++		x[1].len = cmd->n_addr;
++		x[1].tx_buf = cmd->addr;
++		spi_message_add_tail(&x[1], &message);
++	}
++	for (i = 0; i < cmd->n_addr && buflen < 7; i++)
++		buf[buflen++] = cmd->addr[i];
++
++	if (cmd->n_tx) {
++		x[2].len = cmd->n_tx;
++		/* x[2].tx_nbits = cmd->tx_nbits; single-wire only for now, dual/quad left for a future version */
++		x[2].tx_buf = cmd->tx_buf;
++		spi_message_add_tail(&x[2], &message);
++	}
++	for (i = 0; i < cmd->n_tx && buflen < 7; i++)
++		buf[buflen++] = cmd->tx_buf[i];
++
++	if (cmd->n_rx) {
++		x[3].len = cmd->n_rx;
++		/*x[3].rx_nbits = cmd->rx_nbits;*/
++		x[3].rx_buf = cmd->rx_buf;
++		spi_message_add_tail(&x[3], &message);
++	}
++	for (i = 0; i < cmd->n_rx && buflen < 7; i++)
++		buf[buflen++] = cmd->rx_buf[i];
++
++	buflen = 1 + cmd->n_addr + cmd->n_tx + cmd->n_rx;
++	if (fh_start_debug || cmd->cmd == 0x1f)
++		fh_dev_debug(&spi->dev, "spi%d:%d: send cmd 0x: %02x %02x %02x %02x %02x %02x, size %d\n",
++			     spi->master->bus_num, spi->chip_select,
++			     buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buflen);
++
++	return spi_sync(spi, &message);
++}
++EXPORT_SYMBOL(spi_nand_send_cmd);
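++
++/*
++ * Note on the message layout (descriptive, not normative): the message
++ * built above is at most four transfers in sequence -- the opcode byte,
++ * the address bytes, then optional tx-data and rx-data phases -- and the
++ * local buf[] only mirrors the first bytes of the exchange for the debug
++ * print.
++ */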
++/*
++ * spi_nand_read_status- send command 0x0f to read the SPI-NAND status register
++ * @spi: spi device structure
++ * @status: buffer to store value
++ * Description:
++ *    After read, write, or erase, the Nand device is expected to set the
++ *    busy status.
++ *    This function reads the status register, which is polled to detect
++ *    completion of the read, write, and erase commands.
++ *    Once the device reports ready, the other status bits are also
++ *    valid.
++ */
++static int spi_nand_read_status(struct spi_device *spi, uint8_t *status)
++{
++	struct spi_nand_cmd cmd = {0};
++	int ret;
++
++	cmd.cmd = SPINAND_CMD_READ_REG;
++	cmd.n_addr = 1;
++	cmd.addr[0] = REG_STATUS;
++	cmd.n_rx = 1;
++	cmd.rx_buf = status;
++
++	ret = spi_nand_send_cmd(spi, &cmd);
++	if (ret < 0)
++		dev_err(&spi->dev, "err: %d read status register\n", ret);
++
++	return ret;
++}
++
++/**
++ * spi_nand_get_otp- send command 0x0f to read the SPI-NAND OTP register
++ * @spi: spi device structure
++ * @otp: buffer to store value
++ * Description:
++ *   There is one bit (bit 0x10) that controls the internal ECC:
++ *   set the bit to 1 to enable the chip's internal ECC,
++ *   clear the bit to 0 to disable it.
++ */
++static int spi_nand_get_otp(struct spi_device *spi, u8 *otp)
++{
++	struct spi_nand_cmd cmd = {0};
++	int ret;
++
++	cmd.cmd = SPINAND_CMD_READ_REG;
++	cmd.n_addr = 1;
++	cmd.addr[0] = REG_OTP;
++	cmd.n_rx = 1;
++	cmd.rx_buf = otp;
++
++	ret = spi_nand_send_cmd(spi, &cmd);
++	if (ret < 0)
++		dev_err(&spi->dev, "error %d get otp\n", ret);
++	return ret;
++}
++
++/**
++ * spi_nand_set_otp- send command 0x1f to write the SPI-NAND OTP register
++ * @spi: spi device structure
++ * @otp: buffer holding the value to write
++ * Description:
++ *   There is one bit (bit 0x10) that controls the internal ECC:
++ *   set the bit to 1 to enable the chip's internal ECC,
++ *   clear the bit to 0 to disable it.
++ */
++static int spi_nand_set_otp(struct spi_device *spi, u8 *otp)
++{
++	int ret;
++	struct spi_nand_cmd cmd = { 0 };
++
++	cmd.cmd = SPINAND_CMD_WRITE_REG;
++	cmd.n_addr = 1;
++	cmd.addr[0] = REG_OTP;
++	cmd.n_tx = 1;
++	cmd.tx_buf = otp;
++
++	ret = spi_nand_send_cmd(spi, &cmd);
++	if (ret < 0)
++		dev_err(&spi->dev, "error %d set otp\n", ret);
++
++	return ret;
++}
++
++/**
++ * spi_nand_enable_ecc- enable internal ECC
++ * @chip: SPI-NAND device structure
++ * Description:
++ *   There is one bit (bit 0x10) that controls the internal ECC:
++ *   set the bit to 1 to enable the chip's internal ECC,
++ *   clear the bit to 0 to disable it.
++ */
++static int spi_nand_enable_ecc(struct spi_nand_chip *chip)
++{
++	struct spi_device *spi = chip->spi;
++	int ret;
++	u8 otp = 0;
++
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++
++	ret = spi_nand_get_otp(spi, &otp);
++	if (ret < 0)
++		return ret;
++
++	fh_dev_debug(&spi->dev, "get opt: 0x%02x\n", otp);
++	if ((otp & OTP_ECC_MASK) == OTP_ECC_ENABLE)
++		return 0;
++
++	otp |= OTP_ECC_ENABLE;
++	ret = spi_nand_set_otp(spi, &otp);
++	if (ret < 0)
++		return ret;
++	fh_dev_debug(&spi->dev, "set opt: 0x%02x\n", otp);
++	return spi_nand_get_otp(spi, &otp);
++}
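++
++/*
++ * Example (illustrative): if the OTP register reads back 0x00, enabling
++ * ECC writes 0x10 (OTP_ECC_ENABLE) and the final read-back confirms the
++ * bit is set.
++ */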
++
++/**
++ * spi_nand_disable_ecc- disable internal ECC
++ * @chip: SPI-NAND device structure
++ * Description:
++ *   There is one bit (bit 0x10) that controls the internal ECC:
++ *   set the bit to 1 to enable the chip's internal ECC,
++ *   clear the bit to 0 to disable it.
++ */
++static int spi_nand_disable_ecc(struct spi_nand_chip *chip)
++{
++	struct spi_device *spi = chip->spi;
++	int ret;
++	u8 otp = 0;
++
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++	ret = spi_nand_get_otp(spi, &otp);
++	if (ret < 0)
++		return ret;
++
++	if ((otp & OTP_ECC_MASK) == OTP_ECC_ENABLE) {
++		otp &= ~OTP_ECC_ENABLE;
++		ret = spi_nand_set_otp(spi, &otp);
++		if (ret < 0)
++			return ret;
++		return spi_nand_get_otp(spi, &otp);
++	}
++
++	return 0;
++}
++
++/**
++ * spi_nand_write_enable- send command 0x06 to enable write or erase the
++ * Nand cells
++ * @chip: SPI-NAND device structure
++ * Description:
++ *   Before writing or erasing the Nand cells, the write enable bit has
++ *   to be set.
++ *   After the write or erase, the write enable bit is automatically
++ *   cleared (status register bit 2).
++ *   Setting bit 2 of the status register has the same effect.
++ */
++static int spi_nand_write_enable(struct spi_nand_chip *chip)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++
++	cmd.cmd = SPINAND_CMD_WR_ENABLE;
++	return spi_nand_send_cmd(spi, &cmd);
++}
++
++/*
++ * spi_nand_read_page_to_cache- send command 0x13 to load a page from the
++ * Nand array into the cache register
++ * @chip: SPI-NAND device structure
++ * @page_addr: page to read
++ */
++static int spi_nand_read_page_to_cache(struct spi_nand_chip *chip,
++				       u32 page_addr)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++
++	cmd.cmd = SPINAND_CMD_READ;
++	cmd.n_addr = 3;
++	cmd.addr[0] = (u8)(page_addr >> 16);
++	cmd.addr[1] = (u8)(page_addr >> 8);
++	cmd.addr[2] = (u8)page_addr;
++
++	return spi_nand_send_cmd(spi, &cmd);
++}
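++
++/*
++ * Example (illustrative): loading page 0x000123 sends the 24-bit row
++ * address MSB first, so the wire sees the bytes 13 00 01 23.
++ */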
++
++/*
++ * spi_nand_read_from_cache- send command 0x03 to read out the data from the
++ * cache register
++ * Description:
++ *   The read can specify 1 to (page size + spare size) bytes of data read at
++ *   the corresponding locations.
++ *   No tRd delay.
++ */
++int spi_nand_read_from_cache(struct spi_nand_chip *chip, u32 page_addr,
++			     u32 column, size_t len, u8 *rbuf)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++
++	cmd.cmd = SPINAND_CMD_READ_RDM;
++	cmd.n_addr = 3;
++	if (chip->dev_id[0] == 0xC8) { /* FIXME: early GD chips, test 1G */
++		cmd.addr[0] = 0;
++		cmd.addr[1] = (u8)(column >> 8);
++		if (chip->options & SPINAND_NEED_PLANE_SELECT)
++			cmd.addr[0] |= (u8)(((page_addr >>
++					      (chip->block_shift - chip->page_shift)) & 0x1) << 4);
++		cmd.addr[2] = (u8)column;
++	} else {
++		cmd.addr[0] = (u8)(column >> 8);
++		if (chip->options & SPINAND_NEED_PLANE_SELECT)
++			cmd.addr[0] |= (u8)(((page_addr >>
++					      (chip->block_shift - chip->page_shift)) & 0x1) << 4);
++		cmd.addr[1] = (u8)column;
++		cmd.addr[2] = 0;
++	}
++	cmd.n_rx = len;
++	cmd.rx_buf = rbuf;
++
++	return spi_nand_send_cmd(spi, &cmd);
++}
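++
++/*
++ * Example (illustrative, non-GD address layout): reading from column 0x030
++ * of a page in an odd-numbered block on a two-plane part sets the
++ * plane-select bit (bit 4 of the first address byte), giving
++ * addr[] = { 0x10, 0x30, 0x00 }; the final byte is the dummy byte of the
++ * 0x03 read sequence.
++ */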
++
++/*
++ * spi_nand_read_from_cache_x2- send command 0x3b to read out the data from the
++ * cache register
++ * Description:
++ *   The read can specify 1 to (page size + spare size) bytes of data read at
++ *   the corresponding locations.
++ *   No tRd delay.
++ */
++/*int spi_nand_read_from_cache_x2(struct spi_nand_chip *chip, u32 page_addr,
++		u32 column, size_t len, u8 *rbuf)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++
++	cmd.cmd = SPINAND_CMD_READ_CACHE_X2;
++	cmd.n_addr = 3;
++	cmd.addr[0] = (u8)(column >> 8);
++	if (chip->options & SPINAND_NEED_PLANE_SELECT)
++		cmd.addr[0] |= (u8)(((page_addr >>
++			(chip->block_shift - chip->page_shift)) & 0x1) << 4);
++	cmd.addr[1] = (u8)column;
++	cmd.addr[2] = 0;
++	cmd.n_rx = len;
++	cmd.rx_nbits = SPI_NBITS_DUAL;
++	cmd.rx_buf = rbuf;
++
++	return spi_nand_send_cmd(spi, &cmd);
++}*/
++
++/*
++ * spi_nand_read_from_cache_x4- send command 0x6b to read out the data from the
++ * cache register
++ * Description:
++ *   The read can specify 1 to (page size + spare size) bytes of data read at
++ *   the corresponding locations.
++ *   No tRd delay.
++ */
++/*int spi_nand_read_from_cache_x4(struct spi_nand_chip *chip, u32 page_addr,
++		u32 column, size_t len, u8 *rbuf)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++
++	cmd.cmd = SPINAND_CMD_READ_CACHE_X4;
++	cmd.n_addr = 3;
++	cmd.addr[0] = (u8)(column >> 8);
++	if (chip->options & SPINAND_NEED_PLANE_SELECT)
++		cmd.addr[0] |= (u8)(((page_addr >>
++			(chip->block_shift - chip->page_shift)) & 0x1) << 4);
++	cmd.addr[1] = (u8)column;
++	cmd.addr[2] = 0;
++	cmd.n_rx = len;
++	cmd.rx_nbits = SPI_NBITS_QUAD;
++	cmd.rx_buf = rbuf;
++
++	return spi_nand_send_cmd(spi, &cmd);
++}*/
++
++/*
++ * spi_nand_read_from_cache_snor_protocol- send command 0x03 to read out the
++ * data from the cache register, 0x03 command protocol is same as SPI NOR
++ * read command
++ * Description:
++ *   The read can specify 1 to (page size + spare size) bytes of data read at
++ *   the corresponding locations.
++ *   No tRd delay.
++ */
++int spi_nand_read_from_cache_snor_protocol(struct spi_nand_chip *chip,
++		u32 page_addr, u32 column, size_t len, u8 *rbuf)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++
++	cmd.cmd = SPINAND_CMD_READ_RDM;
++	cmd.n_addr = 3;
++	cmd.addr[0] = 0;
++	cmd.addr[1] = (u8)(column >> 8);
++	if (chip->options & SPINAND_NEED_PLANE_SELECT)
++		cmd.addr[1] |= (u8)(((page_addr >>
++				      (chip->block_shift - chip->page_shift)) & 0x1) << 4);
++	cmd.addr[2] = (u8)column;
++	cmd.n_rx = len;
++	cmd.rx_buf = rbuf;
++
++	return spi_nand_send_cmd(spi, &cmd);
++}
++EXPORT_SYMBOL(spi_nand_read_from_cache_snor_protocol);
++
++/*
++ * spi_nand_program_data_to_cache--to write a page to cache
++ * @chip: SPI-NAND device structure
++ * @page_addr: page to write
++ * @column: the location to write to the cache
++ * @len: number of bytes to write
++ * @wbuf: buffer holding @len bytes
++ *
++ * Description:
++ *   The write command used here is 0x02--indicating that the cache is
++ *   cleared first.
++ *   Since it is writing the data to cache, there is no tPROG time.
++ */
++static int spi_nand_program_data_to_cache(struct spi_nand_chip *chip,
++		u32 page_addr, u32 column, size_t len, u8 *wbuf)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++
++	cmd.cmd = SPINAND_CMD_PROG_LOAD;
++	cmd.n_addr = 2;
++	cmd.addr[0] = (u8)(column >> 8);
++	if (chip->options & SPINAND_NEED_PLANE_SELECT)
++		cmd.addr[0] |= (u8)(((page_addr >>
++				      (chip->block_shift - chip->page_shift)) & 0x1) << 4);
++	cmd.addr[1] = (u8)column;
++	cmd.n_tx = len;
++	cmd.tx_buf = wbuf;
++	fh_dev_debug(&spi->dev, "see n_tx %d,  oob[4] 0x%08x\n", len, *(uint32_t*)(wbuf+2048));
++
++	return spi_nand_send_cmd(spi, &cmd);
++}
++
++/**
++ * spi_nand_program_execute--to write a page from cache to the Nand array
++ * @chip: SPI-NAND device structure
++ * @page_addr: the physical page location to write the page.
++ *
++ * Description:
++ *   The write command used here is 0x10--indicating the cache is writing to
++ *   the Nand array.
++ *   Need to wait for tPROG time to finish the transaction.
++ */
++static int spi_nand_program_execute(struct spi_nand_chip *chip, u32 page_addr)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++	cmd.cmd = SPINAND_CMD_PROG;
++	cmd.n_addr = 3;
++	cmd.addr[0] = (u8)(page_addr >> 16);
++	cmd.addr[1] = (u8)(page_addr >> 8);
++	cmd.addr[2] = (u8)page_addr;
++
++	return spi_nand_send_cmd(spi, &cmd);
++}
++
++/**
++ * spi_nand_erase_block_erase--to erase a block
++ * @chip: SPI-NAND device structure
++ * @page_addr: the page to erase.
++ *
++ * Description:
++ *   The command used here is 0xd8--indicating an erase command to erase
++ *   one block
++ *   Need to wait for tERS.
++ */
++static int spi_nand_erase_block(struct spi_nand_chip *chip,
++				u32 page_addr)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++
++	cmd.cmd = SPINAND_CMD_ERASE_BLK;
++	cmd.n_addr = 3;
++	cmd.addr[0] = (u8)(page_addr >> 16);
++	cmd.addr[1] = (u8)(page_addr >> 8);
++	cmd.addr[2] = (u8)page_addr;
++
++	return spi_nand_send_cmd(spi, &cmd);
++}
++
++/**
++ * spi_nand_wait - [DEFAULT] wait until the command is done
++ * @chip: SPI-NAND device structure
++ * @s: buffer to store status register(can be NULL)
++ *
++ * Wait for command done. This applies to erase and program only. Erase can
++ * take up to 400ms; program is nominally up to 20ms, though 400ms is
++ * allowed here for slower parts.
++ */
++static int spi_nand_wait(struct spi_nand_chip *chip, u8 *s)
++{
++	unsigned long timeo = jiffies;
++	u8 status, state = chip->state;
++	int ret = -ETIMEDOUT;
++	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
++
++	if (state == FL_ERASING)
++		timeo += msecs_to_jiffies(400);
++	else
++		timeo += msecs_to_jiffies(400); /* raised from 20ms for mx2g */
++
++	while (time_before(jiffies, timeo)) {
++		spi_nand_read_status(chip->spi, &status);
++		if ((status & STATUS_OIP_MASK) == STATUS_READY) {
++			ret = 0;
++			goto out;
++		}
++		cond_resched();
++	}
++out:
++	if (s)
++		*s = status;
++
++	return ret;
++}
++
++/*
++ * spi_nand_reset- send RESET command "0xff" to the SPI-NAND.
++ * @chip: SPI-NAND device structure
++ */
++static int spi_nand_reset(struct spi_nand_chip *chip)
++{
++	struct spi_nand_cmd cmd = {0};
++	struct spi_device *spi = chip->spi;
++	fh_dev_debug(&spi->dev, "Enter %s\n", __func__);
++
++	cmd.cmd = SPINAND_CMD_RESET;
++
++	if (spi_nand_send_cmd(spi, &cmd) < 0)
++		pr_err("spi_nand reset failed!\n");
++
++	/* elapse 1ms before issuing any other command */
++	udelay(1000);
++
++	return 0;
++}
++
++/**
++ * spi_nand_lock_block- send write register command 0x1f to lock/unlock the device
++ * @spi: spi device structure
++ * @lock: value to set to block lock register
++ *
++ * Description:
++ *    After power up, all the Nand blocks are locked. This function
++ *    allows one to unlock the blocks so they can be written or erased.
++ */
++static int spi_nand_lock_block(struct spi_device *spi, u8 lock)
++{
++	struct spi_nand_cmd cmd = {0};
++	int ret;
++
++	cmd.cmd = SPINAND_CMD_WRITE_REG;
++	cmd.n_addr = 1;
++	cmd.addr[0] = REG_BLOCK_LOCK;
++	cmd.n_tx = 1;
++	cmd.tx_buf = &lock;
++
++	ret = spi_nand_send_cmd(spi, &cmd);
++	if (ret < 0)
++		dev_err(&spi->dev, "error %d lock block\n", ret);
++
++	return ret;
++}
++
++static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
++{
++	int i;
++
++	while (len--) {
++		crc ^= *p++ << 8;
++		for (i = 0; i < 8; i++)
++			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
++	}
++
++	return crc;
++}
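++
++/*
++ * This is the ONFI parameter-page CRC-16: polynomial 0x8005
++ * (x^16 + x^15 + x^2 + 1), no reflection, seeded with ONFI_CRC_BASE and,
++ * in spi_nand_detect_onfi() below, computed over the first 254 bytes of
++ * each 256-byte copy and compared against the little-endian crc field.
++ */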
++
++/* Sanitize ONFI strings so we can safely print them */
++static void sanitize_string(uint8_t *s, size_t len)
++{
++	ssize_t i;
++
++	/* Null terminate */
++	s[len - 1] = 0;
++
++	/* Remove non printable chars */
++	for (i = 0; i < len - 1; i++) {
++		if (s[i] < ' ' || s[i] > 127)
++			s[i] = '?';
++	}
++
++	/* Remove trailing spaces */
++	strim(s);
++}
++
++/*
++ * Check if the SPI-NAND chip is ONFI compliant;
++ * returns true if it is, false otherwise.
++ */
++static bool spi_nand_detect_onfi(struct spi_nand_chip *chip)
++{
++	struct spi_device *spi = chip->spi;
++	struct spi_nand_onfi_params *p;
++	char *buffer;
++	bool ret = true;
++	u8 otp;
++	int i;
++
++	/*FIXME buffer size*/
++	buffer = kmalloc(256 * 3, GFP_KERNEL);
++	if (!buffer)
++		return false;
++
++	otp = OTP_ENABLE;
++	spi_nand_set_otp(spi, &otp);
++	chip->load_page(chip, 0x01);
++	chip->waitfunc(chip, NULL);
++	spi_nand_read_from_cache(chip, 0x01, 0x00, 256 * 3, buffer);
++	otp = OTP_ECC_ENABLE;
++	spi_nand_set_otp(spi, &otp);
++
++	p = (struct spi_nand_onfi_params *)buffer;
++	for (i = 0; i < 3; i++, p++) {
++		if (p->sig[0] != 'O' || p->sig[1] != 'N' ||
++		    p->sig[2] != 'F' || p->sig[3] != 'I')
++			continue;
++		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
++		    le16_to_cpu(p->crc))
++			break;
++	}
++	if (i == 3) {
++		pr_err("Could not find valid ONFI parameter page; aborting\n");
++		ret = false;
++		goto out;
++	}
++
++	memcpy(&chip->onfi_params, p, sizeof(*p));
++
++	p = &chip->onfi_params;
++
++	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
++	sanitize_string(p->model, sizeof(p->model));
++
++	chip->name = p->model;
++	chip->size = le32_to_cpu(p->byte_per_page) *
++		     le32_to_cpu(p->pages_per_block) *
++		     le32_to_cpu(p->blocks_per_lun) * p->lun_count;
++	chip->block_size = le32_to_cpu(p->byte_per_page) *
++			   le32_to_cpu(p->pages_per_block);
++	chip->page_size = le32_to_cpu(p->byte_per_page);
++	chip->page_spare_size = le16_to_cpu(p->spare_bytes_per_page);
++	chip->block_shift = ilog2(chip->block_size);
++	chip->page_shift = ilog2(chip->page_size);
++	chip->page_mask = chip->page_size - 1;
++	chip->bits_per_cell = p->bits_per_cell;
++	/*FIXME need to find a way to read options from ONFI table*/
++	chip->options = SPINAND_NEED_PLANE_SELECT;
++	if (p->ecc_bits != 0xff) {
++		chip->ecc_strength_ds = p->ecc_bits;
++		chip->ecc_step_ds = 512;
++	}
++
++out:
++	kfree(buffer);
++	return ret;
++}
++
++static void spi_nand_set_defaults(struct spi_nand_chip *chip)
++{
++	/*struct spi_device *spi = chip->spi;*/
++
++	/*	if (spi->mode & SPI_RX_QUAD)
++			chip->read_cache = spi_nand_read_from_cache_x4;
++		else if (spi->mode & SPI_RX_DUAL)
++			chip->read_cache = spi_nand_read_from_cache_x2;
++		else*/
++	chip->read_cache = spi_nand_read_from_cache;
++
++	if (!chip->reset)
++		chip->reset = spi_nand_reset;
++	if (!chip->erase_block)
++		chip->erase_block = spi_nand_erase_block;
++	if (!chip->load_page)
++		chip->load_page = spi_nand_read_page_to_cache;
++	if (!chip->store_cache)
++		chip->store_cache = spi_nand_program_data_to_cache;
++	if (!chip->write_page)
++		chip->write_page = spi_nand_program_execute;
++	if (!chip->write_enable)
++		chip->write_enable = spi_nand_write_enable;
++	if (!chip->waitfunc)
++		chip->waitfunc = spi_nand_wait;
++	if (!chip->enable_ecc)
++		chip->enable_ecc = spi_nand_enable_ecc;
++	if (!chip->disable_ecc)
++		chip->disable_ecc = spi_nand_disable_ecc;
++	if (!chip->block_bad)
++		chip->block_bad = spi_nand_block_checkbad;
++}
++
++static int spi_nand_check(struct spi_nand_chip *chip)
++{
++	if (!chip->reset)
++		return -ENODEV;
++	if (!chip->read_id)
++		return -ENODEV;
++	if (!chip->load_page)
++		return -ENODEV;
++	if (!chip->read_cache)
++		return -ENODEV;
++	if (!chip->store_cache)
++		return -ENODEV;
++	if (!chip->write_page)
++		return -ENODEV;
++	if (!chip->erase_block)
++		return -ENODEV;
++	if (!chip->waitfunc)
++		return -ENODEV;
++	if (!chip->write_enable)
++		return -ENODEV;
++	if (!chip->get_ecc_status)
++		return -ENODEV;
++	if (!chip->enable_ecc)
++		return -ENODEV;
++	if (!chip->disable_ecc)
++		return -ENODEV;
++	if (!chip->ecclayout)
++		return -ENODEV;
++	return 0;
++}
++
++/**
++ * spi_nand_scan_ident - [SPI-NAND Interface] Scan for the SPI-NAND device
++ * @mtd: MTD device structure
++ *
++ * This is the first phase of the normal spi_nand_scan() function. It reads the
++ * flash ID and sets up MTD fields accordingly.
++ *
++ */
++int spi_nand_scan_ident(struct mtd_info *mtd)
++{
++	int ret;
++	u8 id[SPINAND_MAX_ID_LEN] = {0};
++	struct spi_nand_chip *chip = mtd->priv;
++
++	spi_nand_set_defaults(chip);
++	chip->reset(chip);
++
++	chip->read_id(chip, id);
++	if (id[0] == 0 && id[1] == 0) {
++		pr_err("SPINAND: read id error! 0x%02x, 0x%02x!\n",
++		       id[0], id[1]);
++		return -ENODEV;
++	}
++
++	pr_err("SPINAND: read id ! 0x%02x, 0x%02x 0x%02x, 0x%02x!\n",
++		id[0], id[1], id[2], id[3]);
++	if (spi_nand_scan_id_table(chip, id))
++		goto ident_done;
++	pr_info("SPI-NAND type mfr_id: %x, dev_id: %x is not in id table.\n",
++		id[0], id[1]);
++
++	if (spi_nand_detect_onfi(chip))
++		goto ident_done;
++
++	return -ENODEV;
++
++ident_done:
++	pr_info("SPI-NAND: %s is found.\n", chip->name);
++
++	/*chip->mfr_id = id[0];
++	chip->dev_id = id[1];*/
++
++	chip->buf = kzalloc(chip->page_size + chip->page_spare_size,
++			    GFP_KERNEL);
++	if (!chip->buf)
++		return -ENOMEM;
++
++	chip->oobbuf = chip->buf + chip->page_size;
++
++	ret = spi_nand_lock_block(chip->spi, BL_ALL_UNLOCKED);
++	if (ret)
++		return ret;
++
++	ret = chip->enable_ecc(chip);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(spi_nand_scan_ident);
++
++/**
++ * spi_nand_scan_tail - [SPI-NAND Interface] Scan for the SPI-NAND device
++ * @mtd: MTD device structure
++ *
++ * This is the second phase of the normal spi_nand_scan() function. It fills out
++ * all the uninitialized function pointers with the defaults.
++ */
++int spi_nand_scan_tail(struct mtd_info *mtd)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++	int ret;
++
++	ret = spi_nand_check(chip);
++	if (ret)
++		return ret;
++	/* Initialize state */
++	chip->state = FL_READY;
++	/* Invalidate the pagebuffer reference */
++	chip->pagebuf = -1;
++	chip->bbt_options |= NAND_BBT_USE_FLASH;
++	chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
++
++	init_waitqueue_head(&chip->wq);
++	spin_lock_init(&chip->chip_lock);
++
++	mtd->name = chip->name;
++	mtd->size = chip->size;
++	mtd->erasesize = chip->block_size;
++	mtd->writesize = chip->page_size;
++	mtd->writebufsize = mtd->writesize;
++	mtd->oobsize = chip->page_spare_size;
++	mtd->owner = THIS_MODULE;
++	mtd->type = MTD_NANDFLASH;
++	mtd->flags = MTD_CAP_NANDFLASH;
++	/*xxx:porting down: if (!mtd->ecc_strength)
++			mtd->ecc_strength = chip->ecc_strength_ds ?
++						chip->ecc_strength_ds : 1;*/
++
++	mtd->ecclayout = chip->ecclayout;
++	mtd->oobsize = chip->page_spare_size;
++	mtd->oobavail = chip->ecclayout->oobavail;
++	/* remove _* */
++	mtd->erase = spi_nand_erase;
++	mtd->point = NULL;
++	mtd->unpoint = NULL;
++	mtd->read = spi_nand_read;
++	mtd->write = spi_nand_write;
++	mtd->read_oob = spi_nand_read_oob;
++	mtd->write_oob = spi_nand_write_oob;
++	mtd->sync = spi_nand_sync;
++	mtd->lock = NULL;
++	mtd->unlock = NULL;
++	mtd->suspend = spi_nand_suspend;
++	mtd->resume = spi_nand_resume;
++	mtd->block_isbad = spi_nand_block_isbad;
++	mtd->block_markbad = spi_nand_block_markbad;
++
++#ifndef CONFIG_SPI_NAND_BBT
++	/* Build bad block table */
++	return spi_nand_default_bbt(mtd);
++#else
++	return 0;
++#endif
++}
++EXPORT_SYMBOL_GPL(spi_nand_scan_tail);
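++
++/*
++ * Typical probe sequence (a sketch; error handling abbreviated, the
++ * partition arguments are the caller's own):
++ *
++ *	ret = spi_nand_scan_ident(mtd);
++ *	if (ret)
++ *		return ret;
++ *	ret = spi_nand_scan_tail(mtd);
++ *	if (ret) {
++ *		spi_nand_scan_ident_release(mtd);
++ *		return ret;
++ *	}
++ *	return mtd_device_register(mtd, parts, nr_parts);
++ */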
++
++/**
++ * spi_nand_scan_ident_release - [SPI-NAND Interface] Free resources
++ * allocated by spi_nand_scan_ident
++ * @mtd: MTD device structure
++ */
++int spi_nand_scan_ident_release(struct mtd_info *mtd)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++
++	kfree(chip->buf);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(spi_nand_scan_ident_release);
++
++/**
++ * spi_nand_scan_tail_release - [SPI-NAND Interface] Free resources
++ * allocated by spi_nand_scan_tail
++ * @mtd: MTD device structure
++ */
++int spi_nand_scan_tail_release(struct mtd_info *mtd)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(spi_nand_scan_tail_release);
++
++/**
++ * spi_nand_release - [SPI-NAND Interface] Free resources held by the SPI-NAND
++ * device
++ * @mtd: MTD device structure
++ */
++int spi_nand_release(struct mtd_info *mtd)
++{
++	struct spi_nand_chip *chip = mtd->priv;
++
++	mtd_device_unregister(mtd);
++	kfree(chip->buf);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(spi_nand_release);
++
++MODULE_DESCRIPTION("SPI NAND framework");
++MODULE_AUTHOR("Peter Pan<peterpandong at micron.com>");
++MODULE_LICENSE("GPL v2");
++
+diff --git a/drivers/mtd/spi-nand/spi-nand-bbt.c b/drivers/mtd/spi-nand/spi-nand-bbt.c
+new file mode 100644
+index 00000000..42ee9f87
+--- /dev/null
++++ b/drivers/mtd/spi-nand/spi-nand-bbt.c
+@@ -0,0 +1,1356 @@
++/*
++ *  drivers/mtd/spi_nand_bbt.c
++ *
++ *  Overview:
++ *   Bad block table support for the SPI-NAND driver
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This file is derived from nand_base.c
++ *
++ * TODO:
++ *   share BBT code with parallel nand
++ */
++
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/bbm.h>
++#include <linux/mtd/spi-nand.h>
++#include <linux/bitops.h>
++#include <linux/delay.h>
++#include <linux/vmalloc.h>
++#include <linux/string.h>
++#include <linux/spi/spi.h>
++#include <asm/string.h>
++
++#define BBT_BLOCK_GOOD		0x00
++#define BBT_BLOCK_WORN		0x01
++#define BBT_BLOCK_RESERVED	0x02
++#define BBT_BLOCK_FACTORY_BAD	0x03
++
++#define BBT_ENTRY_MASK		0x03
++#define BBT_ENTRY_SHIFT		2
++
++#ifdef SPINAND_BBT_DEBUG
++#define fh_dev_debug		dev_err
++#define fh_debug_dump(buf, len)	do { \
++		unsigned int i; \
++		printk("\t %s:L%d", __func__, __LINE__); \
++		for (i = 0; i < len / 4; i++) { \
++			if (0 == i % 4) \
++				printk("\n\t\t 0x%08x:\t", (unsigned int)buf + i * 4); \
++			printk("%08x ", *(unsigned int *)(buf + i * 4)); \
++		} \
++	} while (0)
++#else
++#define fh_dev_debug(...)
++#define fh_debug_dump(buf, len)
++#endif
++
++
++static int spi_nand_update_bbt(struct mtd_info *mtd, loff_t offs);
++
++static inline uint8_t bbt_get_entry(struct spi_nand_chip *chip, int block)
++{
++	uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
++
++	entry >>= (block & BBT_ENTRY_MASK) * 2;
++	return entry & BBT_ENTRY_MASK;
++}
++
++static inline void bbt_mark_entry(struct spi_nand_chip *chip, int block,
++				  uint8_t mark)
++{
++	uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
++
++	chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
++}
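++
++/*
++ * Worked example: four 2-bit entries are packed per byte, so block 5 lives
++ * in bbt[1] at bit offset 2. Marking it BBT_BLOCK_FACTORY_BAD (0x03) ORs
++ * 0x03 << 2 = 0x0c into that byte; bbt_get_entry() recovers the mark by
++ * shifting right by 2 and masking with BBT_ENTRY_MASK.
++ */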
++
++static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
++{
++	if (memcmp(buf, td->pattern, td->len))
++		return -1;
++	return 0;
++}
++
++/**
++ * check_pattern - [GENERIC] check if a pattern is in the buffer
++ * @buf: the buffer to search
++ * @len: the length of buffer to search
++ * @paglen: the pagelength
++ * @td: search pattern descriptor
++ *
++ * Check for a pattern at the given place. Used to search bad block tables and
++ * good / bad block identifiers.
++ */
++static int check_pattern(uint8_t *buf, int len, int paglen,
++			 struct nand_bbt_descr *td)
++{
++	if (td->options & NAND_BBT_NO_OOB)
++		return check_pattern_no_oob(buf, td);
++
++	/* Compare the pattern */
++	fh_debug_dump(buf + paglen + td->offs, td->len);
++	if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
++		return -1;
++
++	return 0;
++}
++
++/**
++ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
++ * @buf: the buffer to search
++ * @td:	search pattern descriptor
++ *
++ * Check for a pattern at the given place. Used to search bad block tables and
++ * good / bad block identifiers. Same as check_pattern, but no optional empty
++ * check.
++ */
++static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
++{
++	/* Compare the pattern */
++	if (memcmp(buf + td->offs, td->pattern, td->len))
++		return -1;
++	return 0;
++}
++
++/**
++ * add_marker_len - compute the length of the marker in data area
++ * @td: BBT descriptor used for computation
++ *
++ * The length will be 0 if the marker is located in OOB area.
++ */
++static u32 add_marker_len(struct nand_bbt_descr *td)
++{
++	u32 len;
++
++	if (!(td->options & NAND_BBT_NO_OOB))
++		return 0;
++
++	len = td->len;
++	if (td->options & NAND_BBT_VERSION)
++		len++;
++	return len;
++}
++
++static inline int mtd_is_eccerr(int err)
++{
++	return err == -EBADMSG;
++}
++
++static inline int mtd_is_bitflip(int err)
++{
++	return err == -EUCLEAN;
++}
++
++static inline int mtd_is_bitflip_or_eccerr(int err)
++{
++	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
++}
++/**
++ * read_bbt - [GENERIC] Read the bad block table starting from page
++ * @mtd: MTD device structure
++ * @buf: temporary buffer
++ * @page: the starting page
++ * @num: the number of bbt descriptors to read
++ * @td: the bbt description table
++ * @offs: block number offset in the table
++ *
++ * Read the bad block table starting from page.
++ */
++static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
++		    struct nand_bbt_descr *td, int offs)
++{
++	int res, ret = 0, i, j, act = 0;
++	struct spi_nand_chip *this = mtd->priv;
++	size_t retlen, len, totlen;
++	loff_t from;
++	int bits = td->options & NAND_BBT_NRBITS_MSK;
++	uint8_t msk = (uint8_t)((1 << bits) - 1);
++	u32 marker_len;
++	int reserved_block_code = td->reserved_block_code;
++
++	totlen = (num * bits) >> 3;
++	marker_len = add_marker_len(td);
++	from = ((loff_t)page) << this->page_shift;
++
++	while (totlen) {
++		len = min(totlen, (size_t)(1 << this->block_shift));
++		if (marker_len) {
++			/*
++			 * In case the BBT marker is not in the OOB area it
++			 * will be just in the first page.
++			 */
++			len -= marker_len;
++			from += marker_len;
++			marker_len = 0;
++		}
++		res = mtd->read(mtd, from, len, &retlen,
++				buf); /* mtd_read(mtd, from, len, &retlen, buf); */
++		if (res < 0) {
++			if (mtd_is_eccerr(res)) {
++				pr_info("spi_nand_bbt: ECC error in BBT at 0x%012llx\n",
++					from & ~mtd->writesize);
++				return res;
++			} else if (mtd_is_bitflip(res)) {
++				pr_info("spi_nand_bbt: corrected error in BBT at 0x%012llx\n",
++					from & ~mtd->writesize);
++				ret = res;
++			} else {
++				pr_info("spi_nand_bbt: error reading BBT\n");
++				return res;
++			}
++		}
++
++		/* Analyse data */
++		for (i = 0; i < len; i++) {
++			uint8_t dat = buf[i];
++
++			for (j = 0; j < 8; j += bits, act++) {
++				uint8_t tmp = (dat >> j) & msk;
++
++				if (tmp == msk)
++					continue;
++				if (reserved_block_code &&
++				    (tmp == reserved_block_code)) {
++					pr_info("spi_nand_read_bbt: reserved block at 0x%012llx\n",
++						(loff_t)(offs + act) <<
++						this->block_shift);
++					bbt_mark_entry(this, offs + act,
++						       BBT_BLOCK_RESERVED);
++					mtd->ecc_stats.bbtblocks++;
++					continue;
++				}
++				/*
++				 * Leave it for now, if it's matured we can
++				 * move this message to pr_debug.
++				 */
++				pr_info("spi_nand_read_bbt: bad block at 0x%012llx\n",
++					(loff_t)(offs + act) <<
++					this->block_shift);
++				/* Factory marked bad or worn out? */
++				if (tmp == 0)
++					bbt_mark_entry(this, offs + act,
++						       BBT_BLOCK_FACTORY_BAD);
++				else
++					bbt_mark_entry(this, offs + act,
++						       BBT_BLOCK_WORN);
++				mtd->ecc_stats.badblocks++;
++			}
++		}
++		totlen -= len;
++		from += len;
++	}
++	return ret;
++}
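++
++/*
++ * Decode example (illustrative): with 2-bit entries msk is 0x03, and a
++ * table byte of 0xf3 is scanned LSB pair first: 11 (good), 00 (factory
++ * bad), 11, 11 -- so only the second block of that group of four gets
++ * marked.
++ */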
++
++/**
++ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
++ * @mtd: MTD device structure
++ * @buf: temporary buffer
++ * @td: descriptor for the bad block table
++ * @chip: read the table for a specific chip, -1 read all chips; applies only if
++ *        NAND_BBT_PERCHIP option is set
++ *
++ * Read the bad block table for all chips starting at a given page. We assume
++ * that the bbt bits are in consecutive order.
++ */
++static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf,
++			struct nand_bbt_descr *td, int chip)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	int res = 0;
++
++	res = read_bbt(mtd, buf, td->pages[0],
++		       mtd->size >> this->block_shift, td, 0);
++	if (res)
++		return res;
++
++	return 0;
++}
++
++/* BBT marker is in the first page, no OOB */
++static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
++			  struct nand_bbt_descr *td)
++{
++	size_t retlen;
++	size_t len;
++
++	len = td->len;
++	if (td->options & NAND_BBT_VERSION)
++		len++;
++
++	/*return mtd_read(mtd, offs, len, &retlen, buf);*/
++	return mtd->read(mtd, offs, len, &retlen, buf);
++}
++
++/**
++ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
++ * @mtd: MTD device structure
++ * @buf: temporary buffer
++ * @offs: offset at which to scan
++ * @len: length of data region to read
++ *
++ * Scan read data from data+OOB. May traverse multiple pages, interleaving
++ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
++ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
++ */
++static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
++			 size_t len)
++{
++	struct mtd_oob_ops ops;
++	int res, ret = 0;
++	struct spi_nand_chip *chip = mtd->priv;
++	fh_dev_debug(&chip->spi->dev, "Enter %s\n", __func__);
++
++	ops.mode = MTD_OOB_PLACE;
++	ops.ooboffs = 0;
++	ops.ooblen = mtd->oobsize;
++
++	while (len > 0) {
++		ops.datbuf = buf;
++		ops.len = min_t(size_t, len, mtd->writesize);
++		ops.oobbuf = buf + ops.len;
++
++		/*res = mtd_read_oob(mtd, offs, &ops);*/
++		res = mtd->read_oob(mtd, offs, &ops);
++		if (res) {
++			if (!mtd_is_bitflip_or_eccerr(res))
++				return res;
++			else if (mtd_is_eccerr(res) || !ret)
++				ret = res;
++		}
++
++		buf += mtd->oobsize + mtd->writesize;
++		len -= mtd->writesize;
++		offs += mtd->writesize;
++	}
++	return ret;
++}
++
++static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
++		     size_t len, struct nand_bbt_descr *td)
++{
++	if (td->options & NAND_BBT_NO_OOB)
++		return scan_read_data(mtd, buf, offs, td);
++	else
++		return scan_read_oob(mtd, buf, offs, len);
++}
++
++/* Scan write data with oob to flash */
++static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
++			  uint8_t *buf, uint8_t *oob)
++{
++	struct mtd_oob_ops ops;
++
++	ops.mode = MTD_OOB_PLACE;
++	ops.ooboffs = 0;
++	ops.ooblen = mtd->oobsize;
++	ops.datbuf = buf;
++	ops.oobbuf = oob;
++	ops.len = len;
++
++	/*return mtd_write_oob(mtd, offs, &ops);*/
++	return mtd->write_oob(mtd, offs, &ops);
++}
++
++static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
++{
++	u32 ver_offs = td->veroffs;
++
++	if (!(td->options & NAND_BBT_NO_OOB))
++		ver_offs += mtd->writesize;
++	return ver_offs;
++}
++
++/**
++ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
++ * @mtd: MTD device structure
++ * @buf: temporary buffer
++ * @td: descriptor for the bad block table
++ * @md:	descriptor for the bad block table mirror
++ *
++ * Read the bad block table(s) for all chips starting at a given page. We
++ * assume that the bbt bits are in consecutive order.
++ */
++static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
++			  struct nand_bbt_descr *td, struct nand_bbt_descr *md)
++{
++	struct spi_nand_chip *this = mtd->priv;
++
++	/* Read the primary version, if available */
++	if (td->options & NAND_BBT_VERSION) {
++		scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
++			  mtd->writesize, td);
++		td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
++		pr_info("Bad block table at page %d, version 0x%02X\n",
++			td->pages[0], td->version[0]);
++	}
++
++	/* Read the mirror version, if available */
++	if (md && (md->options & NAND_BBT_VERSION)) {
++		scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
++			  mtd->writesize, md);
++		md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
++		pr_info("Bad block table at page %d, version 0x%02X\n",
++			md->pages[0], md->version[0]);
++	}
++}
++
++/* Scan a given block partially */
++static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
++			   loff_t offs, uint8_t *buf, int numpages)
++{
++	struct mtd_oob_ops ops;
++	int j, ret;
++
++	ops.ooblen = mtd->oobsize;
++	ops.oobbuf = buf;
++	ops.ooboffs = 0;
++	ops.datbuf = NULL;
++	ops.mode = MTD_OOB_PLACE;
++
++	for (j = 0; j < numpages; j++) {
++		/*
++		 * Read the full oob until read_oob is fixed to handle single
++		 * byte reads for 16 bit buswidth.
++		 */
++		ret = mtd->read_oob(mtd, offs, &ops);/*mtd_read_oob(mtd, offs, &ops);*/
++		/* Ignore ECC errors when checking for BBM */
++		if (ret && !mtd_is_bitflip_or_eccerr(ret))
++			return ret;
++
++		if (check_short_pattern(buf, bd))
++			return 1;
++
++		offs += mtd->writesize;
++	}
++	return 0;
++}
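++
++/*
++ * Example (illustrative): with the generic scan_ff_pattern descriptor at
++ * the end of this file, the loop above reads the OOB of the first one or
++ * two pages of a block and returns 1 as soon as the bad-block-marker
++ * bytes differ from 0xff.
++ */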
++
++/**
++ * create_bbt - [GENERIC] Create a bad block table by scanning the device
++ * @mtd: MTD device structure
++ * @buf: temporary buffer
++ * @bd: descriptor for the good/bad block search pattern
++ * @chip: create the table for a specific chip, -1 read all chips; applies only
++ *        if NAND_BBT_PERCHIP option is set
++ *
++ * Create a bad block table by scanning the device for the given good/bad block
++ * identify pattern.
++ */
++static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
++		      struct nand_bbt_descr *bd, int chip)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	int i, numblocks, numpages;
++	int startblock;
++	loff_t from;
++	fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
++
++	pr_info("Scanning device for bad blocks\n");
++
++	if (bd->options & NAND_BBT_SCAN2NDPAGE)
++		numpages = 2;
++	else
++		numpages = 1;
++
++	if (chip == -1) {
++		numblocks = mtd->size >> this->block_shift;
++		startblock = 0;
++		from = 0;
++	} else {
++		numblocks = this->size >> this->block_shift;
++		startblock = chip * numblocks;
++		numblocks += startblock;
++		from = (loff_t)startblock << this->block_shift;
++	}
++
++	if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
++		from += mtd->erasesize - (mtd->writesize * numpages);
++
++	for (i = startblock; i < numblocks; i++) {
++		int ret;
++
++		BUG_ON(bd->options & NAND_BBT_NO_OOB);
++
++		ret = scan_block_fast(mtd, bd, from, buf, numpages);
++		if (ret < 0)
++			return ret;
++
++		if (ret) {
++			bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
++			pr_warn("Bad eraseblock %d at 0x%012llx\n",
++				i, (unsigned long long)from);
++			mtd->ecc_stats.badblocks++;
++		}
++
++		from += (1 << this->block_shift);
++	}
++	return 0;
++}
++
++/**
++ * search_bbt - [GENERIC] scan the device for a specific bad block table
++ * @mtd: MTD device structure
++ * @buf: temporary buffer
++ * @td: descriptor for the bad block table
++ *
++ * Read the bad block table by searching for a given ident pattern. Search is
++ * performed either from the beginning up or from the end of the device
++ * downwards. The search starts always at the start of a block. If the option
++ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
++ * the bad block information of this chip. This is necessary to provide support
++ * for certain DOC devices.
++ *
++ * The bbt ident pattern resides in the oob area of the first page in a block.
++ */
++static int search_bbt(struct mtd_info *mtd, uint8_t *buf,
++		      struct nand_bbt_descr *td)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	int i, chips;
++	int startblock, block, dir;
++	int scanlen = mtd->writesize + mtd->oobsize;
++	int bbtblocks;
++	int blocktopage = this->block_shift - this->page_shift;
++
++	fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
++	/* Search direction top -> down? */
++	if (td->options & NAND_BBT_LASTBLOCK) {
++		startblock = (mtd->size >> this->block_shift) - 1;
++		dir = -1;
++	} else {
++		startblock = 0;
++		dir = 1;
++	}
++
++	chips = 1;
++	bbtblocks = mtd->size >> this->block_shift;
++
++	for (i = 0; i < chips; i++) {
++		/* Reset version information */
++		td->version[i] = 0;
++		td->pages[i] = -1;
++		/* Scan the maximum number of blocks */
++		for (block = 0; block < td->maxblocks; block++) {
++
++			int actblock = startblock + dir * block;
++			loff_t offs = (loff_t)actblock << this->block_shift;
++
++			/* Read first page */
++			scan_read(mtd, buf, offs, mtd->writesize, td);
++			fh_dev_debug(&this->spi->dev, "read block %d, first v 0x%08x\n ",
++				     actblock, *(int *)buf);
++			fh_dev_debug(&this->spi->dev, "td pattern:%s, offset %d, len %d\n ",
++				     td->pattern, td->offs,td->len);
++			if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
++				td->pages[i] = actblock << blocktopage;
++				if (td->options & NAND_BBT_VERSION) {
++					offs = bbt_get_ver_offs(mtd, td);
++					td->version[i] = buf[offs];
++				}
++				break;
++			}
++		}
++		startblock += this->size >> this->block_shift;
++	}
++	/* Check, if we found a bbt for each requested chip */
++	for (i = 0; i < chips; i++) {
++		if (td->pages[i] == -1)
++			pr_warn("Bad block table not found for chip %d\n", i);
++		else
++			pr_info("Bad block table found at page %d, version 0x%02X\n",
++				td->pages[i], td->version[i]);
++	}
++	return 0;
++}
++
++/**
++ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
++ * @mtd: MTD device structure
++ * @buf: temporary buffer
++ * @td: descriptor for the bad block table
++ * @md: descriptor for the bad block table mirror
++ *
++ * Search and read the bad block table(s).
++ */
++static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
++			     struct nand_bbt_descr *td,
++			     struct nand_bbt_descr *md)
++{
++	/* Search the primary table */
++	search_bbt(mtd, buf, td);
++
++	/* Search the mirror table */
++	if (md)
++		search_bbt(mtd, buf, md);
++}
++
++/**
++ * write_bbt - [GENERIC] (Re)write the bad block table
++ * @mtd: MTD device structure
++ * @buf: temporary buffer
++ * @td: descriptor for the bad block table
++ * @md: descriptor for the bad block table mirror
++ * @chipsel: selector for a specific chip, -1 for all
++ *
++ * (Re)write the bad block table.
++ */
++static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
++		     struct nand_bbt_descr *td, struct nand_bbt_descr *md,
++		     int chipsel)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	struct erase_info einfo;
++	int i, res, chip = 0;
++	int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
++	int nrchips, pageoffs, ooboffs;
++	uint8_t msk[4];
++	uint8_t rcode = td->reserved_block_code;
++	size_t retlen, len = 0;
++	loff_t to;
++	struct mtd_oob_ops ops;
++	extern int fh_start_debug;
++	fh_start_debug = 1;
++	fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
++
++	ops.ooblen = mtd->oobsize;
++	ops.ooboffs = 0;
++	ops.datbuf = NULL;
++	ops.mode = MTD_OOB_PLACE;
++
++	if (!rcode)
++		rcode = 0xff;
++	numblocks = (int)(mtd->size >> this->block_shift);
++	nrchips = 1;
++
++	/* Loop through the chips */
++	for (; chip < nrchips; chip++) {
++		/*
++		 * There was already a version of the table, reuse the page
++		 * This applies for absolute placement too, as we have the
++		 * page nr. in td->pages.
++		 */
++		if (td->pages[chip] != -1) {
++			page = td->pages[chip];
++			goto write;
++		}
++
++		/*
++		 * Automatic placement of the bad block table. Search direction
++		 * top -> down?
++		 */
++		if (td->options & NAND_BBT_LASTBLOCK) {
++			startblock = numblocks * (chip + 1) - 1;
++			dir = -1;
++		} else {
++			startblock = chip * numblocks;
++			dir = 1;
++		}
++
++		for (i = 0; i < td->maxblocks; i++) {
++			int block = startblock + dir * i;
++			/* Check, if the block is bad */
++			switch (bbt_get_entry(this, block)) {
++			case BBT_BLOCK_WORN:
++			case BBT_BLOCK_FACTORY_BAD:
++				continue;
++			}
++			page = block <<
++			       (this->block_shift - this->page_shift);
++			/* Check, if the block is used by the mirror table */
++			if (!md || md->pages[chip] != page)
++				goto write;
++		}
++		pr_err("No space left to write bad block table\n");
++		return -ENOSPC;
++write:
++
++		/* Set up shift count and masks for the flash table */
++		bits = td->options & NAND_BBT_NRBITS_MSK;
++		msk[2] = ~rcode;
++		switch (bits) {
++		case 1:
++			sft = 3;
++			sftmsk = 0x07;
++			msk[0] = 0x00;
++			msk[1] = 0x01;
++			msk[3] = 0x01;
++			break;
++		case 2:
++			sft = 2;
++			sftmsk = 0x06;
++			msk[0] = 0x00;
++			msk[1] = 0x01;
++			msk[3] = 0x03;
++			break;
++		case 4:
++			sft = 1;
++			sftmsk = 0x04;
++			msk[0] = 0x00;
++			msk[1] = 0x0C;
++			msk[3] = 0x0f;
++			break;
++		case 8:
++			sft = 0;
++			sftmsk = 0x00;
++			msk[0] = 0x00;
++			msk[1] = 0x0F;
++			msk[3] = 0xff;
++			break;
++		default:
++			return -EINVAL;
++		}
++
++		to = ((loff_t)page) << this->page_shift;
++
++		fh_dev_debug(&this->spi->dev, " td.options 0x08%x\n", td->options);
++
++		/* Must we save the block contents? */
++		if (td->options & NAND_BBT_SAVECONTENT) {
++			/* Make it block aligned */
++			to &= ~((loff_t)((1 << this->block_shift) - 1));
++			len = 1 << this->block_shift;
++			res = mtd->read(mtd, to, len, &retlen,
++					buf); /* mtd_read(mtd, to, len, &retlen, buf); */
++			if (res < 0) {
++				if (retlen != len) {
++					pr_info("spi_nand_bbt: error reading block ");
++					pr_info("for writing the bad block table\n");
++					return res;
++				}
++				pr_warn("spi_nand_bbt: ECC error while reading ");
++				pr_warn("block for writing bad block table\n");
++			}
++			/* Read oob data */
++			ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
++			ops.oobbuf = &buf[len];
++			res = mtd->read_oob(mtd, to + mtd->writesize,
++					    &ops); /*mtd_read_oob(mtd, to + mtd->writesize, &ops);*/
++			if (res < 0 || ops.oobretlen != ops.ooblen)
++				goto outerr;
++
++			/* Calc the byte offset in the buffer */
++			pageoffs = page - (int)(to >> this->page_shift);
++			offs = pageoffs << this->page_shift;
++			/* Preset the bbt area with 0xff */
++			memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
++			ooboffs = len + (pageoffs * mtd->oobsize);
++
++		} else if (td->options & NAND_BBT_NO_OOB) {
++			ooboffs = 0;
++			offs = td->len;
++			/* The version byte */
++			if (td->options & NAND_BBT_VERSION)
++				offs++;
++			/* Calc length */
++			len = (size_t)(numblocks >> sft);
++			len += offs;
++			/* Make it page aligned! */
++			len = ALIGN(len, mtd->writesize);
++			/* Preset the buffer with 0xff */
++			memset(buf, 0xff, len);
++			/* Pattern is located at the beginning of the first page */
++			memcpy(buf, td->pattern, td->len);
++		} else {
++			/* Calc length */
++			len = (size_t)(numblocks >> sft);
++			/* Make it page aligned! */
++			len = ALIGN(len, mtd->writesize);
++			/* Preset the buffer with 0xff */
++			memset(buf, 0xff, len +
++			       (len >> this->page_shift) * mtd->oobsize);
++			offs = 0;
++			ooboffs = len;
++			/* Pattern is located in oob area of first page */
++			memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
++
++			/*fh_debug_dump(td->pattern, td->len);*/
++		}
++
++		if (td->options & NAND_BBT_VERSION)
++			buf[ooboffs + td->veroffs] = td->version[chip];
++
++		/* Walk through the memory table */
++		for (i = 0; i < numblocks; i++) {
++			uint8_t dat;
++			int sftcnt = (i << (3 - sft)) & sftmsk;
++
++			dat = bbt_get_entry(this, chip * numblocks + i);
++			/* Do not store the reserved bbt blocks! */
++			buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
++		}
++
++		memset(&einfo, 0, sizeof(einfo));
++		einfo.mtd = mtd;
++		einfo.addr = to;
++		einfo.len = 1 << this->block_shift;
++		res = __spi_nand_erase(mtd, &einfo, 1);
++		if (res < 0)
++			goto outerr;
++
++		/*fh_debug_dump(&buf[ooboffs],20);*/
++		res = scan_write_bbt(mtd, to, len, buf,
++				     td->options & NAND_BBT_NO_OOB ? NULL :
++				     &buf[len]);
++
++		if (res < 0)
++			goto outerr;
++
++		pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
++			(unsigned long long)to, td->version[chip]);
++
++		/* Mark it as used */
++		td->pages[chip] = page;
++	}
++	return 0;
++
++outerr:
++	pr_warn("spi_nand_bbt: error while writing bad block table %d\n", res);
++	return res;
++}
++
++/**
++ * spi_nand_memory_bbt - [GENERIC] create a memory based bad block table
++ * @mtd: MTD device structure
++ * @bd: descriptor for the good/bad block search pattern
++ *
++ * The function creates a memory based bbt by scanning the device for
++ * manufacturer / software marked good / bad blocks.
++ */
++static inline int spi_nand_memory_bbt(struct mtd_info *mtd,
++				      struct nand_bbt_descr *bd)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
++
++	return create_bbt(mtd, this->buf, bd, -1);
++}
++
++/**
++ * check_create - [GENERIC] create and write bbt(s) if necessary
++ * @mtd: MTD device structure
++ * @buf: temporary buffer
++ * @bd: descriptor for the good/bad block search pattern
++ *
++ * The function checks the results of the previous call to read_bbt and creates
++ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
++ * for the chip/device. Update is necessary if one of the tables is missing or
++ * the version nr. of one table is less than the other.
++ */
++static int check_create(struct mtd_info *mtd, uint8_t *buf,
++			struct nand_bbt_descr *bd)
++{
++	int i, chips, writeops, create, chipsel, res, res2;
++	struct spi_nand_chip *this = mtd->priv;
++	struct nand_bbt_descr *td = this->bbt_td;
++	struct nand_bbt_descr *md = this->bbt_md;
++	struct nand_bbt_descr *rd, *rd2;
++
++	chips = 1;
++
++	for (i = 0; i < chips; i++) {
++		writeops = 0;
++		create = 0;
++		rd = NULL;
++		rd2 = NULL;
++		res = res2 = 0;
++		/* Per chip or per device? */
++		chipsel = -1;
++		/* Mirrored table available? */
++		if (md) {
++			if (td->pages[i] == -1 && md->pages[i] == -1) {
++				create = 1;
++				writeops = 0x03;
++			} else if (td->pages[i] == -1) {
++				rd = md;
++				writeops = 0x01;
++			} else if (md->pages[i] == -1) {
++				rd = td;
++				writeops = 0x02;
++			} else if (td->version[i] == md->version[i]) {
++				rd = td;
++				if (!(td->options & NAND_BBT_VERSION))
++					rd2 = md;
++			} else if (((int8_t)(td->version[i] - md->version[i]))
++				   > 0) {
++				rd = td;
++				writeops = 0x02;
++			} else {
++				rd = md;
++				writeops = 0x01;
++			}
++		} else {
++			if (td->pages[i] == -1) {
++				create = 1;
++				writeops = 0x01;
++			} else
++				rd = td;
++		}
++
++		if (create) {
++			/* Create the bad block table by scanning the device? */
++			if (!(td->options & NAND_BBT_CREATE))
++				continue;
++
++			/* Create the table in memory by scanning the chip(s) */
++			/*xxx: create it; if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))*/
++			create_bbt(mtd, buf, bd, chipsel);
++
++			td->version[i] = 1;
++			if (md)
++				md->version[i] = 1;
++		}
++
++		/* Read back first? */
++		if (rd) {
++			res = read_abs_bbt(mtd, buf, rd, chipsel);
++			if (mtd_is_eccerr(res)) {
++				/* Mark table as invalid */
++				rd->pages[i] = -1;
++				rd->version[i] = 0;
++				i--;
++				continue;
++			}
++		}
++		/* If they weren't versioned, read both */
++		if (rd2) {
++			res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
++			if (mtd_is_eccerr(res2)) {
++				/* Mark table as invalid */
++				rd2->pages[i] = -1;
++				rd2->version[i] = 0;
++				i--;
++				continue;
++			}
++		}
++
++		/* Scrub the flash table(s)? */
++		if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
++			writeops = 0x03;
++
++		/* Update version numbers before writing */
++		if (md) {
++			td->version[i] = max(td->version[i], md->version[i]);
++			md->version[i] = td->version[i];
++		}
++
++		/* Write the bad block table to the device? */
++		if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
++			res = write_bbt(mtd, buf, td, md, chipsel);
++			if (res < 0)
++				return res;
++		}
++
++		/* Write the mirror bad block table to the device? */
++		if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
++			res = write_bbt(mtd, buf, md, td, chipsel);
++			if (res < 0)
++				return res;
++		}
++	}
++	return 0;
++}
++
++/**
++ * mark_bbt_region - [GENERIC] mark the bad block table regions
++ * @mtd: MTD device structure
++ * @td: bad block table descriptor
++ *
++ * The bad block table regions are marked as "bad" to prevent accidental
++ * erasures / writes. The regions are identified by the mark 0x02.
++ */
++static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	int i, j, chips, block, nrblocks, update;
++	uint8_t oldval;
++
++	chips = 1;
++	nrblocks = (int)(mtd->size >> this->block_shift);
++
++	for (i = 0; i < chips; i++) {
++		if ((td->options & NAND_BBT_ABSPAGE) ||
++		    !(td->options & NAND_BBT_WRITE)) {
++			if (td->pages[i] == -1)
++				continue;
++			block = td->pages[i] >>
++				(this->block_shift - this->page_shift);
++			oldval = bbt_get_entry(this, block);
++			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
++			if ((oldval != BBT_BLOCK_RESERVED) &&
++			    td->reserved_block_code)
++				spi_nand_update_bbt(mtd, (loff_t)block <<
++						    this->block_shift);
++			continue;
++		}
++		update = 0;
++		if (td->options & NAND_BBT_LASTBLOCK)
++			block = ((i + 1) * nrblocks) - td->maxblocks;
++		else
++			block = i * nrblocks;
++		for (j = 0; j < td->maxblocks; j++) {
++			oldval = bbt_get_entry(this, block);
++			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
++			if (oldval != BBT_BLOCK_RESERVED)
++				update = 1;
++			block++;
++		}
++		/*
++		 * If we want reserved blocks to be recorded to flash, and some
++		 * new ones have been marked, then we need to update the stored
++		 * bbts.  This should only happen once.
++		 */
++		if (update && td->reserved_block_code)
++			spi_nand_update_bbt(mtd, (loff_t)(block - 1) <<
++					    this->block_shift);
++	}
++	fh_dev_debug(&this->spi->dev, "Leave %s\n", __func__);
++}
++
++/**
++ * verify_bbt_descr - verify the bad block description
++ * @mtd: MTD device structure
++ * @bd: the table to verify
++ *
++ * This function performs a few sanity checks on the bad block description
++ * table.
++ */
++static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	u32 pattern_len;
++	u32 bits;
++	u32 table_size;
++
++	if (!bd)
++		return;
++
++	pattern_len = bd->len;
++	bits = bd->options & NAND_BBT_NRBITS_MSK;
++
++	BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
++	       !(this->bbt_options & NAND_BBT_USE_FLASH));
++	BUG_ON(!bits);
++
++	if (bd->options & NAND_BBT_VERSION)
++		pattern_len++;
++
++	if (bd->options & NAND_BBT_NO_OOB) {
++		BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
++		BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
++		BUG_ON(bd->offs);
++		if (bd->options & NAND_BBT_VERSION)
++			BUG_ON(bd->veroffs != bd->len);
++		BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
++	}
++
++	table_size = mtd->size >> this->block_shift;
++	table_size >>= 3;
++	table_size *= bits;
++	if (bd->options & NAND_BBT_NO_OOB)
++		table_size += pattern_len;
++	BUG_ON(table_size > (1 << this->block_shift));
++}
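++
++/*
++ * Worked size check (illustrative): a 128 MiB device with 128 KiB blocks
++ * has 1024 blocks; at 2 bits per block the on-flash table is
++ * 1024 / 8 * 2 = 256 bytes, well below the one-eraseblock ceiling
++ * enforced above.
++ */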
++
++/**
++ * spi_nand_scan_bbt - [SPI-NAND Interface] scan, find, read and maybe create
++ * bad block table(s)
++ * @mtd: MTD device structure
++ * @bd: descriptor for the good/bad block search pattern
++ *
++ * The function checks whether a bad block table is already available. If
++ * not, it scans the device for manufacturer-marked good/bad blocks and
++ * writes the bad block table(s) to the selected place.
++ *
++ * The bad block table memory is allocated here. It must be freed by calling
++ * the spi_nand_free_bbt function.
++ */
++int spi_nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	int len, res = 0;
++	uint8_t *buf;
++	struct nand_bbt_descr *td = this->bbt_td;
++	struct nand_bbt_descr *md = this->bbt_md;
++
++	fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
++	len = mtd->size >> (this->block_shift + 2);
++	/*
++	 * Allocate memory (2 bits per block) and clear the in-memory bad block
++	 * table.
++	 */
++	this->bbt = kzalloc(len, GFP_KERNEL);
++	if (!this->bbt)
++		return -ENOMEM;
++
++	/*
++	 * If no primary table descriptor is given, scan the device to build a
++	 * memory based bad block table.
++	 */
++	if (!td) {
++		res = spi_nand_memory_bbt(mtd, bd);
++		if (res) {
++			pr_err("spi_nand_bbt: can't scan flash and build the RAM-based BBT\n");
++			kfree(this->bbt);
++			this->bbt = NULL;
++		}
++		return res;
++	}
++	verify_bbt_descr(mtd, td);
++	verify_bbt_descr(mtd, md);
++
++	/* Allocate a temporary buffer for one eraseblock incl. oob */
++	len = (1 << this->block_shift);
++	len += (len >> this->page_shift) * mtd->oobsize;
++	buf = vmalloc(len);
++	if (!buf) {
++		kfree(this->bbt);
++		this->bbt = NULL;
++		return -ENOMEM;
++	}
++
++	/* Is the bbt at a given page? */
++	if (td->options & NAND_BBT_ABSPAGE) {
++		read_abs_bbts(mtd, buf, td, md);
++	} else {
++		/* Search the bad block table using a pattern in oob */
++		search_read_bbts(mtd, buf, td, md);
++	}
++
++	res = check_create(mtd, buf, bd);
++
++	/* Prevent the bbt regions from erasing / writing */
++	mark_bbt_region(mtd, td);
++	if (md)
++		mark_bbt_region(mtd, md);
++
++	vfree(buf);
++	return res;
++}
++EXPORT_SYMBOL(spi_nand_scan_bbt);
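++
++/*
++ * Usage sketch (illustrative, not part of this patch): a chip driver
++ * will normally let spi_nand_default_bbt() below choose the
++ * descriptors, but spi_nand_scan_bbt() can also be called directly
++ * with a custom search pattern. my_bd and my_ff are hypothetical
++ * names.
++ *
++ *	static uint8_t my_ff[] = { 0xff };
++ *	static struct nand_bbt_descr my_bd = {
++ *		.offs = 0,
++ *		.len = 1,
++ *		.pattern = my_ff,
++ *	};
++ *
++ *	int err = spi_nand_scan_bbt(mtd, &my_bd);
++ */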
++
++/**
++ * spi_nand_update_bbt - update bad block table(s)
++ * @mtd: MTD device structure
++ * @offs: the offset of the newly marked block
++ *
++ * The function updates the bad block table(s).
++ */
++static int spi_nand_update_bbt(struct mtd_info *mtd, loff_t offs)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	int len, res = 0;
++	int chip, chipsel;
++	uint8_t *buf;
++	struct nand_bbt_descr *td = this->bbt_td;
++	struct nand_bbt_descr *md = this->bbt_md;
++
++	if (!this->bbt || !td)
++		return -EINVAL;
++
++	/* Allocate a temporary buffer for one eraseblock incl. oob */
++	len = (1 << this->block_shift);
++	len += (len >> this->page_shift) * mtd->oobsize;
++	buf = kmalloc(len, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	chip = 0;
++	chipsel = -1;
++
++	td->version[chip]++;
++	if (md)
++		md->version[chip]++;
++
++	/* Write the bad block table to the device? */
++	if (td->options & NAND_BBT_WRITE) {
++		res = write_bbt(mtd, buf, td, md, chipsel);
++		if (res < 0)
++			goto out;
++	}
++	/* Write the mirror bad block table to the device? */
++	if (md && (md->options & NAND_BBT_WRITE))
++		res = write_bbt(mtd, buf, md, td, chipsel);
++
++out:
++	kfree(buf);
++	return res;
++}
++
++/*
++ * Define some generic bad / good block scan patterns which are used
++ * while scanning a device for factory marked good / bad blocks.
++ */
++static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
++
++/* Generic flash bbt descriptors */
++static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
++static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
++
++static struct nand_bbt_descr bbt_main_descr = {
++	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
++	| NAND_BBT_2BIT | NAND_BBT_VERSION,
++	.offs = 4, //.offs = 8,
++	.len = 4,
++	.veroffs = 2, //.veroffs = 12,
++	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
++	.pattern = bbt_pattern
++};
++
++static struct nand_bbt_descr bbt_mirror_descr = {
++	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
++	| NAND_BBT_2BIT | NAND_BBT_VERSION,
++	.offs = 4, //.offs = 8,
++	.len = 4,
++	.veroffs = 2, //.veroffs = 12,
++	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
++	.pattern = mirror_pattern
++};
++
++static struct nand_bbt_descr bbt_main_no_oob_descr = {
++	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
++	| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_NO_OOB,
++	.len = 4,
++	.veroffs = 4,
++	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
++	.pattern = bbt_pattern
++};
++
++static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
++	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
++	| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_NO_OOB,
++	.len = 4,
++	.veroffs = 4,
++	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
++	.pattern = mirror_pattern
++};
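++
++/*
++ * Note: with NAND_BBT_LASTBLOCK the main and mirror tables are
++ * searched for in the last maxblocks blocks of the device, using the
++ * "Bbt0" / "1tbB" signatures above; the version byte allows the newer
++ * copy to win when the two tables disagree.
++ */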
++
++#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
++/**
++ * spi_nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
++ * @this: SPI-NAND chip to create descriptor for
++ *
++ * This function allocates and initializes a nand_bbt_descr for BBM detection
++ * based on the properties of @this. The new descriptor is stored in
++ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
++ * passed to this function.
++ */
++static int spi_nand_create_badblock_pattern(struct spi_nand_chip *this)
++{
++	struct nand_bbt_descr *bd;
++
++	if (this->badblock_pattern) {
++		pr_warn("Bad block pattern already allocated; not replacing\n");
++		return -EINVAL;
++	}
++	bd = kzalloc(sizeof(*bd), GFP_KERNEL);
++	if (!bd)
++		return -ENOMEM;
++	bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
++	bd->offs = this->badblockpos;
++	bd->len = 1;
++	bd->pattern = scan_ff_pattern;
++	bd->options |= NAND_BBT_DYNAMICSTRUCT;
++	this->badblock_pattern = bd;
++	return 0;
++}
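++
++/*
++ * The resulting descriptor means, in effect: "a block is good if the
++ * OOB byte at this->badblockpos reads 0xff". With badblockpos == 0
++ * this is the conventional first-OOB-byte bad block marker.
++ */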
++
++/**
++ * spi_nand_default_bbt - [SPI-NAND Interface] Select a default bad block table for the device
++ * @mtd: MTD device structure
++ *
++ * This function selects the default bad block table support for the device and
++ * calls the spi_nand_scan_bbt function.
++ */
++int spi_nand_default_bbt(struct mtd_info *mtd)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	int ret;
++
++	fh_dev_debug(&this->spi->dev, "Enter %s\n", __func__);
++
++	fh_dev_debug(&this->spi->dev, "\tbbt option %x\n", this->bbt_options);
++	/* Is a flash based bad block table requested? */
++	if (this->bbt_options & NAND_BBT_USE_FLASH) {
++		/* Use the default pattern descriptors */
++		if (!this->bbt_td) {
++			if (this->bbt_options & NAND_BBT_NO_OOB) {
++				this->bbt_td = &bbt_main_no_oob_descr;
++				this->bbt_md = &bbt_mirror_no_oob_descr;
++			} else {
++				this->bbt_td = &bbt_main_descr;
++				this->bbt_md = &bbt_mirror_descr;
++			}
++		}
++	} else {
++		this->bbt_td = NULL;
++		this->bbt_md = NULL;
++	}
++
++	if (!this->badblock_pattern) {
++		ret = spi_nand_create_badblock_pattern(this);
++		if (ret)
++			return ret;
++	}
++
++	fh_dev_debug(&this->spi->dev, "badblock pattern 0x%02x\n",
++		     *this->badblock_pattern->pattern);
++	return spi_nand_scan_bbt(mtd, this->badblock_pattern);
++}
++EXPORT_SYMBOL(spi_nand_default_bbt);
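++
++/*
++ * Call-order sketch (hypothetical probe path): the BBT is set up
++ * after identification and MTD wiring, e.g.
++ *
++ *	spi_nand_scan_ident(mtd);
++ *	spi_nand_scan_tail(mtd);
++ *	chip->bbt_options |= NAND_BBT_USE_FLASH;
++ *	spi_nand_default_bbt(mtd);
++ */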
++
++/**
++ * spi_nand_isbad_bbt - [SPI-NAND Interface] Check if a block is bad
++ * @mtd: MTD device structure
++ * @offs: offset in the device
++ * @allowbbt: allow access to bad block table region
++ */
++int spi_nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	int block, res;
++
++	block = (int)(offs >> this->block_shift);
++	res = bbt_get_entry(this, block);
++
++	pr_debug("%s: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
++		 __func__, (unsigned int)offs, block, res);
++
++	switch (res) {
++	case BBT_BLOCK_GOOD:
++		return 0;
++	case BBT_BLOCK_WORN:
++		return 1;
++	case BBT_BLOCK_RESERVED:
++		return allowbbt ? 0 : 1;
++	}
++	return 1;
++}
++EXPORT_SYMBOL(spi_nand_isbad_bbt);
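++
++/*
++ * Worked example of the offset-to-block mapping above (assuming a
++ * 128 KiB erase block, i.e. block_shift == 17): offs = 0x60000 maps
++ * to block 3, so a worn block 3 makes every offset in
++ * [0x60000, 0x7ffff] report bad.
++ */
++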
++/**
++ * spi_nand_markbad_bbt - [SPI-NAND Interface] Mark a block bad in the BBT
++ * @mtd: MTD device structure
++ * @offs: offset of the bad block
++ */
++int spi_nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
++{
++	struct spi_nand_chip *this = mtd->priv;
++	int block, ret = 0;
++
++	block = (int)(offs >> this->block_shift);
++
++	/* Mark bad block in memory */
++	bbt_mark_entry(this, block, BBT_BLOCK_WORN);
++
++	/* Update flash-based bad block table */
++	if (this->bbt_options & NAND_BBT_USE_FLASH)
++		ret = spi_nand_update_bbt(mtd, offs);
++
++	return ret;
++}
++EXPORT_SYMBOL(spi_nand_markbad_bbt);
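++
++/*
++ * Minimal usage sketch (hypothetical caller): mark a block bad and
++ * confirm the in-memory table reflects it.
++ *
++ *	spi_nand_markbad_bbt(mtd, offs);
++ *	WARN_ON(!spi_nand_isbad_bbt(mtd, offs, 0));
++ */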
+diff --git a/drivers/mtd/spi-nand/spi-nand-device.c b/drivers/mtd/spi-nand/spi-nand-device.c
+new file mode 100644
+index 00000000..174bd6f5
+--- /dev/null
++++ b/drivers/mtd/spi-nand/spi-nand-device.c
+@@ -0,0 +1,276 @@
++/*
++ * Copyright (c) 2009-2014 Micron Technology, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++#include <linux/mtd/nand.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/flash.h>
++#include <linux/mtd/spi-nand.h>
++#include "spi-nand-ids.h"
++
++#ifdef SPINAND_BBT_DEBUG
++#define fh_dev_debug	dev_err
++#else
++#define fh_dev_debug(...)
++#endif
++
++static int spi_nand_read_id(struct spi_nand_chip *chip, u8 *buf)
++{
++	struct spi_device *spi = chip->spi;
++	struct spi_nand_cmd cmd = { 0 };
++
++	cmd.cmd = SPINAND_CMD_READ_ID;
++	cmd.n_rx = SPINAND_MAX_ID_LEN;
++	cmd.rx_buf = buf;
++
++	return spi_nand_send_cmd(spi, &cmd);
++}
++
++static void spi_nand_ecc_status(struct spi_nand_chip *chip, unsigned int status,
++				      unsigned int *corrected, unsigned int *ecc_error)
++{
++	unsigned int ecc_status = (status >> SPI_NAND_ECC_SHIFT) &
++				  chip->ecc_mask;
++
++	*ecc_error = (ecc_status >= chip->ecc_uncorr);
++	if (*ecc_error == 0)
++		*corrected = ecc_status;
++}
++
++static void spi_nand_mt29f_ecc_status(unsigned int status,
++				      unsigned int *corrected, unsigned int *ecc_error)
++{
++	unsigned int ecc_status = (status >> SPI_NAND_MT29F_ECC_SHIFT) &
++				  SPI_NAND_MT29F_ECC_MASK;
++
++	*ecc_error = (ecc_status == SPI_NAND_MT29F_ECC_UNCORR);
++	if (*ecc_error == 0)
++		*corrected = ecc_status;
++}
++
++static void spi_nand_gd5f_ecc_status(unsigned int status,
++				     unsigned int *corrected, unsigned int *ecc_error)
++{
++	unsigned int ecc_status = (status >> SPI_NAND_GD5F_ECC_SHIFT) &
++				  SPI_NAND_GD5F_ECC_MASK;
++
++	*ecc_error = (ecc_status == SPI_NAND_GD5F_ECC_UNCORR);
++	/*TODO fix corrected bits*/
++	if (*ecc_error == 0)
++		*corrected = ecc_status;
++}
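++
++/*
++ * Consumption sketch (hypothetical read path): after a page read the
++ * status register is decoded through the chip's hook, and the result
++ * feeds the MTD ECC statistics.
++ *
++ *	unsigned int corrected = 0, ecc_error = 0;
++ *
++ *	chip->get_ecc_status(chip, status, &corrected, &ecc_error);
++ *	if (ecc_error)
++ *		mtd->ecc_stats.failed++;
++ *	else
++ *		mtd->ecc_stats.corrected += corrected;
++ */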
++
++/*static int spi_nand_manufacture_init(struct spi_nand_chip *chip)
++{
++	switch (chip->mfr_id) {
++	case SPINAND_MFR_MICRON:
++		chip->get_ecc_status = spi_nand_mt29f_ecc_status;
++
++		if (chip->page_spare_size == 64)
++			chip->ecclayout = &micron_ecc_layout_64;
++
++		chip->bbt_options |= NAND_BBT_NO_OOB;
++		break;
++	case SPINAND_MFR_GIGADEVICE:
++		chip->get_ecc_status = spi_nand_gd5f_ecc_status;
++		chip->read_cache = spi_nand_read_from_cache_snor_protocol;
++		chip->ecc_strength_ds = 8;
++		chip->ecc_step_ds = chip->page_size >> 2;
++		if (chip->page_spare_size == 128)
++			chip->ecclayout = &gd5f_ecc_layout_128;
++		else if (chip->page_spare_size == 256)
++			chip->ecclayout = &gd5f_ecc_layout_256;
++
++		break;
++	default:
++		break;
++	}
++
++	return 0;
++}*/
++
++static int spi_nand_device_probe(struct spi_device *spi)
++{
++	struct spi_nand_chip *chip;
++	enum spi_nand_device_variant variant;
++	struct mtd_info *mtd;
++	/*	struct mtd_part_parser_data ppdata;*/
++	struct mtd_partition *parts = NULL;
++	int nr_parts = 0;
++	int ret, i;
++	struct flash_platform_data	*data;
++
++	fh_dev_debug(&spi->dev, "%s with spi%d:%d\n", __func__,
++		     spi->master->bus_num, spi->chip_select);
++
++	data = spi->dev.platform_data;
++	chip = kzalloc(sizeof(struct spi_nand_chip), GFP_KERNEL);
++	if (!chip) {
++		ret = -ENOMEM;
++		goto err1;
++	}
++	chip->spi = spi;
++
++	mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
++	if (!mtd) {
++		ret = -ENOMEM;
++		goto err2;
++	}
++	mtd->priv = chip;
++	chip->mtd = mtd;
++	spi_set_drvdata(spi, chip);
++	/*
++	 * The Read ID command format can differ between manufacturers;
++	 * for example, Micron SPI NAND needs one extra dummy byte after
++	 * the Read ID command, while GigaDevice parts do not.
++	 *
++	 * Specifying the device manufacturer in the device tree is
++	 * therefore obligatory.
++	 */
++/*	variant = spi_get_device_id(spi)->driver_data;
++	switch (variant) {
++	case SPI_NAND_MT29F:
++		chip->read_id = spi_nand_mt29f_read_id;
++		break;
++	case SPI_NAND_GD5F:
++		chip->read_id = spi_nand_gd5f_read_id;
++		break;
++	default:
++		dev_err(&spi->dev, "unknown device, id %d\n", variant);
++		ret = -ENODEV;
++		goto err3;
++	}*/
++
++	chip->read_id = spi_nand_read_id;
++	ret = spi_nand_scan_ident(mtd);
++	if (ret) {
++		ret = -ENODEV;
++		goto err3;
++	}
++
++/*	spi_nand_manufacture_init(chip);*/
++	chip->get_ecc_status = spi_nand_ecc_status;
++
++	ret = spi_nand_scan_tail(mtd);
++	if (ret) {
++		fh_dev_debug(&spi->dev, "goto err4 %s\n", __func__);
++		goto err4;
++	}
++
++	/* Partitions should match sector boundaries, and it may be a good
++	 * idea to use read-only partitions for write-protected sectors
++	 * (BP2..BP0).
++	 */
++	mtd->name = "spi0.0";
++	if (mtd_has_cmdlinepart()) {
++		static const char *part_probes[] = { "cmdlinepart", NULL, };
++
++		nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
++	}
++
++	if (nr_parts <= 0 && data && data->parts) {
++		parts = data->parts;
++		nr_parts = data->nr_parts;
++	}
++
++	if (nr_parts > 0) {
++		for (i = 0; i < nr_parts; i++) {
++			DEBUG(MTD_DEBUG_LEVEL2,
++			      "partitions[%d] = " "{.name = %s, .offset = 0x%llx, "
++			      ".size = 0x%llx (%lldKiB) }\n",
++			      i, parts[i].name, (long long)parts[i].offset,
++			      (long long)parts[i].size,
++			      (long long)(parts[i].size >> 10));
++		}
++	}
++
++	fh_dev_debug(&spi->dev, " mtd_device_register %s\n", __func__);
++	ret = mtd_device_register(mtd, parts, nr_parts);
++	if (!ret)
++		return 0;
++
++	fh_dev_debug(&spi->dev, " spi_nand_scan_tail_release %s\n", __func__);
++	spi_nand_scan_tail_release(mtd);
++	fh_dev_debug(&spi->dev, "Leave %s\n", __func__);
++err4:
++	spi_nand_scan_ident_release(mtd);
++err3:
++	kfree(mtd);
++err2:
++	kfree(chip);
++err1:
++	return ret;
++}
++
++int spi_nand_device_remove(struct spi_device *spi)
++{
++	struct spi_nand_chip *chip = spi_get_drvdata(spi);
++	struct mtd_info *mtd = chip->mtd;
++
++	spi_nand_release(mtd);
++	kfree(mtd);
++	kfree(chip);
++
++	return 0;
++}
++
++const struct spi_device_id spi_nand_id_table[] = {
++	{ "spi-nand", SPI_NAND_GENERIC},
++	{ "mt29f", SPI_NAND_MT29F },
++	{ "gd5f", SPI_NAND_GD5F },
++	{ },
++};
++MODULE_DEVICE_TABLE(spi, spi_nand_id_table);
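++
++/*
++ * Board-file sketch (hypothetical, for platforms not using DT): the
++ * "spi-nand" entry above is matched by registering a spi_board_info,
++ * e.g.
++ *
++ *	static struct spi_board_info my_board_info __initdata = {
++ *		.modalias	= "spi-nand",
++ *		.max_speed_hz	= 50 * 1000 * 1000,
++ *		.bus_num	= 0,
++ *		.chip_select	= 0,
++ *	};
++ *
++ *	spi_register_board_info(&my_board_info, 1);
++ */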
++
++/**
++ * module_spi_driver() - Helper macro for registering a SPI driver
++ * @__spi_driver: spi_driver struct
++ *
++ * Helper macro for SPI drivers which do not do anything special in module
++ * init/exit. This eliminates a lot of boilerplate. Each module may only
++ * use this macro once, and calling it replaces module_init() and module_exit()
++ */
++#define module_spi_driver(__spi_driver) \
++	module_driver(__spi_driver, spi_register_driver, \
++			spi_unregister_driver)
++
++static struct spi_driver spi_nand_device_driver = {
++	.driver = {
++		.name	= "spi-nand",
++		.bus	= &spi_bus_type,
++		.owner	= THIS_MODULE,
++	},
++	.id_table = spi_nand_id_table,
++	.probe	= spi_nand_device_probe,
++	.remove	= spi_nand_device_remove,
++};
++
++static int __init spi_nand_init(void)
++{
++	return spi_register_driver(&spi_nand_device_driver);
++}
++
++static void __exit spi_nand_exit(void)
++{
++	spi_unregister_driver(&spi_nand_device_driver);
++}
++
++module_init(spi_nand_init);
++module_exit(spi_nand_exit);
++
++MODULE_DESCRIPTION("SPI NAND device");
++MODULE_AUTHOR("Peter Pan<peterpandong at micron.com>");
++MODULE_AUTHOR("Ezequiel Garcia <ezequiel.garcia at imgtec.com>");
++MODULE_LICENSE("GPL v2");
++
+diff --git a/drivers/mtd/spi-nand/spi-nand-ids.c b/drivers/mtd/spi-nand/spi-nand-ids.c
+new file mode 100644
+index 00000000..a4d86462
+--- /dev/null
++++ b/drivers/mtd/spi-nand/spi-nand-ids.c
+@@ -0,0 +1,287 @@
++/*
++ * Copyright (c) 2016 Fullhan, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/mtd/spi-nand.h>
++
++/*static struct spi_nand_flash spi_nand_table[] = {
++	SPI_NAND_INFO("MT29F2G01AAAED", 0x2C, 0X22, 2048, 64, 64, 2048,
++	SPINAND_NEED_PLANE_SELECT),
++	SPI_NAND_INFO("MT29F4G01AAADD", 0x2C, 0X32, 2048, 64, 64, 4096,
++	SPINAND_NEED_PLANE_SELECT),
++	SPI_NAND_INFO("GD5F 512MiB 1.8V", 0xC8, 0XA4, 4096, 256, 64, 2048,
++	0),
++	SPI_NAND_INFO("GD5F 512MiB 3.3V", 0xC8, 0XB4, 4096, 256, 64, 2048,
++	0),
++	SPI_NAND_INFO("GD5F 256MiB 3.3V", 0xC8, 0XB2, 2048, 128, 64, 2048,
++	0),
++	SPI_NAND_INFO("GD5F 128MiB 3.3V", 0xC8, 0XB1, 2048, 128, 64, 1024,
++	0),
++	SPI_NAND_INFO("W25N01GV", 0xEF, 0XAA21, 2048, 64, 64, 1024,
++	0),
++	{.name = NULL},
++};*/
++
++/**
++*  Default OOB area specification layout
++*/
++static struct nand_ecclayout ecc_layout_64 = {
++	.eccbytes = 32,
++	.eccpos = {
++		8, 9, 10, 11, 12, 13, 14, 15,
++		24, 25, 26, 27, 28, 29, 30, 31,
++		40, 41, 42, 43, 44, 45, 46, 47,
++		56, 57, 58, 59, 60, 61, 62, 63
++	},
++	.oobavail = 30,
++	.oobfree = {
++		{
++			.offset = 2,
++			.length = 6
++		}, {
++			.offset = 16,
++			.length = 8
++		}, {
++			.offset = 32,
++			.length = 8
++		}, {
++			.offset = 48,
++			.length = 8
++		},
++	}
++};
++
++static struct nand_ecclayout gd5f_ecc_layout_256 = {
++	.eccbytes = 128,
++	.eccpos = {
++		128, 129, 130, 131, 132, 133, 134, 135,
++		136, 137, 138, 139, 140, 141, 142, 143,
++		144, 145, 146, 147, 148, 149, 150, 151,
++		152, 153, 154, 155, 156, 157, 158, 159,
++		160, 161, 162, 163, 164, 165, 166, 167,
++		168, 169, 170, 171, 172, 173, 174, 175,
++		176, 177, 178, 179, 180, 181, 182, 183,
++		184, 185, 186, 187, 188, 189, 190, 191,
++		192, 193, 194, 195, 196, 197, 198, 199,
++		200, 201, 202, 203, 204, 205, 206, 207,
++		208, 209, 210, 211, 212, 213, 214, 215,
++		216, 217, 218, 219, 220, 221, 222, 223,
++		224, 225, 226, 227, 228, 229, 230, 231,
++		232, 233, 234, 235, 236, 237, 238, 239,
++		240, 241, 242, 243, 244, 245, 246, 247,
++		248, 249, 250, 251, 252, 253, 254, 255
++	},
++	.oobavail = 127,
++	.oobfree = { {1, 127} }
++};
++
++static struct nand_ecclayout gd5f_ecc_layout_128 = {
++	.eccbytes = 64,
++	.eccpos = {
++		64, 65, 66, 67, 68, 69, 70, 71,
++		72, 73, 74, 75, 76, 77, 78, 79,
++		80, 81, 82, 83, 84, 85, 86, 87,
++		88, 89, 90, 91, 92, 93, 94, 95,
++		96, 97, 98, 99, 100, 101, 102, 103,
++		104, 105, 106, 107, 108, 109, 110, 111,
++		112, 113, 114, 115, 116, 117, 118, 119,
++		120, 121, 122, 123, 124, 125, 126, 127,
++	},
++	.oobavail = 62,
++	.oobfree = { {2, 62} }
++};
++
++static struct nand_ecclayout pn26_ecc_layout_128 = {
++	.eccbytes = 52,
++	.eccpos = {
++		6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
++		21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
++		36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
++		51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63
++	},
++	.oobavail = 72,
++	.oobfree = {
++			{
++				.offset = 4,
++				.length = 2
++			}, {
++				.offset = 19,
++				.length = 2
++			}, {
++				.offset = 34,
++				.length = 2
++			}, {
++				.offset = 49,
++				.length = 2
++			},
++			{
++				.offset = 64,
++				.length = 64
++			},
++		}
++};
++
++static struct nand_ecclayout default_ecc_layout = {
++	.eccbytes = 64,
++	.oobavail = 28,
++	.oobfree = { { 2, 30 } }
++};
++
++static struct nand_ecclayout mx35_ecc_layout_64 = {
++	.eccbytes = 0,
++	.oobavail = 62,
++	.oobfree = { {2, 62} }
++};
++
++
++static struct spi_nand_flash spi_nand_table[] = {
++	{
++		.name = "W25N01GV",
++		.id_info = {
++			.id_addr = 0,
++			.id_len = 3,
++		},
++		.dev_id = {0xEF, 0xAA, 0x21},
++		.page_size = 2048,
++		.page_spare_size = 64,
++		.pages_per_blk = 64,
++		.blks_per_chip = 1024,
++		.options = 0,
++		.ecc_mask = 3,
++		.ecc_uncorr = 2,
++		.ecc_layout = &ecc_layout_64,
++	},
++	{
++		.name = "MX35LF1GE4AB",
++		.id_info = {
++			.id_addr = 0,
++			.id_len = 2,
++		},
++		.dev_id = {0xC2, 0x12},
++		.page_size = 2048,
++		.page_spare_size = 64,
++		.pages_per_blk = 64,
++		.blks_per_chip = 1024,
++		.options = 0,
++		.ecc_mask = 3,
++		.ecc_uncorr = 2,
++	},
++	{
++		.name = "MX35LF2GE4AB",
++		.id_info = {
++			.id_addr = 0,
++			.id_len = 2,
++		},
++		.dev_id = {0xC2, 0x22},
++		.page_size = 2048,
++		.page_spare_size = 64,
++		.pages_per_blk = 64,
++		.blks_per_chip = 2048,
++		.options = SPINAND_NEED_PLANE_SELECT,
++		.ecc_mask = 3,
++		.ecc_uncorr = 2,
++		.ecc_layout = &mx35_ecc_layout_64,
++	},
++	{
++		.name = "GD5F1GQ4U",
++		.id_info = {
++			.id_addr = SPI_NAND_ID_NO_DUMMY,
++			.id_len = 3,
++		},
++		.dev_id = {0xC8, 0xB1, 0x48},
++		.page_size = 2048,
++		.page_spare_size = 128,
++		.pages_per_blk = 64,
++		.blks_per_chip = 1024,
++		.options = 0,
++		.ecc_mask = 7,
++		.ecc_uncorr = 7,
++		.ecc_layout = &gd5f_ecc_layout_128,
++	},
++	{
++		.name = "GD5F2GQ4U",
++		.id_info = {
++			.id_addr = SPI_NAND_ID_NO_DUMMY,
++			.id_len = 3,
++		},
++		.dev_id = {0xC8, 0xB2, 0x48},
++		.page_size = 2048,
++		.page_spare_size = 128,
++		.pages_per_blk = 64,
++		.blks_per_chip = 1024,
++		.options = 0,
++		.ecc_mask = 7,
++		.ecc_uncorr = 7,
++	},
++	{
++		.name = "PN26G01A",
++		.id_info = {
++			.id_addr = 0x0,
++			.id_len = 2,
++		},
++		.dev_id = {0xA1, 0xE1},
++		.page_size = 2048,
++		.page_spare_size = 128,
++		.pages_per_blk = 64,
++		.blks_per_chip = 1024,
++		.options = 0,
++		.ecc_mask = 3,
++		.ecc_uncorr = 2,
++		.ecc_layout = &pn26_ecc_layout_128,
++	},
++};
++
++/**
++ * spi_nand_scan_id_table - [INTERN] scan chip info in id table
++ * @chip: SPI-NAND device structure
++ * @id: point to manufacture id and device id
++ */
++bool spi_nand_scan_id_table(struct spi_nand_chip *chip, u8 *id)
++{
++	int i, j, m;
++	struct spi_nand_flash *type = spi_nand_table;
++
++	for (m = 0; m < ARRAY_SIZE(spi_nand_table); m++, type++) {
++		/* Skip the leading dummy byte in the ID buffer unless the
++		 * chip reports its ID without one. */
++		for (j = 0, i = (type->id_info.id_addr == SPI_NAND_ID_NO_DUMMY) ? 0 : 1;
++		     j < type->id_info.id_len; j++, i++) {
++			if (id[i] != type->dev_id[j])
++				break;
++		}
++		if (j == type->id_info.id_len) {
++			chip->name = type->name;
++			chip->size = type->page_size * type->pages_per_blk
++				     * type->blks_per_chip;
++			chip->block_size = type->page_size
++					   * type->pages_per_blk;
++			chip->page_size = type->page_size;
++			chip->page_spare_size = type->page_spare_size;
++			chip->block_shift = ilog2(chip->block_size);
++			chip->page_shift = ilog2(chip->page_size);
++			chip->page_mask = chip->page_size - 1;
++			chip->options = type->options;
++			if (!type->ecc_layout)
++				chip->ecclayout = &default_ecc_layout;
++			else
++				chip->ecclayout = type->ecc_layout;
++			chip->dev_id_len = type->id_info.id_len;
++			chip->ecc_uncorr = type->ecc_uncorr;
++			chip->ecc_mask = type->ecc_mask;
++			memcpy(chip->dev_id, type->dev_id, chip->dev_id_len);
++			return true;
++		}
++	}
++	return false;
++}
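++
++/*
++ * Matching example (values taken from the table above): for a
++ * W25N01GV, id_info.id_addr is 0, so the first byte returned by
++ * READ ID is treated as a dummy and matching starts at id[1]; a
++ * buffer of { <dummy>, 0xEF, 0xAA, 0x21 } therefore matches. For a
++ * GD5F1GQ4U (SPI_NAND_ID_NO_DUMMY) matching starts at id[0].
++ */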
+diff --git a/drivers/mtd/spi-nand/spi-nand-ids.h b/drivers/mtd/spi-nand/spi-nand-ids.h
+new file mode 100644
+index 00000000..c488eee5
+--- /dev/null
++++ b/drivers/mtd/spi-nand/spi-nand-ids.h
+@@ -0,0 +1,28 @@
++/*
++ * Copyright (c) 2016 Fullhan, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++
++#ifndef DRIVERS_MTD_SPI_NAND_SPI_NAND_IDS_H_
++#define DRIVERS_MTD_SPI_NAND_SPI_NAND_IDS_H_
++
++enum spi_nand_device_variant {
++	SPI_NAND_GENERIC,
++	SPI_NAND_MT29F,
++	SPI_NAND_GD5F,
++};
++
++
++bool spi_nand_scan_id_table(struct spi_nand_chip *chip, u8 *id);
++
++#endif /* DRIVERS_MTD_SPI_NAND_SPI_NAND_IDS_H_ */
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index 93359fab..828c6445 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -984,6 +984,8 @@ config DM9000
+ 
+ 	  To compile this driver as a module, choose M here.  The module
+ 	  will be called dm9000.
++
++source "drivers/net/fh_gmac/Kconfig"
+ 
+ config DM9000_DEBUGLEVEL
+ 	int "DM9000 maximum debug level"
+@@ -2542,6 +2544,7 @@ config S6GMAC
+ 
+ source "drivers/net/stmmac/Kconfig"
+ 
++
+ config PCH_GBE
+ 	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+ 	depends on PCI
+@@ -3450,4 +3453,6 @@ config VMXNET3
+ 	  To compile this driver as a module, choose M here: the
+ 	  module will be called vmxnet3.
+ 
++
++
+ endif # NETDEVICES
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index d5ce0115..7bc4daa1 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -254,6 +254,7 @@ obj-$(CONFIG_SMSC911X) += smsc911x.o
+ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
+ obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
+ obj-$(CONFIG_DM9000) += dm9000.o
++obj-$(CONFIG_FH_GMAC) += fh_gmac/
+ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
+ obj-$(CONFIG_MLX4_CORE) += mlx4/
+diff --git a/drivers/net/fh_gmac/Kconfig b/drivers/net/fh_gmac/Kconfig
+new file mode 100644
+index 00000000..55134815
+--- /dev/null
++++ b/drivers/net/fh_gmac/Kconfig
+@@ -0,0 +1,21 @@
++config FH_GMAC
++	tristate "FH 10/100 Ethernet driver"
++	select MII
++	select PHYLIB
++	select CRC32
++	depends on NETDEVICES && HAS_IOMEM
++	help
++	  This is the driver for Ethernet IPs built around a Synopsys
++	  IP core.
++
++if FH_GMAC
++
++config FH_GMAC_DA
++	bool "FH GMAC DMA arbitration scheme"
++	default n
++	help
++	  If this option is selected, RX has priority over TX (only for
++	  Gigabit Ethernet devices).
++	  By default, the DMA arbitration scheme is round-robin
++	  (RX:TX priority is 1:1).
++endif
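++
++# Example .config fragment (illustrative): build the driver in with the
++# default round-robin arbitration scheme:
++#
++#   CONFIG_FH_GMAC=y
++#   # CONFIG_FH_GMAC_DA is not set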
+diff --git a/drivers/net/fh_gmac/Makefile b/drivers/net/fh_gmac/Makefile
+new file mode 100644
+index 00000000..e22c42f1
+--- /dev/null
++++ b/drivers/net/fh_gmac/Makefile
+@@ -0,0 +1,5 @@
++
++
++obj-$(CONFIG_FH_GMAC) += fh_gmac.o
++
++fh_gmac-objs := fh_gmac_dma.o fh_gmac_main.o fh_gmac_ethtool.o fh_gmac_phyt.o
+diff --git a/drivers/net/fh_gmac/fh_gmac.h b/drivers/net/fh_gmac/fh_gmac.h
+new file mode 100644
+index 00000000..513fd6a1
+--- /dev/null
++++ b/drivers/net/fh_gmac/fh_gmac.h
+@@ -0,0 +1,245 @@
++/*
++ * fh_gmac.h
++ *
++ *  Created on: May 22, 2014
++ *      Author: duobao
++ */
++
++#ifndef FH_GMAC_H_
++#define FH_GMAC_H_
++
++#include <linux/phy.h>
++#include <linux/etherdevice.h>
++
++#include "fh_gmac_phyt.h"
++#include "fh_gmac_dma.h"
++
++//GMAC-MAC
++#define		REG_GMAC_CONFIG				(0x0000)
++#define		REG_GMAC_FRAME_FILTER		(0x0004)
++#define		REG_GMAC_HASH_HIGH			(0x0008)
++#define		REG_GMAC_HASH_LOW			(0x000C)
++#define		REG_GMAC_GMII_ADDRESS		(0x0010)
++#define		REG_GMAC_GMII_DATA			(0x0014)
++#define		REG_GMAC_FLOW_CTRL			(0x0018)
++#define		REG_GMAC_DEBUG				(0x0024)
++#define		REG_GMAC_MAC_HIGH 			(0x0040)
++#define		REG_GMAC_MAC_LOW 			(0x0044)
++//GMAC-DMA
++#define		REG_GMAC_BUS_MODE			(0x1000)
++#define		REG_GMAC_TX_POLL_DEMAND		(0x1004)
++#define		REG_GMAC_RX_POLL_DEMAND		(0x1008)
++#define		REG_GMAC_RX_DESC_ADDR		(0x100C)
++#define		REG_GMAC_TX_DESC_ADDR		(0x1010)
++#define		REG_GMAC_STATUS				(0x1014)
++#define		REG_GMAC_OP_MODE			(0x1018)
++#define		REG_GMAC_INTR_EN			(0x101C)
++#define		REG_GMAC_ERROR_COUNT		(0x1020)
++#define		REG_GMAC_AXI_BUS_MODE		(0x1028)
++#define		REG_GMAC_AXI_STATUS			(0x102C)
++#define		REG_GMAC_CURR_TX_DESC		(0x1048)
++#define		REG_GMAC_CURR_RX_DESC		(0x104C)
++
++enum tx_dma_irq_status {
++	tx_hard_error = 1,
++	tx_hard_error_bump_tc = 2,
++	handle_tx_rx = 3,
++};
++
++enum rx_frame_status {
++	good_frame = 0,
++	discard_frame = 1,
++	csum_none = 2,
++	llc_snap = 4,
++};
++
++#define GMAC_MIN_ETHPKTSIZE		(60) /* Minimum ethernet pkt size */
++#define GMAC_MAX_FRAME_SIZE		(1500 + 14 + 4 + 4)
++
++#define BUFFER_SIZE_2K	2048
++#define BUFFER_SIZE_4K	4096
++#define BUFFER_SIZE_8K	8192
++#define BUFFER_SIZE_16K	16384
++
++#ifdef FH_GMAC_DMA_DEBUG
++#define GMAC_DMA_DBG(fmt, args...)  printk(fmt, ## args)
++#else
++#define GMAC_DMA_DBG(fmt, args...)  do { } while (0)
++#endif
++
++#ifdef FH_GMAC_XMIT_DEBUG
++#define TX_DBG(fmt, args...)  printk(fmt, ## args)
++#else
++#define TX_DBG(fmt, args...)  do { } while (0)
++#endif
++
++#ifdef FH_GMAC_RX_DEBUG
++#define RX_DBG(fmt, args...)  printk(fmt, ## args)
++#else
++#define RX_DBG(fmt, args...)  do { } while (0)
++#endif
++
++#define FH_GMAC_DEBUG	(	NETIF_MSG_DRV | \
++								NETIF_MSG_PROBE | \
++								NETIF_MSG_LINK | \
++								NETIF_MSG_TIMER | \
++								NETIF_MSG_IFDOWN | \
++								NETIF_MSG_IFUP | \
++								NETIF_MSG_RX_ERR | \
++								NETIF_MSG_TX_ERR | \
++								NETIF_MSG_TX_QUEUED | \
++								NETIF_MSG_INTR | \
++								NETIF_MSG_TX_DONE | \
++								NETIF_MSG_RX_STATUS | \
++								NETIF_MSG_PKTDATA | \
++								NETIF_MSG_HW | \
++								NETIF_MSG_WOL			)
++
++enum {
++	gmac_gmii_clock_60_100,
++	gmac_gmii_clock_100_150,
++	gmac_gmii_clock_20_35,
++	gmac_gmii_clock_35_60,
++	gmac_gmii_clock_150_250,
++	gmac_gmii_clock_250_300
++};
++
++enum {
++	gmac_interrupt_all = 0x0001ffff,
++	gmac_interrupt_none = 0x0
++};
++
++typedef struct Gmac_Stats {
++	/* Transmit errors */
++	unsigned long tx_underflow ____cacheline_aligned;
++	unsigned long tx_carrier;
++	unsigned long tx_losscarrier;
++	unsigned long tx_heartbeat;
++	unsigned long tx_deferred;
++	unsigned long tx_vlan;
++	unsigned long tx_jabber;
++	unsigned long tx_frame_flushed;
++	unsigned long tx_payload_error;
++	unsigned long tx_ip_header_error;
++	/* Receive errors */
++	unsigned long rx_desc;
++	unsigned long rx_partial;
++	unsigned long rx_runt;
++	unsigned long rx_toolong;
++	unsigned long rx_collision;
++	unsigned long rx_crc;
++	unsigned long rx_length;
++	unsigned long rx_mii;
++	unsigned long rx_multicast;
++	unsigned long rx_gmac_overflow;
++	unsigned long rx_watchdog;
++	unsigned long da_rx_filter_fail;
++	unsigned long sa_rx_filter_fail;
++	unsigned long rx_missed_cntr;
++	unsigned long rx_overflow_cntr;
++	/* Tx/Rx IRQ errors */
++	unsigned long tx_undeflow_irq;
++	unsigned long tx_process_stopped_irq;
++	unsigned long tx_jabber_irq;
++	unsigned long rx_overflow_irq;
++	unsigned long rx_buf_unav_irq;
++	unsigned long rx_process_stopped_irq;
++	unsigned long rx_watchdog_irq;
++	unsigned long tx_early_irq;
++	unsigned long fatal_bus_error_irq;
++	/* Extra info */
++	unsigned long threshold;
++	unsigned long tx_pkt_n;
++	unsigned long rx_pkt_n;
++	unsigned long poll_n;
++	unsigned long sched_timer_n;
++	unsigned long normal_irq_n;
++}Gmac_Stats;
++
++typedef struct Gmac_Object {
++	Gmac_Tx_DMA_Descriptors* tx_dma_descriptors	____cacheline_aligned;
++	Gmac_Rx_DMA_Descriptors* rx_dma_descriptors;
++	int full_duplex;			//read only
++	int speed_100m;				//read only
++
++	struct sk_buff_head rx_recycle;
++	struct sk_buff** rx_skbuff;
++	struct sk_buff** tx_skbuff;
++	dma_addr_t* rx_skbuff_dma;
++	__u32 cur_rx;
++	__u32 dirty_rx;
++	__u32 cur_tx;
++	__u32 dirty_tx;
++	dma_addr_t tx_bus_addr;
++	dma_addr_t rx_bus_addr;
++	__u32 dma_tx_size;
++	__u32 dma_rx_size;
++	__u32 dma_buf_sz;
++
++	spinlock_t lock;
++
++	void __iomem *remap_addr;
++	__u8 local_mac_address[6];
++	__u32 msg_enable;
++	struct device* dev;
++	struct net_device* ndev;
++	struct platform_device* pdev;
++	struct napi_struct napi;
++	struct mii_bus *mii;
++	struct phy_device *phydev;
++	Gmac_Stats stats;
++
++	int oldlink;
++	int speed;
++	int oldduplex;
++	__u32 flow_ctrl;
++	__u32 pause;
++
++	int wolopts;
++	int wolenabled;
++
++	int phy_interface;
++	struct fh_gmac_platform_data* priv_data;
++
++	struct clk* clk;
++
++}Gmac_Object;
++
++#define TX_TIMEO 5000 /* default 5 seconds */
++#define DMA_RX_SIZE 256
++#define DMA_TX_SIZE 256
++#define FLOW_OFF 0
++#define FLOW_RX		4
++#define FLOW_TX		2
++#define FLOW_AUTO	(FLOW_TX | FLOW_RX)
++#define PAUSE_TIME 0x200
++
++int fh_mdio_register(struct net_device *ndev);
++int fh_mdio_unregister(struct net_device *ndev);
++
++void GMAC_DMA_StartTx(Gmac_Object* pGmac);
++void GMAC_DMA_StopTx(Gmac_Object* pGmac);
++void GMAC_DMA_StartRx(Gmac_Object* pGmac);
++void GMAC_DMA_StopRx(Gmac_Object* pGmac);
++
++void fh_gmac_set_ethtool_ops(struct net_device *netdev);
++
++void GMAC_DMA_InitDescRings(struct net_device *ndev);
++int GMAC_DMA_Init(struct net_device *ndev, __u32 dma_tx, __u32 dma_rx);
++void GMAC_DMA_InitRxDesc(Gmac_Rx_DMA_Descriptors* desc, unsigned int size);
++void GMAC_DMA_InitTxDesc(Gmac_Tx_DMA_Descriptors* desc, unsigned int size);
++void GMAC_DMA_OpMode(Gmac_Object* pGmac);
++void GMAC_DMA_FreeDesc(Gmac_Object* pGmac);
++void GMAC_DMA_FreeRxSkbufs(Gmac_Object* pGmac);
++void GMAC_DMA_FreeTxSkbufs(Gmac_Object* pGmac);
++void GMAC_DMA_DisplayRxDesc(Gmac_Rx_DMA_Descriptors* desc, int size);
++void GMAC_DMA_DisplayTxDesc(Gmac_Tx_DMA_Descriptors* desc, int size);
++int GMAC_DMA_Interrupt(Gmac_Object* pGmac);
++int GMAC_DMA_TxStatus(Gmac_Object* pGmac, Gmac_Tx_DMA_Descriptors* desc);
++int GMAC_DMA_RxStatus(Gmac_Object* pGmac, Gmac_Rx_DMA_Descriptors* desc);
++void GMAC_DMA_ReleaseTxDesc(Gmac_Tx_DMA_Descriptors* desc);
++void GMAC_DMA_DiagnosticFrame(void *data, Gmac_Object* pGmac);
++void GMAC_FlowCtrl(Gmac_Object * pGmac, unsigned int duplex, unsigned int fc,
++		   unsigned int pause_time);
++
++#endif /* FH_GMAC_H_ */
+diff --git a/drivers/net/fh_gmac/fh_gmac_dma.c b/drivers/net/fh_gmac/fh_gmac_dma.c
+new file mode 100644
+index 00000000..57cfa47e
+--- /dev/null
++++ b/drivers/net/fh_gmac/fh_gmac_dma.c
+@@ -0,0 +1,519 @@
++#include <linux/io.h>
++#include <linux/dma-mapping.h>
++#include <asm/dma-mapping.h>
++#include <mach/fh_gmac.h>
++#include "fh_gmac.h"
++#include "fh_gmac_dma.h"
++
++void GMAC_DMA_ReleaseTxDesc(Gmac_Tx_DMA_Descriptors * desc)
++{
++	int ter = desc->desc1.bit.end_of_ring;
++	desc->desc0.dw = 0;
++	desc->desc1.dw = 0;
++	/* set termination field */
++	desc->desc1.bit.end_of_ring = ter;
++}
++
++void GMAC_DMA_DisplayRxDesc(Gmac_Rx_DMA_Descriptors * desc, int size)
++{
++	int i;
++	for (i = 0; i < size; i++) {
++		pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x DES2=0x%x DES3=0x%x",
++			i, (__u32)&desc[i], desc[i].desc0.dw,
++			desc[i].desc1.dw, desc[i].desc2.dw, desc[i].desc3.dw);
++		pr_info("\n");
++	}
++}
++
++void GMAC_DMA_DisplayTxDesc(Gmac_Tx_DMA_Descriptors * desc, int size)
++{
++	int i;
++	pr_info("Tx desc:\n");
++	for (i = 0; i < size; i++) {
++		pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
++			i, (__u32)&desc[i], desc[i].desc0.dw,
++			desc[i].desc1.dw, desc[i].desc2.dw, desc[i].desc3.dw);
++		pr_info("\n");
++	}
++}
++
++void GMAC_DMA_InitRxDesc(Gmac_Rx_DMA_Descriptors * desc, __u32 size)
++{
++	int i;
++	for (i = 0; i < size; i++) {
++		desc->desc0.bit.own = 1;
++		desc->desc1.bit.buffer1_size = BUFFER_SIZE_2K - 1;
++		if (i == size - 1) {
++			desc->desc1.bit.end_of_ring = 1;
++		}
++		desc++;
++	}
++}
++
++void GMAC_DMA_InitTxDesc(Gmac_Tx_DMA_Descriptors * desc, __u32 size)
++{
++	int i;
++	for (i = 0; i < size; i++) {
++		desc->desc0.bit.own = 0;
++		if (i == size - 1) {
++			desc->desc1.bit.end_of_ring = 1;
++		}
++		desc++;
++	}
++}
++
++void GMAC_DMA_OpMode(Gmac_Object * pGmac)
++{
++	/* Operation mode (CSR6): transmit store-and-forward enabled
++	 * (bit 21); receive store-and-forward (bit 25) left disabled. */
++	writel(0 << 25 | 1 << 21 | 0 << 2 | 0 << 14,
++	       pGmac->remap_addr + REG_GMAC_OP_MODE);
++}
++
++void GMAC_DMA_InitDescRings(struct net_device *ndev)
++{
++	int i;
++	Gmac_Object *pGmac = netdev_priv(ndev);
++	struct sk_buff *skb;
++	__u32 txsize = pGmac->dma_tx_size;
++	__u32 rxsize = pGmac->dma_rx_size;
++	__u32 bfsize = pGmac->dma_buf_sz;
++
++	pGmac->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
++	pGmac->rx_skbuff =
++	    kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
++	pGmac->rx_dma_descriptors =
++	    (Gmac_Rx_DMA_Descriptors *) dma_alloc_coherent(pGmac->dev,
++							   rxsize *
++							   sizeof
++							   (Gmac_Rx_DMA_Descriptors),
++							   &pGmac->rx_bus_addr,
++							   GFP_KERNEL);
++	pGmac->tx_skbuff =
++	    kmalloc(sizeof(struct sk_buff *) * txsize, GFP_KERNEL);
++	pGmac->tx_dma_descriptors =
++	    (Gmac_Tx_DMA_Descriptors *) dma_alloc_coherent(pGmac->dev,
++							   txsize *
++							   sizeof
++							   (Gmac_Tx_DMA_Descriptors),
++							   &pGmac->tx_bus_addr,
++							   GFP_KERNEL);
++
++	if ((pGmac->rx_dma_descriptors == NULL)
++	    || (pGmac->tx_dma_descriptors == NULL)) {
++		pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
++		return;
++	}
++
++	pr_debug("fh gmac (%s) DMA desc rings: virt addr (Rx %p, "
++		 "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
++		 ndev->name, pGmac->rx_dma_descriptors,
++		 pGmac->tx_dma_descriptors, (__u32) pGmac->rx_bus_addr,
++		 (__u32) pGmac->tx_bus_addr);
++
++	for (i = 0; i < rxsize; i++) {
++		Gmac_Rx_DMA_Descriptors *desc = pGmac->rx_dma_descriptors + i;
++
++		skb = netdev_alloc_skb_ip_align(ndev, bfsize);
++		if (unlikely(skb == NULL)) {
++			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
++			break;
++		}
++		pGmac->rx_skbuff[i] = skb;
++		pGmac->rx_skbuff_dma[i] =
++		    dma_map_single(pGmac->dev, skb->data, bfsize,
++				   DMA_FROM_DEVICE);
++
++		desc->desc2.dw = pGmac->rx_skbuff_dma[i];
++	}
++	pGmac->cur_rx = 0;
++	pGmac->dirty_rx = (__u32) (i - rxsize);
++
++	pGmac->dma_buf_sz = bfsize;
++
++	/* TX INITIALIZATION */
++	for (i = 0; i < txsize; i++) {
++		pGmac->tx_skbuff[i] = NULL;
++		pGmac->tx_dma_descriptors[i].desc2.dw = 0;
++	}
++	pGmac->dirty_tx = 0;
++	pGmac->cur_tx = 0;
++
++	/* Clear the Rx/Tx descriptors */
++	GMAC_DMA_InitRxDesc(pGmac->rx_dma_descriptors, rxsize);
++	GMAC_DMA_InitTxDesc(pGmac->tx_dma_descriptors, txsize);
++#ifdef FH_GMAC_DMA_DEBUG
++	if (netif_msg_hw(pGmac)) {
++		pr_info("RX descriptor ring:\n");
++		GMAC_DMA_DisplayRxDesc(pGmac->rx_dma_descriptors, rxsize);
++		pr_info("TX descriptor ring:\n");
++		GMAC_DMA_DisplayTxDesc(pGmac->tx_dma_descriptors, txsize);
++	}
++#endif
++}
++
++void GMAC_DMA_FreeRxSkbufs(Gmac_Object * pGmac)
++{
++	int i;
++
++	for (i = 0; i < pGmac->dma_rx_size; i++) {
++		if (pGmac->rx_skbuff[i]) {
++			dma_unmap_single(pGmac->dev, pGmac->rx_skbuff_dma[i],
++					 pGmac->dma_buf_sz, DMA_FROM_DEVICE);
++			dev_kfree_skb_any(pGmac->rx_skbuff[i]);
++		}
++		pGmac->rx_skbuff[i] = NULL;
++	}
++}
++
++void GMAC_DMA_FreeTxSkbufs(Gmac_Object * pGmac)
++{
++	int i;
++
++	for (i = 0; i < pGmac->dma_tx_size; i++) {
++		if (pGmac->tx_skbuff[i] != NULL) {
++			Gmac_Tx_DMA_Descriptors *desc =
++			    pGmac->tx_dma_descriptors + i;
++			if (desc->desc2.dw) {
++				__u32 size;
++				size = desc->desc1.bit.buffer1_size;
++				dma_unmap_single(pGmac->dev, desc->desc2.dw,
++						 size, DMA_TO_DEVICE);
++			}
++			dev_kfree_skb_any(pGmac->tx_skbuff[i]);
++			pGmac->tx_skbuff[i] = NULL;
++		}
++	}
++}
++
++void GMAC_DMA_FreeDesc(Gmac_Object * pGmac)
++{
++	/* Release the DMA TX/RX socket buffers */
++	GMAC_DMA_FreeRxSkbufs(pGmac);
++	GMAC_DMA_FreeTxSkbufs(pGmac);
++
++	/* Free the region of consistent memory previously allocated for
++	 * the DMA */
++	dma_free_coherent(pGmac->dev,
++			  pGmac->dma_tx_size * sizeof(Gmac_Tx_DMA_Descriptors),
++			  pGmac->tx_dma_descriptors, pGmac->tx_bus_addr);
++	dma_free_coherent(pGmac->dev,
++			  pGmac->dma_rx_size * sizeof(Gmac_Tx_DMA_Descriptors),
++			  pGmac->rx_dma_descriptors, pGmac->rx_bus_addr);
++	kfree(pGmac->rx_skbuff_dma);
++	kfree(pGmac->rx_skbuff);
++	kfree(pGmac->tx_skbuff);
++}
++
++int GMAC_DMA_Init(struct net_device *ndev, __u32 dma_tx, __u32 dma_rx)
++{
++	Gmac_Object *pGmac = netdev_priv(ndev);
++#ifdef GMAC_RESET
++	int limit;
++
++	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_BUS_MODE);
++	reg |= 0x1;
++	writel(reg, pGmac->remap_addr + REG_GMAC_BUS_MODE);
++
++	limit = 10;
++	while (limit--) {
++		if (!(readl(pGmac->remap_addr + REG_GMAC_BUS_MODE) & 0x1)) {
++			break;
++		}
++		mdelay(10);
++	}
++	if (limit < 0)
++		return -EBUSY;
++#endif
++	/* Initialize DMA bus mode (CSR0): PBL = 32, 8xPBL disabled,
++	 * no address-aligned beats, no fixed burst. */
++	writel(0 << 25 | 0 << 24 | 0 << 16 | 32 << 8,
++	       pGmac->remap_addr + REG_GMAC_BUS_MODE);
++	/* Enable NIS/AIS plus the TI, RI, UNF and FBI sources */
++	writel(0x1a061, pGmac->remap_addr + REG_GMAC_INTR_EN);
++	/* The base address of the RX/TX descriptor lists must be written into
++	 * DMA CSR3 and CSR4, respectively. */
++	writel(dma_rx, pGmac->remap_addr + REG_GMAC_RX_DESC_ADDR);
++	writel(dma_tx, pGmac->remap_addr + REG_GMAC_TX_DESC_ADDR);
++
++	return 0;
++}
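++
++/*
++ * Bring-up order sketch (hypothetical open() path): the descriptor
++ * rings must exist before the DMA is programmed with their bus
++ * addresses.
++ *
++ *	GMAC_DMA_InitDescRings(ndev);
++ *	GMAC_DMA_Init(ndev, pGmac->tx_bus_addr, pGmac->rx_bus_addr);
++ *	GMAC_DMA_StartTx(pGmac);
++ *	GMAC_DMA_StartRx(pGmac);
++ */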
++
++void GMAC_DMA_StartTx(Gmac_Object * pGmac)
++{
++	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_OP_MODE);
++	reg |= 1 << 13;
++	writel(reg, pGmac->remap_addr + REG_GMAC_OP_MODE);
++}
++
++void GMAC_DMA_StopTx(Gmac_Object * pGmac)
++{
++	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_OP_MODE);
++	reg &= ~(1 << 13);
++	writel(reg, pGmac->remap_addr + REG_GMAC_OP_MODE);
++}
++
++void GMAC_DMA_StartRx(Gmac_Object * pGmac)
++{
++	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_OP_MODE);
++	reg |= 1 << 1;
++	writel(reg, pGmac->remap_addr + REG_GMAC_OP_MODE);
++}
++
++void GMAC_DMA_StopRx(Gmac_Object * pGmac)
++{
++	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_OP_MODE);
++	reg &= ~(1 << 1);
++	writel(reg, pGmac->remap_addr + REG_GMAC_OP_MODE);
++}
++
++#ifdef FH_GMAC_DMA_DEBUG
++static void GMAC_DMA_ShowTxState(__u32 status)
++{
++	__u32 state;
++	state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
++
++	switch (state) {
++	case 0:
++		pr_info("- TX (Stopped): Reset or Stop command\n");
++		break;
++	case 1:
++		pr_info("- TX (Running):Fetching the Tx desc\n");
++		break;
++	case 2:
++		pr_info("- TX (Running): Waiting for end of tx\n");
++		break;
++	case 3:
++		pr_info("- TX (Running): Reading the data "
++			"and queuing the data into the Tx buf\n");
++		break;
++	case 6:
++		pr_info("- TX (Suspended): Tx Buff Underflow "
++			"or an unavailable Transmit descriptor\n");
++		break;
++	case 7:
++		pr_info("- TX (Running): Closing Tx descriptor\n");
++		break;
++	default:
++		break;
++	}
++}
++
++static void GMAC_DMA_ShowRxState(__u32 status)
++{
++	__u32 state;
++	state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
++
++	switch (state) {
++	case 0:
++		pr_info("- RX (Stopped): Reset or Stop command\n");
++		break;
++	case 1:
++		pr_info("- RX (Running): Fetching the Rx desc\n");
++		break;
++	case 2:
++		pr_info("- RX (Running):Checking for end of pkt\n");
++		break;
++	case 3:
++		pr_info("- RX (Running): Waiting for Rx pkt\n");
++		break;
++	case 4:
++		pr_info("- RX (Suspended): Unavailable Rx buf\n");
++		break;
++	case 5:
++		pr_info("- RX (Running): Closing Rx descriptor\n");
++		break;
++	case 6:
++		pr_info("- RX(Running): Flushing the current frame"
++			" from the Rx buf\n");
++		break;
++	case 7:
++		pr_info("- RX (Running): Queuing the Rx frame"
++			" from the Rx buf into memory\n");
++		break;
++	default:
++		break;
++	}
++}
++#endif
++
++int GMAC_DMA_Interrupt(Gmac_Object * pGmac)
++{
++	int ret = 0;
++	Gmac_Stats *gmac_stats = &pGmac->stats;
++	/* read the status register (CSR5) */
++	__u32 intr_status;
++	intr_status = readl(pGmac->remap_addr + REG_GMAC_STATUS);
++
++	GMAC_DMA_DBG("%s: [GMAC_STATUS: 0x%08x]\n", __func__, intr_status);
++#ifdef FH_GMAC_DMA_DEBUG
++	/* It displays the DMA process states (CSR5 register) */
++	GMAC_DMA_ShowTxState(intr_status);
++	GMAC_DMA_ShowRxState(intr_status);
++#endif
++	/* ABNORMAL interrupts */
++	if (unlikely(intr_status & DMA_STATUS_AIS)) {
++		GMAC_DMA_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
++		if (unlikely(intr_status & DMA_STATUS_UNF)) {
++			GMAC_DMA_DBG(KERN_INFO "transmit underflow\n");
++			ret = tx_hard_error_bump_tc;
++			gmac_stats->tx_undeflow_irq++;
++		}
++		if (unlikely(intr_status & DMA_STATUS_TJT)) {
++			GMAC_DMA_DBG(KERN_INFO "transmit jabber\n");
++			gmac_stats->tx_jabber_irq++;
++		}
++		if (unlikely(intr_status & DMA_STATUS_OVF)) {
++			GMAC_DMA_DBG(KERN_INFO "recv overflow\n");
++			gmac_stats->rx_overflow_irq++;
++		}
++		if (unlikely(intr_status & DMA_STATUS_RU)) {
++			GMAC_DMA_DBG(KERN_INFO "receive buffer unavailable\n");
++			gmac_stats->rx_buf_unav_irq++;
++		}
++		if (unlikely(intr_status & DMA_STATUS_RPS)) {
++			GMAC_DMA_DBG(KERN_INFO "receive process stopped\n");
++			gmac_stats->rx_process_stopped_irq++;
++		}
++		if (unlikely(intr_status & DMA_STATUS_RWT)) {
++			GMAC_DMA_DBG(KERN_INFO "receive watchdog\n");
++			gmac_stats->rx_watchdog_irq++;
++		}
++		if (unlikely(intr_status & DMA_STATUS_ETI)) {
++			GMAC_DMA_DBG(KERN_INFO "transmit early interrupt\n");
++			gmac_stats->tx_early_irq++;
++		}
++		if (unlikely(intr_status & DMA_STATUS_TPS)) {
++			GMAC_DMA_DBG(KERN_INFO "transmit process stopped\n");
++			gmac_stats->tx_process_stopped_irq++;
++			ret = tx_hard_error;
++		}
++		if (unlikely(intr_status & DMA_STATUS_FBI)) {
++			GMAC_DMA_DBG(KERN_INFO "fatal bus error\n");
++			gmac_stats->fatal_bus_error_irq++;
++			ret = tx_hard_error;
++		}
++	}
++	/* TX/RX NORMAL interrupts */
++	if (intr_status & DMA_STATUS_NIS) {
++		gmac_stats->normal_irq_n++;
++		if (likely((intr_status & DMA_STATUS_RI) ||
++			   (intr_status & (DMA_STATUS_TI))))
++			ret = handle_tx_rx;
++	}
++	/* Optional hardware blocks, interrupts should be disabled */
++	if (unlikely(intr_status &
++		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
++		pr_info("%s: unexpected status %08x\n", __func__, intr_status);
++
++	/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
++	writel(intr_status & 0x1ffff, pGmac->remap_addr + REG_GMAC_STATUS);
++	GMAC_DMA_DBG(KERN_INFO "\n\n");
++
++	return ret;
++}
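++
++/*
++ * Caller sketch (hypothetical ISR; the real one lives in
++ * fh_gmac_main.c): the return code selects between scheduling NAPI
++ * and error recovery.
++ *
++ *	int status = GMAC_DMA_Interrupt(pGmac);
++ *
++ *	if (status == handle_tx_rx)
++ *		napi_schedule(&pGmac->napi);
++ *	else if (status == tx_hard_error)
++ *		fh_gmac_tx_recover(pGmac);
++ *
++ * fh_gmac_tx_recover() is a hypothetical recovery helper, not part
++ * of this driver.
++ */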
++
++int GMAC_DMA_TxStatus(Gmac_Object * pGmac, Gmac_Tx_DMA_Descriptors * desc)
++{
++	int ret = 0;
++	struct net_device_stats *stats = &pGmac->ndev->stats;
++	Gmac_Stats *gmac_stats = &pGmac->stats;
++
++	if (unlikely(desc->desc0.bit.error_summary)) {
++		if (unlikely(desc->desc0.bit.underflow_error)) {
++			gmac_stats->tx_underflow++;
++			stats->tx_fifo_errors++;
++		}
++		if (unlikely(desc->desc0.bit.no_carrier)) {
++			gmac_stats->tx_carrier++;
++			stats->tx_carrier_errors++;
++		}
++		if (unlikely(desc->desc0.bit.loss_of_carrier)) {
++			gmac_stats->tx_losscarrier++;
++			stats->tx_carrier_errors++;
++		}
++		if (unlikely((desc->desc0.bit.excessive_deferral) ||
++			     (desc->desc0.bit.excessive_collision) ||
++			     (desc->desc0.bit.late_collision)))
++			stats->collisions += desc->desc0.bit.collision_count;
++		ret = -1;
++	}
++	if (unlikely(desc->desc0.bit.deferred))
++		gmac_stats->tx_deferred++;
++
++	return ret;
++}
++
++int GMAC_DMA_RxStatus(Gmac_Object * pGmac, Gmac_Rx_DMA_Descriptors * desc)
++{
++	int ret = csum_none;
++	struct net_device_stats *stats = &pGmac->ndev->stats;
++	Gmac_Stats *gmac_stats = &pGmac->stats;
++
++	if (unlikely(desc->desc0.bit.last_descriptor == 0)) {
++		pr_warning("ndesc Error: Oversized Ethernet "
++			   "frame spanned multiple buffers\n");
++		stats->rx_length_errors++;
++		return discard_frame;
++	}
++
++	if (unlikely(desc->desc0.bit.error_summary)) {
++		if (unlikely(desc->desc0.bit.descriptor_error))
++			gmac_stats->rx_desc++;
++		if (unlikely(desc->desc0.bit.ipc_chksum_error_giant_frame))
++			gmac_stats->rx_toolong++;
++		if (unlikely(desc->desc0.bit.late_collision)) {
++			gmac_stats->rx_collision++;
++			stats->collisions++;
++		}
++		if (unlikely(desc->desc0.bit.crc_error)) {
++			gmac_stats->rx_crc++;
++			stats->rx_crc_errors++;
++		}
++		ret = discard_frame;
++	}
++	if (unlikely(desc->desc0.bit.dribble_error))
++		ret = discard_frame;
++
++	if (unlikely(desc->desc0.bit.length_error)) {
++		gmac_stats->rx_length++;
++		ret = discard_frame;
++	}
++
++	return ret;
++}
++
++void GMAC_DMA_DiagnosticFrame(void *data, Gmac_Object * pGmac)
++{
++	struct net_device_stats *stats = (struct net_device_stats *)data;
++	Gmac_Stats *gmac_stats = &pGmac->stats;
++	__u32 csr8 = readl(pGmac->remap_addr + REG_GMAC_ERROR_COUNT);
++
++	if (unlikely(csr8)) {
++		//Overflow bit for FIFO Overflow Counter
++		if (csr8 & 0x10000000) {
++			stats->rx_over_errors += 0x800;
++			gmac_stats->rx_overflow_cntr += 0x800;
++		} else {
++			unsigned int ove_cntr;
++			//indicates the number of frames missed by the application
++			ove_cntr = ((csr8 & 0x0ffe0000) >> 17);
++			stats->rx_over_errors += ove_cntr;
++			gmac_stats->rx_overflow_cntr += ove_cntr;
++		}
++
++		//Overflow bit for Missed Frame Counter
++		if (csr8 & 0x10000) {
++			stats->rx_missed_errors += 0xffff;
++			gmac_stats->rx_missed_cntr += 0xffff;
++		} else {
++			//indicates the number of frames missed by the controller
++			unsigned int miss_f = (csr8 & 0xffff);
++			stats->rx_missed_errors += miss_f;
++			gmac_stats->rx_missed_cntr += miss_f;
++		}
++	}
++}
+diff --git a/drivers/net/fh_gmac/fh_gmac_dma.h b/drivers/net/fh_gmac/fh_gmac_dma.h
+new file mode 100644
+index 00000000..43c02761
+--- /dev/null
++++ b/drivers/net/fh_gmac/fh_gmac_dma.h
+@@ -0,0 +1,183 @@
++/*
++ * fh_gmac_dma.h
++ *
++ *  Created on: May 22, 2014
++ *      Author: duobao
++ */
++
++#ifndef FH_GMAC_DMA_H_
++#define FH_GMAC_DMA_H_
++
++
++
++/* DMA Status register defines */
++#define DMA_STATUS_GPI		0x10000000	/* PMT interrupt */
++#define DMA_STATUS_GMI		0x08000000	/* MMC interrupt */
++#define DMA_STATUS_GLI		0x04000000	/* GMAC Line interface int */
++#define DMA_STATUS_EB_MASK	0x00380000	/* Error Bits Mask */
++#define DMA_STATUS_EB_TX_ABORT	0x00080000	/* Error Bits - TX Abort */
++#define DMA_STATUS_EB_RX_ABORT	0x00100000	/* Error Bits - RX Abort */
++#define DMA_STATUS_TS_MASK	0x00700000	/* Transmit Process State */
++#define DMA_STATUS_TS_SHIFT	20
++#define DMA_STATUS_RS_MASK	0x000e0000	/* Receive Process State */
++#define DMA_STATUS_RS_SHIFT	17
++#define DMA_STATUS_NIS	0x00010000	/* Normal Interrupt Summary */
++#define DMA_STATUS_AIS	0x00008000	/* Abnormal Interrupt Summary */
++#define DMA_STATUS_ERI	0x00004000	/* Early Receive Interrupt */
++#define DMA_STATUS_FBI	0x00002000	/* Fatal Bus Error Interrupt */
++#define DMA_STATUS_ETI	0x00000400	/* Early Transmit Interrupt */
++#define DMA_STATUS_RWT	0x00000200	/* Receive Watchdog Timeout */
++#define DMA_STATUS_RPS	0x00000100	/* Receive Process Stopped */
++#define DMA_STATUS_RU	0x00000080	/* Receive Buffer Unavailable */
++#define DMA_STATUS_RI	0x00000040	/* Receive Interrupt */
++#define DMA_STATUS_UNF	0x00000020	/* Transmit Underflow */
++#define DMA_STATUS_OVF	0x00000010	/* Receive Overflow */
++#define DMA_STATUS_TJT	0x00000008	/* Transmit Jabber Timeout */
++#define DMA_STATUS_TU	0x00000004	/* Transmit Buffer Unavailable */
++#define DMA_STATUS_TPS	0x00000002	/* Transmit Process Stopped */
++#define DMA_STATUS_TI	0x00000001	/* Transmit Interrupt */
++#define DMA_CONTROL_FTF		0x00100000 /* Flush transmit FIFO */
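++
++/*
++ * Decoding example: a CSR5 value of 0x00010040 has DMA_STATUS_NIS and
++ * DMA_STATUS_RI set, so GMAC_DMA_Interrupt() in fh_gmac_dma.c returns
++ * handle_tx_rx and the caller schedules the NAPI poll.
++ */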
++
++typedef union
++{
++	struct
++	{
++		__u32	deferred						:1; //0~31
++		__u32	underflow_error					:1;
++		__u32	excessive_deferral				:1;
++		__u32	collision_count					:4;
++		__u32	vlan_frame						:1;
++		__u32	excessive_collision				:1;
++		__u32	late_collision					:1;
++		__u32	no_carrier						:1;
++		__u32	loss_of_carrier					:1;
++		__u32	payload_checksum_error			:1;
++		__u32	frame_flushed					:1;
++		__u32	jabber_timeout					:1;
++		__u32	error_summary					:1;
++		__u32	ip_header_error					:1;
++		__u32	tx_timestamp_status				:1;
++		__u32	reserved_30_18					:13;
++		__u32	own								:1;
++	}bit;
++	__u32 dw;
++}Gmac_Tx_Descriptor0;
++
++typedef union
++{
++	struct
++	{
++		__u32	buffer1_size					:11; //0~31
++		__u32	buffer2_size					:11;
++		__u32	timestamp_enable				:1;
++		__u32	disable_padding					:1;
++		__u32	second_address_chained			:1;
++		__u32	end_of_ring						:1;
++		__u32	disable_crc						:1;
++		__u32	checksum_insertion_ctrl			:2;
++		__u32	first_segment					:1;
++		__u32	last_segment					:1;
++		__u32	intr_on_completion				:1;
++	}bit;
++	__u32 dw;
++}Gmac_Tx_Descriptor1;
++
++typedef union
++{
++	struct
++	{
++		__u32	buffer_address_pointer					:32; //0~31
++	}bit;
++	__u32 dw;
++}Gmac_Tx_Descriptor2;
++
++typedef union
++{
++	struct
++	{
++		__u32	buffer_address_pointer					:32; //0~31
++	}bit;
++	__u32 dw;
++}Gmac_Tx_Descriptor3;
++
++typedef union
++{
++	struct
++	{
++		__u32	mac_addr_payload_chksum_error	:1; //0
++		__u32	crc_error						:1;	//1
++		__u32	dribble_error					:1;	//2
++		__u32	receive_error					:1;	//3
++		__u32	watchdog_timeout				:1;	//4
++		__u32	frame_type						:1;	//5
++		__u32	late_collision					:1;	//6
++		__u32	ipc_chksum_error_giant_frame	:1;	//7
++		__u32	last_descriptor					:1;	//8
++		__u32	first_descriptor				:1;	//9
++		__u32	vlan_tag						:1; //10
++		__u32	overflow_error					:1; //11
++		__u32	length_error					:1; //12
++		__u32	sa_filter_fail					:1; //13
++		__u32	descriptor_error				:1; //14
++		__u32	error_summary					:1;	//15
++		__u32	frame_length					:14;//16~29
++		__u32	da_filter_fail					:1;	//30
++		__u32	own								:1; //31
++	}bit;
++	__u32 dw;
++}Gmac_Rx_Descriptor0;
++
++typedef union
++{
++	struct
++	{
++		__u32	buffer1_size					:11; //0~10
++		__u32	buffer2_size					:11; //11~21
++		__u32	reserved_23_22					:2;  //22~23
++		__u32	second_address_chained			:1;	 //24
++		__u32	end_of_ring						:1;	 //25
++		__u32	reserved_30_26					:5;	 //26~30
++		__u32	disable_intr_on_completion		:1;  //31
++	}bit;
++	__u32 dw;
++}Gmac_Rx_Descriptor1;
++
++typedef union
++{
++	struct
++	{
++		__u32	buffer_address_pointer					:32; //0~31
++	}bit;
++	__u32 dw;
++}Gmac_Rx_Descriptor2;
++
++typedef union
++{
++	struct
++	{
++		__u32	buffer_address_pointer					:32; //0~31
++	}bit;
++	__u32 dw;
++}Gmac_Rx_Descriptor3;
++
++typedef struct
++{
++	Gmac_Tx_Descriptor0 desc0;   	 /* control and status information of descriptor */
++	Gmac_Tx_Descriptor1 desc1;   	 /* buffer sizes                                 */
++	Gmac_Tx_Descriptor2 desc2;   	 /* physical address of the buffer 1             */
++	Gmac_Tx_Descriptor3 desc3;    	 /* physical address of the buffer 2             */
++}Gmac_Tx_DMA_Descriptors;
++
++typedef struct
++{
++	Gmac_Rx_Descriptor0 desc0;   	 /* control and status information of descriptor */
++	Gmac_Rx_Descriptor1 desc1;   	 /* buffer sizes                                 */
++	Gmac_Rx_Descriptor2 desc2;   	 /* physical address of the buffer 1             */
++	Gmac_Rx_Descriptor3 desc3;    	 /* physical address of the buffer 2             */
++}Gmac_Rx_DMA_Descriptors;
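++
++/*
++ * Ring layout note: both rings are plain arrays; the last element
++ * sets desc1.bit.end_of_ring so the DMA wraps back to the base
++ * address instead of chaining, matching GMAC_DMA_InitRxDesc() and
++ * GMAC_DMA_InitTxDesc() in fh_gmac_dma.c.
++ */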
++
++
++
++#endif /* FH_GMAC_DMA_H_ */
+diff --git a/drivers/net/fh_gmac/fh_gmac_ethtool.c b/drivers/net/fh_gmac/fh_gmac_ethtool.c
+new file mode 100644
+index 00000000..624b3a02
+--- /dev/null
++++ b/drivers/net/fh_gmac/fh_gmac_ethtool.c
+@@ -0,0 +1,316 @@
++#include <linux/etherdevice.h>
++#include <linux/ethtool.h>
++#include <linux/mii.h>
++#include <linux/phy.h>
++#include <mach/fh_gmac.h>
++#include "fh_gmac.h"
++
++#define REG_SPACE_SIZE	0x1054
++#define GMAC_ETHTOOL_NAME	"fh_gmac"
++
++struct gmac_stats
++{
++	char stat_string[ETH_GSTRING_LEN];
++	int sizeof_stat;
++	int stat_offset;
++};
++
++#define FH_GMAC_STAT(m)	\
++	{ #m, FIELD_SIZEOF(Gmac_Stats, m),	\
++	offsetof(Gmac_Object, stats.m)}
++
++static const struct gmac_stats gmac_gstrings_stats[] =
++{
++	FH_GMAC_STAT(tx_underflow),
++	FH_GMAC_STAT(tx_carrier),
++	FH_GMAC_STAT(tx_losscarrier),
++	FH_GMAC_STAT(tx_heartbeat),
++	FH_GMAC_STAT(tx_deferred),
++	FH_GMAC_STAT(tx_vlan),
++	FH_GMAC_STAT(tx_jabber),
++	FH_GMAC_STAT(tx_frame_flushed),
++	FH_GMAC_STAT(tx_payload_error),
++	FH_GMAC_STAT(tx_ip_header_error),
++	FH_GMAC_STAT(rx_desc),
++	FH_GMAC_STAT(rx_partial),
++	FH_GMAC_STAT(rx_runt),
++	FH_GMAC_STAT(rx_toolong),
++	FH_GMAC_STAT(rx_collision),
++	FH_GMAC_STAT(rx_crc),
++	FH_GMAC_STAT(rx_length),
++	FH_GMAC_STAT(rx_mii),
++	FH_GMAC_STAT(rx_multicast),
++	FH_GMAC_STAT(rx_gmac_overflow),
++	FH_GMAC_STAT(rx_watchdog),
++	FH_GMAC_STAT(da_rx_filter_fail),
++	FH_GMAC_STAT(sa_rx_filter_fail),
++	FH_GMAC_STAT(rx_missed_cntr),
++	FH_GMAC_STAT(rx_overflow_cntr),
++	FH_GMAC_STAT(tx_undeflow_irq),
++	FH_GMAC_STAT(tx_process_stopped_irq),
++	FH_GMAC_STAT(tx_jabber_irq),
++	FH_GMAC_STAT(rx_overflow_irq),
++	FH_GMAC_STAT(rx_buf_unav_irq),
++	FH_GMAC_STAT(rx_process_stopped_irq),
++	FH_GMAC_STAT(rx_watchdog_irq),
++	FH_GMAC_STAT(tx_early_irq),
++	FH_GMAC_STAT(fatal_bus_error_irq),
++	FH_GMAC_STAT(threshold),
++	FH_GMAC_STAT(tx_pkt_n),
++	FH_GMAC_STAT(rx_pkt_n),
++	FH_GMAC_STAT(poll_n),
++	FH_GMAC_STAT(sched_timer_n),
++	FH_GMAC_STAT(normal_irq_n),
++};
++#define FH_GMAC_STATS_LEN ARRAY_SIZE(gmac_gstrings_stats)
++
++static void gmac_ethtool_getdrvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
++{
++	strcpy(info->driver, GMAC_ETHTOOL_NAME);
++
++	strcpy(info->version, "0.0.1");
++	info->fw_version[0] = '\0';
++	info->n_stats = FH_GMAC_STATS_LEN;
++}
++
++static int gmac_ethtool_getsettings(struct net_device *ndev, struct ethtool_cmd *cmd)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	struct phy_device *phy = pGmac->phydev;
++	int rc;
++	if (phy == NULL)
++	{
++		pr_err("%s: %s: PHY is not registered\n",
++		       __func__, ndev->name);
++		return -ENODEV;
++	}
++	if (!netif_running(ndev))
++	{
++		pr_err("%s: interface is disabled: we cannot track "
++		"link speed / duplex setting\n", ndev->name);
++		return -EBUSY;
++	}
++	cmd->transceiver = XCVR_INTERNAL;
++	spin_lock_irq(&pGmac->lock);
++	rc = phy_ethtool_gset(phy, cmd);
++	spin_unlock_irq(&pGmac->lock);
++	return rc;
++}
++
++static int gmac_ethtool_setsettings(struct net_device *ndev, struct ethtool_cmd *cmd)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	struct phy_device *phy = pGmac->phydev;
++	int rc;
++
++	spin_lock(&pGmac->lock);
++	rc = phy_ethtool_sset(phy, cmd);
++	spin_unlock(&pGmac->lock);
++
++	return rc;
++}
++
++static __u32 gmac_ethtool_getmsglevel(struct net_device *ndev)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	return pGmac->msg_enable;
++}
++
++static void gmac_ethtool_setmsglevel(struct net_device *ndev, __u32 level)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	pGmac->msg_enable = level;
++
++}
++
++static int gmac_check_if_running(struct net_device *ndev)
++{
++	if (!netif_running(ndev))
++		return -EBUSY;
++	return 0;
++}
++
++static int gmac_ethtool_get_regs_len(struct net_device *ndev)
++{
++	return REG_SPACE_SIZE;
++}
++
++static void gmac_ethtool_gregs(struct net_device *ndev, struct ethtool_regs *regs, void *space)
++{
++	int i;
++	__u32 *reg_space = (__u32 *) space;
++
++	Gmac_Object* pGmac = netdev_priv(ndev);
++
++	memset(reg_space, 0x0, REG_SPACE_SIZE);
++
++	/* MAC registers */
++	for (i = 0; i < 55; i++)
++		reg_space[i] = readl(pGmac->remap_addr + (i * 4));
++	/* DMA registers */
++	for (i = 0; i < 22; i++)
++		reg_space[i + 55] = readl(pGmac->remap_addr + (REG_GMAC_BUS_MODE + (i * 4)));
++}
++
++static void gmac_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++
++	spin_lock(&pGmac->lock);
++
++	pause->rx_pause = 0;
++	pause->tx_pause = 0;
++	pause->autoneg = pGmac->phydev->autoneg;
++
++	if (pGmac->flow_ctrl & FLOW_RX)
++		pause->rx_pause = 1;
++	if (pGmac->flow_ctrl & FLOW_TX)
++		pause->tx_pause = 1;
++
++	spin_unlock(&pGmac->lock);
++}
++
++static int gmac_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	struct phy_device *phy = pGmac->phydev;
++	int new_pause = FLOW_OFF;
++	int ret = 0;
++
++	spin_lock(&pGmac->lock);
++
++	if (pause->rx_pause)
++		new_pause |= FLOW_RX;
++	if (pause->tx_pause)
++		new_pause |= FLOW_TX;
++
++	pGmac->flow_ctrl = new_pause;
++	phy->autoneg = pause->autoneg;
++
++	if (phy->autoneg)
++	{
++		if (netif_running(ndev))
++			ret = phy_start_aneg(phy);
++	}
++	else
++	{
++		GMAC_FlowCtrl(pGmac, phy->duplex, pGmac->flow_ctrl, pGmac->pause);
++	}
++	spin_unlock(&pGmac->lock);
++	return ret;
++}
++
++static void gmac_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *dummy, __u64 *data)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	int i;
++
++	/* Update HW stats if supported */
++	GMAC_DMA_DiagnosticFrame(&ndev->stats, pGmac);
++
++	for (i = 0; i < FH_GMAC_STATS_LEN; i++)
++	{
++		char *p = (char *)pGmac + gmac_gstrings_stats[i].stat_offset;
++		data[i] = (gmac_gstrings_stats[i].sizeof_stat ==
++		sizeof(__u64)) ? (*(__u64 *)p) : (*(__u32 *)p);
++	}
++}
++
++static int gmac_get_sset_count(struct net_device *netdev, int sset)
++{
++	switch (sset)
++	{
++	case ETH_SS_STATS:
++		return FH_GMAC_STATS_LEN;
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++static void gmac_get_strings(struct net_device *ndev, __u32 stringset, __u8 *data)
++{
++	int i;
++	__u8 *p = data;
++
++	switch (stringset)
++	{
++	case ETH_SS_STATS:
++		for (i = 0; i < FH_GMAC_STATS_LEN; i++)
++		{
++			memcpy(p, gmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
++			p += ETH_GSTRING_LEN;
++		}
++		break;
++	default:
++		WARN_ON(1);
++		break;
++	}
++}
++
++/* Currently only support WOL through Magic packet. */
++static void gmac_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++
++	spin_lock_irq(&pGmac->lock);
++	if (device_can_wakeup(pGmac->dev))
++	{
++		wol->supported = WAKE_MAGIC | WAKE_UCAST;
++		wol->wolopts = pGmac->wolopts;
++	}
++	spin_unlock_irq(&pGmac->lock);
++}
++
++static int gmac_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	__u32 support = WAKE_MAGIC | WAKE_UCAST;
++
++	if (!device_can_wakeup(pGmac->dev))
++		return -EINVAL;
++
++	if (wol->wolopts & ~support)
++		return -EINVAL;
++
++	if (wol->wolopts)
++	{
++		pr_info("fh_gmac: wakeup enable\n");
++		device_set_wakeup_enable(pGmac->dev, 1);
++		enable_irq_wake(ndev->irq);
++	}
++	else
++	{
++		device_set_wakeup_enable(pGmac->dev, 0);
++		disable_irq_wake(ndev->irq);
++	}
++
++	spin_lock_irq(&pGmac->lock);
++	pGmac->wolopts = wol->wolopts;
++	spin_unlock_irq(&pGmac->lock);
++
++	return 0;
++}
++
++static const struct ethtool_ops fh_gmac_ethtool_ops = {
++	.begin = gmac_check_if_running,
++	.get_drvinfo = gmac_ethtool_getdrvinfo,
++	.get_settings = gmac_ethtool_getsettings,
++	.set_settings = gmac_ethtool_setsettings,
++	.get_msglevel = gmac_ethtool_getmsglevel,
++	.set_msglevel = gmac_ethtool_setmsglevel,
++	.get_regs = gmac_ethtool_gregs,
++	.get_regs_len = gmac_ethtool_get_regs_len,
++	.get_link = ethtool_op_get_link,
++	.get_pauseparam = gmac_get_pauseparam,
++	.set_pauseparam = gmac_set_pauseparam,
++	.get_ethtool_stats = gmac_get_ethtool_stats,
++	.get_strings = gmac_get_strings,
++	.get_wol = gmac_get_wol,
++	.set_wol = gmac_set_wol,
++	.get_sset_count	= gmac_get_sset_count,
++};
++
++void fh_gmac_set_ethtool_ops(struct net_device *netdev)
++{
++	SET_ETHTOOL_OPS(netdev, &fh_gmac_ethtool_ops);
++}
+diff --git a/drivers/net/fh_gmac/fh_gmac_main.c b/drivers/net/fh_gmac/fh_gmac_main.c
+new file mode 100644
+index 00000000..e4534e68
+--- /dev/null
++++ b/drivers/net/fh_gmac/fh_gmac_main.c
+@@ -0,0 +1,1364 @@
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/in.h>
++#include <linux/ioport.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ethtool.h>
++#include <linux/highmem.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/version.h>
++#include <linux/spinlock.h>
++#include <linux/dma-mapping.h>
++#include <linux/clk.h>
++#include <linux/platform_device.h>
++#include <linux/semaphore.h>
++#include <linux/phy.h>
++#include <linux/bitops.h>
++#include <linux/io.h>
++#include <linux/uaccess.h>
++#include <linux/irqreturn.h>
++
++#include <asm/irq.h>
++#include <asm/page.h>
++#include <asm/setup.h>
++#include <linux/crc32.h>
++#include <mach/fh_gmac.h>
++#include "fh_gmac.h"
++
++/* Module parameters */
++static int watchdog = TX_TIMEO;
++static int debug = 16;      /* -1: default, 0: no output, 16:  all */
++static int dma_txsize = DMA_TX_SIZE;
++static int dma_rxsize = DMA_RX_SIZE;
++static int flow_ctrl = FLOW_AUTO;
++static int pause = PAUSE_TIME;
++
++static unsigned int phymode = PHY_INTERFACE_MODE_MII;
++
++#if defined(FH_GMAC_XMIT_DEBUG) || defined(FH_GMAC_RX_DEBUG)
++static void print_pkt(unsigned char *buf, int len)
++{
++	int j;
++	printk("len = %d byte, buf addr: 0x%p", len, buf);
++	for (j = 0; j < len; j++)
++	{
++		if ((j % 16) == 0)
++			printk("\n %03x:", j);
++		printk(" %02x", buf[j]);
++	}
++	printk("\n");
++}
++#endif
++
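++/*
++ * Reverse the bit order of a 32-bit word; used below when programming
++ * the multicast hash filter registers.
++ */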
++static __u32 GMAC_BitReverse(register __u32 x)
++{
++	register __u32 y = 0x55555555;
++	x = (((x >> 1) & y) | ((x & y) << 1));
++	y = 0x33333333;
++	x = (((x >> 2) & y) | ((x & y) << 2));
++	y = 0x0f0f0f0f;
++	x = (((x >> 4) & y) | ((x & y) << 4));
++	y = 0x00ff00ff;
++	x = (((x >> 8) & y) | ((x & y) << 8));
++	return (x >> 16) | (x << 16);
++}
++
++
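++/* Program the station MAC address into the MAC high/low registers */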
++static void GMAC_SetMacAddress(Gmac_Object* pGmac)
++{
++
++	__u32 macHigh = pGmac->local_mac_address[5]<<8 | pGmac->local_mac_address[4];
++	__u32 macLow = pGmac->local_mac_address[3]<<24 | pGmac->local_mac_address[2]<<16 | pGmac->local_mac_address[1]<<8 | pGmac->local_mac_address[0];
++
++	writel(macHigh, pGmac->remap_addr + REG_GMAC_MAC_HIGH);
++	writel(macLow, pGmac->remap_addr + REG_GMAC_MAC_LOW);
++}
++
++int gmac_dev_set_mac_addr(struct net_device *dev, void *p)
++{
++	Gmac_Object *pGmac = netdev_priv(dev);
++	struct sockaddr *addr = p;
++	int ret;
++
++	/* validate and update the netdev address first, then program the HW */
++	ret = eth_mac_addr(dev, p);
++	if (ret)
++		return ret;
++
++	memcpy(pGmac->local_mac_address, addr->sa_data, ETH_ALEN);
++	GMAC_SetMacAddress(pGmac);
++	return 0;
++}
++
++static inline void GMAC_EnableMac(Gmac_Object* pGmac)
++{
++	/* enable transmitter and receiver */
++	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_CONFIG);
++
++	reg |= 0xc;
++	writel(reg, pGmac->remap_addr + REG_GMAC_CONFIG);
++}
++
++
++
++static inline void GMAC_DisableMac(Gmac_Object* pGmac)
++{
++	/* disable transmitter and receiver */
++	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_CONFIG);
++
++	reg &= ~0xc;
++	writel(reg, pGmac->remap_addr + REG_GMAC_CONFIG);
++}
++
++static inline void GMAC_CoreInit(Gmac_Object* pGmac)
++{
++	//FIXME: heartbeat disable
++	/* enable automatic pad/CRC stripping; TX/RX enable is done later
++	 * in GMAC_EnableMac() */
++	__u32 reg = readl(pGmac->remap_addr + REG_GMAC_CONFIG);
++	reg |= 0x80;
++	writel(reg, pGmac->remap_addr + REG_GMAC_CONFIG);
++}
++
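++/*
++ * Program the flow control register: bits from 'fc' select RX/TX pause,
++ * and in full duplex the pause time occupies the upper 16 bits.
++ */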
++void GMAC_FlowCtrl(Gmac_Object* pGmac, unsigned int duplex,
++			       unsigned int fc, unsigned int pause_time)
++{
++	__u32 flow = fc;
++
++	if (duplex)
++		flow |= (pause_time << 16);
++	writel(flow, pGmac->remap_addr + REG_GMAC_FLOW_CTRL);
++}
++
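++/*
++ * Recover from a fatal TX error: stop the DMA, drop all queued skbs,
++ * re-initialise the TX ring and restart the queue.
++ */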
++static void gmac_tx_err(Gmac_Object* pGmac)
++{
++
++	netif_stop_queue(pGmac->ndev);
++	GMAC_DMA_StopTx(pGmac);
++	GMAC_DMA_FreeTxSkbufs(pGmac);
++	GMAC_DMA_InitTxDesc(pGmac->tx_dma_descriptors, pGmac->dma_tx_size);
++	pGmac->dirty_tx = 0;
++	pGmac->cur_tx = 0;
++	GMAC_DMA_StartTx(pGmac);
++	pGmac->ndev->stats.tx_errors++;
++	netif_wake_queue(pGmac->ndev);
++}
++
++
++static irqreturn_t fh_gmac_interrupt(int irq, void *dev_id)
++{
++
++	struct net_device *ndev = (struct net_device *)dev_id;
++	Gmac_Object *pGmac;
++	int status;
++
++	if (unlikely(!ndev))
++	{
++		pr_err("%s: invalid ndev pointer\n", __func__);
++		return IRQ_NONE;
++	}
++
++	pGmac = netdev_priv(ndev);
++	status = GMAC_DMA_Interrupt(pGmac);
++	if (likely(status == handle_tx_rx))
++	{
++		if (likely(napi_schedule_prep(&pGmac->napi)))
++		{
++			writel(0x0, pGmac->remap_addr + REG_GMAC_INTR_EN);
++			__napi_schedule(&pGmac->napi);
++		}
++	}
++	else if (unlikely(status & tx_hard_error_bump_tc))
++	{
++		//FIXME: tx underflow
++	}
++	else if (unlikely(status == tx_hard_error))
++	{
++		gmac_tx_err(pGmac);
++	}
++
++	return IRQ_HANDLED;
++}
++
++static void fh_gmac_verify_args(void)
++{
++	if (unlikely(watchdog < 0))
++		watchdog = TX_TIMEO;
++	if (unlikely(dma_rxsize < 0))
++		dma_rxsize = DMA_RX_SIZE;
++	if (unlikely(dma_txsize < 0))
++		dma_txsize = DMA_TX_SIZE;
++	if (unlikely(flow_ctrl > 1))
++		flow_ctrl = FLOW_AUTO;
++	else if (likely(flow_ctrl < 0))
++		flow_ctrl = FLOW_OFF;
++	if (unlikely((pause < 0) || (pause > 0xffff)))
++		pause = PAUSE_TIME;
++
++}
++
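++/*
++ * phylib link-change callback: mirror the negotiated duplex, speed and
++ * pause settings into the MAC configuration registers.
++ */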
++static void fh_gmac_adjust_link(struct net_device *ndev)
++{
++	Gmac_Object *pGmac = netdev_priv(ndev);
++	struct phy_device *phydev = pGmac->phydev;
++	unsigned long flags;
++	int new_state = 0;
++
++	if (phydev == NULL)
++		return;
++
++	spin_lock_irqsave(&pGmac->lock, flags);
++	if (phydev->link)
++	{
++		__u32 ctrl = readl(pGmac->remap_addr + REG_GMAC_CONFIG);
++
++		/* Now we make sure that we can be in full duplex mode.
++		 * If not, we operate in half-duplex mode. */
++		if (phydev->duplex != pGmac->oldduplex)
++		{
++			new_state = 1;
++			if (!(phydev->duplex))
++				ctrl &= ~0x800;
++			else
++				ctrl |= 0x800;
++			pGmac->oldduplex = phydev->duplex;
++		}
++		/* Flow Control operation */
++		if (phydev->pause)
++		{
++			__u32 fc = pGmac->flow_ctrl, pause_time = pGmac->pause;
++			GMAC_FlowCtrl(pGmac, phydev->duplex, fc, pause_time);
++		}
++
++		if (phydev->speed != pGmac->speed)
++		{
++			new_state = 1;
++			switch (phydev->speed)
++			{
++			case 100:
++				ctrl |= 0x4000;
++				if(pGmac->priv_data->set_rmii_speed)
++					pGmac->priv_data->set_rmii_speed(gmac_speed_100m);
++				break;
++			case 10:
++				ctrl &= ~0x4000;
++				if(pGmac->priv_data->set_rmii_speed)
++					pGmac->priv_data->set_rmii_speed(gmac_speed_10m);
++				break;
++			default:
++				if (netif_msg_link(pGmac))
++					pr_warning("%s: Speed (%d) is not 10"
++				       " or 100!\n", ndev->name, phydev->speed);
++				break;
++			}
++
++			pGmac->speed = phydev->speed;
++		}
++		writel(ctrl, pGmac->remap_addr + REG_GMAC_CONFIG);
++		if (!pGmac->oldlink)
++		{
++			new_state = 1;
++			pGmac->oldlink = 1;
++		}
++	}
++	else if (pGmac->oldlink)
++	{
++		new_state = 1;
++		pGmac->oldlink = 0;
++		pGmac->speed = 0;
++		pGmac->oldduplex = -1;
++	}
++
++	if (new_state && netif_msg_link(pGmac))
++		phy_print_status(phydev);
++
++	spin_unlock_irqrestore(&pGmac->lock, flags);
++}
++
++
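++/*
++ * Refill the RX ring: allocate fresh skbs (or reuse recycled ones) for
++ * consumed entries and hand the descriptors back to the DMA.
++ */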
++static inline void fh_gmac_rx_refill(Gmac_Object* pGmac)
++{
++	__u32 rxsize = pGmac->dma_rx_size;
++	int bfsize = pGmac->dma_buf_sz;
++	Gmac_Rx_DMA_Descriptors *desc = pGmac->rx_dma_descriptors;
++
++	for (; pGmac->cur_rx - pGmac->dirty_rx > 0; pGmac->dirty_rx++)
++	{
++		__u32 entry = pGmac->dirty_rx % rxsize;
++		if (likely(pGmac->rx_skbuff[entry] == NULL))
++		{
++			struct sk_buff *skb;
++
++			skb = __skb_dequeue(&pGmac->rx_recycle);
++			if (skb == NULL)
++				skb = netdev_alloc_skb_ip_align(pGmac->ndev, bfsize);
++
++			if (unlikely(skb == NULL))
++				break;
++
++			pGmac->rx_skbuff[entry] = skb;
++			pGmac->rx_skbuff_dma[entry] =
++			    dma_map_single(pGmac->dev, skb->data, bfsize,
++					   DMA_FROM_DEVICE);
++
++			(desc + entry)->desc2.dw = pGmac->rx_skbuff_dma[entry];
++			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
++		}
++		wmb();
++		(desc+entry)->desc0.bit.own = 1;
++		wmb();
++	}
++}
++
++static int fh_gmac_rx(Gmac_Object* pGmac, int limit)
++{
++	__u32 rxsize = pGmac->dma_rx_size;
++	__u32 entry = pGmac->cur_rx % rxsize;
++	__u32 next_entry;
++	__u32 count = 0;
++	Gmac_Rx_DMA_Descriptors *desc = pGmac->rx_dma_descriptors + entry;
++	Gmac_Rx_DMA_Descriptors *desc_next;
++
++#ifdef FH_GMAC_RX_DEBUG
++	if (netif_msg_hw(pGmac))
++	{
++		printk(">>> fh_gmac_rx: descriptor ring:\n");
++		GMAC_DMA_DisplayRxDesc(pGmac->rx_dma_descriptors, rxsize);
++	}
++#endif
++	count = 0;
++	while (!desc->desc0.bit.own)
++	{
++		int status;
++
++		if (count >= limit)
++			break;
++
++		count++;
++
++		next_entry = (++pGmac->cur_rx) % rxsize;
++		desc_next = pGmac->rx_dma_descriptors + next_entry;
++		prefetch(desc_next);
++
++		/* read the status of the incoming frame */
++		status = (GMAC_DMA_RxStatus(pGmac, desc));
++		if (unlikely(status == discard_frame))
++		{
++			pGmac->ndev->stats.rx_errors++;
++		}
++		else
++		{
++			struct sk_buff *skb;
++			int frame_len;
++			frame_len = desc->desc0.bit.frame_length;
++			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
++			 * Type frames (LLC/LLC-SNAP) */
++			if (unlikely(status != llc_snap))
++				frame_len -= ETH_FCS_LEN;
++#ifdef FH_GMAC_RX_DEBUG
++			if (frame_len > ETH_FRAME_LEN)
++				pr_debug("\tRX frame size %d, COE status: %d\n",
++					frame_len, status);
++
++			if (netif_msg_hw(pGmac))
++				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
++					desc, entry, desc->desc2.dw);
++#endif
++			skb = pGmac->rx_skbuff[entry];
++			if (unlikely(!skb))
++			{
++				pr_err("%s: Inconsistent Rx descriptor chain\n",
++						pGmac->ndev->name);
++				pGmac->ndev->stats.rx_dropped++;
++				break;
++			}
++			prefetch(skb->data - NET_IP_ALIGN);
++			pGmac->rx_skbuff[entry] = NULL;
++
++			skb_put(skb, frame_len);
++			dma_unmap_single(pGmac->dev,
++					pGmac->rx_skbuff_dma[entry],
++					pGmac->dma_buf_sz, DMA_FROM_DEVICE);
++#ifdef FH_GMAC_RX_DEBUG
++			if (netif_msg_pktdata(pGmac))
++			{
++				pr_info(" frame received (%dbytes)", frame_len);
++				print_pkt(skb->data, frame_len);
++			}
++#endif
++			skb->protocol = eth_type_trans(skb, pGmac->ndev);
++
++			if (unlikely(status == csum_none))
++			{
++				/* always for the old mac 10/100 */
++				skb_checksum_none_assert(skb);
++				netif_receive_skb(skb);
++			}
++			else
++			{
++				skb->ip_summed = CHECKSUM_UNNECESSARY;
++				napi_gro_receive(&pGmac->napi, skb);
++			}
++
++			pGmac->ndev->stats.rx_packets++;
++			pGmac->ndev->stats.rx_bytes += frame_len;
++		}
++		entry = next_entry;
++		desc = desc_next;	/* use prefetched values */
++	}
++
++	fh_gmac_rx_refill(pGmac);
++
++	pGmac->stats.rx_pkt_n += count;
++
++	return count;
++}
++
++/* minimum number of free TX descriptors required to wake up TX process */
++#define FH_GMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
++
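++/* Free TX descriptors left in the ring; one slot is kept as a sentinel */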
++static inline __u32 gmac_tx_avail(Gmac_Object* pGmac)
++{
++	return pGmac->dirty_tx + pGmac->dma_tx_size - pGmac->cur_tx - 1;
++}
++
++
++
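++/*
++ * Reclaim completed TX descriptors: unmap buffers, recycle or free the
++ * skbs and wake the queue once enough slots are available again.
++ */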
++static void fh_gmac_tx(Gmac_Object* pGmac)
++{
++	__u32 txsize = pGmac->dma_tx_size;
++	while (pGmac->dirty_tx != pGmac->cur_tx)
++	{
++		int last;
++		__u32 entry = pGmac->dirty_tx % txsize;
++		struct sk_buff *skb = pGmac->tx_skbuff[entry];
++		Gmac_Tx_DMA_Descriptors *desc = pGmac->tx_dma_descriptors + entry;
++
++		/* Check if the descriptor is owned by the DMA. */
++		if (desc->desc0.bit.own)
++			break;
++
++		/* Verify tx error by looking at the last segment */
++		last = desc->desc1.bit.last_segment;
++		if (likely(last))
++		{
++			int tx_error = GMAC_DMA_TxStatus(pGmac, desc);
++			if (likely(tx_error == 0))
++			{
++				pGmac->ndev->stats.tx_packets++;
++				pGmac->stats.tx_pkt_n++;
++			}
++			else
++			{
++				pGmac->ndev->stats.tx_errors++;
++			}
++		}
++		TX_DBG("%s: curr %d, dirty %d\n", __func__,
++				pGmac->cur_tx, pGmac->dirty_tx);
++
++		if (likely(desc->desc2.dw))
++		{
++			dma_unmap_single(pGmac->dev, desc->desc2.dw, desc->desc1.bit.buffer1_size,
++					 DMA_TO_DEVICE);
++		}
++		if (unlikely(desc->desc3.dw))
++		{
++			desc->desc3.dw = 0;
++		}
++
++		if (likely(skb != NULL))
++		{
++			/*
++			 * If there's room in the queue (limit it to size)
++			 * we add this skb back into the pool,
++			 * if it's the right size.
++			 */
++			if ((skb_queue_len(&pGmac->rx_recycle) < pGmac->dma_rx_size)
++					&&
++				skb_recycle_check(skb, pGmac->dma_buf_sz))
++			{
++				__skb_queue_head(&pGmac->rx_recycle, skb);
++			}
++			else
++			{
++				dev_kfree_skb(skb);
++			}
++
++			pGmac->tx_skbuff[entry] = NULL;
++		}
++		GMAC_DMA_ReleaseTxDesc(desc);
++
++		entry = (++pGmac->dirty_tx) % txsize;
++	}
++	if (unlikely(netif_queue_stopped(pGmac->ndev) &&
++		     gmac_tx_avail(pGmac) > FH_GMAC_TX_THRESH(pGmac)))
++	{
++		netif_tx_lock(pGmac->ndev);
++		if (netif_queue_stopped(pGmac->ndev) &&
++				gmac_tx_avail(pGmac) > FH_GMAC_TX_THRESH(pGmac))
++		{
++			TX_DBG("%s: restart transmit\n", __func__);
++			netif_wake_queue(pGmac->ndev);
++		}
++		netif_tx_unlock(pGmac->ndev);
++	}
++}
++
++
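++/*
++ * NAPI poll: reclaim finished TX work first, then receive up to 'budget'
++ * frames; interrupts are re-enabled once the ring has been drained.
++ */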
++static int fh_gmac_poll(struct napi_struct *napi, int budget)
++{
++	Gmac_Object* pGmac = container_of(napi, Gmac_Object, napi);
++	int work_done = 0;
++
++	pGmac->stats.poll_n++;
++	fh_gmac_tx(pGmac);
++	work_done = fh_gmac_rx(pGmac, budget);
++
++	if (work_done < budget)
++	{
++		napi_complete(napi);
++		writel(0x1a061, pGmac->remap_addr + REG_GMAC_INTR_EN);
++	}
++	return work_done;
++}
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++/* Polling receive - used by NETCONSOLE and other diagnostic tools
++ * to allow network I/O with interrupts disabled. */
++static void gmac_poll_controller(struct net_device *ndev)
++{
++	disable_irq(ndev->irq);
++	fh_gmac_interrupt(ndev->irq, ndev);
++	enable_irq(ndev->irq);
++}
++#endif
++
++
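++/* Frames larger than one 2 KiB buffer are split across two TX descriptors */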
++static __u32 gmac_handle_jumbo_frames(struct sk_buff *skb, struct net_device *dev, int checksum_insertion)
++{
++	//FIXME: 8K jumbo frame
++	Gmac_Object* pGmac = netdev_priv(dev);
++	__u32 nopaged_len = skb_headlen(skb);
++	__u32 txsize = pGmac->dma_tx_size;
++	__u32 entry = pGmac->cur_tx % txsize;
++	Gmac_Tx_DMA_Descriptors *desc = pGmac->tx_dma_descriptors + entry;
++
++	if (nopaged_len > BUFFER_SIZE_2K)
++	{
++
++		int buf2_size = nopaged_len - BUFFER_SIZE_2K + 1;
++
++		desc->desc2.dw = dma_map_single(pGmac->dev, skb->data,
++					BUFFER_SIZE_2K, DMA_TO_DEVICE);
++		desc->desc3.dw = desc->desc2.dw + BUFFER_SIZE_2K;
++		desc->desc1.bit.first_segment = 1;
++		desc->desc1.bit.buffer1_size = BUFFER_SIZE_2K - 1;
++		desc->desc1.bit.checksum_insertion_ctrl = 3;
++		entry = (++pGmac->cur_tx) % txsize;
++		desc = pGmac->tx_dma_descriptors + entry;
++		desc->desc2.dw = dma_map_single(pGmac->dev,
++					skb->data + BUFFER_SIZE_2K,
++					buf2_size, DMA_TO_DEVICE);
++		desc->desc3.dw = desc->desc2.dw + BUFFER_SIZE_2K;
++		desc->desc1.bit.first_segment = 0;
++		desc->desc1.bit.buffer1_size = buf2_size;
++		desc->desc1.bit.checksum_insertion_ctrl = checksum_insertion;
++		desc->desc0.bit.own = 1;
++		pGmac->tx_skbuff[entry] = NULL;
++	}
++	else
++	{
++		desc->desc2.dw = dma_map_single(pGmac->dev, skb->data,
++					nopaged_len, DMA_TO_DEVICE);
++		desc->desc3.dw = desc->desc2.dw + BUFFER_SIZE_2K;
++		desc->desc1.bit.first_segment = 1;
++		desc->desc1.bit.buffer1_size = nopaged_len;
++		desc->desc1.bit.checksum_insertion_ctrl = checksum_insertion;
++	}
++	return entry;
++}
++
++/* Configuration changes (passed on by ifconfig) */
++static int gmac_dev_set_config(struct net_device *ndev, struct ifmap *map)
++{
++	if (ndev->flags & IFF_UP)	/* can't act on a running interface */
++		return -EBUSY;
++
++	/* Don't allow changing the I/O address */
++	if (map->base_addr != ndev->base_addr) {
++		pr_warning("%s: can't change I/O address\n", ndev->name);
++		return -EOPNOTSUPP;
++	}
++
++	/* Don't allow changing the IRQ */
++	if (map->irq != ndev->irq) {
++		pr_warning("%s: can't change IRQ number %d\n",
++				ndev->name, ndev->irq);
++		return -EOPNOTSUPP;
++	}
++
++	/* ignore other fields */
++	return 0;
++}
++
++
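++/*
++ * Queue an skb for transmission: map the linear head and any paged
++ * fragments to descriptors, then kick the TX poll demand register.
++ */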
++static int gmac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	__u32 txsize = pGmac->dma_tx_size;
++	__u32 entry;
++	int i, csum_insertion = 0;
++	int nfrags = skb_shinfo(skb)->nr_frags;
++	Gmac_Tx_DMA_Descriptors *desc, *first;
++
++	if (unlikely(gmac_tx_avail(pGmac) < nfrags + 1))
++	{
++		if (!netif_queue_stopped(ndev))
++		{
++			netif_stop_queue(ndev);
++			/* This is a hard error, log it. */
++			pr_err("%s: BUG! Tx Ring full when queue awake\n",
++				__func__);
++		}
++		return NETDEV_TX_BUSY;
++	}
++
++	entry = pGmac->cur_tx % txsize;
++	//fixme: debug
++
++#ifdef FH_GMAC_XMIT_DEBUG
++	if ((skb->len > ETH_FRAME_LEN) || nfrags)
++		pr_info("fh gmac xmit:\n"
++			   "\tskb addr %p - len: %d - nopaged_len: %d\n"
++			   "\tn_frags: %d - ip_summed: %d - %s gso\n",
++			   skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
++			   !skb_is_gso(skb) ? "isn't" : "is");
++#endif
++
++	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL) ? 3 : 0;
++
++	desc = pGmac->tx_dma_descriptors + entry;
++	first = desc;
++
++#ifdef FH_GMAC_XMIT_DEBUG
++	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
++		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
++			   "\t\tn_frags: %d, ip_summed: %d\n",
++			   skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
++#endif
++	pGmac->tx_skbuff[entry] = skb;
++	if (unlikely(skb->len >= BUFFER_SIZE_2K))
++	{
++		printk("jumbo_frames detected\n");
++		entry = gmac_handle_jumbo_frames(skb, ndev, csum_insertion);
++		desc = pGmac->tx_dma_descriptors + entry;
++	}
++	else
++	{
++		__u32 nopaged_len = skb_headlen(skb);
++		desc->desc2.dw = dma_map_single(pGmac->dev, skb->data,
++					nopaged_len, DMA_TO_DEVICE);
++		desc->desc1.bit.first_segment = 1;
++		desc->desc1.bit.buffer1_size = nopaged_len;
++		desc->desc1.bit.checksum_insertion_ctrl = csum_insertion;
++	}
++
++	for (i = 0; i < nfrags; i++)
++	{
++		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++		int len = frag->size;
++
++		entry = (++pGmac->cur_tx) % txsize;
++		desc = pGmac->tx_dma_descriptors + entry;
++
++		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
++		desc->desc2.dw = dma_map_page(pGmac->dev, frag->page,
++					  frag->page_offset,
++					  len, DMA_TO_DEVICE);
++		pGmac->tx_skbuff[entry] = NULL;
++		desc->desc1.bit.first_segment = 0;
++		desc->desc1.bit.buffer1_size = len;
++		wmb();
++		desc->desc1.bit.checksum_insertion_ctrl = csum_insertion;
++		desc->desc0.bit.own = 1;
++		wmb();
++	}
++
++	/* Interrupt on completion only for the last segment */
++	desc->desc1.bit.last_segment = 1;
++	desc->desc1.bit.intr_on_completion = 1;
++	wmb();
++	/* To avoid a race condition, hand the first descriptor to the DMA last */
++	first->desc0.bit.own = 1;
++	wmb();
++	pGmac->cur_tx++;
++
++#ifdef FH_GMAC_XMIT_DEBUG
++	if (netif_msg_pktdata(pGmac))
++	{
++		pr_info("fh gmac xmit: current=%d, dirty=%d, entry=%d, "
++			   "first=%p, nfrags=%d\n",
++			   (pGmac->cur_tx % txsize), (pGmac->dirty_tx % txsize),
++			   entry, first, nfrags);
++		GMAC_DMA_DisplayTxDesc(pGmac->tx_dma_descriptors, 3);
++		pr_info(">>> frame to be transmitted: ");
++		print_pkt(skb->data, skb->len);
++	}
++#endif
++	if (unlikely(gmac_tx_avail(pGmac) <= (MAX_SKB_FRAGS + 1)))
++	{
++		TX_DBG("%s: stop transmitted packets\n", __func__);
++		netif_stop_queue(ndev);
++	}
++
++	ndev->stats.tx_bytes += skb->len;
++	writel(0x1, pGmac->remap_addr + REG_GMAC_TX_POLL_DEMAND);
++
++	return NETDEV_TX_OK;
++}
++
++static void gmac_dev_tx_timeout(struct net_device *ndev)
++{
++	Gmac_Object *pGmac = netdev_priv(ndev);
++	gmac_tx_err(pGmac);
++}
++
++static int gmac_dev_change_mtu(struct net_device *ndev, int new_mtu)
++{
++	int max_mtu;
++
++	if (netif_running(ndev))
++	{
++		pr_err("%s: must be stopped to change its MTU\n", ndev->name);
++		return -EBUSY;
++	}
++
++	max_mtu = ETH_DATA_LEN;
++
++	if ((new_mtu < 46) || (new_mtu > max_mtu))
++	{
++		pr_err("%s: invalid MTU, max MTU is: %d\n", ndev->name, max_mtu);
++		return -EINVAL;
++	}
++
++	ndev->mtu = new_mtu;
++	netdev_update_features(ndev);
++
++	return 0;
++}
++
++
++static void gmac_set_filter(struct net_device *ndev)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	__u32 value = readl(pGmac->remap_addr + REG_GMAC_FRAME_FILTER);
++
++	if (ndev->flags & IFF_PROMISC)
++	{
++		//enable Promiscuous Mode
++		value = 0x1;
++	}
++	else if ((netdev_mc_count(ndev) > 64)	//HASH_TABLE_SIZE = 64
++		   || (ndev->flags & IFF_ALLMULTI))
++	{
++		//enable Pass All Multicast
++		value = 0x10;
++
++		writel(0xffffffff, pGmac->remap_addr + REG_GMAC_HASH_HIGH);
++		writel(0xffffffff, pGmac->remap_addr + REG_GMAC_HASH_LOW);
++	}
++	else if (netdev_mc_empty(ndev))
++	{	/* no multicast */
++		value = 0;
++	}
++	else
++	{
++		__u32 mc_filter[2];
++		struct netdev_hw_addr *ha;
++
++		/* Perfect filter mode for physical address and Hash
++		   filter for multicast */
++		value = 0x404;
++
++		memset(mc_filter, 0, sizeof(mc_filter));
++		netdev_for_each_mc_addr(ha, ndev)
++		{
++			/* The upper 6 bits of the calculated CRC are used to
++			 * index the contents of the hash table */
++			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
++
++			/* The most significant bit determines the register to
++			 * use (H/L) while the other 5 bits determine the bit
++			 * within the register. */
++			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
++		}
++		writel(GMAC_BitReverse(mc_filter[0]), pGmac->remap_addr + REG_GMAC_HASH_HIGH);
++		writel(GMAC_BitReverse(mc_filter[1]), pGmac->remap_addr + REG_GMAC_HASH_LOW);
++	}
++	writel(value, pGmac->remap_addr + REG_GMAC_FRAME_FILTER);
++}
++
++static void gmac_dev_mcast_set(struct net_device *ndev)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++
++	spin_lock(&pGmac->lock);
++	gmac_set_filter(ndev);
++	spin_unlock(&pGmac->lock);
++}
++
++static int gmac_dev_ioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	int ret;
++
++	if (!netif_running(ndev))
++		return -EINVAL;
++
++	if (!pGmac->phydev)
++		return -EINVAL;
++
++	ret = phy_mii_ioctl(pGmac->phydev, ifrq, cmd);
++
++	return ret;
++
++}
++
++
++
++static int fh_gmac_init_phy(struct net_device *ndev)
++{
++	Gmac_Object *pGmac = netdev_priv(ndev);
++	struct phy_device *phydev;
++	char phy_id[MII_BUS_ID_SIZE + 3];
++	char bus_id[MII_BUS_ID_SIZE];
++
++	if (pGmac->phydev == NULL)
++		return -ENODEV;
++
++	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", 0);
++	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, pGmac->priv_data->phyid);
++	pr_debug("fh_gmac_init_phy:  trying to attach to %s\n", phy_id);
++
++	phydev = phy_connect(ndev, phy_id, &fh_gmac_adjust_link, 0,
++			pGmac->phy_interface);
++
++	if (IS_ERR(phydev))
++	{
++		pr_err("%s: Could not attach to PHY\n", ndev->name);
++		return PTR_ERR(phydev);
++	}
++
++	phydev->advertising &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
++
++
++	//FIXME: phy_id == 0
++	/*
++	 * Broken HW is sometimes missing the pull-up resistor on the
++	 * MDIO line, which results in reads to non-existent devices returning
++	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
++	 * device as well.
++	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
++	 */
++	if (phydev->phy_id == 0)
++	{
++		phy_disconnect(phydev);
++		return -ENODEV;
++	}
++	pr_debug("fh_gmac_init_phy:  %s: attached to PHY (UID 0x%x)"
++	       " Link = %d\n", ndev->name, phydev->phy_id, phydev->link);
++
++	return 0;
++}
++
++
++static int gmac_dev_open(struct net_device *ndev)
++{
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	int ret;
++
++	fh_gmac_verify_args();
++
++	/* MDIO bus Registration */
++	ret = fh_mdio_register(ndev);
++	if (ret < 0)
++	{
++		goto open_error;
++	}
++
++	ret = fh_gmac_init_phy(ndev);
++	if (unlikely(ret))
++	{
++		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
++		goto open_error;
++	}
++	/* Create and initialize the TX/RX descriptors chains. */
++	//FIXME: STMMAC_ALIGN(buf_sz);
++	pGmac->dma_tx_size = dma_txsize;
++	pGmac->dma_rx_size = dma_rxsize;
++	pGmac->dma_buf_sz = BUFFER_SIZE_2K;
++
++	GMAC_DMA_InitDescRings(ndev);
++	/* DMA initialization and SW reset */
++	ret = GMAC_DMA_Init(ndev, pGmac->tx_bus_addr, pGmac->rx_bus_addr);
++	if (ret < 0)
++	{
++		pr_err("%s: DMA initialization failed\n", __func__);
++		goto open_error;
++	}
++
++	/* Copy the MAC addr into the HW  */
++	GMAC_SetMacAddress(pGmac);
++
++	/* Initialize the MAC Core */
++	GMAC_CoreInit(pGmac);
++
++	netdev_update_features(ndev);
++
++	/* Request the IRQ lines */
++	ret = request_irq(ndev->irq, fh_gmac_interrupt, IRQF_SHARED, ndev->name, ndev);
++	if (unlikely(ret < 0))
++	{
++		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
++			   __func__, ndev->irq, ret);
++		goto open_error;
++	}
++
++	/* Enable the MAC Rx/Tx */
++	GMAC_EnableMac(pGmac);
++
++	/* Set the HW DMA mode and the COE */
++	//FIXME:TTC or SF
++	GMAC_DMA_OpMode(pGmac);
++
++	/* Extra statistics */
++	memset(&pGmac->stats, 0, sizeof(struct Gmac_Stats));
++	//FIXME: threshold?
++	pGmac->stats.threshold = 64;
++
++	/* Start the ball rolling... */
++	pr_debug("%s: DMA RX/TX processes started...\n", ndev->name);
++	GMAC_DMA_StartTx(pGmac);
++	GMAC_DMA_StartRx(pGmac);
++	//FIXME: dump register
++
++	if (pGmac->phydev)
++		phy_start(pGmac->phydev);
++
++	napi_enable(&pGmac->napi);
++	skb_queue_head_init(&pGmac->rx_recycle);
++	netif_start_queue(ndev);
++
++	return 0;
++
++open_error:
++	if (pGmac->phydev)
++		phy_disconnect(pGmac->phydev);
++
++	return ret;
++
++}
++
++static int gmac_dev_stop(struct net_device *ndev)
++{
++
++	Gmac_Object* pGmac = netdev_priv(ndev);
++
++	/* Stop and disconnect the PHY */
++	if (pGmac->phydev)
++	{
++		phy_stop(pGmac->phydev);
++		phy_disconnect(pGmac->phydev);
++		pGmac->phydev = NULL;
++
++		pGmac->oldduplex = 0;
++		pGmac->speed = 0;
++	}
++
++	netif_stop_queue(ndev);
++
++	napi_disable(&pGmac->napi);
++	skb_queue_purge(&pGmac->rx_recycle);
++
++	/* Free the IRQ lines */
++	free_irq(ndev->irq, ndev);
++
++	/* Stop TX/RX DMA and clear the descriptors */
++	GMAC_DMA_StopTx(pGmac);
++	GMAC_DMA_StopRx(pGmac);
++
++	/* Release and free the Rx/Tx resources */
++	GMAC_DMA_FreeDesc(pGmac);
++
++	/* Disable the MAC Rx/Tx */
++	GMAC_DisableMac(pGmac);
++
++	netif_carrier_off(ndev);
++
++	fh_mdio_unregister(ndev);
++
++	return 0;
++}
++
++
++static const struct net_device_ops fh_gmac_netdev_ops =
++{
++	.ndo_open			= gmac_dev_open,
++	.ndo_stop			= gmac_dev_stop,
++	.ndo_start_xmit			= gmac_dev_xmit,
++	.ndo_set_multicast_list		= gmac_dev_mcast_set,
++	.ndo_set_mac_address		= gmac_dev_set_mac_addr,
++	.ndo_do_ioctl			= gmac_dev_ioctl,
++	.ndo_tx_timeout			= gmac_dev_tx_timeout,
++	.ndo_change_mtu 		= gmac_dev_change_mtu,
++	.ndo_fix_features 		= NULL,
++	.ndo_set_config 		= gmac_dev_set_config,
++#ifdef STMMAC_VLAN_TAG_USED
++	.ndo_vlan_rx_register		= stmmac_vlan_rx_register,
++#endif
++#ifdef CONFIG_NET_POLL_CONTROLLER
++	.ndo_poll_controller		= gmac_poll_controller,
++#endif
++};
++
++
++static int __devinit fh_gmac_probe(struct platform_device *pdev)
++{
++	int ret = 0;
++	Gmac_Object *pGmac;
++	struct net_device *ndev;
++	struct resource *mem_res, *irq_res;
++	struct fh_gmac_platform_data *plat_data;
++
++	pr_info("GMAC driver:\n\tplatform registration... ");
++
++	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!mem_res)
++	{
++		pr_err("%s: ERROR: getting resource failed"
++		       "cannot get IORESOURCE_MEM\n",
++		       __func__);
++		return -ENODEV;
++	}
++
++	if (!request_mem_region(mem_res->start, resource_size(mem_res), pdev->name))
++	{
++		pr_err("%s: ERROR: memory allocation failed"
++		       "cannot get the I/O addr 0x%x\n",
++		       __func__, (__u32)mem_res->start);
++		return -EBUSY;
++	}
++
++	ndev = alloc_etherdev(sizeof(Gmac_Object));
++
++	if (!ndev)
++	{
++		pr_err("%s: ERROR: allocating the device\n", __func__);
++		ret = -ENOMEM;
++		goto out_release_region;
++	}
++
++	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++	if (!irq_res)
++	{
++		pr_err("%s: ERROR: getting resource failed"
++		       "cannot get IORESOURCE_IRQ\n", __func__);
++		ret = -ENXIO;
++		goto out_free_ndev;
++	}
++	ndev->irq = irq_res->start;
++
++	pGmac = netdev_priv(ndev);
++
++	pGmac->remap_addr = ioremap(mem_res->start, resource_size(mem_res));
++
++	if (!pGmac->remap_addr)
++	{
++		pr_err("%s: ERROR: memory mapping failed\n", __func__);
++		ret = -ENOMEM;
++		goto out_free_ndev;
++	}
++
++	pGmac->clk = clk_get(&pdev->dev, "eth_clk");
++	if (IS_ERR(pGmac->clk))
++	{
++		ret = PTR_ERR(pGmac->clk);
++		goto out_unmap;
++	}
++
++	clk_enable(pGmac->clk);
++	/* add net_device to platform_device */
++	SET_NETDEV_DEV(ndev, &pdev->dev);
++
++	pGmac->dev = &(pdev->dev);
++	pGmac->pdev = pdev;
++	pGmac->ndev = ndev;
++	plat_data = pdev->dev.platform_data;
++	pGmac->priv_data = plat_data;
++
++	platform_set_drvdata(pdev, ndev);
++	ndev->base_addr = (unsigned long)pGmac->remap_addr;
++
++	ether_setup(ndev);
++	ndev->netdev_ops = &fh_gmac_netdev_ops;
++	fh_gmac_set_ethtool_ops(ndev);
++
++	ndev->hw_features = NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_RXCSUM;
++	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
++	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
++	pGmac->msg_enable = netif_msg_init(debug, FH_GMAC_DEBUG);
++
++
++	if (flow_ctrl)
++	{
++		pGmac->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
++	}
++
++	pGmac->pause = pause;
++
++
++	netif_napi_add(ndev, &(pGmac->napi), fh_gmac_poll, 64);
++
++
++	if (!is_valid_ether_addr(pGmac->local_mac_address))
++	{
++		/* Use random MAC if none passed */
++		random_ether_addr(pGmac->local_mac_address);
++		pr_warning("\tusing random MAC address: %pM\n", pGmac->local_mac_address);
++	}
++	memcpy(ndev->dev_addr, pGmac->local_mac_address, ETH_ALEN);
++
++	spin_lock_init(&pGmac->lock);
++
++	ret = register_netdev(ndev);
++	if (ret)
++	{
++		pr_err("%s: ERROR %i registering the netdevice\n", __func__, ret);
++		ret = -ENODEV;
++		goto out_plat_exit;
++	}
++
++	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
++		   "\t\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
++		   pdev->id, ndev->irq, pGmac->remap_addr);
++
++	plat_data->interface = pGmac->phy_interface = phymode;
++
++	/* Custom initialisation */
++	if (pGmac->priv_data->early_init)
++	{
++		pGmac->priv_data->early_init(plat_data);
++	}
++
++	if (pGmac->priv_data->plat_init)
++	{
++		pGmac->priv_data->plat_init(plat_data);
++	}
++
++
++	return 0;
++
++out_plat_exit:
++	clk_disable(pGmac->clk);
++out_unmap:
++	iounmap(pGmac->remap_addr);
++out_free_ndev:
++	free_netdev(ndev);
++	platform_set_drvdata(pdev, NULL);
++out_release_region:
++	release_mem_region(mem_res->start, resource_size(mem_res));
++
++	return ret;
++}
++
++static int __init parse_tag_phymode(const struct tag *tag)
++{
++	phymode = tag->u.phymode.phymode;
++	return 0;
++}
++
++__tagtable(ATAG_PHYMODE, parse_tag_phymode);
++
++static int __devexit fh_gmac_remove(struct platform_device *pdev)
++{
++
++	struct net_device* ndev = platform_get_drvdata(pdev);
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	struct resource* res;
++
++	pr_info("%s:\n\tremoving driver", __func__);
++
++	GMAC_DMA_StopTx(pGmac);
++	GMAC_DMA_StopRx(pGmac);
++
++	GMAC_DisableMac(pGmac);
++
++	netif_carrier_off(ndev);
++
++	platform_set_drvdata(pdev, NULL);
++	unregister_netdev(ndev);
++
++	clk_disable(pGmac->clk);
++
++	iounmap((void *)pGmac->remap_addr);
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	release_mem_region(res->start, resource_size(res));
++	free_netdev(ndev);
++
++	return 0;
++}
++
++
++#ifdef CONFIG_PM
++static int fh_gmac_suspend(struct device *dev)
++{
++	struct net_device *ndev = dev_get_drvdata(dev);
++	Gmac_Object* pGmac = netdev_priv(ndev);
++
++	if (!ndev || !netif_running(ndev))
++		return 0;
++
++	spin_lock(&pGmac->lock);
++
++	netif_device_detach(ndev);
++	netif_stop_queue(ndev);
++	if (pGmac->phydev)
++		phy_stop(pGmac->phydev);
++
++	napi_disable(&pGmac->napi);
++
++	/* Stop TX/RX DMA */
++	GMAC_DMA_StopTx(pGmac);
++	GMAC_DMA_StopRx(pGmac);
++	/* Clear the Rx/Tx descriptors */
++	GMAC_DMA_InitRxDesc(pGmac->rx_dma_descriptors, pGmac->dma_rx_size);
++	GMAC_DMA_InitTxDesc(pGmac->tx_dma_descriptors, pGmac->dma_tx_size);
++
++	/* Enable Power down mode by programming the PMT regs */
++	if (device_may_wakeup(pGmac->dev))
++	{
++		//priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
++	}
++	else
++	{
++		GMAC_DisableMac(pGmac);
++	}
++
++	spin_unlock(&pGmac->lock);
++	return 0;
++}
++
++static int fh_gmac_resume(struct device *dev)
++{
++	struct net_device *ndev = dev_get_drvdata(dev);
++	Gmac_Object* pGmac = netdev_priv(ndev);
++	u32 reg;
++
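++	/* soft-reset the PHY (BMCR bit 15) before bringing the interface back up */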
++	reg = pGmac->mii->read(pGmac->mii, 0, 0);
++	reg |= 1 << 15;
++	pGmac->mii->write(pGmac->mii, 0, 0, reg);
++
++	pGmac->mii->reset(pGmac->mii);
++
++	if (!netif_running(ndev))
++		return 0;
++
++	spin_lock(&pGmac->lock);
++
++	/* Power Down bit, into the PM register, is cleared
++	 * automatically as soon as a magic packet or a Wake-up frame
++	 * is received. Anyway, it's better to manually clear
++	 * this bit because it can generate problems while resuming
++	 * from another devices (e.g. serial console). */
++	if (device_may_wakeup(pGmac->dev))
++	{
++		//priv->hw->mac->pmt(priv->ioaddr, 0);
++	}
++
++	netif_device_attach(ndev);
++
++	/* Enable the MAC and DMA */
++	GMAC_EnableMac(pGmac);
++	GMAC_DMA_StartTx(pGmac);
++	GMAC_DMA_StartRx(pGmac);
++
++	napi_enable(&pGmac->napi);
++
++	if (pGmac->phydev)
++		phy_start(pGmac->phydev);
++
++	netif_start_queue(ndev);
++
++	spin_unlock(&pGmac->lock);
++	return 0;
++}
++
++static int fh_gmac_freeze(struct device *dev)
++{
++	struct net_device *ndev = dev_get_drvdata(dev);
++
++	if (!ndev || !netif_running(ndev))
++		return 0;
++
++	return gmac_dev_stop(ndev);
++}
++
++static int fh_gmac_restore(struct device *dev)
++{
++	struct net_device *ndev = dev_get_drvdata(dev);
++
++	if (!ndev || !netif_running(ndev))
++		return 0;
++
++	return gmac_dev_open(ndev);
++}
++
++static const struct dev_pm_ops fh_gmac_pm_ops =
++{
++	.suspend = fh_gmac_suspend,
++	.resume = fh_gmac_resume,
++	.freeze = fh_gmac_freeze,
++	.thaw = fh_gmac_restore,
++	.restore = fh_gmac_restore,
++};
++#else
++static const struct dev_pm_ops fh_gmac_pm_ops;
++#endif /* CONFIG_PM */
++
++
++static struct platform_driver fh_gmac_driver =
++{
++	.driver =
++	{
++		.name	 = "fh_gmac",
++		.owner	 = THIS_MODULE,
++		.pm	 = &fh_gmac_pm_ops,
++	},
++	.probe = fh_gmac_probe,
++	.remove = __devexit_p(fh_gmac_remove),
++};
++
++static int __init fh_gmac_init(void)
++{
++	return platform_driver_register(&fh_gmac_driver);
++}
++late_initcall(fh_gmac_init);
++
++
++static void __exit fh_gmac_exit(void)
++{
++	platform_driver_unregister(&fh_gmac_driver);
++}
++module_exit(fh_gmac_exit);
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("QIN");
++MODULE_DESCRIPTION("Fullhan Ethernet driver");
+diff --git a/drivers/net/fh_gmac/fh_gmac_phyt.c b/drivers/net/fh_gmac/fh_gmac_phyt.c
+new file mode 100644
+index 00000000..5182a71b
+--- /dev/null
++++ b/drivers/net/fh_gmac/fh_gmac_phyt.c
+@@ -0,0 +1,222 @@
++#include <linux/mii.h>
++#include <linux/phy.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/gpio.h>
++#include <mach/fh_gmac.h>
++#include "fh_gmac_phyt.h"
++#include "fh_gmac.h"
++
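++/*
++ * MDIO read: write the PHY/register address with the busy bit set, poll
++ * until the controller clears it, then fetch the data register.
++ */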
++static int fh_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
++{
++	struct net_device *ndev = bus->priv;
++	Gmac_Object *pGmac = netdev_priv(ndev);
++	int timeout = 1000;
++
++	if (phyaddr < 0)
++		return -ENODEV;
++
++	writel(phyaddr << 11 | gmac_gmii_clock_100_150 << 2 | phyreg << 6 | 0x1,
++	       pGmac->remap_addr + REG_GMAC_GMII_ADDRESS);
++
++	while (readl(pGmac->remap_addr + REG_GMAC_GMII_ADDRESS) & 0x1) {
++		udelay(100);
++		timeout--;
++		if (timeout < 0) {
++			printk(KERN_ERR "ERROR: %s, timeout\n", __func__);
++			break;
++		}
++	}
++
++	return readl(pGmac->remap_addr + REG_GMAC_GMII_DATA);
++}
++
++static int fh_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
++			 u16 phydata)
++{
++	struct net_device *ndev = bus->priv;
++	Gmac_Object *pGmac = netdev_priv(ndev);
++	int timeout = 1000;
++
++	if (phyaddr < 0)
++		return -ENODEV;
++
++	writel(phydata, pGmac->remap_addr + REG_GMAC_GMII_DATA);
++	writel(0x1 << 1 | phyaddr << 11 | gmac_gmii_clock_100_150 << 2 | phyreg
++	       << 6 | 0x1, pGmac->remap_addr + REG_GMAC_GMII_ADDRESS);
++
++	while (readl(pGmac->remap_addr + REG_GMAC_GMII_ADDRESS) & 0x1) {
++		udelay(100);
++		timeout--;
++		if (timeout < 0) {
++			printk(KERN_ERR "ERROR: %s, timeout\n", __func__);
++			break;
++		}
++	}
++	return 0;
++}
++
++int fh_mdio_reset(struct mii_bus *bus)
++{
++	struct net_device *ndev = bus->priv;
++	Gmac_Object *pGmac = netdev_priv(ndev);
++	struct fh_gmac_platform_data *plat_data;
++
++	plat_data = pGmac->priv_data;
++
++	if (plat_data && plat_data->phy_reset)
++		plat_data->phy_reset();
++
++	return 0;
++}
++
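++/* Apply PHY-specific MII/RMII mode configuration for the supported PHYs */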
++int fh_mdio_set_mii(struct mii_bus *bus)
++{
++	struct net_device *ndev = bus->priv;
++	Gmac_Object *pGmac = netdev_priv(ndev);
++	__u32 rmii_mode;
++	int phyid = pGmac->priv_data->phyid;
++
++	if (pGmac->phydev == NULL)
++		return -ENODEV;
++
++	if (pGmac->phy_interface == PHY_INTERFACE_MODE_RMII) {
++		switch (pGmac->phydev->phy_id) {
++		case FH_GMAC_PHY_RTL8201:
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_rtl8201_page_select, 7);
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_rtl8201_rmii_mode, 0x1ffa);
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_rtl8201_page_select, 0);
++			break;
++		case FH_GMAC_PHY_IP101G:
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_ip101g_page_select, 16);
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_rtl8201_rmii_mode, 0x1006);
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_ip101g_page_select, 0x10);
++			break;
++		case FH_GMAC_PHY_TI83848:
++			rmii_mode = fh_mdio_read(bus, phyid,
++					gmac_phyt_ti83848_rmii_mode);
++			rmii_mode |= 0x20;
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_ti83848_rmii_mode, rmii_mode);
++			break;
++		default:
++			return -ENODEV;
++		}
++	} else if (pGmac->phy_interface == PHY_INTERFACE_MODE_MII) {
++		switch (pGmac->phydev->phy_id) {
++		case FH_GMAC_PHY_RTL8201:
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_rtl8201_page_select, 7);
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_rtl8201_rmii_mode, 0x6ff3);
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_rtl8201_page_select, 0);
++			break;
++		case FH_GMAC_PHY_IP101G:
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_ip101g_page_select, 16);
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_rtl8201_rmii_mode, 0x2);
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_ip101g_page_select, 0x10);
++			break;
++		case FH_GMAC_PHY_TI83848:
++			rmii_mode = fh_mdio_read(bus, phyid,
++					gmac_phyt_ti83848_rmii_mode);
++			rmii_mode &= ~(0x20);
++			fh_mdio_write(bus, phyid,
++					gmac_phyt_ti83848_rmii_mode, rmii_mode);
++			break;
++		default:
++			return -ENODEV;
++		}
++	}
++
++	return 0;
++}
++
++int fh_mdio_register(struct net_device *ndev)
++{
++	int err = 0, found, addr;
++	struct mii_bus *new_bus;
++	Gmac_Object *pGmac = netdev_priv(ndev);
++	struct phy_device *phydev = NULL;
++
++	new_bus = mdiobus_alloc();
++	if (new_bus == NULL)
++		return -ENOMEM;
++	new_bus->name = pGmac->phy_interface == PHY_INTERFACE_MODE_MII ?
++			"gmac_mii" : "gmac_rmii";
++	new_bus->read = &fh_mdio_read;
++	new_bus->write = &fh_mdio_write;
++	new_bus->reset = &fh_mdio_reset;
++	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", 0);
++	new_bus->priv = ndev;
++	new_bus->parent = pGmac->dev;
++	err = mdiobus_register(new_bus);
++	if (err != 0) {
++		pr_err("%s: Cannot register as MDIO bus, error: %d\n",
++		       new_bus->name, err);
++		goto bus_register_fail;
++	}
++
++	pGmac->mii = new_bus;
++
++	found = 0;
++	for (addr = 0; addr < 32; addr++) {
++		phydev = new_bus->phy_map[addr];
++		if (phydev) {
++			if (pGmac->priv_data->phyid == -1)
++				pGmac->priv_data->phyid = addr;
++
++			found = 1;
++			pGmac->phydev = phydev;
++			break;
++		}
++	}
++
++	if (pGmac->mii == NULL || phydev == NULL) {
++		pr_warning("%s: MII BUS or phydev is NULL\n", ndev->name);
++		err = -ENXIO;
++		goto bus_unregister;
++	}
++
++	err = fh_mdio_set_mii(pGmac->mii);
++
++	if (!found || err) {
++		pr_warning("%s: No PHY found\n", ndev->name);
++		err = -ENXIO;
++		goto bus_unregister;
++	}
++
++	pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
++		ndev->name, pGmac->phydev->phy_id, addr,
++		pGmac->phydev->irq, dev_name(&pGmac->phydev->dev),
++		(addr ==
++		 pGmac->priv_data->phyid) ? " active" : "");
++
++	return 0;
++
++bus_unregister:
++	pGmac->phydev = NULL;
++	mdiobus_unregister(new_bus);
++bus_register_fail:
++	mdiobus_free(new_bus);
++	return err;
++}
++
++int fh_mdio_unregister(struct net_device *ndev)
++{
++	Gmac_Object *pGmac = netdev_priv(ndev);
++
++	mdiobus_unregister(pGmac->mii);
++	pGmac->mii->priv = NULL;
++	mdiobus_free(pGmac->mii);
++	return 0;
++}
+diff --git a/drivers/net/fh_gmac/fh_gmac_phyt.h b/drivers/net/fh_gmac/fh_gmac_phyt.h
+new file mode 100644
+index 00000000..de53c08d
+--- /dev/null
++++ b/drivers/net/fh_gmac/fh_gmac_phyt.h
+@@ -0,0 +1,83 @@
++/*
++ * fh_gmac_phyt.h
++ *
++ *  Created on: May 22, 2014
++ *      Author: duobao
++ */
++
++#ifndef FH_GMAC_PHYT_H_
++#define FH_GMAC_PHYT_H_
++
++#define FH_GMAC_PHY_IP101G	0x02430C54
++#define FH_GMAC_PHY_RTL8201	0x001CC816
++#define FH_GMAC_PHY_TI83848	0xFFFFFFFF
++
++enum
++{
++	gmac_phyt_speed_10M_half_duplex = 1,
++	gmac_phyt_speed_100M_half_duplex = 2,
++	gmac_phyt_speed_10M_full_duplex = 5,
++	gmac_phyt_speed_100M_full_duplex = 6
++};
++
++
++typedef union
++{
++	struct
++	{
++		__u32	reserved_6_0					:7;
++		__u32	collision_test					:1;
++		__u32	duplex_mode						:1;
++		__u32	restart_auto_negotiate			:1;
++		__u32	isolate							:1;
++		__u32	power_down						:1;
++		__u32	auto_negotiate_enable			:1;
++		__u32	speed_select					:1;
++		__u32	loopback						:1;
++		__u32	reset							:1;
++		__u32	reserved_31_16					:16;
++	}bit;
++	__u32 dw;
++}Reg_Phyt_Basic_Ctrl;
++
++
++typedef union
++{
++	struct
++	{
++		__u32	extended_capabilities			:1;
++		__u32	jabber_detect					:1;
++		__u32	link_status						:1;
++		__u32	auto_negotiate_ability			:1;
++		__u32	remote_fault					:1;
++		__u32	auto_negotiate_complete			:1;
++		__u32	reserved_10_6					:5;
++		__u32	base_t_half_duplex_10			:1;
++		__u32	base_t_full_duplex_10			:1;
++		__u32	base_tx_half_duplex_100			:1;
++		__u32	base_tx_full_duplex_100			:1;
++		__u32	base_t_4						:1;
++		__u32	reserved_31_16					:16;
++	}bit;
++	__u32 dw;
++}Reg_Phyt_Basic_Status;
++
++typedef union
++{
++	struct
++	{
++		__u32	scramble_disable			:1;
++		__u32	reserved_1					:1;
++		__u32	speed_indication			:3;
++		__u32	reserved_5					:1;
++		__u32	enable_4b5b					:1;
++		__u32	gpo							:3;
++		__u32	reserved_11_10				:2;
++		__u32	auto_done					:1;
++		__u32	reserved_31_13				:19;
++	}bit;
++	__u32 dw;
++}Reg_Phyt_Special_Status;
++
++
++#endif /* FH_GMAC_PHYT_H_ */
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index ff109fe5..6ede872a 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -845,7 +845,7 @@ static int genphy_config_init(struct phy_device *phydev)
+ 	 * all possible port types */
+ 	features = (SUPPORTED_TP | SUPPORTED_MII
+ 			| SUPPORTED_AUI | SUPPORTED_FIBRE |
+-			SUPPORTED_BNC);
++			SUPPORTED_BNC | SUPPORTED_Pause);
+ 
+ 	/* Do we support autonegotiation? */
+ 	val = phy_read(phydev, MII_BMSR);
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index 52502883..5bf326ff 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -20,8 +20,8 @@
+  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+  */
+ 
+-// #define	DEBUG			// error path messages, extra info
+-// #define	VERBOSE			// more; success messages
++#define	DEBUG			// error path messages, extra info
++#define	VERBOSE			// more; success messages
+ 
+ #include <linux/module.h>
+ #include <linux/kmod.h>
+@@ -39,6 +39,22 @@
+ #define DRIVER_VERSION "14-Jun-2006"
+ static const char driver_name [] = "asix";
+ 
++/* copied from Linux 4.9 by zhangy, 2018-10-31 */
++struct asix_rx_fixup_info {
++	struct sk_buff *ax_skb;
++	u32 header;
++	u16 remaining;
++	bool split_head;
++};
++/* copied from Linux 4.9 by zhangy, 2018-10-31 */
++struct asix_common_private {
++	void (*resume)(struct usbnet *dev);
++	void (*suspend)(struct usbnet *dev);
++	u16 presvd_phy_advertise;
++	u16 presvd_phy_bmcr;
++	struct asix_rx_fixup_info rx_fixup_info;
++};
++
+ /* ASIX AX8817X based USB 2.0 Ethernet Devices */
+ 
+ #define AX_CMD_SET_SW_MII		0x06
+@@ -163,7 +179,7 @@ static const char driver_name [] = "asix";
+ 
+ #define MARVELL_CTRL_TXDELAY	0x0002
+ #define MARVELL_CTRL_RXDELAY	0x0080
+-
++struct asix_common_private *get_asix_private(struct usbnet *dev);
+ /* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
+ struct asix_data {
+ 	u8 multi_filter[AX_MCAST_FILTER_SIZE];
+@@ -266,15 +282,19 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ 	int status;
+ 	struct urb *urb;
+ 
+-	netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
++	netdev_dbg(dev->net, "asix_write_cmd_async()"
++                   "cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ 		   cmd, value, index, size);
+ 	if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
+-		netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
++		netdev_err(dev->net,
++			"Error allocating URB in write_cmd_async!\n");
+ 		return;
+ 	}
+ 
+-	if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
+-		netdev_err(dev->net, "Failed to allocate memory for control request\n");
++	if ((req = kmalloc(sizeof(struct usb_ctrlrequest),
++                           GFP_ATOMIC)) == NULL) {
++		netdev_err(dev->net,
++			"Failed to allocate memory for control request\n");
+ 		usb_free_urb(urb);
+ 		return;
+ 	}
+@@ -290,7 +310,7 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ 			     (void *)req, data, size,
+ 			     asix_async_cmd_callback, req);
+ 
+-	if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
++	if ((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
+ 		netdev_err(dev->net, "Error submitting the control message: status=%d\n",
+ 			   status);
+ 		kfree(req);
+@@ -298,98 +318,120 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ 	}
+ }
+ 
++/* copied from Linux 4.9 by zhangy, 2018-10-31 */
+ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
+-	u8  *head;
+-	u32  header;
+-	char *packet;
+-	struct sk_buff *ax_skb;
++	struct asix_rx_fixup_info *rx;
++	struct asix_common_private *priv;
++	int offset = 0;
+ 	u16 size;
++	priv = get_asix_private(dev);
++	rx = &priv->rx_fixup_info;
++	/* When an Ethernet frame spans multiple URB socket buffers,
++	 * do a sanity test for the Data header synchronisation.
++	 * Attempt to detect the situation of the previous socket buffer having
++	 * been truncated or a socket buffer was missing. These situations
++	 * cause a discontinuity in the data stream and therefore need to avoid
++	 * appending bad data to the end of the current netdev socket buffer.
++	 * Also avoid unnecessarily discarding a good current netdev socket
++	 * buffer.
++	 */
++	if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
++		offset = ((rx->remaining + 1) & 0xfffe);
++		rx->header = get_unaligned_le32(skb->data + offset);
++		offset = 0;
++
++		size = (u16)(rx->header & 0x7ff);
++		if (size != ((~rx->header >> 16) & 0x7ff)) {
++			netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
++				   rx->remaining);
++			if (rx->ax_skb) {
++				kfree_skb(rx->ax_skb);
++				rx->ax_skb = NULL;
++				/* Discard the incomplete netdev Ethernet frame
++				 * and assume the Data header is at the start of
++				 * the current URB socket buffer.
++				 */
++			}
++			rx->remaining = 0;
++		}
++	}
+ 
+-	head = (u8 *) skb->data;
+-	memcpy(&header, head, sizeof(header));
+-	le32_to_cpus(&header);
+-	packet = head + sizeof(header);
++	while (offset + sizeof(u16) <= skb->len) {
++		u16 copy_length;
++		unsigned char *data;
++
++		if (!rx->remaining) {
++			if (skb->len - offset == sizeof(u16)) {
++				rx->header = get_unaligned_le16(
++						skb->data + offset);
++				rx->split_head = true;
++				offset += sizeof(u16);
++				break;
++			}
+ 
+-	skb_pull(skb, 4);
++			if (rx->split_head == true) {
++				rx->header |= (get_unaligned_le16(
++						skb->data + offset) << 16);
++				rx->split_head = false;
++				offset += sizeof(u16);
++			} else {
++				rx->header = get_unaligned_le32(skb->data +
++								offset);
++				offset += sizeof(u32);
++			}
+ 
+-	while (skb->len > 0) {
+-		if ((short)(header & 0x0000ffff) !=
+-		    ~((short)((header & 0xffff0000) >> 16))) {
+-			netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
+-		}
+-		/* get the packet length */
+-		size = (u16) (header & 0x0000ffff);
+-
+-		if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
+-			u8 alignment = (unsigned long)skb->data & 0x3;
+-			if (alignment != 0x2) {
+-				/*
+-				 * not 16bit aligned so use the room provided by
+-				 * the 32 bit header to align the data
+-				 *
+-				 * note we want 16bit alignment as MAC header is
+-				 * 14bytes thus ip header will be aligned on
+-				 * 32bit boundary so accessing ipheader elements
+-				 * using a cast to struct ip header wont cause
+-				 * an unaligned accesses.
+-				 */
+-				u8 realignment = (alignment + 2) & 0x3;
+-				memmove(skb->data - realignment,
+-					skb->data,
+-					size);
+-				skb->data -= realignment;
+-				skb_set_tail_pointer(skb, size);
++			/* take frame length from Data header 32-bit word */
++			size = (u16)(rx->header & 0x7ff);
++			if (size != ((~rx->header >> 16) & 0x7ff)) {
++				netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
++					   rx->header, offset);
++				return 0;
+ 			}
+-			return 2;
+-		}
++			if (size > dev->net->mtu + ETH_HLEN + 4) {
++				netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
++					   size);
++				return 0;
++			}
++
++			/* Sometimes may fail to get a netdev socket buffer but
++			 * continue to process the URB socket buffer so that
++			 * synchronisation of the Ethernet frame Data header
++			 * word is maintained.
++			 */
++			rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ 
+-		if (size > dev->net->mtu + ETH_HLEN) {
+-			netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+-				   size);
+-			return 0;
++			rx->remaining = size;
+ 		}
+-		ax_skb = skb_clone(skb, GFP_ATOMIC);
+-		if (ax_skb) {
+-			u8 alignment = (unsigned long)packet & 0x3;
+-			ax_skb->len = size;
+-
+-			if (alignment != 0x2) {
+-				/*
+-				 * not 16bit aligned use the room provided by
+-				 * the 32 bit header to align the data
+-				 */
+-				u8 realignment = (alignment + 2) & 0x3;
+-				memmove(packet - realignment, packet, size);
+-				packet -= realignment;
+-			}
+-			ax_skb->data = packet;
+-			skb_set_tail_pointer(ax_skb, size);
+-			usbnet_skb_return(dev, ax_skb);
++
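++		/* Copy whatever part of the frame this URB holds; any
++		 * remainder continues in the next URB.
++		 */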
++		if (rx->remaining > skb->len - offset) {
++			copy_length = skb->len - offset;
++			rx->remaining -= copy_length;
+ 		} else {
+-			return 0;
++			copy_length = rx->remaining;
++			rx->remaining = 0;
+ 		}
+ 
+-		skb_pull(skb, (size + 1) & 0xfffe);
+-
+-		if (skb->len == 0)
+-			break;
++		if (rx->ax_skb) {
++			data = skb_put(rx->ax_skb, copy_length);
++			memcpy(data, skb->data + offset, copy_length);
++			if (!rx->remaining)
++				usbnet_skb_return(dev, rx->ax_skb);
++		}
+ 
+-		head = (u8 *) skb->data;
+-		memcpy(&header, head, sizeof(header));
+-		le32_to_cpus(&header);
+-		packet = head + sizeof(header);
+-		skb_pull(skb, 4);
++		offset += (copy_length + 1) & 0xfffe;
+ 	}
+ 
+-	if (skb->len < 0) {
+-		netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
+-			   skb->len);
++	if (skb->len != offset) {
++		netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
++			   skb->len, offset);
+ 		return 0;
+ 	}
++
+ 	return 1;
+ }
+ 
+ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ 					gfp_t flags)
+ {
+@@ -442,7 +484,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
+ 	if (netif_carrier_ok(dev->net) != link) {
+ 		if (link) {
+ 			netif_carrier_on(dev->net);
+-			usbnet_defer_kevent (dev, EVENT_LINK_RESET );
++			usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+ 		} else
+ 			netif_carrier_off(dev->net);
+ 		netdev_dbg(dev->net, "Link Status is: %d\n", link);
+@@ -489,8 +531,7 @@ out:
+ static int asix_sw_reset(struct usbnet *dev, u8 flags)
+ {
+ 	int ret;
+-
+-        ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
++	ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
+ 	if (ret < 0)
+ 		netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
+ 
+@@ -514,7 +555,6 @@ out:
+ static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
+ {
+ 	int ret;
+-
+ 	netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
+ 	ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+ 	if (ret < 0)
+@@ -562,9 +602,6 @@ static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
+ 		netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
+ 			   value, ret);
+ 
+-	if (sleep)
+-		msleep(sleep);
+-
+ 	return ret;
+ }
+ 
+@@ -729,7 +766,7 @@ static int asix_get_eeprom(struct net_device *net,
+ 	eeprom->magic = AX_EEPROM_MAGIC;
+ 
+ 	/* ax8817x returns 2 bytes from eeprom on read */
+-	for (i=0; i < eeprom->len / 2; i++) {
++	for (i = 0; i < eeprom->len / 2; i++) {
+ 		if (asix_read_cmd(dev, AX_CMD_READ_EEPROM,
+ 			eeprom->offset + i, 0, 2, &ebuf[i]) < 0)
+ 			return -EINVAL;
+@@ -757,7 +794,7 @@ static u32 asix_get_link(struct net_device *net)
+ 	return mii_link_ok(&dev->mii);
+ }
+ 
+-static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
++static int asix_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+ {
+ 	struct usbnet *dev = netdev_priv(net);
+ 
+@@ -870,7 +907,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
+ 	.ndo_start_xmit		= usbnet_start_xmit,
+ 	.ndo_tx_timeout		= usbnet_tx_timeout,
+ 	.ndo_change_mtu		= usbnet_change_mtu,
+-	.ndo_set_mac_address 	= eth_mac_addr,
++	.ndo_set_mac_address	= eth_mac_addr,
+ 	.ndo_validate_addr	= eth_validate_addr,
+ 	.ndo_do_ioctl		= asix_ioctl,
+ 	.ndo_set_multicast_list = ax88172_set_multicast,
+@@ -886,7 +923,7 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
+ 
+ 	data->eeprom_len = AX88172_EEPROM_LEN;
+ 
+-	usbnet_get_endpoints(dev,intf);
++	usbnet_get_endpoints(dev, intf);
+ 
+ 	/* Toggle the GPIOs in a manufacturer/model specific way */
+ 	for (i = 2; i >= 0; i--) {
+@@ -894,7 +931,6 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
+ 					(gpio_bits >> (i * 8)) & 0xff, 0, 0,
+ 					NULL)) < 0)
+ 			goto out;
+-		msleep(5);
+ 	}
+ 
+ 	if ((ret = asix_write_rx_ctl(dev, 0x80)) < 0)
+@@ -973,12 +1009,47 @@ static const struct net_device_ops ax88772_netdev_ops = {
+ 	.ndo_start_xmit		= usbnet_start_xmit,
+ 	.ndo_tx_timeout		= usbnet_tx_timeout,
+ 	.ndo_change_mtu		= usbnet_change_mtu,
+-	.ndo_set_mac_address 	= asix_set_mac_address,
++	.ndo_set_mac_address	= asix_set_mac_address,
+ 	.ndo_validate_addr	= eth_validate_addr,
+ 	.ndo_do_ioctl		= asix_ioctl,
+ 	.ndo_set_multicast_list = asix_set_multicast,
+ };
+ 
++#ifdef ASIX_USE_UNBIND
++/* zhangy: unbind support; disabled because it crashes on USB reconnect */
++static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
++{
++	if (rx->ax_skb) {
++		kfree_skb(rx->ax_skb);
++		rx->ax_skb = NULL;
++	}
++	rx->remaining = 0;
++	rx->split_head = false;
++	rx->header = 0;
++}
++static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
++{
++	struct asix_common_private *p_private;
++
++	p_private = dev->driver_info->driver_priv;
++	if (p_private) {
++		reset_asix_rx_fixup_info(&p_private->rx_fixup_info);
++		/* maybe reset the device first... */
++		kfree(p_private);
++		/* clear the stale pointer so a later bind reallocates */
++		dev->driver_info->driver_priv = NULL;
++	}
++}
++#endif
++
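++/* Fetch the shared rx-fixup state; NULL until ax88772_bind() allocates it */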
++struct asix_common_private *get_asix_private(struct usbnet *dev)
++{
++	if (dev->driver_info->driver_priv)
++		return (struct asix_common_private *)dev->driver_info->driver_priv;
++	return NULL;
++}
++
+ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ {
+ 	int ret, embd_phy;
+@@ -989,7 +1060,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ 
+ 	data->eeprom_len = AX88772_EEPROM_LEN;
+ 
+-	usbnet_get_endpoints(dev,intf);
++	usbnet_get_endpoints(dev, intf);
+ 
+ 	if ((ret = asix_write_gpio(dev,
+ 			AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5)) < 0)
+@@ -1002,25 +1073,26 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ 		dbg("Select PHY #1 failed: %d", ret);
+ 		goto out;
+ 	}
+-
++	/* allocate the shared rx-fixup state (added by zhangy); it lives in
++	 * the driver_info, so it normally persists for the driver's lifetime
++	 */
++	if (!dev->driver_info->driver_priv) {
++		dev->driver_info->driver_priv =
++			kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
++		if (!dev->driver_info->driver_priv)
++			return -ENOMEM;
++	}
+ 	if ((ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL)) < 0)
+ 		goto out;
+ 
+-	msleep(150);
+ 	if ((ret = asix_sw_reset(dev, AX_SWRESET_CLEAR)) < 0)
+ 		goto out;
+ 
+-	msleep(150);
+ 	if (embd_phy) {
+ 		if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL)) < 0)
+ 			goto out;
+-	}
+-	else {
++	} else {
+ 		if ((ret = asix_sw_reset(dev, AX_SWRESET_PRTE)) < 0)
+ 			goto out;
+ 	}
+ 
+-	msleep(150);
+ 	rx_ctl = asix_read_rx_ctl(dev);
+ 	dbg("RX_CTL is 0x%04x after software reset", rx_ctl);
+ 	if ((ret = asix_write_rx_ctl(dev, 0x0000)) < 0)
+@@ -1051,13 +1123,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	if ((ret = asix_sw_reset(dev, AX_SWRESET_PRL)) < 0)
+ 		goto out;
+ 
+-	msleep(150);
+-
+ 	if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL)) < 0)
+ 		goto out;
+ 
+-	msleep(150);
+-
+ 	dev->net->netdev_ops = &ax88772_netdev_ops;
+ 	dev->net->ethtool_ops = &ax88772_ethtool_ops;
+ 
+@@ -1154,13 +1222,13 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
+ 	reg &= 0xfc0f;
+ 
+ 	switch (speed) {
+-		case SPEED_1000:
++	case SPEED_1000:
+ 			reg |= 0x03e0;
+ 			break;
+-		case SPEED_100:
++	case SPEED_100:
+ 			reg |= 0x03b0;
+ 			break;
+-		default:
++	default:
+ 			reg |= 0x02f0;
+ 	}
+ 
+@@ -1286,7 +1354,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	int gpio0 = 0;
+ 	u32 phyid;
+ 
+-	usbnet_get_endpoints(dev,intf);
++	usbnet_get_endpoints(dev, intf);
+ 
+ 	asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
+ 	dbg("GPIO Status: 0x%04x", status);
+@@ -1320,10 +1388,8 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	}
+ 
+ 	asix_sw_reset(dev, 0);
+-	msleep(150);
+ 
+ 	asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
+-	msleep(150);
+ 
+ 	asix_write_rx_ctl(dev, 0);
+ 
+@@ -1352,7 +1418,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
+ 
+ 	if (data->phymode == PHY_MODE_MARVELL) {
+ 		marvell_phy_init(dev);
+-		msleep(60);
+ 	}
+ 
+ 	asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR,
+@@ -1425,10 +1490,13 @@ static const struct driver_info hawking_uf200_info = {
+ static const struct driver_info ax88772_info = {
+ 	.description = "ASIX AX88772 USB 2.0 Ethernet",
+ 	.bind = ax88772_bind,
++#ifdef ASIX_USE_UNBIND
++	.unbind = ax88772_unbind,
++#endif
+ 	.status = asix_status,
+ 	.link_reset = ax88772_link_reset,
+ 	.reset = ax88772_link_reset,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+ 	.rx_fixup = asix_rx_fixup,
+ 	.tx_fixup = asix_tx_fixup,
+ };
+@@ -1444,125 +1512,146 @@ static const struct driver_info ax88178_info = {
+ 	.tx_fixup = asix_tx_fixup,
+ };
+ 
+-static const struct usb_device_id	products [] = {
++static const struct usb_device_id	products[] = {
+ {
+-	// Linksys USB200M
+ 	USB_DEVICE (0x077b, 0x2226),
+ 	.driver_info =	(unsigned long) &ax8817x_info,
+-}, {
+-	// Netgear FA120
++},
++{
+ 	USB_DEVICE (0x0846, 0x1040),
+ 	.driver_info =  (unsigned long) &netgear_fa120_info,
+-}, {
+-	// DLink DUB-E100
++},
++{
+ 	USB_DEVICE (0x2001, 0x1a00),
+ 	.driver_info =  (unsigned long) &dlink_dub_e100_info,
+-}, {
+-	// Intellinet, ST Lab USB Ethernet
++},
++{
+ 	USB_DEVICE (0x0b95, 0x1720),
+ 	.driver_info =  (unsigned long) &ax8817x_info,
+-}, {
+-	// Hawking UF200, TrendNet TU2-ET100
++},
++{
+ 	USB_DEVICE (0x07b8, 0x420a),
+ 	.driver_info =  (unsigned long) &hawking_uf200_info,
+-}, {
+-	// Billionton Systems, USB2AR
++},
++{
+ 	USB_DEVICE (0x08dd, 0x90ff),
+ 	.driver_info =  (unsigned long) &ax8817x_info,
+-}, {
+-	// ATEN UC210T
++},
++{
+ 	USB_DEVICE (0x0557, 0x2009),
+ 	.driver_info =  (unsigned long) &ax8817x_info,
+-}, {
+-	// Buffalo LUA-U2-KTX
++},
++{
+ 	USB_DEVICE (0x0411, 0x003d),
+ 	.driver_info =  (unsigned long) &ax8817x_info,
+-}, {
+-	// Buffalo LUA-U2-GT 10/100/1000
++},
++{
+ 	USB_DEVICE (0x0411, 0x006e),
+ 	.driver_info =  (unsigned long) &ax88178_info,
+-}, {
+-	// Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
++},
++{
+ 	USB_DEVICE (0x6189, 0x182d),
+ 	.driver_info =  (unsigned long) &ax8817x_info,
+-}, {
+-	// corega FEther USB2-TX
++},
++{
+ 	USB_DEVICE (0x07aa, 0x0017),
+ 	.driver_info =  (unsigned long) &ax8817x_info,
+-}, {
+-	// Surecom EP-1427X-2
++},
++{
+ 	USB_DEVICE (0x1189, 0x0893),
+ 	.driver_info = (unsigned long) &ax8817x_info,
+-}, {
+-	// goodway corp usb gwusb2e
++},
++{
+ 	USB_DEVICE (0x1631, 0x6200),
+ 	.driver_info = (unsigned long) &ax8817x_info,
+-}, {
+-	// JVC MP-PRX1 Port Replicator
++},
++{
+ 	USB_DEVICE (0x04f1, 0x3008),
+ 	.driver_info = (unsigned long) &ax8817x_info,
+-}, {
+-	// ASIX AX88772B 10/100
++},
++{
+ 	USB_DEVICE (0x0b95, 0x772b),
+ 	.driver_info = (unsigned long) &ax88772_info,
+-}, {
+-	// ASIX AX88772 10/100
++},
++{
+ 	USB_DEVICE (0x0b95, 0x7720),
+ 	.driver_info = (unsigned long) &ax88772_info,
+-}, {
+-	// ASIX AX88178 10/100/1000
++},
++{
+ 	USB_DEVICE (0x0b95, 0x1780),
+ 	.driver_info = (unsigned long) &ax88178_info,
+-}, {
+-	// Logitec LAN-GTJ/U2A
++},
++{
+ 	USB_DEVICE (0x0789, 0x0160),
+ 	.driver_info = (unsigned long) &ax88178_info,
+-}, {
+-	// Linksys USB200M Rev 2
++},
++{
+ 	USB_DEVICE (0x13b1, 0x0018),
+ 	.driver_info = (unsigned long) &ax88772_info,
+-}, {
+-	// 0Q0 cable ethernet
++},
++{
+ 	USB_DEVICE (0x1557, 0x7720),
+ 	.driver_info = (unsigned long) &ax88772_info,
+-}, {
+-	// DLink DUB-E100 H/W Ver B1
++},
++{
+ 	USB_DEVICE (0x07d1, 0x3c05),
+ 	.driver_info = (unsigned long) &ax88772_info,
+-}, {
+-	// DLink DUB-E100 H/W Ver B1 Alternate
++},
++{
+ 	USB_DEVICE (0x2001, 0x3c05),
+ 	.driver_info = (unsigned long) &ax88772_info,
+-}, {
+-	// Linksys USB1000
++},
++{
+ 	USB_DEVICE (0x1737, 0x0039),
+ 	.driver_info = (unsigned long) &ax88178_info,
+-}, {
+-	// IO-DATA ETG-US2
+-	USB_DEVICE (0x04bb, 0x0930),
++},
++{
++	USB_DEVICE(0x04bb, 0x0930),
+ 	.driver_info = (unsigned long) &ax88178_info,
+-}, {
+-	// Belkin F5D5055
++},
++{
+ 	USB_DEVICE(0x050d, 0x5055),
+ 	.driver_info = (unsigned long) &ax88178_info,
+-}, {
+-	// Apple USB Ethernet Adapter
++},
++{
+ 	USB_DEVICE(0x05ac, 0x1402),
+ 	.driver_info = (unsigned long) &ax88772_info,
+-}, {
+-	// Cables-to-Go USB Ethernet Adapter
++},
++{
+ 	USB_DEVICE(0x0b95, 0x772a),
+ 	.driver_info = (unsigned long) &ax88772_info,
+-}, {
+-	// ABOCOM for pci
++},
++{
+ 	USB_DEVICE(0x14ea, 0xab11),
+ 	.driver_info = (unsigned long) &ax88178_info,
+-}, {
+-	// ASIX 88772a
++},
++{
+ 	USB_DEVICE(0x0db0, 0xa877),
+ 	.driver_info = (unsigned long) &ax88772_info,
+ },
+-	{ },		// END
++	{ },
+ };
+ MODULE_DEVICE_TABLE(usb, products);
+ 
+@@ -1578,13 +1667,13 @@ static struct usb_driver asix_driver = {
+ 
+ static int __init asix_init(void)
+ {
+- 	return usb_register(&asix_driver);
++	return usb_register(&asix_driver);
+ }
+ module_init(asix_init);
+ 
+ static void __exit asix_exit(void)
+ {
+- 	usb_deregister(&asix_driver);
++	usb_deregister(&asix_driver);
+ }
+ module_exit(asix_exit);
+ 
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index ce395fe5..1e456a82 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -64,19 +64,18 @@
+  * is required, under load.  Jumbograms change the equation.
+  */
+ #define RX_MAX_QUEUE_MEMORY (60 * 1518)
++
+ #define	RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
+ 			(RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
+ #define	TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
+ 			(RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
+ 
+-// reawaken network queue this soon after stopping; else watchdog barks
++
+ #define TX_TIMEOUT_JIFFIES	(5*HZ)
+ 
+-// throttle rx/tx briefly after some faults, so khubd might disconnect()
+-// us (it polls at HZ/4 usually) before we report too many false errors.
++
+ #define THROTTLE_JIFFIES	(HZ/8)
+ 
+-// between wakeups
+ #define UNLINK_TIMEOUT_MS	3
+ 
+ /*-------------------------------------------------------------------------*/
+@@ -92,7 +91,7 @@ module_param (msg_level, int, 0);
+ MODULE_PARM_DESC (msg_level, "Override default message level");
+ 
+ /*-------------------------------------------------------------------------*/
+-
++static void usbnet_bh (unsigned long param);
+ /* handles CDC Ethernet and many other network "bulk data" interfaces */
+ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
+ {
+@@ -239,13 +238,15 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
+ 		  skb->len + sizeof (struct ethhdr), skb->protocol);
+ 	memset (skb->cb, 0, sizeof (struct skb_data));
+ 	status = netif_rx (skb);
+ 	if (status != NET_RX_SUCCESS)
+ 		netif_dbg(dev, rx_err, dev->net,
+ 			  "netif_rx status %d\n", status);
+ }
+ EXPORT_SYMBOL_GPL(usbnet_skb_return);
+ 
+-
++
+ /*-------------------------------------------------------------------------
+  *
+  * Network Device Driver (peer link to "Host Device", from USB host)
+@@ -283,18 +284,30 @@ EXPORT_SYMBOL_GPL(usbnet_change_mtu);
+  * completion callbacks.  2.5 should have fixed those bugs...
+  */
+ 
+-static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
++static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
++		struct sk_buff_head *list, enum skb_state state)
+ {
+ 	unsigned long		flags;
++	enum skb_state 		old_state;
++	struct skb_data *entry = (struct skb_data *) skb->cb;
+ 
+ 	spin_lock_irqsave(&list->lock, flags);
++	old_state = entry->state;
++	entry->state = state;
+ 	__skb_unlink(skb, list);
+-	spin_unlock(&list->lock);
+-	spin_lock(&dev->done.lock);
++
++	/* defer_bh() is never called with list == &dev->done.
++	 * spin_lock_nested() tells lockdep that it is OK to take
++	 * dev->done.lock here with list->lock held.
++	 */
++	spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
++
+ 	__skb_queue_tail(&dev->done, skb);
+ 	if (dev->done.qlen == 1)
+ 		tasklet_schedule(&dev->bh);
+-	spin_unlock_irqrestore(&dev->done.lock, flags);
++	spin_unlock(&dev->done.lock);
++	spin_unlock_irqrestore(&list->lock, flags);
++	return old_state;
+ }
+ 
+ /* some work can't be done in tasklets, so we use keventd
+@@ -324,12 +337,19 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ 	unsigned long		lockflags;
+ 	size_t			size = dev->rx_urb_size;
+ 
++	/* prevent rx skb allocation when error ratio is high */
++	if (test_bit(EVENT_RX_KILL, &dev->flags)) {
++		usb_free_urb(urb);
++		return -ENOLINK;
++	}
++
+ 	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
+ 		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
+ 		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
+ 		usb_free_urb (urb);
+ 		return -ENOMEM;
+ 	}
++	/* skb_reserve may cause a memleak here */
+ 	skb_reserve (skb, NET_IP_ALIGN);
+ 
+ 	entry = (struct skb_data *) skb->cb;
+@@ -337,10 +357,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ 	entry->dev = dev;
+ 	entry->state = rx_start;
+ 	entry->length = 0;
+-
+ 	usb_fill_bulk_urb (urb, dev->udev, dev->in,
+ 		skb->data, size, rx_complete, skb);
+-
+ 	spin_lock_irqsave (&dev->rxq.lock, lockflags);
+ 
+ 	if (netif_running (dev->net) &&
+@@ -395,44 +413,41 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
+ 	}
+ 	// else network stack removes extra byte if we forced a short packet
+ 
+-	if (skb->len) {
+-		/* all data was already cloned from skb inside the driver */
+-		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+-			dev_kfree_skb_any(skb);
+-		else
+-			usbnet_skb_return(dev, skb);
++	/* all data was already cloned from skb inside the driver */
++	if (dev->driver_info->flags & FLAG_MULTI_PACKET)
++		goto done;
++
++	if (skb->len < ETH_HLEN) {
++		dev->net->stats.rx_errors++;
++		dev->net->stats.rx_length_errors++;
++		netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
++	} else {
++		usbnet_skb_return(dev, skb);
+ 		return;
+ 	}
+ 
+-	netif_dbg(dev, rx_err, dev->net, "drop\n");
+-	dev->net->stats.rx_errors++;
+ done:
+ 	skb_queue_tail(&dev->done, skb);
+ }
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+ static void rx_complete (struct urb *urb)
+ {
+ 	struct sk_buff		*skb = (struct sk_buff *) urb->context;
+ 	struct skb_data		*entry = (struct skb_data *) skb->cb;
+ 	struct usbnet		*dev = entry->dev;
+ 	int			urb_status = urb->status;
++	enum skb_state		state;
+ 
+ 	skb_put (skb, urb->actual_length);
+-	entry->state = rx_done;
++	state = rx_done;
+ 	entry->urb = NULL;
+ 
+ 	switch (urb_status) {
+ 	/* success */
+ 	case 0:
+-		if (skb->len < dev->net->hard_header_len) {
+-			entry->state = rx_cleanup;
+-			dev->net->stats.rx_errors++;
+-			dev->net->stats.rx_length_errors++;
+-			netif_dbg(dev, rx_err, dev->net,
+-				  "rx length %d\n", skb->len);
+-		}
+ 		break;
+ 
+ 	/* stalls need manual reset. this is rare ... except that
+@@ -452,9 +467,9 @@ static void rx_complete (struct urb *urb)
+ 			  "rx shutdown, code %d\n", urb_status);
+ 		goto block;
+ 
+-	/* we get controller i/o faults during khubd disconnect() delays.
++	/* we get controller i/o faults during hub_wq disconnect() delays.
+ 	 * throttle down resubmits, to avoid log floods; just temporarily,
+-	 * so we still recover when the fault isn't a khubd delay.
++	 * so we still recover when the fault isn't a hub_wq delay.
+ 	 */
+ 	case -EPROTO:
+ 	case -ETIME:
+@@ -466,7 +481,7 @@ static void rx_complete (struct urb *urb)
+ 				  "rx throttle %d\n", urb_status);
+ 		}
+ block:
+-		entry->state = rx_cleanup;
++		state = rx_cleanup;
+ 		entry->urb = urb;
+ 		urb = NULL;
+ 		break;
+@@ -477,25 +492,37 @@ block:
+ 		// FALLTHROUGH
+ 
+ 	default:
+-		entry->state = rx_cleanup;
++		state = rx_cleanup;
+ 		dev->net->stats.rx_errors++;
+ 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
+ 		break;
+ 	}
+ 
+-	defer_bh(dev, skb, &dev->rxq);
++	/* stop rx if the packet error rate is high: URBs are counted in
++	 * windows of ~30, and more than 20 rx_cleanup completions in a
++	 * window sets EVENT_RX_KILL (cleared again in usbnet_bh())
++	 */
++	if (++dev->pkt_cnt > 30) {
++		dev->pkt_cnt = 0;
++		dev->pkt_err = 0;
++	} else {
++		if (state == rx_cleanup)
++			dev->pkt_err++;
++		if (dev->pkt_err > 20)
++			set_bit(EVENT_RX_KILL, &dev->flags);
++	}
++
++	state = defer_bh(dev, skb, &dev->rxq, state);
+ 
+ 	if (urb) {
+ 		if (netif_running (dev->net) &&
+-		    !test_bit (EVENT_RX_HALT, &dev->flags)) {
++		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
++		    state != unlink_start) {
+ 			rx_submit (dev, urb, GFP_ATOMIC);
++			usb_mark_last_busy(dev->udev);
+ 			return;
+ 		}
+ 		usb_free_urb (urb);
+ 	}
+ 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
+ }
+-
+ static void intr_complete (struct urb *urb)
+ {
+ 	struct usbnet	*dev = urb->context;
+@@ -573,18 +600,34 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
+ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+ {
+ 	unsigned long		flags;
+-	struct sk_buff		*skb, *skbnext;
++	struct sk_buff		*skb;
+ 	int			count = 0;
+ 
+ 	spin_lock_irqsave (&q->lock, flags);
+-	skb_queue_walk_safe(q, skb, skbnext) {
++	while (!skb_queue_empty(q)) {
+ 		struct skb_data		*entry;
+ 		struct urb		*urb;
+ 		int			retval;
+ 
+-		entry = (struct skb_data *) skb->cb;
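++		/* the lock is dropped around usb_unlink_urb() below, so the
++		 * queue can change under us; rescan from the head each pass,
++		 * skipping entries already marked unlink_start
++		 */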
++		skb_queue_walk(q, skb) {
++			entry = (struct skb_data *) skb->cb;
++			if (entry->state != unlink_start)
++				goto found;
++		}
++		break;
++found:
++		entry->state = unlink_start;
+ 		urb = entry->urb;
+ 
++		/*
++		 * Get reference count of the URB to avoid it to be
++		 * freed during usb_unlink_urb, which may trigger
++		 * use-after-free problem inside usb_unlink_urb since
++		 * usb_unlink_urb is always racing with .complete
++		 * handler(include defer_bh).
++		 */
++		usb_get_urb(urb);
++		spin_unlock_irqrestore(&q->lock, flags);
+ 		// during some PM-driven resume scenarios,
+ 		// these (async) unlinks complete immediately
+ 		retval = usb_unlink_urb (urb);
+@@ -592,6 +635,8 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+ 			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
+ 		else
+ 			count++;
++		usb_put_urb(urb);
++		spin_lock_irqsave(&q->lock, flags);
+ 	}
+ 	spin_unlock_irqrestore (&q->lock, flags);
+ 	return count;
+@@ -710,7 +755,6 @@ int usbnet_open (struct net_device *net)
+ 		goto done_nopm;
+ 	}
+ 
+-	// put into "known safe" state
+ 	if (info->reset && (retval = info->reset (dev)) < 0) {
+ 		netif_info(dev, ifup, dev->net,
+ 			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
+@@ -750,6 +794,11 @@ int usbnet_open (struct net_device *net)
+ 		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
+ 		   "simple");
+ 
++	/* reset rx error state */
++	dev->pkt_cnt = 0;
++	dev->pkt_err = 0;
++	clear_bit(EVENT_RX_KILL, &dev->flags);
++
+ 	// delay posting reads until we're fully open
+ 	tasklet_schedule (&dev->bh);
+ 	if (info->manage_power) {
+@@ -884,7 +933,6 @@ kevent (struct work_struct *work)
+ 		container_of(work, struct usbnet, kevent);
+ 	int			status;
+ 
+-	/* usb_clear_halt() needs a thread context */
+ 	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
+ 		unlink_urbs (dev, &dev->txq);
+ 		status = usb_autopm_get_interface(dev->intf);
+@@ -1023,8 +1071,8 @@ static void tx_complete (struct urb *urb)
+ 
+ 	usb_autopm_put_interface_async(dev->intf);
+ 	urb->dev = NULL;
+-	entry->state = tx_done;
+-	defer_bh(dev, skb, &dev->txq);
++
++	(void) defer_bh(dev, skb, &dev->txq, tx_done);
+ }
+ 
+ /*-------------------------------------------------------------------------*/
+@@ -1082,7 +1130,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
+ 
+ 	usb_fill_bulk_urb (urb, dev->udev, dev->out,
+ 			skb->data, skb->len, tx_complete, skb);
+-
++	/* printk("[tx] tx_complete addr is %p\n", tx_complete); */
+ 	/* don't assume the hardware handles USB_ZERO_PACKET
+ 	 * NOTE:  strictly conforming cdc-ether devices should expect
+ 	 * the ZLP here, but ignore the one-byte packet.
+@@ -1149,7 +1197,10 @@ drop:
+ not_drop:
+ 		if (skb)
+ 			dev_kfree_skb_any (skb);
+-		usb_free_urb (urb);
++		if (urb) {
++			kfree(urb->sg);
++			usb_free_urb(urb);
++		}
+ 	} else
+ 		netif_dbg(dev, tx_queued, dev->net,
+ 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
+@@ -1162,7 +1213,7 @@ EXPORT_SYMBOL_GPL(usbnet_start_xmit);
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+-// tasklet (work deferred from completions, in_irq) or timer
++
+ 
+ static void usbnet_bh (unsigned long param)
+ {
+@@ -1174,11 +1225,14 @@ static void usbnet_bh (unsigned long param)
+ 		entry = (struct skb_data *) skb->cb;
+ 		switch (entry->state) {
+ 		case rx_done:
+ 			entry->state = rx_cleanup;
+ 			rx_process (dev, skb);
+ 			continue;
+ 		case tx_done:
++			kfree(entry->urb->sg);
+ 		case rx_cleanup:
+ 			usb_free_urb (entry->urb);
+ 			dev_kfree_skb (skb);
+ 			continue;
+@@ -1187,7 +1241,9 @@ static void usbnet_bh (unsigned long param)
+ 		}
+ 	}
+ 
+-	// waiting for all pending urbs to complete?
++	/* restart RX again after disabling due to high error rate */
++	clear_bit(EVENT_RX_KILL, &dev->flags);
++
+ 	if (dev->wait) {
+ 		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
+ 			wake_up (dev->wait);
+@@ -1196,6 +1252,7 @@ static void usbnet_bh (unsigned long param)
+ 	// or are we maybe short a few urbs?
+ 	} else if (netif_running (dev->net) &&
+ 		   netif_device_present (dev->net) &&
++		   netif_carrier_ok(dev->net) &&
+ 		   !timer_pending (&dev->delay) &&
+ 		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
+ 		int	temp = dev->rxq.qlen;
+@@ -1205,7 +1262,6 @@ static void usbnet_bh (unsigned long param)
+ 			struct urb	*urb;
+ 			int		i;
+ 
+-			// don't refill the queue all at once
+ 			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
+ 				urb = usb_alloc_urb (0, GFP_ATOMIC);
+ 				if (urb != NULL) {
+@@ -1218,12 +1274,17 @@ static void usbnet_bh (unsigned long param)
+ 				netif_dbg(dev, link, dev->net,
+ 					  "rxqlen %d --> %d\n",
+ 					  temp, dev->rxq.qlen);
+ 			if (dev->rxq.qlen < qlen)
+ 				tasklet_schedule (&dev->bh);
+ 		}
+ 		if (dev->txq.qlen < TX_QLEN (dev))
+ 			netif_wake_queue (dev->net);
+ 	}
+ }
+ 
+ 
+@@ -1237,35 +1298,34 @@ static void usbnet_bh (unsigned long param)
+ 
+ void usbnet_disconnect (struct usb_interface *intf)
+ {
+-	struct usbnet		*dev;
+-	struct usb_device	*xdev;
+-	struct net_device	*net;
++	struct usbnet *dev;
++	struct usb_device *xdev;
++	struct net_device *net;
+ 
+ 	dev = usb_get_intfdata(intf);
+ 	usb_set_intfdata(intf, NULL);
+ 	if (!dev)
+ 		return;
+ 
+-	xdev = interface_to_usbdev (intf);
++	xdev = interface_to_usbdev(intf);
+ 
+ 	netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
+-		   intf->dev.driver->name,
+-		   xdev->bus->bus_name, xdev->devpath,
+-		   dev->driver_info->description);
++	                intf->dev.driver->name, xdev->bus->bus_name,
++	                xdev->devpath, dev->driver_info->description);
+ 
+ 	net = dev->net;
+-	unregister_netdev (net);
++	unregister_netdev(net);
+ 
+ 	cancel_work_sync(&dev->kevent);
+ 
+ 	if (dev->driver_info->unbind)
+-		dev->driver_info->unbind (dev, intf);
++		dev->driver_info->unbind(dev, intf);
+ 
+ 	usb_kill_urb(dev->interrupt);
+ 	usb_free_urb(dev->interrupt);
+ 
+ 	free_netdev(net);
+-	usb_put_dev (xdev);
++	usb_put_dev(xdev);
+ }
+ EXPORT_SYMBOL_GPL(usbnet_disconnect);
+ 
+@@ -1325,7 +1385,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 
+ 	status = -ENOMEM;
+ 
+-	// set up our own records
++
+ 	net = alloc_etherdev(sizeof(*dev));
+ 	if (!net) {
+ 		dbg ("can't kmalloc dev");
+@@ -1374,16 +1434,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
+ 	net->ethtool_ops = &usbnet_ethtool_ops;
+ 
+-	// allow device-specific bind/init procedures
+-	// NOTE net->name still not usable ...
++
+ 	if (info->bind) {
+ 		status = info->bind (dev, udev);
+ 		if (status < 0)
+ 			goto out1;
+ 
+-		// heuristic:  "usb%d" for links we know are two-host,
+-		// else "eth%d" when there's reasonable doubt.  userspace
+-		// can rename the link if it knows better.
+ 		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
+ 		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
+ 		     (net->dev_addr [0] & 0x02) == 0))
+@@ -1411,11 +1467,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 			status = 0;
+ 
+ 	}
++#if 1
+ 	if (status >= 0 && dev->status)
+ 		status = init_status (dev, udev);
+ 	if (status < 0)
+ 		goto out3;
+-
++#endif
+ 	if (!dev->rx_urb_size)
+ 		dev->rx_urb_size = dev->hard_mtu;
+ 	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+@@ -1435,7 +1492,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 		   dev->driver_info->description,
+ 		   net->dev_addr);
+ 
+-	// ok, it's ready to go.
++
+ 	usb_set_intfdata (udev, dev);
+ 
+ 	netif_device_attach (net);
+@@ -1447,8 +1504,15 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 
+ out3:
+ 	if (info->unbind)
+-		info->unbind (dev, udev);
++		info->unbind(dev, udev);
+ out1:
++	/* subdrivers must undo all they did in bind() if they
++	 * fail it, but we may fail later and a deferred kevent
++	 * may trigger an error resubmitting itself and, worse,
++	 * schedule a timer. So we kill it all just in case.
++	 */
++	cancel_work_sync(&dev->kevent);
++	del_timer_sync(&dev->delay);
+ 	free_netdev(net);
+ out:
+ 	usb_put_dev(xdev);
+@@ -1465,7 +1529,7 @@ EXPORT_SYMBOL_GPL(usbnet_probe);
+ 
+ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
+ {
+-	struct usbnet		*dev = usb_get_intfdata(intf);
++	struct usbnet *dev = usb_get_intfdata(intf);
+ 
+ 	if (!dev->suspend_count++) {
+ 		spin_lock_irq(&dev->txq.lock);
+@@ -1481,7 +1545,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
+ 		 * accelerate emptying of the rx and queues, to avoid
+ 		 * having everything error out.
+ 		 */
+-		netif_device_detach (dev->net);
++		netif_device_detach(dev->net);
+ 		usbnet_terminate_urbs(dev);
+ 		usb_kill_urb(dev->interrupt);
+ 
+@@ -1489,7 +1553,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
+ 		 * reattach so runtime management can use and
+ 		 * wake the device
+ 		 */
+-		netif_device_attach (dev->net);
++		netif_device_attach(dev->net);
+ 	}
+ 	return 0;
+ }
+@@ -1497,10 +1561,10 @@ EXPORT_SYMBOL_GPL(usbnet_suspend);
+ 
+ int usbnet_resume (struct usb_interface *intf)
+ {
+-	struct usbnet		*dev = usb_get_intfdata(intf);
+-	struct sk_buff          *skb;
+-	struct urb              *res;
+-	int                     retval;
++	struct usbnet *dev = usb_get_intfdata(intf);
++	struct sk_buff *skb;
++	struct urb *res;
++	int retval;
+ 
+ 	if (!--dev->suspend_count) {
+ 		/* resume interrupt URBs */
+@@ -1510,10 +1574,11 @@ int usbnet_resume (struct usb_interface *intf)
+ 		spin_lock_irq(&dev->txq.lock);
+ 		while ((res = usb_get_from_anchor(&dev->deferred))) {
+ 
+-			skb = (struct sk_buff *)res->context;
++			skb = (struct sk_buff *) res->context;
+ 			retval = usb_submit_urb(res, GFP_ATOMIC);
+ 			if (retval < 0) {
+ 				dev_kfree_skb_any(skb);
++				kfree(res->sg);
+ 				usb_free_urb(res);
+ 				usb_autopm_put_interface_async(dev->intf);
+ 			} else {
+@@ -1529,7 +1594,7 @@ int usbnet_resume (struct usb_interface *intf)
+ 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+ 			if (!(dev->txq.qlen >= TX_QLEN(dev)))
+ 				netif_start_queue(dev->net);
+-			tasklet_schedule (&dev->bh);
++			tasklet_schedule(&dev->bh);
+ 		}
+ 	}
+ 	return 0;
+diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
+new file mode 100644
+index 00000000..1b1fe51e
+--- /dev/null
++++ b/drivers/pwm/Kconfig
+@@ -0,0 +1,47 @@
++menuconfig PWM
++	bool "Pulse-Width Modulation (PWM) Support"
++	help
++	  Generic Pulse-Width Modulation (PWM) support.
++
++	  In Pulse-Width Modulation, a variation of the width of pulses
++	  in a rectangular pulse signal is used as a means to alter the
++	  average power of the signal. Applications include efficient
++	  power delivery and voltage regulation. In computer systems,
++	  PWMs are commonly used to control fans or the brightness of
++	  display backlights.
++
++	  This framework provides a generic interface to PWM devices
++	  within the Linux kernel. On the driver side it provides an API
++	  to register and unregister a PWM chip, an abstraction of a PWM
++	  controller, that supports one or more PWM devices. Client
++	  drivers can request PWM devices and use the generic framework
++	  to configure as well as enable and disable them.
++
++	  This generic framework replaces the legacy PWM framework which
++	  allows only a single driver implementing the required API. Not
++	  all legacy implementations have been ported to the framework
++	  yet. The framework provides an API that is backward compatible
++	  with the legacy framework so that existing client drivers
++	  continue to work as expected.
++
++	  If unsure, say no.
++
++if PWM
++
++config PWM_FULLHAN
++	tristate "FH PWM support"
++	help
++	  PWM controller support for Fullhan FH SoCs.
++
++	  To compile this driver as a module, choose M here: the module will
++	  be called fh_pwm.
++
++if PWM_FULLHAN
++config FH_PWM_NUM
++	int
++	prompt "Number of PWMs, range: 1~8"
++	default 2
++	range 1 8
++	help
++	  Number of PWMs
++endif
++
++endif
+diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
+new file mode 100644
+index 00000000..64d42f52
+--- /dev/null
++++ b/drivers/pwm/Makefile
+@@ -0,0 +1,20 @@
++obj-$(CONFIG_PWM)		+= core.o
++obj-$(CONFIG_PWM_AB8500)	+= pwm-ab8500.o
++obj-$(CONFIG_PWM_ATMEL_TCB)	+= pwm-atmel-tcb.o
++obj-$(CONFIG_PWM_BFIN)		+= pwm-bfin.o
++obj-$(CONFIG_PWM_IMX)		+= pwm-imx.o
++obj-$(CONFIG_PWM_JZ4740)	+= pwm-jz4740.o
++obj-$(CONFIG_PWM_LPC32XX)	+= pwm-lpc32xx.o
++obj-$(CONFIG_PWM_MXS)		+= pwm-mxs.o
++obj-$(CONFIG_PWM_PUV3)		+= pwm-puv3.o
++obj-$(CONFIG_PWM_PXA)		+= pwm-pxa.o
++obj-$(CONFIG_PWM_SAMSUNG)	+= pwm-samsung.o
++obj-$(CONFIG_PWM_SPEAR)		+= pwm-spear.o
++obj-$(CONFIG_PWM_TEGRA)		+= pwm-tegra.o
++obj-$(CONFIG_PWM_TIECAP)	+= pwm-tiecap.o
++obj-$(CONFIG_PWM_TIEHRPWM)	+= pwm-tiehrpwm.o
++obj-$(CONFIG_PWM_TIPWMSS)	+= pwm-tipwmss.o
++obj-$(CONFIG_PWM_TWL)		+= pwm-twl.o
++obj-$(CONFIG_PWM_TWL_LED)	+= pwm-twl-led.o
++obj-$(CONFIG_PWM_VT8500)	+= pwm-vt8500.o
++obj-$(CONFIG_PWM_FULLHAN)	+= pwmv2-fullhan.o
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+new file mode 100644
+index 00000000..01e8aba6
+--- /dev/null
++++ b/drivers/pwm/core.c
+@@ -0,0 +1,642 @@
++/*
++ * Generic pwmlib implementation
++ *
++ * Copyright (C) 2011 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2011-2012 Avionic Design GmbH
++ *
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License as published by
++ *  the Free Software Foundation; either version 2, or (at your option)
++ *  any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *  GNU General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; see the file COPYING.  If not, write to
++ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/pwm.h>
++#include <linux/radix-tree.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++
++#define MAX_PWMS 1024
++
++/* flags in the third cell of the DT PWM specifier */
++#define PWM_SPEC_POLARITY	(1 << 0)
++
++static DEFINE_MUTEX(pwm_lookup_lock);
++static LIST_HEAD(pwm_lookup_list);
++static DEFINE_MUTEX(pwm_lock);
++static LIST_HEAD(pwm_chips);
++static DECLARE_BITMAP(allocated_pwms, MAX_PWMS);
++static RADIX_TREE(pwm_tree, GFP_KERNEL);
++
++static struct pwm_device *pwm_to_device(unsigned int pwm)
++{
++	return radix_tree_lookup(&pwm_tree, pwm);
++}
++
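++/* Find a free range of 'count' consecutive PWM numbers; a chip that asks
++ * for a fixed base (pwm >= 0) gets exactly that range or -EEXIST.
++ */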
++static int alloc_pwms(int pwm, unsigned int count)
++{
++	unsigned int from = 0;
++	unsigned int start;
++
++	if (pwm >= MAX_PWMS)
++		return -EINVAL;
++
++	if (pwm >= 0)
++		from = pwm;
++
++	start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from,
++					   count, 0);
++
++	if (pwm >= 0 && start != pwm)
++		return -EEXIST;
++
++	if (start + count > MAX_PWMS)
++		return -ENOSPC;
++
++	return start;
++}
++
++static void free_pwms(struct pwm_chip *chip)
++{
++	unsigned int i;
++
++	for (i = 0; i < chip->npwm; i++) {
++		struct pwm_device *pwm = &chip->pwms[i];
++		radix_tree_delete(&pwm_tree, pwm->pwm);
++	}
++
++	bitmap_clear(allocated_pwms, chip->base, chip->npwm);
++
++	kfree(chip->pwms);
++	chip->pwms = NULL;
++}
++
++static struct pwm_chip *pwmchip_find_by_name(const char *name)
++{
++	struct pwm_chip *chip;
++
++	if (!name)
++		return NULL;
++
++	mutex_lock(&pwm_lock);
++
++	list_for_each_entry(chip, &pwm_chips, list) {
++		const char *chip_name = dev_name(chip->dev);
++
++		if (chip_name && strcmp(chip_name, name) == 0) {
++			mutex_unlock(&pwm_lock);
++			return chip;
++		}
++	}
++
++	mutex_unlock(&pwm_lock);
++
++	return NULL;
++}
++
++static int pwm_device_request(struct pwm_device *pwm, const char *label)
++{
++	int err;
++
++	if (test_bit(PWMF_REQUESTED, &pwm->flags))
++		return -EBUSY;
++
++	if (!try_module_get(pwm->chip->ops->owner))
++		return -ENODEV;
++
++	if (pwm->chip->ops->request) {
++		err = pwm->chip->ops->request(pwm->chip, pwm);
++		if (err) {
++			module_put(pwm->chip->ops->owner);
++			return err;
++		}
++	}
++
++	set_bit(PWMF_REQUESTED, &pwm->flags);
++	pwm->label = label;
++
++	return 0;
++}
++
++/**
++ * pwm_set_chip_data() - set private chip data for a PWM
++ * @pwm: PWM device
++ * @data: pointer to chip-specific data
++ */
++int pwm_set_chip_data(struct pwm_device *pwm, void *data)
++{
++	if (!pwm)
++		return -EINVAL;
++
++	pwm->chip_data = data;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(pwm_set_chip_data);
++
++/**
++ * pwm_get_chip_data() - get private chip data for a PWM
++ * @pwm: PWM device
++ */
++void *pwm_get_chip_data(struct pwm_device *pwm)
++{
++	return pwm ? pwm->chip_data : NULL;
++}
++EXPORT_SYMBOL_GPL(pwm_get_chip_data);
++
++/**
++ * pwmchip_add() - register a new PWM chip
++ * @chip: the PWM chip to add
++ *
++ * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
++ * will be used.
++ */
++int pwmchip_add(struct pwm_chip *chip)
++{
++	struct pwm_device *pwm;
++	unsigned int i;
++	int ret;
++
++	if (!chip || !chip->dev || !chip->ops || !chip->ops->config ||
++	    !chip->ops->enable || !chip->ops->disable)
++		return -EINVAL;
++
++	mutex_lock(&pwm_lock);
++
++	ret = alloc_pwms(chip->base, chip->npwm);
++	if (ret < 0)
++		goto out;
++
++	chip->pwms = kzalloc(chip->npwm * sizeof(*pwm), GFP_KERNEL);
++	if (!chip->pwms) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	chip->base = ret;
++
++	for (i = 0; i < chip->npwm; i++) {
++		pwm = &chip->pwms[i];
++
++		pwm->chip = chip;
++		pwm->pwm = chip->base + i;
++		pwm->hwpwm = i;
++
++		radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
++	}
++
++	bitmap_set(allocated_pwms, chip->base, chip->npwm);
++
++	INIT_LIST_HEAD(&chip->list);
++	list_add(&chip->list, &pwm_chips);
++
++	ret = 0;
++out:
++	mutex_unlock(&pwm_lock);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(pwmchip_add);
++
++/**
++ * pwmchip_remove() - remove a PWM chip
++ * @chip: the PWM chip to remove
++ *
++ * Removes a PWM chip. This function may return busy if the PWM chip provides
++ * a PWM device that is still requested.
++ */
++int pwmchip_remove(struct pwm_chip *chip)
++{
++	unsigned int i;
++	int ret = 0;
++
++	mutex_lock(&pwm_lock);
++
++	for (i = 0; i < chip->npwm; i++) {
++		struct pwm_device *pwm = &chip->pwms[i];
++
++		if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
++			ret = -EBUSY;
++			goto out;
++		}
++	}
++
++	list_del_init(&chip->list);
++
++	free_pwms(chip);
++
++out:
++	mutex_unlock(&pwm_lock);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(pwmchip_remove);
++
++/**
++ * pwm_request() - request a PWM device
++ * @pwm_id: global PWM device index
++ * @label: PWM device label
++ *
++ * This function is deprecated, use pwm_get() instead.
++ */
++struct pwm_device *pwm_request(int pwm, const char *label)
++{
++	struct pwm_device *dev;
++	int err;
++
++	if (pwm < 0 || pwm >= MAX_PWMS)
++		return ERR_PTR(-EINVAL);
++
++	mutex_lock(&pwm_lock);
++
++	dev = pwm_to_device(pwm);
++	if (!dev) {
++		dev = ERR_PTR(-ENODEV);
++		goto out;
++	}
++
++	err = pwm_device_request(dev, label);
++	if (err < 0)
++		dev = ERR_PTR(err);
++
++out:
++	mutex_unlock(&pwm_lock);
++
++	return dev;
++}
++EXPORT_SYMBOL_GPL(pwm_request);
++
++/**
++ * pwm_request_from_chip() - request a PWM device relative to a PWM chip
++ * @chip: PWM chip
++ * @index: per-chip index of the PWM to request
++ * @label: a literal description string of this PWM
++ *
++ * Returns the PWM at the given index of the given PWM chip. A negative error
++ * code is returned if the index is not valid for the specified PWM chip or
++ * if the PWM device cannot be requested.
++ */
++struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
++					 unsigned int index,
++					 const char *label)
++{
++	struct pwm_device *pwm;
++	int err;
++
++	if (!chip || index >= chip->npwm)
++		return ERR_PTR(-EINVAL);
++
++	mutex_lock(&pwm_lock);
++	pwm = &chip->pwms[index];
++
++	err = pwm_device_request(pwm, label);
++	if (err < 0)
++		pwm = ERR_PTR(err);
++
++	mutex_unlock(&pwm_lock);
++	return pwm;
++}
++EXPORT_SYMBOL_GPL(pwm_request_from_chip);
++
++/**
++ * pwm_free() - free a PWM device
++ * @pwm: PWM device
++ *
++ * This function is deprecated, use pwm_put() instead.
++ */
++void pwm_free(struct pwm_device *pwm)
++{
++	pwm_put(pwm);
++}
++EXPORT_SYMBOL_GPL(pwm_free);
++
++/**
++ * pwm_config() - change a PWM device configuration
++ * @pwm: PWM device
++ * @duty_ns: "on" time (in nanoseconds)
++ * @period_ns: duration (in nanoseconds) of one cycle
++ */
++int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
++{
++	if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns)
++		return -EINVAL;
++
++	return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
++}
++EXPORT_SYMBOL_GPL(pwm_config);
++
++/**
++ * pwm_set_polarity() - configure the polarity of a PWM signal
++ * @pwm: PWM device
++ * @polarity: new polarity of the PWM signal
++ *
++ * Note that the polarity cannot be configured while the PWM device is enabled
++ */
++int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
++{
++	if (!pwm || !pwm->chip->ops)
++		return -EINVAL;
++
++	if (!pwm->chip->ops->set_polarity)
++		return -ENOSYS;
++
++	if (test_bit(PWMF_ENABLED, &pwm->flags))
++		return -EBUSY;
++
++	return pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
++}
++EXPORT_SYMBOL_GPL(pwm_set_polarity);
++
++/**
++ * pwm_enable() - start a PWM output toggling
++ * @pwm: PWM device
++ */
++int pwm_enable(struct pwm_device *pwm)
++{
++	if (pwm && !test_and_set_bit(PWMF_ENABLED, &pwm->flags))
++		return pwm->chip->ops->enable(pwm->chip, pwm);
++
++	return pwm ? 0 : -EINVAL;
++}
++EXPORT_SYMBOL_GPL(pwm_enable);
++
++/**
++ * pwm_disable() - stop a PWM output toggling
++ * @pwm: PWM device
++ */
++void pwm_disable(struct pwm_device *pwm)
++{
++	if (pwm && test_and_clear_bit(PWMF_ENABLED, &pwm->flags))
++		pwm->chip->ops->disable(pwm->chip, pwm);
++}
++EXPORT_SYMBOL_GPL(pwm_disable);
++
++/**
++ * pwm_add_table() - register PWM device consumers
++ * @table: array of consumers to register
++ * @num: number of consumers in table
++ */
++void __init pwm_add_table(struct pwm_lookup *table, size_t num)
++{
++	mutex_lock(&pwm_lookup_lock);
++
++	while (num--) {
++		list_add_tail(&table->list, &pwm_lookup_list);
++		table++;
++	}
++
++	mutex_unlock(&pwm_lookup_lock);
++}
++
++/**
++ * pwm_get() - look up and request a PWM device
++ * @dev: device for PWM consumer
++ * @con_id: consumer name
++ *
++ * Lookup is first attempted using DT. If the device was not instantiated from
++ * a device tree, a PWM chip and a relative index is looked up via a table
++ * supplied by board setup code (see pwm_add_table()).
++ *
++ * Once a PWM chip has been found the specified PWM device will be requested
++ * and is ready to be used.
++ */
++struct pwm_device *pwm_get(struct device *dev, const char *con_id)
++{
++	struct pwm_device *pwm = ERR_PTR(-ENODEV);
++	const char *dev_id = dev ? dev_name(dev) : NULL;
++	struct pwm_chip *chip = NULL;
++	unsigned int index = 0;
++	unsigned int best = 0;
++	struct pwm_lookup *p;
++	unsigned int match;
++
++	/*
++	 * We look up the provider in the static table typically provided by
++	 * board setup code. We first try to lookup the consumer device by
++	 * name. If the consumer device was passed in as NULL or if no match
++	 * was found, we try to find the consumer by directly looking it up
++	 * by name.
++	 *
++	 * If a match is found, the provider PWM chip is looked up by name
++	 * and a PWM device is requested using the PWM device per-chip index.
++	 *
++	 * The lookup algorithm was shamelessly taken from the clock
++	 * framework:
++	 *
++	 * We do slightly fuzzy matching here:
++	 *  An entry with a NULL ID is assumed to be a wildcard.
++	 *  If an entry has a device ID, it must match
++	 *  If an entry has a connection ID, it must match
++	 * Then we take the most specific entry - with the following order
++	 * of precedence: dev+con > dev only > con only.
++	 */
++	mutex_lock(&pwm_lookup_lock);
++
++	list_for_each_entry(p, &pwm_lookup_list, list) {
++		match = 0;
++
++		if (p->dev_id) {
++			if (!dev_id || strcmp(p->dev_id, dev_id))
++				continue;
++
++			match += 2;
++		}
++
++		if (p->con_id) {
++			if (!con_id || strcmp(p->con_id, con_id))
++				continue;
++
++			match += 1;
++		}
++
++		if (match > best) {
++			chip = pwmchip_find_by_name(p->provider);
++			index = p->index;
++
++			if (match != 3)
++				best = match;
++			else
++				break;
++		}
++	}
++
++	if (chip)
++		pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id);
++
++	mutex_unlock(&pwm_lookup_lock);
++
++	return pwm;
++}
++EXPORT_SYMBOL_GPL(pwm_get);
++
++/**
++ * pwm_put() - release a PWM device
++ * @pwm: PWM device
++ */
++void pwm_put(struct pwm_device *pwm)
++{
++	if (!pwm)
++		return;
++
++	mutex_lock(&pwm_lock);
++
++	if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) {
++		pr_warn("PWM device already freed\n");
++		goto out;
++	}
++
++	if (pwm->chip->ops->free)
++		pwm->chip->ops->free(pwm->chip, pwm);
++
++	pwm->label = NULL;
++
++	module_put(pwm->chip->ops->owner);
++out:
++	mutex_unlock(&pwm_lock);
++}
++EXPORT_SYMBOL_GPL(pwm_put);
++
++static void devm_pwm_release(struct device *dev, void *res)
++{
++	pwm_put(*(struct pwm_device **)res);
++}
++
++/**
++ * devm_pwm_get() - resource managed pwm_get()
++ * @dev: device for PWM consumer
++ * @con_id: consumer name
++ *
++ * This function performs like pwm_get() but the acquired PWM device will
++ * automatically be released on driver detach.
++ */
++struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
++{
++	struct pwm_device **ptr, *pwm;
++
++	ptr = devres_alloc(devm_pwm_release, sizeof(**ptr), GFP_KERNEL);
++	if (!ptr)
++		return ERR_PTR(-ENOMEM);
++
++	pwm = pwm_get(dev, con_id);
++	if (!IS_ERR(pwm)) {
++		*ptr = pwm;
++		devres_add(dev, ptr);
++	} else {
++		devres_free(ptr);
++	}
++
++	return pwm;
++}
++EXPORT_SYMBOL_GPL(devm_pwm_get);
++
++/**
++  * pwm_can_sleep() - report whether PWM access will sleep
++  * @pwm: PWM device
++  *
++  * It returns true if accessing the PWM can sleep, false otherwise.
++  */
++bool pwm_can_sleep(struct pwm_device *pwm)
++{
++	return pwm->chip->can_sleep;
++}
++EXPORT_SYMBOL_GPL(pwm_can_sleep);
++
++#ifdef CONFIG_DEBUG_FS
++static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
++{
++	unsigned int i;
++
++	for (i = 0; i < chip->npwm; i++) {
++		struct pwm_device *pwm = &chip->pwms[i];
++
++		seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label);
++
++		if (test_bit(PWMF_REQUESTED, &pwm->flags))
++			seq_printf(s, " requested");
++
++		if (test_bit(PWMF_ENABLED, &pwm->flags))
++			seq_printf(s, " enabled");
++
++		seq_printf(s, "\n");
++	}
++}
++
++static void *pwm_seq_start(struct seq_file *s, loff_t *pos)
++{
++	mutex_lock(&pwm_lock);
++	s->private = "";
++
++	return seq_list_start(&pwm_chips, *pos);
++}
++
++static void *pwm_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++	s->private = "\n";
++
++	return seq_list_next(v, &pwm_chips, pos);
++}
++
++static void pwm_seq_stop(struct seq_file *s, void *v)
++{
++	mutex_unlock(&pwm_lock);
++}
++
++static int pwm_seq_show(struct seq_file *s, void *v)
++{
++	struct pwm_chip *chip = list_entry(v, struct pwm_chip, list);
++
++	seq_printf(s, "%s%s/%s, %d PWM device%s\n", (char *)s->private,
++		   chip->dev->bus ? chip->dev->bus->name : "no-bus",
++		   dev_name(chip->dev), chip->npwm,
++		   (chip->npwm != 1) ? "s" : "");
++
++	if (chip->ops->dbg_show)
++		chip->ops->dbg_show(chip, s);
++	else
++		pwm_dbg_show(chip, s);
++
++	return 0;
++}
++
++static const struct seq_operations pwm_seq_ops = {
++	.start = pwm_seq_start,
++	.next = pwm_seq_next,
++	.stop = pwm_seq_stop,
++	.show = pwm_seq_show,
++};
++
++static int pwm_seq_open(struct inode *inode, struct file *file)
++{
++	return seq_open(file, &pwm_seq_ops);
++}
++
++static const struct file_operations pwm_debugfs_ops = {
++	.owner = THIS_MODULE,
++	.open = pwm_seq_open,
++	.read = seq_read,
++	.llseek = seq_lseek,
++	.release = seq_release,
++};
++
++static int __init pwm_debugfs_init(void)
++{
++	debugfs_create_file("pwm", S_IFREG | S_IRUGO, NULL, NULL,
++			    &pwm_debugfs_ops);
++
++	return 0;
++}
++
++subsys_initcall(pwm_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
+diff --git a/drivers/pwm/pwm-fullhan.c b/drivers/pwm/pwm-fullhan.c
+new file mode 100644
+index 00000000..b1f3c618
+--- /dev/null
++++ b/drivers/pwm/pwm-fullhan.c
+@@ -0,0 +1,607 @@
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/pwm.h>
++#include <linux/printk.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/decompress/mm.h>
++#include <linux/of_address.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include <asm/uaccess.h>
++#include "pwm-fullhan.h"
++
++#undef  FH_PWM_DEBUG
++#ifdef FH_PWM_DEBUG
++#define PRINT_DBG(fmt,args...)  printk(fmt,##args)
++#else
++#define PRINT_DBG(fmt,args...)  do{} while(0)
++#endif
++
++struct fh_pwm_chip {
++	struct pwm_chip chip;
++	void __iomem *base;
++	struct clk *clk;
++	struct proc_dir_entry *proc_file;
++};
++
++struct fh_pwm_chip fh_pwm;
++
++
++static int pwm_get_duty_cycle_ns(struct fh_pwm_chip_data *chip_data)
++{
++	u32 reg, period, duty;
++	u32 clk_rate = clk_get_rate(fh_pwm.clk);
++
++	if (!clk_rate) {
++		pr_err("PWM: clock rate is 0\n");
++		return -EINVAL;
++	}
++
++	reg = readl(fh_pwm.base + REG_PWM_CMD(chip_data->id));
++	period = reg & 0x0fff;
++	duty = (reg >> 16) & 0xfff;
++	duty = period - duty;	/* the register stores the inverted duty */
++
++	if (period == 0)
++		period = duty;
++
++	/* multiply by the tick length in ns; duty * 1000000000 would
++	 * overflow 32 bits for duty >= 5
++	 */
++	chip_data->counter_ns = duty * (1000000000 / clk_rate);
++	chip_data->period_ns = period * (1000000000 / clk_rate);
++
++	PRINT_DBG("get duty: %d, period: %d, reg: 0x%x\n", duty, period, reg);
++
++	return 0;
++}
++
++static int pwm_set_duty_cycle_ns(struct fh_pwm_chip_data *chip_data)
++{
++	u32 period, duty, reg, clk_rate, duty_revert;
++	clk_rate = clk_get_rate(fh_pwm.clk);
++	if (!clk_rate) {
++		pr_err("PWM: clock rate is 0\n");
++		return -EINVAL;
++	}
++	period = chip_data->period_ns / (1000000000 / clk_rate);
++
++	if (period < 8) {
++		pr_err("PWM: min period is 8\n");
++		return -EINVAL;
++	}
++
++	duty = chip_data->counter_ns / (1000000000 / clk_rate);
++
++	if (period < duty) {
++		pr_err("PWM: period < duty\n");
++		return -EINVAL;
++	}
++
++	duty_revert = period - duty;
++
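++	/* CMD register layout (cf. pwm_get_duty_cycle_ns): bits 11:0 hold
++	 * the period and bits 27:16 the low time, both in clock ticks;
++	 * e.g. with a 1 MHz clock, period_ns = 1000000 and
++	 * counter_ns = 250000 give reg = (750 << 16) | 1000 = 0x02ee03e8.
++	 */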
++	if (duty == period)
++		reg = (duty & 0xfff) << 16 | (0 & 0xfff);
++	else
++		reg = (duty_revert & 0xfff) << 16 | (period & 0xfff);
++
++	PRINT_DBG("set duty_revert: %d, period: %d, reg: 0x%x\n", duty_revert, period, reg);
++
++	writel(reg, fh_pwm.base + REG_PWM_CMD(chip_data->id));
++	return 0;
++}
++
++
++static int pwm_set_duty_cycle_percent(struct fh_pwm_chip_data *chip_data)
++{
++	u32 period, duty, reg, clk_rate, duty_revert;
++	clk_rate = clk_get_rate(fh_pwm.clk);
++	if (!clk_rate) {
++		pr_err("PWM: clock rate is 0\n");
++		return -EINVAL;
++	}
++
++	if (chip_data->percent > 100 || chip_data->percent < 0) {
++		pr_err("PWM: pwm->percent is out of range\n");
++		return -EINVAL;
++	}
++
++	period = chip_data->period_ns / (1000000000 / clk_rate);
++
++	if (period < 8) {
++		pr_err("PWM: min period is 8\n");
++		return -EINVAL;
++	}
++
++	duty = period * chip_data->percent / 100;
++
++	if (period < duty) {
++		pr_err("PWM: period < duty\n");
++		return -EINVAL;
++	}
++
++	duty_revert = period - duty;
++
++	if (duty == period)
++		reg = (duty & 0xfff) << 16 | (0 & 0xfff);
++	else
++		reg = (duty_revert & 0xfff) << 16 | (period & 0xfff);
++
++	PRINT_DBG("set duty_revert: %d, period: %d, reg: 0x%x\n", duty_revert, period, reg);
++
++	writel(reg, fh_pwm.base + REG_PWM_CMD(chip_data->id));
++	return 0;
++}
++
++
++int fh_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
++		  int duty_ns, int period_ns)
++{
++	struct fh_pwm_chip_data *chip_data;
++
++	chip_data = pwm_get_chip_data(pwm);
++	if (!chip_data) {
++		pr_err("%s: ERROR: PWM %d does NOT exist\n",
++		       __func__, pwm->hwpwm);
++		return -ENXIO;
++	}
++	chip_data->counter_ns = duty_ns;
++	chip_data->period_ns = period_ns;
++	pwm_set_duty_cycle_ns(chip_data);
++	return 0;
++}
++
++static int _fh_pwm_enable(struct fh_pwm_chip_data *chip_data)
++{
++	int i;
++	unsigned int reg = 0;
++	chip_data->working = 1;
++
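++	/* the control register holds one enable bit per channel, so the
++	 * whole mask is rebuilt from every channel's 'working' flag
++	 */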
++	for (i = 0; i < fh_pwm.chip.npwm; i++) {
++		chip_data = pwm_get_chip_data(&fh_pwm.chip.pwms[i]);
++		reg |= chip_data->working << i;
++	}
++
++	writel(reg, fh_pwm.base + REG_PWM_CTRL);
++	return 0;
++}
++
++int fh_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
++{
++	struct fh_pwm_chip_data *chip_data;
++
++	chip_data = pwm_get_chip_data(pwm);
++	if (!chip_data) {
++		pr_err("%s: ERROR: PWM %d does NOT exist\n",
++		       __func__, pwm->hwpwm);
++		return -ENXIO;
++	}
++
++	_fh_pwm_enable(chip_data);
++
++	return 0;
++}
++
++static int _fh_pwm_disable(struct fh_pwm_chip_data *chip_data)
++{
++	int i;
++	unsigned int reg = 0;
++	chip_data->working = 0;
++
++	for (i = 0; i < fh_pwm.chip.npwm; i++) {
++		chip_data = pwm_get_chip_data(&fh_pwm.chip.pwms[i]);
++		reg |= chip_data->working << i;
++	}
++
++	writel(reg, fh_pwm.base + REG_PWM_CTRL);
++	return 0;
++}
++
++void fh_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
++{
++	struct fh_pwm_chip_data *chip_data;
++
++	chip_data = pwm_get_chip_data(pwm);
++	if (!chip_data) {
++		pr_err("%s: ERROR: PWM %d does NOT exist\n",
++		       __func__, pwm->hwpwm);
++		return;
++	}
++
++	_fh_pwm_disable(chip_data);
++}
++
++static int fh_pwm_open(struct inode *inode, struct file *file)
++{
++	int i;
++	struct fh_pwm_chip_data *chip_data;
++	struct pwm_device *pwm;
++
++	for (i = 0; i < fh_pwm.chip.npwm; i++) {
++		pwm = &fh_pwm.chip.pwms[i];
++
++		if (!pwm) {
++			pr_err("%s: ERROR: PWM %d does NOT exist\n",
++			       __func__, i);
++			return -ENXIO;
++		}
++		chip_data = pwm_get_chip_data(pwm);
++		if (!chip_data) {
++			chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
++			if (!chip_data)
++				return -ENOMEM;
++		}
++
++		chip_data->id = pwm->hwpwm;
++		chip_data->working = 0;
++		pwm->chip_data = chip_data;
++	}
++	return 0;
++}
++
++static int fh_pwm_release(struct inode *inode, struct file *filp)
++{
++	int i;
++	struct fh_pwm_chip_data *chip_data;
++	struct pwm_device *pwm;
++
++	for (i = 0; i < fh_pwm.chip.npwm; i++) {
++		pwm = &fh_pwm.chip.pwms[i];
++
++		if (!pwm) {
++			pr_err("%s: ERROR: PWM %d does NOT exist\n",
++			       __func__, i);
++			return -ENOTTY;
++		}
++		chip_data = pwm_get_chip_data(pwm);
++		kfree(chip_data);
++		pwm_set_chip_data(pwm, NULL);
++	}
++	return 0;
++}
++
++
++static long fh_pwm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++	int ret = 0;
++	struct fh_pwm_chip_data *pwm;
++	struct fh_pwm_chip_data data;
++
++	if (unlikely(_IOC_TYPE(cmd) != PWM_IOCTL_MAGIC)) {
++		pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
++		       __func__, _IOC_TYPE(cmd), -ENOTTY);
++		return -ENOTTY;
++	}
++
++	if (unlikely(_IOC_NR(cmd) > PWM_IOCTL_MAXNR)) {
++		pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
++		       __func__, _IOC_NR(cmd), -ENOTTY);
++		return -ENOTTY;
++	}
++
++	if (_IOC_DIR(cmd) & _IOC_READ)
++		ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
++	else if (_IOC_DIR(cmd) & _IOC_WRITE)
++		ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
++
++	if (ret) {
++		pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
++		       __func__, _IOC_NR(cmd), -EACCES);
++		return -EACCES;
++	}
++
++	/* the argument is a user-space pointer: copy the struct in
++	 * instead of dereferencing the raw pointer */
++	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
++		return -EFAULT;
++
++	if (data.id < 0 || data.id >= fh_pwm.chip.npwm)
++		return -EINVAL;
++
++	pwm = pwm_get_chip_data(&fh_pwm.chip.pwms[data.id]);
++	if (!pwm)
++		return -ENXIO;
++
++	switch (cmd) {
++	case ENABLE_PWM:
++		_fh_pwm_enable(pwm);
++		break;
++	case DISABLE_PWM:
++		_fh_pwm_disable(pwm);
++		break;
++	case SET_PWM_DUTY_CYCLE:
++		pwm->counter_ns = data.counter_ns;
++		pwm->period_ns = data.period_ns;
++		PRINT_DBG("ioctl: pwm->id: %d, pwm->period: %u ns\n",
++			  pwm->id, pwm->period_ns);
++		ret = pwm_set_duty_cycle_ns(pwm);
++		break;
++	case GET_PWM_DUTY_CYCLE:
++		ret = pwm_get_duty_cycle_ns(pwm);
++		if (!ret &&
++		    copy_to_user((void __user *)arg, pwm, sizeof(*pwm)))
++			return -EFAULT;
++		break;
++	case SET_PWM_DUTY_CYCLE_PERCENT:
++		pwm->percent = data.percent;
++		pwm->period_ns = data.period_ns;
++		ret = pwm_set_duty_cycle_percent(pwm);
++		break;
++	}
++
++
++	return ret;
++}
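++
++/*
++ * Userspace sketch (assumed usage, matching the definitions in
++ * pwm-fullhan.h; the misc device node name comes from DEVICE_NAME):
++ *
++ *	struct fh_pwm_chip_data d = { .id = 0, .counter_ns = 2500,
++ *				      .period_ns = 10000 };
++ *	int fd = open("/dev/fh_pwm", O_RDWR);
++ *	ioctl(fd, SET_PWM_DUTY_CYCLE, &d);
++ *	ioctl(fd, ENABLE_PWM, &d);
++ */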
++
++static const struct file_operations fh_pwm_fops = {
++	.owner 			= THIS_MODULE,
++	.open 			= fh_pwm_open,
++	.release 		= fh_pwm_release,
++	.unlocked_ioctl = fh_pwm_ioctl,
++};
++
++static struct miscdevice fh_pwm_misc = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = DEVICE_NAME,
++	.fops = &fh_pwm_fops,
++};
++
++static const struct pwm_ops fh_pwm_ops = {
++	.config = fh_pwm_config,
++	.enable = fh_pwm_enable,
++	.disable = fh_pwm_disable,
++	.owner = THIS_MODULE,
++};
++
++
++static void del_char(char *str, char ch)
++{
++	char *p = str;
++	char *q = str;
++	while (*q) {
++		if (*q != ch)
++			*p++ = *q;
++		q++;
++	}
++	*p = '\0';
++}
++
++static ssize_t fh_pwm_proc_write(struct file *filp, const char __user *buf, size_t len, loff_t *off)
++{
++	int i;
++	char message[32] = {0};
++	char *const delim = ",";
++	char *cur = message;
++	char *param_str[4];
++	unsigned int param[4];
++	struct fh_pwm_chip_data *chip_data;
++
++	len = (len > sizeof(message) - 1) ? sizeof(message) - 1 : len;
++
++	if (copy_from_user(message, buf, len))
++		return -EFAULT;
++
++	for (i = 0; i < 4; i++) {
++		param_str[i] = strsep(&cur, delim);
++		if (!param_str[i]) {
++			pr_err("%s: ERROR: parameter[%d] is empty\n", __func__, i);
++			return -EINVAL;
++		} else {
++			del_char(param_str[i], ' ');
++			del_char(param_str[i], '\n');
++			param[i] = (u32)simple_strtoul(param_str[i], NULL, 10);
++			if (param[i] < 0) {
++				pr_err("%s: ERROR: parameter[%d] is incorrect\n", __func__, i);
++				return -EINVAL;
++			}
++		}
++	}
++
++	printk("set pwm %d to %s, duty cycle: %u ns, period cycle: %u\n", param[0],
++	       param[1] ? "enable" : "disable", param[2], param[3]);
++	chip_data = pwm_get_chip_data(&fh_pwm.chip.pwms[param[0]]);
++	chip_data->counter_ns = param[2];
++	chip_data->period_ns = param[3];
++
++	param[1] ? fh_pwm_enable(&fh_pwm.chip, &fh_pwm.chip.pwms[param[0]]) : fh_pwm_disable(&fh_pwm.chip,
++			&fh_pwm.chip.pwms[param[0]]);
++	pwm_set_duty_cycle_ns(chip_data);
++
++	return len;
++}
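++
++/*
++ * Proc interface sketch: the accepted format is
++ * "id,enable,duty_ns,period_ns", e.g. (illustrative) 25% duty at a
++ * 10 us period on channel 0:
++ *
++ *	echo '0,1,2500,10000' > /proc/driver/pwm
++ */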
++
++static void *v_seq_start(struct seq_file *s, loff_t *pos)
++{
++	static unsigned long counter = 0;
++	if (*pos == 0)
++		return &counter;
++	else {
++		*pos = 0;
++		return NULL;
++	}
++}
++
++static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++	(*pos)++;
++	return NULL;
++}
++
++static void v_seq_stop(struct seq_file *s, void *v)
++{
++
++}
++
++static int v_seq_show(struct seq_file *sfile, void *v)
++{
++	int i;
++	seq_printf(sfile, "\nPWM Status:\n");
++
++	for (i = 0; i < fh_pwm.chip.npwm; i++) {
++		struct fh_pwm_chip_data *chip_data;
++
++		chip_data = pwm_get_chip_data(&fh_pwm.chip.pwms[i]);
++		seq_printf(sfile, "id: %d \t%s, duty_ns: %u, period_ns: %u\n",
++			   chip_data->id,
++			   (chip_data->working) ? "ENABLE" : "DISABLE",
++			   chip_data->counter_ns,
++			   chip_data->period_ns);
++	}
++	return 0;
++}
++
++static const struct seq_operations isp_seq_ops = {
++	.start = v_seq_start,
++	.next = v_seq_next,
++	.stop = v_seq_stop,
++	.show = v_seq_show
++};
++
++static int fh_pwm_proc_open(struct inode *inode, struct file *file)
++{
++	return seq_open(file, &isp_seq_ops);
++}
++
++
++static struct file_operations fh_pwm_proc_ops = {
++	.owner = THIS_MODULE,
++	.open = fh_pwm_proc_open,
++	.read = seq_read,
++	.write = fh_pwm_proc_write,
++	.release = seq_release,
++};
++
++static int __devinit fh_pwm_probe(struct platform_device *pdev)
++{
++	int err, i;
++	struct resource *res;
++
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (res == NULL) {
++		err = -ENXIO;
++		goto fail_no_mem_resource;
++	}
++
++	res = request_mem_region(res->start, resource_size(res), pdev->name);
++	if (res == NULL) {
++		err = -EBUSY;
++		goto fail_no_mem_resource;
++	}
++
++	fh_pwm.base = ioremap(res->start, resource_size(res));
++	if (fh_pwm.base == NULL) {
++		err = -ENXIO;
++		goto fail_no_ioremap;
++	}
++
++	fh_pwm.clk = clk_get(&pdev->dev, "pwm_clk");
++
++	if (IS_ERR(fh_pwm.clk)) {
++		err = PTR_ERR(fh_pwm.clk);
++		goto fail_no_clk;
++	}
++
++	clk_enable(fh_pwm.clk);
++
++	err = misc_register(&fh_pwm_misc);
++	if (err < 0) {
++		pr_err("%s: ERROR: %s registration failed",
++		       __func__, DEVICE_NAME);
++		return -ENXIO;
++	}
++
++	fh_pwm.chip.dev = &pdev->dev;
++	fh_pwm.chip.ops = &fh_pwm_ops;
++	fh_pwm.chip.base = pdev->id;
++	fh_pwm.chip.npwm = CONFIG_FH_PWM_NUM;
++
++	err = pwmchip_add(&fh_pwm.chip);
++	if (err < 0) {
++		pr_err("%s: ERROR: %s pwmchip_add failed",
++		       __func__, DEVICE_NAME);
++		return err;
++	}
++
++	for (i = 0; i < fh_pwm.chip.npwm; i++) {
++		struct fh_pwm_chip_data *chip_data;
++
++		chip_data = kzalloc(sizeof(struct fh_pwm_chip_data), GFP_KERNEL);
++		if (chip_data == NULL) {
++			pr_err("pwm[%d], chip data malloced failed\n", i);
++			continue;
++		}
++
++		chip_data->id = i;
++		chip_data->working = 0;
++
++		pwm_set_chip_data(&fh_pwm.chip.pwms[i], chip_data);
++	}
++
++	platform_set_drvdata(pdev, &fh_pwm);
++
++	/* disable pwm at startup. Avoids zero value. */
++	writel(0x0, fh_pwm.base + REG_PWM_CTRL);
++
++	pr_info("PWM driver, Number: %d, IO base addr: 0x%p\n",
++		fh_pwm.chip.npwm, fh_pwm.base);
++
++	fh_pwm.proc_file = create_proc_entry(FH_PWM_PROC_FILE, 0644, NULL);
++
++	if (fh_pwm.proc_file)
++		fh_pwm.proc_file->proc_fops = &fh_pwm_proc_ops;
++	else
++		pr_err("%s: ERROR: %s proc file create failed",
++		       __func__, DEVICE_NAME);
++
++	dev_dbg(&pdev->dev, "PWM probe successful, IO base addr: %p\n",
++		fh_pwm.base);
++	return 0;
++
++fail_no_clk:
++	iounmap(fh_pwm.base);
++fail_no_ioremap:
++	release_mem_region(res->start, resource_size(res));
++fail_no_mem_resource:
++	return err;
++}
++
++static int __exit fh_pwm_remove(struct platform_device *pdev)
++{
++	int err, i;
++	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++	for (i = 0; i < fh_pwm.chip.npwm; i++)
++		kfree(fh_pwm.chip.pwms[i].chip_data);
++
++	err = pwmchip_remove(&fh_pwm.chip);
++	if (err < 0)
++		return err;
++
++	dev_dbg(&pdev->dev, "pwm driver removed\n");
++
++	writel(0x0, fh_pwm.base + REG_PWM_CTRL);
++	clk_disable(fh_pwm.clk);
++
++	iounmap(fh_pwm.base);
++	release_mem_region(res->start, resource_size(res));
++	platform_set_drvdata(pdev, NULL);
++	misc_deregister(&fh_pwm_misc);
++	return 0;
++}
++
++static struct platform_driver fh_pwm_driver = {
++	.driver	=
++	{
++		.name	= DEVICE_NAME,
++		.owner	= THIS_MODULE,
++	},
++	.probe 		= fh_pwm_probe,
++	.remove		= __exit_p(fh_pwm_remove),
++};
++
++static int __init fh_pwm_init(void)
++{
++	return platform_driver_register(&fh_pwm_driver);
++}
++
++static void __exit fh_pwm_exit(void)
++{
++
++	platform_driver_unregister(&fh_pwm_driver);
++
++}
++
++module_init(fh_pwm_init);
++module_exit(fh_pwm_exit);
++
++
++MODULE_AUTHOR("fullhan");
++
++MODULE_DESCRIPTION("FH PWM driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
+diff --git a/drivers/pwm/pwm-fullhan.h b/drivers/pwm/pwm-fullhan.h
+new file mode 100644
+index 00000000..cb81ac09
+--- /dev/null
++++ b/drivers/pwm/pwm-fullhan.h
+@@ -0,0 +1,31 @@
++#ifndef FH_PWM_H_
++#define FH_PWM_H_
++
++#include <linux/slab.h>
++#include <linux/ioctl.h>
++
++#define DEVICE_NAME					"fh_pwm"
++#define FH_PWM_PROC_FILE            "driver/pwm"
++
++#define REG_PWM_CTRL				(0x00)
++#define REG_PWM_CMD(n)          	(((n) * 4) + REG_PWM_CTRL + 4)
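++
++/*
++ * Layout note (as used by pwm-fullhan.c): each channel n has a single
++ * command register at CTRL + 4 + 4*n, holding the inverted duty count
++ * in bits [27:16] and the period count in bits [11:0].
++ */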
++
++#define PWM_IOCTL_MAGIC 			'p'
++#define ENABLE_PWM	 				_IOWR(PWM_IOCTL_MAGIC, 0, __u32)
++#define DISABLE_PWM		 			_IOWR(PWM_IOCTL_MAGIC, 1, __u32)
++
++#define SET_PWM_DUTY_CYCLE			_IOWR(PWM_IOCTL_MAGIC, 2, __u32)
++#define GET_PWM_DUTY_CYCLE 		    _IOWR(PWM_IOCTL_MAGIC, 3, __u32)
++#define SET_PWM_DUTY_CYCLE_PERCENT  _IOWR(PWM_IOCTL_MAGIC, 4, __u32)
++#define PWM_IOCTL_MAXNR 			8
++
++struct fh_pwm_chip_data
++{
++	int id;
++	int working;
++	u32 period_ns;
++	u32 counter_ns;
++	int percent;
++};
++
++#endif /* FH_PWM_H_ */
+diff --git a/drivers/pwm/pwmv2-fullhan.c b/drivers/pwm/pwmv2-fullhan.c
+new file mode 100644
+index 00000000..0a0603e6
+--- /dev/null
++++ b/drivers/pwm/pwmv2-fullhan.c
+@@ -0,0 +1,864 @@
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/pwm.h>
++#include <linux/printk.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/decompress/mm.h>
++#include <linux/of_address.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++#include <linux/time.h>
++#include <linux/interrupt.h>
++
++#include <linux/uaccess.h>
++#include "pwmv2-fullhan.h"
++
++#define FH_PWM_DEBUG
++#ifdef FH_PWM_DEBUG
++#define PRINT_DBG(fmt,args...)  printk(fmt,##args)
++#else
++#define PRINT_DBG(fmt,args...)  do{} while(0)
++#endif
++
++#define STATUS_INT			(1<<31)
++#define STATUS_FINALL0			(1<<0)
++#define STATUS_FINALL1			(1<<1)
++#define STATUS_FINALL2			(1<<2)
++#define STATUS_FINALL3			(1<<3)
++#define STATUS_FINALL4			(1<<4)
++#define STATUS_FINALL5			(1<<5)
++#define STATUS_FINALL6			(1<<6)
++#define STATUS_FINALL7			(1<<7)
++#define STATUS_FINONCE0			(1<<8)
++#define STATUS_FINONCE1			(1<<9)
++#define STATUS_FINONCE2			(1<<10)
++#define STATUS_FINONCE3			(1<<11)
++#define STATUS_FINONCE4			(1<<12)
++#define STATUS_FINONCE5			(1<<13)
++#define STATUS_FINONCE6			(1<<14)
++#define STATUS_FINONCE7			(1<<15)
++
++#define OFFSET_PWM_BASE(n)		(0x100 + 0x100 * (n))
++
++#define OFFSET_PWM_GLOBAL_CTRL0		(0x000)
++#define OFFSET_PWM_GLOBAL_CTRL1		(0x004)
++#define OFFSET_PWM_GLOBAL_CTRL2		(0x008)
++#define OFFSET_PWM_INT_ENABLE		(0x010)
++#define OFFSET_PWM_INT_STATUS		(0x014)
++
++#define OFFSET_PWM_CTRL(n)		(0x000 + OFFSET_PWM_BASE(n))
++#define OFFSET_PWM_CFG0(n)		(0x004 + OFFSET_PWM_BASE(n))
++#define OFFSET_PWM_CFG1(n)		(0x008 + OFFSET_PWM_BASE(n))
++#define OFFSET_PWM_CFG2(n)		(0x00c + OFFSET_PWM_BASE(n))
++#define OFFSET_PWM_CFG3(n)		(0x010 + OFFSET_PWM_BASE(n))
++#define OFFSET_PWM_CFG4(n)		(0x014 + OFFSET_PWM_BASE(n))
++#define OFFSET_PWM_STATUS0(n)		(0x020 + OFFSET_PWM_BASE(n))
++#define OFFSET_PWM_STATUS1(n)		(0x024 + OFFSET_PWM_BASE(n))
++#define OFFSET_PWM_STATUS2(n)		(0x028 + OFFSET_PWM_BASE(n))
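++
++/*
++ * Register map as implied by the offsets above: a global block at
++ * 0x000 (config enable, shadow/stop control, output mask, interrupt
++ * enable/status) and one 0x100-byte block per channel starting at
++ * 0x100, holding CTRL, CFG0..CFG4 (period, duty, phase, delay, pulse
++ * count) and STATUS0..2.
++ */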
++
++struct fh_pwm_driver
++{
++	unsigned int irq;
++	struct pwm_chip chip;
++	void __iomem *base;
++	struct clk *clk;
++	struct proc_dir_entry *proc_file;
++};
++
++struct fh_pwm_driver *fh_pwm_drv;
++
++static void fh_pwm_output_mask(unsigned int mask)
++{
++	writel(mask, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
++}
++
++static void fh_pwm_output_enable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
++	reg |= (1 << n);
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
++}
++
++static void fh_pwm_output_disable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
++	reg |= 1 << (8 + n);
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
++	reg &= ~(1 << n);
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2);
++}
++
++static void fh_pwm_config_enable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0);
++	reg |= (1 << n);
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0);
++}
++
++static void fh_pwm_config_disable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0);
++	reg &= ~(1 << n);
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0);
++}
++
++static void fh_pwm_shadow_enable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
++	reg |= (1 << n);
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
++}
++
++static void fh_pwm_shadow_disable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
++	reg &= ~(1 << n);
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
++}
++
++static void fh_pwm_interrupt_finishall_enable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
++	reg |= (1 << n);
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
++}
++
++static void fh_pwm_interrupt_finishall_disable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
++	reg &= ~(1 << n);
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
++}
++
++static void fh_pwm_interrupt_finishonce_enable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
++	reg |= (1 << (n + 8));
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
++}
++
++static void fh_pwm_interrupt_finishonce_disable(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
++	reg &= ~(1 << (n + 8));
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
++}
++
++static unsigned int fh_pwm_interrupt_get_status(void)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
++	reg &= readl(fh_pwm_drv->base + OFFSET_PWM_INT_ENABLE);
++
++	return reg;
++}
++
++static void fh_pwm_interrupt_finishonce_clear(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
++
++	reg &= ~(1 << (n + 8));
++
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
++}
++
++static void fh_pwm_interrupt_finishall_clear(unsigned int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
++
++	reg &= ~(1 << n);
++
++	writel(reg, fh_pwm_drv->base + OFFSET_PWM_INT_STATUS);
++}
++
++static void fh_pwm_set_config(struct fh_pwm_chip_data *chip_data)
++{
++	unsigned int clk_rate = clk_get_rate(fh_pwm_drv->clk);
++	unsigned int ctrl = 0, period, duty, delay, phase;
++
++	if (!clk_rate) {
++		pr_err("PWM: clock rate is 0\n");
++		return;
++	}
++
++	fh_pwm_config_disable(chip_data->id);
++
++	period = chip_data->config.period_ns / (NSEC_PER_SEC / clk_rate);
++	duty = chip_data->config.duty_ns / (NSEC_PER_SEC / clk_rate);
++	delay = chip_data->config.delay_ns / (NSEC_PER_SEC / clk_rate);
++	phase = chip_data->config.phase_ns / (NSEC_PER_SEC / clk_rate);
++
++	if (period > 0x1ffffff) {
++		pr_err("PWM: period exceeds the 25-bit register field, clamping\n");
++		period = 0x1ffffff;
++	}
++
++	if (duty > 0x1ffffff) {
++		pr_err("PWM: duty exceeds the 25-bit register field, clamping\n");
++		duty = 0x1ffffff;
++	}
++
++	PRINT_DBG("set period: 0x%x\n", period);
++	PRINT_DBG("set duty: 0x%x\n", duty);
++	PRINT_DBG("set phase: 0x%x\n", phase);
++	PRINT_DBG("set delay: 0x%x\n", delay);
++
++	writel(period, fh_pwm_drv->base + OFFSET_PWM_CFG0(chip_data->id));
++	writel(duty, fh_pwm_drv->base + OFFSET_PWM_CFG1(chip_data->id));
++	writel(phase, fh_pwm_drv->base + OFFSET_PWM_CFG2(chip_data->id));
++	writel(delay, fh_pwm_drv->base + OFFSET_PWM_CFG3(chip_data->id));
++
++	if (chip_data->config.delay_ns)
++		ctrl |= 1 << 3;
++
++	if(!chip_data->config.pulses)
++		ctrl |= 1 << 0;
++
++	ctrl |= (chip_data->config.stop & 0x3) << 1;
++
++	writel(ctrl, fh_pwm_drv->base + OFFSET_PWM_CTRL(chip_data->id));
++
++	PRINT_DBG("set ctrl: 0x%x\n", ctrl);
++
++	ctrl = readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
++
++	ctrl |= (chip_data->config.stop >> 4) << (8 + chip_data->id);
++
++	writel(ctrl, fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1);
++
++	writel(chip_data->config.pulses,
++		fh_pwm_drv->base + OFFSET_PWM_CFG4(chip_data->id));
++	PRINT_DBG("set pulses: 0x%x\n", chip_data->config.pulses);
++
++	if (chip_data->config.finish_once)
++		fh_pwm_interrupt_finishonce_enable(chip_data->id);
++	else
++		fh_pwm_interrupt_finishonce_disable(chip_data->id);
++
++	if (chip_data->config.finish_all)
++		fh_pwm_interrupt_finishall_enable(chip_data->id);
++	else
++		fh_pwm_interrupt_finishall_disable(chip_data->id);
++
++	if (chip_data->config.shadow_enable)
++		fh_pwm_shadow_enable(chip_data->id);
++	else
++		fh_pwm_shadow_disable(chip_data->id);
++
++	fh_pwm_config_enable(chip_data->id);
++
++}
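++
++/*
++ * Worked example (sketch, values are illustrative): with pwm_clk at
++ * 50 MHz one tick is 20 ns, so duty_ns = 5000 and period_ns = 10000
++ * program CFG1 = 250 and CFG0 = 500 respectively.
++ */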
++
++static void fh_pwm_get_config(struct fh_pwm_chip_data *chip_data)
++{
++	unsigned int clk_rate = clk_get_rate(fh_pwm_drv->clk);
++	unsigned int ctrl = 0, period, duty, delay, phase, pulses,
++			status0, status1, status2;
++
++	if (!clk_rate) {
++		pr_err("PWM: clock rate is 0\n");
++		return;
++	}
++
++	period = readl(fh_pwm_drv->base + OFFSET_PWM_CFG0(chip_data->id));
++	duty = readl(fh_pwm_drv->base + OFFSET_PWM_CFG1(chip_data->id));
++	phase = readl(fh_pwm_drv->base + OFFSET_PWM_CFG2(chip_data->id));
++	delay = readl(fh_pwm_drv->base + OFFSET_PWM_CFG3(chip_data->id));
++	pulses = readl(fh_pwm_drv->base + OFFSET_PWM_CFG4(chip_data->id));
++	ctrl = readl(fh_pwm_drv->base + OFFSET_PWM_CTRL(chip_data->id));
++	status0 = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS0(chip_data->id));
++	status1 = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS1(chip_data->id));
++	status2 = readl(fh_pwm_drv->base + OFFSET_PWM_STATUS2(chip_data->id));
++
++
++	PRINT_DBG("==============================\n");
++	PRINT_DBG("pwm%d register config:\n", chip_data->id);
++	PRINT_DBG("\t\tperiod: 0x%x\n", period);
++	PRINT_DBG("\t\tduty: 0x%x\n", duty);
++	PRINT_DBG("\t\tphase: 0x%x\n", phase);
++	PRINT_DBG("\t\tdelay: 0x%x\n", delay);
++	PRINT_DBG("\t\tpulses: 0x%x\n", pulses);
++	PRINT_DBG("\t\tctrl: 0x%x\n", ctrl);
++	PRINT_DBG("\t\tstatus0: 0x%x\n", status0);
++	PRINT_DBG("\t\tstatus1: 0x%x\n", status1);
++	PRINT_DBG("\t\tstatus2: 0x%x\n", status2);
++
++	chip_data->config.period_ns = period * (NSEC_PER_SEC / clk_rate);
++	chip_data->config.duty_ns = duty * (NSEC_PER_SEC / clk_rate);
++
++	PRINT_DBG("\t\tclk_rate: %d\n", clk_rate);
++	PRINT_DBG("\t\tconfig.period_ns: %d\n", chip_data->config.period_ns);
++	PRINT_DBG("\t\tconfig.duty_ns: %d\n", chip_data->config.duty_ns);
++	PRINT_DBG("==============================\n\n");
++
++	chip_data->config.phase_ns = phase * (NSEC_PER_SEC / clk_rate);
++	chip_data->config.delay_ns = delay * (NSEC_PER_SEC / clk_rate);
++	chip_data->config.pulses = pulses;
++	chip_data->config.stop = (ctrl >> 1) & 0x3;
++	chip_data->config.percent = (chip_data->config.period_ns >= 100) ?
++			chip_data->config.duty_ns /
++			(chip_data->config.period_ns / 100) : 0;
++
++	chip_data->status.busy = (status2 >> 4) & 0x1;
++	chip_data->status.error = (status2 >> 3) & 0x1;
++	chip_data->status.total_cnt = status1;
++	chip_data->status.done_cnt = status0;
++}
++
++int fh_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
++		  int duty_ns, int period_ns)
++{
++	struct fh_pwm_chip_data *chip_data;
++
++	/* use the per-channel data allocated at probe time so the other
++	 * config fields (pulses, delay, phase, ...) are preserved */
++	chip_data = pwm_get_chip_data(pwm);
++	if (!chip_data) {
++		pr_err("pwm[%u], no chip data\n", pwm->hwpwm);
++		return -ENXIO;
++	}
++
++	chip_data->config.duty_ns = duty_ns;
++	chip_data->config.period_ns = period_ns;
++
++	fh_pwm_set_config(chip_data);
++
++	return 0;
++}
++
++int fh_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
++{
++	fh_pwm_output_enable(pwm->hwpwm);
++	return 0;
++}
++
++void fh_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
++{
++	fh_pwm_output_disable(pwm->hwpwm);
++}
++
++static int fh_pwm_open(struct inode *inode, struct file *file)
++{
++	return 0;
++}
++
++static int fh_pwm_release(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++
++static long fh_pwm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++	int ret = 0;
++	struct fh_pwm_chip_data *chip_data;
++	struct fh_pwm_chip_data data;
++	unsigned int val;
++
++	if (unlikely(_IOC_TYPE(cmd) != PWM_IOCTL_MAGIC)) {
++		pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
++			__func__, _IOC_TYPE(cmd), -ENOTTY);
++		return -ENOTTY;
++	}
++
++	if (unlikely(_IOC_NR(cmd) > PWM_IOCTL_MAXNR)) {
++		pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
++			__func__, _IOC_NR(cmd), -ENOTTY);
++		return -ENOTTY;
++	}
++
++	if (_IOC_DIR(cmd) & _IOC_READ)
++		ret = !access_ok(VERIFY_WRITE,
++			(void __user *)arg, _IOC_SIZE(cmd));
++	else if (_IOC_DIR(cmd) & _IOC_WRITE)
++		ret = !access_ok(VERIFY_READ,
++			(void __user *)arg, _IOC_SIZE(cmd));
++
++	if (ret) {
++		pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
++		       __func__, _IOC_NR(cmd), -EACCES);
++		return -EACCES;
++	}
++
++	switch (cmd) {
++	case SET_PWM_ENABLE:
++		/* the argument is a user pointer to a __u32 mask */
++		if (get_user(val, (unsigned int __user *)arg))
++			return -EFAULT;
++		fh_pwm_output_enable(val);
++		break;
++	case ENABLE_PWM:
++	case DISABLE_PWM:
++	case SET_PWM_DUTY_CYCLE:
++	case GET_PWM_DUTY_CYCLE:
++	case SET_PWM_DUTY_CYCLE_PERCENT:
++		/* copy the whole struct in before looking at ->id: arg is
++		 * a user-space pointer and must not be dereferenced
++		 * directly */
++		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
++			return -EFAULT;
++		if (data.id < 0 || data.id >= fh_pwm_drv->chip.npwm)
++			return -EINVAL;
++		chip_data = pwm_get_chip_data(&fh_pwm_drv->chip.pwms[data.id]);
++		if (!chip_data)
++			return -ENXIO;
++		/* take only the config from user space; the kernel-side
++		 * callback pointers are left untouched */
++		chip_data->config = data.config;
++
++		switch (cmd) {
++		case ENABLE_PWM:
++			fh_pwm_output_enable(chip_data->id);
++			break;
++		case DISABLE_PWM:
++			fh_pwm_output_disable(chip_data->id);
++			break;
++		case SET_PWM_DUTY_CYCLE:
++			PRINT_DBG("ioctl: SET_PWM_DUTY_CYCLE, id: %d, "
++				"duty: %u, period: %u ns\n", chip_data->id,
++				chip_data->config.duty_ns,
++				chip_data->config.period_ns);
++			fh_pwm_set_config(chip_data);
++			break;
++		case GET_PWM_DUTY_CYCLE:
++			fh_pwm_get_config(chip_data);
++			data.config = chip_data->config;
++			data.status = chip_data->status;
++			if (copy_to_user((void __user *)arg, &data,
++					 sizeof(data)))
++				return -EFAULT;
++			break;
++		case SET_PWM_DUTY_CYCLE_PERCENT:
++			if (chip_data->config.percent > 100) {
++				pr_err("ERROR: percentage is over 100\n");
++				return -EINVAL;
++			}
++			chip_data->config.duty_ns =
++				chip_data->config.period_ns *
++				chip_data->config.percent / 100;
++			fh_pwm_set_config(chip_data);
++			break;
++		}
++		break;
++	}
++
++
++	return ret;
++}
++
++static const struct file_operations fh_pwm_fops =
++{
++	.owner			= THIS_MODULE,
++	.open			= fh_pwm_open,
++	.release		= fh_pwm_release,
++	.unlocked_ioctl		= fh_pwm_ioctl,
++};
++
++static struct miscdevice fh_pwm_misc =
++{
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = DEVICE_NAME,
++	.fops = &fh_pwm_fops,
++};
++
++static const struct pwm_ops fh_pwm_ops =
++{
++	.config = fh_pwm_config,
++	.enable = fh_pwm_enable,
++	.disable = fh_pwm_disable,
++	.owner = THIS_MODULE,
++};
++
++
++static void del_char(char *str, char ch)
++{
++	char *p = str;
++	char *q = str;
++	while (*q) {
++		if (*q != ch)
++			*p++ = *q;
++		q++;
++	}
++	*p = '\0';
++}
++
++static ssize_t fh_pwm_proc_write(struct file *filp,
++                                 const char __user *buf, size_t len, loff_t *off)
++{
++	int i;
++	char message[64] = {0};
++	char *const delim = ",";
++	char *cur = message;
++	char *param_str[8];
++	unsigned int param[8];
++	struct fh_pwm_chip_data *chip_data;
++
++	len = (len > sizeof(message) - 1) ? sizeof(message) - 1 : len;
++
++	if (copy_from_user(message, buf, len))
++		return -EFAULT;
++
++	for (i = 0; i < 8; i++) {
++		param_str[i] = strsep(&cur, delim);
++		if (!param_str[i]) {
++			pr_err("%s: ERROR: parameter[%d] is empty\n",
++                               __func__, i);
++			pr_err("id, switch_mask, duty_ns, period_ns, "
++                               "numofpules, delay_ns, phase_ns, stop_status\n");
++			pr_err("eg. echo '0,1,5000,10000,0,0,1000,0' > /proc/driver/pwm\n");
++			return -EINVAL;
++		} else {
++			del_char(param_str[i], ' ');
++			del_char(param_str[i], '\n');
++			param[i] = (unsigned int)simple_strtoul(param_str[i], NULL, 10);
++			if (param[i] < 0)
++			{
++				pr_err("%s: ERROR: parameter[%d] is incorrect\n", __func__, i);
++				pr_err("id, switch_mask, duty_ns, period_ns, numofpules, "
++                                       "delay_ns, phase_ns, stop_status\n");
++				pr_err("eg. echo '0,1,5000,10000,0,0,1000,0' > /proc/driver/pwm\n");
++				return -EINVAL;
++			}
++		}
++	}
++
++	if (param[0] >= fh_pwm_drv->chip.npwm)
++		return -EINVAL;
++
++	chip_data = kzalloc(sizeof(struct fh_pwm_chip_data), GFP_KERNEL);
++	if (chip_data == NULL) {
++		pr_err("pwm[%u], chip data malloc failed\n", param[0]);
++		return -ENOMEM;
++	}
++
++	chip_data->id = param[0];
++	chip_data->config.duty_ns = param[2];
++	chip_data->config.period_ns = param[3];
++	chip_data->config.pulses = param[4];
++	chip_data->config.delay_ns = param[5];
++	chip_data->config.phase_ns = param[6];
++	chip_data->config.stop = param[7];
++
++	fh_pwm_set_config(chip_data);
++
++	printk("set pwm %d, enable: 0x%x, duty cycle: %u ns, period cycle: %u,"
++			"numofpulse: %d, delay: %d ns, phase: %d ns, stop: %d\n",
++			param[0], param[1], param[2], param[3],
++					param[4], param[5], param[6], param[7]);
++
++	fh_pwm_output_mask(param[1]);
++
++	kfree(chip_data);
++
++	return len;
++}
++
++static void *v_seq_start(struct seq_file *s, loff_t *pos)
++{
++	static unsigned long counter = 0;
++	if (*pos == 0)
++		return &counter;
++	else {
++		*pos = 0;
++		return NULL;
++	}
++}
++
++static void *v_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++	(*pos)++;
++	return NULL;
++}
++
++static void v_seq_stop(struct seq_file *s, void *v)
++{
++
++}
++
++static int v_seq_show(struct seq_file *sfile, void *v)
++{
++	int i;
++	seq_printf(sfile, "\nPWM Status:\n");
++
++	seq_printf(sfile, "global_ctrl0: 0x%x\n",
++                   readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL0));
++	seq_printf(sfile, "global_ctrl1: 0x%x\n",
++                   readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL1));
++	seq_printf(sfile, "global_ctrl2: 0x%x\n",
++                   readl(fh_pwm_drv->base + OFFSET_PWM_GLOBAL_CTRL2));
++
++	for (i = 0; i < fh_pwm_drv->chip.npwm; i++) {
++		struct fh_pwm_chip_data *chip_data;
++
++		chip_data = pwm_get_chip_data(&fh_pwm_drv->chip.pwms[i]);
++		fh_pwm_get_config(chip_data);
++
++		seq_printf(sfile, "id: %d \t%s, duty_ns: %u, period_ns: %u\n",
++			   chip_data->id,
++			   (chip_data->status.busy) ? "ENABLE" : "DISABLE",
++			   chip_data->config.duty_ns,
++			   chip_data->config.period_ns);
++	}
++	return 0;
++}
++
++static const struct seq_operations pwm_seq_ops =
++{
++	.start = v_seq_start,
++	.next = v_seq_next,
++	.stop = v_seq_stop,
++	.show = v_seq_show
++};
++
++static int fh_pwm_proc_open(struct inode *inode, struct file *file)
++{
++	return seq_open(file, &pwm_seq_ops);
++}
++
++
++static struct file_operations fh_pwm_proc_ops =
++{
++	.owner	= THIS_MODULE,
++	.open	= fh_pwm_proc_open,
++	.read	= seq_read,
++	.write	= fh_pwm_proc_write,
++	.release = seq_release,
++};
++
++static irqreturn_t fh_pwm_interrupt(int this_irq, void *param)
++{
++	unsigned int status, stat_once, stat_all;
++	struct fh_pwm_chip_data *chip_data;
++	unsigned int irq;
++
++	status = fh_pwm_interrupt_get_status();
++
++	status &= 0xffff;
++
++	stat_once = (status >> 8) & 0xff;
++	stat_all = status & 0xff;
++
++	if (stat_once) {
++		irq = fls(stat_once);
++		chip_data = pwm_get_chip_data(&fh_pwm_drv->chip.pwms[irq - 1]);
++		if (chip_data && chip_data->finishonce_callback) {
++			/* chip_data->finishonce_callback(chip_data); */
++		} else {
++			pr_err("callback is empty, status: 0x%x\n", status);
++		}
++		fh_pwm_interrupt_finishonce_clear(irq - 1);
++		fh_pwm_interrupt_finishonce_disable(irq - 1);
++		fh_pwm_interrupt_finishonce_enable(irq - 1);
++	}
++
++	if (stat_all) {
++		irq = fls(stat_all);
++		chip_data = pwm_get_chip_data(&fh_pwm_drv->chip.pwms[irq - 1]);
++		if (chip_data && chip_data->finishall_callback) {
++			/* chip_data->finishall_callback(chip_data); */
++		} else {
++			pr_err("callback is empty, status: 0x%x\n", status);
++		}
++		fh_pwm_interrupt_finishall_clear(irq - 1);
++		fh_pwm_interrupt_finishall_disable(irq - 1);
++		fh_pwm_interrupt_finishall_enable(irq - 1);
++	}
++
++	return IRQ_HANDLED;
++}
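++
++/*
++ * Note: fls() yields the 1-based index of the highest set bit, so each
++ * pass services only the highest-numbered pending channel per group;
++ * with several channels pending the handler relies on the remaining
++ * status bits keeping the interrupt asserted.
++ */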
++
++static int __devinit fh_pwm_probe(struct platform_device *pdev)
++{
++	int err, i;
++	struct resource *res;
++
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (res == NULL) {
++		err = -ENXIO;
++		goto fail_no_mem_resource;
++	}
++
++	res = request_mem_region(res->start, resource_size(res), pdev->name);
++	if (res == NULL) {
++		err = -EBUSY;
++		goto fail_no_mem_resource;
++	}
++
++	fh_pwm_drv = kzalloc(sizeof(struct fh_pwm_driver), GFP_KERNEL);
++	if (!fh_pwm_drv) {
++		err = -ENOMEM;
++		goto fail_no_ioremap;
++	}
++
++	fh_pwm_drv->base = ioremap(res->start, resource_size(res));
++	if (fh_pwm_drv->base == NULL) {
++		err = -ENXIO;
++		goto fail_no_ioremap;
++	}
++
++	fh_pwm_drv->clk = clk_get(&pdev->dev, "pwm_clk");
++
++	if (IS_ERR(fh_pwm_drv->clk)) {
++		err = PTR_ERR(fh_pwm_drv->clk);
++		goto fail_no_clk;
++	}
++
++	clk_enable(fh_pwm_drv->clk);
++
++	PRINT_DBG("%s: clk_rate: %lu\n", __func__, clk_get_rate(fh_pwm_drv->clk));
++
++	err = platform_get_irq(pdev, 0);
++	if (err < 0) {
++		dev_err(&pdev->dev, "no irq resource?\n");
++		goto fail_no_clk;
++	}
++
++	fh_pwm_drv->irq = err;
++
++	err = request_irq(fh_pwm_drv->irq,
++                          fh_pwm_interrupt, IRQF_DISABLED, pdev->name, fh_pwm_drv);
++	if (err) {
++		dev_err(&pdev->dev, "failure requesting irq %i\n", fh_pwm_drv->irq);
++		goto fail_no_clk;
++	}
++
++	err = misc_register(&fh_pwm_misc);
++	if (err < 0) {
++		pr_err("%s: ERROR: %s registration failed",
++		       __func__, DEVICE_NAME);
++		return -ENXIO;
++	}
++
++	fh_pwm_drv->chip.dev = &pdev->dev;
++	fh_pwm_drv->chip.ops = &fh_pwm_ops;
++	fh_pwm_drv->chip.base = pdev->id;
++	fh_pwm_drv->chip.npwm = CONFIG_FH_PWM_NUM;
++
++	err = pwmchip_add(&fh_pwm_drv->chip);
++	if (err < 0) {
++		pr_err("%s: ERROR: %s pwmchip_add failed",
++		       __func__, DEVICE_NAME);
++		return err;
++	}
++
++	for (i = 0; i < fh_pwm_drv->chip.npwm; i++) {
++		struct fh_pwm_chip_data *chip_data;
++
++		chip_data = kzalloc(sizeof(struct fh_pwm_chip_data), GFP_KERNEL);
++		if (chip_data == NULL) {
++			pr_err("pwm[%d], chip data malloc failed\n", i);
++			continue;
++		}
++
++		chip_data->id = i;
++
++		pwm_set_chip_data(&fh_pwm_drv->chip.pwms[i], chip_data);
++	}
++
++	fh_pwm_output_mask(0);
++
++	platform_set_drvdata(pdev, fh_pwm_drv);
++
++	pr_info("PWM driver, Number: %d, IO base addr: 0x%p\n",
++			fh_pwm_drv->chip.npwm, fh_pwm_drv->base);
++
++	fh_pwm_drv->proc_file = create_proc_entry(FH_PWM_PROC_FILE, 0644, NULL);
++
++	if (fh_pwm_drv->proc_file)
++		fh_pwm_drv->proc_file->proc_fops = &fh_pwm_proc_ops;
++	else
++		pr_err("%s: ERROR: %s proc file create failed",
++		       __func__, DEVICE_NAME);
++
++	dev_dbg(&pdev->dev, "PWM probe successful, IO base addr: %p\n",
++		fh_pwm_drv->base);
++	return 0;
++
++fail_no_clk:
++	iounmap(fh_pwm_drv->base);
++fail_no_ioremap:
++	release_mem_region(res->start, resource_size(res));
++fail_no_mem_resource:
++	return err;
++}
++
++static int __exit fh_pwm_remove(struct platform_device *pdev)
++{
++	int err, i;
++	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++	for (i = 0; i < fh_pwm_drv->chip.npwm; i++)
++		kfree(fh_pwm_drv->chip.pwms[i].chip_data);
++
++	err = pwmchip_remove(&fh_pwm_drv->chip);
++	if (err < 0)
++		return err;
++
++	dev_dbg(&pdev->dev, "pwm driver removed\n");
++
++	fh_pwm_output_mask(0);
++	clk_disable(fh_pwm_drv->clk);
++	free_irq(fh_pwm_drv->irq, fh_pwm_drv);
++	iounmap(fh_pwm_drv->base);
++	release_mem_region(res->start, resource_size(res));
++	platform_set_drvdata(pdev, NULL);
++	misc_deregister(&fh_pwm_misc);
++
++	kfree(fh_pwm_drv);
++	fh_pwm_drv = NULL;
++
++	return 0;
++}
++
++static struct platform_driver fh_pwm_driver =
++{
++	.driver	=
++	{
++		.name = DEVICE_NAME,
++		.owner = THIS_MODULE,
++	},
++	.probe		= fh_pwm_probe,
++	.remove		= __exit_p(fh_pwm_remove),
++};
++
++static int __init fh_pwm_init(void)
++{
++	return platform_driver_register(&fh_pwm_driver);
++}
++
++static void __exit fh_pwm_exit(void)
++{
++
++	platform_driver_unregister(&fh_pwm_driver);
++
++}
++
++module_init(fh_pwm_init);
++module_exit(fh_pwm_exit);
++
++
++MODULE_AUTHOR("fullhan");
++
++MODULE_DESCRIPTION("FH PWM driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
+diff --git a/drivers/pwm/pwmv2-fullhan.h b/drivers/pwm/pwmv2-fullhan.h
+new file mode 100644
+index 00000000..08f58d30
+--- /dev/null
++++ b/drivers/pwm/pwmv2-fullhan.h
+@@ -0,0 +1,63 @@
++#ifndef FH_PWMV2_H_
++#define FH_PWMV2_H_
++
++#include <linux/slab.h>
++#include <linux/ioctl.h>
++
++#define DEVICE_NAME				"fh_pwm"
++#define FH_PWM_PROC_FILE			"driver/pwm"
++
++#define REG_PWM_CTRL				(0x00)
++#define REG_PWM_CMD(n)				(((n) * 4) + REG_PWM_CTRL + 4)
++
++#define PWM_IOCTL_MAGIC 			'p'
++#define ENABLE_PWM	 			_IOWR(PWM_IOCTL_MAGIC, 0, __u32)
++#define DISABLE_PWM		 		_IOWR(PWM_IOCTL_MAGIC, 1, __u32)
++
++#define SET_PWM_DUTY_CYCLE			_IOWR(PWM_IOCTL_MAGIC, 2, __u32)
++#define GET_PWM_DUTY_CYCLE 		    	_IOWR(PWM_IOCTL_MAGIC, 3, __u32)
++#define SET_PWM_DUTY_CYCLE_PERCENT  		_IOWR(PWM_IOCTL_MAGIC, 4, __u32)
++#define SET_PWM_ENABLE		 		_IOWR(PWM_IOCTL_MAGIC, 5, __u32)
++
++#define PWM_IOCTL_MAXNR 			8
++
++struct fh_pwm_config
++{
++	unsigned int period_ns;
++	unsigned int duty_ns;
++	unsigned int pulses;
++#define FH_PWM_STOPLVL_LOW		(0x0)
++#define FH_PWM_STOPLVL_HIGH		(0x3)
++#define FH_PWM_STOPLVL_KEEP		(0x1)
++
++#define FH_PWM_STOPCTRL_ATONCE		(0x00)
++#define FH_PWM_STOPCTRL_AFTERFINISH	(0x10)
++	unsigned int stop;
++	unsigned int delay_ns;
++	unsigned int phase_ns;
++	unsigned int percent;
++	unsigned int finish_once;
++	unsigned int finish_all;
++	unsigned int shadow_enable;
++};
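++
++/*
++ * The stop field packs two settings, as consumed by fh_pwm_set_config():
++ * bits [1:0] select the output level after stopping (FH_PWM_STOPLVL_*)
++ * and bit 4 selects stop-at-once vs stop-after-finish (FH_PWM_STOPCTRL_*).
++ */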
++
++struct fh_pwm_status
++{
++	unsigned int done_cnt;
++	unsigned int total_cnt;
++	unsigned int busy;
++	unsigned int error;
++};
++
++struct fh_pwm_chip_data
++{
++	int id;
++	struct fh_pwm_config config;
++	struct fh_pwm_status status;
++	void (*finishall_callback)(struct fh_pwm_chip_data *data);
++	void (*finishonce_callback)(struct fh_pwm_chip_data *data);
++};
++
++
++
++#endif /* FH_PWMV2_H_ */
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index ce2aabf5..c5455d51 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -1060,4 +1060,15 @@ config RTC_DRV_PUV3
+ 	  This drive can also be built as a module. If so, the module
+ 	  will be called rtc-puv3.
+ 
++
++config RTC_DRV_FH
++	tristate "FH On-Chip RTC"
++	depends on RTC_CLASS
++	help
++	  Say Y here to enable support for the on-chip RTC found in
++	  FH processors.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called rtc-fh.
++
+ endif # RTC_CLASS
+diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
+index 0ffefe87..7347a414 100644
+--- a/drivers/rtc/Makefile
++++ b/drivers/rtc/Makefile
+@@ -108,3 +108,5 @@ obj-$(CONFIG_RTC_DRV_VT8500)	+= rtc-vt8500.o
+ obj-$(CONFIG_RTC_DRV_WM831X)	+= rtc-wm831x.o
+ obj-$(CONFIG_RTC_DRV_WM8350)	+= rtc-wm8350.o
+ obj-$(CONFIG_RTC_DRV_X1205)	+= rtc-x1205.o
++
++obj-$(CONFIG_RTC_DRV_FH)	+= rtc-fh.o
+\ No newline at end of file
+diff --git a/drivers/rtc/rtc-fh.c b/drivers/rtc/rtc-fh.c
+new file mode 100644
+index 00000000..a6e7ba1a
+--- /dev/null
++++ b/drivers/rtc/rtc-fh.c
+@@ -0,0 +1,902 @@
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/string.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/rtc.h>
++#include <linux/bcd.h>
++#include <linux/clk.h>
++#include <linux/log2.h>
++#include <linux/slab.h>
++#include <linux/io.h>
++#include <mach/rtc.h>
++#include <mach/fh_sadc.h>
++#include <linux/delay.h>
++#include <linux/proc_fs.h>
++
++/* #define FH_RTC_DEBUG_PRINT */
++
++#ifdef FH_RTC_DEBUG_PRINT
++#define RTC_PRINT_DBG(fmt, args...) \
++	printk(KERN_INFO "[FH_RTC_DEBUG]: "); \
++	printk(fmt, ## args)
++#else
++#define RTC_PRINT_DBG(fmt, args...)  do { } while (0)
++#endif
++
++#define RTC_MAGIC	0x55555555
++#define RTC_PHASE	0x03840384
++
++#define SYNC_LOOP_COUNT 100
++
++struct fh_rtc_controller {
++	void __iomem *regs;
++	unsigned int irq;
++	unsigned int paddr;
++	unsigned int base_year;
++	unsigned int base_month;
++	unsigned int base_day;
++	struct rtc_device *rtc;
++	struct clk *clk;
++	struct proc_dir_entry *proc_rtc_entry;
++	int sadc_channel;
++
++	struct workqueue_struct *wq;
++	struct delayed_work self_adjust;
++};
++
++struct fh_rtc_controller *fh_rtc;
++
++enum {
++
++	TIME_FUNC = 0,
++	ALARM_FUNC,
++
++};
++
++/* value of SADC channel for reference to get current temperature */
++static long SadcValue[28] = {
++	260, 293, 332, 375, 426,
++	483, 548, 621, 706, 800,
++	906, 1022, 1149, 1287, 1435,
++	1590, 1750, 1913, 2075, 2233,
++	2385, 2527, 2656, 2772, 2873,
++	2960, 3034, 3094
++};
++
++/* value of temperature for reference */
++static int Temperature1[28] = {
++	95000, 90000, 85000, 80000, 75000,
++	70000, 65000, 60000, 55000, 50000,
++	45000, 40000, 35000, 30000, 25000,
++	20000, 15000, 10000, 4000, 0,
++	-5000, -10000, -15000, -20000, -25000,
++	-30000, -35000, -40000
++};
++
++/* value of temperature for reference to get current deviation */
++static int Temperature2[136] = {
++	-40000, -39000, -38000, -37000, -36000, -35000,
++	-34000, -33000, -32000, -31000, -30000, -29000,
++	-28000, -27000, -26000, -25000, -24000, -23000,
++	-22000, -21000, -20000, -19000, -18000, -17000,
++	-16000, -15000, -14000, -13000, -12000, -11000,
++	-10000, -9000, -8000, -7000, -6000, -5000, -4000,
++	-3000, -2000, -1000, 0, 1000, 2000, 3000, 4000,
++	5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000,
++	13000, 14000, 15000, 16000, 17000, 18000, 19000,
++	20000, 21000, 22000, 23000, 24000, 25000, 26000,
++	27000, 28000, 29000, 30000, 31000, 32000, 33000,
++	34000, 35000, 36000, 37000, 38000, 39000, 40000,
++	41000, 42000, 43000, 44000, 45000, 46000, 47000,
++	48000, 49000, 50000, 51000, 52000, 53000, 54000,
++	55000, 56000, 57000, 58000, 59000, 60000, 61000,
++	62000, 63000, 64000, 65000, 66000, 67000, 68000,
++	69000, 70000, 71000, 72000, 73000, 74000, 75000,
++	76000, 77000, 78000, 79000, 80000, 81000, 82000,
++	83000, 84000, 85000, 86000, 87000, 88000, 89000,
++	90000, 91000, 92000, 93000, 94000, 95000
++};
++
++/* the value of deviation to adjust rtc clock */
++static long Deviation[136] = {
++	1690000, 1638400, 1587600, 1537600, 1488400, 1440000,
++	1392400, 1345600, 1299600, 1254400, 1210000, 1166400,
++	1123600, 1081600, 1040400, 1000000, 960400, 921600,
++	883600, 846400, 810000, 774400, 739600, 705600, 672400,
++	640000, 608400, 577600, 547600, 518400, 490000, 462400,
++	435600, 409600, 384400, 360000, 336400, 313600, 291600,
++	270400, 250000, 230400, 211600, 193600, 176400, 160000,
++	144400, 129600, 115600, 102400, 90000, 78400, 67600, 57600,
++	48400, 40000, 32400, 25600, 19600, 14400, 10000, 6400,
++	3600, 1600, 400, 0, 400, 1600, 3600, 6400, 10000, 14400,
++	19600, 25600, 32400, 40000, 48400, 57600, 67600, 78400,
++	90000, 102400, 115600, 129600, 144400, 160000, 176400,
++	193600, 211600, 230400, 250000, 270400, 291600, 313600,
++	336400, 360000, 384400, 409600, 435600, 462400, 490000,
++	518400, 547600, 577600, 608400, 640000, 672400, 705600,
++	739600, 774400, 810000, 846400, 883600, 921600, 960400,
++	1000000, 1040400, 1081600, 1123600, 1166400, 1210000,
++	1254400, 1299600, 1345600, 1392400, 1440000, 1488400,
++	1537600, 1587600, 1638400, 1690000, 1742400, 1795600,
++	1849600, 1904400, 1960000
++};
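++
++/*
++ * Interpretation (hedged, inferred from the arithmetic below):
++ * Deviation[] matches the usual 32.768 kHz tuning-fork parabola
++ * (~0.04 ppm/K^2 around the 25 degC turnover, where the entry is 0)
++ * expressed in 1e-4 ppm units. One second-offset step trims the clock
++ * by 1/32768 s per second = 30.5176 ppm = 305176 in these units, and
++ * one minute-offset step is a 60th of that, ~5086 -- the divisors
++ * used by fh_adjust_rtc() below.
++ */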
++
++static int accelerate_second_rtc(int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_rtc->regs + FH_RTC_OFFSET);
++	reg &= ~(0x7000000);
++	reg |= 0x30000000 | ((n & 0x7) << 24);
++	writel(reg, fh_rtc->regs + FH_RTC_OFFSET);
++	return 0;
++}
++
++static int accelerate_minute_rtc(int m)
++{
++	unsigned int reg;
++
++	reg = readl(fh_rtc->regs + FH_RTC_OFFSET);
++	reg &= ~(0x3F0000);
++	reg |= 0x30000000 | ((m & 0x3f) << 16);
++	writel(reg, fh_rtc->regs + FH_RTC_OFFSET);
++	return 0;
++}
++
++static int __maybe_unused slow_down_second_rtc(int n)
++{
++	unsigned int reg;
++
++	reg = readl(fh_rtc->regs + FH_RTC_OFFSET);
++	reg &= ~(0x7000000);
++	reg |= 0x10000000 | ((n & 0x7) << 24);
++	writel(reg, fh_rtc->regs + FH_RTC_OFFSET);
++	return 0;
++}
++
++static int __maybe_unused slow_down_minute_rtc(int m)
++{
++	unsigned int reg;
++
++	reg = readl(fh_rtc->regs + FH_RTC_OFFSET);
++	reg &= ~(0x3F0000);
++	reg |= 0x10000000 | ((m & 0x3f) << 16);
++	writel(reg, fh_rtc->regs + FH_RTC_OFFSET);
++	return 0;
++}
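++
++/*
++ * FH_RTC_OFFSET layout as implied by the helpers above: bits [26:24]
++ * hold the second trim, bits [21:16] the minute trim, and bits
++ * [29:28] select the direction (0x3 = speed up, 0x1 = slow down).
++ */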
++
++static unsigned int fh_rtc_get_hw_sec_data(unsigned int func_switch)
++{
++
++	unsigned int ret_sec, raw_value, sec_value;
++	unsigned int min_value, hour_value, day_value;
++
++	if (func_switch == TIME_FUNC)
++		raw_value = fh_rtc_get_time(fh_rtc->regs);
++	else
++		raw_value = fh_rtc_get_alarm_time(fh_rtc->regs);
++
++	sec_value = FH_GET_RTC_SEC(raw_value);
++	min_value = FH_GET_RTC_MIN(raw_value);
++	hour_value = FH_GET_RTC_HOUR(raw_value);
++	day_value = FH_GET_RTC_DAY(raw_value);
++	ret_sec = (day_value * 86400) + (hour_value * 3600)
++			+ (min_value * 60) + sec_value;
++
++	return ret_sec;
++
++}
++
++static void fh_rtc_set_hw_sec_data(struct rtc_time *rtc_tm,
++		unsigned int func_switch) {
++
++	unsigned int raw_value, sec_value, min_value;
++	unsigned int hour_value, day_value;
++
++	day_value = rtc_year_days(rtc_tm->tm_mday, rtc_tm->tm_mon,
++			rtc_tm->tm_year+1900);
++	day_value += (rtc_tm->tm_year-70)*365
++			+ ELAPSED_LEAP_YEARS(rtc_tm->tm_year);
++
++	hour_value = rtc_tm->tm_hour;
++	min_value = rtc_tm->tm_min;
++	sec_value = rtc_tm->tm_sec;
++
++	raw_value = (day_value << DAY_BIT_START)
++			| (hour_value << HOUR_BIT_START)
++			| (min_value << MIN_BIT_START)
++			| (sec_value << SEC_BIT_START);
++
++	if (func_switch == TIME_FUNC)
++		fh_rtc_set_time(fh_rtc->regs, raw_value);
++	else
++		fh_rtc_set_alarm_time(fh_rtc->regs, raw_value);
++
++}
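++
++/*
++ * Example (sketch): 1970-01-02 00:00:10 UTC is encoded as day 1,
++ * hour 0, minute 0, second 10 -- the hardware stores a running day
++ * count since the Unix epoch rather than a calendar date.
++ */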
++
++static int fh_rtc_exam_magic(void)
++{
++	unsigned int magic, status;
++	int i;
++
++	for (i = 0; i < 10; i++) {
++		magic = GET_REG(fh_rtc->regs + FH_RTC_USER_REG);
++
++		if (magic != RTC_MAGIC) {
++			status = GET_REG(fh_rtc->regs+FH_RTC_SYNC);
++			status &= 0x2;
++			SET_REG(fh_rtc->regs+FH_RTC_SYNC, status);
++
++			msleep(30);
++		} else {
++			return 0;
++		}
++	}
++
++	printk(KERN_INFO "ERROR: read rtc failed: 0x%x\n", magic);
++
++	return -EAGAIN;
++
++}
++
++static int fh_rtc_open(struct device *dev)
++{
++	return 0;
++}
++
++static void fh_rtc_release(struct device *dev)
++{
++	return;
++}
++
++static int fh_rtc_tm_compare(struct rtc_time *tm0, struct rtc_time *tm1)
++{
++	unsigned long read = 0, write = 0;
++
++	rtc_tm_to_time(tm0, &read);
++	rtc_tm_to_time(tm1, &write);
++
++	if (write > read || write < read - 2) {
++		RTC_PRINT_DBG("ERROR: read(%d-%d-%d %d:%d:%d) vs "
++				"write(%d-%d-%d %d:%d:%d)\n",
++			   tm0->tm_year + 1900,
++			   tm0->tm_mon + 1,
++			   tm0->tm_mday,
++			   tm0->tm_hour,
++			   tm0->tm_min,
++			   tm0->tm_sec,
++			   tm1->tm_year + 1900,
++			   tm1->tm_mon + 1,
++			   tm1->tm_mday,
++			   tm1->tm_hour,
++			   tm1->tm_min,
++			   tm1->tm_sec);
++		return -1;
++	}
++	return 0;
++}
++
++static int fh_rtc_gettime_nosync(struct device *dev, struct rtc_time *rtc_tm)
++{
++	unsigned int temp;
++
++	temp = fh_rtc_get_hw_sec_data(TIME_FUNC);
++	rtc_time_to_tm(temp, rtc_tm);
++	RTC_PRINT_DBG("rtc read date:0x%x\n", temp);
++	return 0;
++}
++
++
++static int fh_rtc_gettime_sync(struct device *dev, struct rtc_time *rtc_tm)
++{
++	unsigned int status;
++	unsigned int loop_count;
++	struct platform_device *pdev = to_platform_device(dev);
++	struct fh_rtc_controller *fh_rtc = platform_get_drvdata(pdev);
++
++	status = GET_REG(fh_rtc->regs+FH_RTC_SYNC);
++	status &= 0x2;
++
++	SET_REG(fh_rtc->regs+FH_RTC_SYNC, status);
++	msleep(30);
++
++	for (loop_count = 0;
++			loop_count <= SYNC_LOOP_COUNT;
++			loop_count++) {
++		udelay(100);
++		status = GET_REG(fh_rtc->regs+FH_RTC_SYNC);
++		status &= 0x1;
++		if(status == 1) {
++			unsigned int temp;
++			temp = fh_rtc_get_hw_sec_data(TIME_FUNC);
++			rtc_time_to_tm(temp, rtc_tm);
++			RTC_PRINT_DBG("rtc read date:0x%x\n", temp);
++			return 0;
++		}
++
++	}
++
++	printk(KERN_INFO "rtc read sync fail!\n");
++	return -EAGAIN;
++}
++
++static int fh_rtc_settime(struct device *dev, struct rtc_time *tm)
++{
++	struct rtc_time rtc_tm_read0;
++	unsigned int status;
++	unsigned int loop_count;
++	struct platform_device *pdev = to_platform_device(dev);
++	struct fh_rtc_controller *fh_rtc = platform_get_drvdata(pdev);
++	int cnt, ret, read_count = 0;
++
++	RTC_PRINT_DBG("rtc write %d-%d-%d %d:%d:%d\n",
++		 tm->tm_year + 1900,
++		 tm->tm_mon + 1,
++		 tm->tm_mday,
++		 tm->tm_hour,
++		 tm->tm_min,
++		 tm->tm_sec);
++
++	SET_REG(fh_rtc->regs + FH_RTC_USER_REG, RTC_MAGIC);
++	msleep(3);
++
++	for (cnt = 0; cnt < 5; cnt++) {
++		int rewrite_count = 0;
++REWRITE:
++		ret = 0;
++
++		fh_rtc_set_hw_sec_data(tm, TIME_FUNC);
++
++		/*spin_lock_irqsave(&rtc_lock, flag);*/
++
++		for (loop_count = 0;
++				loop_count <= SYNC_LOOP_COUNT;
++				loop_count++) {
++			udelay(100);
++
++			status = GET_REG(fh_rtc->regs+FH_RTC_SYNC);
++			status &= 0x2;
++			if (status == 0x2) {
++				printk(KERN_INFO "rtc write loop_count :%d\n",
++						loop_count);
++				if(loop_count > 20) {
++					RTC_PRINT_DBG("error: rewrite: %d, "
++							"rtc write loop_count :%d\n",
++							rewrite_count,
++							loop_count);
++					msleep(3);
++					rewrite_count++;
++					if (rewrite_count < 5) {
++						goto REWRITE;
++					} else {
++						RTC_PRINT_DBG("rtc write retry exceed\n");
++						msleep(3);
++						break;
++					}
++				}
++				/*spin_unlock_irqrestore(&rtc_lock, flag);*/
++				msleep(3);
++				break;
++			}
++		}
++
++		if (loop_count > SYNC_LOOP_COUNT) {
++			printk(KERN_INFO "rtc write sync fail!\n");
++			return -EAGAIN;
++		}
++
++		for (read_count = 0; read_count < 5; read_count++) {
++			fh_rtc_gettime_sync(dev, &rtc_tm_read0);
++			ret += fh_rtc_tm_compare(&rtc_tm_read0, tm);
++		}
++
++		if (!ret) {
++			return 0;
++		}
++
++	}
++
++	return -EIO;
++}
++
++static int fh_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
++{
++	struct rtc_time *rtc_tm = &alrm->time;
++
++	rtc_time_to_tm(fh_rtc_get_hw_sec_data(ALARM_FUNC), rtc_tm);
++
++	return 0;
++}
++
++static int fh_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
++{
++	struct rtc_time *rtc_tm = &alrm->time;
++
++	fh_rtc_set_hw_sec_data(rtc_tm, ALARM_FUNC);
++
++	return 0;
++}
++
++
++
++static int fh_rtc_irq_enable(struct device *dev, unsigned int enabled)
++{
++
++	struct platform_device *pdev = to_platform_device(dev);
++	struct fh_rtc_controller *fh_rtc = platform_get_drvdata(pdev);
++
++	if (enabled) {
++		fh_rtc_enable_interrupt(fh_rtc->regs,
++				FH_RTC_ISR_SEC_POS | FH_RTC_ISR_ALARM_POS);
++	}
++	else{
++
++		fh_rtc_disenable_interrupt(fh_rtc->regs,
++				FH_RTC_ISR_SEC_POS | FH_RTC_ISR_ALARM_POS);
++	}
++
++	return 0;
++}
++
++
++
++
++static irqreturn_t fh_rtc_irq(int irq, void *dev_id)
++{
++
++	struct fh_rtc_controller *fh_rtc = (struct fh_rtc_controller *)dev_id;
++	unsigned long events = 0;
++	unsigned int isr_status;
++	struct rtc_device *rtc = fh_rtc->rtc;
++
++	isr_status = fh_rtc_get_enabled_interrupt(fh_rtc->regs);
++
++	fh_rtc_clear_interrupt_status(fh_rtc->regs, isr_status);
++
++	if (isr_status & FH_RTC_ISR_SEC_POS) {
++
++		events |= RTC_IRQF | RTC_UF;
++	}
++	else if(isr_status & FH_RTC_ISR_ALARM_POS){
++		events |= RTC_IRQF | RTC_AF;
++	}
++	else{
++		pr_info("rtc unknown isr...\n");
++		return IRQ_HANDLED;
++	}
++	rtc_update_irq(rtc, 1, events);
++
++	return IRQ_HANDLED;
++
++}
++
++static const struct rtc_class_ops fh_rtcops = {
++	.open		= fh_rtc_open,
++	.release	= fh_rtc_release,
++	.read_time	= fh_rtc_gettime_nosync,
++	.set_time	= fh_rtc_settime,
++	.read_alarm	= fh_rtc_getalarm,
++	.set_alarm	= fh_rtc_setalarm,
++	.alarm_irq_enable = fh_rtc_irq_enable,
++};
++
++/*get the read of SADC and adjust RTC clock*/
++int fh_adjust_rtc(void)
++{
++	int m, n;	/*m:MinuteOffset, n:SecondOffset*/
++	long T = 25000;
++	int i, j;
++	long temp;
++	long Ppm = 0;
++	long value[7];
++	int flag = 0;
++	long sum = 0;
++	long v;
++	int num;
++
++	for (i = 0; i < 7; i++) {
++		value[i] = fh_sadc_get_value(fh_rtc->sadc_channel);
++		if (!value[i]) {
++			printk(KERN_ERR "ERROR: %s, sadc value %ld is incorrect\n",
++					__func__, value[i]);
++			return -EIO;
++		}
++		mdelay(100);
++	}
++	for (i = 0; i < 7; i++) {
++		for (j = i + 1; j < 7; j++) {
++			if (value[j] < value[i]) {
++				temp = value[i];
++				value[i] = value[j];
++				value[j] = temp;
++			}
++		}
++	}
++	sum = value[2] + value[3] + value[4];
++	v = sum / 3;
++	printk("the average value of SADC is:%ld\n", v);
++	if(v >= 3094) {	/*if temperature is lower than -40℃,adjust by -40℃*/
++		Ppm = 1690000;
++		n = Ppm / 305176;
++		Ppm -= 305176 * n;
++		m = Ppm / 5086;
++		printk("SecondOffset is: %d\n", n);
++		printk("MinuteOffset is: %d\n", m);
++		if ((n <= 7) && (m <= 63)) {
++			accelerate_second_rtc(n);
++			accelerate_minute_rtc(m);
++			printk("rtc clock has been adjusted!\n");
++		} else {
++			printk("beyond range of adjust\n");
++		}
++		return 0;
++	}
++	if(v < 260) {	/*if temperature is higher than 95℃,adjust by 95℃*/
++		Ppm = 1960000;
++		n = Ppm / 305176;
++		Ppm -= 305176 * n;
++		m = Ppm / 5086;
++		printk("SecondOffset is: %d\n", n);
++		printk("MinuteOffset is: %d\n", m);
++		if ((n <= 7) && (m <= 63)) {
++			accelerate_second_rtc(n);
++			accelerate_minute_rtc(m);
++			printk("rtc clock has been adjusted!\n");
++		} else {
++			printk("beyond range of adjust\n");
++		}
++
++		return 0;
++	}
++	for (i = 0; i < 27; i++) {	/*calculate temperature by voltage*/
++		if ((v >= SadcValue[i]) && (v < SadcValue[i+1])) {
++			T = Temperature1[i] - ((Temperature1[i] - Temperature1[i+1]) *
++					(SadcValue[i] - v) / (SadcValue[i] - SadcValue[i+1]));
++		} else {
++			//printk("the reading of SADC is beyond of voltage range\n");
++			continue;
++		}
++	}
++	for (i = 0; i < 135; i++) {	/*get deviation by temperature*/
++		if ((T >= Temperature2[i]) && (T < Temperature2[i+1])) {
++			num = i;
++			flag = 1;
++			if ((Temperature2[num+1] - T) <= 500) {
++				T = Temperature2[num + 1];
++				Ppm = Deviation[num + 1];
++			} else if ((Temperature2[num+1] - T) > 500) {
++				T = Temperature2[num];
++				Ppm = Deviation[num];
++			}
++			printk("current temperature is: %ld\n", T);
++			printk("current deviation of RTC crystal oscillator is: %ld\n", Ppm);
++		}
++	}
++	if (flag == 1) {
++		n = Ppm / 305176;
++		Ppm -= 305176 * n;
++		m = Ppm / 5086;
++		printk("SecondOffset is: %d\n", n);
++		printk("MinuteOffset is: %d\n", m);
++		if ((n <= 7) && (m <= 63)) {
++			accelerate_second_rtc(n);
++			accelerate_minute_rtc(m);
++			printk("rtc clock has been adjusted!\n");
++		} else {
++			printk("beyond range of adjust\n");
++		}
++	}
++	return 0;
++}
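++
++/*
++ * Worked example (sketch): at 0 degC the tables give Ppm = 250000
++ * (i.e. 25 ppm in 1e-4 ppm units), so n = 250000 / 305176 = 0
++ * second-steps and m = 250000 / 5086 = 49 minute-steps, speeding the
++ * clock up by about 49 * 0.509 = 24.9 ppm.
++ */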
++
++long get_rtc_temperature(void)
++{
++	long T = 0;
++	int i, j;
++	long temp;
++	long value[7];
++	long sum = 0;
++	long v;
++	for (i = 0; i < 7; i++) {
++		value[i] = fh_sadc_get_value(fh_rtc->sadc_channel);
++		if (!value[i]) {
++			printk(KERN_ERR "ERROR: %s, sadc value %ld is incorrect\n",
++					__func__, value[i]);
++			return -EIO;
++		}
++		mdelay(100);
++	}
++	for (i = 0; i < 7; i++) {
++		for (j = i + 1; j < 7; j++) {
++			if (value[j] < value[i]) {
++				temp = value[i];
++				value[i] = value[j];
++				value[j] = temp;
++			}
++		}
++	}
++	sum = value[2] + value[3] + value[4];
++	v = sum / 3;
++	printk("the average value of SADC is:%ld\n", v);
++	for (i = 0; i < 27; i++) {
++		if ((v >= SadcValue[i]) && (v < SadcValue[i+1])) {
++			T = Temperature1[i] - ((Temperature1[i] - Temperature1[i+1]) *
++					(SadcValue[i] - v) / (SadcValue[i] - SadcValue[i+1]));
++		} else {
++			//printk("the reading of SADC is beyond of voltage range\n");
++			continue;
++		}
++	}
++	printk("current temperature is: %ld\n", T);
++	return T;
++}
++
++void fh_rtc_self_adjustment(struct work_struct *work)
++{
++	fh_adjust_rtc();
++
++	queue_delayed_work(fh_rtc->wq, &fh_rtc->self_adjust, 5000);
++}
++
++
++static void create_proc_rtc(struct fh_rtc_controller *rtc);
++static void remove_proc(void);
++static int __devinit fh_rtc_probe(struct platform_device *pdev)
++{
++	int err = 0;
++	struct resource *ioarea;
++	struct fh_rtc_platform_data * rtc_platform_info;
++	struct rtc_device *rtc;
++	struct resource *res;
++
++	fh_rtc = kzalloc(sizeof(struct fh_rtc_controller), GFP_KERNEL);
++	if (!fh_rtc)
++		return -ENOMEM;
++
++	/* board info below */
++	rtc_platform_info = (struct fh_rtc_platform_data *)pdev->dev.platform_data;
++	if(rtc_platform_info == NULL){
++		dev_err(&pdev->dev, "%s, rtc platform error.\n",
++			__func__);
++		err = -ENODEV;
++		goto err_nores;
++	}
++	fh_rtc->base_year = rtc_platform_info->base_year;
++	fh_rtc->base_month = rtc_platform_info->base_month;
++	fh_rtc->base_day = rtc_platform_info->base_day;
++	fh_rtc->sadc_channel = rtc_platform_info->sadc_channel;
++
++	/* find the IRQ */
++	err = platform_get_irq(pdev, 0);
++	if (err < 0) {
++		dev_err(&pdev->dev, "%s, rtc irq error.\n",
++			__func__);
++		goto err_nores;
++	}
++	fh_rtc->irq = err;
++
++	/* get the memory region */
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (res == NULL) {
++		dev_err(&pdev->dev, "failed to get memory region resource\n");
++		err = -ENOENT;
++		goto err_nores;
++	}
++
++	fh_rtc->paddr = res->start;
++	ioarea = request_mem_region(res->start, resource_size(res),
++			pdev->name);
++	if(!ioarea) {
++		dev_err(&pdev->dev, "rtc region already claimed\n");
++		err = -EBUSY;
++		goto err_nores;
++	}
++
++	fh_rtc->regs = ioremap(res->start, resource_size(res));
++	if (!fh_rtc->regs) {
++		dev_err(&pdev->dev, "rtc already mapped\n");
++		err = -EINVAL;
++		goto err_nores;
++	}
++
++	/* register RTC and exit */
++	platform_set_drvdata(pdev, fh_rtc);
++	rtc = rtc_device_register(rtc_platform_info->dev_name, &pdev->dev, &fh_rtcops,
++				  THIS_MODULE);
++
++	if (IS_ERR(rtc)) {
++		dev_err(&pdev->dev, "cannot attach rtc\n");
++		err = PTR_ERR(rtc);
++		goto err_nores;
++	}
++	fh_rtc->rtc = rtc;
++
++	err = request_irq(fh_rtc->irq , fh_rtc_irq, 0,
++			  dev_name(&pdev->dev), fh_rtc);
++	if (err) {
++		dev_dbg(&pdev->dev, "request_irq failed, %d\n", err);
++		goto err_nores;
++	}
++
++	create_proc_rtc(fh_rtc);
++
++	SET_REG(fh_rtc->regs + FH_RTC_DEBUG, RTC_PHASE);
++
++	if(fh_rtc->sadc_channel >= 0)
++	{
++		pr_info("RTC: start self adjustment\n");
++		fh_rtc->wq = create_workqueue("rtc_wq");
++		if (!fh_rtc->wq) {
++			dev_err(&pdev->dev, "no memory to create rtc workqueue\n");
++			err = -ENOMEM;
++			goto err_nores;
++		}
++		INIT_DELAYED_WORK(&fh_rtc->self_adjust, fh_rtc_self_adjustment);
++
++		queue_delayed_work(fh_rtc->wq, &fh_rtc->self_adjust, 5000);
++	}
++
++	err = fh_rtc_exam_magic();
++
++	/*
++	 * note: this error path does not unwind earlier setup (request_irq,
++	 * ioremap, request_mem_region, rtc_device_register); cleanup is
++	 * largely left to fh_rtc_remove().
++	 */
++err_nores:
++	return err;
++}
++
++static int __devexit fh_rtc_remove(struct platform_device *dev)
++{
++	struct fh_rtc_controller *fh_rtc = platform_get_drvdata(dev);
++
++	remove_proc();
++	free_irq(fh_rtc->irq, fh_rtc);
++	rtc_device_unregister(fh_rtc->rtc);
++
++	iounmap(fh_rtc->regs);
++	platform_set_drvdata(dev, NULL);
++	kfree(fh_rtc);
++	return 0;
++}
++
++
++
++#ifdef CONFIG_PM
++
++/* RTC Power management control */
++
++static int fh_rtc_suspend(struct platform_device *pdev, pm_message_t state)
++{
++	return 0;
++}
++
++static int fh_rtc_resume(struct platform_device *pdev)
++{
++	return 0;
++}
++#else
++#define fh_rtc_suspend NULL
++#define fh_rtc_resume  NULL
++#endif
++
++
++
++static struct platform_driver fh_rtc_driver = {
++	.probe		= fh_rtc_probe,
++	.remove		= __devexit_p(fh_rtc_remove),
++	.suspend	= fh_rtc_suspend,
++	.resume		= fh_rtc_resume,
++	.driver		= {
++		.name	= "fh_rtc",
++		.owner	= THIS_MODULE,
++	},
++};
++
++
++static int __init fh_rtc_init(void) {
++
++	return platform_driver_register(&fh_rtc_driver);
++}
++
++static void __exit fh_rtc_exit(void) {
++	platform_driver_unregister(&fh_rtc_driver);
++}
++
++static void del_char(char *str, char ch)
++{
++	char *p = str;
++	char *q = str;
++	while (*q) {
++		if (*q != ch) {
++			*p++ = *q;
++		}
++		q++;
++	}
++	*p = '\0';
++}
++
++ssize_t proc_read(char *page, char **start, off_t off, int count,
++		int *eof, struct fh_rtc_controller *data) {
++	ssize_t len = 0;
++
++	printk(KERN_INFO "------------- dump register -------------\n");
++	printk(KERN_INFO "cnt:0x%x\n",fh_rtc_get_time(data->regs)  );
++	printk(KERN_INFO "offset:0x%x\n",fh_rtc_get_offset(data->regs));
++	printk(KERN_INFO "fail:0x%x\n",fh_rtc_get_power_fail(data->regs));
++	printk(KERN_INFO "alarm_cnt:0x%x\n",fh_rtc_get_alarm_time(data->regs));
++	printk(KERN_INFO "int stat:0x%x\n",fh_rtc_get_int_status(data->regs));
++	printk(KERN_INFO "int en:0x%x\n",fh_rtc_get_enabled_interrupt(data->regs));
++	printk(KERN_INFO "sync:0x%x\n",fh_rtc_get_sync(data->regs));
++	printk(KERN_INFO "debug:0x%x\n",fh_rtc_get_debug(data->regs));
++	printk(KERN_INFO "-------------------------------------------\n");
++
++	return len;
++}
++
++
++static ssize_t fh_rtc_proc_write(struct file *filp, const char *buf, size_t len, loff_t *off)
++{
++	char message[32] = {0};
++	char * const delim = ",";
++	char *cur = message, *power_str;
++	int power;
++	len = min(len, sizeof(message) - 1);	/* keep room for the NUL */
++	if (copy_from_user(message, buf, len))
++		return -EFAULT;
++	power_str = strsep(&cur, delim);
++	if (!power_str) {
++		pr_err("%s: ERROR: parameter is empty\n", __func__);
++		return -EINVAL;
++	} else {
++		del_char(power_str, ' ');
++		del_char(power_str, '\n');
++		power = (int)simple_strtoul(power_str, NULL, 10);
++		if (power < 0) {
++			pr_err("%s: ERROR: parameter is incorrect\n", __func__);
++			return -EINVAL;
++		}
++		printk(KERN_INFO "rtc proc command is %d\n", power);
++		if (power == 0)
++			fh_adjust_rtc();
++		else if (power == 1)
++			get_rtc_temperature();
++	}
++	return len;
++}
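++/*
++ * Usage sketch (the path depends on what FH_RTC_PROC_FILE expands to):
++ *   echo 0 > /proc/<FH_RTC_PROC_FILE>   # run fh_adjust_rtc()
++ *   echo 1 > /proc/<FH_RTC_PROC_FILE>   # print the current temperature
++ * Reading the file dumps the rtc registers to the kernel log.
++ */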
++
++static void create_proc_rtc(struct fh_rtc_controller *rtc)
++{
++	fh_rtc->proc_rtc_entry =
++			create_proc_entry(FH_RTC_PROC_FILE,
++					S_IRUGO | S_IWUSR, NULL);
++
++	if (!fh_rtc->proc_rtc_entry) {
++		printk(KERN_ERR"create proc failed\n");
++	} else {
++		fh_rtc->proc_rtc_entry->read_proc =
++				(read_proc_t *)proc_read;
++		fh_rtc->proc_rtc_entry->write_proc =
++				(write_proc_t *)fh_rtc_proc_write;
++		fh_rtc->proc_rtc_entry->data = rtc;
++	}
++}
++
++static void remove_proc(void)
++{
++	remove_proc_entry(FH_RTC_PROC_FILE, NULL);
++}
++
++module_init(fh_rtc_init);
++module_exit(fh_rtc_exit);
++
++MODULE_DESCRIPTION("FH SOC RTC Driver");
++MODULE_AUTHOR("yu.zhang <zhangy@fullhan.com>");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:fh-rtc");
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index de35c3ad..ca24baf2 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -442,6 +442,15 @@ config SPI_DW_MMIO
+ 	tristate "Memory-mapped io interface driver for DW SPI core"
+ 	depends on SPI_DESIGNWARE && HAVE_CLK
+ 
++
++config SPI_FH
++	tristate "fh spi driver for DW SPI core"
++	depends on SPI_MASTER
++
++config SPI_FH_SLAVE
++	tristate "fh spi slave driver for DW SPI core"
++
+ #
+ # There are lots of SPI device types, with sensors and memory
+ # being probably the most widely used ones.
+diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
+index 0f8c69b6..463ffaed 100644
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -22,7 +22,13 @@ obj-$(CONFIG_SPI_DAVINCI)		+= davinci_spi.o
+ obj-$(CONFIG_SPI_DESIGNWARE)		+= dw_spi.o
+ obj-$(CONFIG_SPI_DW_PCI)		+= dw_spi_midpci.o
+ dw_spi_midpci-objs			:= dw_spi_pci.o dw_spi_mid.o
++
+ obj-$(CONFIG_SPI_DW_MMIO)		+= dw_spi_mmio.o
++
++obj-$(CONFIG_SPI_FH)		+= fh_spi.o
++obj-$(CONFIG_SPI_FH_SLAVE)		+= fh_spi_slave.o
++
++
+ obj-$(CONFIG_SPI_EP93XX)		+= ep93xx_spi.o
+ obj-$(CONFIG_SPI_GPIO)			+= spi_gpio.o
+ obj-$(CONFIG_SPI_IMX)			+= spi_imx.o
+diff --git a/drivers/spi/fh_spi.c b/drivers/spi/fh_spi.c
+new file mode 100644
+index 00000000..ef0f000c
+--- /dev/null
++++ b/drivers/spi/fh_spi.c
+@@ -0,0 +1,1460 @@
++/** @file fh_spi.c
++ *  @note ShangHai FullHan Co., Ltd. All Rights Reserved.
++ *  @brief fh spi driver
++ *  @author     yu.zhang
++ *  @date       2015/1/11
++ *  @note history
++ *  @note 2014-1-11 V1.0.0 created the file.
++ */
++/*****************************************************************************
++ *  Include Section
++ *  add all #include here
++ *****************************************************************************/
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/spi/spi.h>
++#include <linux/scatterlist.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/workqueue.h>
++#include <linux/bug.h>
++#include <linux/completion.h>
++#include <linux/gpio.h>
++#include <linux/dmaengine.h>
++#include <mach/spi.h>
++#include <mach/fh_dmac.h>
++#include <linux/dma-mapping.h>
++#include <mach/fh_dmac_regs.h>
++/*****************************************************************************
++ * Define section
++ * add all #define here
++ *****************************************************************************/
++#define lift_shift_bit_num(bit_num)     (1<<bit_num)
++/* read spi irq, only useful if you set which is masked */
++#define SPI_IRQ_TXEIS           (lift_shift_bit_num(0))
++#define SPI_IRQ_TXOIS           (lift_shift_bit_num(1))
++#define SPI_IRQ_RXUIS           (lift_shift_bit_num(2))
++#define SPI_IRQ_RXOIS           (lift_shift_bit_num(3))
++#define SPI_IRQ_RXFIS           (lift_shift_bit_num(4))
++#define SPI_IRQ_MSTIS           (lift_shift_bit_num(5))
++/* spi status */
++#define SPI_STATUS_BUSY         (lift_shift_bit_num(0))
++#define SPI_STATUS_TFNF         (lift_shift_bit_num(1))
++#define SPI_STATUS_TFE          (lift_shift_bit_num(2))
++#define SPI_STATUS_RFNE         (lift_shift_bit_num(3))
++#define SPI_STATUS_RFF          (lift_shift_bit_num(4))
++#define SPI_STATUS_TXE          (lift_shift_bit_num(5))
++#define SPI_STATUS_DCOL         (lift_shift_bit_num(6))
++#define CACHE_LINE_SIZE         (32)
++#define PUMP_DATA_NONE_MODE     (0x00)
++#define PUMP_DATA_DMA_MODE      (0x11)
++#define PUMP_DATA_ISR_MODE      (0x22)
++#define PUMP_DATA_POLL_MODE     (0x33)
++#define DMA_TRANS_GATE_LEVEL    1024
++#define SPI_DATA_REG_OFFSET     (0x60)
++/****************************************************************************
++ * ADT section
++ *  add definitions of user-defined data types used only in this file here
++ ***************************************************************************/
++enum {
++	CONFIG_OK = 0, CONFIG_PARA_ERROR = lift_shift_bit_num(0),
++	/* only for the set slave en/disable */
++	CONFIG_BUSY = lift_shift_bit_num(1),
++	/* only for write_read mode */
++	WRITE_READ_OK = 0,
++	WRITE_READ_ERROR = lift_shift_bit_num(2),
++	WRITE_READ_TIME_OUT = lift_shift_bit_num(3),
++	/* only for write only mode */
++	WRITE_ONLY_OK = 0,
++	WRITE_ONLY_ERROR = lift_shift_bit_num(4),
++	WRITE_ONLY_TIME_OUT = lift_shift_bit_num(5),
++	/* only for read only mode */
++	READ_ONLY_OK = 0,
++	READ_ONLY_ERROR = lift_shift_bit_num(6),
++	READ_ONLY_TIME_OUT = lift_shift_bit_num(7),
++	/* eeprom mode */
++	EEPROM_OK = 0,
++	EEPROM_ERROR = lift_shift_bit_num(8),
++	EEPROM_TIME_OUT = lift_shift_bit_num(9),
++	MULTI_MASTER_ERROR = lift_shift_bit_num(10),
++	TX_OVERFLOW_ERROR = lift_shift_bit_num(11),
++	RX_OVERFLOW_ERROR = lift_shift_bit_num(12),
++};
++
++/* enable spi */
++typedef enum enum_spi_enable {
++	SPI_DISABLE = 0,
++	SPI_ENABLE = (lift_shift_bit_num(0)),
++} spi_enable_e;
++
++/* polarity */
++typedef enum enum_spi_polarity {
++	SPI_POLARITY_LOW = 0,
++	SPI_POLARITY_HIGH = (lift_shift_bit_num(7)),
++	SPI_POLARITY_RANGE = (lift_shift_bit_num(7)),
++} spi_polarity_e;
++
++typedef enum enum_spi_phase {
++	SPI_PHASE_RX_FIRST = 0,
++	SPI_PHASE_TX_FIRST = (lift_shift_bit_num(6)),
++	SPI_PHASE_RANGE = (lift_shift_bit_num(6)),
++} spi_phase_e;
++
++typedef enum enum_spi_format {
++	SPI_MOTOROLA_MODE = 0x00,
++	SPI_TI_MODE = 0x10,
++	SPI_MICROWIRE_MODE = 0x20,
++	SPI_FRAME_FORMAT_RANGE = 0x30,
++} spi_format_e;
++
++typedef enum enum_spi_data_size {
++	SPI_DATA_SIZE_4BIT = 0x03,
++	SPI_DATA_SIZE_5BIT = 0x04,
++	SPI_DATA_SIZE_6BIT = 0x05,
++	SPI_DATA_SIZE_7BIT = 0x06,
++	SPI_DATA_SIZE_8BIT = 0x07,
++	SPI_DATA_SIZE_9BIT = 0x08,
++	SPI_DATA_SIZE_10BIT = 0x09,
++	SPI_DATA_SIZE_16BIT = 0x0f,
++	SPI_DATA_SIZE_RANGE = 0x0f,
++} spi_data_size_e;
++
++typedef enum enum_spi_transfer_mode {
++	SPI_TX_RX_MODE = 0x000,
++	SPI_ONLY_TX_MODE = 0x100,
++	SPI_ONLY_RX_MODE = 0x200,
++	SPI_EEPROM_MODE = 0x300,
++	SPI_TRANSFER_MODE_RANGE = 0x300,
++} spi_transfer_mode_e;
++
++typedef enum enum_spi_baudrate {
++	SPI_SCLKIN = 50000000,
++	SPI_SCLKOUT_27000000 = (SPI_SCLKIN / 27000000), /* 27M */
++	SPI_SCLKOUT_13500000 = (SPI_SCLKIN / 13500000), /* 13.5M */
++	SPI_SCLKOUT_6750000 = (SPI_SCLKIN / 6750000), /* 6.75M */
++	SPI_SCLKOUT_4500000 = (SPI_SCLKIN / 4500000), /* 4.5M */
++	SPI_SCLKOUT_3375000 = (SPI_SCLKIN / 3375000), /* 3.375M */
++	SPI_SCLKOUT_2700000 = (SPI_SCLKIN / 2700000), /* 2.7M */
++	SPI_SCLKOUT_1500000 = (SPI_SCLKIN / 1500000), /* 1.5M */
++	SPI_SCLKOUT_500000 = (SPI_SCLKIN / 500000), /* 0.5M */
++	SPI_SCLKOUT_100000 = (SPI_SCLKIN / 100000), /* 0.1M */
++} spi_baudrate_e;
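++/*
++ * Note: the enum values are clock dividers, not frequencies; the DW core
++ * divides its input clock by the value programmed into baudr. With
++ * integer division the fastest entries collapse (50000000 / 27000000
++ * evaluates to 1), so the divider actually programmed at runtime is the
++ * one computed in fh_spi_setup_transfer().
++ */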
++
++typedef enum enum_spi_irq {
++	SPI_IRQ_TXEIM = (lift_shift_bit_num(0)),
++	SPI_IRQ_TXOIM = (lift_shift_bit_num(1)),
++	SPI_IRQ_RXUIM = (lift_shift_bit_num(2)),
++	SPI_IRQ_RXOIM = (lift_shift_bit_num(3)),
++	SPI_IRQ_RXFIM = (lift_shift_bit_num(4)),
++	SPI_IRQ_MSTIM = (lift_shift_bit_num(5)),
++	SPI_IRQ_ALL = 0x3f,
++} spi_irq_e;
++
++typedef enum enum_spi_slave {
++	SPI_SLAVE_PORT0 = (lift_shift_bit_num(0)),
++	SPI_SLAVE_PORT1 = (lift_shift_bit_num(1)),
++} spi_slave_e;
++
++typedef enum enum_spi_dma_control_mode {
++	SPI_DMA_RX_POS = (lift_shift_bit_num(0)),
++	SPI_DMA_TX_POS = (lift_shift_bit_num(1)),
++	SPI_DMA_CONTROL_RANGE = 0x03,
++} spi_dma_control_mode_e;
++
++struct fh_spi_reg {
++	u32 ctrl0;
++	u32 ctrl1;
++	u32 ssienr;
++	u32 mwcr;
++	u32 ser;
++	u32 baudr;
++	u32 txfltr;
++	u32 rxfltr;
++	u32 txflr;
++	u32 rxflr;
++	u32 sr;
++	u32 imr;
++	u32 isr;
++	u32 risr;
++	u32 txoicr;
++	u32 rxoicr;
++	u32 rxuicr;
++	u32 msticr;
++	u32 icr;
++	u32 dmacr;
++	u32 dmatdlr;
++	u32 dmardlr;
++	u32 idr;
++	u32 version;
++	u32 dr; /* Currently oper as 32 bits,
++	 though only low 16 bits matters */
++	u32 rev[35];
++	u32 rx_sample_dly;
++};
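++/*
++ * The struct above mirrors the DW APB SSI register map with one u32 per
++ * 4-byte register: dr is the 25th word, i.e. offset 24 * 4 = 0x60, which
++ * matches the SPI_DATA_REG_OFFSET constant used for the DMA addresses.
++ */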
++
++#define dw_readl(dw, name) \
++__raw_readl(&(((struct fh_spi_reg *)dw->regs)->name))
++#define dw_writel(dw, name, val) \
++__raw_writel((val), &(((struct fh_spi_reg *)dw->regs)->name))
++#define dw_readw(dw, name) \
++ __raw_readw(&(((struct fh_spi_reg *)dw->regs)->name))
++#define dw_writew(dw, name, val) \
++__raw_writew((val), &(((struct fh_spi_reg *)dw->regs)->name))
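++/*
++ * Example: dw_writel(dw, baudr, div) expands to
++ * __raw_writel(div, &((struct fh_spi_reg *)dw->regs)->baudr), i.e. a
++ * write to dw->regs + 0x14, so register offsets come from the struct
++ * layout above rather than hand-coded constants.
++ */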
++#define MAX_SG_LEN		32
++#define SG_ONE_TIME_MAX_SIZE (4096)
++struct _fh_spi_dma_transfer {
++	struct dma_chan *chan;
++	struct dma_slave_config cfg;
++	struct scatterlist sgl[MAX_SG_LEN];
++	u32 sgl_data_size[MAX_SG_LEN];
++	u32 actual_sgl_size;
++	struct dma_async_tx_descriptor *desc;
++};
++
++struct fh_spi {
++	void * __iomem regs; /* vaddr of the control registers */
++	u32 id;
++	u32 paddr;
++	u32 slave_port;
++	u32 irq; /* irq no */
++	u32 fifo_len; /* depth of the FIFO buffer */
++	u32 cpol;
++	u32 cpha;
++	u32 isr_flag;
++	u32 apb_clock_in;
++	u32 max_freq; /* max bus freq supported */
++	u32 div;
++	/*use id u32 bus_num;*//*which bus*/
++	u32 num_cs; /* supported slave numbers */
++	u32 data_width;
++	u32 frame_mode;
++	u32 transfer_mode;
++	u32 active_cs_pin;
++	u32 tx_len;
++	u32 rx_len;
++	void *rx_buff;
++	void *tx_buff;
++	u32 tx_dma_add;
++	u32 rx_dma_add;
++	u32 tx_hs_no;
++	u32 rx_hs_no;
++	u32 tx_dumy_buff[4];
++	u32 rx_dumy_buff[4];
++	u32 tx_dumy_dma_add;
++	u32 rx_dumy_dma_add;
++	struct fh_spi_cs cs_data[SPI_MASTER_CONTROLLER_MAX_SLAVE];
++	u32 pump_data_mode;
++	struct _fh_spi_dma_transfer dma_rx;
++	struct _fh_spi_dma_transfer dma_tx;
++	u32 complete_times;
++	struct fh_spi_platform_data *board_info;
++};
++
++struct fh_spi_controller {
++	struct device *master_dev;
++	struct clk *clk;
++	spinlock_t lock;
++	struct list_head queue;
++	struct platform_device *p_dev;
++	struct work_struct work;
++	struct workqueue_struct *workqueue;
++	struct spi_message *active_message;
++	struct spi_transfer *active_transfer;
++	struct fh_spi dwc;
++	struct completion done;
++};
++
++/******************************************************************************
++ * Function prototype section
++ * add prototypes for all functions called by this file,execepting those
++ * declared in header file
++ *****************************************************************************/
++
++/*****************************************************************************
++ * Global variables section - Exported
++ * add declaration of global variables that will be exported here
++ * e.g.
++ *  int8_t foo;
++ ****************************************************************************/
++
++/*****************************************************************************
++ *  static functions
++ *****************************************************************************/
++
++static u32 Spi_Enable(struct fh_spi *dw, spi_enable_e enable);
++static u32 Spi_SetPolarity(struct fh_spi *dw, spi_polarity_e polarity);
++static u32 Spi_SetPhase(struct fh_spi *dw, spi_phase_e phase);
++static u32 Spi_SetFrameFormat(struct fh_spi *dw, spi_format_e format);
++static u32 Spi_SetBaudrate(struct fh_spi *dw, spi_baudrate_e baudrate);
++static u32 Spi_DisableIrq(struct fh_spi *dw, u32 irq);
++static u32 Spi_ReadStatus(struct fh_spi *dw);
++static u32 Spi_EnableSlaveen(struct fh_spi *dw, spi_slave_e port);
++static u32 Spi_DisableSlaveen(struct fh_spi *dw, spi_slave_e port);
++static u32 Spi_EnableIrq(struct fh_spi *dw, u32 irq);
++static u32 Spi_SetTxlevel(struct fh_spi *dw, u32 level);
++static u32 Spi_ReadTxfifolevel(struct fh_spi *dw);
++static u32 Spi_ReadRxfifolevel(struct fh_spi *dw);
++static u32 Spi_WriteData(struct fh_spi *dw, u16 data);
++static u16 Spi_ReadData(struct fh_spi *dw);
++static u32 Spi_Isrstatus(struct fh_spi *dw);
++static void Spi_SetDmaTxDataLevel(struct fh_spi *dw, u32 level);
++static void Spi_SetDmaRxDataLevel(struct fh_spi *dw, u32 level);
++static void Spi_SetDmaControlEnable(struct fh_spi *dw,
++spi_dma_control_mode_e enable_pos);
++static bool fh_spi_dma_chan_filter(struct dma_chan *chan, void *param);
++static int fh_spi_setup(struct spi_device *spi);
++static u32 Spi_SetRxdelay(struct fh_spi *dw, u8 data);
++/*****************************************************************************
++ * Global variables section - Local
++ * define global variables(will be refered only in this file) here,
++ * static keyword should be used to limit scope of local variable to this file
++ * e.g.
++ *  static uint8_t ufoo;
++ *****************************************************************************/
++
++/* function body */
++
++static u32 Spi_Enable(struct fh_spi *dw, spi_enable_e enable)
++{
++	dw_writel(dw, ssienr, enable);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetPolarity(struct fh_spi *dw, spi_polarity_e polarity)
++{
++	u32 data;
++
++	data = dw_readl(dw, ctrl0);
++	data &= ~(u32) SPI_POLARITY_RANGE;
++	data |= polarity;
++	dw_writel(dw, ctrl0, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetPhase(struct fh_spi *dw, spi_phase_e phase)
++{
++	u32 data;
++
++	data = dw_readl(dw, ctrl0);
++	data &= ~(u32) SPI_PHASE_RANGE;
++	data |= phase;
++	dw_writel(dw, ctrl0, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetFrameFormat(struct fh_spi *dw, spi_format_e format)
++{
++	u32 data = 0;
++
++	data = dw_readl(dw, ctrl0);
++	data &= ~(u32) SPI_FRAME_FORMAT_RANGE;
++	data |= format;
++	dw_writel(dw, ctrl0, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetTransferMode(struct fh_spi *dw, spi_transfer_mode_e mode)
++{
++	u32 data = 0;
++
++	data = dw_readl(dw, ctrl0);
++	data &= ~(u32) SPI_TRANSFER_MODE_RANGE;
++	data |= mode;
++	dw_writel(dw, ctrl0, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetBaudrate(struct fh_spi *dw, spi_baudrate_e baudrate)
++{
++	dw_writel(dw, baudr, baudrate);
++	return CONFIG_OK;
++}
++
++static u32 Spi_DisableIrq(struct fh_spi *dw, u32 irq)
++{
++	u32 data = 0;
++
++	data = dw_readl(dw, imr);
++	data &= ~irq;
++	dw_writel(dw, imr, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_EnableIrq(struct fh_spi *dw, u32 irq)
++{
++	u32 data = 0;
++
++	data = dw_readl(dw, imr);
++	data |= irq;
++	dw_writel(dw, imr, data);
++	return CONFIG_OK;
++
++}
++
++static u32 Spi_SetTxlevel(struct fh_spi *dw, u32 level)
++{
++	dw_writel(dw, txfltr, level);
++	return CONFIG_OK;
++}
++
++static u32 Spi_ReadTxfifolevel(struct fh_spi *dw)
++{
++	return dw_readl(dw, txflr);
++}
++
++static u32 Spi_ReadRxfifolevel(struct fh_spi *dw)
++{
++	return (u32) dw_readl(dw, rxflr);
++}
++
++static u32 Spi_ReadStatus(struct fh_spi *dw)
++{
++	return (uint8_t) dw_readl(dw, sr);
++}
++
++static u32 Spi_EnableSlaveen(struct fh_spi *dw, spi_slave_e port)
++{
++	u32 data = 0;
++
++	gpio_direction_output(dw->active_cs_pin, 0);
++	data = dw_readl(dw, ser);
++	data |= port;
++	dw_writel(dw, ser, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_DisableSlaveen(struct fh_spi *dw, spi_slave_e port)
++{
++	u32 data = 0;
++	gpio_direction_output(dw->active_cs_pin, 1);
++	data = dw_readl(dw, ser);
++	data &= ~port;
++	dw_writel(dw, ser, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_WriteData(struct fh_spi *dw, u16 data)
++{
++	dw_writew(dw, dr, data);
++	return WRITE_ONLY_OK;
++}
++
++static u16 Spi_ReadData(struct fh_spi *dw)
++{
++	return dw_readw(dw, dr);
++}
++
++static void Spi_Clearallerror(struct fh_spi *dw)
++{
++	/* a read of icr clears all error interrupt sources; value is unused */
++	(void) dw_readl(dw, icr);
++}
++
++static u32 Spi_Isrstatus(struct fh_spi *dw)
++{
++	u32 data = dw_readl(dw, isr);
++	return data;
++}
++
++static void Spi_SetDmaTxDataLevel(struct fh_spi *dw, u32 level)
++{
++	dw_writel(dw, dmatdlr, level);
++}
++
++static void Spi_SetDmaRxDataLevel(struct fh_spi *dw, u32 level)
++{
++	dw_writel(dw, dmardlr, level);
++}
++
++static void Spi_SetDmaControlEnable(struct fh_spi *dw,
++spi_dma_control_mode_e enable_pos)
++{
++	u32 data;
++	data = dw_readl(dw, dmacr);
++	data |= enable_pos;
++	dw_writel(dw, dmacr, data);
++}
++
++static void Spi_SetDmaControlDisable(struct fh_spi *dw,
++spi_dma_control_mode_e enable_pos)
++{
++	u32 data;
++	data = dw_readl(dw, dmacr);
++	data &= ~enable_pos;
++	dw_writel(dw, dmacr, data);
++}
++
++static u32 Spi_SetRxdelay(struct fh_spi *dw, u8 data)
++{
++	dw_writel(dw, rx_sample_dly, data);
++	return CONFIG_OK;
++}
++
++static inline u32 tx_max_tx_only(struct fh_spi_controller *fh_spi)
++{
++	u32 hw_tx_level;
++	hw_tx_level = Spi_ReadTxfifolevel(&fh_spi->dwc);
++	hw_tx_level = fh_spi->dwc.fifo_len - hw_tx_level;
++
++	return min(hw_tx_level, fh_spi->dwc.tx_len);
++}
++
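++/*
++ * Return the max entries we can write into the tx fifo without
++ * overrunning the rx fifo: in full-duplex mode every byte written
++ * eventually lands in the rx fifo, so the rx headroom (plus one word
++ * already in the shift register) bounds the burst as well.
++ */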
++static inline u32 tx_max(struct fh_spi_controller *fh_spi)
++{
++
++	u32 hw_tx_level, hw_rx_level;
++	u32 temp_tx_lev;
++	temp_tx_lev = Spi_ReadTxfifolevel(&fh_spi->dwc);
++	hw_rx_level = temp_tx_lev + Spi_ReadRxfifolevel(&fh_spi->dwc);
++	/* add shift data... maybe should add apb bus delay */
++	hw_rx_level++;
++
++	hw_tx_level = temp_tx_lev;
++	hw_tx_level = fh_spi->dwc.fifo_len - hw_tx_level;
++	hw_rx_level = fh_spi->dwc.fifo_len - hw_rx_level;
++	return min(min(hw_tx_level, fh_spi->dwc.tx_len), hw_rx_level);
++}
++
++/* Return the max entries we should read out of rx fifo */
++static inline u32 rx_max(struct fh_spi_controller *fh_spi)
++{
++	u32 hw_rx_level;
++
++	hw_rx_level = Spi_ReadRxfifolevel(&fh_spi->dwc);
++	return hw_rx_level;
++}
++
++static int fh_spi_init_hw(struct fh_spi_controller *fh_spi,
++struct fh_spi_platform_data *board_info)
++{
++	int status, i;
++
++	fh_spi->dwc.id = board_info->bus_no;
++
++	fh_spi->dwc.fifo_len = board_info->fifo_len;
++	fh_spi->dwc.num_cs = board_info->slave_max_num;
++	for (i = 0; i < fh_spi->dwc.num_cs; i++) {
++		fh_spi->dwc.cs_data[i].GPIO_Pin =
++		                board_info->cs_data[i].GPIO_Pin;
++		fh_spi->dwc.cs_data[i].name = board_info->cs_data[i].name;
++	}
++
++	fh_spi->dwc.rx_hs_no = board_info->rx_handshake_num;
++	fh_spi->dwc.tx_hs_no = board_info->tx_handshake_num;
++	memset(&fh_spi->dwc.dma_rx, 0, sizeof(struct _fh_spi_dma_transfer));
++	memset(&fh_spi->dwc.dma_tx, 0, sizeof(struct _fh_spi_dma_transfer));
++	fh_spi->dwc.complete_times = 0;
++	fh_spi->dwc.pump_data_mode = PUMP_DATA_POLL_MODE;
++	/* bind the platform data here.... */
++	fh_spi->dwc.board_info = board_info;
++
++	fh_spi->dwc.isr_flag = SPI_IRQ_TXEIM | SPI_IRQ_TXOIM | SPI_IRQ_RXUIM
++	                | SPI_IRQ_RXOIM;
++	fh_spi->dwc.frame_mode = SPI_MOTOROLA_MODE;
++	fh_spi->dwc.transfer_mode = SPI_TX_RX_MODE;
++
++	do {
++		status = Spi_ReadStatus(&fh_spi->dwc);
++	} while (status & 0x01);
++	/* add spi disable */
++	Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
++	/* add spi frame mode & transfer mode */
++	Spi_SetFrameFormat(&fh_spi->dwc, fh_spi->dwc.frame_mode);
++	Spi_SetTransferMode(&fh_spi->dwc, fh_spi->dwc.transfer_mode);
++	/* add spi disable all isr */
++	Spi_DisableIrq(&fh_spi->dwc, SPI_IRQ_ALL);
++	/* add spi enable */
++	Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
++	return 0;
++}
++
++static irqreturn_t fh_spi_irq(int irq, void *dev_id)
++{
++	u8* txbuf;
++	struct fh_spi_controller *fh_spi;
++	u32 isr_status;
++	u32 rx_fifo_capability, tx_fifo_capability;
++	u16 data;
++	unsigned size;
++	fh_spi = (struct fh_spi_controller *) dev_id;
++	data = 0x00;
++	txbuf = (u8*) fh_spi->dwc.tx_buff;
++	isr_status = Spi_Isrstatus(&fh_spi->dwc);
++	/* this transfer total size. */
++	size = fh_spi->active_transfer->len;
++	if (isr_status & (SPI_IRQ_TXOIM | SPI_IRQ_RXUIM | SPI_IRQ_RXOIM)) {
++		Spi_Clearallerror(&fh_spi->dwc);
++		/* error handle */
++		dev_err(&fh_spi->p_dev->dev, "spi isr status:%x\n", isr_status);
++		WARN_ON(1);
++	}
++
++	Spi_DisableIrq(&fh_spi->dwc, fh_spi->dwc.isr_flag);
++
++	if (fh_spi->dwc.transfer_mode == SPI_TX_RX_MODE) {
++
++		tx_fifo_capability = tx_max(fh_spi);
++		rx_fifo_capability = rx_max(fh_spi);
++		fh_spi->dwc.rx_len += rx_fifo_capability;
++		while (rx_fifo_capability) {
++			data = Spi_ReadData(&fh_spi->dwc);
++			if (fh_spi->dwc.rx_buff != NULL) {
++				*(u8 *) fh_spi->dwc.rx_buff++ = (u8) data;
++			}
++			rx_fifo_capability--;
++		}
++
++		if (fh_spi->dwc.rx_len == size) {
++			complete(&(fh_spi->done));
++			return IRQ_HANDLED;
++		}
++
++		fh_spi->dwc.tx_len -= tx_fifo_capability;
++		while (tx_fifo_capability) {
++			data = 0x0;
++
++			if (fh_spi->dwc.tx_buff != NULL) {
++				data = *(u8*) fh_spi->dwc.tx_buff++;
++			}
++			Spi_WriteData(&fh_spi->dwc, data);
++			tx_fifo_capability--;
++		}
++		Spi_EnableIrq(&fh_spi->dwc, fh_spi->dwc.isr_flag);
++
++	} else if (fh_spi->dwc.transfer_mode == SPI_ONLY_TX_MODE) {
++		tx_fifo_capability = tx_max(fh_spi);
++
++		fh_spi->dwc.tx_len -= tx_fifo_capability;
++		while (tx_fifo_capability) {
++			/* data = 0x0; */
++			Spi_WriteData(&fh_spi->dwc, *txbuf++);
++			fh_spi->dwc.tx_buff++;
++			tx_fifo_capability--;
++		}
++		if (fh_spi->dwc.tx_len == 0) {
++			complete(&(fh_spi->done));
++			return IRQ_HANDLED;
++		}
++		/* reopen tx isr... */
++		Spi_EnableIrq(&fh_spi->dwc, fh_spi->dwc.isr_flag);
++	}
++	return IRQ_HANDLED;
++}
++
++static int fh_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
++{
++	u8 bits_per_word;
++	u32 hz;
++	u32 div;
++	struct fh_spi_controller *fh_spi = spi_master_get_devdata(spi->master);
++
++	bits_per_word = spi->bits_per_word;
++	if (t && t->bits_per_word)
++		bits_per_word = t->bits_per_word;
++
++	/*
++	 * Calculate speed:
++	 *  - by default, use maximum speed from ssp clk
++	 *  - if device overrides it, use it
++	 *  - if transfer specifies other speed, use transfer's one
++	 */
++	hz = fh_spi->dwc.max_freq;
++	if (spi->max_speed_hz)
++		hz = min(hz, spi->max_speed_hz);
++	if (t && t->speed_hz)
++		hz = min(hz, t->speed_hz);
++
++	if (hz == 0) {
++		dev_err(&spi->dev, "Cannot continue with zero clock\n");
++		WARN_ON(1);
++		return -EINVAL;
++	}
++
++	div = fh_spi->dwc.apb_clock_in / hz;
++	fh_spi->dwc.div = div;
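++	/* e.g. a 50 MHz apb clock and a 10 MHz request yield div = 5 */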
++
++	if (bits_per_word != 8) {
++		dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
++		                __func__, bits_per_word);
++		return -EINVAL;
++	}
++
++	if (spi->mode & SPI_CPOL)
++		fh_spi->dwc.cpol = SPI_POLARITY_HIGH;
++	else
++		fh_spi->dwc.cpol = SPI_POLARITY_LOW;
++
++	if (spi->mode & SPI_CPHA)
++		fh_spi->dwc.cpha = SPI_PHASE_TX_FIRST;
++	else
++		fh_spi->dwc.cpha = SPI_PHASE_RX_FIRST;
++
++	Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
++	Spi_SetPolarity(&fh_spi->dwc, fh_spi->dwc.cpol);
++	Spi_SetPhase(&fh_spi->dwc, fh_spi->dwc.cpha);
++	Spi_SetBaudrate(&fh_spi->dwc, fh_spi->dwc.div);
++	Spi_SetRxdelay(&fh_spi->dwc, 1);
++	Spi_DisableIrq(&fh_spi->dwc, SPI_IRQ_ALL);
++	Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
++	fh_spi_setup(spi);
++	return 0;
++}
++
++static int isr_pump_data(struct fh_spi_controller *fh_spi)
++{
++	u32 status;
++	/* first clear isr... */
++	fh_spi->dwc.isr_flag &= ~(SPI_IRQ_TXEIM | SPI_IRQ_RXFIM);
++	Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
++
++	if ((fh_spi->dwc.rx_buff == NULL) && (fh_spi->dwc.tx_buff != NULL)) {
++
++		fh_spi->dwc.isr_flag |= SPI_IRQ_TXEIM;
++		Spi_SetTxlevel(&fh_spi->dwc, fh_spi->dwc.fifo_len - 1);
++		Spi_EnableIrq(&fh_spi->dwc, fh_spi->dwc.isr_flag);
++		fh_spi->dwc.transfer_mode = SPI_ONLY_TX_MODE;
++
++	} else {
++		/* tx & rx.. */
++		fh_spi->dwc.isr_flag |= SPI_IRQ_TXEIM;
++		Spi_SetTxlevel(&fh_spi->dwc, fh_spi->dwc.fifo_len - 1);
++		Spi_EnableIrq(&fh_spi->dwc, fh_spi->dwc.isr_flag);
++		fh_spi->dwc.transfer_mode = SPI_TX_RX_MODE;
++	}
++
++	Spi_SetTransferMode(&fh_spi->dwc, fh_spi->dwc.transfer_mode);
++	Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
++	wait_for_completion(&fh_spi->done);
++	/* add wait spi idle.. */
++	do {
++		status = Spi_ReadStatus(&fh_spi->dwc);
++	} while (status & 0x01);
++
++	return 0;
++}
++
++static bool fh_spi_dma_chan_filter(struct dma_chan *chan, void *param)
++{
++	int dma_channel = *(int *) param;
++	bool ret = false;
++
++	if (chan->chan_id == dma_channel) {
++		ret = true;
++	}
++	return ret;
++}
++
++static void fh_spi_rx_dma_done(void *arg)
++{
++	struct fh_spi_controller *fh_spi = (struct fh_spi_controller *) arg;
++	fh_spi->dwc.complete_times++;
++	if (fh_spi->dwc.complete_times == 2) {
++		fh_spi->dwc.complete_times = 0;
++		complete(&(fh_spi->done));
++	}
++}
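++/*
++ * Both the tx and the rx dma channel share this callback; the transfer
++ * is treated as finished only after it has fired twice (once per
++ * direction), which is what dma_pump_data() waits on.
++ */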
++
++
++
++static int dma_set_tx_para(struct fh_spi_controller *fh_spi)
++{
++	struct fh_dma_pri fh_pri;
++	struct dma_slave_config *tx_config;
++	struct spi_transfer *t;
++	struct dma_chan *txchan;
++	struct scatterlist *p_sca_list;
++	unsigned int sg_size = 0;
++	int i, xfer_len, one_sg_data_len;
++	unsigned char *temp_buf;
++	t = fh_spi->active_transfer;
++	memset(&fh_spi->dwc.dma_tx.cfg, 0, sizeof(struct dma_slave_config));
++	txchan = fh_spi->dwc.dma_tx.chan;
++	tx_config = &fh_spi->dwc.dma_tx.cfg;
++	tx_config->dst_addr = fh_spi->dwc.paddr + SPI_DATA_REG_OFFSET;
++	/* set the spi data tx reg */
++	tx_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++	tx_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++	tx_config->slave_id = fh_spi->dwc.tx_hs_no;
++	tx_config->src_maxburst = 4;
++	tx_config->dst_maxburst = 4;
++	tx_config->direction = DMA_MEM_TO_DEV;
++	tx_config->device_fc = FALSE;
++	xfer_len = t->len;
++	temp_buf = (unsigned char*)t->tx_buf;
++	sg_size = t->len / SG_ONE_TIME_MAX_SIZE;
++	if (t->len % SG_ONE_TIME_MAX_SIZE)
++		sg_size++;
++	if (sg_size > MAX_SG_LEN) {
++		printk("%s_%d :: too large sg size:0x%x\n",
++				__func__, __LINE__, sg_size);
++		return -1;
++	}
++	p_sca_list = &fh_spi->dwc.dma_tx.sgl[0];
++	for (i = 0; i < sg_size; i++, p_sca_list++) {
++		one_sg_data_len = min(xfer_len, SG_ONE_TIME_MAX_SIZE);
++		xfer_len -= one_sg_data_len;
++
++		if (t->tx_buf == NULL) {
++			/* no user data: feed the fixed dummy word */
++			fh_pri.sinc = FH_DMA_SLAVE_FIX;
++			p_sca_list->dma_address = fh_spi->dwc.tx_dumy_dma_add;
++		} else {
++			fh_pri.sinc = FH_DMA_SLAVE_INC;
++			p_sca_list->dma_address = dma_map_single(
++		                txchan->dev->device.parent,
++		                (void*) temp_buf, one_sg_data_len,
++		                DMA_TO_DEVICE);
++			fh_spi->dwc.dma_tx.sgl_data_size[i] = one_sg_data_len;
++			temp_buf += one_sg_data_len;
++		}
++		p_sca_list->length = one_sg_data_len;
++	}
++	fh_pri.dinc = FH_DMA_SLAVE_FIX;
++	dmaengine_slave_config(txchan, tx_config);
++
++	fh_spi->dwc.dma_tx.desc = txchan->device->device_prep_slave_sg(txchan,
++				&fh_spi->dwc.dma_tx.sgl[0], sg_size, DMA_MEM_TO_DEV,
++				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP,
++				&fh_pri);
++	fh_spi->dwc.dma_tx.actual_sgl_size = sg_size;
++	fh_spi->dwc.dma_tx.desc->callback = fh_spi_rx_dma_done;
++	fh_spi->dwc.dma_tx.desc->callback_param = fh_spi;
++	return 0;
++
++}
++
++static int dma_set_rx_para(struct fh_spi_controller *fh_spi)
++{
++
++	struct fh_dma_pri fh_pri;
++	struct dma_slave_config *rx_config;
++	struct spi_transfer *t;
++	struct dma_chan *rxchan;
++	struct scatterlist *p_sca_list;
++	unsigned int sg_size = 0;
++	int i, xfer_len, one_sg_data_len;
++	unsigned char *temp_buf;
++
++	t = fh_spi->active_transfer;
++	rxchan = fh_spi->dwc.dma_rx.chan;
++	memset(&fh_spi->dwc.dma_rx.cfg, 0, sizeof(struct dma_slave_config));
++	rx_config = &fh_spi->dwc.dma_rx.cfg;
++	rx_config->src_addr = fh_spi->dwc.paddr + SPI_DATA_REG_OFFSET;
++	rx_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++	rx_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++	rx_config->slave_id = fh_spi->dwc.rx_hs_no;
++	rx_config->src_maxburst = 4;
++	rx_config->dst_maxburst = 4;
++	rx_config->device_fc = FALSE;
++	rx_config->direction = DMA_DEV_TO_MEM;
++	xfer_len = t->len;
++	temp_buf = (unsigned char*)t->rx_buf;
++	sg_size = t->len / SG_ONE_TIME_MAX_SIZE;
++	if (t->len % SG_ONE_TIME_MAX_SIZE)
++		sg_size++;
++	if (sg_size > MAX_SG_LEN) {
++		printk("%s_%d :: too large sg size:0x%x\n",
++				__func__, __LINE__, sg_size);
++		return -1;
++	}
++	p_sca_list = &fh_spi->dwc.dma_rx.sgl[0];
++	for (i = 0; i < sg_size; i++, p_sca_list++) {
++		one_sg_data_len = min(xfer_len, SG_ONE_TIME_MAX_SIZE);
++		xfer_len -= one_sg_data_len;
++		if (t->rx_buf == NULL) {
++			/* no user buffer: sink data into the fixed dummy word */
++			fh_pri.dinc = FH_DMA_SLAVE_FIX;
++			p_sca_list->dma_address = fh_spi->dwc.rx_dumy_dma_add;
++		} else {
++			fh_pri.dinc = FH_DMA_SLAVE_INC;
++			p_sca_list->dma_address = dma_map_single(
++		                rxchan->dev->device.parent,
++		                (void*) temp_buf, one_sg_data_len,
++		                DMA_FROM_DEVICE);
++			fh_spi->dwc.dma_rx.sgl_data_size[i] = one_sg_data_len;
++			temp_buf += one_sg_data_len;
++		}
++		p_sca_list->length = one_sg_data_len;
++	}
++
++	fh_pri.sinc = FH_DMA_SLAVE_FIX;
++	dmaengine_slave_config(rxchan, rx_config);
++	fh_spi->dwc.dma_rx.desc = rxchan->device->device_prep_slave_sg(rxchan,
++	                &fh_spi->dwc.dma_rx.sgl[0], sg_size, DMA_DEV_TO_MEM,
++	                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP,
++	                &fh_pri);
++	fh_spi->dwc.dma_rx.actual_sgl_size = sg_size;
++	fh_spi->dwc.dma_rx.desc->callback = fh_spi_rx_dma_done;
++	fh_spi->dwc.dma_rx.desc->callback_param = fh_spi;
++
++	return 0;
++
++}
++
++void unmap_dma_tx_sg(struct fh_spi_controller *fh_spi)
++{
++	struct dma_chan *txchan;
++	struct scatterlist *p_sca_list;
++	int i;
++
++	txchan = fh_spi->dwc.dma_tx.chan;
++	p_sca_list = &fh_spi->dwc.dma_tx.sgl[0];
++	for (i = 0; i < fh_spi->dwc.dma_tx.actual_sgl_size; i++, p_sca_list++) {
++		/* dma_unmap_single takes a dma_data_direction, not DMA_MEM_TO_DEV */
++		dma_unmap_single(txchan->dev->device.parent,
++				p_sca_list->dma_address,
++				fh_spi->dwc.dma_tx.sgl_data_size[i], DMA_TO_DEVICE);
++	}
++}
++
++void unmap_dma_rx_sg(struct fh_spi_controller *fh_spi)
++{
++	struct dma_chan *rxchan;
++	struct scatterlist *p_sca_list;
++	int i;
++
++	rxchan = fh_spi->dwc.dma_rx.chan;
++	p_sca_list = &fh_spi->dwc.dma_rx.sgl[0];
++	for (i = 0; i < fh_spi->dwc.dma_rx.actual_sgl_size; i++, p_sca_list++) {
++		dma_unmap_single(rxchan->dev->device.parent,
++				p_sca_list->dma_address,
++				fh_spi->dwc.dma_rx.sgl_data_size[i], DMA_FROM_DEVICE);
++	}
++}
++
++
++static int dma_pump_data(struct fh_spi_controller *fh_spi)
++{
++	struct spi_transfer *t;
++	struct dma_chan *rxchan;
++	struct dma_chan *txchan;
++	int ret;
++	t = fh_spi->active_transfer;
++	txchan = fh_spi->dwc.dma_tx.chan;
++	rxchan = fh_spi->dwc.dma_rx.chan;
++	init_completion(&fh_spi->done);
++	ret = dma_set_tx_para(fh_spi);
++	if (ret != 0) {
++		return ret;
++	}
++	ret = dma_set_rx_para(fh_spi);
++	if (ret != 0) {
++		unmap_dma_tx_sg(fh_spi);
++		return ret;
++	}
++	Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
++	Spi_SetDmaRxDataLevel(&fh_spi->dwc, 3);
++	Spi_SetDmaControlEnable(&fh_spi->dwc, SPI_DMA_RX_POS);
++	Spi_SetDmaTxDataLevel(&fh_spi->dwc, 4);
++	Spi_SetDmaControlEnable(&fh_spi->dwc, SPI_DMA_TX_POS);
++	Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
++	fh_spi->dwc.dma_rx.desc->tx_submit(fh_spi->dwc.dma_rx.desc);
++	fh_spi->dwc.dma_tx.desc->tx_submit(fh_spi->dwc.dma_tx.desc);
++	wait_for_completion(&fh_spi->done);
++	if (t->tx_buf != NULL) {
++		unmap_dma_tx_sg(fh_spi);
++	}
++	if (t->rx_buf != NULL) {
++		unmap_dma_rx_sg(fh_spi);
++	}
++	Spi_SetDmaControlDisable(&fh_spi->dwc, SPI_DMA_RX_POS);
++	Spi_SetDmaControlDisable(&fh_spi->dwc, SPI_DMA_TX_POS);
++	return 0;
++
++}
++
++static int poll_pump_data(struct fh_spi_controller *fh_spi)
++{
++	register u32 rx_fifo_capability, tx_fifo_capability;
++	u8* txbuf;
++	u8* rxbuf;
++	u16 data;
++	u32 size;
++	rxbuf = (u8*) fh_spi->dwc.rx_buff;
++	txbuf = (u8*) fh_spi->dwc.tx_buff;
++	size = fh_spi->active_transfer->len;
++
++	Spi_Enable(&fh_spi->dwc, SPI_DISABLE);
++	fh_spi->dwc.transfer_mode = SPI_TX_RX_MODE;
++	Spi_SetTransferMode(&fh_spi->dwc, fh_spi->dwc.transfer_mode);
++	Spi_Enable(&fh_spi->dwc, SPI_ENABLE);
++
++	goto first;
++
++start:
++	rx_fifo_capability = rx_max(fh_spi);
++	fh_spi->dwc.rx_len += rx_fifo_capability;
++
++	if (rxbuf != NULL) {
++		fh_spi->dwc.rx_buff += rx_fifo_capability;
++		while (rx_fifo_capability) {
++			*rxbuf++ = Spi_ReadData(&fh_spi->dwc);
++			rx_fifo_capability--;
++		}
++	} else {
++		while (rx_fifo_capability) {
++			data = Spi_ReadData(&fh_spi->dwc);
++			rx_fifo_capability--;
++		}
++	}
++
++	if (fh_spi->dwc.rx_len == size)
++		return 0;
++
++first:
++	tx_fifo_capability = tx_max(fh_spi);
++	fh_spi->dwc.tx_len -= tx_fifo_capability;
++	if (txbuf != NULL) {
++		fh_spi->dwc.tx_buff += tx_fifo_capability;
++		while (tx_fifo_capability) {
++			Spi_WriteData(&fh_spi->dwc, *txbuf++);
++			tx_fifo_capability--;
++		}
++	} else {
++		while (tx_fifo_capability) {
++			Spi_WriteData(&fh_spi->dwc, 0xff);
++			tx_fifo_capability--;
++		}
++	}
++
++	goto start;
++}
++
++static int fh_spi_handle_message(struct fh_spi_controller *fh_spi,
++                struct spi_message *m)
++{
++	bool first, last;
++	struct spi_transfer *t, *tmp_t;
++	int status = 0;
++	int cs_change;
++	cs_change = 1;
++	m->actual_length = 0;
++	fh_spi->active_message = m;
++
++	list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list)
++	{
++		first = (&t->transfer_list == m->transfers.next);
++		last = (&t->transfer_list == m->transfers.prev);
++
++		if (first || t->speed_hz || t->bits_per_word)
++			fh_spi_setup_transfer(m->spi, t);
++
++		if (cs_change) {
++			Spi_EnableSlaveen(&fh_spi->dwc, fh_spi->dwc.slave_port);
++		}
++		cs_change = t->cs_change;
++
++		fh_spi->active_transfer = t;
++		fh_spi->dwc.tx_len = t->len;
++		fh_spi->dwc.rx_len = 0;
++		fh_spi->dwc.tx_buff = (void *) t->tx_buf;
++		fh_spi->dwc.rx_buff = t->rx_buf;
++
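++		/*
++		 * Transfers shorter than DMA_TRANS_GATE_LEVEL (1 KiB) are
++		 * assumed not to be worth the DMA setup cost, so they fall
++		 * back to polling.
++		 */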
++		if (fh_spi->dwc.board_info->dma_transfer_enable
++				== SPI_TRANSFER_USE_DMA) {
++
++			if(fh_spi->dwc.tx_len < DMA_TRANS_GATE_LEVEL) {
++				fh_spi->dwc.pump_data_mode = PUMP_DATA_POLL_MODE;
++				goto pump_data;
++			}
++			fh_spi->dwc.pump_data_mode = PUMP_DATA_DMA_MODE;
++		}
++		else {
++			fh_spi->dwc.pump_data_mode = PUMP_DATA_POLL_MODE;
++		}
++
++pump_data:
++		switch (fh_spi->dwc.pump_data_mode) {
++		case PUMP_DATA_DMA_MODE:
++			status = dma_pump_data(fh_spi);
++			if (status == 0)
++				break;
++			WARN_ON(1);
++			dev_err(&fh_spi->p_dev->dev,
++					"spi dma pump data error\n");
++			fh_spi->dwc.pump_data_mode = PUMP_DATA_POLL_MODE;
++			/* fall through: retry this transfer via the isr pump */
++		case PUMP_DATA_ISR_MODE:
++			status = isr_pump_data(fh_spi);
++			break;
++		case PUMP_DATA_POLL_MODE:
++			status = poll_pump_data(fh_spi);
++			break;
++		default:
++			status = -1;
++			WARN_ON(1);
++			dev_err(&fh_spi->p_dev->dev,
++					"spi pump data mode error..\n");
++		}
++
++		if (!cs_change && last) {
++			Spi_DisableSlaveen(&fh_spi->dwc,
++					fh_spi->dwc.slave_port);
++		}
++		/*
++		 * if (t->delay_usecs)
++		 *	udelay(t->delay_usecs);
++		 */
++		m->actual_length += t->len;
++		if (status)
++			break;
++
++	}
++
++	return status;
++
++}
++
++static void fh_spi_handle(struct work_struct *w)
++{
++	struct fh_spi_controller
++	*fh_spi = container_of(w, struct fh_spi_controller, work);
++	unsigned long flags;
++	struct spi_message *m;
++	spin_lock_irqsave(&fh_spi->lock, flags);
++	while (!list_empty(&fh_spi->queue)) {
++		m = list_entry(fh_spi->queue.next, struct spi_message, queue);
++		list_del_init(&m->queue);
++		spin_unlock_irqrestore(&fh_spi->lock, flags);
++		m->status = fh_spi_handle_message(fh_spi, m);
++		if (m->complete)
++			m->complete(m->context);
++
++		spin_lock_irqsave(&fh_spi->lock, flags);
++	}
++	spin_unlock_irqrestore(&fh_spi->lock, flags);
++
++}
++
++static int fh_spi_transfer(struct spi_device *spi, struct spi_message *m)
++{
++	struct fh_spi_controller *fh_spi = spi_master_get_devdata(spi->master);
++	unsigned long flags;
++	m->status = -EINPROGRESS;
++	spin_lock_irqsave(&fh_spi->lock, flags);
++	list_add_tail(&m->queue, &fh_spi->queue);
++	spin_unlock_irqrestore(&fh_spi->lock, flags);
++	queue_work(fh_spi->workqueue, &fh_spi->work);
++	return 0;
++}
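++/*
++ * fh_spi_transfer() only queues the message: the controller runs a
++ * single-threaded workqueue (see fh_spi_handle) that drains the queue
++ * and completes each message in order, so callers never block in the
++ * spi core's submit path.
++ */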
++
++static int fh_spi_setup(struct spi_device *spi)
++{
++	/* spi_setup() does basic checks,
++	 * stmp_spi_setup_transfer() does more later
++	 */
++	struct fh_spi_controller *fh_spi = spi_master_get_devdata(spi->master);
++
++	/* validate chip_select before using it as an array index */
++	if (spi->chip_select >= fh_spi->dwc.num_cs) {
++		dev_err(&spi->dev, "%s, unsupported chip select no=%d\n",
++		                __func__, spi->chip_select);
++		return -EINVAL;
++	}
++	fh_spi->dwc.active_cs_pin =
++	                fh_spi->dwc.cs_data[spi->chip_select].GPIO_Pin;
++	fh_spi->dwc.slave_port = 1 << spi->chip_select;
++
++	if (spi->bits_per_word != 8) {
++		dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
++		                __func__, spi->bits_per_word);
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static void fh_spi_cleanup(struct spi_device *spi)
++{
++
++}
++
++static int __devinit fh_spi_probe(struct platform_device *dev)
++{
++	int err = 0;
++	struct spi_master *master;
++	struct fh_spi_controller *fh_spi;
++	struct resource *r, *ioarea;
++	int ret, i, j;
++	dma_cap_mask_t mask;
++	int filter_no;
++	struct fh_spi_platform_data * spi_platform_info;
++
++	spi_platform_info =
++	                (struct fh_spi_platform_data *) dev->dev.platform_data;
++	if (spi_platform_info == NULL) {
++
++		err = -ENODEV;
++		goto out0;
++	}
++
++	master = spi_alloc_master(&dev->dev, sizeof(struct fh_spi_controller));
++	if (master == NULL) {
++		err = -ENOMEM;
++		dev_err(&dev->dev, "%s, master malloc failed.\n", __func__);
++		goto out0;
++	}
++
++	fh_spi = spi_master_get_devdata(master);
++	if (!fh_spi) {
++		dev_err(&dev->dev, "%s, master dev data is null.\n", __func__);
++		err = -ENOMEM;
++		goto out_put_master;
++	}
++	fh_spi->master_dev = &dev->dev;
++	fh_spi->p_dev = dev;
++	platform_set_drvdata(dev, master);
++
++	/* dwc.irq is unsigned, so check platform_get_irq()'s result first */
++	err = platform_get_irq(dev, 0);
++	if (err < 0) {
++		dev_err(&dev->dev, "%s, failed to get spi irq.\n", __func__);
++		goto out_set_plat_drvdata_null;
++	}
++	fh_spi->dwc.irq = err;
++
++	err = request_irq(fh_spi->dwc.irq, fh_spi_irq, 0, dev_name(&dev->dev),
++	                fh_spi);
++	if (err) {
++		dev_dbg(&dev->dev, "request_irq failed, %d\n", err);
++		goto out_set_plat_drvdata_null;
++	}
++
++	/* Get resources(memory, IRQ) associated with the device */
++	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
++	if (r == NULL) {
++		dev_err(&dev->dev, "%s, spi ioresource error. \n", __func__);
++		err = -ENODEV;
++		goto out_free_irq;
++	}
++
++	fh_spi->dwc.paddr = r->start;
++	ioarea = request_mem_region(r->start, resource_size(r), dev->name);
++	if (!ioarea) {
++		dev_err(&dev->dev, "spi region already claimed\n");
++		err = -EBUSY;
++		goto out_free_irq;
++	}
++
++	fh_spi->dwc.regs = ioremap(r->start, resource_size(r));
++	if (!fh_spi->dwc.regs) {
++		dev_err(&dev->dev, "spi region already mapped\n");
++		err = -EINVAL;
++		goto out_relase_mem_region;
++	}
++
++	INIT_WORK(&fh_spi->work, fh_spi_handle);
++	init_completion(&fh_spi->done);
++	INIT_LIST_HEAD(&fh_spi->queue);
++	spin_lock_init(&fh_spi->lock);
++
++	fh_spi->workqueue = create_singlethread_workqueue(dev_name(&dev->dev));
++	if (!fh_spi->workqueue) {
++		err = -ENXIO;
++		goto out_iounmap;
++	}
++	master->transfer = fh_spi_transfer;
++	master->setup = fh_spi_setup;
++	master->cleanup = fh_spi_cleanup;
++
++	/* the spi->mode bits understood by this driver: */
++	master->mode_bits = SPI_CPOL | SPI_CPHA;
++	master->bus_num = dev->id;
++	spi_platform_info->bus_no = dev->id;
++
++	master->num_chipselect = spi_platform_info->slave_max_num;
++	/* parse the controller board info~~~ */
++	/* clk enable in the func */
++	ret = fh_spi_init_hw(fh_spi, spi_platform_info);
++	if (ret) {
++		err = ret;
++		goto out_destroy_queue;
++	}
++	fh_spi->clk = clk_get(&fh_spi->p_dev->dev, spi_platform_info->clk_name);
++
++	if (IS_ERR(fh_spi->clk)) {
++		dev_err(&fh_spi->p_dev->dev, "cannot find the spi%d clk.\n",
++		                fh_spi->dwc.id);
++		err = PTR_ERR(fh_spi->clk);
++		goto out_destroy_queue;
++	}
++
++	clk_enable(fh_spi->clk);
++	fh_spi->dwc.apb_clock_in = clk_get_rate(fh_spi->clk);
++	if (spi_platform_info->apb_clock_in > fh_spi->dwc.apb_clock_in) {
++		clk_set_rate(fh_spi->clk, spi_platform_info->apb_clock_in);
++		fh_spi->dwc.apb_clock_in = spi_platform_info->apb_clock_in;
++	}
++	fh_spi->dwc.max_freq = fh_spi->dwc.apb_clock_in / 2;
++
++	/* request the cs gpio */
++	for (i = 0; i < fh_spi->dwc.num_cs; i++) {
++		ret = gpio_request(fh_spi->dwc.cs_data[i].GPIO_Pin,
++		                fh_spi->dwc.cs_data[i].name);
++		if (ret) {
++			dev_err(&dev->dev,
++			                "spi failed to request the gpio:%d\n",
++			                fh_spi->dwc.cs_data[i].GPIO_Pin);
++			/* release the gpio already request.. */
++			if (i != 0) {
++				for (j = 0; j < i; j++) {
++					gpio_free(
++					                fh_spi->dwc.cs_data[j].GPIO_Pin);
++				}
++			}
++			err = ret;
++			/* clk disable */
++			goto out_clk_disable;
++		}
++		/* set the dir: cs is active low, park it high (inactive) */
++		gpio_direction_output(fh_spi->dwc.cs_data[i].GPIO_Pin, 1);
++	}
++	/* fix:need use the platform dma channel.. not 0 and 1.... */
++	if (fh_spi->dwc.board_info->dma_transfer_enable == SPI_TRANSFER_USE_DMA) {
++		filter_no = fh_spi->dwc.board_info->tx_dma_channel;
++		dma_cap_zero(mask);
++		dma_cap_set(DMA_SLAVE, mask);
++		fh_spi->dwc.dma_tx.chan = dma_request_channel(mask,
++		                fh_spi_dma_chan_filter, &filter_no);
++
++		if (!fh_spi->dwc.dma_tx.chan) {
++			dev_err(&fh_spi->p_dev->dev,
++			                "spi%d request dma channel error....\n",
++			                fh_spi->dwc.id);
++			fh_spi->dwc.board_info->dma_transfer_enable = 0;
++			goto step_register_master;
++		}
++		filter_no = fh_spi->dwc.board_info->rx_dma_channel;
++		fh_spi->dwc.dma_rx.chan = dma_request_channel(mask,
++		                fh_spi_dma_chan_filter, &filter_no);
++
++		if (!fh_spi->dwc.dma_rx.chan) {
++			dev_err(&fh_spi->p_dev->dev,
++			                "spi%d request dma channel error....\n",
++			                fh_spi->dwc.id);
++			dma_release_channel(fh_spi->dwc.dma_tx.chan);
++			fh_spi->dwc.board_info->dma_transfer_enable = 0;
++			goto step_register_master;
++		}
++
++		fh_spi->dwc.tx_dumy_dma_add = dma_map_single(
++		                fh_spi->dwc.dma_tx.chan->dev->device.parent,
++		                (void*) fh_spi->dwc.tx_dumy_buff,
++		                sizeof(fh_spi->dwc.tx_dumy_buff),
++		                DMA_TO_DEVICE);
++
++		fh_spi->dwc.rx_dumy_dma_add = dma_map_single(
++		                fh_spi->dwc.dma_rx.chan->dev->device.parent,
++		                (void*) fh_spi->dwc.rx_dumy_buff,
++		                sizeof(fh_spi->dwc.rx_dumy_buff),
++		                DMA_FROM_DEVICE);
++
++	}
++
++step_register_master:
++	err = spi_register_master(master);
++	if (err) {
++		dev_dbg(&dev->dev, "cannot register spi master, %d\n", err);
++		goto out_gpio_tree;
++	}
++
++	return 0;
++
++out_gpio_tree:
++	for (i = 0; i < fh_spi->dwc.num_cs; i++) {
++		gpio_free(fh_spi->dwc.cs_data[i].GPIO_Pin);
++	}
++out_clk_disable:
++	clk_disable(fh_spi->clk);
++out_destroy_queue:
++	destroy_workqueue(fh_spi->workqueue);
++out_iounmap:
++	iounmap(fh_spi->dwc.regs);
++out_relase_mem_region:
++	release_mem_region(r->start, resource_size(r));
++out_free_irq:
++	free_irq(fh_spi->dwc.irq, fh_spi);
++out_set_plat_drvdata_null:
++	memset(fh_spi, 0, sizeof(struct fh_spi_controller));
++	platform_set_drvdata(dev, NULL);
++out_put_master:
++	spi_master_put(master);
++out0:
++	return err;
++
++}
++
++static int __devexit fh_spi_remove(struct platform_device *dev)
++{
++	struct resource *r;
++	struct spi_master *master;
++	struct fh_spi_controller *fh_spi;
++	int i;
++	master = platform_get_drvdata(dev);
++	if (master == NULL)
++		goto out0;
++
++	fh_spi = spi_master_get_devdata(master);
++	spi_unregister_master(master);
++	for (i = 0; i < fh_spi->dwc.num_cs; i++) {
++		gpio_free(fh_spi->dwc.cs_data[i].GPIO_Pin);
++	}
++	clk_disable(fh_spi->clk);
++	if (fh_spi->dwc.pump_data_mode == PUMP_DATA_DMA_MODE) {
++		if (fh_spi->dwc.dma_rx.chan) {
++			dma_release_channel(fh_spi->dwc.dma_rx.chan);
++			fh_spi->dwc.dma_rx.chan->private = NULL;
++		}
++		if (fh_spi->dwc.dma_tx.chan) {
++			dma_release_channel(fh_spi->dwc.dma_tx.chan);
++			fh_spi->dwc.dma_tx.chan->private = NULL;
++		}
++	}
++	destroy_workqueue(fh_spi->workqueue);
++	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
++	iounmap(fh_spi->dwc.regs);
++	release_mem_region(r->start, resource_size(r));
++	free_irq(fh_spi->dwc.irq, fh_spi);
++	memset(fh_spi, 0, sizeof(struct fh_spi_controller));
++	platform_set_drvdata(dev, NULL);
++	spi_master_put(master);
++
++out0:
++	return 0;
++
++}
++
++static struct platform_driver fh_spi_driver = {
++		.probe = fh_spi_probe,
++		.remove = __devexit_p(fh_spi_remove),
++		.driver = {
++				.name = "fh_spi",
++				.owner = THIS_MODULE,
++		},
++		.suspend = NULL,
++		.resume = NULL,
++};
++
++static int __init fh_spi_init(void)
++{
++	return platform_driver_register(&fh_spi_driver);
++}
++
++static void __exit fh_spi_exit(void)
++{
++	platform_driver_unregister(&fh_spi_driver);
++}
++
++module_init(fh_spi_init);
++module_exit(fh_spi_exit);
++MODULE_AUTHOR("yu.zhang <zhangy@fullhan.com>");
++MODULE_DESCRIPTION("DUOBAO SPI driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/spi/fh_spi_slave.c b/drivers/spi/fh_spi_slave.c
+new file mode 100644
+index 00000000..f3e864a4
+--- /dev/null
++++ b/drivers/spi/fh_spi_slave.c
+@@ -0,0 +1,979 @@
++/*
++ * fh_spi_slave.c
++ *
++ *  Created on: Sep 19, 2016
++ *      Author: duobao
++ */
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/spi/spi.h>
++#include <linux/scatterlist.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/bug.h>
++#include <linux/completion.h>
++#include <linux/gpio.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <mach/fh_dmac.h>
++#include <linux/fs.h>
++#include <linux/ioctl.h>
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <mach/io.h>
++#include <linux/kfifo.h>
++#include <mach/spi.h>
++
++#define lift_shift_bit_num(bit_num)	(1<<bit_num)
++//read spi irq, only useful if you set which is masked
++#define SPI_IRQ_TXEIS	(lift_shift_bit_num(0))
++#define SPI_IRQ_TXOIS	(lift_shift_bit_num(1))
++#define SPI_IRQ_RXUIS	(lift_shift_bit_num(2))
++#define SPI_IRQ_RXOIS	(lift_shift_bit_num(3))
++#define SPI_IRQ_RXFIS	(lift_shift_bit_num(4))
++#define SPI_IRQ_MSTIS	(lift_shift_bit_num(5))
++//spi status
++#define SPI_STATUS_BUSY         (lift_shift_bit_num(0))
++#define SPI_STATUS_TFNF         (lift_shift_bit_num(1))
++#define SPI_STATUS_TFE          (lift_shift_bit_num(2))
++#define SPI_STATUS_RFNE         (lift_shift_bit_num(3))
++#define SPI_STATUS_RFF          (lift_shift_bit_num(4))
++#define SPI_STATUS_TXE          (lift_shift_bit_num(5))
++#define SPI_STATUS_DCOL         (lift_shift_bit_num(6))
++#define CACHE_LINE_SIZE		(32)
++#define PUMP_DATA_NONE_MODE	(0x00)
++#define PUMP_DATA_DMA_MODE	(0x11)
++#define PUMP_DATA_ISR_MODE	(0x22)
++#define PUMP_DATA_POLL_MODE	(0x33)
++#define SPI_DIV_TRANSFER_SIZE	(256)
++#define SPI_DATA_REG_OFFSET	(0x60)
++#define KFIFO_SIZE		2048
++#define DUMY_DATA		0xff
++#define SPI_SLAVE_MAX_FIFO_SIZE	256
++#define SLAVE_SET_PHASE		1
++#define SLAVE_SET_POLARITY	(SLAVE_SET_PHASE + 1)
++#define SLAVE_INIT_RX_FIFO	(SLAVE_SET_POLARITY + 1)
++#define SLAVE_INIT_TX_FIFO	(SLAVE_INIT_RX_FIFO + 1)
++#define SLAVE_GET_ERROR_STATUS	(SLAVE_INIT_TX_FIFO + 1)
++
++//#define FH_SPI_SLAVE_DEBUG
++#define MAX_SPI_SLAVES			8
++/****************************************************************************
++ * ADT section
++ *  add definitions of user-defined data types used only in this file here
++ ***************************************************************************/
++enum {
++	CONFIG_OK = 0, CONFIG_PARA_ERROR = lift_shift_bit_num(0),
++	//only for the set slave en/disable
++	CONFIG_BUSY = lift_shift_bit_num(1),
++	//only for write_read mode
++	WRITE_READ_OK = 0,
++	WRITE_READ_ERROR = lift_shift_bit_num(2),
++	WRITE_READ_TIME_OUT = lift_shift_bit_num(3),
++	//only for write only mode
++	WRITE_ONLY_OK = 0,
++	WRITE_ONLY_ERROR = lift_shift_bit_num(4),
++	WRITE_ONLY_TIME_OUT = lift_shift_bit_num(5),
++	//only for read only mode
++	READ_ONLY_OK = 0,
++	READ_ONLY_ERROR = lift_shift_bit_num(6),
++	READ_ONLY_TIME_OUT = lift_shift_bit_num(7),
++	//eeprom mode
++	EEPROM_OK = 0,
++	EEPROM_ERROR = lift_shift_bit_num(8),
++	EEPROM_TIME_OUT = lift_shift_bit_num(9),
++	//if a read/write/eeprom transfer fails, the codes below give more detail (see the 'Spi_ReadTransferError' function)
++	MULTI_MASTER_ERROR = lift_shift_bit_num(10),
++	TX_OVERFLOW_ERROR = lift_shift_bit_num(11),
++	RX_OVERFLOW_ERROR = lift_shift_bit_num(12),
++};
++
++//enable spi
++typedef enum enum_spi_enable {
++	SPI_DISABLE = 0,
++	SPI_ENABLE = (lift_shift_bit_num(0)),
++} spi_enable_e;
++
++//polarity
++typedef enum enum_spi_polarity {
++	SPI_POLARITY_LOW = 0,
++	SPI_POLARITY_HIGH = (lift_shift_bit_num(7)),
++	//bit pos
++	SPI_POLARITY_RANGE = (lift_shift_bit_num(7)),
++} spi_polarity_e;
++
++//phase
++typedef enum enum_spi_phase {
++	SPI_PHASE_RX_FIRST = 0,
++	SPI_PHASE_TX_FIRST = (lift_shift_bit_num(6)),
++	//bit pos
++	SPI_PHASE_RANGE = (lift_shift_bit_num(6)),
++} spi_phase_e;
++
++//frame format
++typedef enum enum_spi_format {
++	SPI_MOTOROLA_MODE = 0x00,
++	SPI_TI_MODE = 0x10,
++	SPI_MICROWIRE_MODE = 0x20,
++	//bit pos
++	SPI_FRAME_FORMAT_RANGE = 0x30,
++} spi_format_e;
++
++//data size
++typedef enum enum_spi_data_size {
++	SPI_DATA_SIZE_4BIT = 0x03,
++	SPI_DATA_SIZE_5BIT = 0x04,
++	SPI_DATA_SIZE_6BIT = 0x05,
++	SPI_DATA_SIZE_7BIT = 0x06,
++	SPI_DATA_SIZE_8BIT = 0x07,
++	SPI_DATA_SIZE_9BIT = 0x08,
++	SPI_DATA_SIZE_10BIT = 0x09,
++	SPI_DATA_SIZE_16BIT = 0x0f,
++	//bit pos
++	SPI_DATA_SIZE_RANGE = 0x0f,
++} spi_data_size_e;
++
++//transfer mode
++typedef enum enum_spi_transfer_mode {
++	SPI_TX_RX_MODE = 0x000,
++	SPI_ONLY_TX_MODE = 0x100,
++	SPI_ONLY_RX_MODE = 0x200,
++	SPI_EEPROM_MODE = 0x300,
++	//bit pos
++	SPI_TRANSFER_MODE_RANGE = 0x300,
++} spi_transfer_mode_e;
++
++//spi baudrate
++typedef enum enum_spi_baudrate {
++	SPI_SCLKIN = 50000000,
++	SPI_SCLKOUT_27000000 = (SPI_SCLKIN / 27000000), //27M
++	SPI_SCLKOUT_13500000 = (SPI_SCLKIN / 13500000),  //13.5M
++	SPI_SCLKOUT_6750000 = (SPI_SCLKIN / 6750000),  //6.75M
++	SPI_SCLKOUT_4500000 = (SPI_SCLKIN / 4500000),	 //4.5M
++	SPI_SCLKOUT_3375000 = (SPI_SCLKIN / 3375000),  //3.375M
++	SPI_SCLKOUT_2700000 = (SPI_SCLKIN / 2700000),	 //2.7M
++	SPI_SCLKOUT_1500000 = (SPI_SCLKIN / 1500000),  //1.5M
++	SPI_SCLKOUT_500000 = (SPI_SCLKIN / 500000),  //0.5M
++	SPI_SCLKOUT_100000 = (SPI_SCLKIN / 100000),  //0.1M
++} spi_baudrate_e;
++
++//spi_irq
++typedef enum enum_spi_irq {
++	SPI_IRQ_TXEIM = (lift_shift_bit_num(0)),
++	SPI_IRQ_TXOIM = (lift_shift_bit_num(1)),
++	SPI_IRQ_RXUIM = (lift_shift_bit_num(2)),
++	SPI_IRQ_RXOIM = (lift_shift_bit_num(3)),
++	SPI_IRQ_RXFIM = (lift_shift_bit_num(4)),
++	SPI_IRQ_MSTIM = (lift_shift_bit_num(5)),
++	SPI_IRQ_ALL = 0x3f,
++} spi_irq_e;
++
++//spi_slave_port
++typedef enum enum_spi_slave {
++	SPI_SLAVE_PORT0 = (lift_shift_bit_num(0)),
++	SPI_SLAVE_PORT1 = (lift_shift_bit_num(1)),
++} spi_slave_e;
++
++//dma control
++typedef enum enum_spi_dma_control_mode {
++	SPI_DMA_RX_POS = (lift_shift_bit_num(0)),
++	SPI_DMA_TX_POS = (lift_shift_bit_num(1)),
++	//bit pos
++	SPI_DMA_CONTROL_RANGE = 0x03,
++} spi_dma_control_mode_e;
++
++//frame format
++typedef enum enum_spi_slave_mode {
++	SPI_SLAVE_EN = 0x00,
++	SPI_SLAVE_DIS = 1 << 10,
++	//bit pos
++	SPI_SLAVE_MODE_RANGE = 1 << 10,
++} spi_slave_mode_e;
++
++#ifdef FH_SPI_SLAVE_DEBUG
++#define SPI_SLAVE_PRINT_DBG(fmt, args...) \
++	do { \
++		printk("[FH_SPI_S_DEBUG]: "); \
++		printk(fmt, ## args); \
++	} while (0)
++#else
++#define SPI_SLAVE_PRINT_DBG(fmt, args...)  do { } while (0)
++#endif
++
++struct fh_spi_reg {
++	u32 ctrl0;
++	u32 ctrl1;
++	u32 ssienr;
++	u32 mwcr;
++	u32 ser;
++	u32 baudr;
++	u32 txfltr;
++	u32 rxfltr;
++	u32 txflr;
++	u32 rxflr;
++	u32 sr;
++	u32 imr;
++	u32 isr;
++	u32 risr;
++	u32 txoicr;
++	u32 rxoicr;
++	u32 rxuicr;
++	u32 msticr;
++	u32 icr;
++	u32 dmacr;
++	u32 dmatdlr;
++	u32 dmardlr;
++	u32 idr;
++	u32 version;
++	u32 dr; /* currently accessed as 32 bits,
++	 though only the low 16 bits matter */
++};
++
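++/* register accessors: resolve a register by its struct fh_spi_reg field
++ * name, e.g. dw_readl(dw, sr) reads the status register */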
++#define dw_readl(dw, name) \
++	__raw_readl(&(((struct fh_spi_reg *)dw->regs)->name))
++#define dw_writel(dw, name, val) \
++	__raw_writel((val), &(((struct fh_spi_reg *)dw->regs)->name))
++#define dw_readw(dw, name) \
++	__raw_readw(&(((struct fh_spi_reg *)dw->regs)->name))
++#define dw_writew(dw, name, val) \
++	__raw_writew((val), &(((struct fh_spi_reg *)dw->regs)->name))
++
++struct _fh_spi_dma_transfer {
++	//bind to dma channel
++	struct dma_chan *chan;
++	struct dma_slave_config *cfg;
++	struct scatterlist sgl;
++	struct dma_async_tx_descriptor *desc;
++};
++
++struct fh_spi {
++	void * __iomem regs; /* vaddr of the control registers */
++	u32 id;
++	u32 paddr;
++	u32 slave_port;
++	u32 irq; /* irq no */
++	u32 fifo_len; /* depth of the FIFO buffer */
++	u32 cpol;
++	u32 cpha;
++	u32 isr_flag;
++	//clk
++	u32 apb_clock_in;
++	u32 max_freq; /* max bus freq supported */
++	u32 div;
++	/* bus number: 'id' is used instead */
++	u32 num_cs; /* supported slave numbers */
++	u32 data_width;
++	u32 frame_mode;
++	u32 transfer_mode;
++	u32 active_cs_pin;
++	//copy from the user...
++	u32 tx_len;
++	u32 rx_len;
++	void *rx_buff;
++	void *tx_buff;
++	u32 tx_dma_add;
++	u32 rx_dma_add;
++	u32 tx_hs_no;			//tx handshaking number
++	u32 rx_hs_no;			//rx handshaking number
++	u32 *tx_dumy_buff;
++	u32 *rx_dumy_buff;
++	struct fh_spi_cs cs_data[SPI_MASTER_CONTROLLER_MAX_SLAVE];
++	u32 pump_data_mode;
++	struct _fh_spi_dma_transfer dma_rx;
++	struct _fh_spi_dma_transfer dma_tx;
++	u32 complete_times;
++	struct fh_spi_platform_data *board_info;
++};
++
++//private to this file
++struct fh_spi_slave_controller {
++	struct clk *clk;
++	spinlock_t lock;
++	//message queue
++	struct platform_device *p_dev;
++	struct fh_spi dwc;
++	struct completion tx_done;
++
++	u32 cur_rx_len;
++	u32 cur_tx_len;
++
++	//dev interface
++	int major;
++	struct class *psClass;
++	struct device *psDev;
++
++	//kfifo interface
++	struct kfifo kfifo_in;
++	struct kfifo kfifo_out;
++};
++
++/******************************************************************************
++ * Function prototype section
++ * add prototypes for all functions called by this file, excepting those
++ * declared in header file
++ *****************************************************************************/
++
++/*****************************************************************************
++ * Global variables section - Exported
++ * add declaration of global variables that will be exported here
++ * e.g.
++ *  int8_t foo;
++ ****************************************************************************/
++
++/*****************************************************************************
++ * static function declarations
++ *****************************************************************************/
++
++static int fh_spi_slave_init_hw(struct fh_spi_slave_controller *fh_spi_slave,
++                struct fh_spi_platform_data *board_info);
++static u32 Spi_RawIsrstatus(struct fh_spi *dw);
++
++/*****************************************************************************
++ * Global variables section - Local
++ * define global variables (will be referred to only in this file) here,
++ * static keyword should be used to limit scope of local variable to this file
++ * e.g.
++ *  static uint8_t ufoo;
++ *****************************************************************************/
++/* static storage, so every slot starts out NULL */
++static struct fh_spi_slave_controller *priv_array[MAX_SPI_SLAVES];
++
++/* function body */
++
++static u32 Spi_Enable(struct fh_spi *dw, spi_enable_e enable)
++{
++	dw_writel(dw, ssienr, enable);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetPolarity(struct fh_spi *dw, spi_polarity_e polarity)
++{
++	u32 data;
++
++	data = dw_readl(dw, ctrl0);
++	data &= ~(u32) SPI_POLARITY_RANGE;
++	data |= polarity;
++	dw_writel(dw, ctrl0, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetPhase(struct fh_spi *dw, spi_phase_e phase)
++{
++	u32 data;
++
++	data = dw_readl(dw, ctrl0);
++	data &= ~(u32) SPI_PHASE_RANGE;
++	data |= phase;
++	dw_writel(dw, ctrl0, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetFrameFormat(struct fh_spi *dw, spi_format_e format)
++{
++	u32 data = 0;
++
++	data = dw_readl(dw, ctrl0);
++	data &= ~(u32) SPI_FRAME_FORMAT_RANGE;
++	data |= format;
++	dw_writel(dw, ctrl0, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetTransferMode(struct fh_spi *dw, spi_transfer_mode_e mode)
++{
++	u32 data = 0;
++
++	data = dw_readl(dw, ctrl0);
++	data &= ~(u32) SPI_TRANSFER_MODE_RANGE;
++	data |= mode;
++	dw_writel(dw, ctrl0, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_DisableIrq(struct fh_spi *dw, u32 irq)
++{
++	u32 data = 0;
++
++	data = dw_readl(dw, imr);
++	data &= ~irq;
++	dw_writel(dw, imr, data);
++	return CONFIG_OK;
++}
++
++static u32 Spi_EnableIrq(struct fh_spi *dw, u32 irq)
++{
++	u32 data = 0;
++
++	data = dw_readl(dw, imr);
++	data |= irq;
++	dw_writel(dw, imr, data);
++	return CONFIG_OK;
++
++}
++
++static u32 Spi_SetTxLevel(struct fh_spi *dw, u32 level)
++{
++	dw_writel(dw, txfltr, level);
++	return CONFIG_OK;
++}
++
++static u32 Spi_SetRxLevel(struct fh_spi *dw, u32 level)
++{
++	dw_writel(dw, rxfltr, level);
++	return CONFIG_OK;
++}
++
++static u32 Spi_ReadTxfifolevel(struct fh_spi *dw)
++{
++	return dw_readl(dw, txflr);
++}
++
++static u32 Spi_ReadRxfifolevel(struct fh_spi *dw)
++{
++	return (u32) dw_readl(dw, rxflr);
++}
++
++static u32 Spi_ReadStatus(struct fh_spi *dw)
++{
++	return (uint8_t) dw_readl(dw, sr);
++}
++
++static u32 Spi_SetSlaveMode(struct fh_spi *dw, spi_slave_mode_e format)
++{
++
++	u32 data = 0;
++	data = dw_readl(dw, ctrl0);
++	data &= ~(u32) SPI_SLAVE_MODE_RANGE;
++	data |= format;
++	dw_writel(dw, ctrl0, data);
++	return CONFIG_OK;
++
++}
++
++static u32 Spi_WriteData(struct fh_spi *dw, u16 data)
++{
++	dw_writew(dw, dr, data);
++	return WRITE_ONLY_OK;
++}
++
++static u16 Spi_ReadData(struct fh_spi *dw)
++{
++	return dw_readw(dw, dr);
++}
++
++#if 0
++static void Spi_Clearallerror(struct fh_spi *dw)
++{
++	u32 data = dw_readl(dw, icr);
++	data = 0;
++}
++#endif
++
++static u32 Spi_Isrstatus(struct fh_spi *dw)
++{
++	u32 data = dw_readl(dw, isr);
++	return data;
++}
++
++static u32 Spi_RawIsrstatus(struct fh_spi *dw)
++{
++	u32 data = dw_readl(dw, risr);
++	return data;
++}
++
++#if 0
++static void Spi_SetDmaTxDataLevel(struct fh_spi *dw, u32 level)
++{
++	dw_writel(dw, dmatdlr, level);
++}
++
++static void Spi_SetDmaRxDataLevel(struct fh_spi *dw, u32 level)
++{
++	dw_writel(dw, dmardlr, level);
++}
++
++static void Spi_SetDmaControlEnable(struct fh_spi *dw,
++		spi_dma_control_mode_e enable_pos)
++{
++
++	u32 data;
++
++	data = dw_readl(dw, dmacr);
++	data |= enable_pos;
++	dw_writel(dw, dmacr, data);
++}
++#endif
++
++static int spi_slave_open(struct inode *inode, struct file *filp)
++{
++	int i, ret = 0;
++	struct fh_spi_slave_controller *fh_spi_slave;
++	SPI_SLAVE_PRINT_DBG("%s\n", __func__);
++	//bind the pri to the spi slave control...
++	SPI_SLAVE_PRINT_DBG("inode id is %x..\n", inode->i_rdev);
++	for (i = 0; i < MAX_SPI_SLAVES; i++) {
++		/* skip unused slots before dereferencing them */
++		if (!priv_array[i])
++			continue;
++		SPI_SLAVE_PRINT_DBG("register id is %x..\n",
++		                MKDEV(priv_array[i]->major, 0));
++		if (MKDEV(priv_array[i]->major, 0) == inode->i_rdev) {
++			filp->private_data = priv_array[i];
++			break;
++		}
++	}
++	if (i == MAX_SPI_SLAVES)
++		return -ENXIO;
++	//reset kfifo...
++	fh_spi_slave = priv_array[i];
++	kfifo_reset(&fh_spi_slave->kfifo_in);
++	kfifo_reset(&fh_spi_slave->kfifo_out);
++	return ret;
++}
++
++static ssize_t spi_slave_read(struct file *filp, char __user *buf, size_t count,
++                loff_t *f_pos)
++{
++	int ret;
++	unsigned int copied;
++	struct fh_spi_slave_controller *fh_spi_slave;
++	fh_spi_slave = (struct fh_spi_slave_controller *) filp->private_data;
++	//nothing to read until the rx isr pushes data into fifo_out
++	if (kfifo_is_empty(&fh_spi_slave->kfifo_out)) {
++		return -EAGAIN;
++	}
++	ret = kfifo_to_user(&fh_spi_slave->kfifo_out, buf, count, &copied);
++	if (ret == 0) {
++		fh_spi_slave->cur_rx_len = copied;
++		return copied;
++	}
++	//copy to user failed..
++	return ret;
++}
++
++#if 0
++static void wait_spi_idle(struct fh_spi_slave_controller *fh_spi_slave) {
++	int status;
++	do {
++		status = Spi_ReadStatus(&fh_spi_slave->dwc);
++		SPI_SLAVE_PRINT_DBG("status is %x\n",status);
++	}while (status & 0x01);
++}
++#endif
++
++static void spi_slave_isr_tx_data(struct fh_spi_slave_controller *fh_spi_slave)
++{
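++	/* a high threshold (fifo_len - 5) makes TXE fire while the fifo is
++	 * still nearly full, so the isr can top it up before it underruns */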
++	//fh_spi_slave->dwc.isr_flag = SPI_IRQ_TXEIM;
++	//Spi_SetTxLevel(&fh_spi_slave->dwc, fh_spi_slave->dwc.fifo_len / 2);
++	Spi_SetTxLevel(&fh_spi_slave->dwc, fh_spi_slave->dwc.fifo_len - 5);
++	SPI_SLAVE_PRINT_DBG("open spi slave isr tx..\n");
++	Spi_EnableIrq(&fh_spi_slave->dwc, SPI_IRQ_TXEIM);
++	//wait_for_completion(&fh_spi_slave->tx_done);
++}
++
++static ssize_t spi_slave_write(struct file *filp, const char __user *buf,
++                size_t count, loff_t *f_pos)
++{
++	int ret;
++	unsigned int copied;
++	struct fh_spi_slave_controller *fh_spi_slave;
++	fh_spi_slave = (struct fh_spi_slave_controller *) filp->private_data;
++	if (kfifo_is_full(&fh_spi_slave->kfifo_in)) {
++		return -EAGAIN;
++	}
++	ret = kfifo_from_user(&fh_spi_slave->kfifo_in, buf, count, &copied);
++	if (ret == 0) {
++		//start the hw: arm the tx-empty irq so the isr pumps kfifo_in out
++		fh_spi_slave->cur_tx_len = copied;
++		spi_slave_isr_tx_data(fh_spi_slave);
++		return copied;
++	}
++	//copy from user failed..
++	return ret;
++}
++
++static long spi_slave_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++	int err = -ENOIOCTLCMD;
++
++	switch (cmd) {
++	case SLAVE_SET_PHASE:
++		break;
++	case SLAVE_SET_POLARITY:
++		break;
++	case SLAVE_INIT_RX_FIFO:
++		break;
++
++	case SLAVE_INIT_TX_FIFO:
++		break;
++
++	case SLAVE_GET_ERROR_STATUS:
++		break;
++
++	default:
++		break;
++	}
++
++	return err;
++}
++
++static int spi_slave_release(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++static const struct file_operations spi_slave_fops = {
++		.owner = THIS_MODULE,
++		.open = spi_slave_open,
++		.read = spi_slave_read,
++		.write = spi_slave_write,
++		.unlocked_ioctl = spi_slave_ioctl,
++		.release = spi_slave_release,
++};
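++
++/*
++ * Userspace talks to the controller through the /dev/fh_spi_slave_<n> char
++ * device created in probe: write() queues bytes into kfifo_in and arms the
++ * tx-empty irq, read() drains whatever the rx isr pushed into kfifo_out.
++ * A minimal (hypothetical) user:
++ *
++ *   int fd = open("/dev/fh_spi_slave_0", O_RDWR);
++ *   write(fd, tx_buf, tx_len);  // shifted out when the master clocks us
++ *   read(fd, rx_buf, rx_len);   // returns the bytes captured so far
++ */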
++
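++/* Return the max entries we can write into the tx fifo (half of the free
++ * space is used as a conservative margin) */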
++static inline u32 tx_max(struct fh_spi_slave_controller *fh_spi_slave)
++{
++	u32 hw_tx_level;
++	hw_tx_level = Spi_ReadTxfifolevel(&fh_spi_slave->dwc);
++	hw_tx_level = fh_spi_slave->dwc.fifo_len - hw_tx_level;
++	hw_tx_level /= 2;
++	return hw_tx_level;	//min(hw_tx_level, fh_spi_slave->dwc.tx_len);
++}
++
++/* Return the max entries we should read out of rx fifo */
++static inline u32 rx_max(struct fh_spi_slave_controller *fh_spi_slave)
++{
++	u32 hw_rx_level;
++
++	hw_rx_level = Spi_ReadRxfifolevel(&fh_spi_slave->dwc);
++	return hw_rx_level;
++}
++
++static void spi_slave_process_tx_isr(
++                struct fh_spi_slave_controller *fh_spi_slave)
++{
++
++	u8 tx_buff[SPI_SLAVE_MAX_FIFO_SIZE] = { 0 };
++	int kfifo_tx_size, hw_tx_size, trans_size;
++	u16 data;
++	int i;
++
++	kfifo_tx_size = kfifo_len(&fh_spi_slave->kfifo_in);
++	hw_tx_size = tx_max(fh_spi_slave);
++	//send MIN(tx kfifo size, hw tx fifo avail size) bytes this round;
++	//kfifo_out returns how many bytes were actually copied out
++	trans_size = min(kfifo_tx_size, hw_tx_size);
++	trans_size = kfifo_out(&fh_spi_slave->kfifo_in, tx_buff, trans_size);
++	//transfer data to hw.. and reduce the actual trans data size..
++	SPI_SLAVE_PRINT_DBG("kfifo size :%d,  hw size:%d..\n",kfifo_tx_size,hw_tx_size);
++	SPI_SLAVE_PRINT_DBG("tx isr size is %d..\n",trans_size);
++	//printk("**0d%d\n",trans_size);
++	for (i = 0; i < trans_size; i++) {
++		data = tx_buff[i];
++		//SPI_SLAVE_PRINT_DBG("tx data is %x\n",data);
++		Spi_WriteData(&fh_spi_slave->dwc, data);
++	}
++	//SPI_SLAVE_PRINT_DBG("\n");
++	fh_spi_slave->cur_tx_len -= trans_size;
++	if (fh_spi_slave->cur_tx_len == 0) {
++		Spi_DisableIrq(&fh_spi_slave->dwc, SPI_IRQ_TXEIM);
++		//complete(&(fh_spi_slave->tx_done));
++	} else {
++		//Spi_EnableIrq(&fh_spi_slave->dwc, SPI_IRQ_TXEIM);
++	}
++
++}
++
++static void spi_slave_process_rx_isr(
++                struct fh_spi_slave_controller *fh_spi_slave)
++{
++
++	int hw_rx_size;
++	int i;
++	u16 data;
++	int status;
++	//here we try to get more data when the clk is too high...
++	//do {
++	hw_rx_size = rx_max(fh_spi_slave);
++	SPI_SLAVE_PRINT_DBG("rx get size is 0x%d\n",hw_rx_size);
++	for (i = 0; i < hw_rx_size; i++) {
++		data = Spi_ReadData(&fh_spi_slave->dwc);
++		//rx_buff[i] = (u8) data;
++		kfifo_in(&fh_spi_slave->kfifo_out, &data, 1);
++	}
++	status = Spi_ReadStatus(&fh_spi_slave->dwc);
++	//} while (status & (1 << 3));
++
++}
++
++static irqreturn_t fh_spi_slave_irq(int irq, void *dev_id)
++{
++	struct fh_spi_slave_controller *fh_spi_slave;
++	u32 isr_status;
++	u32 raw_status;
++
++	fh_spi_slave = (struct fh_spi_slave_controller *) dev_id;
++	isr_status = Spi_Isrstatus(&fh_spi_slave->dwc);
++	raw_status = Spi_RawIsrstatus(&fh_spi_slave->dwc);
++	//printk("raw irq status is 0x%x..\n",raw_status);
++	SPI_SLAVE_PRINT_DBG("irq status is 0x%x..\n",isr_status);
++	if (raw_status & SPI_IRQ_RXOIM) {
++		printk("[FH_SPI_S_ERROR]: rx overflow....\n");
++	}
++	if (isr_status & SPI_IRQ_TXEIM) {
++		spi_slave_process_tx_isr(fh_spi_slave);
++	}
++	if (isr_status & SPI_IRQ_RXFIM) {
++		spi_slave_process_rx_isr(fh_spi_slave);
++	}
++
++	return IRQ_HANDLED;
++}
++
++static int __devinit fh_spi_slave_probe(struct platform_device *dev)
++{
++	int err = 0;
++	struct resource *r,*ioarea;
++	int ret;
++	char spi_slave_name[32] = {0};
++	char spi_slave_class_name[32] = {0};
++	int major_id;
++	struct fh_spi_slave_controller *fh_spi_slave;
++	struct fh_spi_platform_data * spi_platform_info;
++	spi_platform_info = (struct fh_spi_platform_data *)dev->dev.platform_data;
++	if (spi_platform_info == NULL) {
++		err = -ENODEV;
++		dev_err(&dev->dev, "%s, spi slave platform data null.\n",
++			__func__);
++		BUG();
++	}
++	fh_spi_slave = kzalloc(sizeof(struct fh_spi_slave_controller), GFP_KERNEL);
++	if (!fh_spi_slave) {
++		dev_err(&dev->dev, "malloc spi slave control mem not enough\n");
++		BUG();
++	}
++	/* later dev_err() calls dereference p_dev, so record it up front */
++	fh_spi_slave->p_dev = dev;
++	/* platform_get_irq() returns a negative errno on failure; check it
++	 * before storing into the unsigned dwc.irq field */
++	ret = platform_get_irq(dev, 0);
++	if (ret < 0) {
++		dev_err(&dev->dev, "%s, failed to get spi slave irq.\n",
++			__func__);
++		err = ret;
++		BUG();
++	}
++	fh_spi_slave->dwc.irq = ret;
++	err = request_irq(fh_spi_slave->dwc.irq , fh_spi_slave_irq, 0,
++			  dev_name(&dev->dev), fh_spi_slave);
++	if (err) {
++		dev_dbg(&dev->dev, "request_irq failed, %d\n", err);
++		BUG();
++	}
++	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
++	if (r == NULL) {
++		dev_err(&dev->dev, "%s, spi slave ioresource error. \n",
++			__func__);
++		err = -ENODEV;
++		BUG();
++	}
++	fh_spi_slave->dwc.paddr = r->start;
++	ioarea = request_mem_region(r->start, resource_size(r), dev->name);
++	if (!ioarea) {
++		dev_err(&dev->dev, "spi slave region already claimed\n");
++		err = -EBUSY;
++		BUG();
++	}
++	fh_spi_slave->dwc.regs = ioremap(r->start, resource_size(r));
++	if (!fh_spi_slave->dwc.regs) {
++		dev_err(&dev->dev, "spi slave region already mapped\n");
++		err = -EINVAL;
++		BUG();
++	}
++	spi_platform_info->bus_no = dev->id;
++	priv_array[dev->id] = fh_spi_slave;
++	init_completion(&fh_spi_slave->tx_done);
++	spin_lock_init(&fh_spi_slave->lock);
++
++	fh_spi_slave->clk = clk_get(NULL, spi_platform_info->clk_name);
++	if (IS_ERR(fh_spi_slave->clk)) {
++		dev_err(&fh_spi_slave->p_dev->dev, "cannot find the spi%d clk.\n",
++				fh_spi_slave->dwc.id);
++		err = PTR_ERR(fh_spi_slave->clk);
++		BUG();
++	}
++	clk_enable(fh_spi_slave->clk);
++	clk_set_rate(fh_spi_slave->clk,spi_platform_info->apb_clock_in);
++
++	ret = fh_spi_slave_init_hw(fh_spi_slave,spi_platform_info);
++	if(ret) {
++		err = ret;
++		BUG();
++	}
++	sprintf(spi_slave_name, "fh_spi_slave_%d", dev->id);
++	sprintf(spi_slave_class_name, "fh_spi_slave_class_%d", dev->id);
++	major_id = register_chrdev(0, spi_slave_name, &spi_slave_fops);
++	if (major_id <= 0) {
++		err = -EIO;
++		dev_err(&fh_spi_slave->p_dev->dev, "cannot register spi slave_%d char dev..\n",
++				fh_spi_slave->dwc.id);
++		BUG();
++	} else {
++		fh_spi_slave->major = major_id;
++	}
++
++	fh_spi_slave->psClass = class_create(THIS_MODULE, spi_slave_class_name);
++	if (IS_ERR(fh_spi_slave->psClass)) {
++		err = -EIO;
++		dev_err(&fh_spi_slave->p_dev->dev, "%s: Unable to create class\n", __FILE__);
++		BUG();
++	}
++
++	fh_spi_slave->psDev = device_create(fh_spi_slave->psClass, NULL, MKDEV(major_id, 0),
++			fh_spi_slave, spi_slave_name);
++	if (IS_ERR(fh_spi_slave->psDev)) {
++		err = -EIO;
++		dev_err(&fh_spi_slave->p_dev->dev,"Error: %s: Unable to create device\n", __FILE__);
++		BUG();
++	}
++
++	if (kfifo_alloc(&fh_spi_slave->kfifo_in, KFIFO_SIZE, GFP_KERNEL)) {
++		dev_err(&fh_spi_slave->p_dev->dev,"Error: %s: Unable to alloc kfifo..\n", __FILE__);
++		BUG();
++	}
++
++	if(kfifo_alloc(&fh_spi_slave->kfifo_out, KFIFO_SIZE, GFP_KERNEL)) {
++		dev_err(&fh_spi_slave->p_dev->dev,"Error: %s: Unable to alloc kfifo..\n", __FILE__);
++		BUG();
++	}
++
++	#if 0
++
++	//1 :empty 		0:not empty
++	//1 :full		0:not full
++	int empty,full,avail;
++	char test_buf_out[20] = {0};
++
++	empty = kfifo_is_empty(&fh_spi_slave->kfifo_in);
++	full = kfifo_is_full(&fh_spi_slave->kfifo_in);
++	avail = kfifo_avail(&fh_spi_slave->kfifo_in);
++	printk("empty:  %x,  full:  %x,  avail:  %x\n",empty,full,avail);
++	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fh_spi_slave->kfifo_in));
++	kfifo_in(&fh_spi_slave->kfifo_in, "hello", 5);
++	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fh_spi_slave->kfifo_in));
++
++	empty = kfifo_is_empty(&fh_spi_slave->kfifo_in);
++	full = kfifo_is_full(&fh_spi_slave->kfifo_in);
++	avail = kfifo_avail(&fh_spi_slave->kfifo_in);
++	printk("empty:  %x,  full:  %x,  avail:  %x\n",empty,full,avail);
++
++	/* put values into the fifo */
++	for (i = 0; i !=5; i++)
++	kfifo_put(&fh_spi_slave->kfifo_in, &i);
++
++	i = kfifo_out(&fh_spi_slave->kfifo_in, test_buf_out, 5);
++	printk("data len is %d\n",i);
++	printk(KERN_INFO "buf: %.*s\n", i, test_buf_out);
++
++	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fh_spi_slave->kfifo_in));
++	i = kfifo_out(&fh_spi_slave->kfifo_in, test_buf_out, 10);
++	printk("data len is %d\n",i);
++	printk(KERN_INFO "buf: %.*s\n", i, test_buf_out);
++	#endif
++
++	return err;
++}
++
++static int __devexit fh_spi_slave_remove(struct platform_device *dev)
++{
++	return 0;
++}
++
++static int fh_spi_slave_init_hw(struct fh_spi_slave_controller *fh_spi_slave,
++        struct fh_spi_platform_data *board_info)
++{
++	int status;
++	fh_spi_slave->dwc.id = board_info->bus_no;
++	fh_spi_slave->dwc.fifo_len = board_info->fifo_len;
++	fh_spi_slave->dwc.rx_hs_no = board_info->rx_handshake_num;
++	fh_spi_slave->dwc.tx_hs_no = board_info->tx_handshake_num;
++	memset(&fh_spi_slave->dwc.dma_rx, 0, sizeof(struct _fh_spi_dma_transfer));
++	memset(&fh_spi_slave->dwc.dma_tx, 0, sizeof(struct _fh_spi_dma_transfer));
++	fh_spi_slave->dwc.pump_data_mode = PUMP_DATA_ISR_MODE;
++	//bind the platform data here....
++	fh_spi_slave->dwc.board_info = board_info;
++
++	fh_spi_slave->dwc.isr_flag = SPI_IRQ_RXFIM;
++	fh_spi_slave->dwc.frame_mode = SPI_MOTOROLA_MODE;
++	fh_spi_slave->dwc.transfer_mode = SPI_TX_RX_MODE;
++	fh_spi_slave->dwc.cpol = SPI_POLARITY_HIGH;
++	fh_spi_slave->dwc.cpha = SPI_PHASE_RX_FIRST;
++	do {
++		status = Spi_ReadStatus(&fh_spi_slave->dwc);
++	} while (status & 0x01);
++	//add spi disable
++	Spi_Enable(&fh_spi_slave->dwc, SPI_DISABLE);
++	//add spi frame mode & transfer mode
++	Spi_SetFrameFormat(&fh_spi_slave->dwc, fh_spi_slave->dwc.frame_mode);
++	Spi_SetTransferMode(&fh_spi_slave->dwc, fh_spi_slave->dwc.transfer_mode);
++	Spi_SetPolarity(&fh_spi_slave->dwc, fh_spi_slave->dwc.cpol);
++	Spi_SetPhase(&fh_spi_slave->dwc, fh_spi_slave->dwc.cpha);
++	//Spi_SetRxLevel(&fh_spi_slave->dwc, fh_spi_slave->dwc.fifo_len / 2);
++	Spi_SetRxLevel(&fh_spi_slave->dwc, 0);
++	Spi_SetSlaveMode(&fh_spi_slave->dwc, SPI_SLAVE_EN);
++	//add spi disable all isr
++	Spi_DisableIrq(&fh_spi_slave->dwc, SPI_IRQ_ALL);
++	Spi_EnableIrq(&fh_spi_slave->dwc, fh_spi_slave->dwc.isr_flag);
++	//add spi enable
++	Spi_Enable(&fh_spi_slave->dwc, SPI_ENABLE);
++
++	return 0;
++
++}
++
++static struct platform_driver fh_spi_slave_driver = {
++	.probe = fh_spi_slave_probe,
++	.remove = __devexit_p(fh_spi_slave_remove),
++	.driver = {
++		.name = "fh_spi_slave",
++		.owner = THIS_MODULE,
++	},
++	.suspend = NULL,
++	.resume = NULL,
++};
++
++static int __init fh_spi_slave_init(void)
++{
++	return platform_driver_register(&fh_spi_slave_driver);
++}
++
++static void __exit fh_spi_slave_exit(void)
++{
++	platform_driver_unregister(&fh_spi_slave_driver);
++}
++
++module_init(fh_spi_slave_init);
++module_exit(fh_spi_slave_exit);
++MODULE_AUTHOR("yu.zhang <zhangy@fullhan.com>");
++MODULE_DESCRIPTION("DUOBAO SPI SLAVE driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 2e13a14b..b423fe92 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -318,7 +318,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
+ 	}
+ 
+ 	spi->master = master;
+-	spi->dev.parent = dev;
++	spi->dev.parent = &master->dev;
+ 	spi->dev.bus = &spi_bus_type;
+ 	spi->dev.release = spidev_release;
+ 	device_initialize(&spi->dev);
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index b3692e6e..925c7cff 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -1635,4 +1635,32 @@ config SERIAL_XILINX_PS_UART_CONSOLE
+ 	help
+ 	  Enable a Xilinx PS UART port to be the system console.
+ 
++config SERIAL_FH
++	tristate "FH UART support"
++	select SERIAL_CORE
++	help
++	  This driver supports the FH UART port.
++
++config SERIAL_FH_CONSOLE
++	bool "FH UART console support"
++	depends on SERIAL_FH=y
++	select SERIAL_CORE_CONSOLE
++	help
++	  Enable an FH UART port to be the system console.
++
+ endmenu
+diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
+index cb2628fe..4cae882c 100644
+--- a/drivers/tty/serial/Makefile
++++ b/drivers/tty/serial/Makefile
+@@ -96,3 +96,7 @@ obj-$(CONFIG_SERIAL_MSM_SMD)	+= msm_smd_tty.o
+ obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
+ obj-$(CONFIG_SERIAL_LANTIQ)	+= lantiq.o
+ obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
++obj-$(CONFIG_SERIAL_FH) += fh_serial.o
+diff --git a/drivers/tty/serial/fh_serial.c b/drivers/tty/serial/fh_serial.c
+new file mode 100644
+index 00000000..32bdc87d
+--- /dev/null
++++ b/drivers/tty/serial/fh_serial.c
+@@ -0,0 +1,1041 @@
++#if defined(CONFIG_SERIAL_FH_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
++#define SUPPORT_SYSRQ
++#endif
++
++#include <linux/module.h>
++#include <linux/ioport.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/init.h>
++#include <linux/sysrq.h>
++#include <linux/console.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial_core.h>
++#include <linux/serial.h>
++#include <linux/delay.h>
++#include <linux/clk.h>
++#include <linux/cpufreq.h>
++#include <linux/moduleparam.h>
++#include <linux/ratelimit.h>
++#include <linux/serial_reg.h>
++#include <linux/nmi.h>
++#include <linux/mutex.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/err.h>
++#include <linux/sched.h>
++#include <linux/timex.h>
++#include <asm/irq.h>
++#include <asm/mach/irq.h>
++#include "fh_serial.h"
++
++/*********************************
++ *
++ * fh  private
++ *
++ *********************************/
++#define     REG_UART_RBR		(0x0000)
++#define     REG_UART_THR		(0x0000)
++#define     REG_UART_DLL		(0x0000)
++#define     REG_UART_DLH		(0x0004)
++#define     REG_UART_IER		(0x0004)
++#define     REG_UART_IIR		(0x0008)
++#define     REG_UART_FCR		(0x0008)
++#define     REG_UART_LCR		(0x000c)
++#define     REG_UART_MCR		(0x0010)
++#define     REG_UART_LSR		(0x0014)
++#define     REG_UART_MSR		(0x0018)
++#define     REG_UART_SCR		(0x001c)
++#define     REG_UART_FAR		(0x0070)
++#define     REG_UART_TFR		(0x0074)
++#define     REG_UART_RFW		(0x0078)
++#define     REG_UART_USR		(0x007c)
++#define     REG_UART_TFL		(0x0080)
++#define     REG_UART_RFL		(0x0084)
++#define     REG_UART_SRR		(0x0088)
++#define     REG_UART_SFE		(0x0098)
++#define     REG_UART_SRT		(0x009c)
++#define     REG_UART_STET		(0x00a0)
++#define     REG_UART_HTX		(0x00a4)
++#define     REG_UART_DMASA		(0x00a8)
++#define     REG_UART_CPR		(0x00f4)
++#define     REG_UART_UCV		(0x00f8)
++#define     REG_UART_CTR		(0x00fc)
++
++#define	DBGLINE()		printk(KERN_DEBUG \
++				"file: %s\tfunc:%s\tline:%d\n",\
++				__FILE__, __FUNCTION__, __LINE__)
++#define FH_SERIAL_NAME					"ttyS"
++#define FH_DRIVE_NAME					"ttyS"
++#define FH_DEV_NAME						"ttyS"
++
++
++#define UART_READ_RX_DW_FIFO_OK			0
++#define UART_READ_RX_DW_FIFO_TIME_OUT	0xcc
++#define MAP_SIZE						0x80000
++
++#ifdef CONFIG_SERIAL_FH_CONSOLE
++static struct console fh_serial_console;
++#define FH_SERIAL_CONSOLE		 (&fh_serial_console)
++#else
++#define FH_SERIAL_CONSOLE NULL
++#endif
++
++#define tx_enabled(port)		((port)->unused[0])
++#define rx_enabled(port)		((port)->unused[1])
++#define FH_TYPE					(99)
++#define BOTH_EMPTY				(UART_LSR_TEMT | UART_LSR_THRE)
++#define fh_dev_to_port(__dev)	(struct uart_port *)dev_get_drvdata(__dev)
++
++
++#define fh_uart_readl(addr) \
++	__raw_readl(addr)
++#define fh_uart_writel(addr, val) \
++	__raw_writel((val), addr)
++
++#define fh_uart_readw(addr) \
++	__raw_readw(addr)
++#define fh_uart_writew(addr, val) \
++	__raw_writew((val), addr)
++
++/******************************************************************************
++ * Function prototype section
++ * add prototypes for all functions called by this file,execepting those
++ * declared in header file
++ *****************************************************************************/
++static void fh_uart_pm(struct uart_port *port, unsigned int level,
++			      unsigned int old);
++static void fh_uart_stop_tx(struct uart_port *port);
++static void fh_uart_start_tx(struct uart_port *port);
++static void fh_uart_stop_rx(struct uart_port *port);
++static void fh_uart_start_rx(struct uart_port *port);
++static void fh_uart_enable_ms(struct uart_port *port);
++static unsigned int fh_uart_tx_empty(struct uart_port *port);
++static unsigned int fh_uart_get_mctrl(struct uart_port *port);
++static void fh_uart_set_mctrl(struct uart_port *port, unsigned int mctrl);
++static void fh_uart_break_ctl(struct uart_port *port, int break_state);
++static irqreturn_t fh_uart_rx_chars(int irq, void *dev_id);
++static irqreturn_t fh_uart_tx_chars(int irq, void *dev_id);
++static irqreturn_t fh_uart_isr(int irq, void *dev_id);
++static void fh_serial_shutdown(struct uart_port *port);
++static int fh_serial_startup(struct uart_port *port);
++static void fh_serial_set_termios(struct uart_port *port,
++				       struct ktermios *termios,
++				       struct ktermios *old);
++static const char *fh_serial_type(struct uart_port *port);
++static void fh_serial_release_port(struct uart_port *port);
++static int fh_serial_request_port(struct uart_port *port);
++static void fh_serial_config_port(struct uart_port *port, int flags);
++static int fh_uart_set_wake(struct uart_port *, unsigned int state);
++
++struct fh_uart_port *to_fh_uart_port(struct uart_port *port);
++struct fh_uart_port *info_to_fh_uart_port(fh_uart_info *info);
++/*****************************************************************************
++ * Global variables section - Local
++ * define global variables(will be refered only in this file) here,
++ * static keyword should be used to limit scope of local variable to this file
++ * e.g.
++ *	static uint8_t ufoo;
++ *****************************************************************************/
++static struct uart_ops fh_serial_ops = {
++	.pm		= fh_uart_pm,
++	.tx_empty	= fh_uart_tx_empty,
++	.get_mctrl	= fh_uart_get_mctrl,
++	.set_mctrl	= fh_uart_set_mctrl,
++	.stop_tx	= fh_uart_stop_tx,
++	.start_tx	= fh_uart_start_tx,
++	.stop_rx	= fh_uart_stop_rx,
++	.enable_ms	= fh_uart_enable_ms,
++	.break_ctl	= fh_uart_break_ctl,
++	.startup	= fh_serial_startup,
++	.shutdown	= fh_serial_shutdown,
++	.set_termios	= fh_serial_set_termios,
++	.type		= fh_serial_type,
++	.release_port	= fh_serial_release_port,
++	.request_port	= fh_serial_request_port,
++	.config_port	= fh_serial_config_port,
++	.set_wake	= fh_uart_set_wake,
++	.verify_port	= NULL,
++};
++
++static struct uart_driver fh_uart_drv = {
++	.owner			= THIS_MODULE,
++	.driver_name	= FH_DRIVE_NAME,
++	.nr				= FH_UART_NUMBER,
++	.cons			= FH_SERIAL_CONSOLE,
++	.dev_name		= FH_DEV_NAME,
++	.major			= 4,
++	.minor			= 64,
++};
++
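++/* FH8810/WUDANG swap the two ports: ttyS0 is hardware UART1 and ttyS1 is
++ * UART0; the other chips use the straight mapping, with a third port on
++ * FH8830 */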
++#if (defined(CONFIG_ARCH_FH8810) || defined(CONFIG_ARCH_WUDANG))
++#define UART_PORT0_ISR      ISR_NUMBER1
++#define UART_PORT1_ISR      ISR_NUMBER0
++#define UART_PORT0_BASE     UART1_REG_BASE
++#define UART_PORT1_BASE     UART0_REG_BASE
++#define UART_PORT0(x)       VUART1(x)
++#define UART_PORT1(x)       VUART0(x)
++#else
++#define UART_PORT0_ISR      ISR_NUMBER0
++#define UART_PORT1_ISR      ISR_NUMBER1
++#define UART_PORT2_ISR      ISR_NUMBER2
++#define UART_PORT0_BASE     UART0_REG_BASE
++#define UART_PORT1_BASE     UART1_REG_BASE
++#define UART_PORT2_BASE     UART2_REG_BASE
++#define UART_PORT0(x)       VUART0(x)
++#define UART_PORT1(x)       VUART1(x)
++#define UART_PORT2(x)       VUART2(x)
++#endif
++
++static  struct fh_uart_port  fh_own_ports[FH_UART_NUMBER] = {
++	[0] = {
++		.port = {
++			.lock		= __SPIN_LOCK_UNLOCKED(fh_own_ports[0].port.lock),
++			.iotype		= UPIO_MEM,
++			.irq		= UART_PORT0_ISR,
++			.uartclk	= 30000000,
++			.fifosize	= UART0_FIFO_SIZE,
++			.ops		= &fh_serial_ops,
++			.flags		= UPF_BOOT_AUTOCONF,
++			.line		= 0,
++			.mapbase = UART_PORT0_BASE,
++			.membase = (unsigned char __iomem *)UART_PORT0(UART_PORT0_BASE),
++		},
++		.fh_info = {
++			.name = "FH UART0",
++			.irq_num = UART_PORT0_ISR,
++			.base_add = (unsigned char __iomem *)UART_PORT0(UART_PORT0_BASE),
++			.baudrate = BAUDRATE_115200,
++			.line_ctrl = Uart_line_8n2,
++			.fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER,
++		}
++	},
++	[1] = {
++		.port = {
++			.lock		= __SPIN_LOCK_UNLOCKED(fh_own_ports[1].port.lock),
++			.iotype		= UPIO_MEM,
++			.irq		= UART_PORT1_ISR,
++			.uartclk	= 30000000,
++			.fifosize	= UART1_FIFO_SIZE,
++			.ops		= &fh_serial_ops,
++			.flags		= UPF_BOOT_AUTOCONF,
++			.line		= 1,
++			.mapbase = UART_PORT1_BASE,
++			.membase = (unsigned char __iomem *)UART_PORT1(UART_PORT1_BASE),
++
++		},
++		.fh_info = {
++			.name = "FH UART1",
++			.irq_num = UART_PORT1_ISR,
++			.base_add = (unsigned char __iomem *)UART_PORT1(UART_PORT1_BASE),
++			.baudrate = BAUDRATE_115200,
++			.line_ctrl = Uart_line_8n2,
++			.fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER,
++		}
++	},
++#ifdef CONFIG_ARCH_FH8830
++	[2] = {
++		.port = {
++			.lock		= __SPIN_LOCK_UNLOCKED(fh_own_ports[2].port.lock),
++			.iotype		= UPIO_MEM,
++			.irq		= UART_PORT2_ISR,
++			.uartclk	= 30000000,
++			.fifosize	= UART2_FIFO_SIZE,
++			.ops		= &fh_serial_ops,
++			.flags		= UPF_BOOT_AUTOCONF,
++			.line		= 2,
++			.mapbase = UART_PORT2_BASE,
++			.membase = (unsigned char __iomem *)UART_PORT2(UART_PORT2_BASE),
++
++		},
++		.fh_info = {
++			.name = "FH UART2",
++			.irq_num = UART_PORT2_ISR,
++			.base_add = (unsigned char __iomem *)UART_PORT2(UART_PORT2_BASE),
++			.baudrate = BAUDRATE_115200,
++			.line_ctrl = Uart_line_8n2,
++			.fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER,
++		}
++	},
++#endif
++};
++
++struct fh_uart_port *to_fh_uart_port(struct uart_port *port)
++{
++	return container_of(port, struct fh_uart_port, port);
++
++}
++
++struct fh_uart_port *info_to_fh_uart_port(fh_uart_info *info)
++{
++	return container_of(info, struct fh_uart_port, fh_info);
++
++}
++
++s32 Uart_Disable_Irq(fh_uart_info *desc, uart_irq_e interrupts)
++{
++	u32 ret;
++	u32 base = (u32)desc->base_add;
++
++	ret = fh_uart_readl(base + REG_UART_IER);
++	ret &= ~interrupts;
++	fh_uart_writel(base + REG_UART_IER, ret);
++
++	return UART_CONFIG_OK;
++}
++
++s32 Uart_Enable_Irq(fh_uart_info *desc, uart_irq_e interrupts)
++{
++	u32 ret;
++	u32 base = (u32)desc->base_add;
++
++	/* read-modify-write the IER itself (offset 0 is the RBR, and
++	 * reading it would pop the rx fifo) */
++	ret = fh_uart_readl(base + REG_UART_IER);
++	ret |= interrupts;
++	fh_uart_writel(base + REG_UART_IER, ret);
++
++	return UART_CONFIG_OK;
++
++}
++
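++/* FCR and IIR share offset 0x08: write FCR to program the fifo trigger,
++ * then read IIR, whose top bits report 0xc0 once the fifos are enabled */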
++s32 Uart_Fifo_Config(fh_uart_info *desc)
++{
++	u32 ret;
++	u32 base = (u32)desc->base_add;
++
++	fh_uart_writel(base + REG_UART_FCR, desc->fifo_ctrl);
++	ret = fh_uart_readl(base + REG_UART_IIR);
++
++	if (ret & UART_FIFO_IS_ENABLE)
++		return UART_CONFIG_FIFO_OK;
++	else
++		return UART_CONFIG_FIFO_ERROR;
++}
++
++s32 Uart_Read_Control_Status(fh_uart_info *desc)
++{
++	u32 base = (u32)desc->base_add;
++	return fh_uart_readl(base + REG_UART_USR);
++}
++
++s32 Uart_Set_Line_Control(fh_uart_info *desc)
++{
++	u32 ret;
++	u32 base = (u32)desc->base_add;
++
++	ret = Uart_Read_Control_Status(desc);
++	if (ret & UART_STATUS_BUSY)
++		return UART_IS_BUSY;
++
++	fh_uart_writel(base + REG_UART_LCR, desc->line_ctrl);
++	return UART_CONFIG_LINE_OK;
++}
++
++s32 Uart_Read_Line_Status(fh_uart_info *desc)
++{
++	u32 base = (u32)desc->base_add;
++	return fh_uart_readl(base + REG_UART_LSR);
++}
++
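++/* 16550-style divisor latch sequence: set LCR.DLAB, write the low/high
++ * divisor bytes to DLL/DLH, then clear DLAB to restore normal access */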
++s32 Uart_Set_Clock_Divisor(fh_uart_info *desc)
++{
++	u32 low, high, ret;
++	u32 base = (u32)desc->base_add;
++
++	low = desc->baudrate & 0x00ff;
++	high = (desc->baudrate & 0xff00) >> 8;
++
++	ret = Uart_Read_Control_Status(desc);
++	if (ret & UART_STATUS_BUSY)
++		return UART_IS_BUSY;
++
++	ret = fh_uart_readl(base + REG_UART_LCR);
++	/* if DLAB not set */
++	if (!(ret & UART_LCR_DLAB_POS)) {
++		ret |= UART_LCR_DLAB_POS;
++		fh_uart_writel(base + REG_UART_LCR, ret);
++	}
++	fh_uart_writel(base + REG_UART_DLL, low);
++	fh_uart_writel(base + REG_UART_DLH, high);
++
++	/* clear DLAB */
++	ret = ret & 0x7f;
++	fh_uart_writel(base + REG_UART_LCR, ret);
++
++	return UART_CONFIG_DIVISOR_OK;
++}
++
++s32 Uart_Read_iir(fh_uart_info *desc)
++{
++	u32 base = (u32)desc->base_add;
++	return fh_uart_readl(base + REG_UART_IIR);
++}
++
++s32 Uart_Init(fh_uart_info *desc)
++{
++
++	u32 base = (u32)desc->base_add;
++	struct fh_uart_port *port = info_to_fh_uart_port(desc);
++	u8 test_init_status = 0;
++
++	/* reset fifo */
++	fh_uart_writel(base + REG_UART_FCR, 6);
++	test_init_status |= Uart_Set_Clock_Divisor(desc);
++	test_init_status |= Uart_Set_Line_Control(desc);
++	test_init_status |= Uart_Fifo_Config(desc);
++	if (test_init_status != 0) {
++		return test_init_status;
++	}
++	Uart_Disable_Irq(desc, UART_INT_ALL);
++	fh_uart_start_rx(&port->port);
++	return 0;
++}
++
++
++
++/*********************************
++ *
++ *
++ * FH  CONSOLE
++ *
++ *
++ *********************************/
++#ifdef CONFIG_SERIAL_FH_CONSOLE
++static struct uart_port *cons_uart;
++
++static void
++fh_serial_console_putchar(struct uart_port *port, int ch)
++{
++	u32 ret;
++	struct fh_uart_port *myown_port = to_fh_uart_port(port);
++
++	do {
++		ret = (u8)Uart_Read_Control_Status(&myown_port->fh_info);
++	}
++	/* spin until the tx fifo is no longer full:
++	 * TFNF = 1 means not full,
++	 * TFNF = 0 means full
++	 */
++	while (!(ret & UART_STATUS_TFNF));
++
++	fh_uart_writel(myown_port->fh_info.base_add + REG_UART_THR, ch);
++}
++
++static void
++fh_serial_console_write(struct console *co, const char *s,
++			     unsigned int count)
++{
++	struct fh_uart_port* myown_port  = &fh_own_ports[co->index];
++	uart_console_write(&myown_port->port, s, count, fh_serial_console_putchar);
++}
++
++
++static int __init
++fh_serial_console_setup(struct console *co, char *options)
++{
++	struct uart_port *port;
++	int baud = 115200;
++	int bits = 8;
++	int parity = 'n';
++	int flow = 'n';
++	/* validate the port index; fall back to port 0 rather than
++	 * indexing fh_own_ports with -1 */
++	if (co->index == -1 || co->index >= FH_UART_NUMBER) {
++		printk("ERROR: co->index invalid: %d\n", co->index);
++		co->index = 0;
++	}
++
++	port = &fh_own_ports[co->index].port;
++
++	/* is the port configured? */
++	if (port->mapbase == 0x0)
++		return -ENODEV;
++
++	cons_uart = port;
++	/* parse any console=ttyS<n>,<options> arguments from the command line */
++	if (options)
++		uart_parse_options(options, &baud, &parity, &bits, &flow);
++	uart_set_options(port, co, baud, parity, bits, flow);
++
++	/* must true for setup ok, see printk.c line:1463 */
++	return 1;
++}
++
++
++int fh_serial_initconsole(void)
++{
++	fh_serial_console.data = &fh_uart_drv;
++	register_console(&fh_serial_console);
++	return 0;
++}
++console_initcall(fh_serial_initconsole);
++
++static struct console fh_serial_console = {
++	.name		= FH_SERIAL_NAME,
++	.device		= uart_console_device,
++	.flags		= CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
++	.index		= -1,
++	.write		= fh_serial_console_write,
++	.setup		= fh_serial_console_setup,
++	.data		= &fh_uart_drv,
++};
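++
++/* with CONFIG_SERIAL_FH_CONSOLE enabled the kernel can be booted with,
++ * e.g., console=ttyS0,115200 to route console output through this driver */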
++
++#endif /* CONFIG_SERIAL_FH_CONSOLE */
++
++static void fh_uart_stop_tx(struct uart_port *port)
++{
++	/* close tx isr */
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++	u32 base = (u32)myown_port->fh_info.base_add;
++	tx_enabled(port) = 0;
++	fh_uart_writel(base + REG_UART_IER, 0x01);
++}
++
++static void fh_uart_start_tx(struct uart_port *port)
++{
++	/* open tx isr */
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++	u32 base = (u32)myown_port->fh_info.base_add;
++	fh_uart_writel(base + REG_UART_IER, 0x03);
++	tx_enabled(port) = 1;
++}
++
++static void fh_uart_stop_rx(struct uart_port *port)
++{
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++	rx_enabled(port) = 0;
++	Uart_Disable_Irq(&myown_port->fh_info,UART_INT_ERBFI_POS);
++}
++
++static void fh_uart_start_rx(struct uart_port *port)
++{
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++	rx_enabled(port) = 1;
++	Uart_Enable_Irq(&myown_port->fh_info,UART_INT_ERBFI_POS);
++}
++
++static void fh_uart_pm(struct uart_port *port, unsigned int level,
++			      unsigned int old)
++{
++
++}
++
++static int fh_uart_set_wake(struct uart_port *port, unsigned int state)
++{
++	return 0;
++}
++
++
++static void fh_uart_enable_ms(struct uart_port *port)
++{
++
++}
++
++
++static unsigned int fh_uart_tx_empty(struct uart_port *port)
++{
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++	/*
++	 * 1 means empty
++	 * 0:means no empty
++	 */
++	int ret = 1;
++	int ret_status;
++
++	ret_status = (u8)Uart_Read_Control_Status(&myown_port->fh_info);
++	if(ret_status & UART_STATUS_TFE)
++		ret = 1;
++	else
++		ret = 0;
++	return ret;
++}
++
++
++static unsigned int fh_uart_get_mctrl(struct uart_port *port)
++{
++	return 0;
++}
++
++
++static void fh_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++
++}
++
++static void fh_uart_break_ctl(struct uart_port *port, int break_state)
++{
++
++}
++
++static irqreturn_t
++fh_uart_rx_chars(int irq, void *dev_id)
++{
++	struct fh_uart_port* myown_port  = dev_id;
++	struct uart_port *port = &myown_port->port;
++	struct tty_struct *tty = port->state->port.tty;
++	unsigned int ch = 0;	/* keep initialised in case LSR.DR is not set */
++	unsigned int flag;
++	unsigned int uerstat;
++	int max_count = 64;
++	int ret_status;
++
++	while (max_count-- > 0) {
++		/* check if rx fifo is empty */
++		ret_status = (u8)Uart_Read_Control_Status(&myown_port->fh_info);
++		if(!(ret_status & UART_STATUS_RFNE))
++			break;
++		/* read error in the rx process */
++		uerstat = Uart_Read_Line_Status(&myown_port->fh_info);
++		/* read  data in the rxfifo */
++		if(uerstat & UART_LINE_STATUS_DR)
++			ch = fh_uart_readl(myown_port->fh_info.base_add + REG_UART_RBR);
++		/* insert the character into the buffer */
++		flag = TTY_NORMAL;
++		port->icount.rx++;
++		/* if at least one error in rx process */
++		if (unlikely(uerstat & UART_LINE_STATUS_RFE)) {
++			printk("rxerr: port ch=0x%02x, rxs=0x%08x\n",
++			    ch, uerstat);
++			/* check for break */
++			if (uerstat & UART_LINE_STATUS_BI) {
++				port->icount.brk++;
++				if (uart_handle_break(port))
++				    goto ignore_char;
++			}
++
++			if (uerstat & UART_LINE_STATUS_FE)
++				port->icount.frame++;
++			if (uerstat & UART_LINE_STATUS_OE)
++				port->icount.overrun++;
++
++			uerstat &= port->read_status_mask;
++
++			if (uerstat & UART_LINE_STATUS_BI)
++				flag = TTY_BREAK;
++			else if (uerstat & UART_LINE_STATUS_PE)
++				flag = TTY_PARITY;
++			else if (uerstat & (UART_LINE_STATUS_FE |
++					UART_LINE_STATUS_OE))
++				flag = TTY_FRAME;
++		}
++
++		if (uart_handle_sysrq_char(port, ch))
++			goto ignore_char;
++
++		uart_insert_char(port, uerstat, UART_LINE_STATUS_OE,
++				 ch, flag);
++
++ ignore_char:
++		continue;
++	}
++	tty_flip_buffer_push(tty);
++	return IRQ_HANDLED;
++}
++
++
++
++static irqreturn_t
++fh_uart_tx_chars(int irq, void *dev_id)
++{
++	struct fh_uart_port* myown_port  = dev_id;
++	struct uart_port *port = &myown_port->port;
++	struct circ_buf *xmit = &port->state->xmit;
++	int count = 256;
++	int ret_status;
++
++	/* if there isn't anything more to transmit, or the uart is now
++	 * stopped, disable the uart and exit
++	*/
++	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
++		fh_uart_stop_tx(port);
++		goto out;
++	}
++	/* try and drain the buffer... */
++	while (!uart_circ_empty(xmit) && count-- > 0) {
++
++		/*
++		 *  check the tx fifo full?
++		 * full then break
++		 */
++		ret_status = (u8)Uart_Read_Control_Status(&myown_port->fh_info);
++		if(!(ret_status & UART_STATUS_TFNF))
++			break;
++		/* write data to the hw fifo */
++		fh_uart_writel(myown_port->fh_info.base_add + REG_UART_THR,xmit->buf[xmit->tail]);
++		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
++		port->icount.tx++;
++	}
++
++	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++		uart_write_wakeup(port);
++
++	if (uart_circ_empty(xmit))
++		fh_uart_stop_tx(port);
++out:
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t
++fh_uart_isr(int irq, void *dev_id){
++	irqreturn_t ret_isr;
++	struct fh_uart_port* myown_port  = dev_id;
++	int ret_iir;
++
++	/* IIR low nibble: 0x2 thr empty, 0x4 rx data, 0x6 line status, 0xc rx timeout */
++	ret_iir = Uart_Read_iir(&myown_port->fh_info);
++	if ((ret_iir & 0x0f) == UART_INT_ID_RECEIVE_LINE) {
++		printk("uart overrun\n");
++	}
++
++	if((ret_iir & 0x04)||(ret_iir & 0x0c)){
++		ret_isr = fh_uart_rx_chars(irq,dev_id);
++	}
++
++	if(ret_iir & 0x02){
++		ret_isr = fh_uart_tx_chars(irq,dev_id);
++	}
++	else
++		ret_isr = IRQ_HANDLED;
++
++	return ret_isr;
++}
++
++static void fh_serial_shutdown(struct uart_port *port)
++{
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++
++	Uart_Disable_Irq(&myown_port->fh_info,UART_INT_ALL);
++	fh_uart_writel( myown_port->fh_info.base_add + REG_UART_FCR, 6);
++	free_irq(myown_port->fh_info.irq_num, myown_port);
++	tx_enabled(port) = 0;
++	rx_enabled(port) = 0;
++}
++
++static int fh_serial_startup(struct uart_port *port)
++{
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++	int ret;
++	int status;
++
++	do {
++		status = Uart_Read_Line_Status(&myown_port->fh_info);
++
++	} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
++
++	Uart_Init(&myown_port->fh_info);
++	if ((ret = request_irq(myown_port->fh_info.irq_num, fh_uart_isr, 0, FH_DEV_NAME, (void*)myown_port)))
++	{
++		printk("cannot get irq %d\n", myown_port->fh_info.irq_num);
++		return ret;
++	}
++
++	enable_irq_wake(myown_port->fh_info.irq_num);
++
++	return 0;
++}
++
++static void fh_serial_set_termios(struct uart_port *port,
++				       struct ktermios *termios,
++				       struct ktermios *old)
++{
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++
++	unsigned long flags;
++	unsigned int baud, quot;
++	unsigned int line_data = 0,status;
++
++	do {
++		status = Uart_Read_Line_Status(&myown_port->fh_info);
++
++	} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
++
++	switch (termios->c_cflag & CSIZE) {
++	case CS5:
++		line_data |= UART_LCR_WLEN5;
++		break;
++	case CS6:
++		line_data |= UART_LCR_WLEN6;
++		break;
++	case CS7:
++		line_data |= UART_LCR_WLEN7;
++		break;
++	case CS8:
++		line_data |= UART_LCR_WLEN8;
++		break;
++	default:
++		line_data |= UART_LCR_WLEN8;
++		break;
++	}
++	/* stop bits */
++	if (termios->c_cflag & CSTOPB)
++		line_data |= UART_LCR_STOP;
++
++	if (termios->c_cflag & PARENB){
++		line_data |= UART_LCR_PARITY;
++
++		if (!(termios->c_cflag & PARODD))
++			line_data |= UART_LCR_EPAR;
++	}
++	/*
++	 * baud cal.
++	 * baud is the uart will be out.
++	 * the quot is the div
++	 */
++	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
++	quot = uart_get_divisor(port, baud);
++
++	do{
++		status = Uart_Read_Line_Status(&myown_port->fh_info);
++	} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
++
++	uart_update_timeout(port, termios->c_cflag, baud);
++	spin_lock_irqsave(&myown_port->port.lock, flags);
++
++	myown_port->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
++	if (termios->c_iflag & INPCK)
++		myown_port->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
++	if (termios->c_iflag & (BRKINT | PARMRK))
++		myown_port->port.read_status_mask |= UART_LSR_BI;
++
++	/*
++	 * Characters to ignore
++	 */
++	myown_port->port.ignore_status_mask = 0;
++	if (termios->c_iflag & IGNPAR)
++		myown_port->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
++	if (termios->c_iflag & IGNBRK) {
++		myown_port->port.ignore_status_mask |= UART_LSR_BI;
++		/*
++		 * If we're ignoring parity and break indicators,
++		 * ignore overruns too (for real raw support).
++		 */
++		if (termios->c_iflag & IGNPAR)
++			myown_port->port.ignore_status_mask |= UART_LSR_OE;
++	}
++
++	/*
++	 * ignore all characters if CREAD is not set
++	 */
++	if ((termios->c_cflag & CREAD) == 0)
++		myown_port->port.ignore_status_mask |= UART_LSR_DR;
++
++
++	myown_port->fh_info.line_ctrl = (uart_line_e)line_data;
++	myown_port->fh_info.baudrate = quot;
++	myown_port->fh_info.fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER;
++	Uart_Init(&myown_port->fh_info);
++	spin_unlock_irqrestore(&myown_port->port.lock, flags);
++}
++
++
++
++static const char *fh_serial_type(struct uart_port *port)
++{
++	return FH_SERIAL_NAME;
++}
++
++static void fh_serial_release_port(struct uart_port *port)
++{
++	release_mem_region(port->mapbase, MAP_SIZE);
++}
++
++static int fh_serial_request_port(struct uart_port *port)
++{
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++	const char* name = myown_port->fh_info.name;
++	return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY;
++}
++
++
++static void fh_serial_config_port(struct uart_port *port, int flags)
++{
++	if (flags & UART_CONFIG_TYPE) {
++		fh_serial_request_port(port);
++		port->type = FH_TYPE;
++	}
++}
++
++static int fh_serial_init_port(struct fh_uart_port* myown_port,
++				    struct platform_device *platdev)
++{
++	struct uart_port *port = &myown_port->port;
++	struct resource *res;
++
++	if(platdev == NULL)
++		return -ENODEV;
++
++
++	myown_port->fh_info.dev = platdev;
++	/* setup info for port */
++	port->dev	= &platdev->dev;
++
++	/* sort our the physical and virtual addresses for each UART */
++	res = platform_get_resource(platdev, IORESOURCE_MEM, 0);
++	if (res == NULL) {
++		printk(KERN_ERR "failed to find memory resource for uart\n");
++		return -EINVAL;
++	}
++
++	myown_port->fh_info.baudrate = BAUDRATE_115200;
++	myown_port->fh_info.fifo_ctrl = UART_INT_RXFIFO_DEPTH_QUARTER;
++	myown_port->fh_info.line_ctrl = Uart_line_8n2;
++
++	Uart_Init(&myown_port->fh_info);
++	return 0;
++}
++
++static inline int fh_serial_cpufreq_register(struct fh_uart_port* myown_port)
++{
++	return 0;
++}
++
++static ssize_t fh_serial_show_clksrc(struct device *dev,
++					  struct device_attribute *attr,
++					  char *buf)
++{
++	return snprintf(buf, PAGE_SIZE, "* %s\n", FH_SERIAL_NAME);
++}
++
++static DEVICE_ATTR(clock_source, S_IRUGO, fh_serial_show_clksrc, NULL);
++
++#ifdef CONFIG_PM
++static int fh_serial_suspend(struct platform_device *pdev,
++				pm_message_t state)
++{
++	struct uart_port *port = platform_get_drvdata(pdev);
++
++	uart_suspend_port(&fh_uart_drv, port);
++
++	return 0;
++}
++
++static int fh_serial_resume(struct platform_device *pdev)
++{
++	struct uart_port *port = platform_get_drvdata(pdev);
++	int may_wakeup;
++
++	may_wakeup = device_may_wakeup(&pdev->dev);
++
++	uart_resume_port(&fh_uart_drv, port);
++	device_set_wakeup_enable(&pdev->dev, may_wakeup);
++
++	return 0;
++}
++#endif
++
++int fh_serial_probe(struct platform_device *dev)
++{
++	int ret = 0;
++	struct fh_uart_port* myown_port;
++	if (dev->id >= (sizeof(fh_own_ports)/sizeof(struct fh_uart_port)))
++		goto probe_err;
++
++	myown_port = &fh_own_ports[dev->id];
++	ret = fh_serial_init_port(myown_port,dev);
++	if (ret < 0)
++		goto probe_err;
++
++	ret = uart_add_one_port(&fh_uart_drv, &myown_port->port);
++	if(ret != 0)
++		printk(KERN_ERR "%s: failed to add one port.\n", __func__);
++
++	platform_set_drvdata(dev, &myown_port->port);
++	ret = device_create_file(&dev->dev, &dev_attr_clock_source);
++
++	if (ret < 0)
++		printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__);
++
++	ret = fh_serial_cpufreq_register(myown_port);
++	if (ret < 0)
++		dev_err(&dev->dev, "failed to add cpufreq notifier\n");
++
++	printk(KERN_DEBUG "fh serial probe done\n");
++	return 0;
++
++ probe_err:
++	printk(KERN_ERR "%s: fh serial probe error.\n",__func__);
++	return ret;
++}
++
++EXPORT_SYMBOL_GPL(fh_serial_probe);
++
++static inline void fh_serial_cpufreq_deregister(struct fh_uart_port* myown_port)
++{
++
++}
++
++
++int __devexit fh_serial_remove(struct platform_device *dev)
++{
++	struct uart_port *port = fh_dev_to_port(&dev->dev);
++	struct fh_uart_port* myown_port = to_fh_uart_port(port);
++
++	if (port) {
++		fh_serial_cpufreq_deregister(myown_port);
++		device_remove_file(&dev->dev, &dev_attr_clock_source);
++		uart_remove_one_port(&fh_uart_drv, port);
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(fh_serial_remove);
++
++int fh_serial_init(struct platform_driver *drv)
++{
++	return platform_driver_register(drv);
++}
++EXPORT_SYMBOL_GPL(fh_serial_init);
++
++static int __init fh_serial_modinit(void)
++{
++	int ret;
++	ret = uart_register_driver(&fh_uart_drv);
++	if (ret < 0) {
++		printk(KERN_ERR "failed to register UART driver\n");
++		return ret;
++	}
++	return 0;
++}
++
++static void __exit fh_serial_modexit(void)
++{
++	uart_unregister_driver(&fh_uart_drv);
++}
++
++
++static int _fh_serial_probe(struct platform_device *dev)
++{
++	return fh_serial_probe(dev);
++}
++
++static struct platform_driver fh_serial_driver = {
++	.probe		= _fh_serial_probe,
++	.remove		= __devexit_p(fh_serial_remove),
++#ifdef CONFIG_PM
++	.suspend	= fh_serial_suspend,
++	.resume		= fh_serial_resume,
++#endif
++	.driver		= {
++		.name	= FH_SERIAL_NAME,
++		.owner	= THIS_MODULE,
++	},
++};
++
++static int __init _fh_serial_init(void)
++{
++	int ret;
++
++	/* register the uart_driver first: the platform driver's probe
++	 * calls uart_add_one_port(), which needs fh_uart_drv in place */
++	ret = fh_serial_modinit();
++	if (ret < 0)
++		return ret;
++	return fh_serial_init(&fh_serial_driver);
++}
++
++static void __exit _fh_serial_exit(void)
++{
++	platform_driver_unregister(&fh_serial_driver);
++	fh_serial_modexit();
++}
++
++module_init(_fh_serial_init);
++module_exit(_fh_serial_exit);
+diff --git a/drivers/tty/serial/fh_serial.h b/drivers/tty/serial/fh_serial.h
+new file mode 100644
+index 00000000..7350f667
+--- /dev/null
++++ b/drivers/tty/serial/fh_serial.h
+@@ -0,0 +1,166 @@
++/*
++ * fh_serial.h
++ *
++ *  Created on: Jul 29, 2014
++ *      Author: duobao
++ */
++#ifndef FH_SERIAL_H_
++#define FH_SERIAL_H_
++/****************************************************************************
++ * #include section
++ *	add #include here if any
++ ***************************************************************************/
++
++/****************************************************************************
++ * #define section
++ *	add constant #define here if any
++ ***************************************************************************/
++
++#define lift_shift_bit_num(bit_num)			(1<<bit_num)
++#define ISR_NUMBER0							(30)
++#define ISR_NUMBER1							(31)
++#define ISR_NUMBER2							(35)
++#define UART0_PORT							0
++#define UART1_PORT							1
++#define UART2_PORT							2
++#define	UART_MAX_NUM						3
++#define UART_DATA_ARRIVED					1
++#define UART_LCR_DLAB_POS        			(lift_shift_bit_num(7))
++
++#define UART0_FIFO_SIZE							32
++#define UART1_FIFO_SIZE							16
++#define UART2_FIFO_SIZE							16
++
++/****************************************************************************
++* ADT section
++*	add Abstract Data Type definition here
++***************************************************************************/
++//error status
++enum {
++	UART_CONFIG_OK = 0,
++	UART_CONFIG_FIFO_OK = 0,
++	UART_CONFIG_LINE_OK = 0,
++	UART_CONFIG_DIVISOR_OK = 0,
++	UART_WRITE_DATA_OK = 0,
++	UART_READ_DATA_OK = 0,
++	UART_CLEAR_ERROR_OK = 0,
++	UART_RESET_RX_POOL_OK = 0,
++	UART_CLEAR_RX_DATA_READY_OK = 0,
++	UART_INIT_OK = 0,
++	UART_CONFIG_PARA_ERROR = lift_shift_bit_num(0),
++	UART_CONFIG_FIFO_ERROR = lift_shift_bit_num(1),
++	UART_IS_BUSY = lift_shift_bit_num(2),
++	UART_DW_FIFO_OVERFLOW = lift_shift_bit_num(3),  //dw rxfifo overflow, maybe the rx isr is disabled or the main clock is too low
++	UART_SW_FIFO_OVERFLOW = lift_shift_bit_num(4),	//soft rxfifo overflow, maybe the main clk is too low
++	UART_PARITY_ERROR = lift_shift_bit_num(5),
++	UART_FRAME_ERROR = lift_shift_bit_num(6),
++	UART_BREAK_ERROR = lift_shift_bit_num(7),
++	UART_FIFO_EMPTY = lift_shift_bit_num(8),
++};
++
++//interrupt enable
++typedef enum enum_uart_irq {
++	UART_INT_PTIME_POS = (lift_shift_bit_num(7)),
++	UART_INT_EDSSI_POS = (lift_shift_bit_num(3)),
++	UART_INT_ELSI_POS  = (lift_shift_bit_num(2)),
++	UART_INT_ETBEI_POS = (lift_shift_bit_num(1)),
++	UART_INT_ERBFI_POS = (lift_shift_bit_num(0)),
++	UART_INT_ALL          = 0x0f,
++}uart_irq_e;
++
++//interrupt id
++enum {
++	UART_INT_ID_MODEM = 		0,
++	UART_INT_ID_NO_INT = 		1,
++	UART_INT_ID_THR_EMPTY =  	2,
++	UART_INT_ID_RECEIVE_DATA =	4,
++	UART_INT_ID_RECEIVE_LINE =	6,
++	UART_INT_ID_BUSY = 			7,
++	UART_INT_ID_TIME_OUT =	    12,
++	UART_FIFO_IS_ENABLE =	    0xc0,
++};
++
++typedef enum enum_uart_line {
++    Uart_line_5n1 = 0x00,   // 5 data bits, no parity, 1 stop bit
++    Uart_line_5n1_5 = 0x04, // 5 data bits, no parity, 1.5 stop bits
++    Uart_line_5e1 = 0x18,   // 5 data bits, even parity, 1 stop bit
++    Uart_line_5e1_5 = 0x1c, // 5 data bits, even parity, 1.5 stop bits
++    Uart_line_5o1 = 0x08,   // 5 data bits, odd parity, 1 stop bit
++    Uart_line_5o1_5 = 0x0c, // 5 data bits, odd parity, 1.5 stop bits
++    Uart_line_6n1 = 0x01,   // 6 data bits, no parity, 1 stop bit
++    Uart_line_6n2 = 0x05,   // 6 data bits, no parity, 2 stop bits
++    Uart_line_6e1 = 0x19,   // 6 data bits, even parity, 1 stop bit
++    Uart_line_6e2 = 0x1d,   // 6 data bits, even parity, 2 stop bits
++    Uart_line_6o1 = 0x09,   // 6 data bits, odd parity, 1 stop bit
++    Uart_line_6o2 = 0x0d,   // 6 data bits, odd parity, 2 stop bits
++    Uart_line_7n1 = 0x02,   // 7 data bits, no parity, 1 stop bit
++    Uart_line_7n2 = 0x06,   // 7 data bits, no parity, 2 stop bits
++    Uart_line_7e1 = 0x1a,   // 7 data bits, even parity, 1 stop bit
++    Uart_line_7e2 = 0x1e,   // 7 data bits, even parity, 2 stop bits
++    Uart_line_7o1 = 0x0a,   // 7 data bits, odd parity, 1 stop bit
++    Uart_line_7o2 = 0x0e,   // 7 data bits, odd parity, 2 stop bits
++    Uart_line_8n1 = 0x03,   // 8 data bits, no parity, 1 stop bit
++    Uart_line_8n2 = 0x07,   // 8 data bits, no parity, 2 stop bits
++    Uart_line_8e1 = 0x1b,   // 8 data bits, even parity, 1 stop bit
++    Uart_line_8e2 = 0x1f,   // 8 data bits, even parity, 2 stop bits
++    Uart_line_8o1 = 0x0b,   // 8 data bits, odd parity, 1 stop bit
++    Uart_line_8o2 = 0x0f    // 8 data bits, odd parity, 2 stop bits
++}uart_line_e;
++
++//rx & tx fifo config
++typedef enum enum_uart_fifo {
++	UART_INT_RXFIFO_DEPTH_1 = 0x01,        			//fifo enable, rx 1 byte, set rx int
++	UART_INT_RXFIFO_DEPTH_QUARTER = 0x41,			//fifo enable, rx 1/4 fifo, set rx int
++	UART_INT_RXFIFO_DEPTH_HALF = 0x81,				//fifo enable, rx 1/2 fifo, set rx int
++	UART_INT_RXFIFO_2LESS_THAN_FULL = 0xc1,			//fifo enable, rx 2 less than full, set rx int
++}uart_fifo_e;
++
++//line status
++enum {
++	UART_LINE_STATUS_RFE	 = (lift_shift_bit_num(7)),
++	UART_LINE_STATUS_TEMT 	 = (lift_shift_bit_num(6)),
++	UART_LINE_STATUS_THRE 	 = (lift_shift_bit_num(5)),
++	UART_LINE_STATUS_BI	 	 = (lift_shift_bit_num(4)),
++	UART_LINE_STATUS_FE 	 = (lift_shift_bit_num(3)),
++	UART_LINE_STATUS_PE   	 = (lift_shift_bit_num(2)),
++	UART_LINE_STATUS_OE 	 = (lift_shift_bit_num(1)),
++	UART_LINE_STATUS_DR 	 = (lift_shift_bit_num(0)),
++};
++
++//uart status
++enum {
++	UART_STATUS_RFF	 = (lift_shift_bit_num(4)),
++	UART_STATUS_RFNE = (lift_shift_bit_num(3)),
++	UART_STATUS_TFE	 = (lift_shift_bit_num(2)),
++	UART_STATUS_TFNF = (lift_shift_bit_num(1)),
++	UART_STATUS_BUSY = (lift_shift_bit_num(0)),
++};
++
++#define 	UART_CLOCK_FREQ   	(30000000)   //30MHz
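++/* 16550-style divisor: DLL/DLH = UART_CLOCK_FREQ / (16 * baud), rounded to nearest (the +8 before the /16 does the rounding) */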
++typedef enum enum_uart_baudrate{
++	BAUDRATE_9600 	= (((UART_CLOCK_FREQ/9600)+8)/16),
++	BAUDRATE_19200 	= (((UART_CLOCK_FREQ/19200)+8)/16),
++	BAUDRATE_38400  = (((UART_CLOCK_FREQ/38400)+8)/16),
++	BAUDRATE_57600  = (((UART_CLOCK_FREQ/57600)+8)/16),
++	BAUDRATE_115200 = (((UART_CLOCK_FREQ/115200)+8)/16),
++	BAUDRATE_194000 = (((UART_CLOCK_FREQ/194000)+8)/16),
++}uart_baudrate_e;
++
++typedef struct _fh_uart_info {
++	const char * name;
++	unsigned int irq_num;
++	unsigned char __iomem	*base_add;
++	uart_baudrate_e	baudrate;
++	uart_line_e		line_ctrl;
++	uart_fifo_e		fifo_ctrl;
++	struct platform_device *dev;
++
++}fh_uart_info;
++
++struct fh_uart_port {
++	fh_uart_info	fh_info;
++	struct uart_port port;
++};
++
++#endif /* FH_SERIAL_H_ */
+diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
+index 30ddf8dc..57497edc 100644
+--- a/drivers/usb/Makefile
++++ b/drivers/usb/Makefile
+@@ -1,5 +1,5 @@
+ #
+-# Makefile for the kernel USB device drivers.
++# Makefile for the kernel USB device drivers.
+ #
+ 
+ # Object files in subdirectories
+@@ -8,6 +8,7 @@ obj-$(CONFIG_USB)		+= core/
+ 
+ obj-$(CONFIG_USB_MON)		+= mon/
+ 
++obj-$(CONFIG_USB_FH_OTG)		+= host/
+ obj-$(CONFIG_PCI)		+= host/
+ obj-$(CONFIG_USB_EHCI_HCD)	+= host/
+ obj-$(CONFIG_USB_ISP116X_HCD)	+= host/
+@@ -23,7 +24,7 @@ obj-$(CONFIG_USB_HWA_HCD)	+= host/
+ obj-$(CONFIG_USB_ISP1760_HCD)	+= host/
+ obj-$(CONFIG_USB_IMX21_HCD)	+= host/
+ obj-$(CONFIG_USB_FSL_MPH_DR_OF)	+= host/
+-
++obj-$(CONFIG_USB_S3C_OTG_HOST)	+= host/
+ obj-$(CONFIG_USB_C67X00_HCD)	+= c67x00/
+ 
+ obj-$(CONFIG_USB_WUSB)		+= wusbcore/
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index ace9f844..d0b57e01 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -393,7 +393,6 @@ rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
+ 	char const *s;
+ 	static char const langids[4] = {4, USB_DT_STRING, 0x09, 0x04};
+ 
+-	// language ids
+ 	switch (id) {
+ 	case 0:
+ 		/* Array of LANGID codes (0x0409 is MSFT-speak for "en-us") */
+@@ -562,7 +561,6 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
+ 	case DeviceOutRequest | USB_REQ_SET_INTERFACE:
+ 		break;
+ 	case DeviceOutRequest | USB_REQ_SET_ADDRESS:
+-		// wValue == urb->dev->devaddr
+ 		dev_dbg (hcd->self.controller, "root hub device address %d\n",
+ 			wValue);
+ 		break;
+@@ -572,7 +570,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
+ 	/* ENDPOINT REQUESTS */
+ 
+ 	case EndpointRequest | USB_REQ_GET_STATUS:
+-		// ENDPOINT_HALT flag
++
+ 		tbuf [0] = 0;
+ 		tbuf [1] = 0;
+ 		len = 2;
+@@ -1582,8 +1580,10 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
+ 	usb_unanchor_urb(urb);
+ 
+ 	/* pass ownership to the completion handler */
++	//printk("god...hand add is %x\n",(unsigned int)urb->complete);
+ 	urb->status = status;
+ 	urb->complete (urb);
++
+ 	atomic_dec (&urb->use_count);
+ 	if (unlikely(atomic_read(&urb->reject)))
+ 		wake_up (&usb_kill_urb_queue);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a428aa08..769bc355 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2939,6 +2939,8 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 					/* FALL THROUGH */
+ 				default:
+-					if (r == 0)
++					if (r == 0) {
++						printk("error is r = eproto\n");
+ 						r = -EPROTO;
++					}
+ 					break;
+ 				}
+diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
+index 4fe92b18..be9951c7 100644
+--- a/drivers/usb/gadget/Makefile
++++ b/drivers/usb/gadget/Makefile
+@@ -3,7 +3,7 @@
+ #
+ ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG
+ 
+-obj-$(CONFIG_USB_DUMMY_HCD)	+= dummy_hcd.o
++#obj-$(CONFIG_USB_DUMMY_HCD)	+= dummy_hcd.o
+ obj-$(CONFIG_USB_NET2280)	+= net2280.o
+ obj-$(CONFIG_USB_AMD5536UDC)	+= amd5536udc.o
+ obj-$(CONFIG_USB_PXA25X)	+= pxa25x_udc.o
+@@ -22,6 +22,7 @@ obj-$(CONFIG_USB_R8A66597)	+= r8a66597-udc.o
+ obj-$(CONFIG_USB_FSL_QE)	+= fsl_qe_udc.o
+ obj-$(CONFIG_USB_CI13XXX_PCI)	+= ci13xxx_pci.o
+ obj-$(CONFIG_USB_S3C_HSOTG)	+= s3c-hsotg.o
++obj-$(CONFIG_USB_S3C_OTGD)	+= s3c_udc_otg.o
+ obj-$(CONFIG_USB_S3C_HSUDC)	+= s3c-hsudc.o
+ obj-$(CONFIG_USB_LANGWELL)	+= langwell_udc.o
+ obj-$(CONFIG_USB_EG20T)		+= pch_udc.o
+diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
+index bcdac7c7..b6b874d4 100644
+--- a/drivers/usb/gadget/gadget_chips.h
++++ b/drivers/usb/gadget/gadget_chips.h
+@@ -136,6 +136,7 @@
+ #define gadget_is_s3c_hsotg(g)    0
+ #endif
+ 
++
+ #ifdef CONFIG_USB_S3C_HSUDC
+ #define gadget_is_s3c_hsudc(g) (!strcmp("s3c-hsudc", (g)->name))
+ #else
+@@ -223,6 +224,7 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
+ 		return 0x29;
+ 	else if (gadget_is_s3c_hsudc(gadget))
+ 		return 0x30;
++		return 0x31;
+ 
+ 	return -ENOENT;
+ }
+diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
+index 100f2635..8d8d6516 100644
+--- a/drivers/usb/gadget/s3c2410_udc.c
++++ b/drivers/usb/gadget/s3c2410_udc.c
+@@ -28,6 +28,7 @@
+ #include <linux/ioport.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/smp_lock.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/timer.h>
+@@ -35,8 +36,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+-#include <linux/gpio.h>
+-#include <linux/prefetch.h>
+ 
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+@@ -52,6 +51,7 @@
+ #include <mach/irqs.h>
+ 
+ #include <mach/hardware.h>
++#include <mach/regs-gpio.h>
+ 
+ #include <plat/regs-udc.h>
+ #include <plat/udc.h>
+@@ -736,10 +736,6 @@ static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev,
+ 	else
+ 		dev->ep0state = EP0_OUT_DATA_PHASE;
+ 
+-	if (!dev->driver)
+-		return;
+-
+-	/* deliver the request to the gadget driver */
+ 	ret = dev->driver->setup(&dev->gadget, crq);
+ 	if (ret < 0) {
+ 		if (dev->req_config) {
+@@ -903,7 +899,7 @@ static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev)
+ 	int pwr_reg;
+ 	int ep0csr;
+ 	int i;
+-	u32 idx, idx2;
++	u32 idx;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&dev->lock, flags);
+@@ -1018,20 +1014,6 @@ static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev)
+ 		}
+ 	}
+ 
+-	/* what else causes this interrupt? a receive! who is it? */
+-	if (!usb_status && !usbd_status && !pwr_reg && !ep0csr) {
+-		for (i = 1; i < S3C2410_ENDPOINTS; i++) {
+-			idx2 = udc_read(S3C2410_UDC_INDEX_REG);
+-			udc_write(i, S3C2410_UDC_INDEX_REG);
+-
+-			if (udc_read(S3C2410_UDC_OUT_CSR1_REG) & 0x1)
+-				s3c2410_udc_handle_ep(&dev->ep[i]);
+-
+-			/* restore index */
+-			udc_write(idx2, S3C2410_UDC_INDEX_REG);
+-		}
+-	}
+-
+ 	dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", IRQ_USBD);
+ 
+ 	/* Restore old index */
+@@ -1482,9 +1464,7 @@ static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
+ {
+ 	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+ 
+-	if (udc_info && (udc_info->udc_command ||
+-		gpio_is_valid(udc_info->pullup_pin))) {
+-
++	if (udc_info && udc_info->udc_command) {
+ 		if (is_on)
+ 			s3c2410_udc_enable(udc);
+ 		else {
+@@ -1530,7 +1510,11 @@ static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev)
+ 
+ 	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+ 
+-	value = gpio_get_value(udc_info->vbus_pin) ? 1 : 0;
++	/* some cpus cannot read from a line configured to IRQ! */
++	s3c2410_gpio_cfgpin(udc_info->vbus_pin, S3C2410_GPIO_INPUT);
++	value = s3c2410_gpio_getpin(udc_info->vbus_pin);
++	s3c2410_gpio_cfgpin(udc_info->vbus_pin, S3C2410_GPIO_SFN2);
++
+ 	if (udc_info->vbus_pin_inverted)
+ 		value = !value;
+ 
+@@ -1561,32 +1545,6 @@ static const struct usb_gadget_ops s3c2410_ops = {
+ 	.vbus_draw		= s3c2410_vbus_draw,
+ };
+ 
+-static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
+-{
+-	if (!udc_info)
+-		return;
+-
+-	if (udc_info->udc_command) {
+-		udc_info->udc_command(S3C2410_UDC_P_DISABLE);
+-	} else if (gpio_is_valid(udc_info->pullup_pin)) {
+-		int value;
+-
+-		switch (cmd) {
+-		case S3C2410_UDC_P_ENABLE:
+-			value = 1;
+-			break;
+-		case S3C2410_UDC_P_DISABLE:
+-			value = 0;
+-			break;
+-		default:
+-			return;
+-		}
+-		value ^= udc_info->pullup_pin_inverted;
+-
+-		gpio_set_value(udc_info->pullup_pin, value);
+-	}
+-}
+-
+ /*------------------------- gadget driver handling---------------------------*/
+ /*
+  * s3c2410_udc_disable
+@@ -1608,7 +1566,8 @@ static void s3c2410_udc_disable(struct s3c2410_udc *dev)
+ 	udc_write(0x1F, S3C2410_UDC_EP_INT_REG);
+ 
+ 	/* Good bye, cruel world */
+-	s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
++	if (udc_info && udc_info->udc_command)
++		udc_info->udc_command(S3C2410_UDC_P_DISABLE);
+ 
+ 	/* Set speed to unknown */
+ 	dev->gadget.speed = USB_SPEED_UNKNOWN;
+@@ -1669,19 +1628,20 @@ static void s3c2410_udc_enable(struct s3c2410_udc *dev)
+ 	udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG);
+ 
+ 	/* time to say "hello, world" */
+-	s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
++	if (udc_info && udc_info->udc_command)
++		udc_info->udc_command(S3C2410_UDC_P_ENABLE);
+ }
+ 
+ /*
+- *	usb_gadget_probe_driver
++ *	usb_gadget_register_driver
+  */
+-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+-		int (*bind)(struct usb_gadget *))
++int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+ {
+ 	struct s3c2410_udc *udc = the_controller;
+ 	int		retval;
+ 
+-	dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name);
++	dprintk(DEBUG_NORMAL, "usb_gadget_register_driver() '%s'\n",
++		driver->driver.name);
+ 
+ 	/* Sanity checks */
+ 	if (!udc)
+@@ -1690,9 +1650,10 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+ 	if (udc->driver)
+ 		return -EBUSY;
+ 
+-	if (!bind || !driver->setup || driver->speed < USB_SPEED_FULL) {
++	if (!driver->bind || !driver->setup
++			|| driver->speed < USB_SPEED_FULL) {
+ 		printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
+-			bind, driver->setup, driver->speed);
++			driver->bind, driver->setup, driver->speed);
+ 		return -EINVAL;
+ 	}
+ #if defined(MODULE)
+@@ -1715,7 +1676,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+ 	dprintk(DEBUG_NORMAL, "binding gadget driver '%s'\n",
+ 		driver->driver.name);
+ 
+-	if ((retval = bind(&udc->gadget)) != 0) {
++	if ((retval = driver->bind (&udc->gadget)) != 0) {
+ 		device_del(&udc->gadget.dev);
+ 		goto register_error;
+ 	}
+@@ -1730,7 +1691,6 @@ register_error:
+ 	udc->gadget.dev.driver = NULL;
+ 	return retval;
+ }
+-EXPORT_SYMBOL(usb_gadget_probe_driver);
+ 
+ /*
+  *	usb_gadget_unregister_driver
+@@ -1745,15 +1705,12 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+ 	if (!driver || driver != udc->driver || !driver->unbind)
+ 		return -EINVAL;
+ 
+-	dprintk(DEBUG_NORMAL, "usb_gadget_unregister_driver() '%s'\n",
++	dprintk(DEBUG_NORMAL,"usb_gadget_register_driver() '%s'\n",
+ 		driver->driver.name);
+ 
+-	/* report disconnect */
+ 	if (driver->disconnect)
+ 		driver->disconnect(&udc->gadget);
+ 
+-	driver->unbind(&udc->gadget);
+-
+ 	device_del(&udc->gadget.dev);
+ 	udc->driver = NULL;
+ 
+@@ -1770,7 +1727,7 @@ static struct s3c2410_udc memory = {
+ 		.ep0		= &memory.ep[0].ep,
+ 		.name		= gadget_name,
+ 		.dev = {
+-			.init_name	= "gadget",
++			.bus_id		= "gadget",
+ 		},
+ 	},
+ 
+@@ -1845,7 +1802,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 	struct s3c2410_udc *udc = &memory;
+ 	struct device *dev = &pdev->dev;
+ 	int retval;
+-	int irq;
++	unsigned int irq;
+ 
+ 	dev_dbg(dev, "%s()\n", __func__);
+ 
+@@ -1904,7 +1861,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 
+ 	/* irq setup after old hardware state is cleaned up */
+ 	retval = request_irq(IRQ_USBD, s3c2410_udc_irq,
+-			     IRQF_DISABLED, gadget_name, udc);
++			IRQF_DISABLED, gadget_name, udc);
+ 
+ 	if (retval != 0) {
+ 		dev_err(dev, "cannot get irq %i, err %d\n", IRQ_USBD, retval);
+@@ -1915,28 +1872,17 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 	dev_dbg(dev, "got irq %i\n", IRQ_USBD);
+ 
+ 	if (udc_info && udc_info->vbus_pin > 0) {
+-		retval = gpio_request(udc_info->vbus_pin, "udc vbus");
+-		if (retval < 0) {
+-			dev_err(dev, "cannot claim vbus pin\n");
+-			goto err_int;
+-		}
+-
+-		irq = gpio_to_irq(udc_info->vbus_pin);
+-		if (irq < 0) {
+-			dev_err(dev, "no irq for gpio vbus pin\n");
+-			goto err_gpio_claim;
+-		}
+-
++		irq = s3c2410_gpio_getirq(udc_info->vbus_pin);
+ 		retval = request_irq(irq, s3c2410_udc_vbus_irq,
+ 				     IRQF_DISABLED | IRQF_TRIGGER_RISING
+ 				     | IRQF_TRIGGER_FALLING | IRQF_SHARED,
+ 				     gadget_name, udc);
+ 
+ 		if (retval != 0) {
+-			dev_err(dev, "can't get vbus irq %d, err %d\n",
++			dev_err(dev, "can't get vbus irq %i, err %d\n",
+ 				irq, retval);
+ 			retval = -EBUSY;
+-			goto err_gpio_claim;
++			goto err_int;
+ 		}
+ 
+ 		dev_dbg(dev, "got irq %i\n", irq);
+@@ -1944,17 +1890,6 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 		udc->vbus = 1;
+ 	}
+ 
+-	if (udc_info && !udc_info->udc_command &&
+-		gpio_is_valid(udc_info->pullup_pin)) {
+-
+-		retval = gpio_request_one(udc_info->pullup_pin,
+-				udc_info->vbus_pin_inverted ?
+-				GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+-				"udc pullup");
+-		if (retval)
+-			goto err_vbus_irq;
+-	}
+-
+ 	if (s3c2410_udc_debugfs_root) {
+ 		udc->regs_info = debugfs_create_file("registers", S_IRUGO,
+ 				s3c2410_udc_debugfs_root,
+@@ -1967,12 +1902,6 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
+-err_vbus_irq:
+-	if (udc_info && udc_info->vbus_pin > 0)
+-		free_irq(gpio_to_irq(udc_info->vbus_pin), udc);
+-err_gpio_claim:
+-	if (udc_info && udc_info->vbus_pin > 0)
+-		gpio_free(udc_info->vbus_pin);
+ err_int:
+ 	free_irq(IRQ_USBD, udc);
+ err_map:
+@@ -1997,12 +1926,8 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
+ 
+ 	debugfs_remove(udc->regs_info);
+ 
+-	if (udc_info && !udc_info->udc_command &&
+-		gpio_is_valid(udc_info->pullup_pin))
+-		gpio_free(udc_info->pullup_pin);
+-
+ 	if (udc_info && udc_info->vbus_pin > 0) {
+-		irq = gpio_to_irq(udc_info->vbus_pin);
++		irq = s3c2410_gpio_getirq(udc_info->vbus_pin);
+ 		free_irq(irq, udc);
+ 	}
+ 
+@@ -2032,14 +1957,16 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
+ #ifdef CONFIG_PM
+ static int s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message)
+ {
+-	s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
++	if (udc_info && udc_info->udc_command)
++		udc_info->udc_command(S3C2410_UDC_P_DISABLE);
+ 
+ 	return 0;
+ }
+ 
+ static int s3c2410_udc_resume(struct platform_device *pdev)
+ {
+-	s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
++	if (udc_info && udc_info->udc_command)
++		udc_info->udc_command(S3C2410_UDC_P_ENABLE);
+ 
+ 	return 0;
+ }
+@@ -2106,6 +2033,7 @@ static void __exit udc_exit(void)
+ }
+ 
+ EXPORT_SYMBOL(usb_gadget_unregister_driver);
++EXPORT_SYMBOL(usb_gadget_register_driver);
+ 
+ module_init(udc_init);
+ module_exit(udc_exit);
+diff --git a/drivers/usb/gadget/s3c_udc.h b/drivers/usb/gadget/s3c_udc.h
+new file mode 100644
+index 00000000..be2ddca1
+--- /dev/null
++++ b/drivers/usb/gadget/s3c_udc.h
+@@ -0,0 +1,135 @@
++/*
++ * drivers/usb/gadget/s3c_udc.h
++ * Samsung S3C on-chip full/high speed USB device controllers
++ * Copyright (C) 2005 Samsung Electronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ */
++ 
++#ifndef __S3C_USB_GADGET
++#define __S3C_USB_GADGET
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/ioport.h>
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/proc_fs.h>
++#include <linux/mm.h>
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++
++#include <asm/byteorder.h>
++#include <asm/dma.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++#include <asm/unaligned.h>
++//#include <asm/hardware.h>
++
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++
++// Max packet size
++#if defined(CONFIG_USB_GADGET_S3C_FS)
++#define EP0_FIFO_SIZE		8
++#define EP_FIFO_SIZE		64
++#define S3C_MAX_ENDPOINTS	5
++#elif defined(CONFIG_USB_GADGET_S3C_HS) || defined(CONFIG_PLAT_S5P64XX)
++#define EP0_FIFO_SIZE		64
++#define EP_FIFO_SIZE		512
++#define EP_FIFO_SIZE2		1024
++#define S3C_MAX_ENDPOINTS	9
++#define DED_TX_FIFO		1	/* Dedicated NPTx fifo for s5p6440 */
++#else
++#define EP0_FIFO_SIZE		64
++#define EP_FIFO_SIZE		512
++#define EP_FIFO_SIZE2		1024
++#define S3C_MAX_ENDPOINTS	16
++#endif
++
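++/* ep0 control-transfer state machine values (kept in dev->ep0state) */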
++#define WAIT_FOR_SETUP          0
++#define DATA_STATE_XMIT         1
++#define DATA_STATE_NEED_ZLP     2
++#define WAIT_FOR_OUT_STATUS     3
++#define DATA_STATE_RECV         4
++#define RegReadErr		5
++#define FAIL_TO_SETUP		6
++
++/* ********************************************************************************************* */
++/* IO
++ */
++
++typedef enum ep_type {
++	ep_control, ep_bulk_in, ep_bulk_out, ep_interrupt
++} ep_type_t;
++
++struct s3c_ep {
++	struct usb_ep ep;
++	struct s3c_udc *dev;
++
++	const struct usb_endpoint_descriptor *desc;
++	struct list_head queue;
++	unsigned long pio_irqs;
++
++	u8 stopped;
++	u8 bEndpointAddress;
++	u8 bmAttributes;
++
++	ep_type_t ep_type;
++	u32 fifo;
++#ifdef CONFIG_USB_GADGET_S3C_FS
++	u32 csr1;
++	u32 csr2;
++#endif
++};
++
++struct s3c_request {
++	struct usb_request req;
++	struct list_head queue;
++};
++
++struct s3c_udc {
++	struct usb_gadget gadget;
++	struct usb_gadget_driver *driver;
++	//struct device *dev;
++	struct platform_device *dev;
++	spinlock_t lock;
++
++	int ep0state;
++	struct s3c_ep ep[S3C_MAX_ENDPOINTS];
++
++	unsigned char usb_address;
++
++	unsigned req_pending:1, req_std:1, req_config:1;
++};
++
++extern struct s3c_udc *the_controller;
++
++#define ep_is_in(EP) 		(((EP)->bEndpointAddress&USB_DIR_IN)==USB_DIR_IN)
++#define ep_index(EP) 		((EP)->bEndpointAddress&0xF)
++#define ep_maxpacket(EP) 	((EP)->ep.maxpacket)
++
++#endif
+diff --git a/drivers/usb/gadget/s3c_udc_otg.c b/drivers/usb/gadget/s3c_udc_otg.c
+new file mode 100644
+index 00000000..e27b5873
+--- /dev/null
++++ b/drivers/usb/gadget/s3c_udc_otg.c
+@@ -0,0 +1,1099 @@
++/*
++ * drivers/usb/gadget/s3c_udc_otg.c
++ * Samsung S3C on-chip full/high speed USB OTG 2.0 device controllers
++ *
++ * Copyright (C) 2008 for Samsung Electronics
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ */
++
++#include "s3c_udc.h"
++#include <linux/platform_device.h>
++#include <linux/clk.h>
++#include <mach/map.h>
++#include <plat/regs-otg.h>
++
++#if	defined(CONFIG_USB_GADGET_S3C_OTGD_DMA_MODE) /* DMA mode */
++#define OTG_DMA_MODE		1
++
++#elif	defined(CONFIG_USB_GADGET_S3C_OTGD_SLAVE_MODE) /* Slave mode */
++#define OTG_DMA_MODE		0
++#error " Slave Mode is not implemented to do later"
++#else
++#error " Unknown S3C OTG operation mode, Select a correct operation mode"
++#endif
++
++#undef DEBUG_S3C_UDC_SETUP
++#undef DEBUG_S3C_UDC_EP0
++#undef DEBUG_S3C_UDC_ISR
++#undef DEBUG_S3C_UDC_OUT_EP
++#undef DEBUG_S3C_UDC_IN_EP
++#undef DEBUG_S3C_UDC
++
++//#define DEBUG_S3C_UDC_SETUP
++//#define DEBUG_S3C_UDC_EP0
++//#define DEBUG_S3C_UDC_ISR
++//#define DEBUG_S3C_UDC_OUT_EP
++//#define DEBUG_S3C_UDC_IN_EP
++//#define DEBUG_S3C_UDC
++
++#define EP0_CON		0
++#define EP1_OUT		1
++#define EP2_IN		2
++#define EP3_IN		3
++#define EP_MASK		0xF
++
++#if defined(DEBUG_S3C_UDC_SETUP) || defined(DEBUG_S3C_UDC_ISR)\
++	|| defined(DEBUG_S3C_UDC_OUT_EP)
++
++static char *state_names[] = {
++	"WAIT_FOR_SETUP",
++	"DATA_STATE_XMIT",
++	"DATA_STATE_NEED_ZLP",
++	"WAIT_FOR_OUT_STATUS",
++	"DATA_STATE_RECV",
++	"RegReadErr",		/* keep in step with the ep0state defines in s3c_udc.h */
++	"FAIL_TO_SETUP",
++	};
++#endif
++
++#ifdef DEBUG_S3C_UDC_SETUP
++#define DEBUG_SETUP(fmt,args...) printk(fmt, ##args)
++#else
++#define DEBUG_SETUP(fmt,args...) do {} while(0)
++#endif
++
++#ifdef DEBUG_S3C_UDC_EP0
++#define DEBUG_EP0(fmt,args...) printk(fmt, ##args)
++#else
++#define DEBUG_EP0(fmt,args...) do {} while(0)
++#endif
++
++#ifdef DEBUG_S3C_UDC
++#define DEBUG(fmt,args...) printk(fmt, ##args)
++#else
++#define DEBUG(fmt,args...) do {} while(0)
++#endif
++
++#ifdef DEBUG_S3C_UDC_ISR
++#define DEBUG_ISR(fmt,args...) printk(fmt, ##args)
++#else
++#define DEBUG_ISR(fmt,args...) do {} while(0)
++#endif
++
++#ifdef DEBUG_S3C_UDC_OUT_EP
++#define DEBUG_OUT_EP(fmt,args...) printk(fmt, ##args)
++#else
++#define DEBUG_OUT_EP(fmt,args...) do {} while(0)
++#endif
++
++#ifdef DEBUG_S3C_UDC_IN_EP
++#define DEBUG_IN_EP(fmt,args...) printk(fmt, ##args)
++#else
++#define DEBUG_IN_EP(fmt,args...) do {} while(0)
++#endif
++
++
++#define	DRIVER_DESC		"S3C HS USB OTG Device Driver, (c) 2008-2009 Samsung Electronics"
++#define	DRIVER_VERSION		"15 March 2009"
++
++struct s3c_udc	*the_controller;
++
++static const char driver_name[] = "s3c-udc";
++static const char driver_desc[] = DRIVER_DESC;
++static const char ep0name[] = "ep0-control";
++
++/* Max packet size*/
++static unsigned int ep0_fifo_size = 64;
++static unsigned int ep_fifo_size =  512;
++static unsigned int ep_fifo_size2 = 1024;
++static int reset_available = 1;
++
++extern void otg_phy_init(void);
++extern void otg_phy_off(void);
++extern struct usb_ctrlrequest usb_ctrl;
++
++/*
++  Local declarations.
++*/
++static int s3c_ep_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *);
++static int s3c_ep_disable(struct usb_ep *ep);
++static struct usb_request *s3c_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
++static void s3c_free_request(struct usb_ep *ep, struct usb_request *);
++
++static int s3c_queue(struct usb_ep *ep, struct usb_request *, gfp_t gfp_flags);
++static int s3c_dequeue(struct usb_ep *ep, struct usb_request *);
++static int s3c_fifo_status(struct usb_ep *ep);
++static void s3c_fifo_flush(struct usb_ep *ep);
++static void s3c_ep0_read(struct s3c_udc *dev);
++static void s3c_ep0_kick(struct s3c_udc *dev, struct s3c_ep *ep);
++static void s3c_handle_ep0(struct s3c_udc *dev);
++static int s3c_ep0_write(struct s3c_udc *dev);
++static int write_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req);
++static void done(struct s3c_ep *ep, struct s3c_request *req, int status);
++static void stop_activity(struct s3c_udc *dev, struct usb_gadget_driver *driver);
++static int udc_enable(struct s3c_udc *dev);
++static void udc_set_address(struct s3c_udc *dev, unsigned char address);
++static void reconfig_usbd(void);
++static void set_max_pktsize(struct s3c_udc *dev, enum usb_device_speed speed);
++static void nuke(struct s3c_ep *ep, int status);
++static int s3c_udc_set_halt(struct usb_ep *_ep, int value);
++
++static struct usb_ep_ops s3c_ep_ops = {
++	.enable = s3c_ep_enable,
++	.disable = s3c_ep_disable,
++
++	.alloc_request = s3c_alloc_request,
++	.free_request = s3c_free_request,
++
++	.queue = s3c_queue,
++	.dequeue = s3c_dequeue,
++
++	.set_halt = s3c_udc_set_halt,
++	.fifo_status = s3c_fifo_status,
++	.fifo_flush = s3c_fifo_flush,
++};
++
++#ifdef CONFIG_USB_GADGET_DEBUG_FILES
++
++static const char proc_node_name[] = "driver/udc";
++
++static int
++udc_proc_read(char *page, char **start, off_t off, int count,
++	      int *eof, void *_dev)
++{
++	char *buf = page;
++	struct s3c_udc *dev = _dev;
++	char *next = buf;
++	unsigned size = count;
++	unsigned long flags;
++	int t;
++
++	if (off != 0)
++		return 0;
++
++	local_irq_save(flags);
++
++	/* basic device status */
++	t = scnprintf(next, size,
++		      DRIVER_DESC "\n"
++		      "%s version: %s\n"
++		      "Gadget driver: %s\n"
++		      "\n",
++		      driver_name, DRIVER_VERSION,
++		      dev->driver ? dev->driver->driver.name : "(none)");
++	size -= t;
++	next += t;
++
++	local_irq_restore(flags);
++	*eof = 1;
++	return count - size;
++}
++
++#define create_proc_files() \
++	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
++#define remove_proc_files() \
++	remove_proc_entry(proc_node_name, NULL)
++
++#else	/* !CONFIG_USB_GADGET_DEBUG_FILES */
++
++#define create_proc_files() do {} while (0)
++#define remove_proc_files() do {} while (0)
++
++#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
++
++#if	OTG_DMA_MODE /* DMA Mode */
++#include "s3c_udc_otg_xfer_dma.c"
++
++#else	/* Slave Mode */
++#include "s3c_udc_otg_xfer_slave.c"
++#endif
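++/* the transfer engine (DMA/slave FIFO handling and the s3c_udc_irq ISR) comes from the file included above */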
++
++/*
++ * 	udc_disable - disable USB device controller
++ */
++static void udc_disable(struct s3c_udc *dev)
++{
++	DEBUG_SETUP("%s: %p\n", __FUNCTION__, dev);
++
++	udc_set_address(dev, 0);
++
++	dev->ep0state = WAIT_FOR_SETUP;
++	dev->gadget.speed = USB_SPEED_UNKNOWN;
++	dev->usb_address = 0;
++
++	otg_phy_off();
++}
++
++/*
++ * 	udc_reinit - initialize software state
++ */
++static void udc_reinit(struct s3c_udc *dev)
++{
++	unsigned int i;
++
++	DEBUG_SETUP("%s: %p\n", __FUNCTION__, dev);
++
++	/* device/ep0 records init */
++	INIT_LIST_HEAD(&dev->gadget.ep_list);
++	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
++	dev->ep0state = WAIT_FOR_SETUP;
++
++	/* basic endpoint records init */
++	for (i = 0; i < S3C_MAX_ENDPOINTS; i++) {
++		struct s3c_ep *ep = &dev->ep[i];
++
++		if (i != 0)
++			list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
++
++		ep->desc = 0;
++		ep->stopped = 0;
++		INIT_LIST_HEAD(&ep->queue);
++		ep->pio_irqs = 0;
++	}
++
++	/* the rest was statically initialized, and is read-only */
++}
++
++#define BYTES2MAXP(x)	(x / 8)
++#define MAXP2BYTES(x)	(x * 8)
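++/* the UDC's MaxP register field counts in units of 8 bytes */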
++
++/* until it's enabled, this UDC should be completely invisible
++ * to any USB host.
++ */
++static int udc_enable(struct s3c_udc *dev)
++{
++	DEBUG_SETUP("%s: %p\n", __FUNCTION__, dev);
++
++	otg_phy_init();
++	reconfig_usbd();
++
++	DEBUG_SETUP("S3C USB 2.0 OTG Controller Core Initialized : 0x%x\n",
++			readl(S3C_UDC_OTG_GINTMSK));
++
++	dev->gadget.speed = USB_SPEED_UNKNOWN;
++
++	return 0;
++}
++
++/*
++  Register entry point for the peripheral controller driver.
++*/
++int usb_gadget_register_driver(struct usb_gadget_driver *driver)
++{
++	struct s3c_udc *dev = the_controller;
++	int retval;
++
++	DEBUG_SETUP("%s: %s\n", __FUNCTION__, driver->driver.name);
++
++	if (!driver
++	    || (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
++	    || !driver->bind
++	    || !driver->unbind || !driver->disconnect || !driver->setup)
++		return -EINVAL;
++	if (!dev)
++		return -ENODEV;
++	if (dev->driver)
++		return -EBUSY;
++
++	/* first hook up the driver ... */
++	dev->driver = driver;
++	dev->gadget.dev.driver = &driver->driver;
++	retval = device_add(&dev->gadget.dev);
++
++	if(retval) { /* TODO */
++		printk("target device_add failed, error %d\n", retval);
++		return retval;
++	}
++
++	retval = driver->bind(&dev->gadget);
++	if (retval) {
++		printk("%s: bind to driver %s --> error %d\n", dev->gadget.name,
++		       driver->driver.name, retval);
++		device_del(&dev->gadget.dev);
++
++		dev->driver = 0;
++		dev->gadget.dev.driver = 0;
++		return retval;
++	}
++
++	enable_irq(IRQ_OTG);
++
++	printk("Registered gadget driver '%s'\n", driver->driver.name);
++	udc_enable(dev);
++
++	return 0;
++}
++
++EXPORT_SYMBOL(usb_gadget_register_driver);
++
++/*
++  Unregister entry point for the peripheral controller driver.
++*/
++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
++{
++	struct s3c_udc *dev = the_controller;
++	unsigned long flags;
++
++	if (!dev)
++		return -ENODEV;
++	if (!driver || driver != dev->driver)
++		return -EINVAL;
++
++	spin_lock_irqsave(&dev->lock, flags);
++	dev->driver = 0;
++	stop_activity(dev, driver);
++	spin_unlock_irqrestore(&dev->lock, flags);
++
++	driver->unbind(&dev->gadget);
++	device_del(&dev->gadget.dev);
++
++	disable_irq(IRQ_OTG);
++
++	printk("Unregistered gadget driver '%s'\n", driver->driver.name);
++
++	udc_disable(dev);
++
++	return 0;
++}
++
++EXPORT_SYMBOL(usb_gadget_unregister_driver);
++
++/*
++ *	done - retire a request; caller blocked irqs
++ */
++static void done(struct s3c_ep *ep, struct s3c_request *req, int status)
++{
++	unsigned int stopped = ep->stopped;
++
++	DEBUG("%s: %s %p, req = %p, stopped = %d\n",
++		__FUNCTION__, ep->ep.name, ep, &req->req, stopped);
++
++	list_del_init(&req->queue);
++
++	if (likely(req->req.status == -EINPROGRESS)) {
++		req->req.status = status;
++	} else {
++		status = req->req.status;
++	}
++
++	if (status && status != -ESHUTDOWN) {
++		DEBUG("complete %s req %p stat %d len %u/%u\n",
++			ep->ep.name, &req->req, status,
++			req->req.actual, req->req.length);
++	}
++
++	/* don't modify queue heads during completion callback */
++	ep->stopped = 1;
++
++	spin_unlock(&ep->dev->lock);
++	req->req.complete(&ep->ep, &req->req);
++	spin_lock(&ep->dev->lock);
++
++	ep->stopped = stopped;
++}
++
++/*
++ * 	nuke - dequeue ALL requests
++ */
++static void nuke(struct s3c_ep *ep, int status)
++{
++	struct s3c_request *req;
++
++	DEBUG("%s: %s %p\n", __FUNCTION__, ep->ep.name, ep);
++
++	/* called with irqs blocked */
++	while (!list_empty(&ep->queue)) {
++		req = list_entry(ep->queue.next, struct s3c_request, queue);
++		done(ep, req, status);
++	}
++}
++
++static void stop_activity(struct s3c_udc *dev,
++			  struct usb_gadget_driver *driver)
++{
++	int i;
++
++	/* don't disconnect drivers more than once */
++	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
++		driver = 0;
++	dev->gadget.speed = USB_SPEED_UNKNOWN;
++
++	/* prevent new request submissions, kill any outstanding requests  */
++	for (i = 0; i < S3C_MAX_ENDPOINTS; i++) {
++		struct s3c_ep *ep = &dev->ep[i];
++		ep->stopped = 1;
++		nuke(ep, -ESHUTDOWN);
++	}
++
++	/* report disconnect; the driver is already quiesced */
++	if (driver) {
++		spin_unlock(&dev->lock);
++		driver->disconnect(&dev->gadget);
++		spin_lock(&dev->lock);
++	}
++
++	/* re-init driver-visible data structures */
++	udc_reinit(dev);
++}
++
++static void reconfig_usbd(void)
++{
++	/* 2. Soft-reset the OTG core, then release the reset. */
++#ifdef DED_TX_FIFO
++	int i;
++#endif
++	unsigned int uTemp;
++
++	writel(CORE_SOFT_RESET, S3C_UDC_OTG_GRSTCTL);
++
++	writel(	0<<15		/* PHY Low Power Clock sel*/
++		|1<<14		/* Non-Periodic TxFIFO Rewind Enable*/
++		|0x5<<10	/* Turnaround time*/
++		|0<<9|0<<8	/* [0:HNP disable, 1:HNP enable][ 0:SRP disable, 1:SRP enable] H1= 1,1*/
++		|0<<7		/* Ulpi DDR sel*/
++		|0<<6		/* 0: high speed utmi+, 1: full speed serial*/
++		|0<<4		/* 0: utmi+, 1:ulpi*/
++		|1<<3		/* phy i/f  0:8bit, 1:16bit*/
++		|0x7<<0,	/* HS/FS Timeout**/
++		S3C_UDC_OTG_GUSBCFG);
++
++	/* 3. Put the OTG device core in the disconnected state.*/
++	uTemp = readl(S3C_UDC_OTG_DCTL);
++	uTemp |= SOFT_DISCONNECT;
++	writel(uTemp, S3C_UDC_OTG_DCTL);
++
++	udelay(20);
++
++	/* 4. Make the OTG device core exit from the disconnected state.*/
++	uTemp = readl(S3C_UDC_OTG_DCTL);
++	uTemp = uTemp & ~SOFT_DISCONNECT;
++	writel(uTemp, S3C_UDC_OTG_DCTL);
++
++	/* 5. Configure OTG Core to initial settings of device mode.*/
++	writel(1<<18|0x0<<0, S3C_UDC_OTG_DCFG);		/* [][1: full speed(30Mhz) 0:high speed]*/
++
++	mdelay(1);
++
++	/* 6. Unmask the core interrupts*/
++	writel(GINTMSK_INIT, S3C_UDC_OTG_GINTMSK);
++
++	/* 7. Set NAK bit of EP0, EP1, EP2*/
++	writel(DEPCTL_EPDIS|DEPCTL_SNAK|(0<<0), S3C_UDC_OTG_DOEPCTL(EP0_CON));
++	writel(DEPCTL_EPDIS|DEPCTL_SNAK|(0<<0), S3C_UDC_OTG_DIEPCTL(EP0_CON));
++
++	/* 8. Unmask EPO interrupts*/
++	writel( ((1<<EP0_CON)<<DAINT_OUT_BIT)|(1<<EP0_CON), S3C_UDC_OTG_DAINTMSK);
++
++	/* 9. Unmask device OUT EP common interrupts*/
++	writel(DOEPMSK_INIT, S3C_UDC_OTG_DOEPMSK);
++
++	/* 10. Unmask device IN EP common interrupts*/
++	writel(DIEPMSK_INIT, S3C_UDC_OTG_DIEPMSK);
++
++	/* 11. Set Rx FIFO Size*/
++	writel(RX_FIFO_SIZE, S3C_UDC_OTG_GRXFSIZ);
++
++	/* 12. Set Non Periodic Tx FIFO Size*/
++	writel(NPTX_FIFO_SIZE<<16| NPTX_FIFO_START_ADDR<<0, S3C_UDC_OTG_GNPTXFSIZ);
++
++#ifdef DED_TX_FIFO
++	for (i = 1; i < S3C_MAX_ENDPOINTS; i++)
++		writel(NPTX_FIFO_SIZE << 16 |
++			(NPTX_FIFO_START_ADDR + NPTX_FIFO_SIZE + PTX_FIFO_SIZE*(i-1)) << 0,
++			S3C_UDC_OTG_DIEPTXF(i));
++#endif
++
++	/* 13. Clear NAK bit of EP0, EP1, EP2*/
++	/* For Slave mode*/
++	writel(DEPCTL_EPDIS|DEPCTL_CNAK|(0<<0), S3C_UDC_OTG_DOEPCTL(EP0_CON)); /* EP0: Control OUT */
++
++	/* 14. Initialize OTG Link Core.*/
++	writel(GAHBCFG_INIT, S3C_UDC_OTG_GAHBCFG);
++
++}
++
++static void set_max_pktsize(struct s3c_udc *dev, enum usb_device_speed speed)
++{
++	unsigned int ep_ctrl;
++
++	if (speed == USB_SPEED_HIGH) {
++		ep0_fifo_size = 64;
++		ep_fifo_size = 512;
++		ep_fifo_size2 = 1024;
++		dev->gadget.speed = USB_SPEED_HIGH;
++	} else {
++		ep0_fifo_size = 64;
++		ep_fifo_size = 64;
++		ep_fifo_size2 = 64;
++		dev->gadget.speed = USB_SPEED_FULL;
++	}
++
++	dev->ep[0].ep.maxpacket = ep0_fifo_size;
++	dev->ep[1].ep.maxpacket = ep_fifo_size;
++	dev->ep[2].ep.maxpacket = ep_fifo_size;
++	dev->ep[3].ep.maxpacket = ep_fifo_size;
++	dev->ep[4].ep.maxpacket = ep_fifo_size;
++	dev->ep[5].ep.maxpacket = ep_fifo_size2;
++	dev->ep[6].ep.maxpacket = ep_fifo_size2;
++	dev->ep[7].ep.maxpacket = ep_fifo_size2;
++	dev->ep[8].ep.maxpacket = ep_fifo_size2;
++
++
++	/* EP0 - Control IN (64 bytes)*/
++	ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL(EP0_CON));
++	writel(ep_ctrl|(0<<0), S3C_UDC_OTG_DIEPCTL(EP0_CON));
++
++	/* EP0 - Control OUT (64 bytes)*/
++	ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL(EP0_CON));
++	writel(ep_ctrl|(0<<0), S3C_UDC_OTG_DOEPCTL(EP0_CON));
++}
++
++static int s3c_ep_enable(struct usb_ep *_ep,
++			     const struct usb_endpoint_descriptor *desc)
++{
++	struct s3c_ep *ep;
++	struct s3c_udc *dev;
++	unsigned long flags;
++
++	DEBUG("%s: %p\n", __FUNCTION__, _ep);
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++	if (!_ep || !desc || ep->desc || _ep->name == ep0name
++	    || desc->bDescriptorType != USB_DT_ENDPOINT
++	    || ep->bEndpointAddress != desc->bEndpointAddress
++	    || ep_maxpacket(ep) < le16_to_cpu(desc->wMaxPacketSize)) {
++
++		DEBUG("%s: bad ep or descriptor\n", __FUNCTION__);
++		return -EINVAL;
++	}
++
++	/* xfer types must match, except that interrupt ~= bulk */
++	if (ep->bmAttributes != desc->bmAttributes
++	    && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
++	    && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
++
++		DEBUG("%s: %s type mismatch\n", __FUNCTION__, _ep->name);
++		return -EINVAL;
++	}
++
++	/* hardware _could_ do smaller, but driver doesn't */
++	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
++	     && le16_to_cpu(desc->wMaxPacketSize) != ep_maxpacket(ep))
++	    || !desc->wMaxPacketSize) {
++
++		DEBUG("%s: bad %s maxpacket\n", __FUNCTION__, _ep->name);
++		return -ERANGE;
++	}
++
++	dev = ep->dev;
++	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
++
++		DEBUG("%s: bogus device state\n", __FUNCTION__);
++		return -ESHUTDOWN;
++	}
++
++	ep->stopped = 0;
++	ep->desc = desc;
++	ep->pio_irqs = 0;
++	ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
++
++	/* Reset halt state */
++	s3c_udc_set_halt(_ep, 0);
++
++	spin_lock_irqsave(&ep->dev->lock, flags);
++	s3c_udc_ep_activate(ep);
++	spin_unlock_irqrestore(&ep->dev->lock, flags);
++
++	DEBUG("%s: enabled %s, stopped = %d, maxpacket = %d\n",
++		__FUNCTION__, _ep->name, ep->stopped, ep->ep.maxpacket);
++	return 0;
++}
++
++/** Disable EP
++ */
++static int s3c_ep_disable(struct usb_ep *_ep)
++{
++	struct s3c_ep *ep;
++	unsigned long flags;
++
++	DEBUG("%s: %p\n", __FUNCTION__, _ep);
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++	if (!_ep || !ep->desc) {
++		DEBUG("%s: %s not enabled\n", __FUNCTION__,
++		      _ep ? ep->ep.name : NULL);
++		return -EINVAL;
++	}
++
++	spin_lock_irqsave(&ep->dev->lock, flags);
++
++	/* Nuke all pending requests */
++	nuke(ep, -ESHUTDOWN);
++
++	ep->desc = 0;
++	ep->stopped = 1;
++
++	spin_unlock_irqrestore(&ep->dev->lock, flags);
++
++	DEBUG("%s: disabled %s\n", __FUNCTION__, _ep->name);
++	return 0;
++}
++
++static struct usb_request *s3c_alloc_request(struct usb_ep *ep,
++						 gfp_t gfp_flags)
++{
++	struct s3c_request *req;
++
++	DEBUG("%s: %s %p\n", __FUNCTION__, ep->name, ep);
++
++	req = kmalloc(sizeof *req, gfp_flags);
++	if (!req)
++		return 0;
++
++	memset(req, 0, sizeof *req);
++	INIT_LIST_HEAD(&req->queue);
++
++	return &req->req;
++}
++
++static void s3c_free_request(struct usb_ep *ep, struct usb_request *_req)
++{
++	struct s3c_request *req;
++
++	DEBUG("%s: %p\n", __FUNCTION__, ep);
++
++	req = container_of(_req, struct s3c_request, req);
++	WARN_ON(!list_empty(&req->queue));
++	kfree(req);
++}
++
++/* dequeue JUST ONE request */
++static int s3c_dequeue(struct usb_ep *_ep, struct usb_request *_req)
++{
++	struct s3c_ep *ep;
++	struct s3c_request *req;
++	unsigned long flags;
++
++	DEBUG("%s: %p\n", __FUNCTION__, _ep);
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++	if (!_ep || ep->ep.name == ep0name)
++		return -EINVAL;
++
++	spin_lock_irqsave(&ep->dev->lock, flags);
++
++	/* make sure it's actually queued on this endpoint */
++	list_for_each_entry(req, &ep->queue, queue) {
++		if (&req->req == _req)
++			break;
++	}
++	if (&req->req != _req) {
++		spin_unlock_irqrestore(&ep->dev->lock, flags);
++		return -EINVAL;
++	}
++
++	done(ep, req, -ECONNRESET);
++
++	spin_unlock_irqrestore(&ep->dev->lock, flags);
++	return 0;
++}
++
++/** Return bytes in EP FIFO
++ */
++static int s3c_fifo_status(struct usb_ep *_ep)
++{
++	int count = 0;
++	struct s3c_ep *ep;
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++	if (!_ep) {
++		DEBUG("%s: bad ep\n", __FUNCTION__);
++		return -ENODEV;
++	}
++
++	DEBUG("%s: %d\n", __FUNCTION__, ep_index(ep));
++
++	/* LPD can't report unclaimed bytes from IN fifos */
++	if (ep_is_in(ep))
++		return -EOPNOTSUPP;
++
++	return count;
++}
++
++/** Flush EP FIFO
++ */
++static void s3c_fifo_flush(struct usb_ep *_ep)
++{
++	struct s3c_ep *ep;
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
++		DEBUG("%s: bad ep\n", __FUNCTION__);
++		return;
++	}
++
++	DEBUG("%s: %d\n", __FUNCTION__, ep_index(ep));
++}
++
++/* ---------------------------------------------------------------------------
++ * 	device-scoped parts of the api to the usb controller hardware
++ * ---------------------------------------------------------------------------
++ */
++
++static int s3c_udc_get_frame(struct usb_gadget *_gadget)
++{
++	/* frame count number, DSTS bits [21:8] */
++	unsigned int frame = readl(S3C_UDC_OTG_DSTS);
++
++	DEBUG("%s: %p\n", __FUNCTION__, _gadget);
++	return (frame & 0x3ff00);
++}
++
++static int s3c_udc_wakeup(struct usb_gadget *_gadget)
++{
++	DEBUG("%s: %p\n", __FUNCTION__, _gadget);
++	return -ENOTSUPP;
++}
++
++static const struct usb_gadget_ops s3c_udc_ops = {
++	.get_frame = s3c_udc_get_frame,
++	.wakeup = s3c_udc_wakeup,
++	/* current versions must always be self-powered */
++};
++
++static void nop_release(struct device *dev)
++{
++	DEBUG("%s %s\n", __FUNCTION__, dev->bus_id);
++}
++
++static struct s3c_udc memory = {
++	.usb_address = 0,
++
++	.gadget = {
++		   .ops = &s3c_udc_ops,
++		   .ep0 = &memory.ep[0].ep,
++		   .name = driver_name,
++		   .dev = {
++			   .bus_id = "gadget",
++			   .release = nop_release,
++			   },
++		   },
++
++	/* control endpoint */
++	.ep[0] = {
++		  .ep = {
++			 .name = ep0name,
++			 .ops = &s3c_ep_ops,
++			 .maxpacket = EP0_FIFO_SIZE,
++			 },
++		  .dev = &memory,
++
++		  .bEndpointAddress = 0,
++		  .bmAttributes = 0,
++
++		  .ep_type = ep_control,
++		  .fifo = (unsigned int) S3C_UDC_OTG_EP0_FIFO,
++		  },
++
++	/* first group of endpoints */
++	.ep[1] = {
++		  .ep = {
++			 .name = "ep1-bulk",
++			 .ops = &s3c_ep_ops,
++			 .maxpacket = EP_FIFO_SIZE,
++			 },
++		  .dev = &memory,
++
++		  .bEndpointAddress = 1,
++		  .bmAttributes = USB_ENDPOINT_XFER_BULK,
++
++		  .ep_type = ep_bulk_out,
++		  .fifo = (unsigned int) S3C_UDC_OTG_EP1_FIFO,
++		  },
++
++	.ep[2] = {
++		  .ep = {
++			 .name = "ep2-bulk",
++			 .ops = &s3c_ep_ops,
++			 .maxpacket = EP_FIFO_SIZE,
++			 },
++		  .dev = &memory,
++
++		  .bEndpointAddress = USB_DIR_IN | 2,
++		  .bmAttributes = USB_ENDPOINT_XFER_BULK,
++
++		  .ep_type = ep_bulk_in,
++		  .fifo = (unsigned int) S3C_UDC_OTG_EP2_FIFO,
++		  },
++
++	.ep[3] = {				/* Though NOT USED XXX*/
++		  .ep = {
++			 .name = "ep3-int",
++			 .ops = &s3c_ep_ops,
++			 .maxpacket = EP_FIFO_SIZE,
++			 },
++		  .dev = &memory,
++
++		  .bEndpointAddress = USB_DIR_IN | 3,
++		  .bmAttributes = USB_ENDPOINT_XFER_INT,
++
++		  .ep_type = ep_interrupt,
++		  .fifo = (unsigned int) S3C_UDC_OTG_EP3_FIFO,
++		  },
++	.ep[4] = {				/* Though NOT USED XXX*/
++		  .ep = {
++			 .name = "ep4-int",
++			 .ops = &s3c_ep_ops,
++			 .maxpacket = EP_FIFO_SIZE,
++			 },
++		  .dev = &memory,
++
++		  .bEndpointAddress = USB_DIR_IN | 4,
++		  .bmAttributes = USB_ENDPOINT_XFER_INT,
++
++		  .ep_type = ep_interrupt,
++		  .fifo = (unsigned int) S3C_UDC_OTG_EP4_FIFO,
++		  },
++	.ep[5] = {				/* Though NOT USED XXX*/
++		  .ep = {
++			 .name = "ep5-int",
++			 .ops = &s3c_ep_ops,
++			 .maxpacket = EP_FIFO_SIZE2,
++			 },
++		  .dev = &memory,
++
++		  .bEndpointAddress = USB_DIR_IN | 5,
++		  .bmAttributes = USB_ENDPOINT_XFER_INT,
++
++		  .ep_type = ep_interrupt,
++		  .fifo = (unsigned int) S3C_UDC_OTG_EP5_FIFO,
++		  },
++	.ep[6] = {				/* Though NOT USED XXX*/
++		  .ep = {
++			 .name = "ep6-int",
++			 .ops = &s3c_ep_ops,
++			 .maxpacket = EP_FIFO_SIZE2,
++			 },
++		  .dev = &memory,
++
++		  .bEndpointAddress = USB_DIR_IN | 6,
++		  .bmAttributes = USB_ENDPOINT_XFER_INT,
++
++		  .ep_type = ep_interrupt,
++		  .fifo = (unsigned int) S3C_UDC_OTG_EP6_FIFO,
++		  },
++	.ep[7] = {				/* Though NOT USED XXX*/
++		  .ep = {
++			 .name = "ep7-int",
++			 .ops = &s3c_ep_ops,
++			 .maxpacket = EP_FIFO_SIZE2,
++			 },
++		  .dev = &memory,
++
++		  .bEndpointAddress = USB_DIR_IN | 7,
++		  .bmAttributes = USB_ENDPOINT_XFER_INT,
++
++		  .ep_type = ep_interrupt,
++		  .fifo = (unsigned int) S3C_UDC_OTG_EP7_FIFO,
++		  },
++	.ep[8] = {				/* Though NOT USED XXX*/
++		  .ep = {
++			 .name = "ep8-int",
++			 .ops = &s3c_ep_ops,
++			 .maxpacket = EP_FIFO_SIZE2,
++			 },
++		  .dev = &memory,
++
++		  .bEndpointAddress = USB_DIR_IN | 8,
++		  .bmAttributes = USB_ENDPOINT_XFER_INT,
++
++		  .ep_type = ep_interrupt,
++		  .fifo = (unsigned int) S3C_UDC_OTG_EP8_FIFO,
++		  },
++};
++
++/*
++ * 	probe - binds to the platform device
++ */
++static struct clk	*otg_clock = NULL;
++
++static int s3c_udc_probe(struct platform_device *pdev)
++{
++	struct s3c_udc *dev = &memory;
++	int retval;
++
++	DEBUG("%s: %p\n", __FUNCTION__, pdev);
++
++	spin_lock_init(&dev->lock);
++	dev->dev = pdev;
++
++	device_initialize(&dev->gadget.dev);
++	dev->gadget.dev.parent = &pdev->dev;
++
++	dev->gadget.is_dualspeed = 1;	/* Hack only*/
++	dev->gadget.is_otg = 0;
++	dev->gadget.is_a_peripheral = 0;
++	dev->gadget.b_hnp_enable = 0;
++	dev->gadget.a_hnp_support = 0;
++	dev->gadget.a_alt_hnp_support = 0;
++
++	the_controller = dev;
++	platform_set_drvdata(pdev, dev);
++
++	otg_clock = clk_get(&pdev->dev, "otg");
++	if (IS_ERR(otg_clock)) {	/* clk_get() returns ERR_PTR(), not NULL, on failure */
++		printk(KERN_INFO "failed to find otg clock source\n");
++		return PTR_ERR(otg_clock);
++	}
++	clk_enable(otg_clock);
++
++	udc_reinit(dev);
++
++	local_irq_disable();
++
++	/* irq setup after old hardware state is cleaned up */
++	retval =
++	    request_irq(IRQ_OTG, s3c_udc_irq, 0, driver_name, dev);
++
++	if (retval != 0) {
++		DEBUG(KERN_ERR "%s: can't get irq %i, err %d\n", driver_name,
++		      IRQ_OTG, retval);
++		return -EBUSY;
++	}
++
++	disable_irq(IRQ_OTG);
++	local_irq_enable();
++	create_proc_files();
++
++	return retval;
++}
++
++static int s3c_udc_remove(struct platform_device *pdev)
++{
++	struct s3c_udc *dev = platform_get_drvdata(pdev);
++
++	DEBUG("%s: %p\n", __FUNCTION__, pdev);
++
++	if (otg_clock != NULL) {
++		clk_disable(otg_clock);
++		clk_put(otg_clock);
++		otg_clock = NULL;
++	}
++
++	remove_proc_files();
++	usb_gadget_unregister_driver(dev->driver);
++
++	free_irq(IRQ_OTG, dev);
++
++	platform_set_drvdata(pdev, 0);
++
++	the_controller = 0;
++
++	return 0;
++}
++
++#ifdef CONFIG_PM
++static int s3c_udc_suspend(struct platform_device *pdev, pm_message_t state)
++{
++        struct s3c_udc *dev = the_controller;
++        int i;
++
++        if (dev->driver) {
++                if (dev->driver->suspend)
++                        dev->driver->suspend(&dev->gadget);
++
++                /* Terminate any outstanding requests  */
++                for (i = 0; i < S3C_MAX_ENDPOINTS; i++) {
++                        struct s3c_ep *ep = &dev->ep[i];
++                        if ( ep->dev != NULL )
++                                spin_lock(&ep->dev->lock);
++                        ep->stopped = 1;
++                        nuke(ep, -ESHUTDOWN);
++                        if ( ep->dev != NULL )
++                                spin_unlock(&ep->dev->lock);
++                }
++
++                disable_irq(IRQ_OTG);
++                udc_disable(dev);
++                clk_disable(otg_clock);
++        }
++
++        return 0;
++}
++
++static int s3c_udc_resume(struct platform_device *pdev)
++{
++        struct s3c_udc *dev = the_controller;
++
++        if (dev->driver) {
++                clk_enable(otg_clock);
++                udc_reinit(dev);
++                enable_irq(IRQ_OTG);
++                udc_enable(dev);
++
++                if (dev->driver->resume)
++                        dev->driver->resume(&dev->gadget);
++        }
++
++        return 0;
++}
++#else
++#define s3c_udc_suspend NULL
++#define s3c_udc_resume  NULL
++#endif /* CONFIG_PM */
++
++/*-------------------------------------------------------------------------*/
++static struct platform_driver s3c_udc_driver = {
++	.probe		= s3c_udc_probe,
++	.remove		= s3c_udc_remove,
++	.suspend	= s3c_udc_suspend,
++	.resume		= s3c_udc_resume,
++	.driver		= {
++		.owner	= THIS_MODULE,
++		.name	= "s3c-usbgadget",
++	},
++};
++
++static int __init udc_init(void)
++{
++	int ret;
++
++	ret = platform_driver_register(&s3c_udc_driver);
++	if(!ret)
++	   printk("%s : %s\n"
++	   	  "%s : version %s %s \n",
++	   	  driver_name, DRIVER_DESC,
++	   	  driver_name, DRIVER_VERSION, OTG_DMA_MODE? "(DMA Mode)" : "(Slave Mode)");
++
++	return ret;
++}
++
++static void __exit udc_exit(void)
++{
++	platform_driver_unregister(&s3c_udc_driver);
++	printk("Unloaded %s version %s\n", driver_name, DRIVER_VERSION);
++}
++
++module_init(udc_init);
++module_exit(udc_exit);
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR("Samsung");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/usb/gadget/s3c_udc_otg_xfer_dma.c b/drivers/usb/gadget/s3c_udc_otg_xfer_dma.c
+new file mode 100644
+index 00000000..c8cf1e44
+--- /dev/null
++++ b/drivers/usb/gadget/s3c_udc_otg_xfer_dma.c
+@@ -0,0 +1,1272 @@
++/*
++ * drivers/usb/gadget/s3c_udc_otg_xfer_dma.c
++ * Samsung S3C on-chip full/high speed USB OTG 2.0 device controllers
++ *
++ * Copyright (C) 2009 for Samsung Electronics
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ */
++
++#define GINTMSK_INIT	(INT_OUT_EP|INT_IN_EP|INT_RESUME|INT_ENUMDONE|INT_RESET|INT_SUSPEND)
++#define DOEPMSK_INIT	(CTRL_OUT_EP_SETUP_PHASE_DONE|AHB_ERROR|TRANSFER_DONE)
++#define DIEPMSK_INIT	(NON_ISO_IN_EP_TIMEOUT|AHB_ERROR|TRANSFER_DONE)
++#define GAHBCFG_INIT	(PTXFE_HALF|NPTXFE_HALF|MODE_DMA|BURST_INCR4|GBL_INT_UNMASK)
++
++static u8 clear_feature_num;
++static int clear_feature_flag = 0;
++static int set_conf_done = 0;
++
++/* USB mass-storage Bulk-Only Transport class-specific requests */
++#define GET_MAX_LUN_REQUEST	0xFE
++#define BOT_RESET_REQUEST	0xFF
++
++void s3c_udc_ep_set_stall(struct s3c_ep *ep);
++
++static inline void s3c_udc_ep0_zlp(void)
++{
++	u32 ep_ctrl;
++	
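++	/* DIEPTSIZ0: packet count = 1 (bit 19), transfer size = 0 -> queue one zero-length IN packet */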
++	writel(virt_to_phys(&usb_ctrl), S3C_UDC_OTG_DIEPDMA(EP0_CON));
++	writel((1<<19| 0<<0), S3C_UDC_OTG_DIEPTSIZ(EP0_CON));
++
++	ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL(EP0_CON));
++	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK, S3C_UDC_OTG_DIEPCTL(EP0_CON));
++
++	DEBUG_EP0("%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
++		__func__, readl(S3C_UDC_OTG_DIEPCTL(EP0_CON)));
++}
++
++static inline void s3c_udc_pre_setup(void)
++{
++	u32 ep_ctrl;
++
++	DEBUG_IN_EP("%s : Prepare Setup packets.\n", __func__);
++
++	writel((1 << 19)|sizeof(struct usb_ctrlrequest), S3C_UDC_OTG_DOEPTSIZ(EP0_CON));
++	writel(virt_to_phys(&usb_ctrl), S3C_UDC_OTG_DOEPDMA(EP0_CON));
++
++	ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL(EP0_CON));
++	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK, S3C_UDC_OTG_DOEPCTL(EP0_CON));
++}
++
++static int setdma_rx(struct s3c_ep *ep, struct s3c_request *req)
++{
++	u32 *buf, ctrl;
++	u32 length, pktcnt;
++	u32 ep_num = ep_index(ep);
++
++	buf = req->req.buf + req->req.actual;
++	prefetchw(buf);
++
++	length = req->req.length - req->req.actual;
++	dma_cache_maint(buf, length, DMA_FROM_DEVICE);
++
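++	/* pktcnt = ceil(length / maxpacket); a zero-length OUT transfer still takes one packet */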
++	if(length == 0)
++		pktcnt = 1;
++	else
++		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;
++
++	ctrl =  readl(S3C_UDC_OTG_DOEPCTL(ep_num));
++
++	writel(virt_to_phys(buf), S3C_UDC_OTG_DOEPDMA(ep_num));
++	writel((pktcnt<<19)|(length<<0), S3C_UDC_OTG_DOEPTSIZ(ep_num));
++	writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, S3C_UDC_OTG_DOEPCTL(ep_num));
++
++	DEBUG_OUT_EP("%s: EP%d RX DMA start : DOEPDMA = 0x%x, DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
++			"\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
++			__func__, ep_num,
++			readl(S3C_UDC_OTG_DOEPDMA(ep_num)),
++			readl(S3C_UDC_OTG_DOEPTSIZ(ep_num)),
++			readl(S3C_UDC_OTG_DOEPCTL(ep_num)),
++			buf, pktcnt, length);
++	return 0;
++
++}
++
++static int setdma_tx(struct s3c_ep *ep, struct s3c_request *req)
++{
++	u32 *buf, ctrl = 0;
++	u32 length, pktcnt;
++	u32 ep_num = ep_index(ep);
++
++	buf = req->req.buf + req->req.actual;
++	prefetch(buf);
++	length = req->req.length - req->req.actual;
++
++	if(ep_num == EP0_CON) {
++		length = min(length, (u32)ep_maxpacket(ep));
++	}
++
++	req->req.actual += length;
++	dma_cache_maint(buf, length, DMA_TO_DEVICE);
++
++	if(length == 0) {
++		pktcnt = 1;
++	} else {
++		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;
++	}
++
++	ctrl = readl(S3C_UDC_OTG_DIEPCTL(ep_num));
++
++	writel(virt_to_phys(buf), S3C_UDC_OTG_DIEPDMA(ep_num));
++	writel((pktcnt<<19)|(length<<0), S3C_UDC_OTG_DIEPTSIZ(ep_num));
++	writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, S3C_UDC_OTG_DIEPCTL(ep_num));
++
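++	/* steer EP0's next-endpoint field at this EP so the core chains to this IN transfer */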
++	ctrl = readl(S3C_UDC_OTG_DIEPCTL(EP0_CON));
++	ctrl = (ctrl&~(EP_MASK<<DEPCTL_NEXT_EP_BIT))|(ep_num<<DEPCTL_NEXT_EP_BIT);
++	writel(ctrl, S3C_UDC_OTG_DIEPCTL(EP0_CON));
++
++	DEBUG_IN_EP("%s:EP%d TX DMA start : DIEPDMA0 = 0x%x, DIEPTSIZ0 = 0x%x, DIEPCTL0 = 0x%x\n"
++			"\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
++			__func__, ep_num,
++			readl(S3C_UDC_OTG_DIEPDMA(ep_num)),
++			readl(S3C_UDC_OTG_DIEPTSIZ(ep_num)),
++			readl(S3C_UDC_OTG_DIEPCTL(ep_num)),
++			buf, pktcnt, length);
++
++	return length;
++}
++
++static void complete_rx(struct s3c_udc *dev, u8 ep_num)
++{
++	struct s3c_ep *ep = &dev->ep[ep_num];
++	struct s3c_request *req = NULL;
++	u32 ep_tsr = 0, xfer_size = 0, xfer_length, is_short = 0;
++
++	if (list_empty(&ep->queue)) {
++		DEBUG_OUT_EP("%s: RX DMA done : NULL REQ on OUT EP-%d\n",
++					__func__, ep_num);
++		return;
++
++	}
++
++	req = list_entry(ep->queue.next, struct s3c_request, queue);
++
++	ep_tsr = readl(S3C_UDC_OTG_DOEPTSIZ(ep_num));
++
++	if(ep_num == EP0_CON) {
++		xfer_size = (ep_tsr & 0x7f);
++
++	} else {
++		xfer_size = (ep_tsr & 0x7fff);
++	}
++
++	dma_cache_maint(req->req.buf, req->req.length, DMA_FROM_DEVICE);
++	xfer_length = req->req.length - xfer_size;
++	req->req.actual += min(xfer_length, req->req.length - req->req.actual);
++	is_short = (xfer_length < ep->ep.maxpacket);
++
++	DEBUG_OUT_EP("%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
++		     "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
++			__func__, ep_num, req->req.actual, req->req.length,
++			is_short, ep_tsr, xfer_size);
++
++	if (is_short || req->req.actual == xfer_length) {
++		if(ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
++			DEBUG_OUT_EP("	=> Send ZLP\n");
++			dev->ep0state = WAIT_FOR_SETUP;
++			s3c_udc_ep0_zlp();
++
++		} else {
++			done(ep, req, 0);
++
++			if(!list_empty(&ep->queue)) {
++				req = list_entry(ep->queue.next, struct s3c_request, queue);
++				DEBUG_OUT_EP("%s: Next Rx request start...\n", __func__);
++				setdma_rx(ep, req);
++			}
++		}
++	}
++}
++
++static void complete_tx(struct s3c_udc *dev, u8 ep_num)
++{
++	struct s3c_ep *ep = &dev->ep[ep_num];
++	struct s3c_request *req;
++	u32 ep_tsr = 0, xfer_size = 0, xfer_length, is_short = 0;
++	u32 last;
++
++	if (list_empty(&ep->queue)) {
++		DEBUG_IN_EP("%s: TX DMA done : NULL REQ on IN EP-%d\n",
++					__func__, ep_num);
++		return;
++
++	}
++
++	req = list_entry(ep->queue.next, struct s3c_request, queue);
++
++	if(dev->ep0state == DATA_STATE_XMIT) {
++		DEBUG_IN_EP("%s: ep_num = %d, ep0stat == DATA_STATE_XMIT\n",
++					__func__, ep_num);
++
++		last = write_fifo_ep0(ep, req);
++
++		if(last) {
++			dev->ep0state = WAIT_FOR_SETUP;
++		}
++
++		return;
++	}
++
++	ep_tsr = readl(S3C_UDC_OTG_DIEPTSIZ(ep_num));
++
++	if(ep_num == EP0_CON) {
++		xfer_size = (ep_tsr & 0x7f);
++
++	} else {
++		xfer_size = (ep_tsr & 0x7fff);
++	}
++
++	req->req.actual = req->req.length - xfer_size;
++	xfer_length = req->req.length - xfer_size;
++	req->req.actual += min(xfer_length, req->req.length - req->req.actual);
++	is_short = (xfer_length < ep->ep.maxpacket);
++
++	DEBUG_IN_EP("%s: TX DMA done : ep = %d, tx bytes = %d/%d, "
++		     "is_short = %d, DIEPTSIZ = 0x%x, remained bytes = %d\n",
++			__func__, ep_num, req->req.actual, req->req.length,
++			is_short, ep_tsr, xfer_size);
++
++	if (req->req.actual == req->req.length) {
++		done(ep, req, 0);
++
++		if(!list_empty(&ep->queue)) {
++			req = list_entry(ep->queue.next, struct s3c_request, queue);
++			DEBUG_IN_EP("%s: Next Tx request start...\n", __func__);
++			setdma_tx(ep, req);
++		}
++	}
++}
++
++static inline void s3c_udc_check_tx_queue(struct s3c_udc *dev, u8 ep_num)
++{
++	struct s3c_ep *ep = &dev->ep[ep_num];
++	struct s3c_request *req;
++
++	DEBUG_IN_EP("%s: Check queue, ep_num = %d\n", __func__, ep_num);
++
++	if (!list_empty(&ep->queue)) {
++		req = list_entry(ep->queue.next, struct s3c_request, queue);
++		DEBUG_IN_EP("%s: Next Tx request(0x%p) start...\n", __func__, req);
++
++		if (ep_is_in(ep))
++			setdma_tx(ep, req);
++		else
++			setdma_rx(ep, req);
++	} else {
++		DEBUG_IN_EP("%s: NULL REQ on EP-%d\n", __func__, ep_num);
++	}
++}
++
++static void process_ep_in_intr(struct s3c_udc *dev)
++{
++	u32 ep_intr, ep_intr_status;
++	u8 ep_num = 0;
++
++	ep_intr = readl(S3C_UDC_OTG_DAINT);
++	DEBUG_IN_EP("*** %s: EP In interrupt : DAINT = 0x%x\n",
++				__func__, ep_intr);
++
++	ep_intr &= DAINT_MASK;
++
++	while(ep_intr) {
++		if (ep_intr & 0x1) {
++			ep_intr_status = readl(S3C_UDC_OTG_DIEPINT(ep_num));
++			DEBUG_IN_EP("\tEP%d-IN : DIEPINT = 0x%x\n",
++						ep_num, ep_intr_status);
++
++			/* Interrupt Clear */
++			writel(ep_intr_status, S3C_UDC_OTG_DIEPINT(ep_num));
++
++			if (ep_intr_status & TRANSFER_DONE) {
++				complete_tx(dev, ep_num);
++
++				if (ep_num == 0) {
++					if(dev->ep0state == WAIT_FOR_SETUP) {
++						s3c_udc_pre_setup();
++					}
++
++					/* continue transfer after set_clear_halt for DMA mode */
++					if (clear_feature_flag == 1) {
++						s3c_udc_check_tx_queue(dev, clear_feature_num);
++						clear_feature_flag = 0;
++					}
++				}
++			}
++		}
++		ep_num++;
++		ep_intr >>= 1;
++	}
++}
++
++static void process_ep_out_intr(struct s3c_udc * dev)
++{
++	u32 ep_intr, ep_intr_status;
++	u8 ep_num = 0;
++
++	ep_intr = readl(S3C_UDC_OTG_DAINT);
++	DEBUG_OUT_EP("*** %s: EP OUT interrupt : DAINT = 0x%x\n",
++				__func__, ep_intr);
++
++	ep_intr = (ep_intr >> DAINT_OUT_BIT) & DAINT_MASK;
++
++	while(ep_intr) {
++		if (ep_intr & 0x1) {
++			ep_intr_status = readl(S3C_UDC_OTG_DOEPINT(ep_num));
++			DEBUG_OUT_EP("\tEP%d-OUT : DOEPINT = 0x%x\n",
++						ep_num, ep_intr_status);
++
++			/* Interrupt Clear */
++			writel(ep_intr_status, S3C_UDC_OTG_DOEPINT(ep_num));
++
++			if (ep_num == 0 ) {
++				if (ep_intr_status & CTRL_OUT_EP_SETUP_PHASE_DONE) {
++					DEBUG_OUT_EP("\tSETUP packet(transaction) arrived\n");
++					s3c_handle_ep0(dev);
++				}
++
++				if (ep_intr_status & TRANSFER_DONE) {
++					complete_rx(dev, ep_num);
++					s3c_udc_pre_setup();
++				}
++
++			} else {
++				if (ep_intr_status & TRANSFER_DONE) {
++					complete_rx(dev, ep_num);
++				}
++			}
++		}
++		ep_num++;
++		ep_intr >>= 1;
++	}
++}
++
++/*
++ *	usb client interrupt handler.
++ */
++static irqreturn_t s3c_udc_irq(int irq, void *_dev)
++{
++	struct s3c_udc *dev = _dev;
++	u32 intr_status;
++	u32 usb_status, gintmsk;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dev->lock, flags);
++
++	intr_status = readl(S3C_UDC_OTG_GINTSTS);
++	gintmsk = readl(S3C_UDC_OTG_GINTMSK);
++
++	DEBUG_ISR("\n*** %s : GINTSTS=0x%x(on state %s), GINTMSK : 0x%x, DAINT : 0x%x, DAINTMSK : 0x%x\n",
++			__func__, intr_status, state_names[dev->ep0state], gintmsk,
++			readl(S3C_UDC_OTG_DAINT), readl(S3C_UDC_OTG_DAINTMSK));
++
++	if (!intr_status) {
++		spin_unlock_irqrestore(&dev->lock, flags);
++		return IRQ_HANDLED;
++	}
++
++	if (intr_status & INT_ENUMDONE) {
++		DEBUG_ISR("\tSpeed Detection interrupt\n");
++
++		writel(INT_ENUMDONE, S3C_UDC_OTG_GINTSTS);
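++		/* DSTS[2:1] (EnumSpd) holds the speed negotiated during enumeration */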
++		usb_status = (readl(S3C_UDC_OTG_DSTS) & 0x6);
++
++		if (usb_status & (USB_FULL_30_60MHZ | USB_FULL_48MHZ)) {
++			DEBUG_ISR("\t\tFull Speed Detection\n");
++			set_max_pktsize(dev, USB_SPEED_FULL);
++
++		} else {
++			DEBUG_ISR("\t\tHigh Speed Detection : 0x%x\n", usb_status);
++			set_max_pktsize(dev, USB_SPEED_HIGH);
++		}
++	}
++
++	if (intr_status & INT_EARLY_SUSPEND) {
++		DEBUG_ISR("\tEarly suspend interrupt\n");
++		writel(INT_EARLY_SUSPEND, S3C_UDC_OTG_GINTSTS);
++	}
++
++	if (intr_status & INT_SUSPEND) {
++		usb_status = readl(S3C_UDC_OTG_DSTS);
++		DEBUG_ISR("\tSuspend interrupt :(DSTS):0x%x\n", usb_status);
++		writel(INT_SUSPEND, S3C_UDC_OTG_GINTSTS);
++
++		if (dev->gadget.speed != USB_SPEED_UNKNOWN
++		    && dev->driver
++		    && dev->driver->suspend) {
++
++			dev->driver->suspend(&dev->gadget);
++		}
++	}
++
++	if (intr_status & INT_RESUME) {
++		DEBUG_ISR("\tResume interrupt\n");
++		writel(INT_RESUME, S3C_UDC_OTG_GINTSTS);
++
++		if (dev->gadget.speed != USB_SPEED_UNKNOWN
++		    && dev->driver
++		    && dev->driver->resume) {
++
++			dev->driver->resume(&dev->gadget);
++		}
++	}
++
++	if (intr_status & INT_RESET) {
++		usb_status = readl(S3C_UDC_OTG_GOTGCTL);
++		DEBUG_ISR("\tReset interrupt - (GOTGCTL):0x%x\n", usb_status);
++		writel(INT_RESET, S3C_UDC_OTG_GINTSTS);
++
++		set_conf_done = 0;
++
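++		/* GOTGCTL[19:18] = A-/B-session valid flags; reconfigure only while a session is valid */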
++		if((usb_status & 0xc0000) == (0x3 << 18)) {
++			if(reset_available) {
++				DEBUG_ISR("\t\tOTG core got reset (%d)!! \n", reset_available);
++				reconfig_usbd();
++				dev->ep0state = WAIT_FOR_SETUP;
++				reset_available = 0;
++				s3c_udc_pre_setup();
++			}
++
++		} else {
++			reset_available = 1;
++			DEBUG_ISR("\t\tRESET handling skipped\n");
++		}
++	}
++
++	if (intr_status & INT_IN_EP) {
++		process_ep_in_intr(dev);
++	}
++
++	if(intr_status & INT_OUT_EP) {
++		process_ep_out_intr(dev);
++	}
++
++	spin_unlock_irqrestore(&dev->lock, flags);
++
++	return IRQ_HANDLED;
++}
++
++/** Queue one request
++ *  Kickstart transfer if needed
++ */
++static int s3c_queue(struct usb_ep *_ep, struct usb_request *_req,
++			 gfp_t gfp_flags)
++{
++	struct s3c_request *req;
++	struct s3c_ep *ep;
++	struct s3c_udc *dev;
++	unsigned long flags;
++	u32 ep_num, gintsts;
++
++	req = container_of(_req, struct s3c_request, req);
++	if (unlikely(!_req || !_req->complete || !_req->buf || !list_empty(&req->queue))) {
++
++		DEBUG("%s: bad params\n", __func__);
++		return -EINVAL;
++	}
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++
++	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
++
++		DEBUG("%s: bad ep\n", __func__);
++		return -EINVAL;
++	}
++
++	ep_num = ep_index(ep);
++	dev = ep->dev;
++	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
++
++		DEBUG("%s: bogus device state %p\n", __func__, dev->driver);
++		return -ESHUTDOWN;
++	}
++
++	spin_lock_irqsave(&dev->lock, flags);
++
++	_req->status = -EINPROGRESS;
++	_req->actual = 0;
++
++	/* kickstart this i/o queue? */
++	DEBUG("\n*** %s: %s-%s req = %p, len = %d, buf = %p"
++		"Q empty = %d, stopped = %d\n",
++		__func__,_ep->name, ep_is_in(ep)? "in" : "out",
++		_req, _req->length,_req->buf,
++		list_empty(&ep->queue), ep->stopped);
++
++	if (list_empty(&ep->queue) && !ep->stopped) {
++
++		if (ep_num == 0) {
++			/* EP0 */
++			list_add_tail(&req->queue, &ep->queue);
++			s3c_ep0_kick(dev, ep);
++			req = 0;
++
++		} else if (ep_is_in(ep)) {
++			gintsts = readl(S3C_UDC_OTG_GINTSTS);
++			DEBUG_IN_EP("%s: ep_is_in, S3C_UDC_OTG_GINTSTS=0x%x\n",
++						__func__, gintsts);
++
++			if (set_conf_done == 1) {
++				setdma_tx(ep, req);
++			} else {
++				done(ep, req, 0);
++				DEBUG("%s: Not yet Set_configureation, ep_num = %d, req = %p\n",
++						__func__, ep_num, req);
++				req = 0;
++			}
++
++		} else {
++			gintsts = readl(S3C_UDC_OTG_GINTSTS);
++			DEBUG_OUT_EP("%s: ep_is_out, S3C_UDC_OTG_GINTSTS=0x%x\n",
++				__func__, gintsts);
++
++			setdma_rx(ep, req);
++		}
++	}
++
++	/* pio or dma irq handler advances the queue. */
++	if (likely(req != 0)) {
++		list_add_tail(&req->queue, &ep->queue);
++	}
++
++	spin_unlock_irqrestore(&dev->lock, flags);
++
++	return 0;
++}
++
++/****************************************************************/
++/* End Point 0 related functions                                */
++/****************************************************************/
++
++/* return:  0 = still running, 1 = completed, negative = errno */
++static int write_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req)
++{
++	u32 max;
++	unsigned count;
++	int is_last;
++
++	max = ep_maxpacket(ep);
++
++	DEBUG_EP0("%s: max = %d\n", __func__, max);
++
++	count = setdma_tx(ep, req);
++
++	/* last packet is usually short (or a zlp) */
++	if (likely(count != max))
++		is_last = 1;
++	else {
++		if (likely(req->req.length != req->req.actual) || req->req.zero)
++			is_last = 0;
++		else
++			is_last = 1;
++	}
++
++	DEBUG_EP0("%s: wrote %s %d bytes%s %d left %p\n", __func__,
++		  ep->ep.name, count,
++		  is_last ? "/L" : "", req->req.length - req->req.actual, req);
++
++	/* requests complete when all IN data is in the FIFO */
++	if (is_last) {
++		ep->dev->ep0state = WAIT_FOR_SETUP;
++		return 1;
++	}
++
++	return 0;
++}
++
++static __inline__ int s3c_fifo_read(struct s3c_ep *ep, u32 *cp, int max)
++{
++	u32 bytes;
++
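++	/*
++	 * In DMA mode the core has already written the SETUP packet into
++	 * usb_ctrl; just invalidate the cache over it so the CPU sees it.
++	 */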
++	bytes = sizeof(struct usb_ctrlrequest);
++	dma_cache_maint(&usb_ctrl, bytes, DMA_FROM_DEVICE);
++	DEBUG_EP0("%s: bytes=%d, ep_index=%d \n", __func__, bytes, ep_index(ep));
++
++	return bytes;
++}
++
++/**
++ * udc_set_address - set the USB address for this device
++ * @address:
++ *
++ * Called from control endpoint function
++ * after it decodes a set address setup packet.
++ */
++static void udc_set_address(struct s3c_udc *dev, unsigned char address)
++{
++	u32 ctrl = readl(S3C_UDC_OTG_DCFG);
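++	/* DCFG[10:4] is the device-address field; the previous value is assumed clear after reset */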
++	writel(address << 4 | ctrl, S3C_UDC_OTG_DCFG);
++
++	s3c_udc_ep0_zlp();
++
++	DEBUG_EP0("%s: USB OTG 2.0 Device address=%d, DCFG=0x%x\n",
++		__func__, address, readl(S3C_UDC_OTG_DCFG));
++
++	dev->usb_address = address;
++}
++
++static inline void s3c_udc_ep0_set_stall(struct s3c_ep *ep)
++{
++	struct s3c_udc *dev;
++	u32		ep_ctrl = 0;
++
++	dev = ep->dev;
++	ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL(EP0_CON));
++
++	/* set the disable and stall bits */
++	if (ep_ctrl & DEPCTL_EPENA) {
++		ep_ctrl |= DEPCTL_EPDIS;
++	}
++	ep_ctrl |= DEPCTL_STALL;
++
++	writel(ep_ctrl, S3C_UDC_OTG_DIEPCTL(EP0_CON));
++
++	DEBUG_EP0("%s: set ep%d stall, DIEPCTL0 = 0x%x\n",
++		__func__, ep_index(ep), readl(S3C_UDC_OTG_DIEPCTL(EP0_CON)));
++	/* 
++	 * The application can only set this bit, and the core clears it,
++	 * when a SETUP token is received for this endpoint
++	 */
++	dev->ep0state = WAIT_FOR_SETUP;
++
++	s3c_udc_pre_setup();
++}
++
++static void s3c_ep0_read(struct s3c_udc *dev)
++{
++	struct s3c_request *req;
++	struct s3c_ep *ep = &dev->ep[0];
++	int ret;
++
++	if (!list_empty(&ep->queue)) {
++		req = list_entry(ep->queue.next, struct s3c_request, queue);
++
++	} else {
++		DEBUG("%s: ---> BUG\n", __func__);
++		BUG();
++		return;
++	}
++
++	DEBUG_EP0("%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
++		__func__, req, req->req.length, req->req.actual);
++
++	if(req->req.length == 0) {
++		/* zlp for Set_configuration, Set_interface,
++		 * or Bulk-Only mass storage reset */
++
++		dev->ep0state = WAIT_FOR_SETUP;
++		set_conf_done = 1;
++		s3c_udc_ep0_zlp();
++		done(ep, req, 0);
++
++		DEBUG_EP0("%s: req.length = 0, bRequest = %d\n", __func__, usb_ctrl.bRequest);
++		return;
++	}
++
++	ret = setdma_rx(ep, req);
++}
++
++/*
++ * DATA_STATE_XMIT
++ */
++static int s3c_ep0_write(struct s3c_udc *dev)
++{
++	struct s3c_request *req;
++	struct s3c_ep *ep = &dev->ep[0];
++	int ret, need_zlp = 0;
++
++	if (list_empty(&ep->queue)) {
++		req = 0;
++
++	} else {
++		req = list_entry(ep->queue.next, struct s3c_request, queue);
++	}
++
++	if (!req) {
++		DEBUG_EP0("%s: NULL REQ\n", __func__);
++		return 0;
++	}
++
++	DEBUG_EP0("%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
++		__func__, req, req->req.length, req->req.actual);
++
++	if (req->req.length - req->req.actual == ep0_fifo_size) {
++		/* Next write will end with the packet size, */
++		/* so we need Zero-length-packet */
++		need_zlp = 1;
++	}
++
++	ret = write_fifo_ep0(ep, req);
++
++	if ((ret == 1) && !need_zlp) {
++		/* Last packet */
++		dev->ep0state = WAIT_FOR_SETUP;
++		DEBUG_EP0("%s: finished, waiting for status\n", __func__);
++
++	} else {
++		dev->ep0state = DATA_STATE_XMIT;
++		DEBUG_EP0("%s: not finished\n", __func__);
++	}
++
++	if (need_zlp) {
++		dev->ep0state = DATA_STATE_NEED_ZLP;
++		DEBUG_EP0("%s: Need ZLP!\n", __func__);
++	}
++
++	return 1;
++}
++
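++/* two-byte GET_STATUS reply; file scope so virt_to_phys() yields a stable DMA address */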
++u16	g_status;
++
++static int s3c_udc_get_status(struct s3c_udc *dev,
++		struct usb_ctrlrequest *crq)
++{
++	u8 ep_num = crq->wIndex & 0x7F;
++	u32 ep_ctrl;
++
++	DEBUG_SETUP("%s: *** USB_REQ_GET_STATUS  \n",__func__);
++
++	switch (crq->bRequestType & USB_RECIP_MASK) {
++	case USB_RECIP_INTERFACE:
++		g_status = 0;
++		DEBUG_SETUP("\tGET_STATUS: USB_RECIP_INTERFACE, g_stauts = %d\n", g_status);
++		break;
++
++	case USB_RECIP_DEVICE:
++		g_status = 0x1; /* Self powered */
++		DEBUG_SETUP("\tGET_STATUS: USB_RECIP_DEVICE, g_stauts = %d\n", g_status);
++		break;
++
++	case USB_RECIP_ENDPOINT:
++		if (ep_num > 4 || crq->wLength > 2) {
++			DEBUG_SETUP("\tGET_STATUS: Not support EP or wLength\n");
++			return 1;
++		}
++
++		g_status = dev->ep[ep_num].stopped;
++		DEBUG_SETUP("\tGET_STATUS: USB_RECIP_ENDPOINT, g_stauts = %d\n", g_status);
++
++		break;
++
++	default:
++		return 1;
++	}
++
++	dma_cache_maint(&g_status, 2, DMA_TO_DEVICE);
++
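++	/* arm EP0-IN: PktCnt = 1 (bit 19), XferSize = 2 bytes of status data */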
++	writel(virt_to_phys(&g_status), S3C_UDC_OTG_DIEPDMA(EP0_CON));
++	writel((1<<19)|(2<<0), S3C_UDC_OTG_DIEPTSIZ(EP0_CON));
++
++	ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL(EP0_CON));
++	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK, S3C_UDC_OTG_DIEPCTL(EP0_CON));
++	dev->ep0state = WAIT_FOR_SETUP;
++
++	return 0;
++}
++
++void s3c_udc_ep_set_stall(struct s3c_ep *ep)
++{
++	u8		ep_num;
++	u32		ep_ctrl = 0;
++
++	ep_num = ep_index(ep);
++	DEBUG("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
++		
++	if (ep_is_in(ep)) {
++		ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL(ep_num));
++	
++		/* set the disable and stall bits */
++		if (ep_ctrl & DEPCTL_EPENA) {
++			ep_ctrl |= DEPCTL_EPDIS;
++		}
++		ep_ctrl |= DEPCTL_STALL;
++
++		writel(ep_ctrl, S3C_UDC_OTG_DIEPCTL(ep_num));
++		DEBUG("%s: set stall, DIEPCTL%d = 0x%x\n",
++			__func__, ep_num, readl(S3C_UDC_OTG_DIEPCTL(ep_num)));
++
++	} else {
++		ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL(ep_num));
++
++		/* set the stall bit */
++		ep_ctrl |= DEPCTL_STALL;
++
++		writel(ep_ctrl, S3C_UDC_OTG_DOEPCTL(ep_num));
++		DEBUG("%s: set stall, DOEPCTL%d = 0x%x\n",
++			__func__, ep_num, readl(S3C_UDC_OTG_DOEPCTL(ep_num)));
++	}
++
++	return;
++}
++
++void s3c_udc_ep_clear_stall(struct s3c_ep *ep)
++{
++	u8		ep_num;
++	u32		ep_ctrl = 0;
++
++	ep_num = ep_index(ep);	
++	DEBUG("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
++
++	if (ep_is_in(ep)) {
++		ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL(ep_num));
++
++		/* clear stall bit */
++		ep_ctrl &= ~DEPCTL_STALL;
++
++		/* 
++		 * USB Spec 9.4.5: For endpoints using data toggle, regardless
++		 * of whether an endpoint has the Halt feature set, a
++		 * ClearFeature(ENDPOINT_HALT) request always results in the
++		 * data toggle being reinitialized to DATA0.
++		 */
++		if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
++		    || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
++			ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
++		}
++
++		writel(ep_ctrl, S3C_UDC_OTG_DIEPCTL(ep_num));
++		DEBUG("%s: cleared stall, DIEPCTL%d = 0x%x\n",
++			__func__, ep_num, readl(S3C_UDC_OTG_DIEPCTL(ep_num)));
++
++	} else {
++		ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL(ep_num));
++
++		/* clear stall bit */
++		ep_ctrl &= ~DEPCTL_STALL;
++
++		if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
++		    || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
++			ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
++		}
++
++		writel(ep_ctrl, S3C_UDC_OTG_DOEPCTL(ep_num));
++		DEBUG("%s: cleared stall, DOEPCTL%d = 0x%x\n",
++			__func__, ep_num, readl(S3C_UDC_OTG_DOEPCTL(ep_num)));
++	}
++
++	return;
++}
++
++static int s3c_udc_set_halt(struct usb_ep *_ep, int value)
++{
++	struct s3c_ep	*ep;
++	struct s3c_udc	*dev;
++	unsigned long	flags;
++	u8		ep_num;
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++
++	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name) ||
++			(ep->desc && ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC))) {
++		DEBUG("%s: %s bad ep or descriptor\n", __func__, ep->ep.name);
++		return -EINVAL;
++	}
++
++	/* Attempts to halt an IN endpoint fail if any transfer requests
++	 * are still queued */
++	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
++		DEBUG("%s: %s queue not empty, req = %p\n",
++			__func__, ep->ep.name,
++			list_entry(ep->queue.next, struct s3c_request, queue));
++
++		return -EAGAIN;
++	}
++
++	dev = ep->dev;
++	ep_num = ep_index(ep);
++	DEBUG("%s: ep_num = %d, value = %d\n", __func__, ep_num, value);
++
++	spin_lock_irqsave(&dev->lock, flags);
++
++	if (value == 0) {
++		ep->stopped = 0;
++		s3c_udc_ep_clear_stall(ep);
++	} else {
++		if (ep_num == 0) {
++			dev->ep0state = WAIT_FOR_SETUP;
++		}
++		
++		ep->stopped = 1;
++		s3c_udc_ep_set_stall(ep);
++	}
++
++	spin_unlock_irqrestore(&dev->lock, flags);
++
++	return 0;
++}
++
++void s3c_udc_ep_activate(struct s3c_ep *ep)
++{
++	u8 ep_num;
++	u32 ep_ctrl = 0, daintmsk = 0;
++	
++	ep_num = ep_index(ep);
++
++	/* Read DEPCTLn register */
++	if (ep_is_in(ep)) {
++		ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL(ep_num));
++		daintmsk = 1 << ep_num;
++	} else {
++		ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL(ep_num));
++		daintmsk = (1 << ep_num) << DAINT_OUT_BIT;
++	}
++
++	DEBUG("%s: EPCTRL%d = 0x%x, ep_is_in = %d\n",
++		__func__, ep_num, ep_ctrl, ep_is_in(ep));
++		
++	/* If the EP is already active don't change the EP Control
++	 * register. */
++	if (!(ep_ctrl & DEPCTL_USBACTEP)) {
++		ep_ctrl = (ep_ctrl & ~DEPCTL_TYPE_MASK) | (ep->bmAttributes << DEPCTL_TYPE_BIT);
++		ep_ctrl = (ep_ctrl & ~DEPCTL_MPS_MASK) | (ep->ep.maxpacket << DEPCTL_MPS_BIT);
++		ep_ctrl |= (DEPCTL_SETD0PID | DEPCTL_USBACTEP);
++
++		if (ep_is_in(ep)) {
++			writel(ep_ctrl, S3C_UDC_OTG_DIEPCTL(ep_num));
++			DEBUG("%s: USB Ative EP%d, DIEPCTRL%d = 0x%x\n",
++				__func__, ep_num, ep_num, readl(S3C_UDC_OTG_DIEPCTL(ep_num)));
++		} else {
++			writel(ep_ctrl, S3C_UDC_OTG_DOEPCTL(ep_num));
++			DEBUG("%s: USB Ative EP%d, DOEPCTRL%d = 0x%x\n",
++				__func__, ep_num, ep_num, readl(S3C_UDC_OTG_DOEPCTL(ep_num)));
++		}
++	}
++
++	/* Unmask EP interrupt */
++	writel(readl(S3C_UDC_OTG_DAINTMSK)|daintmsk, S3C_UDC_OTG_DAINTMSK);
++	DEBUG("%s: DAINTMSK = 0x%x\n", __func__, readl(S3C_UDC_OTG_DAINTMSK));
++}
++
++static int s3c_udc_clear_feature(struct usb_ep *_ep)
++{
++	struct s3c_ep	*ep;
++	u8		ep_num;
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++	ep_num = ep_index(ep);
++
++	DEBUG_SETUP("%s: ep_num = %d, is_in = %d, clear_feature_flag = %d\n",
++		__func__, ep_num, ep_is_in(ep), clear_feature_flag);
++
++	if (usb_ctrl.wLength != 0) {
++		DEBUG_SETUP("\tCLEAR_FEATURE: wLength is not zero.....\n");
++		return 1;
++	}
++
++	switch (usb_ctrl.bRequestType & USB_RECIP_MASK) {
++	case USB_RECIP_DEVICE:
++		switch (usb_ctrl.wValue) {
++		case USB_DEVICE_REMOTE_WAKEUP:
++			DEBUG_SETUP("\tCLEAR_FEATURE: USB_DEVICE_REMOTE_WAKEUP\n");
++			break;
++
++		case USB_DEVICE_TEST_MODE:
++			DEBUG_SETUP("\tCLEAR_FEATURE: USB_DEVICE_TEST_MODE\n");
++			/** @todo Add CLEAR_FEATURE for TEST modes. */
++			break;
++		}
++
++		s3c_udc_ep0_zlp();
++		break;
++
++	case USB_RECIP_ENDPOINT:
++		DEBUG_SETUP("\tCLEAR_FEATURE: USB_RECIP_ENDPOINT, wValue = %d\n",
++				usb_ctrl.wValue);
++
++		if (usb_ctrl.wValue == USB_ENDPOINT_HALT) {
++			if (ep_num == 0) {
++				s3c_udc_ep0_set_stall(ep);
++				return 0;
++			}
++
++			s3c_udc_ep0_zlp();
++
++			s3c_udc_ep_clear_stall(ep);
++			s3c_udc_ep_activate(ep);
++			ep->stopped = 0;
++
++			clear_feature_num = ep_num;
++			clear_feature_flag = 1;
++		}
++		break;
++	}
++
++	return 0;
++}
++
++static int s3c_udc_set_feature(struct usb_ep *_ep)
++{
++	struct s3c_ep	*ep;
++	u8		ep_num;
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++	ep_num = ep_index(ep);
++
++	DEBUG_SETUP("%s: *** USB_REQ_SET_FEATURE , ep_num = %d\n",__func__, ep_num);
++
++	if (usb_ctrl.wLength != 0) {
++		DEBUG_SETUP("\tSET_FEATURE: wLength is not zero.....\n");
++		return 1;
++	}
++
++	switch (usb_ctrl.bRequestType & USB_RECIP_MASK) {
++	case USB_RECIP_DEVICE:
++		switch (usb_ctrl.wValue) {
++		case USB_DEVICE_REMOTE_WAKEUP:
++			DEBUG_SETUP("\tSET_FEATURE: USB_DEVICE_REMOTE_WAKEUP\n");
++			break;
++
++		case USB_DEVICE_TEST_MODE:
++			DEBUG_SETUP("\tSET_FEATURE: USB_DEVICE_TEST_MODE\n");
++			break;
++
++		case USB_DEVICE_B_HNP_ENABLE:
++			DEBUG_SETUP("\tSET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
++			break;
++
++		case USB_DEVICE_A_HNP_SUPPORT:
++			/* RH port supports HNP */
++			DEBUG_SETUP("\tSET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
++			break;
++
++		case USB_DEVICE_A_ALT_HNP_SUPPORT:
++			/* other RH port does */
++			DEBUG_SETUP("\tSET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
++			break;
++		}
++
++		s3c_udc_ep0_zlp();
++		return 0;
++
++	case USB_RECIP_INTERFACE:
++		DEBUG_SETUP("\tSET_FEATURE: USB_RECIP_INTERFACE\n");
++		break;
++
++	case USB_RECIP_ENDPOINT:
++		DEBUG_SETUP("\tSET_FEATURE: USB_RECIP_ENDPOINT\n");
++		if (usb_ctrl.wValue == USB_ENDPOINT_HALT) {
++			if (ep_num == 0) {
++				s3c_udc_ep0_set_stall(ep);
++				return 0;
++			} 
++			ep->stopped = 1;
++			s3c_udc_ep_set_stall(ep);
++		}
++
++		s3c_udc_ep0_zlp();
++		return 0;
++	}
++
++	return 1;
++}
++
++/*
++ * WAIT_FOR_SETUP (OUT_PKT_RDY)
++ */
++static void s3c_ep0_setup(struct s3c_udc *dev)
++{
++	struct s3c_ep *ep = &dev->ep[0];
++	int i, bytes, is_in;
++	u8 ep_num;
++
++	/* Nuke all previous transfers */
++	nuke(ep, -EPROTO);
++
++	/* read control req from fifo (8 bytes) */
++	bytes = s3c_fifo_read(ep, (u32 *)&usb_ctrl, 8);
++
++	DEBUG_SETUP("%s: bRequestType = 0x%x(%s), bRequest = 0x%x"
++			"\twLength = 0x%x, wValue = 0x%x, wIndex= 0x%x\n",
++			__func__, usb_ctrl.bRequestType,
++			(usb_ctrl.bRequestType & USB_DIR_IN) ? "IN" : "OUT", usb_ctrl.bRequest,
++			usb_ctrl.wLength, usb_ctrl.wValue, usb_ctrl.wIndex);
++
++	if (usb_ctrl.bRequest == GET_MAX_LUN_REQUEST && usb_ctrl.wLength != 1) {
++		DEBUG_SETUP("\t%s:GET_MAX_LUN_REQUEST:invalid wLength = %d, setup returned\n",
++			__func__, usb_ctrl.wLength);
++
++		s3c_udc_ep0_set_stall(ep);
++		dev->ep0state = WAIT_FOR_SETUP;
++
++		return;
++	}
++	else if (usb_ctrl.bRequest == BOT_RESET_REQUEST && usb_ctrl.wLength != 0) {
++		/* Bulk-Only mass storage reset class-specific request */
++		DEBUG_SETUP("\t%s:BOT Reset:invalid wLength = %d, setup returned\n",
++			__func__, usb_ctrl.wLength);
++
++		s3c_udc_ep0_set_stall(ep);
++		dev->ep0state = WAIT_FOR_SETUP;
++
++		return;
++	}
++
++	/* Set direction of EP0 */
++	if (likely(usb_ctrl.bRequestType & USB_DIR_IN)) {
++		ep->bEndpointAddress |= USB_DIR_IN;
++		is_in = 1;
++
++	} else {
++		ep->bEndpointAddress &= ~USB_DIR_IN;
++		is_in = 0;
++	}
++	/* cope with automagic for some standard requests. */
++	dev->req_std = (usb_ctrl.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD;
++	dev->req_config = 0;
++	dev->req_pending = 1;
++
++	/* Handle some SETUP packets ourselves */
++	switch (usb_ctrl.bRequest) {
++	case USB_REQ_SET_ADDRESS:
++	DEBUG_SETUP("%s: *** USB_REQ_SET_ADDRESS (%d)\n",
++			__func__, usb_ctrl.wValue);
++
++		if (usb_ctrl.bRequestType
++			!= (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
++			break;
++
++		udc_set_address(dev, usb_ctrl.wValue);
++		return;
++
++	case USB_REQ_SET_CONFIGURATION :
++		DEBUG_SETUP("============================================\n");
++		DEBUG_SETUP("%s: USB_REQ_SET_CONFIGURATION (%d)\n",
++				__func__, usb_ctrl.wValue);
++
++		if (usb_ctrl.bRequestType == USB_RECIP_DEVICE) {
++			reset_available = 1;
++			dev->req_config = 1;
++		}
++		break;
++
++	case USB_REQ_GET_DESCRIPTOR:
++		DEBUG_SETUP("%s: *** USB_REQ_GET_DESCRIPTOR  \n",__func__);
++		break;
++
++	case USB_REQ_SET_INTERFACE:
++		DEBUG_SETUP("%s: *** USB_REQ_SET_INTERFACE (%d)\n",
++				__func__, usb_ctrl.wValue);
++
++		if (usb_ctrl.bRequestType == USB_RECIP_INTERFACE) {
++			reset_available = 1;
++			dev->req_config = 1;
++		}
++		break;
++
++	case USB_REQ_GET_CONFIGURATION:
++		DEBUG_SETUP("%s: *** USB_REQ_GET_CONFIGURATION  \n",__func__);
++		break;
++
++	case USB_REQ_GET_STATUS:
++		if (dev->req_std) {
++			if (!s3c_udc_get_status(dev, &usb_ctrl)) {
++				return;
++			}
++		}
++		break;
++
++	case USB_REQ_CLEAR_FEATURE:
++		ep_num = usb_ctrl.wIndex & 0x7f;
++
++		if (!s3c_udc_clear_feature(&dev->ep[ep_num].ep)) {
++			return;
++		}
++		break;
++
++	case USB_REQ_SET_FEATURE:
++		ep_num = usb_ctrl.wIndex & 0x7f;
++
++		if (!s3c_udc_set_feature(&dev->ep[ep_num].ep)) {
++			return;
++		}
++		break;
++
++	default:
++		DEBUG_SETUP("%s: *** Default of usb_ctrl.bRequest=0x%x happened.\n",
++				__func__, usb_ctrl.bRequest);
++		break;
++	}
++
++	if (likely(dev->driver)) {
++		/* device-2-host (IN) or no data setup command,
++		 * process immediately */
++		DEBUG_SETUP("%s: usb_ctrlrequest will be passed to fsg_setup()\n", __func__);
++
++		spin_unlock(&dev->lock);
++		i = dev->driver->setup(&dev->gadget, &usb_ctrl);
++		spin_lock(&dev->lock);
++
++		if (i < 0) {
++			if (dev->req_config) {
++				DEBUG_SETUP("\tconfig change 0x%02x fail %d?\n",
++					usb_ctrl.bRequest, i);
++				return;
++			}
++
++			/* setup processing failed, force stall */
++			s3c_udc_ep0_set_stall(ep);
++			dev->ep0state = WAIT_FOR_SETUP;
++
++			DEBUG_SETUP("\tdev->driver->setup failed (%d), bRequest = %d\n",
++				i, usb_ctrl.bRequest);
++
++		} else if (dev->req_pending) {
++			dev->req_pending = 0;
++			DEBUG_SETUP("\tdev->req_pending... \n");
++		}
++
++		DEBUG_SETUP("\tep0state = %s\n", state_names[dev->ep0state]);
++
++	}
++}
++
++/*
++ * handle ep0 interrupt
++ */
++static void s3c_handle_ep0(struct s3c_udc *dev)
++{
++	if (dev->ep0state == WAIT_FOR_SETUP) {
++		DEBUG_OUT_EP("%s: WAIT_FOR_SETUP\n", __func__);
++		s3c_ep0_setup(dev);
++
++	} else {
++		DEBUG_OUT_EP("%s: strange state!!(state = %s)\n",
++			__func__, state_names[dev->ep0state]);
++	}
++}
++
++static void s3c_ep0_kick(struct s3c_udc *dev, struct s3c_ep *ep)
++{
++	DEBUG_EP0("%s: ep_is_in = %d\n", __func__, ep_is_in(ep));
++	if (ep_is_in(ep)) {
++		dev->ep0state = DATA_STATE_XMIT;
++		s3c_ep0_write(dev);
++
++	} else {
++		dev->ep0state = DATA_STATE_RECV;
++		s3c_ep0_read(dev);
++	}
++}
+diff --git a/drivers/usb/gadget/s3c_udc_otg_xfer_slave.c b/drivers/usb/gadget/s3c_udc_otg_xfer_slave.c
+new file mode 100644
+index 00000000..37c61712
+--- /dev/null
++++ b/drivers/usb/gadget/s3c_udc_otg_xfer_slave.c
+@@ -0,0 +1,971 @@
++/*
++ * drivers/usb/gadget/s3c_udc_otg_xfer_slave.c
++ * Samsung S3C on-chip full/high speed USB OTG 2.0 device controllers
++ *
++ * Copyright (C) 2009 for Samsung Electronics
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ */
++
++#define GINTMSK_INIT	(INT_RESUME|INT_ENUMDONE|INT_RESET|INT_SUSPEND|INT_RX_FIFO_NOT_EMPTY)
++#define DOEPMSK_INIT	(AHB_ERROR)
++#define DIEPMSK_INIT	(NON_ISO_IN_EP_TIMEOUT|AHB_ERROR)
++#define GAHBCFG_INIT	(PTXFE_HALF|NPTXFE_HALF|MODE_SLAVE|BURST_SINGLE|GBL_INT_UNMASK)
++
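++/* IN endpoint serviced on the next TX-FIFO-empty interrupt; updated by s3c_queue() */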
++u32 tx_ep_num = 2;
++
++static int set_interface_first = 0;
++
++/*-------------------------------------------------------------------------*/
++
++/** Read from FIFO into a request buffer (max read == bytes in FIFO)
++ *  Return:  0 = still running, 1 = completed, negative = errno
++ */
++static int read_fifo(struct s3c_ep *ep, struct s3c_request *req)
++{
++	u32 csr, gintmsk;
++	u32 *buf;
++	u32 bufferspace, count, count_bytes, is_short = 0;
++	u32 fifo = ep->fifo;
++
++	csr = readl(S3C_UDC_OTG_GRXSTSP);
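++	/* GRXSTSP[14:4] (BCnt) = byte count of the packet popped from the RxFIFO */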
++	count_bytes = (csr & 0x7ff0)>>4;
++
++	gintmsk = readl(S3C_UDC_OTG_GINTMSK);
++
++	if(!count_bytes) {
++		DEBUG_OUT_EP("%s: count_bytes %d bytes\n", __FUNCTION__, count_bytes);
++
++		// Unmask USB OTG 2.0 interrupt source : INT_RX_FIFO_NOT_EMPTY
++		writel(gintmsk | INT_RX_FIFO_NOT_EMPTY, S3C_UDC_OTG_GINTMSK);
++		return 0;
++	}
++
++	buf = req->req.buf + req->req.actual;
++	prefetchw(buf);
++	bufferspace = req->req.length - req->req.actual;
++
++	count = (count_bytes + 3) / 4;	/* the FIFO is read as 32-bit words */
++
++	req->req.actual += min(count_bytes, bufferspace);
++
++	is_short = (count_bytes < ep->ep.maxpacket);
++	DEBUG_OUT_EP("%s: read %s, %d bytes%s req %p %d/%d GRXSTSP:0x%x\n",
++		__FUNCTION__,
++		ep->ep.name, count_bytes,
++		is_short ? "/S" : "", req, req->req.actual, req->req.length, csr);
++
++	while (likely(count-- != 0)) {
++		u32 byte = (u32) readl(fifo);
++
++		if (unlikely(bufferspace == 0)) {
++			/* this happens when the driver's buffer
++			 * is smaller than what the host sent.
++			 * discard the extra data.
++			 */
++			if (req->req.status != -EOVERFLOW)
++				printk("%s overflow %d\n", ep->ep.name, count);
++			req->req.status = -EOVERFLOW;
++		} else {
++			*buf++ = byte;
++			bufferspace -= 4;
++		}
++	}
++
++	// Unmask USB OTG 2.0 interrupt source : INT_RX_FIFO_NOT_EMPTY
++	writel(gintmsk | INT_RX_FIFO_NOT_EMPTY, S3C_UDC_OTG_GINTMSK);
++
++	/* completion */
++	if (is_short || req->req.actual == req->req.length) {
++		done(ep, req, 0);
++		return 1;
++	}
++
++	/* finished that packet.  the next one may be waiting... */
++	return 0;
++}
++
++/* Inline code */
++static __inline__ int write_packet(struct s3c_ep *ep,
++				   struct s3c_request *req, int max)
++{
++	u32 *buf;
++	int length, count;
++	u32 fifo = ep->fifo, in_ctrl;
++
++	buf = req->req.buf + req->req.actual;
++	prefetch(buf);
++
++	length = req->req.length - req->req.actual;
++	length = min(length, max);
++	req->req.actual += length;
++
++	DEBUG("%s: Write %d (max %d), fifo=0x%x\n",
++		__FUNCTION__, length, max, fifo);
++
++	if(ep_index(ep) == EP0_CON) {
++		writel((1<<19)|(length<<0), (u32) S3C_UDC_OTG_DIEPTSIZ0);
++
++		in_ctrl =  readl(S3C_UDC_OTG_DIEPCTL0);
++		writel(DEPCTL_EPENA|DEPCTL_CNAK|(EP2_IN<<11)| in_ctrl, (u32) S3C_UDC_OTG_DIEPCTL0);
++
++		DEBUG_EP0("%s:(DIEPTSIZ0):0x%x, (DIEPCTL0):0x%x, (GNPTXSTS):0x%x\n", __FUNCTION__,
++			readl(S3C_UDC_OTG_DIEPTSIZ0),readl(S3C_UDC_OTG_DIEPCTL0),
++			readl(S3C_UDC_OTG_GNPTXSTS));
++
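++		/* brief settling delay before filling the FIFO (vendor timing; the value is not documented here) */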
++		udelay(30);
++
++	} else if ((ep_index(ep) == EP2_IN)) {
++		writel((1<<19)|(length<<0), S3C_UDC_OTG_DIEPTSIZ2);
++
++		in_ctrl =  readl(S3C_UDC_OTG_DIEPCTL2);
++		writel(DEPCTL_EPENA|DEPCTL_CNAK|(EP2_IN<<11)| in_ctrl, (u32) S3C_UDC_OTG_DIEPCTL2);
++
++		DEBUG_IN_EP("%s:(DIEPTSIZ2):0x%x, (DIEPCTL2):0x%x, (GNPTXSTS):0x%x\n", __FUNCTION__,
++			readl(S3C_UDC_OTG_DIEPTSIZ2),readl(S3C_UDC_OTG_DIEPCTL2),
++			readl(S3C_UDC_OTG_GNPTXSTS));
++
++		udelay(30);
++
++	} else if ((ep_index(ep) == EP3_IN)) {
++
++		if (set_interface_first == 1) {
++			DEBUG_IN_EP("%s: first packet write skipped after set_interface\n", __FUNCTION__);
++			set_interface_first = 0;
++			return length;
++		}
++
++		writel((1<<19)|(length<<0), S3C_UDC_OTG_DIEPTSIZ3);
++
++		in_ctrl =  readl(S3C_UDC_OTG_DIEPCTL3);
++		writel(DEPCTL_EPENA|DEPCTL_CNAK|(EP2_IN<<11)| in_ctrl, (u32) S3C_UDC_OTG_DIEPCTL3);
++
++		DEBUG_IN_EP("%s:(DIEPTSIZ3):0x%x, (DIEPCTL3):0x%x, (GNPTXSTS):0x%x\n", __FUNCTION__,
++			readl(S3C_UDC_OTG_DIEPTSIZ3),readl(S3C_UDC_OTG_DIEPCTL3),
++			readl(S3C_UDC_OTG_GNPTXSTS));
++
++		udelay(30);
++
++	} else {
++		printk("%s: --> Error Unused Endpoint!!\n",
++			__FUNCTION__);
++		BUG();
++	}
++
++	for (count = 0; count < length; count += 4) {
++		writel(*buf++, fifo);
++	}
++	return length;
++}
++
++/** Write request to FIFO (max write == maxp size)
++ *  Return:  0 = still running, 1 = completed, negative = errno
++ */
++static int write_fifo(struct s3c_ep *ep, struct s3c_request *req)
++{
++	u32 max, gintmsk;
++	unsigned count;
++	int is_last = 0, is_short = 0;
++
++	gintmsk = readl(S3C_UDC_OTG_GINTMSK);
++
++	max = le16_to_cpu(ep->desc->wMaxPacketSize);
++	count = write_packet(ep, req, max);
++
++	/* last packet is usually short (or a zlp) */
++	if (unlikely(count != max))
++		is_last = is_short = 1;
++	else {
++		if (likely(req->req.length != req->req.actual)
++		    || req->req.zero)
++			is_last = 0;
++		else
++			is_last = 1;
++		/* interrupt/iso maxpacket may not fill the fifo */
++		is_short = unlikely(max < ep_maxpacket(ep));
++	}
++
++	DEBUG_IN_EP("%s: wrote %s %d bytes%s%s req %p %d/%d\n",
++			__FUNCTION__,
++			ep->ep.name, count,
++			is_last ? "/L" : "", is_short ? "/S" : "",
++			req, req->req.actual, req->req.length);
++
++	/* requests complete when all IN data is in the FIFO */
++	if (is_last) {
++		if(!ep_index(ep)){
++			printk("%s: --> Error EP0 must not come here!\n",
++				__FUNCTION__);
++			BUG();
++		}
++		writel(gintmsk&(~INT_NP_TX_FIFO_EMPTY), S3C_UDC_OTG_GINTMSK);
++		done(ep, req, 0);
++		return 1;
++	}
++
++	// Unmask USB OTG 2.0 interrupt source : INT_NP_TX_FIFO_EMPTY
++	writel(gintmsk | INT_NP_TX_FIFO_EMPTY, S3C_UDC_OTG_GINTMSK);
++	return 0;
++}
++
++/* ********************************************************************************************* */
++/* Bulk OUT (recv)
++ */
++
++static void s3c_out_epn(struct s3c_udc *dev, u32 ep_idx)
++{
++	struct s3c_ep *ep = &dev->ep[ep_idx];
++	struct s3c_request *req;
++
++	if (unlikely(!(ep->desc))) {
++		/* Throw packet away.. */
++		printk("%s: No descriptor?!?\n", __FUNCTION__);
++		return;
++	}
++
++	if (list_empty(&ep->queue))
++		req = 0;
++	else
++		req = list_entry(ep->queue.next,
++				struct s3c_request, queue);
++
++	if (unlikely(!req)) {
++		DEBUG_OUT_EP("%s: NULL REQ on OUT EP-%d\n", __FUNCTION__, ep_idx);
++		return;
++
++	} else {
++		read_fifo(ep, req);
++	}
++}
++
++/**
++ * s3c_in_epn - handle IN interrupt
++ */
++static void s3c_in_epn(struct s3c_udc *dev, u32 ep_idx)
++{
++	struct s3c_ep *ep = &dev->ep[ep_idx];
++	struct s3c_request *req;
++
++	if (list_empty(&ep->queue))
++		req = 0;
++	else
++		req = list_entry(ep->queue.next, struct s3c_request, queue);
++
++	if (unlikely(!req)) {
++		DEBUG_IN_EP("%s: NULL REQ on IN EP-%d\n", __FUNCTION__, ep_idx);
++		return;
++	}
++	else {
++		write_fifo(ep, req);
++	}
++}
++
++/*
++ *	elfin usb client interrupt handler.
++ */
++static irqreturn_t s3c_udc_irq(int irq, void *_dev)
++{
++	struct s3c_udc *dev = _dev;
++	u32 intr_status;
++	u32 usb_status, ep_ctrl, gintmsk;
++
++	spin_lock(&dev->lock);
++
++	intr_status = readl(S3C_UDC_OTG_GINTSTS);
++	gintmsk = readl(S3C_UDC_OTG_GINTMSK);
++
++	DEBUG_ISR("\n**** %s : GINTSTS=0x%x(on state %s), GINTMSK : 0x%x\n",
++			__FUNCTION__, intr_status, state_names[dev->ep0state], gintmsk);
++
++	if (!intr_status) {
++		spin_unlock(&dev->lock);
++		return IRQ_HANDLED;
++	}
++
++	if (intr_status & INT_ENUMDONE) {
++		DEBUG_SETUP("####################################\n");
++		DEBUG_SETUP("    %s: Speed Detection interrupt\n",
++				__FUNCTION__);
++		writel(INT_ENUMDONE, S3C_UDC_OTG_GINTSTS);
++
++		usb_status = (readl(S3C_UDC_OTG_DSTS) & 0x6);
++
++		if (usb_status & (USB_FULL_30_60MHZ | USB_FULL_48MHZ)) {
++			DEBUG_SETUP("    %s: Full Speed Detection\n",__FUNCTION__);
++			set_max_pktsize(dev, USB_SPEED_FULL);
++
++		} else {
++			DEBUG_SETUP("    %s: High Speed Detection : 0x%x\n", __FUNCTION__, usb_status);
++			set_max_pktsize(dev, USB_SPEED_HIGH);
++		}
++	}
++
++	if (intr_status & INT_EARLY_SUSPEND) {
++		DEBUG_SETUP("####################################\n");
++		DEBUG_SETUP("    %s:Early suspend interrupt\n", __FUNCTION__);
++		writel(INT_EARLY_SUSPEND, S3C_UDC_OTG_GINTSTS);
++	}
++
++	if (intr_status & INT_SUSPEND) {
++		usb_status = readl(S3C_UDC_OTG_DSTS);
++		DEBUG_SETUP("####################################\n");
++		DEBUG_SETUP("    %s:Suspend interrupt :(DSTS):0x%x\n", __FUNCTION__, usb_status);
++		writel(INT_SUSPEND, S3C_UDC_OTG_GINTSTS);
++
++		if (dev->gadget.speed != USB_SPEED_UNKNOWN
++		    && dev->driver
++		    && dev->driver->suspend) {
++
++			dev->driver->suspend(&dev->gadget);
++		}
++	}
++
++	if (intr_status & INT_RESUME) {
++		DEBUG_SETUP("####################################\n");
++		DEBUG_SETUP("    %s: Resume interrupt\n", __FUNCTION__);
++		writel(INT_RESUME, S3C_UDC_OTG_GINTSTS);
++
++		if (dev->gadget.speed != USB_SPEED_UNKNOWN
++		    && dev->driver
++		    && dev->driver->resume) {
++
++			dev->driver->resume(&dev->gadget);
++		}
++	}
++
++	if (intr_status & INT_RESET) {
++		usb_status = readl(S3C_UDC_OTG_GOTGCTL);
++		DEBUG_SETUP("####################################\n");
++		DEBUG_SETUP("    %s: Reset interrupt - (GOTGCTL):0x%x\n", __FUNCTION__, usb_status);
++		writel(INT_RESET, S3C_UDC_OTG_GINTSTS);
++
++		if((usb_status & 0xc0000) == (0x3 << 18)) {
++			if(reset_available) {
++				DEBUG_SETUP("     ===> OTG core got reset (%d)!! \n", reset_available);
++				reconfig_usbd();
++				dev->ep0state = WAIT_FOR_SETUP;
++				reset_available = 0;
++			}
++		} else {
++			reset_available = 1;
++			DEBUG_SETUP("      RESET handling skipped : reset_available : %d\n", reset_available);
++		}
++	}
++
++	if (intr_status & INT_RX_FIFO_NOT_EMPTY) {
++		u32 grx_status, packet_status, ep_num, fifoCntByte = 0;
++
++		// Mask USB OTG 2.0 interrupt source : INT_RX_FIFO_NOT_EMPTY
++		gintmsk &= ~INT_RX_FIFO_NOT_EMPTY;
++		writel(gintmsk, S3C_UDC_OTG_GINTMSK);
++
++		grx_status = readl(S3C_UDC_OTG_GRXSTSR);
++		DEBUG_ISR("    INT_RX_FIFO_NOT_EMPTY(GRXSTSR):0x%x, GINTMSK:0x%x\n", grx_status, gintmsk);
++
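++		/* GRXSTSR fields: [20:17] packet status, [14:4] byte count, [3:0] endpoint number */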
++		packet_status = grx_status & 0x1E0000;
++		fifoCntByte = (grx_status & 0x7ff0)>>4;
++		ep_num = grx_status & EP_MASK;
++
++		if (fifoCntByte) {
++
++			if (packet_status == SETUP_PKT_RECEIVED)  {
++				DEBUG_EP0("      => A SETUP data packet received : %d bytes\n", fifoCntByte);
++				s3c_handle_ep0(dev);
++
++				// Unmask USB OTG 2.0 interrupt source : INT_RX_FIFO_NOT_EMPTY
++				gintmsk |= INT_RX_FIFO_NOT_EMPTY;
++
++			} else if (packet_status == OUT_PKT_RECEIVED) {
++
++				if(ep_num == EP1_OUT) {
++					ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL1);
++					DEBUG_ISR("      => A Bulk OUT data packet received : %d bytes, (DOEPCTL1):0x%x\n",
++						fifoCntByte, ep_ctrl);
++					s3c_out_epn(dev, 1);
++					gintmsk = readl(S3C_UDC_OTG_GINTMSK);
++					writel(ep_ctrl | DEPCTL_CNAK, S3C_UDC_OTG_DOEPCTL1);
++				} else if (ep_num == EP0_CON) {
++					ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL0);
++					DEBUG_EP0("      => A CONTROL OUT data packet received : %d bytes, (DOEPCTL0):0x%x\n",
++						fifoCntByte, ep_ctrl);
++					dev->ep0state = DATA_STATE_RECV;
++					s3c_ep0_read(dev);
++					gintmsk |= INT_RX_FIFO_NOT_EMPTY;
++				} else {
++					DEBUG_ISR("      => Unused EP: %d bytes, (GRXSTSR):0x%x\n", fifoCntByte, grx_status);
++				}
++			} else {
++				grx_status = readl(S3C_UDC_OTG_GRXSTSP);
++
++				// Unmask USB OTG 2.0 interrupt source : INT_RX_FIFO_NOT_EMPTY
++				gintmsk |= INT_RX_FIFO_NOT_EMPTY;
++
++				DEBUG_ISR("      => A reserved packet received : %d bytes\n", fifoCntByte);
++			}
++		} else {
++			if (dev->ep0state == DATA_STATE_XMIT) {
++				ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL0);
++				DEBUG_EP0("      => Write ep0 continue... (DOEPCTL0):0x%x\n", ep_ctrl);
++				s3c_ep0_write(dev);
++			}
++
++			if (packet_status == SETUP_TRANSACTION_COMPLETED) {
++				ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL0);
++				DEBUG_EP0("      => A SETUP transaction completed (DOEPCTL0):0x%x\n", ep_ctrl);
++				writel(ep_ctrl | DEPCTL_CNAK, S3C_UDC_OTG_DOEPCTL0);
++
++			} else if (packet_status == OUT_TRANSFER_COMPLELTED) {
++				if (ep_num == EP1_OUT) {
++					ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL1);
++					DEBUG_ISR("      => An OUT transaction completed (DOEPCTL1):0x%x\n", ep_ctrl);
++					writel(ep_ctrl | DEPCTL_CNAK, S3C_UDC_OTG_DOEPCTL1);
++				} else if (ep_num == EP0_CON) {
++					ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL0);
++					DEBUG_ISR("      => An OUT transaction completed (DOEPCTL0):0x%x\n", ep_ctrl);
++					writel(ep_ctrl | DEPCTL_CNAK, S3C_UDC_OTG_DOEPCTL0);
++				} else {
++					DEBUG_ISR("      => Unused EP: %d bytes, (GRXSTSR):0x%x\n", fifoCntByte, grx_status);
++				}
++			} else if (packet_status == OUT_PKT_RECEIVED) {
++				DEBUG_ISR("      => A  OUT PACKET RECEIVED (NO FIFO CNT BYTE)...(GRXSTSR):0x%x\n", grx_status);
++			} else {
++				DEBUG_ISR("      => A RESERVED PACKET RECEIVED (NO FIFO CNT BYTE)...(GRXSTSR):0x%x\n", grx_status);
++			}
++
++			grx_status = readl(S3C_UDC_OTG_GRXSTSP);
++
++			// Unmask USB OTG 2.0 interrupt source : INT_RX_FIFO_NOT_EMPTY
++			gintmsk |= INT_RX_FIFO_NOT_EMPTY;
++
++		}
++
++		// Un/Mask USB OTG 2.0 interrupt sources
++		writel(gintmsk, S3C_UDC_OTG_GINTMSK);
++
++		spin_unlock(&dev->lock);
++		return IRQ_HANDLED;
++	}
++
++
++	if (intr_status & INT_NP_TX_FIFO_EMPTY) {
++		DEBUG_ISR("    INT_NP_TX_FIFO_EMPTY (GNPTXSTS):0x%x, (GINTMSK):0x%x, ep_num=%d\n",
++				readl(S3C_UDC_OTG_GNPTXSTS),
++				readl(S3C_UDC_OTG_GINTMSK),
++				tx_ep_num);
++
++		s3c_in_epn(dev, tx_ep_num);
++	}
++
++	spin_unlock(&dev->lock);
++
++	return IRQ_HANDLED;
++}
++
++/** Queue one request
++ *  Kickstart transfer if needed
++ */
++static int s3c_queue(struct usb_ep *_ep, struct usb_request *_req,
++			 gfp_t gfp_flags)
++{
++	struct s3c_request *req;
++	struct s3c_ep *ep;
++	struct s3c_udc *dev;
++	unsigned long flags;
++
++	req = container_of(_req, struct s3c_request, req);
++	if (unlikely(!_req || !_req->complete || !_req->buf
++			|| !list_empty(&req->queue)))
++	{
++		DEBUG("%s: bad params\n", __FUNCTION__);
++		return -EINVAL;
++	}
++
++	ep = container_of(_ep, struct s3c_ep, ep);
++	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
++		DEBUG("%s: bad ep\n", __FUNCTION__);
++		return -EINVAL;
++	}
++
++	dev = ep->dev;
++	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
++		DEBUG("%s: bogus device state %p\n", __FUNCTION__, dev->driver);
++		return -ESHUTDOWN;
++	}
++
++	DEBUG("\n%s: %s queue req %p, len %d buf %p\n",
++		__FUNCTION__, _ep->name, _req, _req->length, _req->buf);
++
++	spin_lock_irqsave(&dev->lock, flags);
++
++	_req->status = -EINPROGRESS;
++	_req->actual = 0;
++
++	/* kickstart this i/o queue? */
++	DEBUG("%s: Add to ep=%d, Q empty=%d, stopped=%d\n",
++		__FUNCTION__, ep_index(ep), list_empty(&ep->queue), ep->stopped);
++
++	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
++		u32 csr;
++
++		if (unlikely(ep_index(ep) == 0)) {
++			/* EP0 */
++			list_add_tail(&req->queue, &ep->queue);
++			s3c_ep0_kick(dev, ep);
++			req = 0;
++
++		} else if (ep_is_in(ep)) {
++			csr = readl((u32) S3C_UDC_OTG_GINTSTS);
++			DEBUG_IN_EP("%s: ep_is_in, S3C_UDC_OTG_GINTSTS=0x%x\n",
++				__FUNCTION__, csr);
++
++			if((csr & INT_NP_TX_FIFO_EMPTY) &&
++			   (write_fifo(ep, req) == 1)) {
++				req = 0;
++			} else {
++				DEBUG("++++ IN-list_add_taill::req=%p, ep=%d\n",
++					req, ep_index(ep));
++				tx_ep_num = ep_index(ep);
++			}
++		} else {
++			csr = readl((u32) S3C_UDC_OTG_GINTSTS);
++			DEBUG_OUT_EP("%s: ep_is_out, S3C_UDC_OTG_GINTSTS=0x%x\n",
++				__FUNCTION__, csr);
++
++			if((csr & INT_RX_FIFO_NOT_EMPTY) &&
++			   (read_fifo(ep, req) == 1))
++				req = 0;
++			else
++				DEBUG("++++ OUT-list_add_taill::req=%p, DOEPCTL1:0x%x\n",
++					req, readl(S3C_UDC_OTG_DOEPCTL1));
++		}
++	}
++
++	/* pio or dma irq handler advances the queue. */
++	if (likely(req != 0))
++		list_add_tail(&req->queue, &ep->queue);
++
++	spin_unlock_irqrestore(&dev->lock, flags);
++
++	return 0;
++}
++
++/****************************************************************/
++/* End Point 0 related functions                                */
++/****************************************************************/
++
++/* return:  0 = still running, 1 = completed, negative = errno */
++static int write_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req)
++{
++	u32 max;
++	unsigned count;
++	int is_last;
++
++	max = ep_maxpacket(ep);
++
++	DEBUG_EP0("%s: max = %d\n", __FUNCTION__, max);
++
++	count = write_packet(ep, req, max);
++
++	/* last packet is usually short (or a zlp) */
++	if (likely(count != max))
++		is_last = 1;
++	else {
++		if (likely(req->req.length != req->req.actual) || req->req.zero)
++			is_last = 0;
++		else
++			is_last = 1;
++	}
++
++	DEBUG_EP0("%s: wrote %s %d bytes%s %d left %p\n", __FUNCTION__,
++		  ep->ep.name, count,
++		  is_last ? "/L" : "", req->req.length - req->req.actual, req);
++
++	/* requests complete when all IN data is in the FIFO */
++	if (is_last) {
++		return 1;
++	}
++
++	return 0;
++}
++
++static __inline__ int s3c_fifo_read(struct s3c_ep *ep, u32 *cp, int max)
++{
++	int bytes;
++	int count;
++	u32 grx_status = readl(S3C_UDC_OTG_GRXSTSP);
++	bytes = (grx_status & 0x7ff0)>>4;
++
++	DEBUG_EP0("%s: GRXSTSP=0x%x, bytes=%d, ep_index=%d, fifo=0x%x\n",
++			__FUNCTION__, grx_status, bytes, ep_index(ep), ep->fifo);
++
++	// 32-bit interface: the FIFO is read as whole words
++	count = bytes / 4;
++
++	while (count--) {
++		*cp++ = (u32) readl(S3C_UDC_OTG_EP0_FIFO);
++	}
++
++	return bytes;
++}
++
++static int read_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req)
++{
++	u32 csr;
++	u32 *buf;
++	unsigned bufferspace, count, is_short, bytes;
++	u32 fifo = ep->fifo;
++
++	DEBUG_EP0("%s\n", __FUNCTION__);
++
++	csr = readl(S3C_UDC_OTG_GRXSTSP);
++	bytes = (csr & 0x7ff0)>>4;
++
++	buf = req->req.buf + req->req.actual;
++	prefetchw(buf);
++	bufferspace = req->req.length - req->req.actual;
++
++	/* read all bytes from this packet */
++	if (likely((csr & EP_MASK) == EP0_CON)) {
++		count = bytes / 4;
++		req->req.actual += min(bytes, bufferspace);
++
++	} else {		// zlp
++		count = 0;
++		bytes = 0;
++	}
++
++	is_short = (bytes < ep->ep.maxpacket);
++	DEBUG_EP0("%s: read %s %02x, %d bytes%s req %p %d/%d\n",
++		  __FUNCTION__,
++		  ep->ep.name, csr, bytes,
++		  is_short ? "/S" : "", req, req->req.actual, req->req.length);
++
++	while (likely(count-- != 0)) {
++		u32 byte = (u32) readl(fifo);
++
++		if (unlikely(bufferspace == 0)) {
++			/* this happens when the driver's buffer
++			 * is smaller than what the host sent.
++			 * discard the extra data.
++			 */
++			if (req->req.status != -EOVERFLOW)
++				DEBUG_EP0("%s overflow %d\n", ep->ep.name,
++					  count);
++			req->req.status = -EOVERFLOW;
++		} else {
++			*buf++ = byte;
++			bufferspace = bufferspace - 4;
++		}
++	}
++
++	/* completion */
++	if (is_short || req->req.actual == req->req.length) {
++		return 1;
++	}
++
++	return 0;
++}
++
++/**
++ * udc_set_address - set the USB address for this device
++ * @address:
++ *
++ * Called from control endpoint function
++ * after it decodes a set address setup packet.
++ */
++static void udc_set_address(struct s3c_udc *dev, unsigned char address)
++{
++	u32 ctrl = readl(S3C_UDC_OTG_DCFG);
++	writel(address << 4 | ctrl, S3C_UDC_OTG_DCFG);
++
++	ctrl = readl(S3C_UDC_OTG_DIEPCTL0);
++	writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, S3C_UDC_OTG_DIEPCTL0); /* EP0: Control IN */
++
++	DEBUG_EP0("%s: USB OTG 2.0 Device address=%d, DCFG=0x%x\n",
++		__FUNCTION__, address, readl(S3C_UDC_OTG_DCFG));
++
++	dev->usb_address = address;
++}
++
++static int first_time = 1;
++
++static void s3c_ep0_read(struct s3c_udc *dev)
++{
++	struct s3c_request *req;
++	struct s3c_ep *ep = &dev->ep[0];
++	int ret;
++
++	if (!list_empty(&ep->queue))
++		req = list_entry(ep->queue.next, struct s3c_request, queue);
++	else {
++		printk("%s: ---> BUG\n", __FUNCTION__);
++		BUG();	/* logic ensures the queue is never empty here */
++		return;
++	}
++
++	DEBUG_EP0("%s: req.length = 0x%x, req.actual = 0x%x\n",
++		__FUNCTION__, req->req.length, req->req.actual);
++
++	if(req->req.length == 0) {
++		dev->ep0state = WAIT_FOR_SETUP;
++		first_time = 1;
++		done(ep, req, 0);
++		return;
++	}
++
++	if (!req->req.actual && first_time) {	// for the SETUP packet
++		first_time = 0;
++		return;
++	}
++
++	ret = read_fifo_ep0(ep, req);
++	if (ret) {
++		dev->ep0state = WAIT_FOR_SETUP;
++		first_time = 1;
++		done(ep, req, 0);
++		return;
++	}
++}
++
++/*
++ * DATA_STATE_XMIT
++ */
++static int s3c_ep0_write(struct s3c_udc *dev)
++{
++	struct s3c_request *req;
++	struct s3c_ep *ep = &dev->ep[0];
++	int ret, need_zlp = 0;
++
++	DEBUG_EP0("%s: ep0 write\n", __FUNCTION__);
++
++	if (list_empty(&ep->queue))
++		req = 0;
++	else
++		req = list_entry(ep->queue.next, struct s3c_request, queue);
++
++	if (!req) {
++		DEBUG_EP0("%s: NULL REQ\n", __FUNCTION__);
++		return 0;
++	}
++
++	DEBUG_EP0("%s: req.length = 0x%x, req.actual = 0x%x\n",
++		__FUNCTION__, req->req.length, req->req.actual);
++
++	if (req->req.length == 0) {
++		dev->ep0state = WAIT_FOR_SETUP;
++		done(ep, req, 0);
++		return 1;
++	}
++
++	if (req->req.length - req->req.actual == ep0_fifo_size) {
++		/* Next write will end with the packet size, */
++		/* so we need Zero-length-packet */
++		need_zlp = 1;
++	}
++
++	ret = write_fifo_ep0(ep, req);
++
++	if ((ret == 1) && !need_zlp) {
++		/* Last packet */
++		DEBUG_EP0("%s: finished, waiting for status\n", __FUNCTION__);
++		dev->ep0state = WAIT_FOR_SETUP;
++	} else {
++		DEBUG_EP0("%s: not finished\n", __FUNCTION__);
++	}
++
++	if (need_zlp) {
++		DEBUG_EP0("%s: Need ZLP!\n", __FUNCTION__);
++		dev->ep0state = DATA_STATE_NEED_ZLP;
++	}
++
++	if (ret)
++		done(ep, req, 0);
++
++	return 1;
++}
++
++static int s3c_udc_set_halt(struct usb_ep *_ep, int value)
++{
++	struct s3c_ep	*ep;
++	u32 ep_num;
++	ep = container_of(_ep, struct s3c_ep, ep);
++	ep_num = ep_index(ep);
++
++	DEBUG("%s: ep_num = %d, value = %d\n", __FUNCTION__, ep_num, value);
++	/* TODO */
++	return 0;
++}
++
++void s3c_udc_ep_activate(struct s3c_ep *ep)
++{
++	/* TODO */
++}
++
++/*
++ * WAIT_FOR_SETUP (OUT_PKT_RDY)
++ */
++static void s3c_ep0_setup(struct s3c_udc *dev)
++{
++	struct s3c_ep *ep = &dev->ep[0];
++	int i, bytes, is_in;
++	u32 ep_ctrl;
++
++	/* Nuke all previous transfers */
++	nuke(ep, -EPROTO);
++
++	/* read control req from fifo (8 bytes) */
++	bytes = s3c_fifo_read(ep, (u32 *)&usb_ctrl, 8);
++
++	DEBUG_SETUP("Read CTRL REQ %d bytes\n", bytes);
++	DEBUG_SETUP("  CTRL.bRequestType = 0x%x (is_in %d)\n", usb_ctrl.bRequestType,
++		    usb_ctrl.bRequestType & USB_DIR_IN);
++	DEBUG_SETUP("  CTRL.bRequest = 0x%x\n", usb_ctrl.bRequest);
++	DEBUG_SETUP("  CTRL.wLength = 0x%x\n", usb_ctrl.wLength);
++	DEBUG_SETUP("  CTRL.wValue = 0x%x (%d)\n", usb_ctrl.wValue, usb_ctrl.wValue >> 8);
++	DEBUG_SETUP("  CTRL.wIndex = 0x%x\n", usb_ctrl.wIndex);
++
++	/* Set direction of EP0 */
++	if (likely(usb_ctrl.bRequestType & USB_DIR_IN)) {
++		ep->bEndpointAddress |= USB_DIR_IN;
++		is_in = 1;
++	} else {
++		ep->bEndpointAddress &= ~USB_DIR_IN;
++		is_in = 0;
++	}
++
++	dev->req_pending = 1;
++
++	/* Handle some SETUP packets ourselves */
++	switch (usb_ctrl.bRequest) {
++		case USB_REQ_SET_ADDRESS:
++			if (usb_ctrl.bRequestType
++				!= (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
++				break;
++
++			DEBUG_SETUP("%s: *** USB_REQ_SET_ADDRESS (%d)\n",
++					__FUNCTION__, usb_ctrl.wValue);
++			udc_set_address(dev, usb_ctrl.wValue);
++			return;
++
++		case USB_REQ_SET_CONFIGURATION :
++			DEBUG_SETUP("============================================\n");
++			DEBUG_SETUP("%s: USB_REQ_SET_CONFIGURATION (%d)\n",
++					__FUNCTION__, usb_ctrl.wValue);
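++			/* SET_INTERFACE jumps here as well: both requests re-arm EP0 and the data endpoints */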
++config_change:
++			// Send a ZLP (zero-length packet) to the host in response to SET_CONFIGURATION
++			ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL0);
++			writel(DEPCTL_EPENA|DEPCTL_CNAK|ep_ctrl, S3C_UDC_OTG_DIEPCTL0); /* EP0: Control IN */
++
++			// For starting EP1 on this new configuration
++			ep_ctrl = readl(S3C_UDC_OTG_DOEPCTL1);
++			writel(DEPCTL_EPDIS|DEPCTL_CNAK|DEPCTL_BULK_TYPE|DEPCTL_USBACTEP|ep_ctrl, S3C_UDC_OTG_DOEPCTL1); /* EP1: Bulk OUT */
++
++			// For starting EP2 on this new configuration
++			ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL2);
++			writel(DEPCTL_BULK_TYPE|DEPCTL_USBACTEP|ep_ctrl, S3C_UDC_OTG_DIEPCTL2); /* EP2: Bulk IN */
++
++			// For starting EP3 on this new configuration
++			ep_ctrl = readl(S3C_UDC_OTG_DIEPCTL3);
++			writel(DEPCTL_BULK_TYPE|DEPCTL_USBACTEP|ep_ctrl, S3C_UDC_OTG_DIEPCTL3); /* EP3: INTR IN */
++
++			DEBUG_SETUP("%s:(DOEPCTL1):0x%x, (DIEPCTL2):0x%x, (DIEPCTL3):0x%x\n",
++				__FUNCTION__,
++				readl(S3C_UDC_OTG_DOEPCTL1),
++				readl(S3C_UDC_OTG_DIEPCTL2),
++				readl(S3C_UDC_OTG_DIEPCTL3));
++
++			DEBUG_SETUP("============================================\n");
++
++			reset_available = 1;
++			dev->req_config = 1;
++			break;
++
++		case USB_REQ_GET_DESCRIPTOR:
++			DEBUG_SETUP("%s: *** USB_REQ_GET_DESCRIPTOR  \n",__FUNCTION__);
++			break;
++
++		case USB_REQ_SET_INTERFACE:
++			DEBUG_SETUP("%s: *** USB_REQ_SET_INTERFACE (%d)\n",
++					__FUNCTION__, usb_ctrl.wValue);
++
++			set_interface_first = 1;
++			goto config_change;
++
++		case USB_REQ_GET_CONFIGURATION:
++			DEBUG_SETUP("%s: *** USB_REQ_GET_CONFIGURATION  \n",__FUNCTION__);
++			break;
++
++		case USB_REQ_GET_STATUS:
++			DEBUG_SETUP("%s: *** USB_REQ_GET_STATUS  \n",__FUNCTION__);
++			break;
++
++		default:
++			DEBUG_SETUP("%s: *** Default of usb_ctrl.bRequest=0x%x happened.\n",
++					__FUNCTION__, usb_ctrl.bRequest);
++			break;
++	}
++
++	if (likely(dev->driver)) {
++		/* device-2-host (IN) or no data setup command,
++		 * process immediately */
++		spin_unlock(&dev->lock);
++		DEBUG_SETUP("%s: ctrlrequest will be passed to fsg_setup()\n", __FUNCTION__);
++		i = dev->driver->setup(&dev->gadget, (struct usb_ctrlrequest *)&usb_ctrl);
++		spin_lock(&dev->lock);
++
++		if (i < 0) {
++			/* setup processing failed, force stall */
++			DEBUG_SETUP("%s: gadget setup FAILED (stalling), setup returned %d\n",
++				__FUNCTION__, i);
++			/* ep->stopped = 1; */
++			dev->ep0state = WAIT_FOR_SETUP;
++		}
++	}
++}
++
++/*
++ * handle ep0 interrupt
++ */
++static void s3c_handle_ep0(struct s3c_udc *dev)
++{
++	if (dev->ep0state == WAIT_FOR_SETUP) {
++		DEBUG_EP0("%s: WAIT_FOR_SETUP\n", __FUNCTION__);
++		s3c_ep0_setup(dev);
++
++	} else {
++		DEBUG_EP0("%s: strange state!!(state = %s)\n",
++			__FUNCTION__, state_names[dev->ep0state]);
++	}
++}
++
++static void s3c_ep0_kick(struct s3c_udc *dev, struct s3c_ep *ep)
++{
++	DEBUG_EP0("%s: ep_is_in = %d\n", __FUNCTION__, ep_is_in(ep));
++	if (ep_is_in(ep)) {
++		dev->ep0state = DATA_STATE_XMIT;
++		s3c_ep0_write(dev);
++	} else {
++		dev->ep0state = DATA_STATE_RECV;
++		s3c_ep0_read(dev);
++	}
++}
+diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
+index ab085f12..4e0fa18a 100644
+--- a/drivers/usb/host/Kconfig
++++ b/drivers/usb/host/Kconfig
+@@ -574,7 +574,31 @@ config USB_OCTEON_OHCI
+ 	  Enable support for the Octeon II SOC's on-chip OHCI
+ 	  controller.  It is needed for low-speed USB 1.0 device
+ 	  support.  All CN6XXX based chips with USB are supported.
++
++config CONFIG_USB_FH_OTG
++	tristate "FH OTG support, select one or none"
++	depends on USB
++	help
++	  FullHan USB controller support. If no role is selected,
++	  the controller operates in OTG mode.
+ 
++source "drivers/usb/host/fh_otg/Kconfig"
++
+ config USB_OCTEON2_COMMON
+ 	bool
+ 	default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
++
++config USB_S3C_OTG_HOST
++	tristate "S3C USB OTG Host support"
++	depends on USB
++	help
++	  Samsung's S3C64XX processors include a high-speed USB OTG 2.0
++	  controller. It has 15 configurable endpoints, as well as
++	  endpoint zero (for control transfers).
++
++	  This driver supports only the OTG host role. If you want the
++	  OTG device role, select USB Gadget support and S3C OTG Device.
++
++	  Say "y" to link the driver statically, or "m" to build a
++	  dynamically linked module called "s3c_otg_hcd" and force all
++	  drivers to also be dynamically linked.
++
+diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
+index 624a362f..ed8d6506 100644
+--- a/drivers/usb/host/Makefile
++++ b/drivers/usb/host/Makefile
+@@ -18,6 +18,7 @@ obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
+ 
+ obj-$(CONFIG_PCI)		+= pci-quirks.o
+ 
++obj-$(CONFIG_CONFIG_USB_FH_OTG)	+= fh_otg/
+ obj-$(CONFIG_USB_EHCI_HCD)	+= ehci-hcd.o
+ obj-$(CONFIG_USB_OXU210HP_HCD)	+= oxu210hp-hcd.o
+ obj-$(CONFIG_USB_ISP116X_HCD)	+= isp116x-hcd.o
+diff --git a/drivers/usb/host/fh_otg/Kconfig b/drivers/usb/host/fh_otg/Kconfig
+new file mode 100644
+index 00000000..e9285782
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/Kconfig
+@@ -0,0 +1 @@
++source "drivers/usb/host/fh_otg/fh_otg/Kconfig"
+diff --git a/drivers/usb/host/fh_otg/Makefile b/drivers/usb/host/fh_otg/Makefile
+new file mode 100644
+index 00000000..f5cd4fb9
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/Makefile
+@@ -0,0 +1,7 @@
++#
++#Makefile for the kernel USB host drivers.
++#
++
++# Object files in subdirectories
++
++obj-$(CONFIG_CONFIG_USB_FH_OTG)		+= fh_common_port/ fh_otg/
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/Makefile b/drivers/usb/host/fh_otg/fh_common_port/Makefile
+new file mode 100644
+index 00000000..0f9ce68d
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/Makefile
+@@ -0,0 +1,49 @@
++#
++# Makefile for FH_common library
++#
++ifneq ($(KERNELRELEASE),)
++
++EXTRA_CFLAGS	+= -DFH_LINUX
++#EXTRA_CFLAGS	+= -DDEBUG
++#EXTRA_CFLAGS	+= -DFH_DEBUG_REGS
++#EXTRA_CFLAGS	+= -DFH_DEBUG_MEMORY
++
++EXTRA_CFLAGS	+= -DFH_LIBMODULE
++EXTRA_CFLAGS	+= -DFH_CCLIB
++EXTRA_CFLAGS	+= -DFH_CRYPTOLIB
++EXTRA_CFLAGS	+= -DFH_NOTIFYLIB
++EXTRA_CFLAGS	+= -DFH_UTFLIB
++
++obj-y			 := fh_common_port_lib.o
++fh_common_port_lib-objs := fh_cc.o fh_modpow.o fh_dh.o \
++			    fh_crypto.o fh_notifier.o \
++			    fh_common_linux.o fh_mem.o
++
++else
++
++ifeq ($(KDIR),)
++$(error Must give "KDIR=/path/to/kernel/source" on command line or in environment)
++endif
++
++ifeq ($(ARCH),)
++$(error Must give "ARCH=<arch>" on command line or in environment. Also, if \
++ cross-compiling, must give "CROSS_COMPILE=/path/to/compiler/plus/tool-prefix-")
++endif
++
++ifeq ($(DOXYGEN),)
++DOXYGEN		:= doxygen
++endif
++
++default:
++	$(MAKE) -C$(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
++
++docs:	$(wildcard *.[hc]) doc/doxygen.cfg
++	$(DOXYGEN) doc/doxygen.cfg
++
++tags:	$(wildcard *.[hc])
++	$(CTAGS) -e $(wildcard *.[hc]) $(wildcard linux/*.[hc]) $(wildcard $(KDIR)/include/linux/usb*.h)
++
++endif
++
++clean:
++	rm -rf *.o *.ko .*.cmd *.mod.c .*.o.d .*.o.tmp modules.order Module.markers Module.symvers .tmp_versions/
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/Makefile.fbsd b/drivers/usb/host/fh_otg/fh_common_port/Makefile.fbsd
+new file mode 100644
+index 00000000..c78002c4
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/Makefile.fbsd
+@@ -0,0 +1,17 @@
++CFLAGS	+= -I/sys/i386/compile/GENERIC -I/sys/i386/include -I/usr/include
++CFLAGS	+= -DFH_FREEBSD
++CFLAGS	+= -DDEBUG
++#CFLAGS	+= -DFH_DEBUG_REGS
++#CFLAGS	+= -DFH_DEBUG_MEMORY
++
++#CFLAGS	+= -DFH_LIBMODULE
++#CFLAGS	+= -DFH_CCLIB
++#CFLAGS	+= -DFH_CRYPTOLIB
++#CFLAGS	+= -DFH_NOTIFYLIB
++#CFLAGS	+= -DFH_UTFLIB
++
++KMOD = fh_common_port_lib
++SRCS = fh_cc.c fh_modpow.c fh_dh.c fh_crypto.c fh_notifier.c \
++       fh_common_fbsd.c fh_mem.c
++
++.include <bsd.kmod.mk>
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/Makefile.linux b/drivers/usb/host/fh_otg/fh_common_port/Makefile.linux
+new file mode 100644
+index 00000000..4dc490cd
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/Makefile.linux
+@@ -0,0 +1,49 @@
++#
++# Makefile for FH_common library
++#
++ifneq ($(KERNELRELEASE),)
++
++EXTRA_CFLAGS	+= -DFH_LINUX
++#EXTRA_CFLAGS	+= -DDEBUG
++#EXTRA_CFLAGS	+= -DFH_DEBUG_REGS
++#EXTRA_CFLAGS	+= -DFH_DEBUG_MEMORY
++
++EXTRA_CFLAGS	+= -DFH_LIBMODULE
++EXTRA_CFLAGS	+= -DFH_CCLIB
++EXTRA_CFLAGS	+= -DFH_CRYPTOLIB
++EXTRA_CFLAGS	+= -DFH_NOTIFYLIB
++EXTRA_CFLAGS	+= -DFH_UTFLIB
++
++obj-m			 := fh_common_port_lib.o
++fh_common_port_lib-objs := fh_cc.o fh_modpow.o fh_dh.o \
++			    fh_crypto.o fh_notifier.o \
++			    fh_common_linux.o fh_mem.o
++
++else
++
++ifeq ($(KDIR),)
++$(error Must give "KDIR=/path/to/kernel/source" on command line or in environment)
++endif
++
++ifeq ($(ARCH),)
++$(error Must give "ARCH=<arch>" on command line or in environment. Also, if \
++ cross-compiling, must give "CROSS_COMPILE=/path/to/compiler/plus/tool-prefix-")
++endif
++
++ifeq ($(DOXYGEN),)
++DOXYGEN		:= doxygen
++endif
++
++default:
++	$(MAKE) -C$(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
++
++docs:	$(wildcard *.[hc]) doc/doxygen.cfg
++	$(DOXYGEN) doc/doxygen.cfg
++
++tags:	$(wildcard *.[hc])
++	$(CTAGS) -e $(wildcard *.[hc]) $(wildcard linux/*.[hc]) $(wildcard $(KDIR)/include/linux/usb*.h)
++
++endif
++
++clean:
++	rm -rf *.o *.ko .*.cmd *.mod.c .*.o.d .*.o.tmp modules.order Module.markers Module.symvers .tmp_versions/
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/changes.txt b/drivers/usb/host/fh_otg/fh_common_port/changes.txt
+new file mode 100644
+index 00000000..f6839f92
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/changes.txt
+@@ -0,0 +1,174 @@
++
++dwc_read_reg32() and friends now take an additional parameter, a pointer to an
++IO context struct. The IO context struct should live in an os-dependent struct
++in your driver. As an example, the dwc_usb3 driver has an os-dependent struct
++named 'os_dep' embedded in the main device struct. So there these calls look
++like this:
++
++	dwc_read_reg32(&usb3_dev->os_dep.ioctx, &pcd->dev_global_regs->dcfg);
++
++	dwc_write_reg32(&usb3_dev->os_dep.ioctx,
++			&pcd->dev_global_regs->dcfg, 0);
++
++Note that for the existing Linux driver ports, it is not necessary to actually
++define the 'ioctx' member in the os-dependent struct. Since Linux does not
++require an IO context, its macros for dwc_read_reg32() and friends do not
++use the context pointer, so it is optimized away by the compiler. But it is
++necessary to add the pointer parameter to all of the call sites, to be ready
++for any future ports (such as FreeBSD) which do require an IO context.
++
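++As an illustration, such an os-dependent struct might hold the context
++pointers like this (the member names here are only an example):
++
++	struct os_dependent {
++		void *ioctx;	/* for dwc_read_reg32() and friends */
++		void *memctx;	/* for dwc_alloc()/dwc_free() */
++		void *dmactx;	/* for dwc_dma_alloc()/dwc_dma_free() */
++		void *mtxctx;	/* for dwc_mutex_alloc()/dwc_mutex_free() */
++	};
++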
++
++Similarly, dwc_alloc(), dwc_alloc_atomic(), dwc_strdup(), and dwc_free() now
++take an additional parameter, a pointer to a memory context. Examples:
++
++	addr = dwc_alloc(&usb3_dev->os_dep.memctx, size);
++
++	dwc_free(&usb3_dev->os_dep.memctx, addr);
++
++Again, for the Linux ports, it is not necessary to actually define the memctx
++member, but it is necessary to add the pointer parameter to all of the call
++sites.
++
++
++Same for dwc_dma_alloc() and dwc_dma_free(). Examples:
++
++	virt_addr = dwc_dma_alloc(&usb3_dev->os_dep.dmactx, size, &phys_addr);
++
++	dwc_dma_free(&usb3_dev->os_dep.dmactx, size, virt_addr, phys_addr);
++
++
++Same for dwc_mutex_alloc() and dwc_mutex_free(). Examples:
++
++	mutex = dwc_mutex_alloc(&usb3_dev->os_dep.mtxctx);
++
++	dwc_mutex_free(&usb3_dev->os_dep.mtxctx, mutex);
++
++
++Same for dwc_spinlock_alloc() and dwc_spinlock_free(). Examples:
++
++	lock = dwc_spinlock_alloc(&usb3_dev->os_dep.splctx);
++
++	dwc_spinlock_free(&usb3_dev->os_dep.splctx, lock);
++
++
++Same for dwc_timer_alloc(). Example:
++
++	timer = dwc_timer_alloc(&usb3_dev->os_dep.tmrctx, "dwc_usb3_tmr1",
++				cb_func, cb_data);
++
++
++Same for dwc_waitq_alloc(). Example:
++
++	waitq = dwc_waitq_alloc(&usb3_dev->os_dep.wtqctx);
++
++
++Same for dwc_thread_run(). Example:
++
++	thread = dwc_thread_run(&usb3_dev->os_dep.thdctx, func,
++				"dwc_usb3_thd1", data);
++
++
++Same for dwc_workq_alloc(). Example:
++
++	workq = dwc_workq_alloc(&usb3_dev->os_dep.wkqctx, "dwc_usb3_wkq1");
++
++
++Same for dwc_task_alloc(). Example:
++
++	task = dwc_task_alloc(&usb3_dev->os_dep.tskctx, "dwc_usb3_tsk1",
++			      cb_func, cb_data);
++
++
++In addition to the context pointer additions, a few core functions have had
++other changes made to their parameters:
++
++The 'flags' parameter to dwc_spinlock_irqsave() and dwc_spinunlock_irqrestore()
++has been changed from a uint64_t to a dwc_irqflags_t.
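++
++For example (a sketch; 'lock' is any previously allocated spinlock):
++
++	dwc_irqflags_t flags;
++
++	dwc_spinlock_irqsave(lock, &flags);
++	/* ... critical section ... */
++	dwc_spinunlock_irqrestore(lock, flags);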
++
++dwc_thread_should_stop() now takes a 'dwc_thread_t *' parameter, because the
++FreeBSD equivalent of that function requires it.
++
++And, in addition to the context pointer, dwc_task_alloc() also adds a
++'char *name' parameter, to be consistent with dwc_thread_run() and
++dwc_workq_alloc(), and because the FreeBSD equivalent of that function
++requires a unique name.
++
++
++Here is a complete list of the core functions that now take a pointer to a
++context as their first parameter:
++
++	dwc_read_reg32
++	dwc_read_reg64
++	dwc_write_reg32
++	dwc_write_reg64
++	dwc_modify_reg32
++	dwc_modify_reg64
++	dwc_alloc
++	dwc_alloc_atomic
++	dwc_strdup
++	dwc_free
++	dwc_dma_alloc
++	dwc_dma_free
++	dwc_mutex_alloc
++	dwc_mutex_free
++	dwc_spinlock_alloc
++	dwc_spinlock_free
++	dwc_timer_alloc
++	dwc_waitq_alloc
++	dwc_thread_run
++	dwc_workq_alloc
++	dwc_task_alloc     Also adds a 'char *name' as its 2nd parameter
++
++And here are the core functions that have other changes to their parameters:
++
++	dwc_spinlock_irqsave      'flags' param is now a 'dwc_irqflags_t *'
++	dwc_spinunlock_irqrestore 'flags' param is now a 'dwc_irqflags_t'
++	dwc_thread_should_stop    Adds a 'dwc_thread_t *' parameter
++
++
++
++The changes to the core functions also require some of the other library
++functions to change:
++
++	dwc_cc_if_alloc() and dwc_cc_if_free() now take a 'void *memctx'
++	(for memory allocation) as the 1st param and a 'void *mtxctx'
++	(for mutex allocation) as the 2nd param.
++
++	dwc_cc_clear(), dwc_cc_add(), dwc_cc_change(), dwc_cc_remove(),
++	dwc_cc_data_for_save(), and dwc_cc_restore_from_data() now take a
++	'void *memctx' as the 1st param.
++
++	dwc_dh_modpow(), dwc_dh_pk(), and dwc_dh_derive_keys() now take a
++	'void *memctx' as the 1st param.
++
++	dwc_modpow() now takes a 'void *memctx' as the 1st param.
++
++	dwc_alloc_notification_manager() now takes a 'void *memctx' as the
++	1st param and a 'void *wkqctx' (for work queue allocation) as the 2nd
++	param, and also now returns an integer value that is non-zero if
++	allocation of its data structures or work queue fails.
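++
++	For example (a sketch; 'dev' is the driver's device struct and
++	error handling is up to the caller):
++
++		if (dwc_alloc_notification_manager(&dev->os_dep.memctx,
++						   &dev->os_dep.wkqctx))
++			/* handle the allocation failure */;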
++
++	dwc_register_notifier() now takes a 'void *memctx' as the 1st param.
++
++	dwc_memory_debug_start() now takes a 'void *mem_ctx' as the first
++	param, and also now returns an integer value that is non-zero if
++	allocation of its data structures fails.
++
++
++
++Other miscellaneous changes:
++
++The DEBUG_MEMORY and DEBUG_REGS #define's have been renamed to
++DWC_DEBUG_MEMORY and DWC_DEBUG_REGS.
++
++The following #define's have been added to allow selectively compiling library
++features:
++
++	DWC_CCLIB
++	DWC_CRYPTOLIB
++	DWC_NOTIFYLIB
++	DWC_UTFLIB
++
++A DWC_LIBMODULE #define has also been added. If this is not defined, then the
++module code in dwc_common_linux.c is not compiled in. This allows linking the
++library code directly into a driver module, instead of as a standalone module.
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_cc.c b/drivers/usb/host/fh_otg/fh_common_port/fh_cc.c
+new file mode 100644
+index 00000000..ca1dc878
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_cc.c
+@@ -0,0 +1,532 @@
++/* =========================================================================
++ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_cc.c $
++ * $Revision: #4 $
++ * $Date: 2010/11/04 $
++ * $Change: 1621692 $
++ *
++ * Synopsys Portability Library Software and documentation
++ * (hereinafter, "Software") is an Unsupported proprietary work of
++ * Synopsys, Inc. unless otherwise expressly agreed to in writing
++ * between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for
++ * Licensed Product with Synopsys or any supplement thereto. You are
++ * permitted to use and redistribute this Software in source and binary
++ * forms, with or without modification, provided that redistributions
++ * of source code must retain this notice. You may not view, use,
++ * disclose, copy or distribute this file or any information contained
++ * herein except pursuant to this license grant from Synopsys. If you
++ * do not agree with this notice, including the disclaimer below, then
++ * you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
++ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
++ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
++ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
++ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================= */
++#ifdef FH_CCLIB
++
++#include "fh_cc.h"
++
++typedef struct fh_cc
++{
++	uint32_t uid;
++	uint8_t chid[16];
++	uint8_t cdid[16];
++	uint8_t ck[16];
++	uint8_t *name;
++	uint8_t length;
++        FH_CIRCLEQ_ENTRY(fh_cc) list_entry;
++} fh_cc_t;
++
++FH_CIRCLEQ_HEAD(context_list, fh_cc);
++
++/** The main structure for CC management.  */
++struct fh_cc_if
++{
++	fh_mutex_t *mutex;
++	char *filename;
++
++	unsigned is_host:1;
++
++	fh_notifier_t *notifier;
++
++	struct context_list list;
++};
++
++#ifdef DEBUG
++static inline void dump_bytes(char *name, uint8_t *bytes, int len)
++{
++	int i;
++	FH_PRINTF("%s: ", name);
++	for (i=0; i<len; i++) {
++		FH_PRINTF("%02x ", bytes[i]);
++	}
++	FH_PRINTF("\n");
++}
++#else
++#define dump_bytes(x...)
++#endif
++
++static fh_cc_t *alloc_cc(void *mem_ctx, uint8_t *name, uint32_t length)
++{
++	fh_cc_t *cc = fh_alloc(mem_ctx, sizeof(fh_cc_t));
++	if (!cc) {
++		return NULL;
++	}
++	FH_MEMSET(cc, 0, sizeof(fh_cc_t));
++
++	if (name) {
++		cc->length = length;
++		cc->name = fh_alloc(mem_ctx, length);
++		if (!cc->name) {
++			fh_free(mem_ctx, cc);
++			return NULL;
++		}
++
++		FH_MEMCPY(cc->name, name, length);
++	}
++
++	return cc;
++}
++
++static void free_cc(void *mem_ctx, fh_cc_t *cc)
++{
++	if (cc->name) {
++		fh_free(mem_ctx, cc->name);
++	}
++	fh_free(mem_ctx, cc);
++}
++
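++/* Returns one more than the largest UID in the list. An empty list yields
++ * 255 + 1, so the first UID handed out is 256 and 0 can mean "not found". */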
++static uint32_t next_uid(fh_cc_if_t *cc_if)
++{
++	uint32_t uid = 0;
++	fh_cc_t *cc;
++	FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
++		if (cc->uid > uid) {
++			uid = cc->uid;
++		}
++	}
++
++	if (uid == 0) {
++		uid = 255;
++	}
++
++	return uid + 1;
++}
++
++static fh_cc_t *cc_find(fh_cc_if_t *cc_if, uint32_t uid)
++{
++	fh_cc_t *cc;
++	FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
++		if (cc->uid == uid) {
++			return cc;
++		}
++	}
++	return NULL;
++}
++
++static unsigned int cc_data_size(fh_cc_if_t *cc_if)
++{
++	unsigned int size = 0;
++	fh_cc_t *cc;
++	FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
++		size += (48 + 1);
++		if (cc->name) {
++			size += cc->length;
++		}
++	}
++	return size;
++}
++
++static uint32_t cc_match_chid(fh_cc_if_t *cc_if, uint8_t *chid)
++{
++	uint32_t uid = 0;
++	fh_cc_t *cc;
++
++	FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
++		if (FH_MEMCMP(cc->chid, chid, 16) == 0) {
++			uid = cc->uid;
++			break;
++		}
++	}
++	return uid;
++}
++static uint32_t cc_match_cdid(fh_cc_if_t *cc_if, uint8_t *cdid)
++{
++	uint32_t uid = 0;
++	fh_cc_t *cc;
++
++	FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
++		if (FH_MEMCMP(cc->cdid, cdid, 16) == 0) {
++			uid = cc->uid;
++			break;
++		}
++	}
++	return uid;
++}
++
++/* Internal cc_add */
++static int32_t cc_add(void *mem_ctx, fh_cc_if_t *cc_if, uint8_t *chid,
++		      uint8_t *cdid, uint8_t *ck, uint8_t *name, uint8_t length)
++{
++	fh_cc_t *cc;
++	uint32_t uid;
++
++	if (cc_if->is_host) {
++		uid = cc_match_cdid(cc_if, cdid);
++	}
++	else {
++		uid = cc_match_chid(cc_if, chid);
++	}
++
++	if (uid) {
++		FH_DEBUG("Replacing previous connection context id=%d name=%p name_len=%d", uid, name, length);
++		cc = cc_find(cc_if, uid);
++	}
++	else {
++		cc = alloc_cc(mem_ctx, name, length);
++		if (!cc) {
++			/* allocation failed; 0 is never a valid UID */
++			return 0;
++		}
++		cc->uid = next_uid(cc_if);
++		FH_CIRCLEQ_INSERT_TAIL(&cc_if->list, cc, list_entry);
++	}
++
++	FH_MEMCPY(&(cc->chid[0]), chid, 16);
++	FH_MEMCPY(&(cc->cdid[0]), cdid, 16);
++	FH_MEMCPY(&(cc->ck[0]), ck, 16);
++
++	FH_DEBUG("Added connection context id=%d name=%p name_len=%d", cc->uid, name, length);
++	dump_bytes("CHID", cc->chid, 16);
++	dump_bytes("CDID", cc->cdid, 16);
++	dump_bytes("CK", cc->ck, 16);
++	return cc->uid;
++}
++
++/* Internal cc_clear */
++static void cc_clear(void *mem_ctx, fh_cc_if_t *cc_if)
++{
++	while (!FH_CIRCLEQ_EMPTY(&cc_if->list)) {
++		fh_cc_t *cc = FH_CIRCLEQ_FIRST(&cc_if->list);
++		FH_CIRCLEQ_REMOVE_INIT(&cc_if->list, cc, list_entry);
++		free_cc(mem_ctx, cc);
++	}
++}
++
++fh_cc_if_t *fh_cc_if_alloc(void *mem_ctx, void *mtx_ctx, 
++			     fh_notifier_t *notifier, unsigned is_host)
++{
++	fh_cc_if_t *cc_if = NULL;
++
++	/* Allocate a common_cc_if structure */
++	cc_if = fh_alloc(mem_ctx, sizeof(fh_cc_if_t));
++
++	if (!cc_if)
++		return NULL;
++
++#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
++	FH_MUTEX_ALLOC_LINUX_DEBUG(cc_if->mutex);
++#else
++	cc_if->mutex = fh_mutex_alloc(mtx_ctx);
++#endif
++	if (!cc_if->mutex) {
++		fh_free(mem_ctx, cc_if);
++		return NULL;
++	}
++
++	FH_CIRCLEQ_INIT(&cc_if->list);
++	cc_if->is_host = is_host;
++	cc_if->notifier = notifier;
++	return cc_if;
++}
++
++void fh_cc_if_free(void *mem_ctx, void *mtx_ctx, fh_cc_if_t *cc_if)
++{
++#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
++	FH_MUTEX_FREE(cc_if->mutex);
++#else
++	fh_mutex_free(mtx_ctx, cc_if->mutex);
++#endif
++	cc_clear(mem_ctx, cc_if);
++	fh_free(mem_ctx, cc_if);
++}
++
++static void cc_changed(fh_cc_if_t *cc_if)
++{
++	if (cc_if->notifier) {
++		fh_notify(cc_if->notifier, FH_CC_LIST_CHANGED_NOTIFICATION, cc_if);
++	}
++}
++
++void fh_cc_clear(void *mem_ctx, fh_cc_if_t *cc_if)
++{
++	FH_MUTEX_LOCK(cc_if->mutex);
++	cc_clear(mem_ctx, cc_if);
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++	cc_changed(cc_if);
++}
++
++int32_t fh_cc_add(void *mem_ctx, fh_cc_if_t *cc_if, uint8_t *chid,
++		   uint8_t *cdid, uint8_t *ck, uint8_t *name, uint8_t length)
++{
++	uint32_t uid;
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	uid = cc_add(mem_ctx, cc_if, chid, cdid, ck, name, length);
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++	cc_changed(cc_if);
++
++	return uid;
++}
++
++void fh_cc_change(void *mem_ctx, fh_cc_if_t *cc_if, int32_t id, uint8_t *chid,
++		   uint8_t *cdid, uint8_t *ck, uint8_t *name, uint8_t length)
++{
++	fh_cc_t* cc;
++
++	FH_DEBUG("Change connection context %d", id);
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	cc = cc_find(cc_if, id);
++	if (!cc) {
++		FH_ERROR("Uid %d not found in cc list\n", id);
++		FH_MUTEX_UNLOCK(cc_if->mutex);
++		return;
++	}
++
++	if (chid) {
++		FH_MEMCPY(&(cc->chid[0]), chid, 16);
++	}
++	if (cdid) {
++		FH_MEMCPY(&(cc->cdid[0]), cdid, 16);
++	}
++	if (ck) {
++		FH_MEMCPY(&(cc->ck[0]), ck, 16);
++	}
++
++	if (name) {
++		if (cc->name) {
++			fh_free(mem_ctx, cc->name);
++		}
++		cc->name = fh_alloc(mem_ctx, length);
++		if (!cc->name) {
++			cc->length = 0;
++			FH_ERROR("Out of memory in fh_cc_change()\n");
++			FH_MUTEX_UNLOCK(cc_if->mutex);
++			return;
++		}
++		cc->length = length;
++		FH_MEMCPY(cc->name, name, length);
++	}
++
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++
++	cc_changed(cc_if);
++
++	FH_DEBUG("Changed connection context id=%d\n", id);
++	dump_bytes("New CHID", cc->chid, 16);
++	dump_bytes("New CDID", cc->cdid, 16);
++	dump_bytes("New CK", cc->ck, 16);
++}
++
++void fh_cc_remove(void *mem_ctx, fh_cc_if_t *cc_if, int32_t id)
++{
++	fh_cc_t *cc;
++
++	FH_DEBUG("Removing connection context %d", id);
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	cc = cc_find(cc_if, id);
++	if (!cc) {
++		FH_ERROR("Uid %d not found in cc list\n", id);
++		FH_MUTEX_UNLOCK(cc_if->mutex);
++		return;
++	}
++
++	FH_CIRCLEQ_REMOVE_INIT(&cc_if->list, cc, list_entry);
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++	free_cc(mem_ctx, cc);
++
++	cc_changed(cc_if);
++}
++
++uint8_t *fh_cc_data_for_save(void *mem_ctx, fh_cc_if_t *cc_if, unsigned int *length)
++{
++	uint8_t *buf, *x;
++	uint8_t zero = 0;
++	fh_cc_t *cc;
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	*length = cc_data_size(cc_if);
++	if (!(*length)) {
++		FH_MUTEX_UNLOCK(cc_if->mutex);
++		return NULL;
++	}
++
++	FH_DEBUG("Creating data for saving (length=%d)", *length);
++
++	buf = fh_alloc(mem_ctx, *length);
++	if (!buf) {
++		*length = 0;
++		FH_MUTEX_UNLOCK(cc_if->mutex);
++		return NULL;
++	}
++
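++	/* Serialize each context as: 16-byte CHID, 16-byte CDID, 16-byte CK,
++	 * a 1-byte name length, then the name bytes (length byte 0 = no name). */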
++	x = buf;
++	FH_CIRCLEQ_FOREACH(cc, &cc_if->list, list_entry) {
++		FH_MEMCPY(x, cc->chid, 16);
++		x += 16;
++		FH_MEMCPY(x, cc->cdid, 16);
++		x += 16;
++		FH_MEMCPY(x, cc->ck, 16);
++		x += 16;
++		if (cc->name) {
++			FH_MEMCPY(x, &cc->length, 1);
++			x += 1;
++			FH_MEMCPY(x, cc->name, cc->length);
++			x += cc->length;
++		}
++		else {
++			FH_MEMCPY(x, &zero, 1);
++			x += 1;
++		}
++	}
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++
++	return buf;
++}
++
++void fh_cc_restore_from_data(void *mem_ctx, fh_cc_if_t *cc_if, uint8_t *data, uint32_t length)
++{
++	uint8_t name_length;
++	uint8_t *name;
++	uint8_t *chid;
++	uint8_t *cdid;
++	uint8_t *ck;
++	uint32_t i = 0;
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	cc_clear(mem_ctx, cc_if);
++
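++	/* Parse records in the layout written by fh_cc_data_for_save():
++	 * CHID(16) | CDID(16) | CK(16) | name_length(1) | name bytes. */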
++	while (i < length) {
++		chid = &data[i];
++		i += 16;
++		cdid = &data[i];
++		i += 16;
++		ck = &data[i];
++		i += 16;
++
++		name_length = data[i];
++		i++;
++
++		if (name_length) {
++			name = &data[i];
++			i += name_length;
++		}
++		else {
++			name = NULL;
++		}
++
++		/* check that we have not overflowed the buffer */
++		if (i > length) {
++			FH_ERROR("Data format error while attempting to load CCs "
++				  "(nlen=%d, iter=%d, buflen=%d).\n", name_length, i, length);
++			break;
++		}
++
++		cc_add(mem_ctx, cc_if, chid, cdid, ck, name, name_length);
++	}
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++
++	cc_changed(cc_if);
++}
++
++uint32_t fh_cc_match_chid(fh_cc_if_t *cc_if, uint8_t *chid)
++{
++	uint32_t uid = 0;
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	uid = cc_match_chid(cc_if, chid);
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++	return uid;
++}
++uint32_t fh_cc_match_cdid(fh_cc_if_t *cc_if, uint8_t *cdid)
++{
++	uint32_t uid = 0;
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	uid = cc_match_cdid(cc_if, cdid);
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++	return uid;
++}
++
++uint8_t *fh_cc_ck(fh_cc_if_t *cc_if, int32_t id)
++{
++	uint8_t *ck = NULL;
++	fh_cc_t *cc;
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	cc = cc_find(cc_if, id);
++	if (cc) {
++		ck = cc->ck;
++	}
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++
++	return ck;
++
++}
++
++uint8_t *fh_cc_chid(fh_cc_if_t *cc_if, int32_t id)
++{
++	uint8_t *retval = NULL;
++	fh_cc_t *cc;
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	cc = cc_find(cc_if, id);
++	if (cc) {
++		retval = cc->chid;
++	}
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++
++	return retval;
++}
++
++uint8_t *fh_cc_cdid(fh_cc_if_t *cc_if, int32_t id)
++{
++	uint8_t *retval = NULL;
++	fh_cc_t *cc;
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	cc = cc_find(cc_if, id);
++	if (cc) {
++		retval = cc->cdid;
++	}
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++
++	return retval;
++}
++
++uint8_t *fh_cc_name(fh_cc_if_t *cc_if, int32_t id, uint8_t *length)
++{
++	uint8_t *retval = NULL;
++	fh_cc_t *cc;
++
++	FH_MUTEX_LOCK(cc_if->mutex);
++	*length = 0;
++	cc = cc_find(cc_if, id);
++	if (cc) {
++		*length = cc->length;
++		retval = cc->name;
++	}
++	FH_MUTEX_UNLOCK(cc_if->mutex);
++
++	return retval;
++}
++
++#endif	/* FH_CCLIB */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_cc.h b/drivers/usb/host/fh_otg/fh_common_port/fh_cc.h
+new file mode 100644
+index 00000000..926229e9
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_cc.h
+@@ -0,0 +1,225 @@
++/* =========================================================================
++ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_cc.h $
++ * $Revision: #4 $
++ * $Date: 2010/09/28 $
++ * $Change: 1596182 $
++ *
++ * Synopsys Portability Library Software and documentation
++ * (hereinafter, "Software") is an Unsupported proprietary work of
++ * Synopsys, Inc. unless otherwise expressly agreed to in writing
++ * between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for
++ * Licensed Product with Synopsys or any supplement thereto. You are
++ * permitted to use and redistribute this Software in source and binary
++ * forms, with or without modification, provided that redistributions
++ * of source code must retain this notice. You may not view, use,
++ * disclose, copy or distribute this file or any information contained
++ * herein except pursuant to this license grant from Synopsys. If you
++ * do not agree with this notice, including the disclaimer below, then
++ * you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
++ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
++ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
++ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
++ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================= */
++#ifndef _FH_CC_H_
++#define _FH_CC_H_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/** @file
++ *
++ * This file defines the Connection Context library.
++ *
++ * The main data structure is fh_cc_if_t, which is returned either by the
++ * fh_cc_if_alloc function or by the module to the user via a provided
++ * function. The data structure is opaque and should only be manipulated via
++ * the functions provided in this API.
++ *
++ * It manages a list of connection contexts and operations can be performed to
++ * add, remove, query, search, and change those contexts.  Additionally,
++ * a fh_notifier_t object can be requested from the manager so that
++ * the user can be notified whenever the context list has changed.
++ */
++
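++/* A minimal usage sketch (illustrative only; mem_ctx and mtx_ctx are the
++ * caller's opaque allocation contexts, and chid, cdid and ck are
++ * caller-provided 16-byte buffers):
++ *
++ *	fh_cc_if_t *cc_if = fh_cc_if_alloc(mem_ctx, mtx_ctx, NULL, 1);
++ *	int32_t id = fh_cc_add(mem_ctx, cc_if, chid, cdid, ck, NULL, 0);
++ *	uint8_t *ck_out = fh_cc_ck(cc_if, id);
++ *	fh_cc_if_free(mem_ctx, mtx_ctx, cc_if);
++ */
++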
++#include "fh_os.h"
++#include "fh_list.h"
++#include "fh_notifier.h"
++
++
++/* Notifications */
++#define FH_CC_LIST_CHANGED_NOTIFICATION "FH_CC_LIST_CHANGED_NOTIFICATION"
++
++struct fh_cc_if;
++typedef struct fh_cc_if fh_cc_if_t;
++
++
++/** @name Connection Context Operations */
++/** @{ */
++
++/** This function allocates memory for a fh_cc_if_t structure, initializes
++ * fields to default values, and returns a pointer to the structure or NULL on
++ * error. */
++extern fh_cc_if_t *fh_cc_if_alloc(void *mem_ctx, void *mtx_ctx,
++				    fh_notifier_t *notifier, unsigned is_host);
++
++/** Frees the memory for the specified CC structure allocated from
++ * fh_cc_if_alloc(). */
++extern void fh_cc_if_free(void *mem_ctx, void *mtx_ctx, fh_cc_if_t *cc_if);
++
++/** Removes all contexts from the connection context list */
++extern void fh_cc_clear(void *mem_ctx, fh_cc_if_t *cc_if);
++
++/** Adds a connection context (CHID, CK, CDID, Name) to the connection context list.
++ * If a CHID already exists, the CK and name are overwritten.  Statistics are
++ * not overwritten.
++ *
++ * @param cc_if The cc_if structure.
++ * @param chid A pointer to the 16-byte CHID.  This value will be copied.
++ * @param ck A pointer to the 16-byte CK.  This value will be copied.
++ * @param cdid A pointer to the 16-byte CDID.  This value will be copied.
++ * @param name An optional host friendly name as defined in the association model
++ * spec.  Must be a UTF16-LE unicode string.  Can be NULL to indicate no name.
++ * @param length The length of the unicode string.
++ * @return A unique identifier used to refer to this context that is valid for
++ * as long as this context is still in the list. */
++extern int32_t fh_cc_add(void *mem_ctx, fh_cc_if_t *cc_if, uint8_t *chid,
++			  uint8_t *cdid, uint8_t *ck, uint8_t *name,
++			  uint8_t length);
++
++/** Changes the CHID, CK, CDID, or Name values of a connection context in the
++ * list, preserving any accumulated statistics.  This would typically be called
++ * if the host decides to change the context with a SET_CONNECTION request.
++ *
++ * @param cc_if The cc_if structure.
++ * @param id The identifier of the connection context.
++ * @param chid A pointer to the 16-byte CHID.  This value will be copied.  NULL
++ * indicates no change.
++ * @param cdid A pointer to the 16-byte CDID.  This value will be copied.  NULL
++ * indicates no change.
++ * @param ck A pointer to the 16-byte CK.  This value will be copied.  NULL
++ * indicates no change.
++ * @param name Host friendly name UTF16-LE.  NULL indicates no change.
++ * @param length Length of name. */
++extern void fh_cc_change(void *mem_ctx, fh_cc_if_t *cc_if, int32_t id,
++			  uint8_t *chid, uint8_t *cdid, uint8_t *ck,
++			  uint8_t *name, uint8_t length);
++
++/** Remove the specified connection context.
++ * @param cc_if The cc_if structure.
++ * @param id The identifier of the connection context to remove. */
++extern void fh_cc_remove(void *mem_ctx, fh_cc_if_t *cc_if, int32_t id);
++
++/** Get a binary block of data for the connection context list and attributes.
++ * This data can be used by the OS specific driver to save the connection
++ * context list into non-volatile memory.
++ *
++ * @param cc_if The cc_if structure.
++ * @param length Return the length of the data buffer.
++ * @return A pointer to the data buffer.  The memory for this buffer should be
++ * freed with FH_FREE() after use. */
++extern uint8_t *fh_cc_data_for_save(void *mem_ctx, fh_cc_if_t *cc_if,
++				     unsigned int *length);
++
++/** Restore the connection context list from the binary data that was previously
++ * returned from a call to fh_cc_data_for_save.  This can be used by the OS specific
++ * driver to load a connection context list from non-volatile memory.
++ *
++ * @param cc_if The cc_if structure.
++ * @param data The data bytes as returned from fh_cc_data_for_save.
++ * @param length The length of the data. */
++extern void fh_cc_restore_from_data(void *mem_ctx, fh_cc_if_t *cc_if,
++				     uint8_t *data, unsigned int length);
++
++/** Find the connection context from the specified CHID.
++ *
++ * @param cc_if The cc_if structure.
++ * @param chid A pointer to the CHID data.
++ * @return A non-zero identifier of the connection context if the CHID matches.
++ * Otherwise returns 0. */
++extern uint32_t fh_cc_match_chid(fh_cc_if_t *cc_if, uint8_t *chid);
++
++/** Find the connection context from the specified CDID.
++ *
++ * @param cc_if The cc_if structure.
++ * @param cdid A pointer to the CDID data.
++ * @return A non-zero identifier of the connection context if the CDID matches.
++ * Otherwise returns 0. */
++extern uint32_t fh_cc_match_cdid(fh_cc_if_t *cc_if, uint8_t *cdid);
++
++/** Retrieve the CK from the specified connection context.
++ *
++ * @param cc_if The cc_if structure.
++ * @param id The identifier of the connection context.
++ * @return A pointer to the CK data.  The memory does not need to be freed. */
++extern uint8_t *fh_cc_ck(fh_cc_if_t *cc_if, int32_t id);
++
++/** Retrieve the CHID from the specified connection context.
++ *
++ * @param cc_if The cc_if structure.
++ * @param id The identifier of the connection context.
++ * @return A pointer to the CHID data.  The memory does not need to be freed. */
++extern uint8_t *fh_cc_chid(fh_cc_if_t *cc_if, int32_t id);
++
++/** Retrieve the CDID from the specified connection context.
++ *
++ * @param cc_if The cc_if structure.
++ * @param id The identifier of the connection context.
++ * @return A pointer to the CDID data.  The memory does not need to be freed. */
++extern uint8_t *fh_cc_cdid(fh_cc_if_t *cc_if, int32_t id);
++
++extern uint8_t *fh_cc_name(fh_cc_if_t *cc_if, int32_t id, uint8_t *length);
++
++/** Checks a buffer for non-zero.
++ * @param id A pointer to a 16-byte buffer.
++ * @return true if the 16-byte value is non-zero. */
++static inline unsigned fh_assoc_is_not_zero_id(uint8_t *id) {
++	int i;
++	for (i=0; i<16; i++) {
++		if (id[i]) return 1;
++	}
++	return 0;
++}
++
++/** Checks a buffer for zero.
++ * @param id A pointer to a 16-byte buffer.
++ * @return true if the 16-byte value is zero. */
++static inline unsigned fh_assoc_is_zero_id(uint8_t *id) {
++	return !fh_assoc_is_not_zero_id(id);
++}
++
++/** Prints an ASCII representation for the 16-byte chid, cdid, or ck into
++ * buffer, which must hold at least 48 bytes.  Returns the number of
++ * characters written, not counting the terminating NUL. */
++static inline int fh_print_id_string(char *buffer, uint8_t *id) {
++	char *ptr = buffer;
++	int i;
++	for (i=0; i<16; i++) {
++		ptr += FH_SPRINTF(ptr, "%02x", id[i]);
++		if (i < 15) {
++			ptr += FH_SPRINTF(ptr, " ");
++		}
++	}
++	return ptr - buffer;
++}
++
++/** @} */
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* _FH_CC_H_ */
++
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_common_fbsd.c b/drivers/usb/host/fh_otg/fh_common_port/fh_common_fbsd.c
+new file mode 100644
+index 00000000..ef4b67bb
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_common_fbsd.c
+@@ -0,0 +1,1308 @@
++#include "fh_os.h"
++#include "fh_list.h"
++
++#ifdef FH_CCLIB
++# include "fh_cc.h"
++#endif
++
++#ifdef FH_CRYPTOLIB
++# include "fh_modpow.h"
++# include "fh_dh.h"
++# include "fh_crypto.h"
++#endif
++
++#ifdef FH_NOTIFYLIB
++# include "fh_notifier.h"
++#endif
++
++/* OS-Level Implementations */
++
++/* This is the FreeBSD 7.0 kernel implementation of the FH platform library. */
++
++
++/* MISC */
++
++void *FH_MEMSET(void *dest, uint8_t byte, uint32_t size)
++{
++	return memset(dest, byte, size);
++}
++
++void *FH_MEMCPY(void *dest, void const *src, uint32_t size)
++{
++	return memcpy(dest, src, size);
++}
++
++void *FH_MEMMOVE(void *dest, void *src, uint32_t size)
++{
++	bcopy(src, dest, size);
++	return dest;
++}
++
++int FH_MEMCMP(void *m1, void *m2, uint32_t size)
++{
++	return memcmp(m1, m2, size);
++}
++
++int FH_STRNCMP(void *s1, void *s2, uint32_t size)
++{
++	return strncmp(s1, s2, size);
++}
++
++int FH_STRCMP(void *s1, void *s2)
++{
++	return strcmp(s1, s2);
++}
++
++int FH_STRLEN(char const *str)
++{
++	return strlen(str);
++}
++
++char *FH_STRCPY(char *to, char const *from)
++{
++	return strcpy(to, from);
++}
++
++char *FH_STRDUP(char const *str)
++{
++	int len = FH_STRLEN(str) + 1;
++	char *new = FH_ALLOC_ATOMIC(len);
++
++	if (!new) {
++		return NULL;
++	}
++
++	FH_MEMCPY(new, str, len);
++	return new;
++}
++
++int FH_ATOI(char *str, int32_t *value)
++{
++	char *end = NULL;
++
++	*value = strtol(str, &end, 0);
++	if (*end == '\0') {
++		return 0;
++	}
++
++	return -1;
++}
++
++int FH_ATOUI(char *str, uint32_t *value)
++{
++	char *end = NULL;
++
++	*value = strtoul(str, &end, 0);
++	if (*end == '\0') {
++		return 0;
++	}
++
++	return -1;
++}
++
++
++#ifdef FH_UTFLIB
++/* From usbstring.c */
++
++int FH_UTF8_TO_UTF16LE(uint8_t const *s, uint16_t *cp, unsigned len)
++{
++	int	count = 0;
++	u8	c;
++	u16	uchar;
++
++	/* this insists on correct encodings, though not minimal ones.
++	 * BUT it currently rejects legit 4-byte UTF-8 code points,
++	 * which need surrogate pairs.  (Unicode 3.1 can use them.)
++	 */
++	while (len != 0 && (c = (u8) *s++) != 0) {
++		if (unlikely(c & 0x80)) {
++			// 2-byte sequence:
++			// 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
++			if ((c & 0xe0) == 0xc0) {
++				uchar = (c & 0x1f) << 6;
++
++				c = (u8) *s++;
++				if ((c & 0xc0) != 0xc0)
++					goto fail;
++				c &= 0x3f;
++				uchar |= c;
++
++			// 3-byte sequence (most CJKV characters):
++			// zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
++			} else if ((c & 0xf0) == 0xe0) {
++				uchar = (c & 0x0f) << 12;
++
++				c = (u8) *s++;
++				if ((c & 0xc0) != 0xc0)
++					goto fail;
++				c &= 0x3f;
++				uchar |= c << 6;
++
++				c = (u8) *s++;
++				if ((c & 0xc0) != 0xc0)
++					goto fail;
++				c &= 0x3f;
++				uchar |= c;
++
++				/* no bogus surrogates */
++				if (0xd800 <= uchar && uchar <= 0xdfff)
++					goto fail;
++
++			// 4-byte sequence (surrogate pairs, currently rare):
++			// 11101110wwwwzzzzyy + 110111yyyyxxxxxx
++			//     = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
++			// (uuuuu = wwww + 1)
++			// FIXME accept the surrogate code points (only)
++			} else
++				goto fail;
++		} else
++			uchar = c;
++		put_unaligned (cpu_to_le16 (uchar), cp++);
++		count++;
++		len--;
++	}
++	return count;
++fail:
++	return -1;
++}
++
++#endif	/* FH_UTFLIB */
++
++
++/* fh_debug.h */
++
++fh_bool_t FH_IN_IRQ(void)
++{
++//	return in_irq();
++	return 0;
++}
++
++fh_bool_t FH_IN_BH(void)
++{
++//	return in_softirq();
++	return 0;
++}
++
++void FH_VPRINTF(char *format, va_list args)
++{
++	vprintf(format, args);
++}
++
++int FH_VSNPRINTF(char *str, int size, char *format, va_list args)
++{
++	return vsnprintf(str, size, format, args);
++}
++
++void FH_PRINTF(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++
++int FH_SPRINTF(char *buffer, char *format, ...)
++{
++	int retval;
++	va_list args;
++
++	va_start(args, format);
++	retval = vsprintf(buffer, format, args);
++	va_end(args);
++	return retval;
++}
++
++int FH_SNPRINTF(char *buffer, int size, char *format, ...)
++{
++	int retval;
++	va_list args;
++
++	va_start(args, format);
++	retval = vsnprintf(buffer, size, format, args);
++	va_end(args);
++	return retval;
++}
++
++void __FH_WARN(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++
++void __FH_ERROR(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++
++void FH_EXCEPTION(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++//	BUG_ON(1);	???
++}
++
++#ifdef DEBUG
++void __FH_DEBUG(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++#endif
++
++
++/* fh_mem.h */
++
++#if 0
++fh_pool_t *FH_DMA_POOL_CREATE(uint32_t size,
++				uint32_t align,
++				uint32_t alloc)
++{
++	struct dma_pool *pool = dma_pool_create("Pool", NULL,
++						size, align, alloc);
++	return (fh_pool_t *)pool;
++}
++
++void FH_DMA_POOL_DESTROY(fh_pool_t *pool)
++{
++	dma_pool_destroy((struct dma_pool *)pool);
++}
++
++void *FH_DMA_POOL_ALLOC(fh_pool_t *pool, uint64_t *dma_addr)
++{
++//	return dma_pool_alloc((struct dma_pool *)pool, GFP_KERNEL, dma_addr);
++	return dma_pool_alloc((struct dma_pool *)pool, M_WAITOK, dma_addr);
++}
++
++void *FH_DMA_POOL_ZALLOC(fh_pool_t *pool, uint64_t *dma_addr)
++{
++	void *vaddr = FH_DMA_POOL_ALLOC(pool, dma_addr);
++	memset(..);
++}
++
++void FH_DMA_POOL_FREE(fh_pool_t *pool, void *vaddr, void *daddr)
++{
++	dma_pool_free(pool, vaddr, daddr);
++}
++#endif
++
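++/* bus_dmamap_load() callback: record the single DMA segment's bus address
++ * so __FH_DMA_ALLOC() can hand it back through *dma_addr. */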
++static void dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
++{
++	if (error)
++		return;
++	*(bus_addr_t *)arg = segs[0].ds_addr;
++}
++
++void *__FH_DMA_ALLOC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr)
++{
++	fh_dmactx_t *dma = (fh_dmactx_t *)dma_ctx;
++	int error;
++
++	error = bus_dma_tag_create(
++#if __FreeBSD_version >= 700000
++			bus_get_dma_tag(dma->dev),	/* parent */
++#else
++			NULL,				/* parent */
++#endif
++			4, 0,				/* alignment, bounds */
++			BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
++			BUS_SPACE_MAXADDR,		/* highaddr */
++			NULL, NULL,			/* filter, filterarg */
++			size,				/* maxsize */
++			1,				/* nsegments */
++			size,				/* maxsegsize */
++			0,				/* flags */
++			NULL,				/* lockfunc */
++			NULL,				/* lockarg */
++			&dma->dma_tag);
++	if (error) {
++		device_printf(dma->dev, "%s: bus_dma_tag_create failed: %d\n",
++			      __func__, error);
++		goto fail_0;
++	}
++
++	error = bus_dmamem_alloc(dma->dma_tag, &dma->dma_vaddr,
++				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
++	if (error) {
++		device_printf(dma->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n",
++			      __func__, (uintmax_t)size, error);
++		goto fail_1;
++	}
++
++	dma->dma_paddr = 0;
++	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
++				dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
++	if (error || dma->dma_paddr == 0) {
++		device_printf(dma->dev, "%s: bus_dmamap_load failed: %d\n",
++			      __func__, error);
++		goto fail_2;
++	}
++
++	*dma_addr = dma->dma_paddr;
++	return dma->dma_vaddr;
++
++fail_2:
++	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
++fail_1:
++	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
++	bus_dma_tag_destroy(dma->dma_tag);
++fail_0:
++	dma->dma_map = NULL;
++	dma->dma_tag = NULL;
++
++	return NULL;
++}
++
++void __FH_DMA_FREE(void *dma_ctx, uint32_t size, void *virt_addr, fh_dma_t dma_addr)
++{
++	fh_dmactx_t *dma = (fh_dmactx_t *)dma_ctx;
++
++	if (dma->dma_tag == NULL)
++		return;
++	if (dma->dma_map != NULL) {
++		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
++				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
++		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
++		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
++		dma->dma_map = NULL;
++	}
++
++	bus_dma_tag_destroy(dma->dma_tag);
++	dma->dma_tag = NULL;
++}
++
++void *__FH_ALLOC(void *mem_ctx, uint32_t size)
++{
++	return malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
++}
++
++void *__FH_ALLOC_ATOMIC(void *mem_ctx, uint32_t size)
++{
++	return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
++}
++
++void __FH_FREE(void *mem_ctx, void *addr)
++{
++	free(addr, M_DEVBUF);
++}
++
++
++#ifdef FH_CRYPTOLIB
++/* fh_crypto.h */
++
++void FH_RANDOM_BYTES(uint8_t *buffer, uint32_t length)
++{
++	get_random_bytes(buffer, length);
++}
++
++int FH_AES_CBC(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t iv[16], uint8_t *out)
++{
++	struct crypto_blkcipher *tfm;
++	struct blkcipher_desc desc;
++	struct scatterlist sgd;
++	struct scatterlist sgs;
++
++	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
++	if (IS_ERR(tfm)) {
++		FH_ERROR("Failed to load transform for cbc(aes): %ld", PTR_ERR(tfm));
++		return -1;
++	}
++
++	crypto_blkcipher_setkey(tfm, key, keylen);
++	crypto_blkcipher_set_iv(tfm, iv, 16);
++
++	sg_init_one(&sgd, out, messagelen);
++	sg_init_one(&sgs, message, messagelen);
++
++	desc.tfm = tfm;
++	desc.flags = 0;
++
++	if (crypto_blkcipher_encrypt(&desc, &sgd, &sgs, messagelen)) {
++		crypto_free_blkcipher(tfm);
++		FH_ERROR("AES CBC encryption failed");
++		return -1;
++	}
++
++	crypto_free_blkcipher(tfm);
++	return 0;
++}
++
++int FH_SHA256(uint8_t *message, uint32_t len, uint8_t *out)
++{
++	struct crypto_hash *tfm;
++	struct hash_desc desc;
++	struct scatterlist sg;
++
++	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
++	if (IS_ERR(tfm)) {
++		FH_ERROR("Failed to load transform for sha256: %ld", PTR_ERR(tfm));
++		return 0;
++	}
++	desc.tfm = tfm;
++	desc.flags = 0;
++
++	sg_init_one(&sg, message, len);
++	crypto_hash_digest(&desc, &sg, len, out);
++	crypto_free_hash(tfm);
++
++	return 1;
++}
++
++int FH_HMAC_SHA256(uint8_t *message, uint32_t messagelen,
++		    uint8_t *key, uint32_t keylen, uint8_t *out)
++{
++	struct crypto_hash *tfm;
++	struct hash_desc desc;
++	struct scatterlist sg;
++
++	tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
++	if (IS_ERR(tfm)) {
++		FH_ERROR("Failed to load transform for hmac(sha256): %ld", PTR_ERR(tfm));
++		return 0;
++	}
++	desc.tfm = tfm;
++	desc.flags = 0;
++
++	sg_init_one(&sg, message, messagelen);
++	crypto_hash_setkey(tfm, key, keylen);
++	crypto_hash_digest(&desc, &sg, messagelen, out);
++	crypto_free_hash(tfm);
++
++	return 1;
++}
++
++#endif	/* FH_CRYPTOLIB */
++
++
++/* Byte Ordering Conversions */
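++/* Note: unlike typical cpu_to_le32()-style macros, these helpers take a
++ * pointer to the value and return the converted result by value. */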
++
++uint32_t FH_CPU_TO_LE32(uint32_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint32_t FH_CPU_TO_BE32(uint32_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint32_t FH_LE32_TO_CPU(uint32_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint32_t FH_BE32_TO_CPU(uint32_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint16_t FH_CPU_TO_LE16(uint16_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++uint16_t FH_CPU_TO_BE16(uint16_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++uint16_t FH_LE16_TO_CPU(uint16_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++uint16_t FH_BE16_TO_CPU(uint16_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++
++/* Registers */
++
++uint32_t FH_READ_REG32(void *io_ctx, uint32_t volatile *reg)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	return bus_space_read_4(io->iot, io->ioh, ior);
++}
++
++#if 0
++uint64_t FH_READ_REG64(void *io_ctx, uint64_t volatile *reg)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	return bus_space_read_8(io->iot, io->ioh, ior);
++}
++#endif
++
++void FH_WRITE_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t value)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	bus_space_write_4(io->iot, io->ioh, ior, value);
++}
++
++#if 0
++void FH_WRITE_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t value)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	bus_space_write_8(io->iot, io->ioh, ior, value);
++}
++#endif
++
++void FH_MODIFY_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t clear_mask,
++		      uint32_t set_mask)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	bus_space_write_4(io->iot, io->ioh, ior,
++			  (bus_space_read_4(io->iot, io->ioh, ior) &
++			   ~clear_mask) | set_mask);
++}
++
++#if 0
++void FH_MODIFY_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t clear_mask,
++		      uint64_t set_mask)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	bus_space_write_8(io->iot, io->ioh, ior,
++			  (bus_space_read_8(io->iot, io->ioh, ior) &
++			   ~clear_mask) | set_mask);
++}
++#endif
++
++
++/* Locking */
++
++fh_spinlock_t *FH_SPINLOCK_ALLOC(void)
++{
++	struct mtx *sl = FH_ALLOC(sizeof(*sl));
++
++	if (!sl) {
++		FH_ERROR("Cannot allocate memory for spinlock");
++		return NULL;
++	}
++
++	mtx_init(sl, "dw3spn", NULL, MTX_SPIN);
++	return (fh_spinlock_t *)sl;
++}
++
++void FH_SPINLOCK_FREE(fh_spinlock_t *lock)
++{
++	struct mtx *sl = (struct mtx *)lock;
++
++	mtx_destroy(sl);
++	FH_FREE(sl);
++}
++
++void FH_SPINLOCK(fh_spinlock_t *lock)
++{
++	mtx_lock_spin((struct mtx *)lock);	// ???
++}
++
++void FH_SPINUNLOCK(fh_spinlock_t *lock)
++{
++	mtx_unlock_spin((struct mtx *)lock);	// ???
++}
++
++void FH_SPINLOCK_IRQSAVE(fh_spinlock_t *lock, fh_irqflags_t *flags)
++{
++	mtx_lock_spin((struct mtx *)lock);
++}
++
++void FH_SPINUNLOCK_IRQRESTORE(fh_spinlock_t *lock, fh_irqflags_t flags)
++{
++	mtx_unlock_spin((struct mtx *)lock);
++}
++
++fh_mutex_t *FH_MUTEX_ALLOC(void)
++{
++	struct mtx *m;
++	fh_mutex_t *mutex = (fh_mutex_t *)FH_ALLOC(sizeof(struct mtx));
++
++	if (!mutex) {
++		FH_ERROR("Cannot allocate memory for mutex");
++		return NULL;
++	}
++
++	m = (struct mtx *)mutex;
++	mtx_init(m, "dw3mtx", NULL, MTX_DEF);
++	return mutex;
++}
++
++#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
++#else
++void FH_MUTEX_FREE(fh_mutex_t *mutex)
++{
++	mtx_destroy((struct mtx *)mutex);
++	FH_FREE(mutex);
++}
++#endif
++
++void FH_MUTEX_LOCK(fh_mutex_t *mutex)
++{
++	struct mtx *m = (struct mtx *)mutex;
++
++	mtx_lock(m);
++}
++
++int FH_MUTEX_TRYLOCK(fh_mutex_t *mutex)
++{
++	struct mtx *m = (struct mtx *)mutex;
++
++	return mtx_trylock(m);
++}
++
++void FH_MUTEX_UNLOCK(fh_mutex_t *mutex)
++{
++	struct mtx *m = (struct mtx *)mutex;
++
++	mtx_unlock(m);
++}
++
++
++/* Timing */
++
++void FH_UDELAY(uint32_t usecs)
++{
++	DELAY(usecs);
++}
++
++void FH_MDELAY(uint32_t msecs)
++{
++	while (msecs--) {
++		DELAY(1000);
++	}
++}
++
++void FH_MSLEEP(uint32_t msecs)
++{
++	struct timeval tv;
++
++	tv.tv_sec = msecs / 1000;
++	tv.tv_usec = (msecs - tv.tv_sec * 1000) * 1000;
++	pause("dw3slp", tvtohz(&tv));
++}
++
++uint32_t FH_TIME(void)
++{
++	struct timeval tv;
++
++	microuptime(&tv);	// or getmicrouptime? (less precise, but faster)
++	return tv.tv_sec * 1000 + tv.tv_usec / 1000;
++}
++
++
++/* Timers */
++
++struct fh_timer {
++	struct callout t;
++	char *name;
++	fh_spinlock_t *lock;
++	fh_timer_callback_t cb;
++	void *data;
++};
++
++fh_timer_t *FH_TIMER_ALLOC(char *name, fh_timer_callback_t cb, void *data)
++{
++	fh_timer_t *t = FH_ALLOC(sizeof(*t));
++
++	if (!t) {
++		FH_ERROR("Cannot allocate memory for timer");
++		return NULL;
++	}
++
++	callout_init(&t->t, 1);
++
++	t->name = FH_STRDUP(name);
++	if (!t->name) {
++		FH_ERROR("Cannot allocate memory for timer->name");
++		goto no_name;
++	}
++
++	t->lock = FH_SPINLOCK_ALLOC();
++	if (!t->lock) {
++		FH_ERROR("Cannot allocate memory for lock");
++		goto no_lock;
++	}
++
++	t->cb = cb;
++	t->data = data;
++
++	return t;
++
++ no_lock:
++	FH_FREE(t->name);
++ no_name:
++	FH_FREE(t);
++
++	return NULL;
++}
++
++void FH_TIMER_FREE(fh_timer_t *timer)
++{
++	callout_stop(&timer->t);
++	FH_SPINLOCK_FREE(timer->lock);
++	FH_FREE(timer->name);
++	FH_FREE(timer);
++}
++
++void FH_TIMER_SCHEDULE(fh_timer_t *timer, uint32_t time)
++{
++	struct timeval tv;
++
++	tv.tv_sec = time / 1000;
++	tv.tv_usec = (time - tv.tv_sec * 1000) * 1000;
++	callout_reset(&timer->t, tvtohz(&tv), timer->cb, timer->data);
++}
++
++void FH_TIMER_CANCEL(fh_timer_t *timer)
++{
++	callout_stop(&timer->t);
++}
++
++
++/* Wait Queues */
++
++struct fh_waitq {
++	struct mtx lock;
++	int abort;
++};
++
++fh_waitq_t *FH_WAITQ_ALLOC(void)
++{
++	fh_waitq_t *wq = FH_ALLOC(sizeof(*wq));
++
++	if (!wq) {
++		FH_ERROR("Cannot allocate memory for waitqueue");
++		return NULL;
++	}
++
++	mtx_init(&wq->lock, "dw3wtq", NULL, MTX_DEF);
++	wq->abort = 0;
++
++	return wq;
++}
++
++void FH_WAITQ_FREE(fh_waitq_t *wq)
++{
++	mtx_destroy(&wq->lock);
++	FH_FREE(wq);
++}
++
++int32_t FH_WAITQ_WAIT(fh_waitq_t *wq, fh_waitq_condition_t cond, void *data)
++{
++//	intrmask_t ipl;
++	int result = 0;
++
++	mtx_lock(&wq->lock);
++//	ipl = splbio();
++
++	/* Skip the sleep if already aborted or triggered */
++	if (!wq->abort && !cond(data)) {
++//		splx(ipl);
++		result = msleep(wq, &wq->lock, PCATCH, "dw3wat", 0); // infinite timeout
++//		ipl = splbio();
++	}
++
++	if (result == ERESTART) {	// signaled - restart
++		result = -FH_E_RESTART;
++
++	} else if (result == EINTR) {	// signaled - interrupt
++		result = -FH_E_ABORT;
++
++	} else if (wq->abort) {
++		result = -FH_E_ABORT;
++
++	} else {
++		result = 0;
++	}
++
++	wq->abort = 0;
++//	splx(ipl);
++	mtx_unlock(&wq->lock);
++	return result;
++}
++
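++/* Returns the milliseconds remaining (clamped to at least 1) when the
++ * condition was met, -FH_E_TIMEOUT on timeout, or -FH_E_RESTART/-FH_E_ABORT
++ * if the sleep was interrupted or the queue was aborted. */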
++int32_t FH_WAITQ_WAIT_TIMEOUT(fh_waitq_t *wq, fh_waitq_condition_t cond,
++			       void *data, int32_t msecs)
++{
++	struct timeval tv, tv1, tv2;
++//	intrmask_t ipl;
++	int result = 0;
++
++	tv.tv_sec = msecs / 1000;
++	tv.tv_usec = (msecs - tv.tv_sec * 1000) * 1000;
++
++	mtx_lock(&wq->lock);
++//	ipl = splbio();
++
++	/* Skip the sleep if already aborted or triggered */
++	if (!wq->abort && !cond(data)) {
++//		splx(ipl);
++		getmicrouptime(&tv1);
++		result = msleep(wq, &wq->lock, PCATCH, "dw3wto", tvtohz(&tv));
++		getmicrouptime(&tv2);
++//		ipl = splbio();
++	}
++
++	if (result == 0) {			// awoken
++		if (wq->abort) {
++			result = -FH_E_ABORT;
++		} else {
++			tv2.tv_usec -= tv1.tv_usec;
++			if (tv2.tv_usec < 0) {
++				tv2.tv_usec += 1000000;
++				tv2.tv_sec--;
++			}
++
++			tv2.tv_sec -= tv1.tv_sec;
++			result = tv2.tv_sec * 1000 + tv2.tv_usec / 1000;
++			result = msecs - result;
++			if (result <= 0)
++				result = 1;
++		}
++	} else if (result == ERESTART) {	// signaled - restart
++		result = -FH_E_RESTART;
++
++	} else if (result == EINTR) {		// signaled - interrupt
++		result = -FH_E_ABORT;
++
++	} else {				// timed out
++		result = -FH_E_TIMEOUT;
++	}
++
++	wq->abort = 0;
++//	splx(ipl);
++	mtx_unlock(&wq->lock);
++	return result;
++}
++
++void FH_WAITQ_TRIGGER(fh_waitq_t *wq)
++{
++	wakeup(wq);
++}
++
++void FH_WAITQ_ABORT(fh_waitq_t *wq)
++{
++//	intrmask_t ipl;
++
++	mtx_lock(&wq->lock);
++//	ipl = splbio();
++	wq->abort = 1;
++	wakeup(wq);
++//	splx(ipl);
++	mtx_unlock(&wq->lock);
++}
++
++
++/* Threading */
++
++struct fh_thread {
++	struct proc *proc;
++	int abort;
++};
++
++fh_thread_t *FH_THREAD_RUN(fh_thread_function_t func, char *name, void *data)
++{
++	int retval;
++	fh_thread_t *thread = FH_ALLOC(sizeof(*thread));
++
++	if (!thread) {
++		return NULL;
++	}
++
++	thread->abort = 0;
++	retval = kthread_create((void (*)(void *))func, data, &thread->proc,
++				RFPROC | RFNOWAIT, 0, "%s", name);
++	if (retval) {
++		FH_FREE(thread);
++		return NULL;
++	}
++
++	return thread;
++}
++
++int FH_THREAD_STOP(fh_thread_t *thread)
++{
++	int retval;
++
++	thread->abort = 1;
++	retval = tsleep(&thread->abort, 0, "dw3stp", 60 * hz);
++
++	if (retval == 0) {
++		/* FH_THREAD_EXIT() will free the thread struct */
++		return 0;
++	}
++
++	/* NOTE: We leak the thread struct if thread doesn't die */
++
++	if (retval == EWOULDBLOCK) {
++		return -FH_E_TIMEOUT;
++	}
++
++	return -FH_E_UNKNOWN;
++}
++
++fh_bool_t FH_THREAD_SHOULD_STOP(fh_thread_t *thread)
++{
++	return thread->abort;
++}
++
++void FH_THREAD_EXIT(fh_thread_t *thread)
++{
++	wakeup(&thread->abort);
++	FH_FREE(thread);
++	kthread_exit(0);
++}
++
++
++/* tasklets
++ - Runs in interrupt context (cannot sleep)
++ - Each tasklet runs on a single CPU [ How can we ensure this on FreeBSD? Does it matter? ]
++ - Different tasklets can be running simultaneously on different CPUs [ shouldn't matter ]
++ */
++struct fh_tasklet {
++	struct task t;
++	fh_tasklet_callback_t cb;
++	void *data;
++};
++
++static void tasklet_callback(void *data, int pending)	// what to do with pending ???
++{
++	fh_tasklet_t *task = (fh_tasklet_t *)data;
++
++	task->cb(task->data);
++}
++
++fh_tasklet_t *FH_TASK_ALLOC(char *name, fh_tasklet_callback_t cb, void *data)
++{
++	fh_tasklet_t *task = FH_ALLOC(sizeof(*task));
++
++	if (task) {
++		task->cb = cb;
++		task->data = data;
++		TASK_INIT(&task->t, 0, tasklet_callback, task);
++	} else {
++		FH_ERROR("Cannot allocate memory for tasklet");
++	}
++
++	return task;
++}
++
++void FH_TASK_FREE(fh_tasklet_t *task)
++{
++	taskqueue_drain(taskqueue_fast, &task->t);	// ???
++	FH_FREE(task);
++}
++
++void FH_TASK_SCHEDULE(fh_tasklet_t *task)
++{
++	/* Uses predefined system queue */
++	taskqueue_enqueue_fast(taskqueue_fast, &task->t);
++}
++
++
++/* workqueues
++ - Runs in process context (can sleep)
++ */
++typedef struct work_container {
++	fh_work_callback_t cb;
++	void *data;
++	fh_workq_t *wq;
++	char *name;
++	int hz;
++
++#ifdef DEBUG
++	FH_CIRCLEQ_ENTRY(work_container) entry;
++#endif
++	struct task task;
++} work_container_t;
++
++#ifdef DEBUG
++FH_CIRCLEQ_HEAD(work_container_queue, work_container);
++#endif
++
++struct fh_workq {
++	struct taskqueue *taskq;
++	fh_spinlock_t *lock;
++	fh_waitq_t *waitq;
++	int pending;
++
++#ifdef DEBUG
++	struct work_container_queue entries;
++#endif
++};
++
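++/* Executes one queued work item: applies any requested delay, runs the
++ * callback, frees the container, and wakes FH_WORKQ_WAIT_WORK_DONE() waiters. */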
++static void do_work(void *data, int pending)	// what to do with pending ???
++{
++	work_container_t *container = (work_container_t *)data;
++	fh_workq_t *wq = container->wq;
++	fh_irqflags_t flags;
++
++	if (container->hz) {
++		pause("dw3wrk", container->hz);
++	}
++
++	container->cb(container->data);
++	FH_DEBUG("Work done: %s, container=%p", container->name, container);
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++
++#ifdef DEBUG
++	FH_CIRCLEQ_REMOVE(&wq->entries, container, entry);
++#endif
++	if (container->name)
++		FH_FREE(container->name);
++	FH_FREE(container);
++	wq->pending--;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++}
++
++static int work_done(void *data)
++{
++	fh_workq_t *workq = (fh_workq_t *)data;
++
++	return workq->pending == 0;
++}
++
++int FH_WORKQ_WAIT_WORK_DONE(fh_workq_t *workq, int timeout)
++{
++	return FH_WAITQ_WAIT_TIMEOUT(workq->waitq, work_done, workq, timeout);
++}
++
++fh_workq_t *FH_WORKQ_ALLOC(char *name)
++{
++	fh_workq_t *wq = FH_ALLOC(sizeof(*wq));
++
++	if (!wq) {
++		FH_ERROR("Cannot allocate memory for workqueue");
++		return NULL;
++	}
++
++	wq->taskq = taskqueue_create(name, M_NOWAIT, taskqueue_thread_enqueue, &wq->taskq);
++	if (!wq->taskq) {
++		FH_ERROR("Cannot allocate memory for taskqueue");
++		goto no_taskq;
++	}
++
++	wq->pending = 0;
++
++	wq->lock = FH_SPINLOCK_ALLOC();
++	if (!wq->lock) {
++		FH_ERROR("Cannot allocate memory for spinlock");
++		goto no_lock;
++	}
++
++	wq->waitq = FH_WAITQ_ALLOC();
++	if (!wq->waitq) {
++		FH_ERROR("Cannot allocate memory for waitqueue");
++		goto no_waitq;
++	}
++
++	taskqueue_start_threads(&wq->taskq, 1, PWAIT, "%s taskq", "dw3tsk");
++
++#ifdef DEBUG
++	FH_CIRCLEQ_INIT(&wq->entries);
++#endif
++	return wq;
++
++ no_waitq:
++	FH_SPINLOCK_FREE(wq->lock);
++ no_lock:
++	taskqueue_free(wq->taskq);
++ no_taskq:
++	FH_FREE(wq);
++
++	return NULL;
++}
++
++void FH_WORKQ_FREE(fh_workq_t *wq)
++{
++#ifdef DEBUG
++	fh_irqflags_t flags;
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++
++	if (wq->pending != 0) {
++		struct work_container *container;
++
++		FH_ERROR("Destroying work queue with pending work");
++
++		FH_CIRCLEQ_FOREACH(container, &wq->entries, entry) {
++			FH_ERROR("Work %s still pending", container->name);
++		}
++	}
++
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++#endif
++	FH_WAITQ_FREE(wq->waitq);
++	FH_SPINLOCK_FREE(wq->lock);
++	taskqueue_free(wq->taskq);
++	FH_FREE(wq);
++}
++
++void FH_WORKQ_SCHEDULE(fh_workq_t *wq, fh_work_callback_t cb, void *data,
++			char *format, ...)
++{
++	fh_irqflags_t flags;
++	work_container_t *container;
++	static char name[128];	/* shared scratch buffer; concurrent schedulers would race on it */
++	va_list args;
++
++	va_start(args, format);
++	FH_VSNPRINTF(name, 128, format, args);
++	va_end(args);
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	wq->pending++;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++
++	container = FH_ALLOC_ATOMIC(sizeof(*container));
++	if (!container) {
++		FH_ERROR("Cannot allocate memory for container");
++		return;
++	}
++
++	container->name = FH_STRDUP(name);
++	if (!container->name) {
++		FH_ERROR("Cannot allocate memory for container->name");
++		FH_FREE(container);
++		return;
++	}
++
++	container->cb = cb;
++	container->data = data;
++	container->wq = wq;
++	container->hz = 0;
++
++	FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
++
++	TASK_INIT(&container->task, 0, do_work, container);
++
++#ifdef DEBUG
++	FH_CIRCLEQ_INSERT_TAIL(&wq->entries, container, entry);
++#endif
++	taskqueue_enqueue_fast(wq->taskq, &container->task);
++}
++
++void FH_WORKQ_SCHEDULE_DELAYED(fh_workq_t *wq, fh_work_callback_t cb,
++				void *data, uint32_t time, char *format, ...)
++{
++	fh_irqflags_t flags;
++	work_container_t *container;
++	static char name[128];
++	struct timeval tv;
++	va_list args;
++
++	va_start(args, format);
++	FH_VSNPRINTF(name, 128, format, args);
++	va_end(args);
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	wq->pending++;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++
++	container = FH_ALLOC_ATOMIC(sizeof(*container));
++	if (!container) {
++		FH_ERROR("Cannot allocate memory for container");
++		return;
++	}
++
++	container->name = FH_STRDUP(name);
++	if (!container->name) {
++		FH_ERROR("Cannot allocate memory for container->name");
++		FH_FREE(container);
++		return;
++	}
++
++	container->cb = cb;
++	container->data = data;
++	container->wq = wq;
++
++	tv.tv_sec = time / 1000;
++	tv.tv_usec = (time - tv.tv_sec * 1000) * 1000;
++	container->hz = tvtohz(&tv);
++
++	FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
++
++	TASK_INIT(&container->task, 0, do_work, container);
++
++#ifdef DEBUG
++	FH_CIRCLEQ_INSERT_TAIL(&wq->entries, container, entry);
++#endif
++	taskqueue_enqueue_fast(wq->taskq, &container->task);
++}
++
++int FH_WORKQ_PENDING(fh_workq_t *wq)
++{
++	return wq->pending;
++}
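++
++/*
++ * Illustrative sketch (not part of the driver): schedule a named work item
++ * and wait for the queue to drain.  'my_work' and 'my_dev' are hypothetical.
++ */
++#if 0
++static void my_work(void *my_dev)
++{
++	/* runs on the queue's thread in process context; may sleep */
++}
++
++static void example(void *my_dev)
++{
++	fh_workq_t *wq = FH_WORKQ_ALLOC("example wq");
++
++	if (wq) {
++		FH_WORKQ_SCHEDULE(wq, my_work, my_dev, "work for %p", my_dev);
++		FH_WORKQ_WAIT_WORK_DONE(wq, 1000);	/* wait up to 1000 ms */
++		FH_WORKQ_FREE(wq);
++	}
++}
++#endif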
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_common_linux.c b/drivers/usb/host/fh_otg/fh_common_port/fh_common_linux.c
+new file mode 100644
+index 00000000..da70b191
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_common_linux.c
+@@ -0,0 +1,1426 @@
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++
++#ifdef FH_CCLIB
++# include "fh_cc.h"
++#endif
++
++#ifdef FH_CRYPTOLIB
++# include "fh_modpow.h"
++# include "fh_dh.h"
++# include "fh_crypto.h"
++#endif
++
++#ifdef FH_NOTIFYLIB
++# include "fh_notifier.h"
++#endif
++
++/* OS-Level Implementations */
++
++/* This is the Linux kernel implementation of the FH platform library. */
++#include <linux/moduleparam.h>
++#include <linux/ctype.h>
++#include <linux/crypto.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/cdev.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/jiffies.h>
++#include <linux/list.h>
++#include <linux/pci.h>
++#include <linux/random.h>
++#include <linux/scatterlist.h>
++#include <linux/slab.h>
++#include <linux/stat.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/usb.h>
++
++#include <linux/version.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# include <linux/usb/gadget.h>
++#else
++# include <linux/usb_gadget.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/uaccess.h>
++#include <asm/unaligned.h>
++
++#include "fh_os.h"
++#include "fh_list.h"
++
++
++/* MISC */
++
++void *FH_MEMSET(void *dest, uint8_t byte, uint32_t size)
++{
++	return memset(dest, byte, size);
++}
++
++void *FH_MEMCPY(void *dest, void const *src, uint32_t size)
++{
++	return memcpy(dest, src, size);
++}
++
++void *FH_MEMMOVE(void *dest, void *src, uint32_t size)
++{
++	return memmove(dest, src, size);
++}
++
++int FH_MEMCMP(void *m1, void *m2, uint32_t size)
++{
++	return memcmp(m1, m2, size);
++}
++
++int FH_STRNCMP(void *s1, void *s2, uint32_t size)
++{
++	return strncmp(s1, s2, size);
++}
++
++int FH_STRCMP(void *s1, void *s2)
++{
++	return strcmp(s1, s2);
++}
++
++int FH_STRLEN(char const *str)
++{
++	return strlen(str);
++}
++
++char *FH_STRCPY(char *to, char const *from)
++{
++	return strcpy(to, from);
++}
++
++char *FH_STRDUP(char const *str)
++{
++	int len = FH_STRLEN(str) + 1;
++	char *new = FH_ALLOC_ATOMIC(len);
++
++	if (!new) {
++		return NULL;
++	}
++
++	FH_MEMCPY(new, str, len);
++	return new;
++}
++
++int FH_ATOI(const char *str, int32_t *value)
++{
++	char *end = NULL;
++
++	*value = simple_strtol(str, &end, 0);
++	if (*end == '\0') {
++		return 0;
++	}
++
++	return -1;
++}
++
++int FH_ATOUI(const char *str, uint32_t *value)
++{
++	char *end = NULL;
++
++	*value = simple_strtoul(str, &end, 0);
++	if (*end == '\0') {
++		return 0;
++	}
++
++	return -1;
++}
++
++
++#ifdef FH_UTFLIB
++/* From usbstring.c */
++
++int FH_UTF8_TO_UTF16LE(uint8_t const *s, uint16_t *cp, unsigned len)
++{
++	int	count = 0;
++	u8	c;
++	u16	uchar;
++
++	/* this insists on correct encodings, though not minimal ones.
++	 * BUT it currently rejects legit 4-byte UTF-8 code points,
++	 * which need surrogate pairs.  (Unicode 3.1 can use them.)
++	 */
++	while (len != 0 && (c = (u8) *s++) != 0) {
++		if (unlikely(c & 0x80)) {
++			// 2-byte sequence:
++			// 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
++			if ((c & 0xe0) == 0xc0) {
++				uchar = (c & 0x1f) << 6;
++
++				c = (u8) *s++;
++				if ((c & 0xc0) != 0x80)	/* continuation bytes are 10xxxxxx */
++					goto fail;
++				c &= 0x3f;
++				uchar |= c;
++
++			// 3-byte sequence (most CJKV characters):
++			// zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
++			} else if ((c & 0xf0) == 0xe0) {
++				uchar = (c & 0x0f) << 12;
++
++				c = (u8) *s++;
++				if ((c & 0xc0) != 0x80)
++					goto fail;
++				c &= 0x3f;
++				uchar |= c << 6;
++
++				c = (u8) *s++;
++				if ((c & 0xc0) != 0x80)
++					goto fail;
++				c &= 0x3f;
++				uchar |= c;
++
++				/* no bogus surrogates */
++				if (0xd800 <= uchar && uchar <= 0xdfff)
++					goto fail;
++
++			// 4-byte sequence (surrogate pairs, currently rare):
++			// 11101110wwwwzzzzyy + 110111yyyyxxxxxx
++			//     = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
++			// (uuuuu = wwww + 1)
++			// FIXME accept the surrogate code points (only)
++			} else
++				goto fail;
++		} else
++			uchar = c;
++		put_unaligned (cpu_to_le16 (uchar), cp++);
++		count++;
++		len--;
++	}
++	return count;
++fail:
++	return -1;
++}
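++
++/*
++ * Worked example: the Euro sign U+20AC is the 3-byte sequence E2 82 AC.
++ * E2 selects the 3-byte branch (uchar = 0x2 << 12 = 0x2000), 82 adds
++ * 0x02 << 6 (giving 0x2080), AC adds 0x2C (giving 0x20AC), and the result
++ * is stored little-endian as AC 20.
++ */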
++#endif	/* FH_UTFLIB */
++
++
++/* fh_debug.h */
++
++fh_bool_t FH_IN_IRQ(void)
++{
++	return in_irq();
++}
++
++fh_bool_t FH_IN_BH(void)
++{
++	return in_softirq();
++}
++
++void FH_VPRINTF(char *format, va_list args)
++{
++	vprintk(format, args);
++}
++
++int FH_VSNPRINTF(char *str, int size, char *format, va_list args)
++{
++	return vsnprintf(str, size, format, args);
++}
++
++void FH_PRINTF(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++
++int FH_SPRINTF(char *buffer, char *format, ...)
++{
++	int retval;
++	va_list args;
++
++	va_start(args, format);
++	retval = vsprintf(buffer, format, args);
++	va_end(args);
++	return retval;
++}
++
++int FH_SNPRINTF(char *buffer, int size, char *format, ...)
++{
++	int retval;
++	va_list args;
++
++	va_start(args, format);
++	retval = vsnprintf(buffer, size, format, args);
++	va_end(args);
++	return retval;
++}
++
++void __FH_WARN(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_PRINTF(KERN_WARNING);	/* prints only the level marker; the message itself follows via vprintk() */
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++
++void __FH_ERROR(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_PRINTF(KERN_ERR);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++
++void FH_EXCEPTION(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_PRINTF(KERN_ERR);
++	FH_VPRINTF(format, args);
++	va_end(args);
++	BUG_ON(1);
++}
++
++#ifdef DEBUG
++void __FH_DEBUG(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_PRINTF(KERN_DEBUG);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++#endif
++
++
++/* fh_mem.h */
++
++#if 0
++fh_pool_t *FH_DMA_POOL_CREATE(uint32_t size,
++				uint32_t align,
++				uint32_t alloc)
++{
++	struct dma_pool *pool = dma_pool_create("Pool", NULL,
++						size, align, alloc);
++	return (fh_pool_t *)pool;
++}
++
++void FH_DMA_POOL_DESTROY(fh_pool_t *pool)
++{
++	dma_pool_destroy((struct dma_pool *)pool);
++}
++
++void *FH_DMA_POOL_ALLOC(fh_pool_t *pool, uint64_t *dma_addr)
++{
++	return dma_pool_alloc((struct dma_pool *)pool, GFP_KERNEL, dma_addr);
++}
++
++void *FH_DMA_POOL_ZALLOC(fh_pool_t *pool, uint64_t *dma_addr)
++{
++	void *vaddr = FH_DMA_POOL_ALLOC(pool, dma_addr);
++	memset(..);
++}
++
++void FH_DMA_POOL_FREE(fh_pool_t *pool, void *vaddr, void *daddr)
++{
++	dma_pool_free(pool, vaddr, daddr);
++}
++#endif
++
++void *__FH_DMA_ALLOC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr)
++{
++#ifdef xxCOSIM /* Only works for 32-bit cosim */
++	void *buf = dma_alloc_coherent(dma_ctx, (size_t)size, dma_addr, GFP_KERNEL);
++#else
++	void *buf = dma_alloc_coherent(dma_ctx, (size_t)size, dma_addr, GFP_ATOMIC);
++#endif
++	if (!buf) {
++		return NULL;
++	}
++
++	memset(buf, 0, (size_t)size);
++	return buf;
++}
++
++void *__FH_DMA_ALLOC_ATOMIC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr)
++{
++	void *buf = dma_alloc_coherent(dma_ctx, (size_t)size, dma_addr, GFP_ATOMIC);	/* pass the device through, as __FH_DMA_ALLOC() does */
++	if (!buf) {
++		return NULL;
++	}
++	memset(buf, 0, (size_t)size);
++	return buf;
++}
++
++void __FH_DMA_FREE(void *dma_ctx, uint32_t size, void *virt_addr, fh_dma_t dma_addr)
++{
++	dma_free_coherent(dma_ctx, size, virt_addr, dma_addr);
++}
++
++void *__FH_ALLOC(void *mem_ctx, uint32_t size)
++{
++	return kzalloc(size, GFP_KERNEL);
++}
++
++void *__FH_ALLOC_ATOMIC(void *mem_ctx, uint32_t size)
++{
++	return kzalloc(size, GFP_ATOMIC);
++}
++
++void __FH_FREE(void *mem_ctx, void *addr)
++{
++	kfree(addr);
++}
++
++
++#ifdef FH_CRYPTOLIB
++/* fh_crypto.h */
++
++void FH_RANDOM_BYTES(uint8_t *buffer, uint32_t length)
++{
++	get_random_bytes(buffer, length);
++}
++
++int FH_AES_CBC(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t iv[16], uint8_t *out)
++{
++	struct crypto_blkcipher *tfm;
++	struct blkcipher_desc desc;
++	struct scatterlist sgd;
++	struct scatterlist sgs;
++
++	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
++	if (tfm == NULL) {
++		printk("failed to load transform for aes CBC\n");
++		return -1;
++	}
++
++	crypto_blkcipher_setkey(tfm, key, keylen);
++	crypto_blkcipher_set_iv(tfm, iv, 16);
++
++	sg_init_one(&sgd, out, messagelen);
++	sg_init_one(&sgs, message, messagelen);
++
++	desc.tfm = tfm;
++	desc.flags = 0;
++
++	if (crypto_blkcipher_encrypt(&desc, &sgd, &sgs, messagelen)) {
++		crypto_free_blkcipher(tfm);
++		FH_ERROR("AES CBC encryption failed");
++		return -1;
++	}
++
++	crypto_free_blkcipher(tfm);
++	return 0;
++}
++
++int FH_SHA256(uint8_t *message, uint32_t len, uint8_t *out)
++{
++	struct crypto_hash *tfm;
++	struct hash_desc desc;
++	struct scatterlist sg;
++
++	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
++	if (IS_ERR(tfm)) {
++		FH_ERROR("Failed to load transform for sha256: %ld\n", PTR_ERR(tfm));
++		return 0;
++	}
++	desc.tfm = tfm;
++	desc.flags = 0;
++
++	sg_init_one(&sg, message, len);
++	crypto_hash_digest(&desc, &sg, len, out);
++	crypto_free_hash(tfm);
++
++	return 1;
++}
++
++int FH_HMAC_SHA256(uint8_t *message, uint32_t messagelen,
++		    uint8_t *key, uint32_t keylen, uint8_t *out)
++{
++	struct crypto_hash *tfm;
++	struct hash_desc desc;
++	struct scatterlist sg;
++
++	tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
++	if (IS_ERR(tfm)) {
++		FH_ERROR("Failed to load transform for hmac(sha256): %ld\n", PTR_ERR(tfm));
++		return 0;
++	}
++	desc.tfm = tfm;
++	desc.flags = 0;
++
++	sg_init_one(&sg, message, messagelen);
++	crypto_hash_setkey(tfm, key, keylen);
++	crypto_hash_digest(&desc, &sg, messagelen, out);
++	crypto_free_hash(tfm);
++
++	return 1;
++}
++#endif	/* FH_CRYPTOLIB */
++
++
++/* Byte Ordering Conversions */
++
++uint32_t FH_CPU_TO_LE32(uint32_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint32_t FH_CPU_TO_BE32(uint32_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint32_t FH_LE32_TO_CPU(uint32_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint32_t FH_BE32_TO_CPU(uint32_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint16_t FH_CPU_TO_LE16(uint16_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++uint16_t FH_CPU_TO_BE16(uint16_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++uint16_t FH_LE16_TO_CPU(uint16_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++uint16_t FH_BE16_TO_CPU(uint16_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++
++/* Registers */
++
++uint32_t FH_READ_REG32(uint32_t volatile *reg)
++{
++	return readl(reg);
++}
++
++#if 0
++uint64_t FH_READ_REG64(uint64_t volatile *reg)
++{
++}
++#endif
++
++void FH_WRITE_REG32(uint32_t volatile *reg, uint32_t value)
++{
++	writel(value, reg);
++}
++
++#if 0
++void FH_WRITE_REG64(uint64_t volatile *reg, uint64_t value)
++{
++}
++#endif
++
++void FH_MODIFY_REG32(uint32_t volatile *reg, uint32_t clear_mask, uint32_t set_mask)
++{
++	writel((readl(reg) & ~clear_mask) | set_mask, reg);
++}
++
++#if 0
++void FH_MODIFY_REG64(uint64_t volatile *reg, uint64_t clear_mask, uint64_t set_mask)
++{
++}
++#endif
++
++
++/* Locking */
++
++fh_spinlock_t *FH_SPINLOCK_ALLOC(void)
++{
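++	/*
++	 * On non-preemptible UP kernels spin_lock() compiles to nothing, so
++	 * no real spinlock_t is allocated; a dummy non-NULL token is returned
++	 * so that callers can still tell success from allocation failure.
++	 */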
++	spinlock_t *sl = (spinlock_t *)1;
++
++#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
++	sl = FH_ALLOC(sizeof(*sl));
++	if (!sl) {
++		FH_ERROR("Cannot allocate memory for spinlock\n");
++		return NULL;
++	}
++
++	spin_lock_init(sl);
++#endif
++	return (fh_spinlock_t *)sl;
++}
++
++void FH_SPINLOCK_FREE(fh_spinlock_t *lock)
++{
++#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
++	FH_FREE(lock);
++#endif
++}
++
++void FH_SPINLOCK(fh_spinlock_t *lock)
++{
++#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
++	spin_lock((spinlock_t *)lock);
++#endif
++}
++
++void FH_SPINUNLOCK(fh_spinlock_t *lock)
++{
++#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
++	spin_unlock((spinlock_t *)lock);
++#endif
++}
++
++void FH_SPINLOCK_IRQSAVE(fh_spinlock_t *lock, fh_irqflags_t *flags)
++{
++	fh_irqflags_t f;
++
++#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
++	spin_lock_irqsave((spinlock_t *)lock, f);
++#else
++	local_irq_save(f);
++#endif
++	*flags = f;
++}
++
++void FH_SPINUNLOCK_IRQRESTORE(fh_spinlock_t *lock, fh_irqflags_t flags)
++{
++#if defined(CONFIG_PREEMPT) || defined(CONFIG_SMP)
++	spin_unlock_irqrestore((spinlock_t *)lock, flags);
++#else
++	local_irq_restore(flags);
++#endif
++}
++
++fh_mutex_t *FH_MUTEX_ALLOC(void)
++{
++	struct mutex *m;
++	fh_mutex_t *mutex = (fh_mutex_t *)FH_ALLOC(sizeof(struct mutex));
++
++	if (!mutex) {
++		FH_ERROR("Cannot allocate memory for mutex\n");
++		return NULL;
++	}
++
++	m = (struct mutex *)mutex;
++	mutex_init(m);
++	return mutex;
++}
++
++#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
++#else
++void FH_MUTEX_FREE(fh_mutex_t *mutex)
++{
++	mutex_destroy((struct mutex *)mutex);
++	FH_FREE(mutex);
++}
++#endif
++
++void FH_MUTEX_LOCK(fh_mutex_t *mutex)
++{
++	struct mutex *m = (struct mutex *)mutex;
++	mutex_lock(m);
++}
++
++int FH_MUTEX_TRYLOCK(fh_mutex_t *mutex)
++{
++	struct mutex *m = (struct mutex *)mutex;
++	return mutex_trylock(m);
++}
++
++void FH_MUTEX_UNLOCK(fh_mutex_t *mutex)
++{
++	struct mutex *m = (struct mutex *)mutex;
++	mutex_unlock(m);
++}
++
++
++/* Timing */
++
++void FH_UDELAY(uint32_t usecs)
++{
++	udelay(usecs);
++}
++
++void FH_MDELAY(uint32_t msecs)
++{
++	mdelay(msecs);
++}
++
++void FH_MSLEEP(uint32_t msecs)
++{
++	msleep(msecs);
++}
++
++uint32_t FH_TIME(void)
++{
++	return jiffies_to_msecs(jiffies);
++}
++
++
++/* Timers */
++
++struct fh_timer {
++	struct timer_list *t;
++	char *name;
++	fh_timer_callback_t cb;
++	void *data;
++	uint8_t scheduled;
++	fh_spinlock_t *lock;
++};
++
++static void timer_callback(unsigned long data)
++{
++	fh_timer_t *timer = (fh_timer_t *)data;
++	fh_irqflags_t flags;
++
++	FH_SPINLOCK_IRQSAVE(timer->lock, &flags);
++	timer->scheduled = 0;
++	FH_SPINUNLOCK_IRQRESTORE(timer->lock, flags);
++	FH_DEBUG("Timer %s callback", timer->name);
++	timer->cb(timer->data);
++}
++
++fh_timer_t *FH_TIMER_ALLOC(char *name, fh_timer_callback_t cb, void *data)
++{
++	fh_timer_t *t = FH_ALLOC(sizeof(*t));
++
++	if (!t) {
++		FH_ERROR("Cannot allocate memory for timer");
++		return NULL;
++	}
++
++	t->t = FH_ALLOC(sizeof(*t->t));
++	if (!t->t) {
++		FH_ERROR("Cannot allocate memory for timer->t");
++		goto no_timer;
++	}
++
++	t->name = FH_STRDUP(name);
++	if (!t->name) {
++		FH_ERROR("Cannot allocate memory for timer->name");
++		goto no_name;
++	}
++
++	t->lock = FH_SPINLOCK_ALLOC();
++	if (!t->lock) {
++		FH_ERROR("Cannot allocate memory for lock");
++		goto no_lock;
++	}
++
++	t->scheduled = 0;
++	t->t->base = &boot_tvec_bases;
++	t->t->expires = jiffies;
++	setup_timer(t->t, timer_callback, (unsigned long)t);
++
++	t->cb = cb;
++	t->data = data;
++
++	return t;
++
++ no_lock:
++	FH_FREE(t->name);
++ no_name:
++	FH_FREE(t->t);
++ no_timer:
++	FH_FREE(t);
++	return NULL;
++}
++
++void FH_TIMER_FREE(fh_timer_t *timer)
++{
++	fh_irqflags_t flags;
++
++	FH_SPINLOCK_IRQSAVE(timer->lock, &flags);
++
++	if (timer->scheduled) {
++		del_timer(timer->t);
++		timer->scheduled = 0;
++	}
++
++	FH_SPINUNLOCK_IRQRESTORE(timer->lock, flags);
++	FH_SPINLOCK_FREE(timer->lock);
++	FH_FREE(timer->t);
++	FH_FREE(timer->name);
++	FH_FREE(timer);
++}
++
++void FH_TIMER_SCHEDULE(fh_timer_t *timer, uint32_t time)
++{
++	fh_irqflags_t flags;
++
++	FH_SPINLOCK_IRQSAVE(timer->lock, &flags);
++
++	if (!timer->scheduled) {
++		timer->scheduled = 1;
++		FH_DEBUG("Scheduling timer %s to expire in +%d msec", timer->name, time);
++		timer->t->expires = jiffies + msecs_to_jiffies(time);
++		add_timer(timer->t);
++	} else {
++		FH_DEBUG("Modifying timer %s to expire in +%d msec", timer->name, time);
++		mod_timer(timer->t, jiffies + msecs_to_jiffies(time));
++	}
++
++	FH_SPINUNLOCK_IRQRESTORE(timer->lock, flags);
++}
++
++void FH_TIMER_CANCEL(fh_timer_t *timer)
++{
++	del_timer(timer->t);
++}
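++
++/*
++ * Illustrative sketch (not part of the driver): one-shot use of the timer
++ * wrappers above.  'my_timeout' and 'my_dev' are hypothetical names.
++ */
++#if 0
++static void my_timeout(void *my_dev)
++{
++	/* runs in timer (softirq) context when the timer expires */
++}
++
++static void example(void *my_dev)
++{
++	fh_timer_t *t = FH_TIMER_ALLOC("example", my_timeout, my_dev);
++
++	if (t) {
++		FH_TIMER_SCHEDULE(t, 100);	/* fire in ~100 ms; re-arms if already scheduled */
++		/* ... */
++		FH_TIMER_CANCEL(t);
++		FH_TIMER_FREE(t);
++	}
++}
++#endif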
++
++
++/* Wait Queues */
++
++struct fh_waitq {
++	wait_queue_head_t queue;
++	int abort;
++};
++
++fh_waitq_t *FH_WAITQ_ALLOC(void)
++{
++	fh_waitq_t *wq = FH_ALLOC(sizeof(*wq));
++
++	if (!wq) {
++		FH_ERROR("Cannot allocate memory for waitqueue\n");
++		return NULL;
++	}
++
++	init_waitqueue_head(&wq->queue);
++	wq->abort = 0;
++	return wq;
++}
++
++void FH_WAITQ_FREE(fh_waitq_t *wq)
++{
++	FH_FREE(wq);
++}
++
++int32_t FH_WAITQ_WAIT(fh_waitq_t *wq, fh_waitq_condition_t cond, void *data)
++{
++	int result = wait_event_interruptible(wq->queue,
++					      cond(data) || wq->abort);
++	if (result == -ERESTARTSYS) {
++		wq->abort = 0;
++		return -FH_E_RESTART;
++	}
++
++	if (wq->abort == 1) {
++		wq->abort = 0;
++		return -FH_E_ABORT;
++	}
++
++	wq->abort = 0;
++
++	if (result == 0) {
++		return 0;
++	}
++
++	return -FH_E_UNKNOWN;
++}
++
++int32_t FH_WAITQ_WAIT_TIMEOUT(fh_waitq_t *wq, fh_waitq_condition_t cond,
++			       void *data, int32_t msecs)
++{
++	int32_t tmsecs;
++	int result = wait_event_interruptible_timeout(wq->queue,
++						      cond(data) || wq->abort,
++						      msecs_to_jiffies(msecs));
++	if (result == -ERESTARTSYS) {
++		wq->abort = 0;
++		return -FH_E_RESTART;
++	}
++
++	if (wq->abort == 1) {
++		wq->abort = 0;
++		return -FH_E_ABORT;
++	}
++
++	wq->abort = 0;
++
++	if (result > 0) {
++		tmsecs = jiffies_to_msecs(result);
++		if (!tmsecs) {
++			return 1;
++		}
++
++		return tmsecs;
++	}
++
++	if (result == 0) {
++		return -FH_E_TIMEOUT;
++	}
++
++	return -FH_E_UNKNOWN;
++}
++
++void FH_WAITQ_TRIGGER(fh_waitq_t *wq)
++{
++	wq->abort = 0;
++	wake_up_interruptible(&wq->queue);
++}
++
++void FH_WAITQ_ABORT(fh_waitq_t *wq)
++{
++	wq->abort = 1;
++	wake_up_interruptible(&wq->queue);
++}
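++
++/*
++ * Illustrative sketch (not part of the driver): waiting on a condition with
++ * a timeout.  'xfer_done' and 'my_dev' are hypothetical; the condition
++ * callback is polled, so it must be cheap and side-effect free.
++ */
++#if 0
++static int xfer_done(void *my_dev)
++{
++	return 0;	/* ... 1 once the transfer has completed ... */
++}
++
++static int example(fh_waitq_t *wq, void *my_dev)
++{
++	int32_t ret = FH_WAITQ_WAIT_TIMEOUT(wq, xfer_done, my_dev, 500);
++
++	if (ret > 0)
++		return 0;	/* condition met; ret is the time left in ms */
++	return ret;		/* -FH_E_TIMEOUT, -FH_E_ABORT or -FH_E_RESTART */
++}
++#endif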
++
++
++/* Threading */
++
++fh_thread_t *FH_THREAD_RUN(fh_thread_function_t func, char *name, void *data)
++{
++	struct task_struct *thread = kthread_run(func, data, name);
++
++	if (IS_ERR(thread)) {	/* kthread_run() reports any failure via ERR_PTR, not just -ENOMEM */
++		return NULL;
++	}
++
++	return (fh_thread_t *)thread;
++}
++
++int FH_THREAD_STOP(fh_thread_t *thread)
++{
++	return kthread_stop((struct task_struct *)thread);
++}
++
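++/*
++ * Note: the Linux port relies on kthread's own stop flag, so unlike the BSD
++ * ports this FH_THREAD_SHOULD_STOP() takes no fh_thread_t argument.
++ */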
++fh_bool_t FH_THREAD_SHOULD_STOP(void)
++{
++	return kthread_should_stop();
++}
++
++
++/* tasklets
++ - run in interrupt context (cannot sleep)
++ - each tasklet runs on a single CPU
++ - different tasklets can be running simultaneously on different CPUs
++ */
++struct fh_tasklet {
++	struct tasklet_struct t;
++	fh_tasklet_callback_t cb;
++	void *data;
++};
++
++static void tasklet_callback(unsigned long data)
++{
++	fh_tasklet_t *t = (fh_tasklet_t *)data;
++	t->cb(t->data);
++}
++
++fh_tasklet_t *FH_TASK_ALLOC(char *name, fh_tasklet_callback_t cb, void *data)
++{
++	fh_tasklet_t *t = FH_ALLOC(sizeof(*t));
++
++	if (t) {
++		t->cb = cb;
++		t->data = data;
++		tasklet_init(&t->t, tasklet_callback, (unsigned long)t);
++	} else {
++		FH_ERROR("Cannot allocate memory for tasklet\n");
++	}
++
++	return t;
++}
++
++void FH_TASK_FREE(fh_tasklet_t *task)
++{
++	FH_FREE(task);
++}
++
++void FH_TASK_SCHEDULE(fh_tasklet_t *task)
++{
++	tasklet_schedule(&task->t);
++}
++
++
++/* workqueues
++ - run in process context (can sleep)
++ */
++typedef struct work_container {
++	fh_work_callback_t cb;
++	void *data;
++	fh_workq_t *wq;
++	char *name;
++
++#ifdef DEBUG
++	FH_CIRCLEQ_ENTRY(work_container) entry;
++#endif
++	struct delayed_work work;
++} work_container_t;
++
++#ifdef DEBUG
++FH_CIRCLEQ_HEAD(work_container_queue, work_container);
++#endif
++
++struct fh_workq {
++	struct workqueue_struct *wq;
++	fh_spinlock_t *lock;
++	fh_waitq_t *waitq;
++	int pending;
++
++#ifdef DEBUG
++	struct work_container_queue entries;
++#endif
++};
++
++static void do_work(struct work_struct *work)
++{
++	fh_irqflags_t flags;
++	struct delayed_work *dw = container_of(work, struct delayed_work, work);
++	work_container_t *container = container_of(dw, struct work_container, work);
++	fh_workq_t *wq = container->wq;
++
++	container->cb(container->data);
++
++#ifdef DEBUG
++	FH_CIRCLEQ_REMOVE(&wq->entries, container, entry);
++#endif
++	FH_DEBUG("Work done: %s, container=%p", container->name, container);
++	if (container->name) {
++		FH_FREE(container->name);
++	}
++	FH_FREE(container);
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	wq->pending--;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++}
++
++static int work_done(void *data)
++{
++	fh_workq_t *workq = (fh_workq_t *)data;
++	return workq->pending == 0;
++}
++
++int FH_WORKQ_WAIT_WORK_DONE(fh_workq_t *workq, int timeout)
++{
++	return FH_WAITQ_WAIT_TIMEOUT(workq->waitq, work_done, workq, timeout);
++}
++
++fh_workq_t *FH_WORKQ_ALLOC(char *name)
++{
++	fh_workq_t *wq = FH_ALLOC(sizeof(*wq));
++
++	if (!wq) {
++		return NULL;
++	}
++
++	wq->wq = create_singlethread_workqueue(name);
++	if (!wq->wq) {
++		goto no_wq;
++	}
++
++	wq->pending = 0;
++
++	wq->lock = FH_SPINLOCK_ALLOC();
++	if (!wq->lock) {
++		goto no_lock;
++	}
++
++	wq->waitq = FH_WAITQ_ALLOC();
++	if (!wq->waitq) {
++		goto no_waitq;
++	}
++
++#ifdef DEBUG
++	FH_CIRCLEQ_INIT(&wq->entries);
++#endif
++	return wq;
++
++ no_waitq:
++	FH_SPINLOCK_FREE(wq->lock);
++ no_lock:
++	destroy_workqueue(wq->wq);
++ no_wq:
++	FH_FREE(wq);
++
++	return NULL;
++}
++
++void FH_WORKQ_FREE(fh_workq_t *wq)
++{
++#ifdef DEBUG
++	if (wq->pending != 0) {
++		struct work_container *wc;
++		FH_ERROR("Destroying work queue with pending work");
++		FH_CIRCLEQ_FOREACH(wc, &wq->entries, entry) {
++			FH_ERROR("Work %s still pending", wc->name);
++		}
++	}
++#endif
++	destroy_workqueue(wq->wq);
++	FH_SPINLOCK_FREE(wq->lock);
++	FH_WAITQ_FREE(wq->waitq);
++	FH_FREE(wq);
++}
++
++bool FH_SCHEDULE_SYSTEM_WORK(struct work_struct *work)
++{
++	return queue_work(system_wq, work);
++}
++
++void FH_WORKQ_SCHEDULE(fh_workq_t *wq, fh_work_callback_t cb, void *data,
++			char *format, ...)
++{
++	fh_irqflags_t flags;
++	work_container_t *container;
++	static char name[128];
++	va_list args;
++
++	va_start(args, format);
++	FH_VSNPRINTF(name, 128, format, args);
++	va_end(args);
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	wq->pending++;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++
++	container = FH_ALLOC_ATOMIC(sizeof(*container));
++	if (!container) {
++		FH_ERROR("Cannot allocate memory for container\n");
++		return;
++	}
++
++	container->name = FH_STRDUP(name);
++	if (!container->name) {
++		FH_ERROR("Cannot allocate memory for container->name\n");
++		FH_FREE(container);
++		return;
++	}
++
++	container->cb = cb;
++	container->data = data;
++	container->wq = wq;
++	FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
++	INIT_WORK(&container->work.work, do_work);
++
++#ifdef DEBUG
++	FH_CIRCLEQ_INSERT_TAIL(&wq->entries, container, entry);
++#endif
++	queue_work(wq->wq, &container->work.work);
++}
++
++void FH_WORKQ_SCHEDULE_DELAYED(fh_workq_t *wq, fh_work_callback_t cb,
++				void *data, uint32_t time, char *format, ...)
++{
++	fh_irqflags_t flags;
++	work_container_t *container;
++	static char name[128];
++	va_list args;
++
++	va_start(args, format);
++	FH_VSNPRINTF(name, 128, format, args);
++	va_end(args);
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	wq->pending++;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++
++	container = FH_ALLOC_ATOMIC(sizeof(*container));
++	if (!container) {
++		FH_ERROR("Cannot allocate memory for container\n");
++		return;
++	}
++
++	container->name = FH_STRDUP(name);
++	if (!container->name) {
++		FH_ERROR("Cannot allocate memory for container->name\n");
++		FH_FREE(container);
++		return;
++	}
++
++	container->cb = cb;
++	container->data = data;
++	container->wq = wq;
++	FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
++	INIT_DELAYED_WORK(&container->work, do_work);
++
++#ifdef DEBUG
++	FH_CIRCLEQ_INSERT_TAIL(&wq->entries, container, entry);
++#endif
++	queue_delayed_work(wq->wq, &container->work, msecs_to_jiffies(time));
++}
++
++int FH_WORKQ_PENDING(fh_workq_t *wq)
++{
++	return wq->pending;
++}
++
++
++#ifdef FH_LIBMODULE
++
++#ifdef FH_CCLIB
++/* CC */
++EXPORT_SYMBOL(fh_cc_if_alloc);
++EXPORT_SYMBOL(fh_cc_if_free);
++EXPORT_SYMBOL(fh_cc_clear);
++EXPORT_SYMBOL(fh_cc_add);
++EXPORT_SYMBOL(fh_cc_remove);
++EXPORT_SYMBOL(fh_cc_change);
++EXPORT_SYMBOL(fh_cc_data_for_save);
++EXPORT_SYMBOL(fh_cc_restore_from_data);
++EXPORT_SYMBOL(fh_cc_match_chid);
++EXPORT_SYMBOL(fh_cc_match_cdid);
++EXPORT_SYMBOL(fh_cc_ck);
++EXPORT_SYMBOL(fh_cc_chid);
++EXPORT_SYMBOL(fh_cc_cdid);
++EXPORT_SYMBOL(fh_cc_name);
++#endif	/* FH_CCLIB */
++
++#ifdef FH_CRYPTOLIB
++# ifndef CONFIG_MACH_IPMATE
++/* Modpow */
++EXPORT_SYMBOL(fh_modpow);
++
++/* DH */
++EXPORT_SYMBOL(fh_dh_modpow);
++EXPORT_SYMBOL(fh_dh_derive_keys);
++EXPORT_SYMBOL(fh_dh_pk);
++# endif	/* CONFIG_MACH_IPMATE */
++
++/* Crypto */
++EXPORT_SYMBOL(fh_wusb_aes_encrypt);
++EXPORT_SYMBOL(fh_wusb_cmf);
++EXPORT_SYMBOL(fh_wusb_prf);
++EXPORT_SYMBOL(fh_wusb_fill_ccm_nonce);
++EXPORT_SYMBOL(fh_wusb_gen_nonce);
++EXPORT_SYMBOL(fh_wusb_gen_key);
++EXPORT_SYMBOL(fh_wusb_gen_mic);
++#endif	/* FH_CRYPTOLIB */
++
++/* Notification */
++#ifdef FH_NOTIFYLIB
++EXPORT_SYMBOL(fh_alloc_notification_manager);
++EXPORT_SYMBOL(fh_free_notification_manager);
++EXPORT_SYMBOL(fh_register_notifier);
++EXPORT_SYMBOL(fh_unregister_notifier);
++EXPORT_SYMBOL(fh_add_observer);
++EXPORT_SYMBOL(fh_remove_observer);
++EXPORT_SYMBOL(fh_notify);
++#endif
++
++/* Memory Debugging Routines */
++#ifdef FH_DEBUG_MEMORY
++EXPORT_SYMBOL(fh_alloc_debug);
++EXPORT_SYMBOL(fh_alloc_atomic_debug);
++EXPORT_SYMBOL(fh_free_debug);
++EXPORT_SYMBOL(fh_dma_alloc_debug);
++EXPORT_SYMBOL(fh_dma_free_debug);
++#endif
++
++EXPORT_SYMBOL(FH_MEMSET);
++EXPORT_SYMBOL(FH_MEMCPY);
++EXPORT_SYMBOL(FH_MEMMOVE);
++EXPORT_SYMBOL(FH_MEMCMP);
++EXPORT_SYMBOL(FH_STRNCMP);
++EXPORT_SYMBOL(FH_STRCMP);
++EXPORT_SYMBOL(FH_STRLEN);
++EXPORT_SYMBOL(FH_STRCPY);
++EXPORT_SYMBOL(FH_STRDUP);
++EXPORT_SYMBOL(FH_ATOI);
++EXPORT_SYMBOL(FH_ATOUI);
++
++#ifdef FH_UTFLIB
++EXPORT_SYMBOL(FH_UTF8_TO_UTF16LE);
++#endif	/* FH_UTFLIB */
++
++EXPORT_SYMBOL(FH_IN_IRQ);
++EXPORT_SYMBOL(FH_IN_BH);
++EXPORT_SYMBOL(FH_VPRINTF);
++EXPORT_SYMBOL(FH_VSNPRINTF);
++EXPORT_SYMBOL(FH_PRINTF);
++EXPORT_SYMBOL(FH_SPRINTF);
++EXPORT_SYMBOL(FH_SNPRINTF);
++EXPORT_SYMBOL(__FH_WARN);
++EXPORT_SYMBOL(__FH_ERROR);
++EXPORT_SYMBOL(FH_EXCEPTION);
++
++#ifdef DEBUG
++EXPORT_SYMBOL(__FH_DEBUG);
++#endif
++
++EXPORT_SYMBOL(__FH_DMA_ALLOC);
++EXPORT_SYMBOL(__FH_DMA_ALLOC_ATOMIC);
++EXPORT_SYMBOL(__FH_DMA_FREE);
++EXPORT_SYMBOL(__FH_ALLOC);
++EXPORT_SYMBOL(__FH_ALLOC_ATOMIC);
++EXPORT_SYMBOL(__FH_FREE);
++
++#ifdef FH_CRYPTOLIB
++EXPORT_SYMBOL(FH_RANDOM_BYTES);
++EXPORT_SYMBOL(FH_AES_CBC);
++EXPORT_SYMBOL(FH_SHA256);
++EXPORT_SYMBOL(FH_HMAC_SHA256);
++#endif
++
++EXPORT_SYMBOL(FH_CPU_TO_LE32);
++EXPORT_SYMBOL(FH_CPU_TO_BE32);
++EXPORT_SYMBOL(FH_LE32_TO_CPU);
++EXPORT_SYMBOL(FH_BE32_TO_CPU);
++EXPORT_SYMBOL(FH_CPU_TO_LE16);
++EXPORT_SYMBOL(FH_CPU_TO_BE16);
++EXPORT_SYMBOL(FH_LE16_TO_CPU);
++EXPORT_SYMBOL(FH_BE16_TO_CPU);
++EXPORT_SYMBOL(FH_READ_REG32);
++EXPORT_SYMBOL(FH_WRITE_REG32);
++EXPORT_SYMBOL(FH_MODIFY_REG32);
++
++#if 0
++EXPORT_SYMBOL(FH_READ_REG64);
++EXPORT_SYMBOL(FH_WRITE_REG64);
++EXPORT_SYMBOL(FH_MODIFY_REG64);
++#endif
++
++EXPORT_SYMBOL(FH_SPINLOCK_ALLOC);
++EXPORT_SYMBOL(FH_SPINLOCK_FREE);
++EXPORT_SYMBOL(FH_SPINLOCK);
++EXPORT_SYMBOL(FH_SPINUNLOCK);
++EXPORT_SYMBOL(FH_SPINLOCK_IRQSAVE);
++EXPORT_SYMBOL(FH_SPINUNLOCK_IRQRESTORE);
++EXPORT_SYMBOL(FH_MUTEX_ALLOC);
++
++#if (!defined(FH_LINUX) || !defined(CONFIG_DEBUG_MUTEXES))
++EXPORT_SYMBOL(FH_MUTEX_FREE);
++#endif
++
++EXPORT_SYMBOL(FH_MUTEX_LOCK);
++EXPORT_SYMBOL(FH_MUTEX_TRYLOCK);
++EXPORT_SYMBOL(FH_MUTEX_UNLOCK);
++EXPORT_SYMBOL(FH_UDELAY);
++EXPORT_SYMBOL(FH_MDELAY);
++EXPORT_SYMBOL(FH_MSLEEP);
++EXPORT_SYMBOL(FH_TIME);
++EXPORT_SYMBOL(FH_TIMER_ALLOC);
++EXPORT_SYMBOL(FH_TIMER_FREE);
++EXPORT_SYMBOL(FH_TIMER_SCHEDULE);
++EXPORT_SYMBOL(FH_TIMER_CANCEL);
++EXPORT_SYMBOL(FH_WAITQ_ALLOC);
++EXPORT_SYMBOL(FH_WAITQ_FREE);
++EXPORT_SYMBOL(FH_WAITQ_WAIT);
++EXPORT_SYMBOL(FH_WAITQ_WAIT_TIMEOUT);
++EXPORT_SYMBOL(FH_WAITQ_TRIGGER);
++EXPORT_SYMBOL(FH_WAITQ_ABORT);
++EXPORT_SYMBOL(FH_THREAD_RUN);
++EXPORT_SYMBOL(FH_THREAD_STOP);
++EXPORT_SYMBOL(FH_THREAD_SHOULD_STOP);
++EXPORT_SYMBOL(FH_TASK_ALLOC);
++EXPORT_SYMBOL(FH_TASK_FREE);
++EXPORT_SYMBOL(FH_TASK_SCHEDULE);
++EXPORT_SYMBOL(FH_WORKQ_WAIT_WORK_DONE);
++EXPORT_SYMBOL(FH_WORKQ_ALLOC);
++EXPORT_SYMBOL(FH_WORKQ_FREE);
++EXPORT_SYMBOL(FH_SCHEDULE_SYSTEM_WORK);
++EXPORT_SYMBOL(FH_WORKQ_SCHEDULE);
++EXPORT_SYMBOL(FH_WORKQ_SCHEDULE_DELAYED);
++EXPORT_SYMBOL(FH_WORKQ_PENDING);
++
++static int fh_common_port_init_module(void)
++{
++	int result = 0;
++
++	printk(KERN_DEBUG "Module fh_common_port init\n");
++
++#ifdef FH_DEBUG_MEMORY
++	result = fh_memory_debug_start(NULL);
++	if (result) {
++		printk(KERN_ERR
++		       "fh_memory_debug_start() failed with error %d\n",
++		       result);
++		return result;
++	}
++#endif
++
++#ifdef FH_NOTIFYLIB
++	result = fh_alloc_notification_manager(NULL, NULL);
++	if (result) {
++		printk(KERN_ERR
++		       "fh_alloc_notification_manager() failed with error %d\n",
++		       result);
++		return result;
++	}
++#endif
++	return result;
++}
++
++static void fh_common_port_exit_module(void)
++{
++	printk(KERN_DEBUG "Module fh_common_port exit\n");
++
++#ifdef FH_NOTIFYLIB
++	fh_free_notification_manager();
++#endif
++
++#ifdef FH_DEBUG_MEMORY
++	fh_memory_debug_stop();
++#endif
++}
++
++module_init(fh_common_port_init_module);
++module_exit(fh_common_port_exit_module);
++
++MODULE_DESCRIPTION("FH Common Library - Portable version");
++MODULE_AUTHOR("Synopsys Inc.");
++MODULE_LICENSE("GPL");
++
++#endif	/* FH_LIBMODULE */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_common_nbsd.c b/drivers/usb/host/fh_otg/fh_common_port/fh_common_nbsd.c
+new file mode 100644
+index 00000000..188eabc5
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_common_nbsd.c
+@@ -0,0 +1,1275 @@
++#include "fh_os.h"
++#include "fh_list.h"
++
++#ifdef FH_CCLIB
++# include "fh_cc.h"
++#endif
++
++#ifdef FH_CRYPTOLIB
++# include "fh_modpow.h"
++# include "fh_dh.h"
++# include "fh_crypto.h"
++#endif
++
++#ifdef FH_NOTIFYLIB
++# include "fh_notifier.h"
++#endif
++
++/* OS-Level Implementations */
++
++/* This is the NetBSD 4.0.1 kernel implementation of the FH platform library. */
++
++
++/* MISC */
++
++void *FH_MEMSET(void *dest, uint8_t byte, uint32_t size)
++{
++	return memset(dest, byte, size);
++}
++
++void *FH_MEMCPY(void *dest, void const *src, uint32_t size)
++{
++	return memcpy(dest, src, size);
++}
++
++void *FH_MEMMOVE(void *dest, void *src, uint32_t size)
++{
++	bcopy(src, dest, size);
++	return dest;
++}
++
++int FH_MEMCMP(void *m1, void *m2, uint32_t size)
++{
++	return memcmp(m1, m2, size);
++}
++
++int FH_STRNCMP(void *s1, void *s2, uint32_t size)
++{
++	return strncmp(s1, s2, size);
++}
++
++int FH_STRCMP(void *s1, void *s2)
++{
++	return strcmp(s1, s2);
++}
++
++int FH_STRLEN(char const *str)
++{
++	return strlen(str);
++}
++
++char *FH_STRCPY(char *to, char const *from)
++{
++	return strcpy(to, from);
++}
++
++char *FH_STRDUP(char const *str)
++{
++	int len = FH_STRLEN(str) + 1;
++	char *new = FH_ALLOC_ATOMIC(len);
++
++	if (!new) {
++		return NULL;
++	}
++
++	FH_MEMCPY(new, str, len);
++	return new;
++}
++
++int FH_ATOI(char *str, int32_t *value)
++{
++	char *end = NULL;
++
++	/* NetBSD doesn't have 'strtol' in the kernel, but 'strtoul'
++	 * should be equivalent on 2's complement machines
++	 */
++	*value = strtoul(str, &end, 0);
++	if (*end == '\0') {
++		return 0;
++	}
++
++	return -1;
++}
++
++int FH_ATOUI(char *str, uint32_t *value)
++{
++	char *end = NULL;
++
++	*value = strtoul(str, &end, 0);
++	if (*end == '\0') {
++		return 0;
++	}
++
++	return -1;
++}
++
++
++#ifdef FH_UTFLIB
++/* From usbstring.c */
++
++int FH_UTF8_TO_UTF16LE(uint8_t const *s, uint16_t *cp, unsigned len)
++{
++	int	count = 0;
++	u8	c;
++	u16	uchar;
++
++	/* this insists on correct encodings, though not minimal ones.
++	 * BUT it currently rejects legit 4-byte UTF-8 code points,
++	 * which need surrogate pairs.  (Unicode 3.1 can use them.)
++	 */
++	while (len != 0 && (c = (u8) *s++) != 0) {
++		if (unlikely(c & 0x80)) {
++			// 2-byte sequence:
++			// 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
++			if ((c & 0xe0) == 0xc0) {
++				uchar = (c & 0x1f) << 6;
++
++				c = (u8) *s++;
++				if ((c & 0xc0) != 0x80)	/* continuation bytes are 10xxxxxx */
++					goto fail;
++				c &= 0x3f;
++				uchar |= c;
++
++			// 3-byte sequence (most CJKV characters):
++			// zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
++			} else if ((c & 0xf0) == 0xe0) {
++				uchar = (c & 0x0f) << 12;
++
++				c = (u8) *s++;
++				if ((c & 0xc0) != 0x80)
++					goto fail;
++				c &= 0x3f;
++				uchar |= c << 6;
++
++				c = (u8) *s++;
++				if ((c & 0xc0) != 0x80)
++					goto fail;
++				c &= 0x3f;
++				uchar |= c;
++
++				/* no bogus surrogates */
++				if (0xd800 <= uchar && uchar <= 0xdfff)
++					goto fail;
++
++			// 4-byte sequence (surrogate pairs, currently rare):
++			// 11101110wwwwzzzzyy + 110111yyyyxxxxxx
++			//     = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
++			// (uuuuu = wwww + 1)
++			// FIXME accept the surrogate code points (only)
++			} else
++				goto fail;
++		} else
++			uchar = c;
++		put_unaligned (cpu_to_le16 (uchar), cp++);
++		count++;
++		len--;
++	}
++	return count;
++fail:
++	return -1;
++}
++
++#endif	/* FH_UTFLIB */
++
++
++/* fh_debug.h */
++
++fh_bool_t FH_IN_IRQ(void)
++{
++//	return in_irq();
++	return 0;
++}
++
++fh_bool_t FH_IN_BH(void)
++{
++//	return in_softirq();
++	return 0;
++}
++
++void FH_VPRINTF(char *format, va_list args)
++{
++	vprintf(format, args);
++}
++
++int FH_VSNPRINTF(char *str, int size, char *format, va_list args)
++{
++	return vsnprintf(str, size, format, args);
++}
++
++void FH_PRINTF(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++
++int FH_SPRINTF(char *buffer, char *format, ...)
++{
++	int retval;
++	va_list args;
++
++	va_start(args, format);
++	retval = vsprintf(buffer, format, args);
++	va_end(args);
++	return retval;
++}
++
++int FH_SNPRINTF(char *buffer, int size, char *format, ...)
++{
++	int retval;
++	va_list args;
++
++	va_start(args, format);
++	retval = vsnprintf(buffer, size, format, args);
++	va_end(args);
++	return retval;
++}
++
++void __FH_WARN(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++
++void __FH_ERROR(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++
++void FH_EXCEPTION(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++//	BUG_ON(1);	/* no BUG_ON() on NetBSD; panic() would be the closest equivalent */
++}
++
++#ifdef DEBUG
++void __FH_DEBUG(char *format, ...)
++{
++	va_list args;
++
++	va_start(args, format);
++	FH_VPRINTF(format, args);
++	va_end(args);
++}
++#endif
++
++
++/* fh_mem.h */
++
++#if 0
++fh_pool_t *FH_DMA_POOL_CREATE(uint32_t size,
++				uint32_t align,
++				uint32_t alloc)
++{
++	struct dma_pool *pool = dma_pool_create("Pool", NULL,
++						size, align, alloc);
++	return (fh_pool_t *)pool;
++}
++
++void FH_DMA_POOL_DESTROY(fh_pool_t *pool)
++{
++	dma_pool_destroy((struct dma_pool *)pool);
++}
++
++void *FH_DMA_POOL_ALLOC(fh_pool_t *pool, uint64_t *dma_addr)
++{
++//	return dma_pool_alloc((struct dma_pool *)pool, GFP_KERNEL, dma_addr);
++	return dma_pool_alloc((struct dma_pool *)pool, M_WAITOK, dma_addr);
++}
++
++void *FH_DMA_POOL_ZALLOC(fh_pool_t *pool, uint64_t *dma_addr)
++{
++	void *vaddr = FH_DMA_POOL_ALLOC(pool, dma_addr);
++	memset(..);
++}
++
++void FH_DMA_POOL_FREE(fh_pool_t *pool, void *vaddr, void *daddr)
++{
++	dma_pool_free(pool, vaddr, daddr);
++}
++#endif
++
++void *__FH_DMA_ALLOC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr)
++{
++	fh_dmactx_t *dma = (fh_dmactx_t *)dma_ctx;
++	int error;
++
++	error = bus_dmamem_alloc(dma->dma_tag, size, 1, size, dma->segs,
++				 sizeof(dma->segs) / sizeof(dma->segs[0]),
++				 &dma->nsegs, BUS_DMA_NOWAIT);
++	if (error) {
++		printf("%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__,
++		       (uintmax_t)size, error);
++		goto fail_0;
++	}
++
++	error = bus_dmamem_map(dma->dma_tag, dma->segs, dma->nsegs, size,
++			       (caddr_t *)&dma->dma_vaddr,
++			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
++	if (error) {
++		printf("%s: bus_dmamem_map failed: %d\n", __func__, error);
++		goto fail_1;
++	}
++
++	error = bus_dmamap_create(dma->dma_tag, size, 1, size, 0,
++				  BUS_DMA_NOWAIT, &dma->dma_map);
++	if (error) {
++		printf("%s: bus_dmamap_create failed: %d\n", __func__, error);
++		goto fail_2;
++	}
++
++	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
++				size, NULL, BUS_DMA_NOWAIT);
++	if (error) {
++		printf("%s: bus_dmamap_load failed: %d\n", __func__, error);
++		goto fail_3;
++	}
++
++	dma->dma_paddr = (bus_addr_t)dma->segs[0].ds_addr;
++	*dma_addr = dma->dma_paddr;
++	return dma->dma_vaddr;
++
++fail_3:
++	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
++fail_2:
++	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
++fail_1:
++	bus_dmamem_free(dma->dma_tag, dma->segs, dma->nsegs);
++fail_0:
++	dma->dma_map = NULL;
++	dma->dma_vaddr = NULL;
++	dma->nsegs = 0;
++
++	return NULL;
++}
++
++void __FH_DMA_FREE(void *dma_ctx, uint32_t size, void *virt_addr, fh_dma_t dma_addr)
++{
++	fh_dmactx_t *dma = (fh_dmactx_t *)dma_ctx;
++
++	if (dma->dma_map != NULL) {
++		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, size,
++				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
++		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
++		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
++		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
++		bus_dmamem_free(dma->dma_tag, dma->segs, dma->nsegs);
++		dma->dma_paddr = 0;
++		dma->dma_map = NULL;
++		dma->dma_vaddr = NULL;
++		dma->nsegs = 0;
++	}
++}
++
++void *__FH_ALLOC(void *mem_ctx, uint32_t size)
++{
++	return malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
++}
++
++void *__FH_ALLOC_ATOMIC(void *mem_ctx, uint32_t size)
++{
++	return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
++}
++
++void __FH_FREE(void *mem_ctx, void *addr)
++{
++	free(addr, M_DEVBUF);
++}
++
++
++#ifdef FH_CRYPTOLIB
++/* fh_crypto.h */
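++
++/*
++ * NOTE: the routines below were carried over from the Linux port;
++ * crypto_alloc_blkcipher(), crypto_alloc_hash() and printk() are Linux
++ * kernel APIs with no NetBSD counterparts, so this block will not build
++ * here if FH_CRYPTOLIB is enabled.
++ */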
++
++void FH_RANDOM_BYTES(uint8_t *buffer, uint32_t length)
++{
++	get_random_bytes(buffer, length);
++}
++
++int FH_AES_CBC(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t iv[16], uint8_t *out)
++{
++	struct crypto_blkcipher *tfm;
++	struct blkcipher_desc desc;
++	struct scatterlist sgd;
++	struct scatterlist sgs;
++
++	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
++	if (tfm == NULL) {
++		printk("failed to load transform for aes CBC\n");
++		return -1;
++	}
++
++	crypto_blkcipher_setkey(tfm, key, keylen);
++	crypto_blkcipher_set_iv(tfm, iv, 16);
++
++	sg_init_one(&sgd, out, messagelen);
++	sg_init_one(&sgs, message, messagelen);
++
++	desc.tfm = tfm;
++	desc.flags = 0;
++
++	if (crypto_blkcipher_encrypt(&desc, &sgd, &sgs, messagelen)) {
++		crypto_free_blkcipher(tfm);
++		FH_ERROR("AES CBC encryption failed");
++		return -1;
++	}
++
++	crypto_free_blkcipher(tfm);
++	return 0;
++}
++
++int FH_SHA256(uint8_t *message, uint32_t len, uint8_t *out)
++{
++	struct crypto_hash *tfm;
++	struct hash_desc desc;
++	struct scatterlist sg;
++
++	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
++	if (IS_ERR(tfm)) {
++		FH_ERROR("Failed to load transform for sha256: %ld", PTR_ERR(tfm));
++		return 0;
++	}
++	desc.tfm = tfm;
++	desc.flags = 0;
++
++	sg_init_one(&sg, message, len);
++	crypto_hash_digest(&desc, &sg, len, out);
++	crypto_free_hash(tfm);
++
++	return 1;
++}
++
++int FH_HMAC_SHA256(uint8_t *message, uint32_t messagelen,
++		    uint8_t *key, uint32_t keylen, uint8_t *out)
++{
++	struct crypto_hash *tfm;
++	struct hash_desc desc;
++	struct scatterlist sg;
++
++	tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
++	if (IS_ERR(tfm)) {
++		FH_ERROR("Failed to load transform for hmac(sha256): %ld", PTR_ERR(tfm));
++		return 0;
++	}
++	desc.tfm = tfm;
++	desc.flags = 0;
++
++	sg_init_one(&sg, message, messagelen);
++	crypto_hash_setkey(tfm, key, keylen);
++	crypto_hash_digest(&desc, &sg, messagelen, out);
++	crypto_free_hash(tfm);
++
++	return 1;
++}
++
++#endif	/* FH_CRYPTOLIB */
++
++
++/* Byte Ordering Conversions */
++
++uint32_t FH_CPU_TO_LE32(uint32_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint32_t FH_CPU_TO_BE32(uint32_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint32_t FH_LE32_TO_CPU(uint32_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint32_t FH_BE32_TO_CPU(uint32_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++
++	return (u_p[3] | (u_p[2] << 8) | (u_p[1] << 16) | (u_p[0] << 24));
++#endif
++}
++
++uint16_t FH_CPU_TO_LE16(uint16_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++uint16_t FH_CPU_TO_BE16(uint16_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++uint16_t FH_LE16_TO_CPU(uint16_t *p)
++{
++#ifdef __LITTLE_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++uint16_t FH_BE16_TO_CPU(uint16_t *p)
++{
++#ifdef __BIG_ENDIAN
++	return *p;
++#else
++	uint8_t *u_p = (uint8_t *)p;
++	return (u_p[1] | (u_p[0] << 8));
++#endif
++}
++
++
++/* Registers */
++
++uint32_t FH_READ_REG32(void *io_ctx, uint32_t volatile *reg)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	return bus_space_read_4(io->iot, io->ioh, ior);
++}
++
++#if 0
++uint64_t FH_READ_REG64(void *io_ctx, uint64_t volatile *reg)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	return bus_space_read_8(io->iot, io->ioh, ior);
++}
++#endif
++
++void FH_WRITE_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t value)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	bus_space_write_4(io->iot, io->ioh, ior, value);
++}
++
++#if 0
++void FH_WRITE_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t value)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	bus_space_write_8(io->iot, io->ioh, ior, value);
++}
++#endif
++
++void FH_MODIFY_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t clear_mask,
++		      uint32_t set_mask)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	bus_space_write_4(io->iot, io->ioh, ior,
++			  (bus_space_read_4(io->iot, io->ioh, ior) &
++			   ~clear_mask) | set_mask);
++}
++
++#if 0
++void FH_MODIFY_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t clear_mask,
++		      uint64_t set_mask)
++{
++	fh_ioctx_t *io = (fh_ioctx_t *)io_ctx;
++	bus_size_t ior = (bus_size_t)reg;
++
++	bus_space_write_8(io->iot, io->ioh, ior,
++			  (bus_space_read_8(io->iot, io->ioh, ior) &
++			   ~clear_mask) | set_mask);
++}
++#endif
++
++
++/* Locking */
++
++fh_spinlock_t *FH_SPINLOCK_ALLOC(void)
++{
++	struct simplelock *sl = FH_ALLOC(sizeof(*sl));
++
++	if (!sl) {
++		FH_ERROR("Cannot allocate memory for spinlock");
++		return NULL;
++	}
++
++	simple_lock_init(sl);
++	return (fh_spinlock_t *)sl;
++}
++
++void FH_SPINLOCK_FREE(fh_spinlock_t *lock)
++{
++	struct simplelock *sl = (struct simplelock *)lock;
++
++	FH_FREE(sl);
++}
++
++void FH_SPINLOCK(fh_spinlock_t *lock)
++{
++	simple_lock((struct simplelock *)lock);
++}
++
++void FH_SPINUNLOCK(fh_spinlock_t *lock)
++{
++	simple_unlock((struct simplelock *)lock);
++}
++
++void FH_SPINLOCK_IRQSAVE(fh_spinlock_t *lock, fh_irqflags_t *flags)
++{
++	simple_lock((struct simplelock *)lock);
++	*flags = splbio();
++}
++
++void FH_SPINUNLOCK_IRQRESTORE(fh_spinlock_t *lock, fh_irqflags_t flags)
++{
++	splx(flags);
++	simple_unlock((struct simplelock *)lock);
++}
++
++fh_mutex_t *FH_MUTEX_ALLOC(void)
++{
++	fh_mutex_t *mutex = FH_ALLOC(sizeof(struct lock));
++
++	if (!mutex) {
++		FH_ERROR("Cannot allocate memory for mutex");
++		return NULL;
++	}
++
++	lockinit((struct lock *)mutex, 0, "dw3mtx", 0, 0);
++	return mutex;
++}
++
++#if (defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES))
++#else
++void FH_MUTEX_FREE(fh_mutex_t *mutex)
++{
++	FH_FREE(mutex);
++}
++#endif
++
++void FH_MUTEX_LOCK(fh_mutex_t *mutex)
++{
++	lockmgr((struct lock *)mutex, LK_EXCLUSIVE, NULL);
++}
++
++int FH_MUTEX_TRYLOCK(fh_mutex_t *mutex)
++{
++	int status;
++
++	status = lockmgr((struct lock *)mutex, LK_EXCLUSIVE | LK_NOWAIT, NULL);
++	return status == 0;
++}
++
++void FH_MUTEX_UNLOCK(fh_mutex_t *mutex)
++{
++	lockmgr((struct lock *)mutex, LK_RELEASE, NULL);
++}
++
++
++/* Timing */
++
++void FH_UDELAY(uint32_t usecs)
++{
++	DELAY(usecs);
++}
++
++void FH_MDELAY(uint32_t msecs)
++{
++	/* guard against msecs == 0, which would underflow in a do/while */
++	while (msecs--) {
++		DELAY(1000);
++	}
++}
++
++void FH_MSLEEP(uint32_t msecs)
++{
++	struct timeval tv;
++
++	tv.tv_sec = msecs / 1000;
++	tv.tv_usec = (msecs - tv.tv_sec * 1000) * 1000;
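++
++	/* &tv is a wait channel that nothing ever wakes, so this reliably
++	 * sleeps for the full tvtohz(&tv) ticks */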
++	tsleep(&tv, 0, "dw3slp", tvtohz(&tv));
++}
++
++uint32_t FH_TIME(void)
++{
++	struct timeval tv;
++
++	microuptime(&tv);	// or getmicrouptime? (less precise, but faster)
++	return tv.tv_sec * 1000 + tv.tv_usec / 1000;
++}
++
++
++/* Timers */
++
++struct fh_timer {
++	struct callout t;
++	char *name;
++	fh_spinlock_t *lock;
++	fh_timer_callback_t cb;
++	void *data;
++};
++
++fh_timer_t *FH_TIMER_ALLOC(char *name, fh_timer_callback_t cb, void *data)
++{
++	fh_timer_t *t = FH_ALLOC(sizeof(*t));
++
++	if (!t) {
++		FH_ERROR("Cannot allocate memory for timer");
++		return NULL;
++	}
++
++	callout_init(&t->t);
++
++	t->name = FH_STRDUP(name);
++	if (!t->name) {
++		FH_ERROR("Cannot allocate memory for timer->name");
++		goto no_name;
++	}
++
++	t->lock = FH_SPINLOCK_ALLOC();
++	if (!t->lock) {
++		FH_ERROR("Cannot allocate memory for timer->lock");
++		goto no_lock;
++	}
++
++	t->cb = cb;
++	t->data = data;
++
++	return t;
++
++ no_lock:
++	FH_FREE(t->name);
++ no_name:
++	FH_FREE(t);
++
++	return NULL;
++}
++
++void FH_TIMER_FREE(fh_timer_t *timer)
++{
++	callout_stop(&timer->t);
++	FH_SPINLOCK_FREE(timer->lock);
++	FH_FREE(timer->name);
++	FH_FREE(timer);
++}
++
++void FH_TIMER_SCHEDULE(fh_timer_t *timer, uint32_t time)
++{
++	struct timeval tv;
++
++	tv.tv_sec = time / 1000;
++	tv.tv_usec = (time - tv.tv_sec * 1000) * 1000;
++	callout_reset(&timer->t, tvtohz(&tv), timer->cb, timer->data);
++}
++
++void FH_TIMER_CANCEL(fh_timer_t *timer)
++{
++	callout_stop(&timer->t);
++}
++
++
++/* Wait Queues */
++
++struct fh_waitq {
++	struct simplelock lock;
++	int abort;
++};
++
++fh_waitq_t *FH_WAITQ_ALLOC(void)
++{
++	fh_waitq_t *wq = FH_ALLOC(sizeof(*wq));
++
++	if (!wq) {
++		FH_ERROR("Cannot allocate memory for waitqueue");
++		return NULL;
++	}
++
++	simple_lock_init(&wq->lock);
++	wq->abort = 0;
++
++	return wq;
++}
++
++void FH_WAITQ_FREE(fh_waitq_t *wq)
++{
++	FH_FREE(wq);
++}
++
++int32_t FH_WAITQ_WAIT(fh_waitq_t *wq, fh_waitq_condition_t cond, void *data)
++{
++	int ipl;
++	int result = 0;
++
++	simple_lock(&wq->lock);
++	ipl = splbio();
++
++	/* Skip the sleep if already aborted or triggered */
++	if (!wq->abort && !cond(data)) {
++		splx(ipl);
++		result = ltsleep(wq, PCATCH, "dw3wat", 0, &wq->lock); // infinite timeout
++		ipl = splbio();
++	}
++
++	if (result == 0) {			// awoken
++		if (wq->abort) {
++			wq->abort = 0;
++			result = -FH_E_ABORT;
++		} else {
++			result = 0;
++		}
++
++		splx(ipl);
++		simple_unlock(&wq->lock);
++	} else {
++		wq->abort = 0;
++		splx(ipl);
++		simple_unlock(&wq->lock);
++
++		if (result == ERESTART) {	// signaled - restart
++			result = -FH_E_RESTART;
++		} else {			// signaled - must be EINTR
++			result = -FH_E_ABORT;
++		}
++	}
++
++	return result;
++}
++
++int32_t FH_WAITQ_WAIT_TIMEOUT(fh_waitq_t *wq, fh_waitq_condition_t cond,
++			       void *data, int32_t msecs)
++{
++	struct timeval tv, tv1, tv2;
++	int ipl;
++	int result = 0;
++
++	tv.tv_sec = msecs / 1000;
++	tv.tv_usec = (msecs - tv.tv_sec * 1000) * 1000;
++
++	simple_lock(&wq->lock);
++	ipl = splbio();
++
++	/* Skip the sleep if already aborted or triggered */
++	if (!wq->abort && !cond(data)) {
++		splx(ipl);
++		getmicrouptime(&tv1);
++		result = ltsleep(wq, PCATCH, "dw3wto", tvtohz(&tv), &wq->lock);
++		getmicrouptime(&tv2);
++		ipl = splbio();
++	}
++
++	if (result == 0) {			// awoken
++		if (wq->abort) {
++			wq->abort = 0;
++			splx(ipl);
++			simple_unlock(&wq->lock);
++			result = -FH_E_ABORT;
++		} else {
++			splx(ipl);
++			simple_unlock(&wq->lock);
++
++			tv2.tv_usec -= tv1.tv_usec;
++			if (tv2.tv_usec < 0) {
++				tv2.tv_usec += 1000000;
++				tv2.tv_sec--;
++			}
++
++			tv2.tv_sec -= tv1.tv_sec;
++			result = tv2.tv_sec * 1000 + tv2.tv_usec / 1000;
++			result = msecs - result;
++			if (result <= 0)
++				result = 1;
++		}
++	} else {
++		wq->abort = 0;
++		splx(ipl);
++		simple_unlock(&wq->lock);
++
++		if (result == ERESTART) {	// signaled - restart
++			result = -FH_E_RESTART;
++
++		} else if (result == EINTR) {		// signaled - interrupt
++			result = -FH_E_ABORT;
++
++		} else {				// timed out
++			result = -FH_E_TIMEOUT;
++		}
++	}
++
++	return result;
++}
++
++void FH_WAITQ_TRIGGER(fh_waitq_t *wq)
++{
++	wakeup(wq);
++}
++
++void FH_WAITQ_ABORT(fh_waitq_t *wq)
++{
++	int ipl;
++
++	simple_lock(&wq->lock);
++	ipl = splbio();
++	wq->abort = 1;
++	wakeup(wq);
++	splx(ipl);
++	simple_unlock(&wq->lock);
++}
++
++
++/* Threading */
++
++struct fh_thread {
++	struct proc *proc;
++	int abort;
++};
++
++fh_thread_t *FH_THREAD_RUN(fh_thread_function_t func, char *name, void *data)
++{
++	int retval;
++	fh_thread_t *thread = FH_ALLOC(sizeof(*thread));
++
++	if (!thread) {
++		return NULL;
++	}
++
++	thread->abort = 0;
++	retval = kthread_create1((void (*)(void *))func, data, &thread->proc,
++				 "%s", name);
++	if (retval) {
++		FH_FREE(thread);
++		return NULL;
++	}
++
++	return thread;
++}
++
++int FH_THREAD_STOP(fh_thread_t *thread)
++{
++	int retval;
++
++	thread->abort = 1;
++	retval = tsleep(&thread->abort, 0, "dw3stp", 60 * hz);
++
++	if (retval == 0) {
++		/* FH_THREAD_EXIT() will free the thread struct */
++		return 0;
++	}
++
++	/* NOTE: We leak the thread struct if thread doesn't die */
++
++	if (retval == EWOULDBLOCK) {
++		return -FH_E_TIMEOUT;
++	}
++
++	return -FH_E_UNKNOWN;
++}
++
++fh_bool_t FH_THREAD_SHOULD_STOP(fh_thread_t *thread)
++{
++	return thread->abort;
++}
++
++void FH_THREAD_EXIT(fh_thread_t *thread)
++{
++	wakeup(&thread->abort);
++	FH_FREE(thread);
++	kthread_exit(0);
++}
++
++/* tasklets
++ - Runs in interrupt context (cannot sleep)
++ - Each tasklet runs on a single CPU
++ - Different tasklets can be running simultaneously on different CPUs
++ [ On NetBSD there is no corresponding mechanism; drivers don't have
++   bottom halves, so we just call the callback directly from FH_TASK_SCHEDULE() ]
++ */
++struct fh_tasklet {
++	fh_tasklet_callback_t cb;
++	void *data;
++};
++
++static void tasklet_callback(void *data)
++{
++	fh_tasklet_t *task = (fh_tasklet_t *)data;
++
++	task->cb(task->data);
++}
++
++fh_tasklet_t *FH_TASK_ALLOC(char *name, fh_tasklet_callback_t cb, void *data)
++{
++	fh_tasklet_t *task = FH_ALLOC(sizeof(*task));
++
++	if (task) {
++		task->cb = cb;
++		task->data = data;
++	} else {
++		FH_ERROR("Cannot allocate memory for tasklet");
++	}
++
++	return task;
++}
++
++void FH_TASK_FREE(fh_tasklet_t *task)
++{
++	FH_FREE(task);
++}
++
++void FH_TASK_SCHEDULE(fh_tasklet_t *task)
++{
++	tasklet_callback(task);
++}
++
++
++/* workqueues
++ - Runs in process context (can sleep)
++ */
++typedef struct work_container {
++	fh_work_callback_t cb;
++	void *data;
++	fh_workq_t *wq;
++	char *name;
++	int hz;
++	struct work task;
++} work_container_t;
++
++struct fh_workq {
++	struct workqueue *taskq;
++	fh_spinlock_t *lock;
++	fh_waitq_t *waitq;
++	int pending;
++	struct work_container *container;
++};
++
++static void do_work(struct work *task, void *data)
++{
++	fh_workq_t *wq = (fh_workq_t *)data;
++	work_container_t *container = wq->container;
++	fh_irqflags_t flags;
++
++	if (container->hz) {
++		tsleep(container, 0, "dw3wrk", container->hz);
++	}
++
++	container->cb(container->data);
++	FH_DEBUG("Work done: %s, container=%p", container->name, container);
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	if (container->name)
++		FH_FREE(container->name);
++	FH_FREE(container);
++	wq->pending--;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++}
++
++static int work_done(void *data)
++{
++	fh_workq_t *workq = (fh_workq_t *)data;
++
++	return workq->pending == 0;
++}
++
++int FH_WORKQ_WAIT_WORK_DONE(fh_workq_t *workq, int timeout)
++{
++	return FH_WAITQ_WAIT_TIMEOUT(workq->waitq, work_done, workq, timeout);
++}
++
++fh_workq_t *FH_WORKQ_ALLOC(char *name)
++{
++	int result;
++	fh_workq_t *wq = FH_ALLOC(sizeof(*wq));
++
++	if (!wq) {
++		FH_ERROR("Cannot allocate memory for workqueue");
++		return NULL;
++	}
++
++	result = workqueue_create(&wq->taskq, name, do_work, wq, 0 /*PWAIT*/,
++				  IPL_BIO, 0);
++	if (result) {
++		FH_ERROR("Cannot create workqueue");
++		goto no_taskq;
++	}
++
++	wq->pending = 0;
++
++	wq->lock = FH_SPINLOCK_ALLOC();
++	if (!wq->lock) {
++		FH_ERROR("Cannot allocate memory for spinlock");
++		goto no_lock;
++	}
++
++	wq->waitq = FH_WAITQ_ALLOC();
++	if (!wq->waitq) {
++		FH_ERROR("Cannot allocate memory for waitqueue");
++		goto no_waitq;
++	}
++
++	return wq;
++
++ no_waitq:
++	FH_SPINLOCK_FREE(wq->lock);
++ no_lock:
++	workqueue_destroy(wq->taskq);
++ no_taskq:
++	FH_FREE(wq);
++
++	return NULL;
++}
++
++void FH_WORKQ_FREE(fh_workq_t *wq)
++{
++#ifdef DEBUG
++	fh_irqflags_t flags;
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++
++	if (wq->pending != 0) {
++		struct work_container *container = wq->container;
++
++		FH_ERROR("Destroying work queue with pending work");
++
++		if (container && container->name) {
++			FH_ERROR("Work %s still pending", container->name);
++		}
++	}
++
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++#endif
++	FH_WAITQ_FREE(wq->waitq);
++	FH_SPINLOCK_FREE(wq->lock);
++	workqueue_destroy(wq->taskq);
++	FH_FREE(wq);
++}
++
++void FH_WORKQ_SCHEDULE(fh_workq_t *wq, fh_work_callback_t cb, void *data,
++			char *format, ...)
++{
++	fh_irqflags_t flags;
++	work_container_t *container;
++	static char name[128];
++	va_list args;
++
++	va_start(args, format);
++	FH_VSNPRINTF(name, 128, format, args);
++	va_end(args);
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	wq->pending++;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++
++	container = FH_ALLOC_ATOMIC(sizeof(*container));
++	if (!container) {
++		FH_ERROR("Cannot allocate memory for container");
++		goto fail;
++	}
++
++	container->name = FH_STRDUP(name);
++	if (!container->name) {
++		FH_ERROR("Cannot allocate memory for container->name");
++		FH_FREE(container);
++		goto fail;
++	}
++
++	container->cb = cb;
++	container->data = data;
++	container->wq = wq;
++	container->hz = 0;
++	wq->container = container;
++
++	FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
++	workqueue_enqueue(wq->taskq, &container->task);
++	return;
++
++ fail:
++	/* undo the pending count taken above so that waiters in
++	 * FH_WORKQ_WAIT_WORK_DONE() don't block forever */
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	wq->pending--;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++}
++
++void FH_WORKQ_SCHEDULE_DELAYED(fh_workq_t *wq, fh_work_callback_t cb,
++				void *data, uint32_t time, char *format, ...)
++{
++	fh_irqflags_t flags;
++	work_container_t *container;
++	static char name[128];
++	struct timeval tv;
++	va_list args;
++
++	va_start(args, format);
++	FH_VSNPRINTF(name, 128, format, args);
++	va_end(args);
++
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	wq->pending++;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++
++	container = FH_ALLOC_ATOMIC(sizeof(*container));
++	if (!container) {
++		FH_ERROR("Cannot allocate memory for container");
++		goto fail;
++	}
++
++	container->name = FH_STRDUP(name);
++	if (!container->name) {
++		FH_ERROR("Cannot allocate memory for container->name");
++		FH_FREE(container);
++		goto fail;
++	}
++
++	container->cb = cb;
++	container->data = data;
++	container->wq = wq;
++	tv.tv_sec = time / 1000;
++	tv.tv_usec = (time - tv.tv_sec * 1000) * 1000;
++	container->hz = tvtohz(&tv);
++	wq->container = container;
++
++	FH_DEBUG("Queueing work: %s, container=%p", container->name, container);
++	workqueue_enqueue(wq->taskq, &container->task);
++	return;
++
++ fail:
++	/* see FH_WORKQ_SCHEDULE(): undo the pending count on failure */
++	FH_SPINLOCK_IRQSAVE(wq->lock, &flags);
++	wq->pending--;
++	FH_SPINUNLOCK_IRQRESTORE(wq->lock, flags);
++	FH_WAITQ_TRIGGER(wq->waitq);
++}
++
++int FH_WORKQ_PENDING(fh_workq_t *wq)
++{
++	return wq->pending;
++}
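++
++/* Usage sketch (my_work and dev are hypothetical).  Work items carry a
++ * printf-style name, and completion can be awaited with a millisecond
++ * timeout via the waitq machinery above:
++ *
++ * @code
++ * fh_workq_t *wq = FH_WORKQ_ALLOC("mywq");
++ *
++ * FH_WORKQ_SCHEDULE(wq, my_work, dev, "work for dev %d", dev->id);
++ * FH_WORKQ_SCHEDULE_DELAYED(wq, my_work, dev, 100, "delayed work");
++ *
++ * if (FH_WORKQ_WAIT_WORK_DONE(wq, 1000) == -FH_E_TIMEOUT)
++ *	FH_ERROR("work still pending");
++ * FH_WORKQ_FREE(wq);
++ * @endcode
++ */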
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.c b/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.c
+new file mode 100644
+index 00000000..c63fd24e
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.c
+@@ -0,0 +1,308 @@
++/* =========================================================================
++ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_crypto.c $
++ * $Revision: #5 $
++ * $Date: 2010/09/28 $
++ * $Change: 1596182 $
++ *
++ * Synopsys Portability Library Software and documentation
++ * (hereinafter, "Software") is an Unsupported proprietary work of
++ * Synopsys, Inc. unless otherwise expressly agreed to in writing
++ * between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for
++ * Licensed Product with Synopsys or any supplement thereto. You are
++ * permitted to use and redistribute this Software in source and binary
++ * forms, with or without modification, provided that redistributions
++ * of source code must retain this notice. You may not view, use,
++ * disclose, copy or distribute this file or any information contained
++ * herein except pursuant to this license grant from Synopsys. If you
++ * do not agree with this notice, including the disclaimer below, then
++ * you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
++ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
++ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
++ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
++ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================= */
++
++/** @file
++ * This file contains the WUSB cryptographic routines.
++ */
++
++#ifdef FH_CRYPTOLIB
++
++#include "fh_crypto.h"
++#include "usb.h"
++
++#ifdef DEBUG
++static inline void dump_bytes(char *name, uint8_t *bytes, int len)
++{
++	int i;
++	FH_PRINTF("%s: ", name);
++	for (i=0; i<len; i++) {
++		FH_PRINTF("%02x ", bytes[i]);
++	}
++	FH_PRINTF("\n");
++}
++#else
++#define dump_bytes(x...)
++#endif
++
++/* Display a block */
++void show_block(const u8 *blk, const char *prefix, const char *suffix, int a)
++{
++#ifdef FH_DEBUG_CRYPTO
++	int i, blksize = 16;
++
++	FH_DEBUG("%s", prefix);
++
++	if (suffix == NULL) {
++		suffix = "\n";
++		blksize = a;
++	}
++
++	for (i = 0; i < blksize; i++)
++		FH_PRINT("%02x%s", *blk++, ((i & 3) == 3) ? "  " : " ");
++	FH_PRINT(suffix);
++#endif
++}
++
++/**
++ * Encrypts an array of bytes using the AES encryption engine.
++ * If <code>dst</code> == <code>src</code>, then the bytes will be encrypted
++ * in-place.
++ *
++ * @return  0 on success, negative error code on error.
++ */
++int fh_wusb_aes_encrypt(u8 *src, u8 *key, u8 *dst)
++{
++	u8 block_t[16];
++	FH_MEMSET(block_t, 0, 16);
++
++	return FH_AES_CBC(src, 16, key, 16, block_t, dst);
++}
++
++/**
++ * The CCM-MAC-FUNCTION described in section 6.5 of the WUSB spec.
++ * This function takes a data string and returns the encrypted CBC
++ * Counter-mode MIC.
++ *
++ * @param key     The 128-bit symmetric key.
++ * @param nonce   The CCM nonce.
++ * @param label   The unique 14-byte ASCII text label.
++ * @param bytes   The byte array to be encrypted.
++ * @param len     Length of the byte array.
++ * @param result  Byte array to receive the 8-byte encrypted MIC.
++ */
++void fh_wusb_cmf(u8 *key, u8 *nonce,
++		  char *label, u8 *bytes, int len, u8 *result)
++{
++	u8 block_m[16];
++	u8 block_x[16];
++	u8 block_t[8];
++	int idx, blkNum;
++	u16 la = (u16)(len + 14);
++
++	/* Set the AES-128 key */
++	//fh_aes_setkey(tfm, key, 16);
++
++	/* Fill block B0 from flags = 0x59, N, and l(m) = 0 */
++	block_m[0] = 0x59;
++	for (idx = 0; idx < 13; idx++)
++		block_m[idx + 1] = nonce[idx];
++	block_m[14] = 0;
++	block_m[15] = 0;
++
++	/* Produce the CBC IV */
++	fh_wusb_aes_encrypt(block_m, key, block_x);
++	show_block(block_m, "CBC IV in: ", "\n", 0);
++	show_block(block_x, "CBC IV out:", "\n", 0);
++
++	/* Fill block B1 from l(a) = Blen + 14, and A */
++	block_x[0] ^= (u8)(la >> 8);
++	block_x[1] ^= (u8)la;
++	for (idx = 0; idx < 14; idx++)
++		block_x[idx + 2] ^= label[idx];
++	show_block(block_x, "After xor: ", "b1\n", 16);
++
++	fh_wusb_aes_encrypt(block_x, key, block_x);
++	show_block(block_x, "After AES: ", "b1\n", 16);
++
++	idx = 0;
++	blkNum = 0;
++
++	/* Fill remaining blocks with B */
++	while (len-- > 0) {
++		block_x[idx] ^= *bytes++;
++		if (++idx >= 16) {
++			idx = 0;
++			show_block(block_x, "After xor: ", "\n", blkNum);
++			fh_wusb_aes_encrypt(block_x, key, block_x);
++			show_block(block_x, "After AES: ", "\n", blkNum);
++			blkNum++;
++		}
++	}
++
++	/* Handle partial last block */
++	if (idx > 0) {
++		show_block(block_x, "After xor: ", "\n", blkNum);
++		fh_wusb_aes_encrypt(block_x, key, block_x);
++		show_block(block_x, "After AES: ", "\n", blkNum);
++	}
++
++	/* Save the MIC tag */
++	FH_MEMCPY(block_t, block_x, 8);
++	show_block(block_t, "MIC tag  : ", NULL, 8);
++
++	/* Fill block A0 from flags = 0x01, N, and counter = 0 */
++	block_m[0] = 0x01;
++	block_m[14] = 0;
++	block_m[15] = 0;
++
++	/* Encrypt the counter */
++	fh_wusb_aes_encrypt(block_m, key, block_x);
++	show_block(block_x, "CTR[MIC] : ", NULL, 8);
++
++	/* XOR with MIC tag */
++	for (idx = 0; idx < 8; idx++) {
++		block_t[idx] ^= block_x[idx];
++	}
++
++	/* Return result to caller */
++	FH_MEMCPY(result, block_t, 8);
++	show_block(result, "CCM-MIC  : ", NULL, 8);
++
++}
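++
++/* For reference, the 16-byte blocks constructed above are laid out as:
++ *
++ *   B0 (CBC IV input) : 0x59 | nonce[0..12] | l(m) = 0x00 0x00
++ *   B1 (xored into IV): l(a) = len + 14, big-endian | 14-byte label
++ *   A0 (counter block): 0x01 | nonce[0..12] | counter = 0x00 0x00
++ *
++ * matching the CCM usage in section 6.5 of the WUSB spec. */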
++
++/**
++ * The PRF function described in section 6.5 of the WUSB spec. This function
++ * concatenates MIC values returned from fh_wusb_cmf() to create a value of
++ * the requested length.
++ *
++ * @param prf_len  Length of the PRF function in bits (64, 128, or 256).
++ * @param key, nonce, label, bytes, len  Same as for fh_wusb_cmf().
++ * @param result   Byte array to receive the result.
++ */
++void fh_wusb_prf(int prf_len, u8 *key,
++		  u8 *nonce, char *label, u8 *bytes, int len, u8 *result)
++{
++	int i;
++
++	nonce[0] = 0;
++	for (i = 0; i < prf_len >> 6; i++, nonce[0]++) {
++		fh_wusb_cmf(key, nonce, label, bytes, len, result);
++		result += 8;
++	}
++}
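++
++/* Worked example: prf_len >> 6 gives the number of 8-byte MIC blocks, so
++ * fh_wusb_prf(256, ...) runs fh_wusb_cmf() four times with nonce[0] set
++ * to 0..3 and writes 32 bytes to result.  The caller must size result for
++ * the requested length: 8, 16 or 32 bytes for PRF-64/128/256. */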
++
++/**
++ * Fills in CCM Nonce per the WUSB spec.
++ *
++ * @param[in] haddr Host address.
++ * @param[in] daddr Device address.
++ * @param[in] tkid Session Key(PTK) identifier.
++ * @param[out] nonce Pointer to where the CCM Nonce output is to be written.
++ */
++void fh_wusb_fill_ccm_nonce(uint16_t haddr, uint16_t daddr, uint8_t *tkid,
++			     uint8_t *nonce)
++{
++
++	FH_DEBUG("%s %x %x\n", __func__, daddr, haddr);
++
++	FH_MEMSET(&nonce[0], 0, 16);
++
++	FH_MEMCPY(&nonce[6], tkid, 3);
++	nonce[9] = daddr & 0xFF;
++	nonce[10] = (daddr >> 8) & 0xFF;
++	nonce[11] = haddr & 0xFF;
++	nonce[12] = (haddr >> 8) & 0xFF;
++
++	dump_bytes("CCM nonce", nonce, 16);
++}
++
++/**
++ * Generates a 16-byte cryptographic-grade random number for the Host/Device
++ * Nonce.
++ */
++void fh_wusb_gen_nonce(uint16_t addr, uint8_t *nonce)
++{
++	uint8_t inonce[16];
++	uint32_t temp[4];
++
++	/* Fill in the Nonce */
++	FH_MEMSET(&inonce[0], 0, sizeof(inonce));
++	inonce[9] = addr & 0xFF;
++	inonce[10] = (addr >> 8) & 0xFF;
++	inonce[11] = inonce[9];
++	inonce[12] = inonce[10];
++
++	/* Collect "randomness samples" */
++	FH_RANDOM_BYTES((uint8_t *)temp, 16);
++
++	fh_wusb_prf_128((uint8_t *)temp, nonce,
++			 "Random Numbers", (uint8_t *)temp, sizeof(temp),
++			 nonce);
++}
++
++/**
++ * Generates the Session Key (PTK) and Key Confirmation Key (KCK) per the
++ * WUSB spec.
++ *
++ * @param[in] ccm_nonce Pointer to CCM Nonce.
++ * @param[in] mk Master Key to derive the session from
++ * @param[in] hnonce Pointer to Host Nonce.
++ * @param[in] dnonce Pointer to Device Nonce.
++ * @param[out] kck Pointer to where the KCK output is to be written.
++ * @param[out] ptk Pointer to where the PTK output is to be written.
++ */
++void fh_wusb_gen_key(uint8_t *ccm_nonce, uint8_t *mk, uint8_t *hnonce,
++		      uint8_t *dnonce, uint8_t *kck, uint8_t *ptk)
++{
++	uint8_t idata[32];
++	uint8_t odata[32];
++
++	dump_bytes("ck", mk, 16);
++	dump_bytes("hnonce", hnonce, 16);
++	dump_bytes("dnonce", dnonce, 16);
++
++	/* The data is the HNonce and DNonce concatenated */
++	FH_MEMCPY(&idata[0], hnonce, 16);
++	FH_MEMCPY(&idata[16], dnonce, 16);
++
++	fh_wusb_prf_256(mk, ccm_nonce, "Pair-wise keys", idata, 32, odata);
++
++	/* Low 16 bytes of the result is the KCK, high 16 is the PTK */
++	FH_MEMCPY(kck, &odata[0], 16);
++	FH_MEMCPY(ptk, &odata[16], 16);
++
++	dump_bytes("kck", kck, 16);
++	dump_bytes("ptk", ptk, 16);
++}
++
++/**
++ * Generates the Message Integrity Code over the Handshake data per the
++ * WUSB spec.
++ *
++ * @param ccm_nonce Pointer to CCM Nonce.
++ * @param kck   Pointer to Key Confirmation Key.
++ * @param data  Pointer to Handshake data to be checked.
++ * @param mic   Pointer to where the MIC output is to be written.
++ */
++void fh_wusb_gen_mic(uint8_t *ccm_nonce, uint8_t *kck,
++		      uint8_t *data, uint8_t *mic)
++{
++
++	fh_wusb_prf_64(kck, ccm_nonce, "out-of-bandMIC",
++			data, WUSB_HANDSHAKE_LEN_FOR_MIC, mic);
++}
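++
++/* Putting it together -- a sketch of the expected call order during the
++ * WUSB 4-way handshake (host side shown; error handling omitted and the
++ * buffer names are hypothetical):
++ *
++ * @code
++ * fh_wusb_fill_ccm_nonce(haddr, daddr, tkid, ccm_nonce);
++ * fh_wusb_gen_nonce(haddr, hnonce);
++ * fh_wusb_gen_key(ccm_nonce, mk, hnonce, dnonce, kck, ptk);
++ * fh_wusb_gen_mic(ccm_nonce, kck, handshake_data, mic);
++ * @endcode
++ */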
++
++#endif	/* FH_CRYPTOLIB */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.h b/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.h
+new file mode 100644
+index 00000000..3e5cb9fb
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_crypto.h
+@@ -0,0 +1,111 @@
++/* =========================================================================
++ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_crypto.h $
++ * $Revision: #3 $
++ * $Date: 2010/09/28 $
++ * $Change: 1596182 $
++ *
++ * Synopsys Portability Library Software and documentation
++ * (hereinafter, "Software") is an Unsupported proprietary work of
++ * Synopsys, Inc. unless otherwise expressly agreed to in writing
++ * between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for
++ * Licensed Product with Synopsys or any supplement thereto. You are
++ * permitted to use and redistribute this Software in source and binary
++ * forms, with or without modification, provided that redistributions
++ * of source code must retain this notice. You may not view, use,
++ * disclose, copy or distribute this file or any information contained
++ * herein except pursuant to this license grant from Synopsys. If you
++ * do not agree with this notice, including the disclaimer below, then
++ * you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
++ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
++ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
++ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
++ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================= */
++
++#ifndef _FH_CRYPTO_H_
++#define _FH_CRYPTO_H_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/** @file
++ *
++ * This file contains declarations for the WUSB Cryptographic routines as
++ * defined in the WUSB spec.  They are only to be used internally by the FH UWB
++ * modules.
++ */
++
++#include "fh_os.h"
++
++int fh_wusb_aes_encrypt(u8 *src, u8 *key, u8 *dst);
++
++void fh_wusb_cmf(u8 *key, u8 *nonce,
++		  char *label, u8 *bytes, int len, u8 *result);
++void fh_wusb_prf(int prf_len, u8 *key,
++		  u8 *nonce, char *label, u8 *bytes, int len, u8 *result);
++
++/**
++ * The PRF-64 function described in section 6.5 of the WUSB spec.
++ *
++ * @param key, nonce, label, bytes, len, result  Same as for fh_wusb_prf().
++ */
++static inline void fh_wusb_prf_64(u8 *key, u8 *nonce,
++				   char *label, u8 *bytes, int len, u8 *result)
++{
++	fh_wusb_prf(64, key, nonce, label, bytes, len, result);
++}
++
++/**
++ * The PRF-128 function described in section 6.5 of the WUSB spec.
++ *
++ * @param key, nonce, label, bytes, len, result  Same as for fh_wusb_prf().
++ */
++static inline void fh_wusb_prf_128(u8 *key, u8 *nonce,
++				    char *label, u8 *bytes, int len, u8 *result)
++{
++	fh_wusb_prf(128, key, nonce, label, bytes, len, result);
++}
++
++/**
++ * The PRF-256 function described in section 6.5 of the WUSB spec.
++ *
++ * @param key, nonce, label, bytes, len, result  Same as for fh_wusb_prf().
++ */
++static inline void fh_wusb_prf_256(u8 *key, u8 *nonce,
++				    char *label, u8 *bytes, int len, u8 *result)
++{
++	fh_wusb_prf(256, key, nonce, label, bytes, len, result);
++}
++
++
++void fh_wusb_fill_ccm_nonce(uint16_t haddr, uint16_t daddr, uint8_t *tkid,
++			       uint8_t *nonce);
++void fh_wusb_gen_nonce(uint16_t addr,
++			  uint8_t *nonce);
++
++void fh_wusb_gen_key(uint8_t *ccm_nonce, uint8_t *mk,
++			uint8_t *hnonce, uint8_t *dnonce,
++			uint8_t *kck, uint8_t *ptk);
++
++
++void fh_wusb_gen_mic(uint8_t *ccm_nonce, uint8_t
++			*kck, uint8_t *data, uint8_t *mic);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* _FH_CRYPTO_H_ */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_dh.c b/drivers/usb/host/fh_otg/fh_common_port/fh_dh.c
+new file mode 100644
+index 00000000..502e2a95
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_dh.c
+@@ -0,0 +1,291 @@
++/* =========================================================================
++ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_dh.c $
++ * $Revision: #3 $
++ * $Date: 2010/09/28 $
++ * $Change: 1596182 $
++ *
++ * Synopsys Portability Library Software and documentation
++ * (hereinafter, "Software") is an Unsupported proprietary work of
++ * Synopsys, Inc. unless otherwise expressly agreed to in writing
++ * between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for
++ * Licensed Product with Synopsys or any supplement thereto. You are
++ * permitted to use and redistribute this Software in source and binary
++ * forms, with or without modification, provided that redistributions
++ * of source code must retain this notice. You may not view, use,
++ * disclose, copy or distribute this file or any information contained
++ * herein except pursuant to this license grant from Synopsys. If you
++ * do not agree with this notice, including the disclaimer below, then
++ * you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
++ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
++ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
++ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
++ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================= */
++#ifdef FH_CRYPTOLIB
++
++#ifndef CONFIG_MACH_IPMATE
++
++#include "fh_dh.h"
++#include "fh_modpow.h"
++
++#ifdef DEBUG
++/* This function prints out a buffer in the format described in the Association
++ * Model specification. */
++static void dh_dump(char *str, void *_num, int len)
++{
++	uint8_t *num = _num;
++	int i;
++	FH_PRINTF("%s\n", str);
++	for (i = 0; i < len; i ++) {
++		FH_PRINTF("%02x", num[i]);
++		if (((i + 1) % 2) == 0) FH_PRINTF(" ");
++		if (((i + 1) % 26) == 0) FH_PRINTF("\n");
++	}
++
++	FH_PRINTF("\n");
++}
++#else
++#define dh_dump(_x...) do {; } while(0)
++#endif
++
++/* Constant g value */
++static __u32 dh_g[] = {
++	0x02000000,
++};
++
++/* Constant p value */
++static __u32 dh_p[] = {
++	0xFFFFFFFF, 0xFFFFFFFF, 0xA2DA0FC9, 0x34C26821, 0x8B62C6C4, 0xD11CDC80, 0x084E0229, 0x74CC678A,
++	0xA6BE0B02, 0x229B133B, 0x79084A51, 0xDD04348E, 0xB31995EF, 0x1B433ACD, 0x6D0A2B30, 0x37145FF2,
++	0x6D35E14F, 0x45C2516D, 0x76B585E4, 0xC67E5E62, 0xE9424CF4, 0x6BED37A6, 0xB65CFF0B, 0xEDB706F4,
++	0xFB6B38EE, 0xA59F895A, 0x11249FAE, 0xE61F4B7C, 0x51662849, 0x3D5BE4EC, 0xB87C00C2, 0x05BF63A1,
++	0x3648DA98, 0x9AD3551C, 0xA83F1669, 0x5FCF24FD, 0x235D6583, 0x96ADA3DC, 0x56F3621C, 0xBB528520,
++	0x0729D59E, 0x6D969670, 0x4E350C67, 0x0498BC4A, 0x086C74F1, 0x7C2118CA, 0x465E9032, 0x3BCE362E,
++	0x2C779EE3, 0x03860E18, 0xA283279B, 0x8FA207EC, 0xF05DC5B5, 0xC9524C6F, 0xF6CB2BDE, 0x18175895,
++	0x7C499539, 0xE56A95EA, 0x1826D215, 0x1005FA98, 0x5A8E7215, 0x2DC4AA8A, 0x0D1733AD, 0x337A5004,
++	0xAB2155A8, 0x64BA1CDF, 0x0485FBEC, 0x0AEFDB58, 0x5771EA8A, 0x7D0C065D, 0x850F97B3, 0xC7E4E1A6,
++	0x8CAEF5AB, 0xD73309DB, 0xE0948C1E, 0x9D61254A, 0x26D2E3CE, 0x6BEED21A, 0x06FA2FF1, 0x64088AD9,
++	0x730276D8, 0x646AC83E, 0x182B1F52, 0x0C207B17, 0x5717E1BB, 0x6C5D617A, 0xC0880977, 0xE246D9BA,
++	0xA04FE208, 0x31ABE574, 0xFC5BDB43, 0x8E10FDE0, 0x20D1824B, 0xCAD23AA9, 0xFFFFFFFF, 0xFFFFFFFF,
++};
++
++static void dh_swap_bytes(void *_in, void *_out, uint32_t len)
++{
++	uint8_t *in = _in;
++	uint8_t *out = _out;
++	int i;
++	for (i=0; i<len; i++) {
++		out[i] = in[len-1-i];
++	}
++}
++
++/* Computes the modular exponentiation (num^exp % mod).  num, exp, and mod are
++ * big endian numbers of size len, in bytes.  Each len value must be a multiple
++ * of 4. */
++int fh_dh_modpow(void *mem_ctx, void *num, uint32_t num_len,
++		  void *exp, uint32_t exp_len,
++		  void *mod, uint32_t mod_len,
++		  void *out)
++{
++	/* fh_modpow() takes little-endian numbers, while the Association
++	 * Model uses big-endian.  This function swaps the bytes of the
++	 * numbers before passing them on to fh_modpow(). */
++
++	int retval = 0;
++	uint32_t *result;
++
++	uint32_t *bignum_num = fh_alloc(mem_ctx, num_len + 4);
++	uint32_t *bignum_exp = fh_alloc(mem_ctx, exp_len + 4);
++	uint32_t *bignum_mod = fh_alloc(mem_ctx, mod_len + 4);
++
++	/* Bail out before touching any buffer if an allocation failed
++	 * (assumes fh_free() tolerates NULL, as kfree() does). */
++	if (!bignum_num || !bignum_exp || !bignum_mod) {
++		retval = -1;
++		goto dh_modpow_nomem;
++	}
++
++	dh_swap_bytes(num, &bignum_num[1], num_len);
++	bignum_num[0] = num_len / 4;
++
++	dh_swap_bytes(exp, &bignum_exp[1], exp_len);
++	bignum_exp[0] = exp_len / 4;
++
++	dh_swap_bytes(mod, &bignum_mod[1], mod_len);
++	bignum_mod[0] = mod_len / 4;
++
++	result = fh_modpow(mem_ctx, bignum_num, bignum_exp, bignum_mod);
++	if (!result) {
++		retval = -1;
++		goto dh_modpow_nomem;
++	}
++
++	dh_swap_bytes(&result[1], out, result[0] * 4);
++	fh_free(mem_ctx, result);
++
++ dh_modpow_nomem:
++	fh_free(mem_ctx, bignum_num);
++	fh_free(mem_ctx, bignum_exp);
++	fh_free(mem_ctx, bignum_mod);
++	return retval;
++}
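++
++/* A note on the format used above: fh_modpow() operates on length-prefixed
++ * little-endian bignums, where word 0 holds the count of 32-bit words that
++ * follow.  A 384-byte big-endian input therefore becomes a 97-word buffer
++ * with bignum[0] = 96 and the data bytes reversed, which is why every
++ * length passed to fh_dh_modpow() must be a multiple of 4. */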
++
++
++int fh_dh_pk(void *mem_ctx, uint8_t nd, uint8_t *exp, uint8_t *pk, uint8_t *hash)
++{
++	int retval;
++	uint8_t m3[385];
++
++#ifndef DH_TEST_VECTORS
++	FH_RANDOM_BYTES(exp, 32);
++#endif
++
++	/* Compute the pkd */
++	if ((retval = fh_dh_modpow(mem_ctx, dh_g, 4,
++				    exp, 32,
++				    dh_p, 384, pk))) {
++		return retval;
++	}
++
++	m3[384] = nd;
++	FH_MEMCPY(&m3[0], pk, 384);
++	FH_SHA256(m3, 385, hash);
++
++	dh_dump("PK", pk, 384);
++	dh_dump("SHA-256(M3)", hash, 32);
++	return 0;
++}
++
++int fh_dh_derive_keys(void *mem_ctx, uint8_t nd, uint8_t *pkh, uint8_t *pkd,
++		       uint8_t *exp, int is_host,
++		       char *dd, uint8_t *ck, uint8_t *kdk)
++{
++	int retval;
++	uint8_t mv[784];
++	uint8_t sha_result[32];
++	uint8_t dhkey[384];
++	uint8_t shared_secret[384];
++	char *message;
++	uint32_t vd;
++
++	uint8_t *pk;
++
++	if (is_host) {
++		pk = pkd;
++	}
++	else {
++		pk = pkh;
++	}
++
++	if ((retval = fh_dh_modpow(mem_ctx, pk, 384,
++				    exp, 32,
++				    dh_p, 384, shared_secret))) {
++		return retval;
++	}
++	dh_dump("Shared Secret", shared_secret, 384);
++
++	FH_SHA256(shared_secret, 384, dhkey);
++	dh_dump("DHKEY", dhkey, 384);
++
++	FH_MEMCPY(&mv[0], pkd, 384);
++	FH_MEMCPY(&mv[384], pkh, 384);
++	FH_MEMCPY(&mv[768], "displayed digest", 16);
++	dh_dump("MV", mv, 784);
++
++	FH_SHA256(mv, 784, sha_result);
++	dh_dump("SHA-256(MV)", sha_result, 32);
++	dh_dump("First 32-bits of SHA-256(MV)", sha_result, 4);
++
++	dh_swap_bytes(sha_result, &vd, 4);
++#ifdef DEBUG
++	FH_PRINTF("Vd (decimal) = %d\n", vd);
++#endif
++
++	switch (nd) {
++	case 2:
++		vd = vd % 100;
++		FH_SPRINTF(dd, "%02d", vd);
++		break;
++	case 3:
++		vd = vd % 1000;
++		FH_SPRINTF(dd, "%03d", vd);
++		break;
++	case 4:
++		vd = vd % 10000;
++		FH_SPRINTF(dd, "%04d", vd);
++		break;
++	}
++#ifdef DEBUG
++	FH_PRINTF("Display Digits: %s\n", dd);
++#endif
++
++	message = "connection key";
++	FH_HMAC_SHA256(message, FH_STRLEN(message), dhkey, 32, sha_result);
++	dh_dump("HMAC(SHA-256, DHKey, connection key)", sha_result, 32);
++	FH_MEMCPY(ck, sha_result, 16);
++
++	message = "key derivation key";
++	FH_HMAC_SHA256(message, FH_STRLEN(message), dhkey, 32, sha_result);
++	dh_dump("HMAC(SHA-256, DHKey, key derivation key)", sha_result, 32);
++	FH_MEMCPY(kdk, sha_result, 32);
++
++	return 0;
++}
++
++
++#ifdef DH_TEST_VECTORS
++
++static __u8 dh_a[] = {
++	0x44, 0x00, 0x51, 0xd6,
++	0xf0, 0xb5, 0x5e, 0xa9,
++	0x67, 0xab, 0x31, 0xc6,
++	0x8a, 0x8b, 0x5e, 0x37,
++	0xd9, 0x10, 0xda, 0xe0,
++	0xe2, 0xd4, 0x59, 0xa4,
++	0x86, 0x45, 0x9c, 0xaa,
++	0xdf, 0x36, 0x75, 0x16,
++};
++
++static __u8 dh_b[] = {
++	0x5d, 0xae, 0xc7, 0x86,
++	0x79, 0x80, 0xa3, 0x24,
++	0x8c, 0xe3, 0x57, 0x8f,
++	0xc7, 0x5f, 0x1b, 0x0f,
++	0x2d, 0xf8, 0x9d, 0x30,
++	0x6f, 0xa4, 0x52, 0xcd,
++	0xe0, 0x7a, 0x04, 0x8a,
++	0xde, 0xd9, 0x26, 0x56,
++};
++
++void fh_run_dh_test_vectors(void *mem_ctx)
++{
++	uint8_t pkd[384];
++	uint8_t pkh[384];
++	uint8_t hashd[32];
++	uint8_t hashh[32];
++	uint8_t ck[16];
++	uint8_t kdk[32];
++	char dd[5];
++
++	FH_PRINTF("\n\n\nDH_TEST_VECTORS\n\n");
++
++	/* compute the PKd and SHA-256(PKd || Nd) */
++	FH_PRINTF("Computing PKd\n");
++	fh_dh_pk(mem_ctx, 2, dh_a, pkd, hashd);
++
++	/* compute the PKh and SHA-256(PKh || Nd) */
++	FH_PRINTF("Computing PKh\n");
++	fh_dh_pk(mem_ctx, 2, dh_b, pkh, hashh);
++
++	/* compute the dhkey */
++	fh_dh_derive_keys(mem_ctx, 2, pkh, pkd, dh_a, 0, dd, ck, kdk);
++}
++#endif /* DH_TEST_VECTORS */
++
++#endif /* !CONFIG_MACH_IPMATE */
++
++#endif /* FH_CRYPTOLIB */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_dh.h b/drivers/usb/host/fh_otg/fh_common_port/fh_dh.h
+new file mode 100644
+index 00000000..c4c9ccca
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_dh.h
+@@ -0,0 +1,106 @@
++/* =========================================================================
++ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_dh.h $
++ * $Revision: #4 $
++ * $Date: 2010/09/28 $
++ * $Change: 1596182 $
++ *
++ * Synopsys Portability Library Software and documentation
++ * (hereinafter, "Software") is an Unsupported proprietary work of
++ * Synopsys, Inc. unless otherwise expressly agreed to in writing
++ * between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for
++ * Licensed Product with Synopsys or any supplement thereto. You are
++ * permitted to use and redistribute this Software in source and binary
++ * forms, with or without modification, provided that redistributions
++ * of source code must retain this notice. You may not view, use,
++ * disclose, copy or distribute this file or any information contained
++ * herein except pursuant to this license grant from Synopsys. If you
++ * do not agree with this notice, including the disclaimer below, then
++ * you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
++ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
++ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
++ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
++ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================= */
++#ifndef _FH_DH_H_
++#define _FH_DH_H_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include "fh_os.h"
++
++/** @file
++ *
++ * This file defines the common functions on device and host for performing
++ * numeric association as defined in the WUSB spec.  They are only to be
++ * used internally by the FH UWB modules. */
++
++extern int fh_dh_sha256(uint8_t *message, uint32_t len, uint8_t *out);
++extern int fh_dh_hmac_sha256(uint8_t *message, uint32_t messagelen,
++			      uint8_t *key, uint32_t keylen,
++			      uint8_t *out);
++extern int fh_dh_modpow(void *mem_ctx, void *num, uint32_t num_len,
++			 void *exp, uint32_t exp_len,
++			 void *mod, uint32_t mod_len,
++			 void *out);
++
++/** Computes PKd or PKh, and SHA-256(PK || Nd).
++ *
++ * PK = g^exp mod p.
++ *
++ * Input:
++ * nd = Number of digits on the device.
++ *
++ * Output:
++ * exp = A 32-byte buffer to be filled with a randomly generated number,
++ *       used as either A or B.
++ * pk = A 384-byte buffer to be filled with the PKh or PKd.
++ * hash = A 32-byte buffer to be filled with SHA-256(PK || Nd).
++ */
++extern int fh_dh_pk(void *mem_ctx, uint8_t nd, uint8_t *exp, uint8_t *pkd, uint8_t *hash);
++
++/** Computes the DHKEY and Vd.
++ *
++ * If called from the host, it computes DHKEY = PKd^exp % p.
++ * If called from the device, it computes DHKEY = PKh^exp % p.
++ *
++ * Input:
++ * pkd = The PKD value.
++ * pkh = The PKH value.
++ * exp = The A value (if device) or B value (if host) generated in fh_dh_pk().
++ * is_host = Set to non zero if a WUSB host is calling this function.
++ *
++ * Output:
++ *
++ * dd = A pointer to a buffer to be set to the displayed-digits string shown
++ *      to the user.  The buffer should be at least 5 bytes long, to hold 4
++ *      digits plus a null termination character, and can be used directly
++ *      for display.
++ * ck = A 16-byte buffer to be filled with the CK.
++ * kdk = A 32-byte buffer to be filled with the KDK.
++ */
++extern int fh_dh_derive_keys(void *mem_ctx, uint8_t nd, uint8_t *pkh, uint8_t *pkd,
++			      uint8_t *exp, int is_host,
++			      char *dd, uint8_t *ck, uint8_t *kdk);
++
++#ifdef DH_TEST_VECTORS
++extern void fh_run_dh_test_vectors(void *mem_ctx);
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* _FH_DH_H_ */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_list.h b/drivers/usb/host/fh_otg/fh_common_port/fh_list.h
+new file mode 100644
+index 00000000..11cbf687
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_list.h
+@@ -0,0 +1,594 @@
++/*	$OpenBSD: queue.h,v 1.26 2004/05/04 16:59:32 grange Exp $	*/
++/*	$NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $	*/
++
++/*
++ * Copyright (c) 1991, 1993
++ *	The Regents of the University of California.  All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. Neither the name of the University nor the names of its contributors
++ *    may be used to endorse or promote products derived from this software
++ *    without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ *	@(#)queue.h	8.5 (Berkeley) 8/20/94
++ */
++
++#ifndef _FH_LIST_H_
++#define _FH_LIST_H_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/** @file
++ *
++ * This file defines linked list operations.  It is derived from BSD with
++ * only the MACRO names being prefixed with FH_.  This is because a few of
++ * these names conflict with those on Linux.  For documentation on use, see the
++ * inline comments in the source code.  The original license for this source
++ * code applies and is preserved in the fh_list.h source file.
++ */
++
++/*
++ * This file defines five types of data structures: singly-linked lists,
++ * lists, simple queues, tail queues, and circular queues.
++ *
++ *
++ * A singly-linked list is headed by a single forward pointer. The elements
++ * are singly linked for minimum space and pointer manipulation overhead at
++ * the expense of O(n) removal for arbitrary elements. New elements can be
++ * added to the list after an existing element or at the head of the list.
++ * Elements being removed from the head of the list should use the explicit
++ * macro for this purpose for optimum efficiency. A singly-linked list may
++ * only be traversed in the forward direction.  Singly-linked lists are ideal
++ * for applications with large datasets and few or no removals or for
++ * implementing a LIFO queue.
++ *
++ * A list is headed by a single forward pointer (or an array of forward
++ * pointers for a hash table header). The elements are doubly linked
++ * so that an arbitrary element can be removed without a need to
++ * traverse the list. New elements can be added to the list before
++ * or after an existing element or at the head of the list. A list
++ * may only be traversed in the forward direction.
++ *
++ * A simple queue is headed by a pair of pointers, one the head of the
++ * list and the other to the tail of the list. The elements are singly
++ * linked to save space, so elements can only be removed from the
++ * head of the list. New elements can be added to the list before or after
++ * an existing element, at the head of the list, or at the end of the
++ * list. A simple queue may only be traversed in the forward direction.
++ *
++ * A tail queue is headed by a pair of pointers, one to the head of the
++ * list and the other to the tail of the list. The elements are doubly
++ * linked so that an arbitrary element can be removed without a need to
++ * traverse the list. New elements can be added to the list before or
++ * after an existing element, at the head of the list, or at the end of
++ * the list. A tail queue may be traversed in either direction.
++ *
++ * A circle queue is headed by a pair of pointers, one to the head of the
++ * list and the other to the tail of the list. The elements are doubly
++ * linked so that an arbitrary element can be removed without a need to
++ * traverse the list. New elements can be added to the list before or after
++ * an existing element, at the head of the list, or at the end of the list.
++ * A circle queue may be traversed in either direction, but has a more
++ * complex end of list detection.
++ *
++ * For details on the use of these macros, see the queue(3) manual page.
++ */
++
++/*
++ * Double-linked List.
++ */
++
++typedef struct fh_list_link {
++	struct fh_list_link *next;
++	struct fh_list_link *prev;
++} fh_list_link_t;
++
++#define FH_LIST_INIT(link) do {	\
++	(link)->next = (link);		\
++	(link)->prev = (link);		\
++} while (0)
++
++#define FH_LIST_FIRST(link)	((link)->next)
++#define FH_LIST_LAST(link)	((link)->prev)
++#define FH_LIST_END(link)	(link)
++#define FH_LIST_NEXT(link)	((link)->next)
++#define FH_LIST_PREV(link)	((link)->prev)
++#define FH_LIST_EMPTY(link)	\
++	(FH_LIST_FIRST(link) == FH_LIST_END(link))
++#define FH_LIST_ENTRY(link, type, field)			\
++	(type *)((uint8_t *)(link) - (size_t)(&((type *)0)->field))
++
++#define FH_LIST_INSERT_HEAD(list, link) do {			\
++	fh_list_link_t *__next__ = (list)->next;		\
++	__next__->prev = (link);				\
++	(link)->next = __next__;				\
++	(link)->prev = (list);					\
++	(list)->next = (link);					\
++} while (0)
++
++#define FH_LIST_INSERT_TAIL(list, link) do {			\
++	fh_list_link_t *__prev__ = (list)->prev;		\
++	(list)->prev = (link);					\
++	(link)->next = (list);					\
++	(link)->prev = __prev__;				\
++	__prev__->next = (link);				\
++} while (0)
++
++#define FH_LIST_REMOVE(link) do {				\
++	(link)->next->prev = (link)->prev;			\
++	(link)->prev->next = (link)->next;			\
++} while (0)
++
++#define FH_LIST_REMOVE_INIT(link) do {				\
++	FH_LIST_REMOVE(link);					\
++	FH_LIST_INIT(link);					\
++} while (0)
++
++#define FH_LIST_MOVE_HEAD(list, link) do {			\
++	FH_LIST_REMOVE(link);					\
++	FH_LIST_INSERT_HEAD(list, link);			\
++} while (0)
++
++#define FH_LIST_MOVE_TAIL(list, link) do {			\
++	FH_LIST_REMOVE(link);					\
++	FH_LIST_INSERT_TAIL(list, link);			\
++} while (0)
++
++#define FH_LIST_FOREACH(var, list)				\
++	for((var) = FH_LIST_FIRST(list);			\
++	    (var) != FH_LIST_END(list);			\
++	    (var) = FH_LIST_NEXT(var))
++
++#define FH_LIST_FOREACH_SAFE(var, var2, list)			\
++	for((var) = FH_LIST_FIRST(list), (var2) = FH_LIST_NEXT(var);	\
++	    (var) != FH_LIST_END(list);			\
++	    (var) = (var2), (var2) = FH_LIST_NEXT(var2))
++
++#define FH_LIST_FOREACH_REVERSE(var, list)			\
++	for((var) = FH_LIST_LAST(list);			\
++	    (var) != FH_LIST_END(list);			\
++	    (var) = FH_LIST_PREV(var))
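++
++/* Usage sketch (struct my_item is hypothetical).  The link is embedded in
++ * the element and FH_LIST_ENTRY() recovers the enclosing structure:
++ *
++ * @code
++ * struct my_item {
++ *	int value;
++ *	fh_list_link_t link;
++ * };
++ *
++ * fh_list_link_t head;
++ * fh_list_link_t *cur;
++ * struct my_item item;
++ *
++ * FH_LIST_INIT(&head);
++ * FH_LIST_INSERT_TAIL(&head, &item.link);
++ * FH_LIST_FOREACH(cur, &head) {
++ *	struct my_item *p = FH_LIST_ENTRY(cur, struct my_item, link);
++ *	// use p->value
++ * }
++ * @endcode
++ */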
++
++/*
++ * Singly-linked List definitions.
++ */
++#define FH_SLIST_HEAD(name, type)					\
++struct name {								\
++	struct type *slh_first;	/* first element */			\
++}
++
++#define FH_SLIST_HEAD_INITIALIZER(head)				\
++	{ NULL }
++
++#define FH_SLIST_ENTRY(type)						\
++struct {								\
++	struct type *sle_next;	/* next element */			\
++}
++
++/*
++ * Singly-linked List access methods.
++ */
++#define FH_SLIST_FIRST(head)	((head)->slh_first)
++#define FH_SLIST_END(head)		NULL
++#define FH_SLIST_EMPTY(head)	(FH_SLIST_FIRST(head) == FH_SLIST_END(head))
++#define FH_SLIST_NEXT(elm, field)	((elm)->field.sle_next)
++
++#define FH_SLIST_FOREACH(var, head, field)				\
++	for((var) = FH_SLIST_FIRST(head);				\
++	    (var) != FH_SLIST_END(head);				\
++	    (var) = FH_SLIST_NEXT(var, field))
++
++#define FH_SLIST_FOREACH_PREVPTR(var, varp, head, field)		\
++	for((varp) = &FH_SLIST_FIRST((head));				\
++	    ((var) = *(varp)) != FH_SLIST_END(head);			\
++	    (varp) = &FH_SLIST_NEXT((var), field))
++
++/*
++ * Singly-linked List functions.
++ */
++#define FH_SLIST_INIT(head) {						\
++	FH_SLIST_FIRST(head) = FH_SLIST_END(head);			\
++}
++
++#define FH_SLIST_INSERT_AFTER(slistelm, elm, field) do {		\
++	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
++	(slistelm)->field.sle_next = (elm);				\
++} while (0)
++
++#define FH_SLIST_INSERT_HEAD(head, elm, field) do {			\
++	(elm)->field.sle_next = (head)->slh_first;			\
++	(head)->slh_first = (elm);					\
++} while (0)
++
++#define FH_SLIST_REMOVE_NEXT(head, elm, field) do {			\
++	(elm)->field.sle_next = (elm)->field.sle_next->field.sle_next;	\
++} while (0)
++
++#define FH_SLIST_REMOVE_HEAD(head, field) do {				\
++	(head)->slh_first = (head)->slh_first->field.sle_next;		\
++} while (0)
++
++#define FH_SLIST_REMOVE(head, elm, type, field) do {			\
++	if ((head)->slh_first == (elm)) {				\
++		FH_SLIST_REMOVE_HEAD((head), field);			\
++	}								\
++	else {								\
++		struct type *curelm = (head)->slh_first;		\
++		while( curelm->field.sle_next != (elm) )		\
++			curelm = curelm->field.sle_next;		\
++		curelm->field.sle_next =				\
++		    curelm->field.sle_next->field.sle_next;		\
++	}								\
++} while (0)
++
++/*
++ * Simple queue definitions.
++ */
++#define FH_SIMPLEQ_HEAD(name, type)					\
++struct name {								\
++	struct type *sqh_first;	/* first element */			\
++	struct type **sqh_last;	/* addr of last next element */		\
++}
++
++#define FH_SIMPLEQ_HEAD_INITIALIZER(head)				\
++	{ NULL, &(head).sqh_first }
++
++#define FH_SIMPLEQ_ENTRY(type)						\
++struct {								\
++	struct type *sqe_next;	/* next element */			\
++}
++
++/*
++ * Simple queue access methods.
++ */
++#define FH_SIMPLEQ_FIRST(head)	    ((head)->sqh_first)
++#define FH_SIMPLEQ_END(head)	    NULL
++#define FH_SIMPLEQ_EMPTY(head)	    (FH_SIMPLEQ_FIRST(head) == FH_SIMPLEQ_END(head))
++#define FH_SIMPLEQ_NEXT(elm, field)    ((elm)->field.sqe_next)
++
++#define FH_SIMPLEQ_FOREACH(var, head, field)				\
++	for((var) = FH_SIMPLEQ_FIRST(head);				\
++	    (var) != FH_SIMPLEQ_END(head);				\
++	    (var) = FH_SIMPLEQ_NEXT(var, field))
++
++/*
++ * Simple queue functions.
++ */
++#define FH_SIMPLEQ_INIT(head) do {					\
++	(head)->sqh_first = NULL;					\
++	(head)->sqh_last = &(head)->sqh_first;				\
++} while (0)
++
++#define FH_SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
++	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
++		(head)->sqh_last = &(elm)->field.sqe_next;		\
++	(head)->sqh_first = (elm);					\
++} while (0)
++
++#define FH_SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
++	(elm)->field.sqe_next = NULL;					\
++	*(head)->sqh_last = (elm);					\
++	(head)->sqh_last = &(elm)->field.sqe_next;			\
++} while (0)
++
++#define FH_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {	\
++	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
++		(head)->sqh_last = &(elm)->field.sqe_next;		\
++	(listelm)->field.sqe_next = (elm);				\
++} while (0)
++
++#define FH_SIMPLEQ_REMOVE_HEAD(head, field) do {			\
++	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
++		(head)->sqh_last = &(head)->sqh_first;			\
++} while (0)
++
++/*
++ * Tail queue definitions.
++ */
++#define FH_TAILQ_HEAD(name, type)					\
++struct name {								\
++	struct type *tqh_first;	/* first element */			\
++	struct type **tqh_last;	/* addr of last next element */		\
++}
++
++#define FH_TAILQ_HEAD_INITIALIZER(head)				\
++	{ NULL, &(head).tqh_first }
++
++#define FH_TAILQ_ENTRY(type)						\
++struct {								\
++	struct type *tqe_next;	/* next element */			\
++	struct type **tqe_prev;	/* address of previous next element */	\
++}
++
++/*
++ * tail queue access methods
++ */
++#define FH_TAILQ_FIRST(head)		((head)->tqh_first)
++#define FH_TAILQ_END(head)		NULL
++#define FH_TAILQ_NEXT(elm, field)	((elm)->field.tqe_next)
++#define FH_TAILQ_LAST(head, headname)					\
++	(*(((struct headname *)((head)->tqh_last))->tqh_last))
++/* XXX */
++#define FH_TAILQ_PREV(elm, headname, field)				\
++	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
++#define FH_TAILQ_EMPTY(head)						\
++	(FH_TAILQ_FIRST(head) == FH_TAILQ_END(head))
++
++#define FH_TAILQ_FOREACH(var, head, field)				\
++	for((var) = FH_TAILQ_FIRST(head);				\
++	    (var) != FH_TAILQ_END(head);				\
++	    (var) = FH_TAILQ_NEXT(var, field))
++
++#define FH_TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
++	for((var) = FH_TAILQ_LAST(head, headname);			\
++	    (var) != FH_TAILQ_END(head);				\
++	    (var) = FH_TAILQ_PREV(var, headname, field))
++
++/*
++ * Tail queue functions.
++ */
++#define FH_TAILQ_INIT(head) do {					\
++	(head)->tqh_first = NULL;					\
++	(head)->tqh_last = &(head)->tqh_first;				\
++} while (0)
++
++#define FH_TAILQ_INSERT_HEAD(head, elm, field) do {			\
++	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
++		(head)->tqh_first->field.tqe_prev =			\
++		    &(elm)->field.tqe_next;				\
++	else								\
++		(head)->tqh_last = &(elm)->field.tqe_next;		\
++	(head)->tqh_first = (elm);					\
++	(elm)->field.tqe_prev = &(head)->tqh_first;			\
++} while (0)
++
++#define FH_TAILQ_INSERT_TAIL(head, elm, field) do {			\
++	(elm)->field.tqe_next = NULL;					\
++	(elm)->field.tqe_prev = (head)->tqh_last;			\
++	*(head)->tqh_last = (elm);					\
++	(head)->tqh_last = &(elm)->field.tqe_next;			\
++} while (0)
++
++#define FH_TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
++	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
++		(elm)->field.tqe_next->field.tqe_prev =			\
++		    &(elm)->field.tqe_next;				\
++	else								\
++		(head)->tqh_last = &(elm)->field.tqe_next;		\
++	(listelm)->field.tqe_next = (elm);				\
++	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
++} while (0)
++
++#define FH_TAILQ_INSERT_BEFORE(listelm, elm, field) do {		\
++	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
++	(elm)->field.tqe_next = (listelm);				\
++	*(listelm)->field.tqe_prev = (elm);				\
++	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
++} while (0)
++
++#define FH_TAILQ_REMOVE(head, elm, field) do {				\
++	if (((elm)->field.tqe_next) != NULL)				\
++		(elm)->field.tqe_next->field.tqe_prev =			\
++		    (elm)->field.tqe_prev;				\
++	else								\
++		(head)->tqh_last = (elm)->field.tqe_prev;		\
++	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
++} while (0)
++
++#define FH_TAILQ_REPLACE(head, elm, elm2, field) do {			\
++	if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL)	\
++		(elm2)->field.tqe_next->field.tqe_prev =		\
++		    &(elm2)->field.tqe_next;				\
++	else								\
++		(head)->tqh_last = &(elm2)->field.tqe_next;		\
++	(elm2)->field.tqe_prev = (elm)->field.tqe_prev;			\
++	*(elm2)->field.tqe_prev = (elm2);				\
++} while (0)
++
++/*
++ * Circular queue definitions.
++ */
++#define FH_CIRCLEQ_HEAD(name, type)					\
++struct name {								\
++	struct type *cqh_first;		/* first element */		\
++	struct type *cqh_last;		/* last element */		\
++}
++
++#define FH_CIRCLEQ_HEAD_INITIALIZER(head)				\
++	{ FH_CIRCLEQ_END(&head), FH_CIRCLEQ_END(&head) }
++
++#define FH_CIRCLEQ_ENTRY(type)						\
++struct {								\
++	struct type *cqe_next;		/* next element */		\
++	struct type *cqe_prev;		/* previous element */		\
++}
++
++/*
++ * Circular queue access methods
++ */
++#define FH_CIRCLEQ_FIRST(head)		((head)->cqh_first)
++#define FH_CIRCLEQ_LAST(head)		((head)->cqh_last)
++#define FH_CIRCLEQ_END(head)		((void *)(head))
++#define FH_CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
++#define FH_CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
++#define FH_CIRCLEQ_EMPTY(head)						\
++	(FH_CIRCLEQ_FIRST(head) == FH_CIRCLEQ_END(head))
++
++#define FH_CIRCLEQ_EMPTY_ENTRY(elm, field) (((elm)->field.cqe_next == NULL) && ((elm)->field.cqe_prev == NULL))
++
++#define FH_CIRCLEQ_FOREACH(var, head, field)				\
++	for((var) = FH_CIRCLEQ_FIRST(head);				\
++	    (var) != FH_CIRCLEQ_END(head);				\
++	    (var) = FH_CIRCLEQ_NEXT(var, field))
++
++#define FH_CIRCLEQ_FOREACH_SAFE(var, var2, head, field)			\
++	for((var) = FH_CIRCLEQ_FIRST(head), var2 = FH_CIRCLEQ_NEXT(var, field); \
++	    (var) != FH_CIRCLEQ_END(head);					\
++	    (var) = var2, var2 = FH_CIRCLEQ_NEXT(var, field))
++
++#define FH_CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
++	for((var) = FH_CIRCLEQ_LAST(head);				\
++	    (var) != FH_CIRCLEQ_END(head);				\
++	    (var) = FH_CIRCLEQ_PREV(var, field))
++
++/*
++ * Circular queue functions.
++ */
++#define FH_CIRCLEQ_INIT(head) do {					\
++	(head)->cqh_first = FH_CIRCLEQ_END(head);			\
++	(head)->cqh_last = FH_CIRCLEQ_END(head);			\
++} while (0)
++
++#define FH_CIRCLEQ_INIT_ENTRY(elm, field) do {				\
++	(elm)->field.cqe_next = NULL;					\
++	(elm)->field.cqe_prev = NULL;					\
++} while (0)
++
++#define FH_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {	\
++	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
++	(elm)->field.cqe_prev = (listelm);				\
++	if ((listelm)->field.cqe_next == FH_CIRCLEQ_END(head))		\
++		(head)->cqh_last = (elm);				\
++	else								\
++		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
++	(listelm)->field.cqe_next = (elm);				\
++} while (0)
++
++#define FH_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {	\
++	(elm)->field.cqe_next = (listelm);				\
++	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
++	if ((listelm)->field.cqe_prev == FH_CIRCLEQ_END(head))		\
++		(head)->cqh_first = (elm);				\
++	else								\
++		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
++	(listelm)->field.cqe_prev = (elm);				\
++} while (0)
++
++#define FH_CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
++	(elm)->field.cqe_next = (head)->cqh_first;			\
++	(elm)->field.cqe_prev = FH_CIRCLEQ_END(head);			\
++	if ((head)->cqh_last == FH_CIRCLEQ_END(head))			\
++		(head)->cqh_last = (elm);				\
++	else								\
++		(head)->cqh_first->field.cqe_prev = (elm);		\
++	(head)->cqh_first = (elm);					\
++} while (0)
++
++#define FH_CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
++	(elm)->field.cqe_next = FH_CIRCLEQ_END(head);			\
++	(elm)->field.cqe_prev = (head)->cqh_last;			\
++	if ((head)->cqh_first == FH_CIRCLEQ_END(head))			\
++		(head)->cqh_first = (elm);				\
++	else								\
++		(head)->cqh_last->field.cqe_next = (elm);		\
++	(head)->cqh_last = (elm);					\
++} while (0)
++
++#define FH_CIRCLEQ_REMOVE(head, elm, field) do {			\
++	if ((elm)->field.cqe_next == FH_CIRCLEQ_END(head))		\
++		(head)->cqh_last = (elm)->field.cqe_prev;		\
++	else								\
++		(elm)->field.cqe_next->field.cqe_prev =			\
++		    (elm)->field.cqe_prev;				\
++	if ((elm)->field.cqe_prev == FH_CIRCLEQ_END(head))		\
++		(head)->cqh_first = (elm)->field.cqe_next;		\
++	else								\
++		(elm)->field.cqe_prev->field.cqe_next =			\
++		    (elm)->field.cqe_next;				\
++} while (0)
++
++#define FH_CIRCLEQ_REMOVE_INIT(head, elm, field) do {			\
++	FH_CIRCLEQ_REMOVE(head, elm, field);				\
++	FH_CIRCLEQ_INIT_ENTRY(elm, field);				\
++} while (0)
++
++#define FH_CIRCLEQ_REPLACE(head, elm, elm2, field) do {		\
++	if (((elm2)->field.cqe_next = (elm)->field.cqe_next) ==		\
++	    FH_CIRCLEQ_END(head))					\
++		(head)->cqh_last = (elm2);				\
++	else								\
++		(elm2)->field.cqe_next->field.cqe_prev = (elm2);	\
++	if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) ==		\
++	    FH_CIRCLEQ_END(head))					\
++		(head)->cqh_first = (elm2);				\
++	else								\
++		(elm2)->field.cqe_prev->field.cqe_next = (elm2);	\
++} while (0)
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* _FH_LIST_H_ */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_mem.c b/drivers/usb/host/fh_otg/fh_common_port/fh_mem.c
+new file mode 100644
+index 00000000..d7fedb34
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_mem.c
+@@ -0,0 +1,245 @@
++/* Memory Debugging */
++#ifdef FH_DEBUG_MEMORY
++
++#include "fh_os.h"
++#include "fh_list.h"
++
++struct allocation {
++	void *addr;
++	void *ctx;
++	char *func;
++	int line;
++	uint32_t size;
++	int dma;
++	FH_CIRCLEQ_ENTRY(allocation) entry;
++};
++
++FH_CIRCLEQ_HEAD(allocation_queue, allocation);
++
++struct allocation_manager {
++	void *mem_ctx;
++	struct allocation_queue allocations;
++
++	/* statistics */
++	int num;
++	int num_freed;
++	int num_active;
++	uint32_t total;
++	uint32_t cur;
++	uint32_t max;
++};
++
++static struct allocation_manager *manager = NULL;
++
++static int add_allocation(void *ctx, uint32_t size, char const *func, int line, void *addr,
++			  int dma)
++{
++	struct allocation *a;
++
++	FH_ASSERT(manager != NULL, "manager not allocated");
++
++	a = __FH_ALLOC_ATOMIC(manager->mem_ctx, sizeof(*a));
++	if (!a) {
++		return -FH_E_NO_MEMORY;
++	}
++
++	a->func = __FH_ALLOC_ATOMIC(manager->mem_ctx, FH_STRLEN(func) + 1);
++	if (!a->func) {
++		__FH_FREE(manager->mem_ctx, a);
++		return -FH_E_NO_MEMORY;
++	}
++
++	FH_MEMCPY(a->func, func, FH_STRLEN(func) + 1);
++	a->addr = addr;
++	a->ctx = ctx;
++	a->line = line;
++	a->size = size;
++	a->dma = dma;
++	FH_CIRCLEQ_INSERT_TAIL(&manager->allocations, a, entry);
++
++	/* Update stats */
++	manager->num++;
++	manager->num_active++;
++	manager->total += size;
++	manager->cur += size;
++
++	if (manager->max < manager->cur) {
++		manager->max = manager->cur;
++	}
++
++	return 0;
++}
++
++static struct allocation *find_allocation(void *ctx, void *addr)
++{
++	struct allocation *a;
++
++	FH_CIRCLEQ_FOREACH(a, &manager->allocations, entry) {
++		if (a->ctx == ctx && a->addr == addr) {
++			return a;
++		}
++	}
++
++	return NULL;
++}
++
++static void free_allocation(void *ctx, void *addr, char const *func, int line)
++{
++	struct allocation *a = find_allocation(ctx, addr);
++
++	if (!a) {
++		FH_ASSERT(0,
++			   "Free of address %p that was never allocated or already freed %s:%d",
++			   addr, func, line);
++		return;
++	}
++
++	FH_CIRCLEQ_REMOVE(&manager->allocations, a, entry);
++
++	manager->num_active--;
++	manager->num_freed++;
++	manager->cur -= a->size;
++	__FH_FREE(manager->mem_ctx, a->func);
++	__FH_FREE(manager->mem_ctx, a);
++}
++
++int fh_memory_debug_start(void *mem_ctx)
++{
++	FH_ASSERT(manager == NULL, "Memory debugging has already started\n");
++
++	if (manager) {
++		return -FH_E_BUSY;
++	}
++
++	manager = __FH_ALLOC(mem_ctx, sizeof(*manager));
++	if (!manager) {
++		return -FH_E_NO_MEMORY;
++	}
++
++	FH_CIRCLEQ_INIT(&manager->allocations);
++	manager->mem_ctx = mem_ctx;
++	manager->num = 0;
++	manager->num_freed = 0;
++	manager->num_active = 0;
++	manager->total = 0;
++	manager->cur = 0;
++	manager->max = 0;
++
++	return 0;
++}
++
++void fh_memory_debug_stop(void)
++{
++	struct allocation *a;
++	struct allocation *next;
++
++	fh_memory_debug_report();
++
++	/* Use the _SAFE iterator here: free_allocation() unlinks and frees
++	 * the current entry, so the plain FOREACH would advance through
++	 * freed memory. */
++	FH_CIRCLEQ_FOREACH_SAFE(a, next, &manager->allocations, entry) {
++		FH_ERROR("Memory leaked from %s:%d\n", a->func, a->line);
++		free_allocation(a->ctx, a->addr, NULL, -1);
++	}
++
++	__FH_FREE(manager->mem_ctx, manager);
++}
++
++void fh_memory_debug_report(void)
++{
++	struct allocation *a;
++
++	FH_PRINTF("\n\n\n----------------- Memory Debugging Report -----------------\n\n");
++	FH_PRINTF("Num Allocations = %d\n", manager->num);
++	FH_PRINTF("Freed = %d\n", manager->num_freed);
++	FH_PRINTF("Active = %d\n", manager->num_active);
++	FH_PRINTF("Current Memory Used = %d\n", manager->cur);
++	FH_PRINTF("Total Memory Used = %d\n", manager->total);
++	FH_PRINTF("Maximum Memory Used at Once = %d\n", manager->max);
++	FH_PRINTF("Unfreed allocations:\n");
++
++	FH_CIRCLEQ_FOREACH(a, &manager->allocations, entry) {
++		FH_PRINTF("    addr=%p, size=%d from %s:%d, DMA=%d\n",
++			   a->addr, a->size, a->func, a->line, a->dma);
++	}
++}
++
++/* The replacement functions */
++void *fh_alloc_debug(void *mem_ctx, uint32_t size, char const *func, int line)
++{
++	void *addr = __FH_ALLOC(mem_ctx, size);
++
++	if (!addr) {
++		return NULL;
++	}
++
++	if (add_allocation(mem_ctx, size, func, line, addr, 0)) {
++		__FH_FREE(mem_ctx, addr);
++		return NULL;
++	}
++
++	return addr;
++}
++
++void *fh_alloc_atomic_debug(void *mem_ctx, uint32_t size, char const *func,
++			     int line)
++{
++	void *addr = __FH_ALLOC_ATOMIC(mem_ctx, size);
++
++	if (!addr) {
++		return NULL;
++	}
++
++	if (add_allocation(mem_ctx, size, func, line, addr, 0)) {
++		__FH_FREE(mem_ctx, addr);
++		return NULL;
++	}
++
++	return addr;
++}
++
++void fh_free_debug(void *mem_ctx, void *addr, char const *func, int line)
++{
++	free_allocation(mem_ctx, addr, func, line);
++	__FH_FREE(mem_ctx, addr);
++}
++
++void *fh_dma_alloc_debug(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr,
++			  char const *func, int line)
++{
++	void *addr = __FH_DMA_ALLOC(dma_ctx, size, dma_addr);
++
++	if (!addr) {
++		return NULL;
++	}
++
++	if (add_allocation(dma_ctx, size, func, line, addr, 1)) {
++		__FH_DMA_FREE(dma_ctx, size, addr, *dma_addr);
++		return NULL;
++	}
++
++	return addr;
++}
++
++void *fh_dma_alloc_atomic_debug(void *dma_ctx, uint32_t size,
++				 fh_dma_t *dma_addr, char const *func, int line)
++{
++	void *addr = __FH_DMA_ALLOC_ATOMIC(dma_ctx, size, dma_addr);
++
++	if (!addr) {
++		return NULL;
++	}
++
++	if (add_allocation(dma_ctx, size, func, line, addr, 1)) {
++		__FH_DMA_FREE(dma_ctx, size, addr, *dma_addr);
++		return NULL;
++	}
++
++	return addr;
++}
++
++void fh_dma_free_debug(void *dma_ctx, uint32_t size, void *virt_addr,
++			fh_dma_t dma_addr, char const *func, int line)
++{
++	free_allocation(dma_ctx, virt_addr, func, line);
++	__FH_DMA_FREE(dma_ctx, size, virt_addr, dma_addr);
++}
++
++#endif /* FH_DEBUG_MEMORY */
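++
++/*
++ * Usage sketch (an editorial illustration, not part of the driver): with
++ * FH_DEBUG_MEMORY defined, the FH_ALLOC/FH_FREE macros in fh_os.h expand
++ * to the *_debug replacements above, so a module only needs to bracket
++ * its lifetime with the start/stop calls:
++ *
++ *   fh_memory_debug_start(NULL);   // before the first allocation
++ *   p = FH_ALLOC(128);             // recorded with __func__/__LINE__
++ *   FH_FREE(p);                    // unlinked from the allocation list
++ *   fh_memory_debug_report();      // print statistics at any time
++ *   fh_memory_debug_stop();        // final report; flags any leaks
++ */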
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.c b/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.c
+new file mode 100644
+index 00000000..625495c7
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.c
+@@ -0,0 +1,634 @@
++/* Bignum routines adapted from PuTTY sources.  PuTTY copyright notice follows.
++ *
++ * PuTTY is copyright 1997-2007 Simon Tatham.
++ *
++ * Portions copyright Robert de Bath, Joris van Rantwijk, Delian
++ * Delchev, Andreas Schultz, Jeroen Massar, Wez Furlong, Nicolas Barry,
++ * Justin Bradford, Ben Harris, Malcolm Smith, Ahmad Khalifa, Markus
++ * Kuhn, and CORE SDI S.A.
++ *
++ * Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation files
++ * (the "Software"), to deal in the Software without restriction,
++ * including without limitation the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT.  IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE
++ * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifdef FH_CRYPTOLIB
++
++#ifndef CONFIG_MACH_IPMATE
++
++#include <asm/div64.h>
++#include "fh_modpow.h"
++
++#define BIGNUM_INT_MASK  0xFFFFFFFFUL
++#define BIGNUM_TOP_BIT   0x80000000UL
++#define BIGNUM_INT_BITS  32
++
++
++static void *snmalloc(void *mem_ctx, size_t n, size_t size)
++{
++    void *p;
++    size *= n;
++    if (size == 0) size = 1;
++    p = fh_alloc(mem_ctx, size);
++    return p;
++}
++
++#define snewn(ctx, n, type) ((type *)snmalloc((ctx), (n), sizeof(type)))
++#define sfree fh_free
++
++/*
++ * Usage notes:
++ *  * Do not call the DIVMOD_WORD macro with expressions such as array
++ *    subscripts, as some implementations object to this (see below).
++ *  * Note that none of the division methods below will cope if the
++ *    quotient won't fit into BIGNUM_INT_BITS. Callers should be careful
++ *    to avoid this case.
++ *    If this condition occurs, in the case of the x86 DIV instruction,
++ *    an overflow exception will occur, which (according to a correspondent)
++ *    will manifest on Windows as something like
++ *      0xC0000095: Integer overflow
++ *    The C variant won't give the right answer, either.
++ */
++
++#define MUL_WORD(w1, w2) ((BignumDblInt)w1 * w2)
++
++#if defined __GNUC__ && defined __i386__
++#define DIVMOD_WORD(q, r, hi, lo, w) \
++    __asm__("div %2" : \
++	    "=d" (r), "=a" (q) : \
++	    "r" (w), "d" (hi), "a" (lo))
++#else
++#define DIVMOD_WORD(q, r, hi, lo, w) do { \
++    BignumDblInt n = (((BignumDblInt)hi) << BIGNUM_INT_BITS) | lo; \
++    r = do_div(n,  w); \
++    q = n; \
++} while (0)
++#endif
++
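++/*
++ * Worked example (editorial note): DIVMOD_WORD(q, r, hi, lo, w) divides
++ * the 64-bit value ((hi << 32) | lo) by the 32-bit word w, leaving the
++ * quotient in q and the remainder in r.  For hi = 0, lo = 7, w = 2 it
++ * yields q = 3, r = 1.  Per the usage notes above, pass plain variables
++ * rather than array subscripts.
++ */
++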
++#define BIGNUM_INT_BYTES (BIGNUM_INT_BITS / 8)
++
++#define BIGNUM_INTERNAL
++
++static Bignum newbn(void *mem_ctx, int length)
++{
++    Bignum b = snewn(mem_ctx, length + 1, BignumInt);
++    //if (!b)
++    //abort();		       /* FIXME */
++    FH_MEMSET(b, 0, (length + 1) * sizeof(*b));
++    b[0] = length;
++    return b;
++}
++
++void freebn(void *mem_ctx, Bignum b)
++{
++    /*
++     * Burn the evidence, just in case.
++     */
++    FH_MEMSET(b, 0, sizeof(b[0]) * (b[0] + 1));
++    sfree(mem_ctx, b);
++}
++
++/*
++ * Compute c = a * b.
++ * Input is in the first len words of a and b.
++ * Result is returned in the first 2*len words of c.
++ */
++static void internal_mul(BignumInt *a, BignumInt *b,
++			 BignumInt *c, int len)
++{
++    int i, j;
++    BignumDblInt t;
++
++    for (j = 0; j < 2 * len; j++)
++	c[j] = 0;
++
++    for (i = len - 1; i >= 0; i--) {
++	t = 0;
++	for (j = len - 1; j >= 0; j--) {
++	    t += MUL_WORD(a[i], (BignumDblInt) b[j]);
++	    t += (BignumDblInt) c[i + j + 1];
++	    c[i + j + 1] = (BignumInt) t;
++	    t = t >> BIGNUM_INT_BITS;
++	}
++	c[i] = (BignumInt) t;
++    }
++}
++
++static void internal_add_shifted(BignumInt *number,
++				 unsigned n, int shift)
++{
++    int word = 1 + (shift / BIGNUM_INT_BITS);
++    int bshift = shift % BIGNUM_INT_BITS;
++    BignumDblInt addend;
++
++    addend = (BignumDblInt)n << bshift;
++
++    while (addend) {
++	addend += number[word];
++	number[word] = (BignumInt) addend & BIGNUM_INT_MASK;
++	addend >>= BIGNUM_INT_BITS;
++	word++;
++    }
++}
++
++/*
++ * Compute a = a % m.
++ * Input in first alen words of a and first mlen words of m.
++ * Output in first alen words of a
++ * (of which first alen-mlen words will be zero).
++ * The MSW of m MUST have its high bit set.
++ * Quotient is accumulated in the `quotient' array, which is a Bignum
++ * rather than the internal big-endian format. Quotient parts are shifted
++ * left by `qshift' before adding into quot.
++ */
++static void internal_mod(BignumInt *a, int alen,
++			 BignumInt *m, int mlen,
++			 BignumInt *quot, int qshift)
++{
++    BignumInt m0, m1;
++    unsigned int h;
++    int i, k;
++
++    m0 = m[0];
++    if (mlen > 1)
++	m1 = m[1];
++    else
++	m1 = 0;
++
++    for (i = 0; i <= alen - mlen; i++) {
++	BignumDblInt t;
++	unsigned int q, r, c, ai1;
++
++	if (i == 0) {
++	    h = 0;
++	} else {
++	    h = a[i - 1];
++	    a[i - 1] = 0;
++	}
++
++	if (i == alen - 1)
++	    ai1 = 0;
++	else
++	    ai1 = a[i + 1];
++
++	/* Find q = h:a[i] / m0 */
++	if (h >= m0) {
++	    /*
++	     * Special case.
++	     * 
++	     * To illustrate it, suppose a BignumInt is 8 bits, and
++	     * we are dividing (say) A1:23:45:67 by A1:B2:C3. Then
++	     * our initial division will be 0xA123 / 0xA1, which
++	     * will give a quotient of 0x100 and a divide overflow.
++	     * However, the invariants in this division algorithm
++	     * are not violated, since the full number A1:23:... is
++	     * _less_ than the quotient prefix A1:B2:... and so the
++	     * following correction loop would have sorted it out.
++	     * 
++	     * In this situation we set q to be the largest
++	     * quotient we _can_ stomach (0xFF, of course).
++	     */
++	    q = BIGNUM_INT_MASK;
++	} else {
++	    /* Macro doesn't want an array subscript expression passed
++	     * into it (see definition), so use a temporary. */
++	    BignumInt tmplo = a[i];
++	    DIVMOD_WORD(q, r, h, tmplo, m0);
++
++	    /* Refine our estimate of q by looking at
++	     h:a[i]:a[i+1] / m0:m1 */
++	    t = MUL_WORD(m1, q);
++	    if (t > ((BignumDblInt) r << BIGNUM_INT_BITS) + ai1) {
++		q--;
++		t -= m1;
++		r = (r + m0) & BIGNUM_INT_MASK;     /* overflow? */
++		if (r >= (BignumDblInt) m0 &&
++		    t > ((BignumDblInt) r << BIGNUM_INT_BITS) + ai1) q--;
++	    }
++	}
++
++	/* Subtract q * m from a[i...] */
++	c = 0;
++	for (k = mlen - 1; k >= 0; k--) {
++	    t = MUL_WORD(q, m[k]);
++	    t += c;
++	    c = (unsigned)(t >> BIGNUM_INT_BITS);
++	    if ((BignumInt) t > a[i + k])
++		c++;
++	    a[i + k] -= (BignumInt) t;
++	}
++
++	/* Add back m in case of borrow */
++	if (c != h) {
++	    t = 0;
++	    for (k = mlen - 1; k >= 0; k--) {
++		t += m[k];
++		t += a[i + k];
++		a[i + k] = (BignumInt) t;
++		t = t >> BIGNUM_INT_BITS;
++	    }
++	    q--;
++	}
++	if (quot)
++	    internal_add_shifted(quot, q, qshift + BIGNUM_INT_BITS * (alen - mlen - i));
++    }
++}
++
++/*
++ * Compute p % mod.
++ * The most significant word of mod MUST be non-zero.
++ * We assume that the result array is the same size as the mod array.
++ * We optionally write out a quotient if `quotient' is non-NULL.
++ * We can avoid writing out the result if `result' is NULL.
++ */
++void bigdivmod(void *mem_ctx, Bignum p, Bignum mod, Bignum result, Bignum quotient)
++{
++    BignumInt *n, *m;
++    int mshift;
++    int plen, mlen, i, j;
++
++    /* Allocate m of size mlen, copy mod to m */
++    /* We use big endian internally */
++    mlen = mod[0];
++    m = snewn(mem_ctx, mlen, BignumInt);
++    //if (!m)
++    //abort();		       /* FIXME */
++    for (j = 0; j < mlen; j++)
++	m[j] = mod[mod[0] - j];
++
++    /* Shift m left to make msb bit set */
++    for (mshift = 0; mshift < BIGNUM_INT_BITS-1; mshift++)
++	if ((m[0] << mshift) & BIGNUM_TOP_BIT)
++	    break;
++    if (mshift) {
++	for (i = 0; i < mlen - 1; i++)
++	    m[i] = (m[i] << mshift) | (m[i + 1] >> (BIGNUM_INT_BITS - mshift));
++	m[mlen - 1] = m[mlen - 1] << mshift;
++    }
++
++    plen = p[0];
++    /* Ensure plen > mlen */
++    if (plen <= mlen)
++	plen = mlen + 1;
++
++    /* Allocate n of size plen, copy p to n */
++    n = snewn(mem_ctx, plen, BignumInt);
++    //if (!n)
++    //abort();		       /* FIXME */
++    for (j = 0; j < plen; j++)
++	n[j] = 0;
++    for (j = 1; j <= (int)p[0]; j++)
++	n[plen - j] = p[j];
++
++    /* Main computation */
++    internal_mod(n, plen, m, mlen, quotient, mshift);
++
++    /* Fixup result in case the modulus was shifted */
++    if (mshift) {
++	for (i = plen - mlen - 1; i < plen - 1; i++)
++	    n[i] = (n[i] << mshift) | (n[i + 1] >> (BIGNUM_INT_BITS - mshift));
++	n[plen - 1] = n[plen - 1] << mshift;
++	internal_mod(n, plen, m, mlen, quotient, 0);
++	for (i = plen - 1; i >= plen - mlen; i--)
++	    n[i] = (n[i] >> mshift) | (n[i - 1] << (BIGNUM_INT_BITS - mshift));
++    }
++
++    /* Copy result to buffer */
++    if (result) {
++	for (i = 1; i <= (int)result[0]; i++) {
++	    int j = plen - i;
++	    result[i] = j >= 0 ? n[j] : 0;
++	}
++    }
++
++    /* Free temporary arrays */
++    for (i = 0; i < mlen; i++)
++	m[i] = 0;
++    sfree(mem_ctx, m);
++    for (i = 0; i < plen; i++)
++	n[i] = 0;
++    sfree(mem_ctx, n);
++}
++
++/*
++ * Simple remainder.
++ */
++Bignum bigmod(void *mem_ctx, Bignum a, Bignum b)
++{
++    Bignum r = newbn(mem_ctx, b[0]);
++    bigdivmod(mem_ctx, a, b, r, NULL);
++    return r;
++}
++
++/*
++ * Compute (base ^ exp) % mod.
++ */
++Bignum fh_modpow(void *mem_ctx, Bignum base_in, Bignum exp, Bignum mod)
++{
++    BignumInt *a, *b, *n, *m;
++    int mshift;
++    int mlen, i, j;
++    Bignum base, result;
++
++    /*
++     * The most significant word of mod needs to be non-zero. It
++     * should already be, but let's make sure.
++     */
++    //assert(mod[mod[0]] != 0);
++
++    /*
++     * Make sure the base is smaller than the modulus, by reducing
++     * it modulo the modulus if not.
++     */
++    base = bigmod(mem_ctx, base_in, mod);
++
++    /* Allocate m of size mlen, copy mod to m */
++    /* We use big endian internally */
++    mlen = mod[0];
++    m = snewn(mem_ctx, mlen, BignumInt);
++    //if (!m)
++    //abort();		       /* FIXME */
++    for (j = 0; j < mlen; j++)
++	m[j] = mod[mod[0] - j];
++
++    /* Shift m left to make msb bit set */
++    for (mshift = 0; mshift < BIGNUM_INT_BITS - 1; mshift++)
++	if ((m[0] << mshift) & BIGNUM_TOP_BIT)
++	    break;
++    if (mshift) {
++	for (i = 0; i < mlen - 1; i++)
++	    m[i] =
++		(m[i] << mshift) | (m[i + 1] >>
++				    (BIGNUM_INT_BITS - mshift));
++	m[mlen - 1] = m[mlen - 1] << mshift;
++    }
++
++    /* Allocate n of size mlen, copy base to n */
++    n = snewn(mem_ctx, mlen, BignumInt);
++    //if (!n)
++    //abort();		       /* FIXME */
++    i = mlen - base[0];
++    for (j = 0; j < i; j++)
++	n[j] = 0;
++    for (j = 0; j < base[0]; j++)
++	n[i + j] = base[base[0] - j];
++
++    /* Allocate a and b of size 2*mlen. Set a = 1 */
++    a = snewn(mem_ctx, 2 * mlen, BignumInt);
++    //if (!a)
++    //abort();		       /* FIXME */
++    b = snewn(mem_ctx, 2 * mlen, BignumInt);
++    //if (!b)
++    //abort();		       /* FIXME */
++    for (i = 0; i < 2 * mlen; i++)
++	a[i] = 0;
++    a[2 * mlen - 1] = 1;
++
++    /* Skip leading zero bits of exp. */
++    i = 0;
++    j = BIGNUM_INT_BITS - 1;
++    while (i < exp[0] && (exp[exp[0] - i] & (1 << j)) == 0) {
++	j--;
++	if (j < 0) {
++	    i++;
++	    j = BIGNUM_INT_BITS - 1;
++	}
++    }
++
++    /* Main computation */
++    while (i < exp[0]) {
++	while (j >= 0) {
++	    internal_mul(a + mlen, a + mlen, b, mlen);
++	    internal_mod(b, mlen * 2, m, mlen, NULL, 0);
++	    if ((exp[exp[0] - i] & (1 << j)) != 0) {
++		internal_mul(b + mlen, n, a, mlen);
++		internal_mod(a, mlen * 2, m, mlen, NULL, 0);
++	    } else {
++		BignumInt *t;
++		t = a;
++		a = b;
++		b = t;
++	    }
++	    j--;
++	}
++	i++;
++	j = BIGNUM_INT_BITS - 1;
++    }
++
++    /* Fixup result in case the modulus was shifted */
++    if (mshift) {
++	for (i = mlen - 1; i < 2 * mlen - 1; i++)
++	    a[i] =
++		(a[i] << mshift) | (a[i + 1] >>
++				    (BIGNUM_INT_BITS - mshift));
++	a[2 * mlen - 1] = a[2 * mlen - 1] << mshift;
++	internal_mod(a, mlen * 2, m, mlen, NULL, 0);
++	for (i = 2 * mlen - 1; i >= mlen; i--)
++	    a[i] =
++		(a[i] >> mshift) | (a[i - 1] <<
++				    (BIGNUM_INT_BITS - mshift));
++    }
++
++    /* Copy result to buffer */
++    result = newbn(mem_ctx, mod[0]);
++    for (i = 0; i < mlen; i++)
++	result[result[0] - i] = a[i + mlen];
++    while (result[0] > 1 && result[result[0]] == 0)
++	result[0]--;
++
++    /* Free temporary arrays */
++    for (i = 0; i < 2 * mlen; i++)
++	a[i] = 0;
++    sfree(mem_ctx, a);
++    for (i = 0; i < 2 * mlen; i++)
++	b[i] = 0;
++    sfree(mem_ctx, b);
++    for (i = 0; i < mlen; i++)
++	m[i] = 0;
++    sfree(mem_ctx, m);
++    for (i = 0; i < mlen; i++)
++	n[i] = 0;
++    sfree(mem_ctx, n);
++
++    freebn(mem_ctx, base);
++
++    return result;
++}
++
++
++#ifdef UNITTEST
++
++/* Standalone build only: the unit test uses printf() from the host libc. */
++#include <stdio.h>
++
++static __u32 dh_p[] = {
++	96,
++	0xFFFFFFFF,
++	0xFFFFFFFF,
++	0xA93AD2CA,
++	0x4B82D120,
++	0xE0FD108E,
++	0x43DB5BFC,
++	0x74E5AB31,
++	0x08E24FA0,
++	0xBAD946E2,
++	0x770988C0,
++	0x7A615D6C,
++	0xBBE11757,
++	0x177B200C,
++	0x521F2B18,
++	0x3EC86A64,
++	0xD8760273,
++	0xD98A0864,
++	0xF12FFA06,
++	0x1AD2EE6B,
++	0xCEE3D226,
++	0x4A25619D,
++	0x1E8C94E0,
++	0xDB0933D7,
++	0xABF5AE8C,
++	0xA6E1E4C7,
++	0xB3970F85,
++	0x5D060C7D,
++	0x8AEA7157,
++	0x58DBEF0A,
++	0xECFB8504,
++	0xDF1CBA64,
++	0xA85521AB,
++	0x04507A33,
++	0xAD33170D,
++	0x8AAAC42D,
++	0x15728E5A,
++	0x98FA0510,
++	0x15D22618,
++	0xEA956AE5,
++	0x3995497C,
++	0x95581718,
++	0xDE2BCBF6,
++	0x6F4C52C9,
++	0xB5C55DF0,
++	0xEC07A28F,
++	0x9B2783A2,
++	0x180E8603,
++	0xE39E772C,
++	0x2E36CE3B,
++	0x32905E46,
++	0xCA18217C,
++	0xF1746C08,
++	0x4ABC9804,
++	0x670C354E,
++	0x7096966D,
++	0x9ED52907,
++	0x208552BB,
++	0x1C62F356,
++	0xDCA3AD96,
++	0x83655D23,
++	0xFD24CF5F,
++	0x69163FA8,
++	0x1C55D39A,
++	0x98DA4836,
++	0xA163BF05,
++	0xC2007CB8,
++	0xECE45B3D,
++	0x49286651,
++	0x7C4B1FE6,
++	0xAE9F2411,
++	0x5A899FA5,
++	0xEE386BFB,
++	0xF406B7ED,
++	0x0BFF5CB6,
++	0xA637ED6B,
++	0xF44C42E9,
++	0x625E7EC6,
++	0xE485B576,
++	0x6D51C245,
++	0x4FE1356D,
++	0xF25F1437,
++	0x302B0A6D,
++	0xCD3A431B,
++	0xEF9519B3,
++	0x8E3404DD,
++	0x514A0879,
++	0x3B139B22,
++	0x020BBEA6,
++	0x8A67CC74,
++	0x29024E08,
++	0x80DC1CD1,
++	0xC4C6628B,
++	0x2168C234,
++	0xC90FDAA2,
++	0xFFFFFFFF,
++	0xFFFFFFFF,
++};
++
++static __u32 dh_a[] = {
++	8,
++	0xdf367516,
++	0x86459caa,
++	0xe2d459a4,
++	0xd910dae0,
++	0x8a8b5e37,
++	0x67ab31c6,
++	0xf0b55ea9,
++	0x440051d6,
++};
++
++static __u32 dh_b[] = {
++	8,
++	0xded92656,
++	0xe07a048a,
++	0x6fa452cd,
++	0x2df89d30,
++	0xc75f1b0f,
++	0x8ce3578f, 
++	0x7980a324,
++	0x5daec786,
++};
++
++static __u32 dh_g[] = {
++	1,
++	2,
++};
++
++int main(void)
++{
++	int i;
++	__u32 *k;
++	k = fh_modpow(NULL, dh_g, dh_a, dh_p);
++
++	printf("\n\n");
++	for (i=0; i<k[0]; i++) {
++		__u32 word32 = k[k[0] - i];
++		__u16 l = word32 & 0xffff;
++		__u16 m = (word32 & 0xffff0000) >> 16;
++		printf("%04x %04x ", m, l);
++		if (!((i + 1)%13)) printf("\n");
++	}
++	printf("\n\n");
++
++	if ((k[0] == 0x60) && (k[1] == 0x28e490e5) && (k[0x60] == 0x5a0d3d4e)) {
++		printf("PASS\n\n");
++	}
++	else {
++		printf("FAIL\n\n");
++	}
++
++}
++
++#endif /* UNITTEST */
++
++#endif /* CONFIG_MACH_IPMATE */
++
++#endif /*FH_CRYPTOLIB */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.h b/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.h
+new file mode 100644
+index 00000000..96cdb551
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_modpow.h
+@@ -0,0 +1,34 @@
++/*
++ * fh_modpow.h
++ * See fh_modpow.c for license and changes
++ */
++#ifndef _FH_MODPOW_H
++#define _FH_MODPOW_H
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include "fh_os.h"
++
++/** @file
++ *
++ * This file defines the modular exponentiation function which is only used
++ * internally by the FH UWB modules for calculation of PKs during numeric
++ * association.  The routine is taken from PuTTY, an open source terminal
++ * emulator.  The PuTTY license is preserved in the fh_modpow.c file.
++ *
++ */
++
++typedef uint32_t BignumInt;
++typedef uint64_t BignumDblInt;
++typedef BignumInt *Bignum;
++
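++/* Representation sketch (inferred from fh_modpow.c, stated here as an
++ * editorial note): word [0] holds the length in 32-bit words and words
++ * [1..length] hold the value least-significant word first, so the 64-bit
++ * value 0x0000000100000002 is the array { 2, 0x00000002, 0x00000001 }. */
++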
++/* Compute modular exponentiation */
++extern Bignum fh_modpow(void *mem_ctx, Bignum base_in, Bignum exp, Bignum mod);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* _FH_MODPOW_H */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.c b/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.c
+new file mode 100644
+index 00000000..a2878fe2
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.c
+@@ -0,0 +1,319 @@
++#ifdef FH_NOTIFYLIB
++
++#include "fh_notifier.h"
++#include "fh_list.h"
++
++typedef struct fh_observer {
++	void *observer;
++	fh_notifier_callback_t callback;
++	void *data;
++	char *notification;
++	FH_CIRCLEQ_ENTRY(fh_observer) list_entry;
++} observer_t;
++
++FH_CIRCLEQ_HEAD(observer_queue, fh_observer);
++
++typedef struct fh_notifier {
++	void *mem_ctx;
++	void *object;
++	struct observer_queue observers;
++	FH_CIRCLEQ_ENTRY(fh_notifier) list_entry;
++} notifier_t;
++
++FH_CIRCLEQ_HEAD(notifier_queue, fh_notifier);
++
++typedef struct manager {
++	void *mem_ctx;
++	void *wkq_ctx;
++	fh_workq_t *wq;
++//	fh_mutex_t *mutex;
++	struct notifier_queue notifiers;
++} manager_t;
++
++static manager_t *manager = NULL;
++
++static int create_manager(void *mem_ctx, void *wkq_ctx)
++{
++	manager = fh_alloc(mem_ctx, sizeof(manager_t));
++	if (!manager) {
++		return -FH_E_NO_MEMORY;
++	}
++
++	FH_CIRCLEQ_INIT(&manager->notifiers);
++	manager->mem_ctx = mem_ctx;
++	manager->wkq_ctx = wkq_ctx;
++
++	manager->wq = fh_workq_alloc(wkq_ctx, "FH Notification WorkQ");
++	if (!manager->wq) {
++		/* Don't leak the manager if the workqueue can't be created. */
++		fh_free(mem_ctx, manager);
++		manager = NULL;
++		return -FH_E_NO_MEMORY;
++	}
++
++	return 0;
++}
++
++static void free_manager(void)
++{
++	fh_workq_free(manager->wq);
++
++	/* All notifiers must have unregistered themselves before this module
++	 * can be removed.  Hitting this assertion indicates a programmer
++	 * error. */
++	FH_ASSERT(FH_CIRCLEQ_EMPTY(&manager->notifiers),
++		   "Notification manager being freed before all notifiers have been removed");
++	fh_free(manager->mem_ctx, manager);
++}
++
++#ifdef DEBUG
++static void dump_manager(void)
++{
++	notifier_t *n;
++	observer_t *o;
++
++	FH_ASSERT(manager, "Notification manager not found");
++
++	FH_DEBUG("List of all notifiers and observers:\n");
++	FH_CIRCLEQ_FOREACH(n, &manager->notifiers, list_entry) {
++		FH_DEBUG("Notifier %p has observers:\n", n->object);
++		FH_CIRCLEQ_FOREACH(o, &n->observers, list_entry) {
++			FH_DEBUG("    %p watching %s\n", o->observer, o->notification);
++		}
++	}
++}
++#else
++#define dump_manager(...)
++#endif
++
++static observer_t *alloc_observer(void *mem_ctx, void *observer, char *notification,
++				  fh_notifier_callback_t callback, void *data)
++{
++	observer_t *new_observer = fh_alloc(mem_ctx, sizeof(observer_t));
++
++	if (!new_observer) {
++		return NULL;
++	}
++
++	FH_CIRCLEQ_INIT_ENTRY(new_observer, list_entry);
++	new_observer->observer = observer;
++	new_observer->notification = notification;
++	new_observer->callback = callback;
++	new_observer->data = data;
++	return new_observer;
++}
++
++static void free_observer(void *mem_ctx, observer_t *observer)
++{
++	fh_free(mem_ctx, observer);
++}
++
++static notifier_t *alloc_notifier(void *mem_ctx, void *object)
++{
++	notifier_t *notifier;
++
++	if (!object) {
++		return NULL;
++	}
++
++	notifier = fh_alloc(mem_ctx, sizeof(notifier_t));
++	if (!notifier) {
++		return NULL;
++	}
++
++	FH_CIRCLEQ_INIT(&notifier->observers);
++	FH_CIRCLEQ_INIT_ENTRY(notifier, list_entry);
++
++	notifier->mem_ctx = mem_ctx;
++	notifier->object = object;
++	return notifier;
++}
++
++static void free_notifier(notifier_t *notifier)
++{
++	observer_t *observer;
++	observer_t *next;
++
++	/* Use the _SAFE iterator: the current observer is freed in the loop
++	 * body, so the plain FOREACH would advance through freed memory. */
++	FH_CIRCLEQ_FOREACH_SAFE(observer, next, &notifier->observers, list_entry) {
++		free_observer(notifier->mem_ctx, observer);
++	}
++
++	fh_free(notifier->mem_ctx, notifier);
++}
++
++static notifier_t *find_notifier(void *object)
++{
++	notifier_t *notifier;
++
++	FH_ASSERT(manager, "Notification manager not found");
++
++	if (!object) {
++		return NULL;
++	}
++
++	FH_CIRCLEQ_FOREACH(notifier, &manager->notifiers, list_entry) {
++		if (notifier->object == object) {
++			return notifier;
++		}
++	}
++
++	return NULL;
++}
++
++int fh_alloc_notification_manager(void *mem_ctx, void *wkq_ctx)
++{
++	return create_manager(mem_ctx, wkq_ctx);
++}
++
++void fh_free_notification_manager(void)
++{
++	free_manager();
++}
++
++fh_notifier_t *fh_register_notifier(void *mem_ctx, void *object)
++{
++	notifier_t *notifier;
++
++	FH_ASSERT(manager, "Notification manager not found");
++
++	notifier = find_notifier(object);
++	if (notifier) {
++		FH_ERROR("Notifier %p is already registered\n", object);
++		return NULL;
++	}
++
++	notifier = alloc_notifier(mem_ctx, object);
++	if (!notifier) {
++		return NULL;
++	}
++
++	FH_CIRCLEQ_INSERT_TAIL(&manager->notifiers, notifier, list_entry);
++
++	FH_INFO("Notifier %p registered", object);
++	dump_manager();
++
++	return notifier;
++}
++
++void fh_unregister_notifier(fh_notifier_t *notifier)
++{
++	FH_ASSERT(manager, "Notification manager not found");
++
++	if (!FH_CIRCLEQ_EMPTY(&notifier->observers)) {
++		observer_t *o;
++
++		FH_ERROR("Notifier %p has active observers when removing\n", notifier->object);
++		FH_CIRCLEQ_FOREACH(o, &notifier->observers, list_entry) {
++			FH_DEBUG("    %p watching %s\n", o->observer, o->notification);
++		}
++
++		FH_ASSERT(FH_CIRCLEQ_EMPTY(&notifier->observers),
++			   "Notifier %p has active observers when removing", notifier);
++	}
++
++	FH_CIRCLEQ_REMOVE_INIT(&manager->notifiers, notifier, list_entry);
++	free_notifier(notifier);
++
++	FH_INFO("Notifier unregistered");
++	dump_manager();
++}
++
++/* Add an observer to observe the notifier for a particular state, event, or notification. */
++int fh_add_observer(void *observer, void *object, char *notification,
++		     fh_notifier_callback_t callback, void *data)
++{
++	notifier_t *notifier = find_notifier(object);
++	observer_t *new_observer;
++
++	if (!notifier) {
++		FH_ERROR("Notifier %p is not found when adding observer\n", object);
++		return -FH_E_INVALID;
++	}
++
++	new_observer = alloc_observer(notifier->mem_ctx, observer, notification, callback, data);
++	if (!new_observer) {
++		return -FH_E_NO_MEMORY;
++	}
++
++	FH_CIRCLEQ_INSERT_TAIL(&notifier->observers, new_observer, list_entry);
++
++	FH_INFO("Added observer %p to notifier %p observing notification %s, callback=%p, data=%p",
++		 observer, object, notification, callback, data);
++
++	dump_manager();
++	return 0;
++}
++
++int fh_remove_observer(void *observer)
++{
++	notifier_t *n;
++
++	FH_ASSERT(manager, "Notification manager not found");
++
++	FH_CIRCLEQ_FOREACH(n, &manager->notifiers, list_entry) {
++		observer_t *o;
++		observer_t *o2;
++
++		FH_CIRCLEQ_FOREACH_SAFE(o, o2, &n->observers, list_entry) {
++			if (o->observer == observer) {
++				FH_CIRCLEQ_REMOVE_INIT(&n->observers, o, list_entry);
++				FH_INFO("Removing observer %p from notifier %p watching notification %s:",
++					 o->observer, n->object, o->notification);
++				free_observer(n->mem_ctx, o);
++			}
++		}
++	}
++
++	dump_manager();
++	return 0;
++}
++
++typedef struct callback_data {
++	void *mem_ctx;
++	fh_notifier_callback_t cb;
++	void *observer;
++	void *data;
++	void *object;
++	char *notification;
++	void *notification_data;
++} cb_data_t;
++
++static void cb_task(void *data)
++{
++	cb_data_t *cb = (cb_data_t *)data;
++
++	cb->cb(cb->object, cb->notification, cb->observer, cb->notification_data, cb->data);
++	fh_free(cb->mem_ctx, cb);
++}
++
++void fh_notify(fh_notifier_t *notifier, char *notification, void *notification_data)
++{
++	observer_t *o;
++
++	FH_ASSERT(manager, "Notification manager not found");
++
++	FH_CIRCLEQ_FOREACH(o, &notifier->observers, list_entry) {
++		int len = FH_STRLEN(notification);
++
++		if (FH_STRLEN(o->notification) != len) {
++			continue;
++		}
++
++		if (FH_STRNCMP(o->notification, notification, len) == 0) {
++			cb_data_t *cb_data = fh_alloc(notifier->mem_ctx, sizeof(cb_data_t));
++
++			if (!cb_data) {
++				FH_ERROR("Failed to allocate callback data\n");
++				return;
++			}
++
++			cb_data->mem_ctx = notifier->mem_ctx;
++			cb_data->cb = o->callback;
++			cb_data->observer = o->observer;
++			cb_data->data = o->data;
++			cb_data->object = notifier->object;
++			cb_data->notification = notification;
++			cb_data->notification_data = notification_data;
++			FH_DEBUG("Observer found %p for notification %s\n", o->observer, notification);
++			FH_WORKQ_SCHEDULE(manager->wq, cb_task, cb_data,
++					   "Notify callback from %p for Notification %s, to observer %p",
++					   cb_data->object, notification, cb_data->observer);
++		}
++	}
++}
++
++#endif	/* FH_NOTIFYLIB */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.h b/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.h
+new file mode 100644
+index 00000000..97386291
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_notifier.h
+@@ -0,0 +1,122 @@
++
++#ifndef __FH_NOTIFIER_H__
++#define __FH_NOTIFIER_H__
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include "fh_os.h"
++
++/** @file
++ *
++ * A simple implementation of the Observer pattern.  Any "module" can
++ * register as an observer or notifier.  The notion of "module" is abstract and
++ * can mean anything used to identify either an observer or notifier.  Usually
++ * it will be a pointer to a data structure which contains some state, i.e. an
++ * object.
++ *
++ * Before any notifiers can be added, the global notification manager must be
++ * brought up with fh_alloc_notification_manager().
++ * fh_free_notification_manager() will bring it down and free all resources.
++ * These would typically be called upon module load and unload.  The
++ * notification manager is a single global instance that handles all registered
++ * observable modules and observers so this should be done only once.
++ *
++ * A module can be observable by using Notifications to publicize some general
++ * information about its state or operation.  It does not care who listens, or
++ * even if anyone listens, or what they do with the information.  An observable
++ * module does not need to know anything about its observers or their
++ * interface, or their state or data.
++ *
++ * Any module can register to emit Notifications.  It should publish a list of
++ * notifications that it can emit and their behavior, such as when they will get
++ * triggered, and what information will be provided to the observer.  Then it
++ * should register itself as an observable module. See fh_register_notifier().
++ *
++ * Any module can observe any observable, registered module, provided it has a
++ * handle to the other module and knows what notifications to observe.  See
++ * fh_add_observer().
++ *
++ * A function of type fh_notifier_callback_t is called whenever a notification
++ * is triggered with one or more observers observing it.  This function is
++ * called in its own process so it may sleep or block if needed.  It is
++ * guaranteed to be called sometime after the notification has occurred and will
++ * be called once each time the notification is triggered.  It will NOT be
++ * called in the same process context used to trigger the notification.
++ *
++ * @section Limitations
++ *
++ * Keep in mind that Notifications triggered in rapid succession may schedule
++ * more callback processes than the system can handle.  Be aware of this
++ * limitation when designing to use notifications, and only add notifications
++ * for appropriate observable information.
++ *
++ * Also, Notification callbacks are not synchronous.  If you need to synchronize
++ * behavior between the module and its observers you must use other means.  And
++ * perhaps that will mean Notifications are not the proper solution.
++ */
++
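++/* End-to-end usage sketch (an editorial illustration; the my_* names are
++ * hypothetical, the calls are the ones declared below):
++ *
++ *   fh_alloc_notification_manager(NULL, NULL);
++ *   notifier = fh_register_notifier(NULL, &my_device);
++ *   fh_add_observer(&my_observer, &my_device, "state-changed",
++ *                   my_callback, my_user_data);
++ *   fh_notify(notifier, "state-changed", event_data);
++ *   fh_remove_observer(&my_observer);
++ *   fh_unregister_notifier(notifier);
++ *   fh_free_notification_manager();
++ */
++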
++struct fh_notifier;
++typedef struct fh_notifier fh_notifier_t;
++
++/** The callback function must be of this type.
++ *
++ * @param object This is the object that is being observed.
++ * @param notification This is the notification that was triggered.
++ * @param observer This is the observer
++ * @param notification_data This is notification-specific data that the notifier
++ * has included in this notification.  The value of this should be published in
++ * the documentation of the observable module with the notifications.
++ * @param user_data This is any custom data that the observer provided when
++ * adding itself as an observer to the notification. */
++typedef void (*fh_notifier_callback_t)(void *object, char *notification, void *observer,
++					void *notification_data, void *user_data);
++
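++/* A matching callback sketch (hypothetical, for illustration):
++ *
++ *   static void my_callback(void *object, char *notification,
++ *                           void *observer, void *notification_data,
++ *                           void *user_data)
++ *   {
++ *           FH_DEBUG("%s triggered on %p\n", notification, object);
++ *   }
++ */
++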
++/** Brings up the notification manager. */
++extern int fh_alloc_notification_manager(void *mem_ctx, void *wkq_ctx);
++/** Brings down the notification manager. */
++extern void fh_free_notification_manager(void);
++
++/** This function registers an observable module.  A fh_notifier_t object is
++ * returned to the observable module.  This is an opaque object that is used by
++ * the observable module to trigger notifications.  This object should only be
++ * accessible to functions that are authorized to trigger notifications for this
++ * module.  Observers do not need this object. */
++extern fh_notifier_t *fh_register_notifier(void *mem_ctx, void *object);
++
++/** This function unregisters an observable module.  All observers have to be
++ * removed prior to unregistration. */
++extern void fh_unregister_notifier(fh_notifier_t *notifier);
++
++/** Add a module as an observer to the observable module.  The observable module
++ * needs to have previously registered with the notification manager.
++ *
++ * @param observer The observer module
++ * @param object The module to observe
++ * @param notification The notification to observe
++ * @param callback The callback function to call
++ * @param user_data Any additional user data to pass into the callback function */
++extern int fh_add_observer(void *observer, void *object, char *notification,
++			    fh_notifier_callback_t callback, void *user_data);
++
++/** Removes the specified observer from all notifications that it is currently
++ * observing. */
++extern int fh_remove_observer(void *observer);
++
++/** This function triggers a Notification.  It should be called by the
++ * observable module, or any module or library which the observable module
++ * allows to trigger notifications on its behalf, such as the fh_cc_t.
++ *
++ * fh_notify is a non-blocking function.  Callbacks are scheduled and called in
++ * their own process context for each trigger.  Callbacks can be blocking.
++ * fh_notify can be called from interrupt context if needed.
++ *
++ */
++void fh_notify(fh_notifier_t *notifier, char *notification, void *notification_data);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __FH_NOTIFIER_H__ */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/fh_os.h b/drivers/usb/host/fh_otg/fh_common_port/fh_os.h
+new file mode 100644
+index 00000000..d73ab583
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/fh_os.h
+@@ -0,0 +1,1245 @@
++/* =========================================================================
++ * $File: //dwh/usb_iip/dev/software/fh_common_port_2/fh_os.h $
++ * $Revision: #15 $
++ * $Date: 2015/06/12 $
++ * $Change: 2859407 $
++ *
++ * Synopsys Portability Library Software and documentation
++ * (hereinafter, "Software") is an Unsupported proprietary work of
++ * Synopsys, Inc. unless otherwise expressly agreed to in writing
++ * between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for
++ * Licensed Product with Synopsys or any supplement thereto. You are
++ * permitted to use and redistribute this Software in source and binary
++ * forms, with or without modification, provided that redistributions
++ * of source code must retain this notice. You may not view, use,
++ * disclose, copy or distribute this file or any information contained
++ * herein except pursuant to this license grant from Synopsys. If you
++ * do not agree with this notice, including the disclaimer below, then
++ * you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
++ * FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
++ * SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
++ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
++ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================= */
++#ifndef _FH_OS_H_
++#define _FH_OS_H_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/** @file
++ *
++ * FH portability library, low level os-wrapper functions
++ *
++ */
++
++/* These basic types need to be defined by some OS header file or custom header
++ * file for your specific target architecture.
++ *
++ * uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t
++ *
++ * Any custom or alternate header file must be added and enabled here.
++ */
++
++#ifdef FH_LINUX
++# include <linux/types.h>
++# ifdef CONFIG_DEBUG_MUTEXES
++#  include <linux/mutex.h>
++# endif
++# include <linux/errno.h>
++# include <stdarg.h>
++#endif
++
++#if defined(FH_FREEBSD) || defined(FH_NETBSD)
++# include <os_dep.h>
++#endif
++
++
++/** @name Primitive Types and Values */
++
++/** We define a boolean type for consistency.  Can be either YES or NO */
++typedef uint8_t fh_bool_t;
++#define YES  1
++#define NO   0
++
++#ifdef FH_LINUX
++
++/** @name Error Codes */
++#define FH_E_INVALID		EINVAL
++#define FH_E_NO_MEMORY		ENOMEM
++#define FH_E_NO_DEVICE		ENODEV
++#define FH_E_NOT_SUPPORTED	EOPNOTSUPP
++#define FH_E_TIMEOUT		ETIMEDOUT
++#define FH_E_BUSY		EBUSY
++#define FH_E_AGAIN		EAGAIN
++#define FH_E_RESTART		ERESTART
++#define FH_E_ABORT		ECONNABORTED
++#define FH_E_SHUTDOWN		ESHUTDOWN
++#define FH_E_NO_DATA		ENODATA
++#define FH_E_DISCONNECT	ECONNRESET
++#define FH_E_UNKNOWN		EINVAL
++#define FH_E_NO_STREAM_RES	ENOSR
++#define FH_E_COMMUNICATION	ECOMM
++#define FH_E_OVERFLOW		EOVERFLOW
++#define FH_E_PROTOCOL		EPROTO
++#define FH_E_IN_PROGRESS	EINPROGRESS
++#define FH_E_PIPE		EPIPE
++#define FH_E_IO		EIO
++#define FH_E_NO_SPACE		ENOSPC
++
++#else
++
++/** @name Error Codes */
++#define FH_E_INVALID		1001
++#define FH_E_NO_MEMORY		1002
++#define FH_E_NO_DEVICE		1003
++#define FH_E_NOT_SUPPORTED	1004
++#define FH_E_TIMEOUT		1005
++#define FH_E_BUSY		1006
++#define FH_E_AGAIN		1007
++#define FH_E_RESTART		1008
++#define FH_E_ABORT		1009
++#define FH_E_SHUTDOWN		1010
++#define FH_E_NO_DATA		1011
++#define FH_E_DISCONNECT	2000
++#define FH_E_UNKNOWN		3000
++#define FH_E_NO_STREAM_RES	4001
++#define FH_E_COMMUNICATION	4002
++#define FH_E_OVERFLOW		4003
++#define FH_E_PROTOCOL		4004
++#define FH_E_IN_PROGRESS	4005
++#define FH_E_PIPE		4006
++#define FH_E_IO		4007
++#define FH_E_NO_SPACE		4008
++
++#endif
++
++
++/** @name Tracing/Logging Functions
++ *
++ * These functions provide the capability to add tracing, debugging, and error
++ * messages, as well as exceptions and assertions.  The WUDEV uses these
++ * extensively.  These could be logged to the main console, the serial port, an
++ * internal buffer, etc.  These functions could also be no-ops if they are too
++ * expensive on your system.  Undefining the DEBUG macro already turns some of
++ * these functions into no-ops. */
++
++/** Returns non-zero if in interrupt context. */
++extern fh_bool_t FH_IN_IRQ(void);
++#define fh_in_irq FH_IN_IRQ
++
++/** Returns "IRQ" if FH_IN_IRQ is true. */
++static inline char *fh_irq(void) {
++	return FH_IN_IRQ() ? "IRQ" : "";
++}
++
++/** Returns non-zero if in bottom-half context. */
++extern fh_bool_t FH_IN_BH(void);
++#define fh_in_bh FH_IN_BH
++
++/** Returns "BH" if FH_IN_BH is true. */
++static inline char *fh_bh(void) {
++	return FH_IN_BH() ? "BH" : "";
++}
++
++/**
++ * A vprintf() clone.  Just call vprintf if you've got it.
++ */
++extern void FH_VPRINTF(char *format, va_list args);
++#define fh_vprintf FH_VPRINTF
++
++/**
++ * A vsnprintf() clone.  Just call vsnprintf if you've got it.
++ */
++extern int FH_VSNPRINTF(char *str, int size, char *format, va_list args);
++#define fh_vsnprintf FH_VSNPRINTF
++
++/**
++ * printf() clone.  Just call printf if you've got it.
++ */
++extern void FH_PRINTF(char *format, ...)
++/* This provides compiler level static checking of the parameters if you're
++ * using GCC. */
++#ifdef __GNUC__
++	__attribute__ ((format(printf, 1, 2)));
++#else
++	;
++#endif
++#define fh_printf FH_PRINTF
++
++/**
++ * sprintf() clone.  Just call sprintf if you've got it.
++ */
++extern int FH_SPRINTF(char *string, char *format, ...)
++#ifdef __GNUC__
++	__attribute__ ((format(printf, 2, 3)));
++#else
++	;
++#endif
++#define fh_sprintf FH_SPRINTF
++
++/**
++ * snprintf() clone.  Just call snprintf if you've got it.
++ */
++extern int FH_SNPRINTF(char *string, int size, char *format, ...)
++#ifdef __GNUC__
++	__attribute__ ((format(printf, 3, 4)));
++#else
++	;
++#endif
++#define fh_snprintf FH_SNPRINTF
++
++/**
++ * Prints a WARNING message.  On systems that don't differentiate between
++ * warnings and regular log messages, just print it.  Indicates that something
++ * may be wrong with the driver.  Works like printf().
++ *
++ * Use the FH_WARN macro to call this function.
++ */
++extern void __FH_WARN(char *format, ...)
++#ifdef __GNUC__
++	__attribute__ ((format(printf, 1, 2)));
++#else
++	;
++#endif
++
++/**
++ * Prints an error message.  On systems that don't differentiate between errors
++ * and regular log messages, just print it.  Indicates that something went wrong
++ * with the driver.  Works like printf().
++ *
++ * Use the FH_ERROR macro to call this function.
++ */
++extern void __FH_ERROR(char *format, ...)
++#ifdef __GNUC__
++	__attribute__ ((format(printf, 1, 2)));
++#else
++	;
++#endif
++
++/**
++ * Prints an exception error message and takes some user-defined action such as
++ * printing a backtrace or triggering a breakpoint.  Indicates that something went
++ * abnormally wrong with the driver such as programmer error, or other
++ * exceptional condition.  It should not be ignored so even on systems without
++ * printing capability, some action should be taken to notify the developer of
++ * it.  Works like printf().
++ */
++extern void FH_EXCEPTION(char *format, ...)
++#ifdef __GNUC__
++	__attribute__ ((format(printf, 1, 2)));
++#else
++	;
++#endif
++#define fh_exception FH_EXCEPTION
++
++#ifdef DEBUG
++/**
++ * Prints out a debug message.  Used for logging/trace messages.
++ *
++ * Use the FH_DEBUG macro to call this function
++ */
++extern void __FH_DEBUG(char *format, ...)
++#ifdef __GNUC__
++	__attribute__ ((format(printf, 1, 2)));
++#else
++	;
++#endif
++#else
++#define __FH_DEBUG(...)
++#endif
++
++/**
++ * Prints out a Debug message.
++ */
++#define FH_DEBUG(_format, _args...) __FH_DEBUG("DEBUG:%s:%s: " _format "\n", \
++						 __func__, fh_irq(), ## _args)
++#define fh_debug FH_DEBUG
++/**
++ * Prints out an informative message.
++ */
++#define FH_INFO(_format, _args...) FH_PRINTF("INFO:%s: " _format "\n", \
++					       fh_irq(), ## _args)
++#define fh_info FH_INFO
++/**
++ * Prints out a warning message.
++ */
++#define FH_WARN(_format, _args...) __FH_WARN("WARN:%s:%s:%d: " _format "\n", \
++					fh_irq(), __func__, __LINE__, ## _args)
++#define fh_warn FH_WARN
++/**
++ * Prints out an error message.
++ */
++#define FH_ERROR(_format, _args...) __FH_ERROR("ERROR:%s:%s:%d: " _format "\n", \
++					fh_irq(), __func__, __LINE__, ## _args)
++#define fh_error FH_ERROR
++
++#define FH_PROTO_ERROR(_format, _args...) __FH_WARN("ERROR:%s:%s:%d: " _format "\n", \
++						fh_irq(), __func__, __LINE__, ## _args)
++#define fh_proto_error FH_PROTO_ERROR
++
++#ifdef DEBUG
++/** Prints out an exception error message if the _expr expression fails.  Disabled
++ * if DEBUG is not enabled. */
++#define FH_ASSERT(_expr, _format, _args...) do { \
++	if (!(_expr)) { FH_EXCEPTION("%s:%s:%d: " _format "\n", fh_irq(), \
++				      __FILE__, __LINE__, ## _args); } \
++	} while (0)
++#else
++#define FH_ASSERT(_x...)
++#endif
++#define fh_assert FH_ASSERT
++
++
++/** @name Byte Ordering
++ * The following functions are for conversions between the processor's byte
++ * ordering and the specific ordering you want.
++ */
++
++/** Converts 32 bit data in CPU byte ordering to little endian. */
++extern uint32_t FH_CPU_TO_LE32(uint32_t *p);
++#define fh_cpu_to_le32 FH_CPU_TO_LE32
++
++/** Converts 32 bit data in CPU byte ordering to big endian. */
++extern uint32_t FH_CPU_TO_BE32(uint32_t *p);
++#define fh_cpu_to_be32 FH_CPU_TO_BE32
++
++/** Converts 32 bit little endian data to CPU byte ordering. */
++extern uint32_t FH_LE32_TO_CPU(uint32_t *p);
++#define fh_le32_to_cpu FH_LE32_TO_CPU
++
++/** Converts 32 bit big endian data to CPU byte ordering. */
++extern uint32_t FH_BE32_TO_CPU(uint32_t *p);
++#define fh_be32_to_cpu FH_BE32_TO_CPU
++
++/** Converts 16 bit data in CPU byte ordering to little endian. */
++extern uint16_t FH_CPU_TO_LE16(uint16_t *p);
++#define fh_cpu_to_le16 FH_CPU_TO_LE16
++
++/** Converts 16 bit data in CPU byte ordering to big endian. */
++extern uint16_t FH_CPU_TO_BE16(uint16_t *p);
++#define fh_cpu_to_be16 FH_CPU_TO_BE16
++
++/** Converts 16 bit little endian data to CPU byte ordering. */
++extern uint16_t FH_LE16_TO_CPU(uint16_t *p);
++#define fh_le16_to_cpu FH_LE16_TO_CPU
++
++/** Converts 16 bit big endian data to CPU byte ordering. */
++extern uint16_t FH_BE16_TO_CPU(uint16_t *p);
++#define fh_be16_to_cpu FH_BE16_TO_CPU
++
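++/* Example (editorial note): these take a pointer and return the converted
++ * value, e.g. on a little-endian CPU, for uint32_t v = 0x11223344,
++ * FH_CPU_TO_BE32(&v) returns 0x44332211. */
++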
++
++/** @name Register Read/Write
++ *
++ * The following six functions should be implemented to read/write registers of
++ * 32-bit and 64-bit sizes.  All modules use these to read/write register values.
++ * The reg value is a pointer to the register calculated from the void *base
++ * variable passed into the driver when it is started.  */
++
++#ifdef FH_LINUX
++/* Linux doesn't need any extra parameters for register read/write, so we
++ * just throw away the IO context parameter.
++ */
++/** Reads the content of a 32-bit register. */
++extern uint32_t FH_READ_REG32(uint32_t volatile *reg);
++#define fh_read_reg32(_ctx_,_reg_) FH_READ_REG32(_reg_)
++
++/** Reads the content of a 64-bit register. */
++extern uint64_t FH_READ_REG64(uint64_t volatile *reg);
++#define fh_read_reg64(_ctx_,_reg_) FH_READ_REG64(_reg_)
++
++/** Writes to a 32-bit register. */
++extern void FH_WRITE_REG32(uint32_t volatile *reg, uint32_t value);
++#define fh_write_reg32(_ctx_,_reg_,_val_) FH_WRITE_REG32(_reg_, _val_)
++
++/** Writes to a 64-bit register. */
++extern void FH_WRITE_REG64(uint64_t volatile *reg, uint64_t value);
++#define fh_write_reg64(_ctx_,_reg_,_val_) FH_WRITE_REG64(_reg_, _val_)
++
++/**
++ * Modify bit values in a register.  Using the
++ * algorithm: (reg_contents & ~clear_mask) | set_mask.
++ */
++extern void FH_MODIFY_REG32(uint32_t volatile *reg, uint32_t clear_mask, uint32_t set_mask);
++#define fh_modify_reg32(_ctx_,_reg_,_cmsk_,_smsk_) FH_MODIFY_REG32(_reg_,_cmsk_,_smsk_)
++extern void FH_MODIFY_REG64(uint64_t volatile *reg, uint64_t clear_mask, uint64_t set_mask);
++#define fh_modify_reg64(_ctx_,_reg_,_cmsk_,_smsk_) FH_MODIFY_REG64(_reg_,_cmsk_,_smsk_)
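++
++/* Worked example (editorial note) of the stated algorithm: if the register
++ * currently reads 0xF0, then fh_modify_reg32(NULL, &reg, 0x30, 0x01)
++ * writes (0xF0 & ~0x30) | 0x01 = 0xC1. */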
++
++#endif	/* FH_LINUX */
++
++#if defined(FH_FREEBSD) || defined(FH_NETBSD)
++typedef struct fh_ioctx {
++	struct device *dev;
++	bus_space_tag_t iot;
++	bus_space_handle_t ioh;
++} fh_ioctx_t;
++
++/** BSD needs two extra parameters for register read/write, so we pass
++ * them in using the IO context parameter.
++ */
++/** Reads the content of a 32-bit register. */
++extern uint32_t FH_READ_REG32(void *io_ctx, uint32_t volatile *reg);
++#define fh_read_reg32 FH_READ_REG32
++
++/** Reads the content of a 64-bit register. */
++extern uint64_t FH_READ_REG64(void *io_ctx, uint64_t volatile *reg);
++#define fh_read_reg64 FH_READ_REG64
++
++/** Writes to a 32-bit register. */
++extern void FH_WRITE_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t value);
++#define fh_write_reg32 FH_WRITE_REG32
++
++/** Writes to a 64-bit register. */
++extern void FH_WRITE_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t value);
++#define fh_write_reg64 FH_WRITE_REG64
++
++/**
++ * Modify bit values in a register.  Using the
++ * algorithm: (reg_contents & ~clear_mask) | set_mask.
++ */
++extern void FH_MODIFY_REG32(void *io_ctx, uint32_t volatile *reg, uint32_t clear_mask, uint32_t set_mask);
++#define fh_modify_reg32 FH_MODIFY_REG32
++extern void FH_MODIFY_REG64(void *io_ctx, uint64_t volatile *reg, uint64_t clear_mask, uint64_t set_mask);
++#define fh_modify_reg64 FH_MODIFY_REG64
++
++#endif	/* FH_FREEBSD || FH_NETBSD */
++
++/** @cond */
++
++/** @name Some convenience MACROS used internally.  Define FH_DEBUG_REGS to log the
++ * register writes. */
++
++#ifdef FH_LINUX
++
++# ifdef FH_DEBUG_REGS
++
++#define fh_define_read_write_reg_n(_reg,_container_type) \
++static inline uint32_t fh_read_##_reg##_n(_container_type *container, int num) { \
++	return FH_READ_REG32(&container->regs->_reg[num]); \
++} \
++static inline void fh_write_##_reg##_n(_container_type *container, int num, uint32_t data) { \
++	FH_DEBUG("WRITING %8s[%d]: %p: %08x", #_reg, num, \
++		  &(((uint32_t*)container->regs->_reg)[num]), data); \
++	FH_WRITE_REG32(&(((uint32_t*)container->regs->_reg)[num]), data); \
++}
++
++#define fh_define_read_write_reg(_reg,_container_type) \
++static inline uint32_t fh_read_##_reg(_container_type *container) { \
++	return FH_READ_REG32(&container->regs->_reg); \
++} \
++static inline void fh_write_##_reg(_container_type *container, uint32_t data) { \
++	FH_DEBUG("WRITING %11s: %p: %08x", #_reg, &container->regs->_reg, data); \
++	FH_WRITE_REG32(&container->regs->_reg, data); \
++}
++
++# else	/* FH_DEBUG_REGS */
++
++#define fh_define_read_write_reg_n(_reg,_container_type) \
++static inline uint32_t fh_read_##_reg##_n(_container_type *container, int num) { \
++	return FH_READ_REG32(&container->regs->_reg[num]); \
++} \
++static inline void fh_write_##_reg##_n(_container_type *container, int num, uint32_t data) { \
++	FH_WRITE_REG32(&(((uint32_t*)container->regs->_reg)[num]), data); \
++}
++
++#define fh_define_read_write_reg(_reg,_container_type) \
++static inline uint32_t fh_read_##_reg(_container_type *container) { \
++	return FH_READ_REG32(&container->regs->_reg); \
++} \
++static inline void fh_write_##_reg(_container_type *container, uint32_t data) { \
++	FH_WRITE_REG32(&container->regs->_reg, data); \
++}
++
++# endif	/* FH_DEBUG_REGS */
++
++#endif	/* FH_LINUX */
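++
++/* Expansion sketch (editorial note; my_dev, my_regs, and ctrl are
++ * hypothetical names): a driver whose container holds a register map
++ * pointer, e.g.
++ *
++ *   struct my_dev { struct my_regs *regs; };
++ *
++ * can declare typed accessors with
++ *
++ *   fh_define_read_write_reg(ctrl, struct my_dev)
++ *
++ * which generates fh_read_ctrl(dev) and fh_write_ctrl(dev, data). */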
++
++#if defined(FH_FREEBSD) || defined(FH_NETBSD)
++
++# ifdef FH_DEBUG_REGS
++
++#define fh_define_read_write_reg_n(_reg,_container_type) \
++static inline uint32_t fh_read_##_reg##_n(void *io_ctx, _container_type *container, int num) { \
++	return FH_READ_REG32(io_ctx, &container->regs->_reg[num]); \
++} \
++static inline void fh_write_##_reg##_n(void *io_ctx, _container_type *container, int num, uint32_t data) { \
++	FH_DEBUG("WRITING %8s[%d]: %p: %08x", #_reg, num, \
++		  &(((uint32_t*)container->regs->_reg)[num]), data); \
++	FH_WRITE_REG32(io_ctx, &(((uint32_t*)container->regs->_reg)[num]), data); \
++}
++
++#define fh_define_read_write_reg(_reg,_container_type) \
++static inline uint32_t fh_read_##_reg(void *io_ctx, _container_type *container) { \
++	return FH_READ_REG32(io_ctx, &container->regs->_reg); \
++} \
++static inline void fh_write_##_reg(void *io_ctx, _container_type *container, uint32_t data) { \
++	FH_DEBUG("WRITING %11s: %p: %08x", #_reg, &container->regs->_reg, data); \
++	FH_WRITE_REG32(io_ctx, &container->regs->_reg, data); \
++}
++
++# else	/* FH_DEBUG_REGS */
++
++#define fh_define_read_write_reg_n(_reg,_container_type) \
++static inline uint32_t fh_read_##_reg##_n(void *io_ctx, _container_type *container, int num) { \
++	return FH_READ_REG32(io_ctx, &container->regs->_reg[num]); \
++} \
++static inline void fh_write_##_reg##_n(void *io_ctx, _container_type *container, int num, uint32_t data) { \
++	FH_WRITE_REG32(io_ctx, &(((uint32_t*)container->regs->_reg)[num]), data); \
++}
++
++#define fh_define_read_write_reg(_reg,_container_type) \
++static inline uint32_t fh_read_##_reg(void *io_ctx, _container_type *container) { \
++	return FH_READ_REG32(io_ctx, &container->regs->_reg); \
++} \
++static inline void fh_write_##_reg(void *io_ctx, _container_type *container, uint32_t data) { \
++	FH_WRITE_REG32(io_ctx, &container->regs->_reg, data); \
++}
++
++# endif	/* FH_DEBUG_REGS */
++
++#endif	/* FH_FREEBSD || FH_NETBSD */
++
++/** @endcond */
++
++
++#ifdef FH_CRYPTOLIB
++/** @name Crypto Functions
++ *
++ * These are the low-level cryptographic functions used by the driver. */
++
++/** Perform AES CBC */
++extern int FH_AES_CBC(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t iv[16], uint8_t *out);
++#define fh_aes_cbc FH_AES_CBC
++
++/** Fill the provided buffer with random bytes.  These should be cryptographic grade random numbers. */
++extern void FH_RANDOM_BYTES(uint8_t *buffer, uint32_t length);
++#define fh_random_bytes FH_RANDOM_BYTES
++
++/** Perform the SHA-256 hash function */
++extern int FH_SHA256(uint8_t *message, uint32_t len, uint8_t *out);
++#define fh_sha256 FH_SHA256
++
++/** Calculated the HMAC-SHA256 */
++extern int FH_HMAC_SHA256(uint8_t *message, uint32_t messagelen, uint8_t *key, uint32_t keylen, uint8_t *out);
++#define fh_hmac_sha256 FH_HMAC_SHA256
++
++#endif	/* FH_CRYPTOLIB */
++
++
++/** @name Memory Allocation
++ *
++ * These functions provide access to memory allocation.  There are only 2 DMA
++ * functions and 3 regular memory functions that need to be implemented.  None
++ * of the memory debugging routines need to be implemented.  The allocation
++ * routines all ZERO the contents of the memory.
++ *
++ * Defining FH_DEBUG_MEMORY turns on memory debugging and statistic gathering.
++ * This checks for memory leaks, keeping track of alloc/free pairs.  It also
++ * keeps track of how much memory the driver is using at any given time. */
++
++#define FH_PAGE_SIZE 4096
++#define FH_PAGE_OFFSET(addr) (((uint32_t)addr) & 0xfff)
++#define FH_PAGE_ALIGNED(addr) ((((uint32_t)addr) & 0xfff) == 0)
++
++#define FH_INVALID_DMA_ADDR 0x0
++
++#ifdef FH_LINUX
++/** Type for a DMA address */
++typedef dma_addr_t fh_dma_t;
++#endif
++
++#if defined(FH_FREEBSD) || defined(FH_NETBSD)
++typedef bus_addr_t fh_dma_t;
++#endif
++
++#ifdef FH_FREEBSD
++typedef struct fh_dmactx {
++	struct device *dev;
++	bus_dma_tag_t dma_tag;
++	bus_dmamap_t dma_map;
++	bus_addr_t dma_paddr;
++	void *dma_vaddr;
++} fh_dmactx_t;
++#endif
++
++#ifdef FH_NETBSD
++typedef struct fh_dmactx {
++	struct device *dev;
++	bus_dma_tag_t dma_tag;
++	bus_dmamap_t dma_map;
++	bus_dma_segment_t segs[1];
++	int nsegs;
++	bus_addr_t dma_paddr;
++	void *dma_vaddr;
++} fh_dmactx_t;
++#endif
++
++/* @todo these functions will be added in the future */
++#if 0
++/**
++ * Creates a DMA pool from which you can allocate DMA buffers.  Buffers
++ * allocated from this pool will be guaranteed to meet the size, alignment, and
++ * boundary requirements specified.
++ *
++ * @param[in] size Specifies the size of the buffers that will be allocated from
++ * this pool.
++ * @param[in] align Specifies the byte alignment requirements of the buffers
++ * allocated from this pool.  Must be a power of 2.
++ * @param[in] boundary Specifies the N-byte boundary that buffers allocated from
++ * this pool must not cross.
++ *
++ * @returns A pointer to an internal opaque structure which is not to be
++ * accessed outside of these library functions.  Use this handle to specify
++ * which pools to allocate/free DMA buffers from and also to destroy the pool,
++ * when you are done with it.
++ */
++extern fh_pool_t *FH_DMA_POOL_CREATE(uint32_t size, uint32_t align, uint32_t boundary);
++
++/**
++ * Destroy a DMA pool.  All buffers allocated from that pool must be freed first.
++ */
++extern void FH_DMA_POOL_DESTROY(fh_pool_t *pool);
++
++/**
++ * Allocate a buffer from the specified DMA pool and zeros its contents.
++ */
++extern void *FH_DMA_POOL_ALLOC(fh_pool_t *pool, uint64_t *dma_addr);
++
++/**
++ * Free a previously allocated buffer from the DMA pool.
++ */
++extern void FH_DMA_POOL_FREE(fh_pool_t *pool, void *vaddr, void *daddr);
++#endif
++
++/** Allocates a DMA capable buffer and zeroes its contents. */
++extern void *__FH_DMA_ALLOC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr);
++
++/** Allocates a DMA capable buffer and zeroes its contents, in an atomic context. */
++extern void *__FH_DMA_ALLOC_ATOMIC(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr);
++
++/** Frees a previously allocated buffer. */
++extern void __FH_DMA_FREE(void *dma_ctx, uint32_t size, void *virt_addr, fh_dma_t dma_addr);
++
++/** Allocates a block of memory and zeroes its contents. */
++extern void *__FH_ALLOC(void *mem_ctx, uint32_t size);
++
++/** Allocates a block of memory and zeroes its contents, in an atomic manner
++ * which can be used inside interrupt context.  The size should be sufficiently
++ * small, a few KB at most, such that failures are not likely to occur.  This
++ * can simply call __FH_ALLOC if that implementation is already atomic. */
++extern void *__FH_ALLOC_ATOMIC(void *mem_ctx, uint32_t size);
++
++/** Frees a previously allocated buffer. */
++extern void __FH_FREE(void *mem_ctx, void *addr);
++
++#ifndef FH_DEBUG_MEMORY
++
++#define FH_ALLOC(_size_) __FH_ALLOC(NULL, _size_)
++#define FH_ALLOC_ATOMIC(_size_) __FH_ALLOC_ATOMIC(NULL, _size_)
++#define FH_FREE(_addr_) __FH_FREE(NULL, _addr_)
++
++# ifdef FH_LINUX
++#define FH_DMA_ALLOC(_size_,_dma_) __FH_DMA_ALLOC(NULL, _size_, _dma_)
++#define FH_DMA_ALLOC_ATOMIC(_size_,_dma_) __FH_DMA_ALLOC_ATOMIC(NULL, _size_,_dma_)
++#define FH_DMA_FREE(_size_,_virt_,_dma_) __FH_DMA_FREE(NULL, _size_, _virt_, _dma_)
++# endif
++
++# if defined(FH_FREEBSD) || defined(FH_NETBSD)
++#define FH_DMA_ALLOC __FH_DMA_ALLOC
++#define FH_DMA_FREE __FH_DMA_FREE
++# endif
++
++#else	/* FH_DEBUG_MEMORY */
++
++extern void *fh_alloc_debug(void *mem_ctx, uint32_t size, char const *func, int line);
++extern void *fh_alloc_atomic_debug(void *mem_ctx, uint32_t size, char const *func, int line);
++extern void fh_free_debug(void *mem_ctx, void *addr, char const *func, int line);
++extern void *fh_dma_alloc_debug(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr,
++				 char const *func, int line);
++extern void *fh_dma_alloc_atomic_debug(void *dma_ctx, uint32_t size, fh_dma_t *dma_addr, 
++				char const *func, int line);
++extern void fh_dma_free_debug(void *dma_ctx, uint32_t size, void *virt_addr,
++			       fh_dma_t dma_addr, char const *func, int line);
++
++extern int fh_memory_debug_start(void *mem_ctx);
++extern void fh_memory_debug_stop(void);
++extern void fh_memory_debug_report(void);
++
++#define FH_ALLOC(_size_) fh_alloc_debug(NULL, _size_, __func__, __LINE__)
++#define FH_ALLOC_ATOMIC(_size_) fh_alloc_atomic_debug(NULL, _size_, \
++							__func__, __LINE__)
++#define FH_FREE(_addr_) fh_free_debug(NULL, _addr_, __func__, __LINE__)
++
++# ifdef FH_LINUX
++#define FH_DMA_ALLOC(_size_,_dma_) fh_dma_alloc_debug(NULL, _size_, \
++						_dma_, __func__, __LINE__)
++#define FH_DMA_ALLOC_ATOMIC(_size_,_dma_) fh_dma_alloc_atomic_debug(NULL, _size_, \
++						_dma_, __func__, __LINE__)
++#define FH_DMA_FREE(_size_,_virt_,_dma_) fh_dma_free_debug(NULL, _size_, \
++						_virt_, _dma_, __func__, __LINE__)
++# endif
++
++# if defined(FH_FREEBSD) || defined(FH_NETBSD)
++#define FH_DMA_ALLOC(_ctx_,_size_,_dma_) fh_dma_alloc_debug(_ctx_, _size_, \
++						_dma_, __func__, __LINE__)
++#define FH_DMA_FREE(_ctx_,_size_,_virt_,_dma_) fh_dma_free_debug(_ctx_, _size_, \
++						 _virt_, _dma_, __func__, __LINE__)
++# endif
++
++#endif /* FH_DEBUG_MEMORY */
++
++#define fh_alloc(_ctx_,_size_) FH_ALLOC(_size_)
++#define fh_alloc_atomic(_ctx_,_size_) FH_ALLOC_ATOMIC(_size_)
++#define fh_free(_ctx_,_addr_) FH_FREE(_addr_)
++
++#ifdef FH_LINUX
++/* Linux doesn't need any extra parameters for DMA buffer allocation, so we
++ * just throw away the DMA context parameter.
++ */
++#define fh_dma_alloc(_ctx_,_size_,_dma_) FH_DMA_ALLOC(_size_, _dma_)
++#define fh_dma_alloc_atomic(_ctx_,_size_,_dma_) FH_DMA_ALLOC_ATOMIC(_size_, _dma_)
++#define fh_dma_free(_ctx_,_size_,_virt_,_dma_) FH_DMA_FREE(_size_, _virt_, _dma_)
++#endif
++
++#if defined(FH_FREEBSD) || defined(FH_NETBSD)
++/** BSD needs several extra parameters for DMA buffer allocation, so we pass
++ * them in using the DMA context parameter.
++ */
++#define fh_dma_alloc FH_DMA_ALLOC
++#define fh_dma_free FH_DMA_FREE
++#endif
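++
++/* Illustrative usage sketch (hypothetical names), assuming FH_LINUX and no
++ * FH_DEBUG_MEMORY: allocate, use, and free a zeroed DMA-capable buffer.
++ *
++ *   uint8_t *buf;
++ *   fh_dma_t buf_dma;
++ *
++ *   buf = FH_DMA_ALLOC(512, &buf_dma);   // zeroed, DMA-capable
++ *   if (buf) {
++ *           // ... program buf_dma into the hardware, use buf ...
++ *           FH_DMA_FREE(512, buf, buf_dma);
++ *   }
++ */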
++
++
++/** @name Memory and String Processing */
++
++/** memset() clone */
++extern void *FH_MEMSET(void *dest, uint8_t byte, uint32_t size);
++#define fh_memset FH_MEMSET
++
++/** memcpy() clone */
++extern void *FH_MEMCPY(void *dest, void const *src, uint32_t size);
++#define fh_memcpy FH_MEMCPY
++
++/** memmove() clone */
++extern void *FH_MEMMOVE(void *dest, void *src, uint32_t size);
++#define fh_memmove FH_MEMMOVE
++
++/** memcmp() clone */
++extern int FH_MEMCMP(void *m1, void *m2, uint32_t size);
++#define fh_memcmp FH_MEMCMP
++
++/** strcmp() clone */
++extern int FH_STRCMP(void *s1, void *s2);
++#define fh_strcmp FH_STRCMP
++
++/** strncmp() clone */
++extern int FH_STRNCMP(void *s1, void *s2, uint32_t size);
++#define fh_strncmp FH_STRNCMP
++
++/** strlen() clone, for NULL terminated ASCII strings */
++extern int FH_STRLEN(char const *str);
++#define fh_strlen FH_STRLEN
++
++/** strcpy() clone, for NULL terminated ASCII strings */
++extern char *FH_STRCPY(char *to, const char *from);
++#define fh_strcpy FH_STRCPY
++
++/** strdup() clone.  If you wish to use memory allocation debugging, this
++ * implementation of strdup should use the FH_* memory routines instead of
++ * calling a predefined strdup.  Otherwise the memory allocated by this routine
++ * will not be seen by the debugging routines. */
++extern char *FH_STRDUP(char const *str);
++#define fh_strdup(_ctx_,_str_) FH_STRDUP(_str_)
++
++/** NOT an atoi() clone.  Read the description carefully.  Converts the string
++ * str to an integer, in base 10 unless the string begins with "0x", in which
++ * case it is base 16.  The string must be a NULL terminated sequence of ASCII
++ * characters and may optionally begin with whitespace, a + or -, and a "0x"
++ * prefix if base 16.  The remaining characters must be valid digits for the
++ * number and end with a NULL character.  If any invalid characters are
++ * encountered, it returns a negative error code and the results of the
++ * conversion are undefined.  On success it returns 0.  Overflow conditions are
++ * undefined.  An example implementation using atoi() can be referenced from the
++ * Linux implementation. */
++extern int FH_ATOI(const char *str, int32_t *value);
++#define fh_atoi FH_ATOI
++
++/** Same as above but for unsigned. */
++extern int FH_ATOUI(const char *str, uint32_t *value);
++#define fh_atoui FH_ATOUI
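++
++/* Illustrative sketch (hypothetical variables): parsing decimal and hex
++ * strings with the conversion routine described above.
++ *
++ *   int32_t v;
++ *   if (fh_atoi("42", &v) == 0)     // base 10: v == 42
++ *           ...;
++ *   if (fh_atoi("0x2a", &v) == 0)   // base 16 via "0x" prefix: v == 42
++ *           ...;
++ */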
++
++#ifdef FH_UTFLIB
++/** Converts a UTF-8 string to a UTF-16LE (Unicode) encoded string. */
++extern int FH_UTF8_TO_UTF16LE(uint8_t const *utf8string, uint16_t *utf16string, unsigned len);
++#define fh_utf8_to_utf16le FH_UTF8_TO_UTF16LE
++#endif
++
++
++/** @name Wait queues
++ *
++ * Wait queues provide a means of synchronizing between threads or processes.  A
++ * process can block on a waitq if some condition is not true, waiting for it to
++ * become true.  When the waitq is triggered, all waiting processes are
++ * unblocked and the condition is checked again.  Waitqs should be triggered
++ * every time a condition can potentially change. */
++struct fh_waitq;
++
++/** Type for a waitq */
++typedef struct fh_waitq fh_waitq_t;
++
++/** The type of the waitq condition callback function.  This is called every
++ * time the condition is evaluated. */
++typedef int (*fh_waitq_condition_t)(void *data);
++
++/** Allocate a waitq */
++extern fh_waitq_t *FH_WAITQ_ALLOC(void);
++#define fh_waitq_alloc(_ctx_) FH_WAITQ_ALLOC()
++
++/** Free a waitq */
++extern void FH_WAITQ_FREE(fh_waitq_t *wq);
++#define fh_waitq_free FH_WAITQ_FREE
++
++/** Check the condition and if it is false, block on the waitq.  When unblocked,
++ * check the condition again.  The function returns when the condition becomes
++ * true.  The return value is 0 if the condition is true, FH_WAITQ_ABORTED if
++ * the wait was aborted or killed, or FH_WAITQ_UNKNOWN on error. */
++extern int32_t FH_WAITQ_WAIT(fh_waitq_t *wq, fh_waitq_condition_t cond, void *data);
++#define fh_waitq_wait FH_WAITQ_WAIT
++
++/** Check the condition and if it is false, block on the waitq.  When unblocked,
++ * check the condition again.  The function returns when the condition becomes
++ * true or the timeout has passed.  The return value is 0 if the condition is
++ * true, FH_TIMED_OUT on timeout, FH_WAITQ_ABORTED if the wait was aborted, or
++ * FH_WAITQ_UNKNOWN on error. */
++extern int32_t FH_WAITQ_WAIT_TIMEOUT(fh_waitq_t *wq, fh_waitq_condition_t cond,
++				      void *data, int32_t msecs);
++#define fh_waitq_wait_timeout FH_WAITQ_WAIT_TIMEOUT
++
++/** Trigger a waitq, unblocking all processes.  This should be called whenever a condition
++ * has potentially changed. */
++extern void FH_WAITQ_TRIGGER(fh_waitq_t *wq);
++#define fh_waitq_trigger FH_WAITQ_TRIGGER
++
++/** Unblock all processes waiting on the waitq with an ABORTED result. */
++extern void FH_WAITQ_ABORT(fh_waitq_t *wq);
++#define fh_waitq_abort FH_WAITQ_ABORT
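++
++/* Illustrative sketch (hypothetical names): one thread blocks until another
++ * sets a flag and triggers the waitq.
++ *
++ *   static int flag_is_set(void *data) { return *(int *)data; }
++ *
++ *   // waiter:
++ *   fh_waitq_wait(wq, flag_is_set, &flag);
++ *
++ *   // setter, whenever the condition may have changed:
++ *   flag = 1;
++ *   fh_waitq_trigger(wq);
++ */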
++
++
++/** @name Threads
++ *
++ * A thread must be explicitly stopped.  It must check FH_THREAD_SHOULD_STOP
++ * whenever it is woken up, and return if it is set.  The FH_THREAD_STOP
++ * function returns the value returned by the thread.
++ */
++
++struct fh_thread;
++
++/** Type for a thread */
++typedef struct fh_thread fh_thread_t;
++
++/** The thread function */
++typedef int (*fh_thread_function_t)(void *data);
++
++/** Create a thread and start it running the thread_function.  Returns a handle
++ * to the thread */
++extern fh_thread_t *FH_THREAD_RUN(fh_thread_function_t func, char *name, void *data);
++#define fh_thread_run(_ctx_,_func_,_name_,_data_) FH_THREAD_RUN(_func_, _name_, _data_)
++
++/** Stops a thread.  Returns the value returned by the thread, or FH_ABORT if
++ * the thread never started. */
++extern int FH_THREAD_STOP(fh_thread_t *thread);
++#define fh_thread_stop FH_THREAD_STOP
++
++/** Signifies to the thread that it must stop. */
++#ifdef FH_LINUX
++/* Linux doesn't need any parameters for kthread_should_stop() */
++extern fh_bool_t FH_THREAD_SHOULD_STOP(void);
++#define fh_thread_should_stop(_thrd_) FH_THREAD_SHOULD_STOP()
++
++/* No thread_exit function in Linux */
++#define fh_thread_exit(_thrd_)
++#endif
++
++#if defined(FH_FREEBSD) || defined(FH_NETBSD)
++/** BSD needs the thread pointer for kthread_suspend_check() */
++extern fh_bool_t FH_THREAD_SHOULD_STOP(fh_thread_t *thread);
++#define fh_thread_should_stop FH_THREAD_SHOULD_STOP
++
++/** The thread must call this to exit. */
++extern void FH_THREAD_EXIT(fh_thread_t *thread);
++#define fh_thread_exit FH_THREAD_EXIT
++#endif
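++
++/* Illustrative sketch (hypothetical names): a thread loop honoring the stop
++ * protocol described above.
++ *
++ *   static int my_thread(void *data)
++ *   {
++ *           while (!fh_thread_should_stop(thrd)) {
++ *                   // ... wait for and handle work ...
++ *           }
++ *           return 0;
++ *   }
++ *
++ *   thrd = fh_thread_run(NULL, my_thread, "my_thread", ctx);
++ *   // ... later ...
++ *   ret = fh_thread_stop(thrd);
++ */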
++
++
++/** @name Work queues
++ *
++ * Workqs are used to queue a callback function to be called at some later time,
++ * in another thread. */
++struct fh_workq;
++
++/** Type for a system work */
++struct work_struct;
++
++/** Type for a workq */
++typedef struct fh_workq fh_workq_t;
++
++/** The type of the callback function to be called. */
++typedef void (*fh_work_callback_t)(void *data);
++
++/** Allocate a workq */
++extern fh_workq_t *FH_WORKQ_ALLOC(char *name);
++#define fh_workq_alloc(_ctx_,_name_) FH_WORKQ_ALLOC(_name_)
++
++/** Free a workq.  All work must be completed before being freed. */
++extern void FH_WORKQ_FREE(fh_workq_t *workq);
++#define fh_workq_free FH_WORKQ_FREE
++
++/** Schedule a callback on the workq, passing in data.  The function will be
++ * scheduled at some later time. */
++extern void FH_WORKQ_SCHEDULE(fh_workq_t *workq, fh_work_callback_t cb,
++			       void *data, char *format, ...)
++#ifdef __GNUC__
++	__attribute__ ((format(printf, 4, 5)));
++#else
++	;
++#endif
++
++/** Schedule work on the system workq. */
++extern bool FH_SCHEDULE_SYSTEM_WORK(struct work_struct *work);
++#define fh_schedule_system_work FH_SCHEDULE_SYSTEM_WORK
++
++#define fh_workq_schedule FH_WORKQ_SCHEDULE
++
++/** Schedule a callback on the workq, to be called after at least the given
++ * number of milliseconds has passed. */
++extern void FH_WORKQ_SCHEDULE_DELAYED(fh_workq_t *workq, fh_work_callback_t cb,
++				       void *data, uint32_t time, char *format, ...)
++#ifdef __GNUC__
++	__attribute__ ((format(printf, 5, 6)));
++#else
++	;
++#endif
++#define fh_workq_schedule_delayed FH_WORKQ_SCHEDULE_DELAYED
++
++/** Returns the number of work items pending in the workq */
++extern int FH_WORKQ_PENDING(fh_workq_t *workq);
++#define fh_workq_pending FH_WORKQ_PENDING
++
++/** Blocks until all the work in the workq is complete or the timeout expires.
++ * Returns < 0 on timeout. */
++extern int FH_WORKQ_WAIT_WORK_DONE(fh_workq_t *workq, int timeout);
++#define fh_workq_wait_work_done FH_WORKQ_WAIT_WORK_DONE
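++
++/* Illustrative sketch (hypothetical names): deferring a callback to workq
++ * context and waiting for it to finish.
++ *
++ *   wq = fh_workq_alloc(NULL, "my_wq");
++ *   fh_workq_schedule(wq, my_cb, ctx, "deferred job %d", 1);
++ *   fh_workq_wait_work_done(wq, 100);   // wait up to 100 ms
++ *   fh_workq_free(wq);
++ */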
++
++
++/** @name Tasklets
++ *
++ */
++struct fh_tasklet;
++
++/** Type for a tasklet */
++typedef struct fh_tasklet fh_tasklet_t;
++
++/** The type of the callback function to be called */
++typedef void (*fh_tasklet_callback_t)(void *data);
++
++/** Allocates a tasklet */
++extern fh_tasklet_t *FH_TASK_ALLOC(char *name, fh_tasklet_callback_t cb, void *data);
++#define fh_task_alloc(_ctx_,_name_,_cb_,_data_) FH_TASK_ALLOC(_name_, _cb_, _data_)
++
++/** Frees a tasklet */
++extern void FH_TASK_FREE(fh_tasklet_t *task);
++#define fh_task_free FH_TASK_FREE
++
++/** Schedules a tasklet to run */
++extern void FH_TASK_SCHEDULE(fh_tasklet_t *task);
++#define fh_task_schedule FH_TASK_SCHEDULE
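++
++/* Illustrative sketch (hypothetical names):
++ *
++ *   task = fh_task_alloc(NULL, "my_task", my_cb, ctx);
++ *   fh_task_schedule(task);   // my_cb(ctx) runs later in tasklet context
++ *   // ... when no longer needed ...
++ *   fh_task_free(task);
++ */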
++
++
++/** @name Timer
++ *
++ * Callbacks must be small and atomic.
++ */
++struct fh_timer;
++
++/** Type for a timer */
++typedef struct fh_timer fh_timer_t;
++
++/** The type of the callback function to be called */
++typedef void (*fh_timer_callback_t)(void *data);
++
++/** Allocates a timer */
++extern fh_timer_t *FH_TIMER_ALLOC(char *name, fh_timer_callback_t cb, void *data);
++#define fh_timer_alloc(_ctx_,_name_,_cb_,_data_) FH_TIMER_ALLOC(_name_,_cb_,_data_)
++
++/** Frees a timer */
++extern void FH_TIMER_FREE(fh_timer_t *timer);
++#define fh_timer_free FH_TIMER_FREE
++
++/** Schedules the timer to expire 'time' ms from now.
++ *
++ * If the timer is still awaiting execution, this modifies it to the new
++ * expiration time. */
++extern void FH_TIMER_SCHEDULE(fh_timer_t *timer, uint32_t time);
++#define fh_timer_schedule FH_TIMER_SCHEDULE
++
++/** Disables the timer from execution. */
++extern void FH_TIMER_CANCEL(fh_timer_t *timer);
++#define fh_timer_cancel FH_TIMER_CANCEL
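++
++/* Illustrative sketch (hypothetical names): fire a callback 100 ms from now.
++ *
++ *   tmr = fh_timer_alloc(NULL, "my_timer", my_cb, ctx);
++ *   fh_timer_schedule(tmr, 100);   // expires in 100 ms
++ *   // ... fh_timer_cancel(tmr) to disable before expiry ...
++ *   fh_timer_free(tmr);
++ */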
++
++
++/** @name Spinlocks
++ *
++ * These locks are used when the work between the lock/unlock is atomic and
++ * short.  Interrupts are also disabled during the lock/unlock and thus they are
++ * suitable to lock between interrupt/non-interrupt context.  They also lock
++ * between processes if you have multiple CPUs or preemption.  If you don't have
++ * multiple CPUs or preemption, then you can simply implement
++ * FH_SPINLOCK and FH_SPINUNLOCK to disable and enable interrupts.  Because
++ * the work between the lock/unlock is atomic, the process context will never
++ * change, and so you never have to lock between processes.  */
++
++struct fh_spinlock;
++
++/** Type for a spinlock */
++typedef struct fh_spinlock fh_spinlock_t;
++
++/** Type for the 'flags' argument to spinlock functions */
++typedef unsigned long fh_irqflags_t;
++
++/** Returns an initialized lock variable.  This function should allocate and
++ * initialize the OS-specific data structure used for locking.  This data
++ * structure is to be used by the FH_SPINLOCK and FH_SPINUNLOCK functions and
++ * should be freed by FH_SPINLOCK_FREE when it is no longer used. */
++extern fh_spinlock_t *FH_SPINLOCK_ALLOC(void);
++#define fh_spinlock_alloc(_ctx_) FH_SPINLOCK_ALLOC()
++
++/** Frees an initialized lock variable. */
++extern void FH_SPINLOCK_FREE(fh_spinlock_t *lock);
++#define fh_spinlock_free(_ctx_,_lock_) FH_SPINLOCK_FREE(_lock_)
++
++/** Disables interrupts and blocks until it acquires the lock.
++ *
++ * @param lock Pointer to the spinlock.
++ * @param flags Unsigned long for irq flags storage.
++ */
++extern void FH_SPINLOCK_IRQSAVE(fh_spinlock_t *lock, fh_irqflags_t *flags);
++#define fh_spinlock_irqsave FH_SPINLOCK_IRQSAVE
++
++/** Re-enables the interrupt and releases the lock.
++ *
++ * @param lock Pointer to the spinlock.
++ * @param flags Unsigned long for irq flags storage.  Must be the same flags
++ * value that was passed into FH_SPINLOCK_IRQSAVE.
++ */
++extern void FH_SPINUNLOCK_IRQRESTORE(fh_spinlock_t *lock, fh_irqflags_t flags);
++#define fh_spinunlock_irqrestore FH_SPINUNLOCK_IRQRESTORE
++
++/** Blocks until it acquires the lock.
++ *
++ * @param lock Pointer to the spinlock.
++ */
++extern void FH_SPINLOCK(fh_spinlock_t *lock);
++#define fh_spinlock FH_SPINLOCK
++
++/** Releases the lock.
++ *
++ * @param lock Pointer to the spinlock.
++ */
++extern void FH_SPINUNLOCK(fh_spinlock_t *lock);
++#define fh_spinunlock FH_SPINUNLOCK
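++
++/* Illustrative sketch (hypothetical names): protecting state shared with
++ * interrupt context.
++ *
++ *   fh_irqflags_t flags;
++ *
++ *   fh_spinlock_irqsave(lock, &flags);
++ *   // ... short, atomic critical section ...
++ *   fh_spinunlock_irqrestore(lock, flags);
++ */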
++
++
++/** @name Mutexes
++ *
++ * Unlike spinlocks, mutexes lock only between processes, and the work between
++ * the lock/unlock CAN block; therefore they CANNOT be used from interrupt
++ * context.
++
++struct fh_mutex;
++
++/** Type for a mutex */
++typedef struct fh_mutex fh_mutex_t;
++
++/* For Linux mutex debugging, allocate the mutex inline (via a macro), because
++ * the debugging routines use the caller's symbol to detect recursive locking.
++ * A shared allocation wrapper would make them falsely report recursive
++ * locking. */
++#if defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES)
++#define FH_MUTEX_ALLOC_LINUX_DEBUG(__mutexp) ({ \
++	__mutexp = (fh_mutex_t *)FH_ALLOC(sizeof(struct mutex)); \
++	mutex_init((struct mutex *)__mutexp); \
++})
++#endif
++
++/** Allocate a mutex */
++extern fh_mutex_t *FH_MUTEX_ALLOC(void);
++#define fh_mutex_alloc(_ctx_) FH_MUTEX_ALLOC()
++
++/* For memory leak debugging when using Linux Mutex Debugging */
++#if defined(FH_LINUX) && defined(CONFIG_DEBUG_MUTEXES)
++#define FH_MUTEX_FREE(__mutexp) do { \
++	mutex_destroy((struct mutex *)__mutexp); \
++	FH_FREE(__mutexp); \
++} while(0)
++#else
++/** Free a mutex */
++extern void FH_MUTEX_FREE(fh_mutex_t *mutex);
++#define fh_mutex_free(_ctx_,_mutex_) FH_MUTEX_FREE(_mutex_)
++#endif
++
++/** Lock a mutex */
++extern void FH_MUTEX_LOCK(fh_mutex_t *mutex);
++#define fh_mutex_lock FH_MUTEX_LOCK
++
++/** Non-blocking lock returns 1 on successful lock. */
++extern int FH_MUTEX_TRYLOCK(fh_mutex_t *mutex);
++#define fh_mutex_trylock FH_MUTEX_TRYLOCK
++
++/** Unlock a mutex */
++extern void FH_MUTEX_UNLOCK(fh_mutex_t *mutex);
++#define fh_mutex_unlock FH_MUTEX_UNLOCK
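++
++/* Illustrative sketch (hypothetical names): serializing process-context work
++ * that may block.
++ *
++ *   fh_mutex_lock(mutex);
++ *   // ... work that may block; never call from interrupt context ...
++ *   fh_mutex_unlock(mutex);
++ */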
++
++
++/** @name Time */
++
++/** Microsecond delay.
++ *
++ * @param usecs  Microseconds to delay.
++ */
++extern void FH_UDELAY(uint32_t usecs);
++#define fh_udelay FH_UDELAY
++
++/** Millisecond delay.
++ *
++ * @param msecs  Milliseconds to delay.
++ */
++extern void FH_MDELAY(uint32_t msecs);
++#define fh_mdelay FH_MDELAY
++
++/** Non-busy waiting.
++ * Sleeps for the specified number of milliseconds.
++ *
++ * @param msecs Milliseconds to sleep.
++ */
++extern void FH_MSLEEP(uint32_t msecs);
++#define fh_msleep FH_MSLEEP
++
++/**
++ * Returns number of milliseconds since boot.
++ */
++extern uint32_t FH_TIME(void);
++#define fh_time FH_TIME
++
++
++
++
++/** @mainpage FH Portability and Common Library
++ *
++ * This is the documentation for the FH Portability and Common Library.
++ *
++ * @section intro Introduction
++ *
++ * The FH Portability library consists of wrapper calls and data structures for
++ * all low-level functions which are typically provided by the OS.  The WUDEV
++ * driver uses only these functions.  In order to port the WUDEV driver, only
++ * the functions in this library need to be re-implemented, with the same
++ * behavior as documented here.
++ *
++ * The Common library consists of higher level functions, which rely only on
++ * calling the functions from the FH Portability library.  These common
++ * routines are shared across modules.  Some of the common libraries need to be
++ * used directly by the driver programmer when porting WUDEV, such as the
++ * parameter and notification libraries.
++ *
++ * @section low Portability Library OS Wrapper Functions
++ *
++ * Any function starting with FH and in all CAPS is a low-level OS-wrapper that
++ * needs to be implemented when porting, for example FH_MUTEX_ALLOC().  All of
++ * these functions are included in the fh_os.h file.
++ *
++ * There are many functions here covering a wide array of OS services.  Please
++ * see fh_os.h for details, and implementation notes for each function.
++ *
++ * @section common Common Library Functions
++ *
++ * Any function starting with fh and in all lowercase is a common library
++ * routine.  These functions have a portable implementation and do not need to
++ * be reimplemented when porting.  The common routines can be used by any
++ * driver, and some must be used by the end user to control the drivers.  For
++ * example, you must use the Parameter common library in order to set the
++ * parameters in the WUDEV module.
++ *
++ * The common libraries consist of the following:
++ *
++ * - Connection Contexts - Used internally and can be used by end-user.  See fh_cc.h
++ * - Parameters - Used internally and can be used by end-user.  See fh_params.h
++ * - Notifications - Used internally and can be used by end-user.  See fh_notifier.h
++ * - Lists - Used internally and can be used by end-user.  See fh_list.h
++ * - Memory Debugging - Used internally and can be used by end-user.  See fh_os.h
++ * - Modpow - Used internally only.  See fh_modpow.h
++ * - DH - Used internally only.  See fh_dh.h
++ * - Crypto - Used internally only.  See fh_crypto.h
++ *
++ *
++ * @section prereq Prerequisites For fh_os.h
++ * @subsection types Data Types
++ *
++ * The fh_os.h file assumes that several low-level data types are predefined for the
++ * compilation environment.  These data types are:
++ *
++ * - uint8_t - unsigned 8-bit data type
++ * - int8_t - signed 8-bit data type
++ * - uint16_t - unsigned 16-bit data type
++ * - int16_t - signed 16-bit data type
++ * - uint32_t - unsigned 32-bit data type
++ * - int32_t - signed 32-bit data type
++ * - uint64_t - unsigned 64-bit data type
++ * - int64_t - signed 64-bit data type
++ *
++ * Ensure that these are defined before using fh_os.h.  The easiest way to do
++ * that is to modify the top of the file to include the appropriate header.
++ * This is already done for the Linux environment.  If the FH_LINUX macro is
++ * defined, the correct header will be added.  A standard header <stdint.h> is
++ * also used for environments where standard C headers are available.
++ *
++ * @subsection stdarg Variable Arguments
++ *
++ * Variable arguments are provided by the standard C header <stdarg.h>.  It is
++ * available in both the Linux and ANSI C environments.  An equivalent must be
++ * provided in your environment in order to use fh_os.h with the debug and
++ * tracing message functionality.
++ *
++ * @subsection thread Threading
++ *
++ * WUDEV Core must be run on an operating system that provides for multiple
++ * threads/processes.  Threading can be implemented in many ways, even in
++ * embedded systems without an operating system.  At the bare minimum, the
++ * system should be able to start any number of processes at any time to handle
++ * special work.  It need not be a pre-emptive system.  Process context can
++ * change upon a call to a blocking function.  The hardware interrupt context
++ * that calls the module's ISR() function must be differentiable from process
++ * context, even if your processes are implemented via a hardware interrupt.
++ * Further, locking mechanisms between processes must exist (or be implemented),
++ * and process context must have a way to disable interrupts for a period of
++ * time to lock them out.  If all of this exists, the functions in fh_os.h
++ * related to threading can be implemented with the defined behavior.
++ *
++ */
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* _FH_OS_H_ */
+diff --git a/drivers/usb/host/fh_otg/fh_common_port/usb.h b/drivers/usb/host/fh_otg/fh_common_port/usb.h
+new file mode 100644
+index 00000000..27bda82d
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_common_port/usb.h
+@@ -0,0 +1,946 @@
++/*
++ * Copyright (c) 1998 The NetBSD Foundation, Inc.
++ * All rights reserved.
++ *
++ * This code is derived from software contributed to The NetBSD Foundation
++ * by Lennart Augustsson (lennart@augustsson.net) at
++ * Carlstedt Research & Technology.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. All advertising materials mentioning features or use of this software
++ *    must display the following acknowledgement:
++ *        This product includes software developed by the NetBSD
++ *        Foundation, Inc. and its contributors.
++ * 4. Neither the name of The NetBSD Foundation nor the names of its
++ *    contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
++ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
++ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/* Modified by Synopsys, Inc, 12/12/2007 */
++
++
++#ifndef _USB_H_
++#define _USB_H_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++ * The USB records contain some unaligned little-endian word
++ * components.  The U[SG]ETW macros take care of both the alignment
++ * and endian problem and should always be used to access non-byte
++ * values.
++ */
++typedef u_int8_t uByte;
++typedef u_int8_t uWord[2];
++typedef u_int8_t uDWord[4];
++
++#define USETW2(w,h,l) ((w)[0] = (u_int8_t)(l), (w)[1] = (u_int8_t)(h))
++#define UCONSTW(x)	{ (x) & 0xff, ((x) >> 8) & 0xff }
++#define UCONSTDW(x)	{ (x) & 0xff, ((x) >> 8) & 0xff, \
++			  ((x) >> 16) & 0xff, ((x) >> 24) & 0xff }
++
++#if 1
++#define UGETW(w) ((w)[0] | ((w)[1] << 8))
++#define USETW(w,v) ((w)[0] = (u_int8_t)(v), (w)[1] = (u_int8_t)((v) >> 8))
++#define UGETDW(w) ((w)[0] | ((w)[1] << 8) | ((w)[2] << 16) | ((w)[3] << 24))
++#define USETDW(w,v) ((w)[0] = (u_int8_t)(v), \
++		     (w)[1] = (u_int8_t)((v) >> 8), \
++		     (w)[2] = (u_int8_t)((v) >> 16), \
++		     (w)[3] = (u_int8_t)((v) >> 24))
++#else
++/*
++ * On little-endian machines that can handle unaligned accesses
++ * (e.g. i386) these macros can be replaced by the following.
++ */
++#define UGETW(w) (*(u_int16_t *)(w))
++#define USETW(w,v) (*(u_int16_t *)(w) = (v))
++#define UGETDW(w) (*(u_int32_t *)(w))
++#define USETDW(w,v) (*(u_int32_t *)(w) = (v))
++#endif
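++
++/*
++ * Illustrative sketch (hypothetical variable): accessing an unaligned
++ * little-endian word with the macros above.
++ *
++ *   uWord w;
++ *   USETW(w, 0x0183);          // store 0x0183 little-endian, byte by byte
++ *   u_int16_t v = UGETW(w);    // v == 0x0183 on any host endianness
++ */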
++
++/*
++ * Macros for accessing UAS IU fields, which are big-endian
++ */
++#define IUSETW2(w,h,l) ((w)[0] = (u_int8_t)(h), (w)[1] = (u_int8_t)(l))
++#define IUCONSTW(x)	{ ((x) >> 8) & 0xff, (x) & 0xff }
++#define IUCONSTDW(x)	{ ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
++			((x) >> 8) & 0xff, (x) & 0xff }
++#define IUGETW(w) (((w)[0] << 8) | (w)[1])
++#define IUSETW(w,v) ((w)[0] = (u_int8_t)((v) >> 8), (w)[1] = (u_int8_t)(v))
++#define IUGETDW(w) (((w)[0] << 24) | ((w)[1] << 16) | ((w)[2] << 8) | (w)[3])
++#define IUSETDW(w,v) ((w)[0] = (u_int8_t)((v) >> 24), \
++		      (w)[1] = (u_int8_t)((v) >> 16), \
++		      (w)[2] = (u_int8_t)((v) >> 8), \
++		      (w)[3] = (u_int8_t)(v))
++
++#define UPACKED __attribute__((__packed__))
++
++typedef struct {
++	uByte		bmRequestType;
++	uByte		bRequest;
++	uWord		wValue;
++	uWord		wIndex;
++	uWord		wLength;
++} UPACKED usb_device_request_t;
++
++#define UT_GET_DIR(a) ((a) & 0x80)
++#define UT_WRITE		0x00
++#define UT_READ			0x80
++
++#define UT_GET_TYPE(a) ((a) & 0x60)
++#define UT_STANDARD		0x00
++#define UT_CLASS		0x20
++#define UT_VENDOR		0x40
++
++#define UT_GET_RECIPIENT(a) ((a) & 0x1f)
++#define UT_DEVICE		0x00
++#define UT_INTERFACE		0x01
++#define UT_ENDPOINT		0x02
++#define UT_OTHER		0x03
++
++#define UT_READ_DEVICE		(UT_READ  | UT_STANDARD | UT_DEVICE)
++#define UT_READ_INTERFACE	(UT_READ  | UT_STANDARD | UT_INTERFACE)
++#define UT_READ_ENDPOINT	(UT_READ  | UT_STANDARD | UT_ENDPOINT)
++#define UT_WRITE_DEVICE		(UT_WRITE | UT_STANDARD | UT_DEVICE)
++#define UT_WRITE_INTERFACE	(UT_WRITE | UT_STANDARD | UT_INTERFACE)
++#define UT_WRITE_ENDPOINT	(UT_WRITE | UT_STANDARD | UT_ENDPOINT)
++#define UT_READ_CLASS_DEVICE	(UT_READ  | UT_CLASS | UT_DEVICE)
++#define UT_READ_CLASS_INTERFACE	(UT_READ  | UT_CLASS | UT_INTERFACE)
++#define UT_READ_CLASS_OTHER	(UT_READ  | UT_CLASS | UT_OTHER)
++#define UT_READ_CLASS_ENDPOINT	(UT_READ  | UT_CLASS | UT_ENDPOINT)
++#define UT_WRITE_CLASS_DEVICE	(UT_WRITE | UT_CLASS | UT_DEVICE)
++#define UT_WRITE_CLASS_INTERFACE (UT_WRITE | UT_CLASS | UT_INTERFACE)
++#define UT_WRITE_CLASS_OTHER	(UT_WRITE | UT_CLASS | UT_OTHER)
++#define UT_WRITE_CLASS_ENDPOINT	(UT_WRITE | UT_CLASS | UT_ENDPOINT)
++#define UT_READ_VENDOR_DEVICE	(UT_READ  | UT_VENDOR | UT_DEVICE)
++#define UT_READ_VENDOR_INTERFACE (UT_READ  | UT_VENDOR | UT_INTERFACE)
++#define UT_READ_VENDOR_OTHER	(UT_READ  | UT_VENDOR | UT_OTHER)
++#define UT_READ_VENDOR_ENDPOINT	(UT_READ  | UT_VENDOR | UT_ENDPOINT)
++#define UT_WRITE_VENDOR_DEVICE	(UT_WRITE | UT_VENDOR | UT_DEVICE)
++#define UT_WRITE_VENDOR_INTERFACE (UT_WRITE | UT_VENDOR | UT_INTERFACE)
++#define UT_WRITE_VENDOR_OTHER	(UT_WRITE | UT_VENDOR | UT_OTHER)
++#define UT_WRITE_VENDOR_ENDPOINT (UT_WRITE | UT_VENDOR | UT_ENDPOINT)
++
++/* Requests */
++#define UR_GET_STATUS		0x00
++#define  USTAT_STANDARD_STATUS  0x00
++#define  WUSTAT_WUSB_FEATURE    0x01
++#define  WUSTAT_CHANNEL_INFO    0x02
++#define  WUSTAT_RECEIVED_DATA   0x03
++#define  WUSTAT_MAS_AVAILABILITY 0x04
++#define  WUSTAT_CURRENT_TRANSMIT_POWER 0x05
++#define UR_CLEAR_FEATURE	0x01
++#define UR_SET_FEATURE		0x03
++#define UR_SET_AND_TEST_FEATURE 0x0c
++#define UR_SET_ADDRESS		0x05
++#define UR_GET_DESCRIPTOR	0x06
++#define  UDESC_DEVICE		0x01
++#define  UDESC_CONFIG		0x02
++#define  UDESC_STRING		0x03
++#define  UDESC_INTERFACE	0x04
++#define  UDESC_ENDPOINT		0x05
++#define  UDESC_SS_USB_COMPANION	0x30
++#define  UDESC_DEVICE_QUALIFIER	0x06
++#define  UDESC_OTHER_SPEED_CONFIGURATION 0x07
++#define  UDESC_INTERFACE_POWER	0x08
++#define  UDESC_OTG		0x09
++#define  WUDESC_SECURITY	0x0c
++#define  WUDESC_KEY		0x0d
++#define   WUD_GET_KEY_INDEX(_wValue_) ((_wValue_) & 0xf)
++#define   WUD_GET_KEY_TYPE(_wValue_) (((_wValue_) & 0x30) >> 4)
++#define    WUD_KEY_TYPE_ASSOC    0x01
++#define    WUD_KEY_TYPE_GTK      0x02
++#define   WUD_GET_KEY_ORIGIN(_wValue_) (((_wValue_) & 0x40) >> 6)
++#define    WUD_KEY_ORIGIN_HOST   0x00
++#define    WUD_KEY_ORIGIN_DEVICE 0x01
++#define  WUDESC_ENCRYPTION_TYPE	0x0e
++#define  WUDESC_BOS		0x0f
++#define  WUDESC_DEVICE_CAPABILITY 0x10
++#define  WUDESC_WIRELESS_ENDPOINT_COMPANION 0x11
++#define  UDESC_BOS		0x0f
++#define  UDESC_DEVICE_CAPABILITY 0x10
++#define  UDESC_CS_DEVICE	0x21	/* class specific */
++#define  UDESC_CS_CONFIG	0x22
++#define  UDESC_CS_STRING	0x23
++#define  UDESC_CS_INTERFACE	0x24
++#define  UDESC_CS_ENDPOINT	0x25
++#define  UDESC_HUB		0x29
++#define UR_SET_DESCRIPTOR	0x07
++#define UR_GET_CONFIG		0x08
++#define UR_SET_CONFIG		0x09
++#define UR_GET_INTERFACE	0x0a
++#define UR_SET_INTERFACE	0x0b
++#define UR_SYNCH_FRAME		0x0c
++#define WUR_SET_ENCRYPTION      0x0d
++#define WUR_GET_ENCRYPTION	0x0e
++#define WUR_SET_HANDSHAKE	0x0f
++#define WUR_GET_HANDSHAKE	0x10
++#define WUR_SET_CONNECTION	0x11
++#define WUR_SET_SECURITY_DATA	0x12
++#define WUR_GET_SECURITY_DATA	0x13
++#define WUR_SET_WUSB_DATA	0x14
++#define  WUDATA_DRPIE_INFO	0x01
++#define  WUDATA_TRANSMIT_DATA	0x02
++#define  WUDATA_TRANSMIT_PARAMS	0x03
++#define  WUDATA_RECEIVE_PARAMS	0x04
++#define  WUDATA_TRANSMIT_POWER	0x05
++#define WUR_LOOPBACK_DATA_WRITE	0x15
++#define WUR_LOOPBACK_DATA_READ	0x16
++#define WUR_SET_INTERFACE_DS	0x17
++
++/* Feature numbers */
++#define UF_ENDPOINT_HALT	0
++#define UF_DEVICE_REMOTE_WAKEUP	1
++#define UF_TEST_MODE		2
++#define UF_DEVICE_B_HNP_ENABLE	3
++#define UF_DEVICE_A_HNP_SUPPORT	4
++#define UF_DEVICE_A_ALT_HNP_SUPPORT 5
++#define WUF_WUSB		3
++#define  WUF_TX_DRPIE		0x0
++#define  WUF_DEV_XMIT_PACKET	0x1
++#define  WUF_COUNT_PACKETS	0x2
++#define  WUF_CAPTURE_PACKETS	0x3
++#define UF_FUNCTION_SUSPEND	0
++#define UF_U1_ENABLE		48
++#define UF_U2_ENABLE		49
++#define UF_LTM_ENABLE		50
++
++/* Class requests from the USB 2.0 hub spec, table 11-15 */
++#define UCR_CLEAR_HUB_FEATURE		(0x2000 | UR_CLEAR_FEATURE)
++#define UCR_CLEAR_PORT_FEATURE		(0x2300 | UR_CLEAR_FEATURE)
++#define UCR_GET_HUB_DESCRIPTOR		(0xa000 | UR_GET_DESCRIPTOR)
++#define UCR_GET_HUB_STATUS		(0xa000 | UR_GET_STATUS)
++#define UCR_GET_PORT_STATUS		(0xa300 | UR_GET_STATUS)
++#define UCR_SET_HUB_FEATURE		(0x2000 | UR_SET_FEATURE)
++#define UCR_SET_PORT_FEATURE		(0x2300 | UR_SET_FEATURE)
++#define UCR_SET_AND_TEST_PORT_FEATURE	(0xa300 | UR_SET_AND_TEST_FEATURE)
++
++#ifdef _MSC_VER
++#include <pshpack1.h>
++#endif
++
++typedef struct {
++	uByte		bLength;
++	uByte		bDescriptorType;
++	uByte		bDescriptorSubtype;
++} UPACKED usb_descriptor_t;
++
++typedef struct {
++	uByte		bLength;
++	uByte		bDescriptorType;
++} UPACKED usb_descriptor_header_t;
++
++typedef struct {
++	uByte		bLength;
++	uByte		bDescriptorType;
++	uWord		bcdUSB;
++#define UD_USB_2_0		0x0200
++#define UD_IS_USB2(d) (UGETW((d)->bcdUSB) >= UD_USB_2_0)
++	uByte		bDeviceClass;
++	uByte		bDeviceSubClass;
++	uByte		bDeviceProtocol;
++	uByte		bMaxPacketSize;
++	/* The fields below are not part of the initial descriptor. */
++	uWord		idVendor;
++	uWord		idProduct;
++	uWord		bcdDevice;
++	uByte		iManufacturer;
++	uByte		iProduct;
++	uByte		iSerialNumber;
++	uByte		bNumConfigurations;
++} UPACKED usb_device_descriptor_t;
++#define USB_DEVICE_DESCRIPTOR_SIZE 18
++
++typedef struct {
++	uByte		bLength;
++	uByte		bDescriptorType;
++	uWord		wTotalLength;
++	uByte		bNumInterface;
++	uByte		bConfigurationValue;
++	uByte		iConfiguration;
++#define UC_ATT_ONE		(1 << 7)	/* must be set */
++#define UC_ATT_SELFPOWER	(1 << 6)	/* self powered */
++#define UC_ATT_WAKEUP		(1 << 5)	/* can wakeup */
++#define UC_ATT_BATTERY		(1 << 4)	/* battery powered */
++	uByte		bmAttributes;
++#define UC_BUS_POWERED		0x80
++#define UC_SELF_POWERED		0x40
++#define UC_REMOTE_WAKEUP	0x20
++	uByte		bMaxPower; /* max current in 2 mA units */
++#define UC_POWER_FACTOR 2
++} UPACKED usb_config_descriptor_t;
++#define USB_CONFIG_DESCRIPTOR_SIZE 9
++
++typedef struct {
++	uByte		bLength;
++	uByte		bDescriptorType;
++	uByte		bInterfaceNumber;
++	uByte		bAlternateSetting;
++	uByte		bNumEndpoints;
++	uByte		bInterfaceClass;
++	uByte		bInterfaceSubClass;
++	uByte		bInterfaceProtocol;
++	uByte		iInterface;
++} UPACKED usb_interface_descriptor_t;
++#define USB_INTERFACE_DESCRIPTOR_SIZE 9
++
++typedef struct {
++	uByte		bLength;
++	uByte		bDescriptorType;
++	uByte		bEndpointAddress;
++#define UE_GET_DIR(a)	((a) & 0x80)
++#define UE_SET_DIR(a,d)	((a) | (((d)&1) << 7))
++#define UE_DIR_IN	0x80
++#define UE_DIR_OUT	0x00
++#define UE_ADDR		0x0f
++#define UE_GET_ADDR(a)	((a) & UE_ADDR)
++	uByte		bmAttributes;
++#define UE_XFERTYPE	0x03
++#define  UE_CONTROL	0x00
++#define  UE_ISOCHRONOUS	0x01
++#define  UE_BULK	0x02
++#define  UE_INTERRUPT	0x03
++#define UE_GET_XFERTYPE(a)	((a) & UE_XFERTYPE)
++#define UE_ISO_TYPE	0x0c
++#define  UE_ISO_ASYNC	0x04
++#define  UE_ISO_ADAPT	0x08
++#define  UE_ISO_SYNC	0x0c
++#define UE_GET_ISO_TYPE(a)	((a) & UE_ISO_TYPE)
++	uWord		wMaxPacketSize;
++	uByte		bInterval;
++} UPACKED usb_endpoint_descriptor_t;
++#define USB_ENDPOINT_DESCRIPTOR_SIZE 7
++
++typedef struct ss_endpoint_companion_descriptor {
++	uByte bLength;
++	uByte bDescriptorType;
++	uByte bMaxBurst;
++#define USSE_GET_MAX_STREAMS(a)		((a) & 0x1f)
++#define USSE_SET_MAX_STREAMS(a, b)	((a) | ((b) & 0x1f))
++#define USSE_GET_MAX_PACKET_NUM(a)	((a) & 0x03)
++#define USSE_SET_MAX_PACKET_NUM(a, b)	((a) | ((b) & 0x03))
++	uByte bmAttributes;
++	uWord wBytesPerInterval;
++} UPACKED ss_endpoint_companion_descriptor_t;
++#define USB_SS_ENDPOINT_COMPANION_DESCRIPTOR_SIZE 6
++
++typedef struct {
++	uByte		bLength;
++	uByte		bDescriptorType;
++	uWord		bString[127];
++} UPACKED usb_string_descriptor_t;
++#define USB_MAX_STRING_LEN 128
++#define USB_LANGUAGE_TABLE 0	/* # of the string language id table */
++
++/* Hub specific request */
++#define UR_GET_BUS_STATE	0x02
++#define UR_CLEAR_TT_BUFFER	0x08
++#define UR_RESET_TT		0x09
++#define UR_GET_TT_STATE		0x0a
++#define UR_STOP_TT		0x0b
++
++/* Hub features */
++#define UHF_C_HUB_LOCAL_POWER	0
++#define UHF_C_HUB_OVER_CURRENT	1
++#define UHF_PORT_CONNECTION	0
++#define UHF_PORT_ENABLE		1
++#define UHF_PORT_SUSPEND	2
++#define UHF_PORT_OVER_CURRENT	3
++#define UHF_PORT_RESET		4
++#define UHF_PORT_L1		5
++#define UHF_PORT_POWER		8
++#define UHF_PORT_LOW_SPEED	9
++#define UHF_PORT_HIGH_SPEED	10
++#define UHF_C_PORT_CONNECTION	16
++#define UHF_C_PORT_ENABLE	17
++#define UHF_C_PORT_SUSPEND	18
++#define UHF_C_PORT_OVER_CURRENT	19
++#define UHF_C_PORT_RESET	20
++#define UHF_C_PORT_L1		23
++#define UHF_PORT_TEST		21
++#define UHF_PORT_INDICATOR	22
++
++typedef struct {
++	uByte		bDescLength;
++	uByte		bDescriptorType;
++	uByte		bNbrPorts;
++	uWord		wHubCharacteristics;
++#define UHD_PWR			0x0003
++#define  UHD_PWR_GANGED		0x0000
++#define  UHD_PWR_INDIVIDUAL	0x0001
++#define  UHD_PWR_NO_SWITCH	0x0002
++#define UHD_COMPOUND		0x0004
++#define UHD_OC			0x0018
++#define  UHD_OC_GLOBAL		0x0000
++#define  UHD_OC_INDIVIDUAL	0x0008
++#define  UHD_OC_NONE		0x0010
++#define UHD_TT_THINK		0x0060
++#define  UHD_TT_THINK_8		0x0000
++#define  UHD_TT_THINK_16	0x0020
++#define  UHD_TT_THINK_24	0x0040
++#define  UHD_TT_THINK_32	0x0060
++#define UHD_PORT_IND		0x0080
++	uByte		bPwrOn2PwrGood;	/* delay in 2 ms units */
++#define UHD_PWRON_FACTOR 2
++	uByte		bHubContrCurrent;
++	uByte		DeviceRemovable[32]; /* max 255 ports */
++#define UHD_NOT_REMOV(desc, i) \
++    (((desc)->DeviceRemovable[(i)/8] >> ((i) % 8)) & 1)
++	/* deprecated */ uByte		PortPowerCtrlMask[1];
++} UPACKED usb_hub_descriptor_t;
++#define USB_HUB_DESCRIPTOR_SIZE 9 /* includes deprecated PortPowerCtrlMask */
++
++typedef struct {
++	uByte		bLength;
++	uByte		bDescriptorType;
++	uWord		bcdUSB;
++	uByte		bDeviceClass;
++	uByte		bDeviceSubClass;
++	uByte		bDeviceProtocol;
++	uByte		bMaxPacketSize0;
++	uByte		bNumConfigurations;
++	uByte		bReserved;
++} UPACKED usb_device_qualifier_t;
++#define USB_DEVICE_QUALIFIER_SIZE 10
++
++typedef struct {
++	uByte		bLength;
++	uByte		bDescriptorType;
++	uByte		bmAttributes;
++#define UOTG_SRP	0x01
++#define UOTG_HNP	0x02
++} UPACKED usb_otg_descriptor_t;
++
++/* OTG feature selectors */
++#define UOTG_B_HNP_ENABLE	3
++#define UOTG_A_HNP_SUPPORT	4
++#define UOTG_A_ALT_HNP_SUPPORT	5
++
++typedef struct {
++	uWord		wStatus;
++/* Device status flags */
++#define UDS_SELF_POWERED		0x0001
++#define UDS_REMOTE_WAKEUP		0x0002
++/* Endpoint status flags */
++#define UES_HALT			0x0001
++} UPACKED usb_status_t;
++
++typedef struct {
++	uWord		wHubStatus;
++#define UHS_LOCAL_POWER			0x0001
++#define UHS_OVER_CURRENT		0x0002
++	uWord		wHubChange;
++} UPACKED usb_hub_status_t;
++
++typedef struct {
++	uWord		wPortStatus;
++#define UPS_CURRENT_CONNECT_STATUS	0x0001
++#define UPS_PORT_ENABLED		0x0002
++#define UPS_SUSPEND			0x0004
++#define UPS_OVERCURRENT_INDICATOR	0x0008
++#define UPS_RESET			0x0010
++#define UPS_PORT_POWER			0x0100
++#define UPS_LOW_SPEED			0x0200
++#define UPS_HIGH_SPEED			0x0400
++#define UPS_PORT_TEST			0x0800
++#define UPS_PORT_INDICATOR		0x1000
++	uWord		wPortChange;
++#define UPS_C_CONNECT_STATUS		0x0001
++#define UPS_C_PORT_ENABLED		0x0002
++#define UPS_C_SUSPEND			0x0004
++#define UPS_C_OVERCURRENT_INDICATOR	0x0008
++#define UPS_C_PORT_RESET		0x0010
++} UPACKED usb_port_status_t;
++
++#ifdef _MSC_VER
++#include <poppack.h>
++#endif
++
++/* Device class codes */
++#define UDCLASS_IN_INTERFACE	0x00
++#define UDCLASS_COMM		0x02
++#define UDCLASS_HUB		0x09
++#define  UDSUBCLASS_HUB		0x00
++#define  UDPROTO_FSHUB		0x00
++#define  UDPROTO_HSHUBSTT	0x01
++#define  UDPROTO_HSHUBMTT	0x02
++#define UDCLASS_DIAGNOSTIC	0xdc
++#define UDCLASS_WIRELESS	0xe0
++#define  UDSUBCLASS_RF		0x01
++#define   UDPROTO_BLUETOOTH	0x01
++#define UDCLASS_VENDOR		0xff
++
++/* Interface class codes */
++#define UICLASS_UNSPEC		0x00
++
++#define UICLASS_AUDIO		0x01
++#define  UISUBCLASS_AUDIOCONTROL	1
++#define  UISUBCLASS_AUDIOSTREAM		2
++#define  UISUBCLASS_MIDISTREAM		3
++
++#define UICLASS_CDC		0x02 /* communication */
++#define  UISUBCLASS_DIRECT_LINE_CONTROL_MODEL	1
++#define  UISUBCLASS_ABSTRACT_CONTROL_MODEL	2
++#define  UISUBCLASS_TELEPHONE_CONTROL_MODEL	3
++#define  UISUBCLASS_MULTICHANNEL_CONTROL_MODEL	4
++#define  UISUBCLASS_CAPI_CONTROLMODEL		5
++#define  UISUBCLASS_ETHERNET_NETWORKING_CONTROL_MODEL 6
++#define  UISUBCLASS_ATM_NETWORKING_CONTROL_MODEL 7
++#define   UIPROTO_CDC_AT			1
++
++#define UICLASS_HID		0x03
++#define  UISUBCLASS_BOOT	1
++#define  UIPROTO_BOOT_KEYBOARD	1
++
++#define UICLASS_PHYSICAL	0x05
++
++#define UICLASS_IMAGE		0x06
++
++#define UICLASS_PRINTER		0x07
++#define  UISUBCLASS_PRINTER	1
++#define  UIPROTO_PRINTER_UNI	1
++#define  UIPROTO_PRINTER_BI	2
++#define  UIPROTO_PRINTER_1284	3
++
++#define UICLASS_MASS		0x08
++#define  UISUBCLASS_RBC		1
++#define  UISUBCLASS_SFF8020I	2
++#define  UISUBCLASS_QIC157	3
++#define  UISUBCLASS_UFI		4
++#define  UISUBCLASS_SFF8070I	5
++#define  UISUBCLASS_SCSI	6
++#define  UIPROTO_MASS_CBI_I	0
++#define  UIPROTO_MASS_CBI	1
++#define  UIPROTO_MASS_BBB_OLD	2	/* Not in the spec anymore */
++#define  UIPROTO_MASS_BBB	80	/* 'P' for the Iomega Zip drive */
++
++#define UICLASS_HUB		0x09
++#define  UISUBCLASS_HUB		0
++#define  UIPROTO_FSHUB		0
++#define  UIPROTO_HSHUBSTT	0 /* Yes, same as previous */
++#define  UIPROTO_HSHUBMTT	1
++
++#define UICLASS_CDC_DATA	0x0a
++#define  UISUBCLASS_DATA		0
++#define   UIPROTO_DATA_ISDNBRI		0x30    /* Physical iface */
++#define   UIPROTO_DATA_HDLC		0x31    /* HDLC */
++#define   UIPROTO_DATA_TRANSPARENT	0x32    /* Transparent */
++#define   UIPROTO_DATA_Q921M		0x50    /* Management for Q921 */
++#define   UIPROTO_DATA_Q921		0x51    /* Data for Q921 */
++#define   UIPROTO_DATA_Q921TM		0x52    /* TEI multiplexer for Q921 */
++#define   UIPROTO_DATA_V42BIS		0x90    /* Data compression */
++#define   UIPROTO_DATA_Q931		0x91    /* Euro-ISDN */
++#define   UIPROTO_DATA_V120		0x92    /* V.24 rate adaption */
++#define   UIPROTO_DATA_CAPI		0x93    /* CAPI 2.0 commands */
++#define   UIPROTO_DATA_HOST_BASED	0xfd    /* Host based driver */
++#define   UIPROTO_DATA_PUF		0xfe    /* see Prot. Unit Func. Desc.*/
++#define   UIPROTO_DATA_VENDOR		0xff    /* Vendor specific */
++
++#define UICLASS_SMARTCARD	0x0b
++
++/*#define UICLASS_FIRM_UPD	0x0c*/
++
++#define UICLASS_SECURITY	0x0d
++
++#define UICLASS_DIAGNOSTIC	0xdc
++
++#define UICLASS_WIRELESS	0xe0
++#define  UISUBCLASS_RF			0x01
++#define   UIPROTO_BLUETOOTH		0x01
++
++#define UICLASS_APPL_SPEC	0xfe
++#define  UISUBCLASS_FIRMWARE_DOWNLOAD	1
++#define  UISUBCLASS_IRDA		2
++#define  UIPROTO_IRDA			0
++
++#define UICLASS_VENDOR		0xff
++
++#define USB_HUB_MAX_DEPTH 5
++
++/*
++ * Minimum time a device needs to be powered down to go through
++ * a power cycle.  XXX Are these times in the spec?
++ */
++#define USB_POWER_DOWN_TIME	200 /* ms */
++#define USB_PORT_POWER_DOWN_TIME	100 /* ms */
++
++#if 0
++/* These are the values from the spec. */
++#define USB_PORT_RESET_DELAY	10  /* ms */
++#define USB_PORT_ROOT_RESET_DELAY 50  /* ms */
++#define USB_PORT_RESET_RECOVERY	10  /* ms */
++#define USB_PORT_POWERUP_DELAY	100 /* ms */
++#define USB_SET_ADDRESS_SETTLE	2   /* ms */
++#define USB_RESUME_DELAY	(20*5)  /* ms */
++#define USB_RESUME_WAIT		10  /* ms */
++#define USB_RESUME_RECOVERY	10  /* ms */
++#define USB_EXTRA_POWER_UP_TIME	0   /* ms */
++#else
++/* Allow for marginal (i.e. non-conforming) devices. */
++#define USB_PORT_RESET_DELAY	50  /* ms */
++#define USB_PORT_ROOT_RESET_DELAY 250  /* ms */
++#define USB_PORT_RESET_RECOVERY	250  /* ms */
++#define USB_PORT_POWERUP_DELAY	300 /* ms */
++#define USB_SET_ADDRESS_SETTLE	10  /* ms */
++#define USB_RESUME_DELAY	(50*5)  /* ms */
++#define USB_RESUME_WAIT		50  /* ms */
++#define USB_RESUME_RECOVERY	50  /* ms */
++#define USB_EXTRA_POWER_UP_TIME	20  /* ms */
++#endif
++
++#define USB_MIN_POWER		100 /* mA */
++#define USB_MAX_POWER		500 /* mA */
++
++#define USB_BUS_RESET_DELAY	100 /* ms XXX?*/
++
++#define USB_UNCONFIG_NO 0
++#define USB_UNCONFIG_INDEX (-1)
++
++/*** ioctl() related stuff ***/
++
++struct usb_ctl_request {
++	int	ucr_addr;
++	usb_device_request_t ucr_request;
++	void	*ucr_data;
++	int	ucr_flags;
++#define USBD_SHORT_XFER_OK	0x04	/* allow short reads */
++	int	ucr_actlen;		/* actual length transferred */
++};
++
++struct usb_alt_interface {
++	int	uai_config_index;
++	int	uai_interface_index;
++	int	uai_alt_no;
++};
++
++#define USB_CURRENT_CONFIG_INDEX (-1)
++#define USB_CURRENT_ALT_INDEX (-1)
++
++struct usb_config_desc {
++	int	ucd_config_index;
++	usb_config_descriptor_t ucd_desc;
++};
++
++struct usb_interface_desc {
++	int	uid_config_index;
++	int	uid_interface_index;
++	int	uid_alt_index;
++	usb_interface_descriptor_t uid_desc;
++};
++
++struct usb_endpoint_desc {
++	int	ued_config_index;
++	int	ued_interface_index;
++	int	ued_alt_index;
++	int	ued_endpoint_index;
++	usb_endpoint_descriptor_t ued_desc;
++};
++
++struct usb_full_desc {
++	int	ufd_config_index;
++	u_int	ufd_size;
++	u_char	*ufd_data;
++};
++
++struct usb_string_desc {
++	int	usd_string_index;
++	int	usd_language_id;
++	usb_string_descriptor_t usd_desc;
++};
++
++struct usb_ctl_report_desc {
++	int	ucrd_size;
++	u_char	ucrd_data[1024];	/* filled data size will vary */
++};
++
++typedef struct { u_int32_t cookie; } usb_event_cookie_t;
++
++#define USB_MAX_DEVNAMES 4
++#define USB_MAX_DEVNAMELEN 16
++struct usb_device_info {
++	u_int8_t	udi_bus;
++	u_int8_t	udi_addr;	/* device address */
++	usb_event_cookie_t udi_cookie;
++	char		udi_product[USB_MAX_STRING_LEN];
++	char		udi_vendor[USB_MAX_STRING_LEN];
++	char		udi_release[8];
++	u_int16_t	udi_productNo;
++	u_int16_t	udi_vendorNo;
++	u_int16_t	udi_releaseNo;
++	u_int8_t	udi_class;
++	u_int8_t	udi_subclass;
++	u_int8_t	udi_protocol;
++	u_int8_t	udi_config;
++	u_int8_t	udi_speed;
++#define USB_SPEED_UNKNOWN	0
++#define USB_SPEED_LOW		1
++#define USB_SPEED_FULL		2
++#define USB_SPEED_HIGH		3
++#define USB_SPEED_VARIABLE	4
++#define USB_SPEED_SUPER		5
++	int		udi_power;	/* power consumption in mA, 0 if selfpowered */
++	int		udi_nports;
++	char		udi_devnames[USB_MAX_DEVNAMES][USB_MAX_DEVNAMELEN];
++	u_int8_t	udi_ports[16];/* hub only: addresses of devices on ports */
++#define USB_PORT_ENABLED 0xff
++#define USB_PORT_SUSPENDED 0xfe
++#define USB_PORT_POWERED 0xfd
++#define USB_PORT_DISABLED 0xfc
++};
++
++struct usb_ctl_report {
++	int	ucr_report;
++	u_char	ucr_data[1024];	/* filled data size will vary */
++};
++
++struct usb_device_stats {
++	u_long	uds_requests[4];	/* indexed by transfer type UE_* */
++};
++
++#define WUSB_MIN_IE			0x80
++#define WUSB_WCTA_IE			0x80
++#define WUSB_WCONNECTACK_IE		0x81
++#define WUSB_WHOSTINFO_IE		0x82
++#define  WUHI_GET_CA(_bmAttributes_) ((_bmAttributes_) & 0x3)
++#define   WUHI_CA_RECONN		0x00
++#define   WUHI_CA_LIMITED		0x01
++#define   WUHI_CA_ALL			0x03
++#define  WUHI_GET_MLSI(_bmAttributes_) (((_bmAttributes_) & 0x38) >> 3)
++#define WUSB_WCHCHANGEANNOUNCE_IE	0x83
++#define WUSB_WDEV_DISCONNECT_IE		0x84
++#define WUSB_WHOST_DISCONNECT_IE	0x85
++#define WUSB_WRELEASE_CHANNEL_IE	0x86
++#define WUSB_WWORK_IE			0x87
++#define WUSB_WCHANNEL_STOP_IE		0x88
++#define WUSB_WDEV_KEEPALIVE_IE		0x89
++#define WUSB_WISOCH_DISCARD_IE		0x8A
++#define WUSB_WRESETDEVICE_IE		0x8B
++#define WUSB_WXMIT_PACKET_ADJUST_IE	0x8C
++#define WUSB_MAX_IE			0x8C
++
++/* Device Notification Types */
++
++#define WUSB_DN_MIN			0x01
++#define WUSB_DN_CONNECT			0x01
++# define WUSB_DA_OLDCONN	0x00
++# define WUSB_DA_NEWCONN	0x01
++# define WUSB_DA_SELF_BEACON	0x02
++# define WUSB_DA_DIR_BEACON	0x04
++# define WUSB_DA_NO_BEACON	0x06
++#define WUSB_DN_DISCONNECT		0x02
++#define WUSB_DN_EPRDY			0x03
++#define WUSB_DN_MASAVAILCHANGED		0x04
++#define WUSB_DN_REMOTEWAKEUP		0x05
++#define WUSB_DN_SLEEP			0x06
++#define WUSB_DN_ALIVE			0x07
++#define WUSB_DN_MAX			0x07
++
++#ifdef _MSC_VER
++#include <pshpack1.h>
++#endif
++
++/* WUSB Handshake Data.  Used during the SET/GET HANDSHAKE requests */
++typedef struct wusb_hndshk_data {
++	uByte bMessageNumber;
++	uByte bStatus;
++	uByte tTKID[3];
++	uByte bReserved;
++	uByte CDID[16];
++	uByte Nonce[16];
++	uByte MIC[8];
++} UPACKED wusb_hndshk_data_t;
++#define WUSB_HANDSHAKE_LEN_FOR_MIC	38
++
++/* WUSB Connection Context */
++typedef struct wusb_conn_context {
++	uByte CHID [16];
++	uByte CDID [16];
++	uByte CK [16];
++} UPACKED wusb_conn_context_t;
++
++/* WUSB Security Descriptor */
++typedef struct wusb_security_desc {
++	uByte bLength;
++	uByte bDescriptorType;
++	uWord wTotalLength;
++	uByte bNumEncryptionTypes;
++} UPACKED wusb_security_desc_t;
++
++/* WUSB Encryption Type Descriptor */
++typedef struct wusb_encrypt_type_desc {
++	uByte bLength;
++	uByte bDescriptorType;
++
++	uByte bEncryptionType;
++#define WUETD_UNSECURE		0
++#define WUETD_WIRED		1
++#define WUETD_CCM_1		2
++#define WUETD_RSA_1		3
++
++	uByte bEncryptionValue;
++	uByte bAuthKeyIndex;
++} UPACKED wusb_encrypt_type_desc_t;
++
++/* WUSB Key Descriptor */
++typedef struct wusb_key_desc {
++	uByte bLength;
++	uByte bDescriptorType;
++	uByte tTKID[3];
++	uByte bReserved;
++	uByte KeyData[1];	/* variable length */
++} UPACKED wusb_key_desc_t;
++
++/* WUSB BOS Descriptor (Binary device Object Store) */
++typedef struct wusb_bos_desc {
++	uByte bLength;
++	uByte bDescriptorType;
++	uWord wTotalLength;
++	uByte bNumDeviceCaps;
++} UPACKED wusb_bos_desc_t;
++
++#define USB_DEVICE_CAPABILITY_20_EXTENSION	0x02
++typedef struct usb_dev_cap_20_ext_desc {
++	uByte bLength;
++	uByte bDescriptorType;
++	uByte bDevCapabilityType;
++#define USB_20_EXT_LPM				0x02
++	uDWord bmAttributes;
++} UPACKED usb_dev_cap_20_ext_desc_t;
++
++#define USB_DEVICE_CAPABILITY_SS_USB		0x03
++typedef struct usb_dev_cap_ss_usb {
++	uByte bLength;
++	uByte bDescriptorType;
++	uByte bDevCapabilityType;
++#define USB_DC_SS_USB_LTM_CAPABLE		0x02
++	uByte bmAttributes;
++#define USB_DC_SS_USB_SPEED_SUPPORT_LOW		0x01
++#define USB_DC_SS_USB_SPEED_SUPPORT_FULL	0x02
++#define USB_DC_SS_USB_SPEED_SUPPORT_HIGH	0x04
++#define USB_DC_SS_USB_SPEED_SUPPORT_SS		0x08
++	uWord wSpeedsSupported;
++	uByte bFunctionalitySupport;
++	uByte bU1DevExitLat;
++	uWord wU2DevExitLat;
++} UPACKED usb_dev_cap_ss_usb_t;
++
++#define USB_DEVICE_CAPABILITY_CONTAINER_ID	0x04
++typedef struct usb_dev_cap_container_id {
++	uByte bLength;
++	uByte bDescriptorType;
++	uByte bDevCapabilityType;
++	uByte bReserved;
++	uByte containerID[16];
++} UPACKED usb_dev_cap_container_id_t;
++
++/* Device Capability Type Codes */
++#define WUSB_DEVICE_CAPABILITY_WIRELESS_USB 0x01
++
++/* Device Capability Descriptor */
++typedef struct wusb_dev_cap_desc {
++	uByte bLength;
++	uByte bDescriptorType;
++	uByte bDevCapabilityType;
++	uByte caps[1];	/* Variable length */
++} UPACKED wusb_dev_cap_desc_t;
++
++/* Device Capability Descriptor */
++typedef struct wusb_dev_cap_uwb_desc {
++	uByte bLength;
++	uByte bDescriptorType;
++	uByte bDevCapabilityType;
++	uByte bmAttributes;
++	uWord wPHYRates;	/* Bitmap */
++	uByte bmTFITXPowerInfo;
++	uByte bmFFITXPowerInfo;
++	uWord bmBandGroup;
++	uByte bReserved;
++} UPACKED wusb_dev_cap_uwb_desc_t;
++
++/* Wireless USB Endpoint Companion Descriptor */
++typedef struct wusb_endpoint_companion_desc {
++	uByte bLength;
++	uByte bDescriptorType;
++	uByte bMaxBurst;
++	uByte bMaxSequence;
++	uWord wMaxStreamDelay;
++	uWord wOverTheAirPacketSize;
++	uByte bOverTheAirInterval;
++	uByte bmCompAttributes;
++} UPACKED wusb_endpoint_companion_desc_t;
++
++/* Wireless USB Numeric Association M1 Data Structure */
++typedef struct wusb_m1_data {
++	uByte version;
++	uWord langId;
++	uByte deviceFriendlyNameLength;
++	uByte sha_256_m3[32];
++	uByte deviceFriendlyName[256];
++} UPACKED wusb_m1_data_t;
++
++typedef struct wusb_m2_data {
++	uByte version;
++	uWord langId;
++	uByte hostFriendlyNameLength;
++	uByte pkh[384];
++	uByte hostFriendlyName[256];
++} UPACKED wusb_m2_data_t;
++
++typedef struct wusb_m3_data {
++	uByte pkd[384];
++	uByte nd;
++} UPACKED wusb_m3_data_t;
++
++typedef struct wusb_m4_data {
++	uDWord _attributeTypeIdAndLength_1;
++	uWord  associationTypeId;
++
++	uDWord _attributeTypeIdAndLength_2;
++	uWord  associationSubTypeId;
++
++	uDWord _attributeTypeIdAndLength_3;
++	uDWord length;
++
++	uDWord _attributeTypeIdAndLength_4;
++	uDWord associationStatus;
++
++	uDWord _attributeTypeIdAndLength_5;
++	uByte  chid[16];
++
++	uDWord _attributeTypeIdAndLength_6;
++	uByte  cdid[16];
++
++	uDWord _attributeTypeIdAndLength_7;
++	uByte  bandGroups[2];
++} UPACKED wusb_m4_data_t;
++
++#ifdef _MSC_VER
++#include <poppack.h>
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* _USB_H_ */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/Kconfig b/drivers/usb/host/fh_otg/fh_otg/Kconfig
+new file mode 100644
+index 00000000..0a22f9a3
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/Kconfig
+@@ -0,0 +1,14 @@
++config FH_HOST_ONLY
++	bool "Host only mode"
++	default y
++	depends on USB_FH_OTG
++	help
++		Build the driver in host-only mode for the USB 2.0
++		high-speed OTG controller integrated into many SoCs.
++
++config FH_DEVICE_ONLY
++	bool "Gadget only mode"
++	depends on USB_FH_OTG
++	help
++		Build the driver in gadget-only mode for the USB 2.0
++		high-speed OTG controller integrated into many SoCs.
+diff --git a/drivers/usb/host/fh_otg/fh_otg/Makefile b/drivers/usb/host/fh_otg/fh_otg/Makefile
+new file mode 100644
+index 00000000..8effcb07
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/Makefile
+@@ -0,0 +1,80 @@
++#
++# Makefile for FH_otg Highspeed USB controller driver
++#
++
++ifneq ($(KERNELRELEASE),)
++
++# Use the BUS_INTERFACE variable to compile the software for either 
++# PCI(PCI_INTERFACE) or LM(LM_INTERFACE) bus.
++ifeq ($(BUS_INTERFACE),)
++	BUS_INTERFACE = -DPCI_INTERFACE
++#	BUS_INTERFACE = -DLM_INTERFACE
++endif
++
++#EXTRA_CFLAGS	+= -DDEBUG 
++
++# Use one of the following flags to compile the software in host-only or
++# device-only mode.
++
++ifeq ($(CONFIG_FH_HOST_ONLY)_$(CONFIG_FH_DEVICE_ONLY), y_y)
++$(error FH_HOST_ONLY and FH_DEVICE_ONLY are mutually exclusive; enable at most one)
++endif
++
++ifneq ($(CONFIG_FH_HOST_ONLY),)
++EXTRA_CFLAGS        += -DFH_HOST_ONLY
++endif
++ifneq ($(CONFIG_FH_DEVICE_ONLY),)
++EXTRA_CFLAGS        += -DFH_DEVICE_ONLY
++endif
++
++EXTRA_CFLAGS	+= -Dlinux -DFH_HS_ELECT_TST
++#EXTRA_CFLAGS	+= -DFH_EN_ISOC
++EXTRA_CFLAGS   	+= -I$(PWD)/../fh_common_port
++#EXTRA_CFLAGS   	+= -I$(PORTLIB)
++EXTRA_CFLAGS   	+= -DFH_LINUX
++EXTRA_CFLAGS   	+= $(CFI)
++EXTRA_CFLAGS	+= $(BUS_INTERFACE)
++#EXTRA_CFLAGS	+= -DFH_DEV_SRPCAP
++
++obj-y	:= fh_otg.o
++
++fh_otg-objs	:= fh_otg_driver.o fh_otg_attr.o 
++fh_otg-objs	+= fh_otg_cil.o fh_otg_cil_intr.o 
++fh_otg-objs	+= fh_otg_pcd_linux.o fh_otg_pcd.o fh_otg_pcd_intr.o 
++fh_otg-objs	+= fh_otg_hcd.o fh_otg_hcd_linux.o fh_otg_hcd_intr.o fh_otg_hcd_queue.o fh_otg_hcd_ddma.o
++fh_otg-objs	+= fh_otg_adp.o
++ifneq ($(CFI),)
++fh_otg-objs	+= fh_otg_cfi.o
++endif
++
++else
++
++PWD		:= $(shell pwd)
++PORTLIB		:= $(PWD)/../fh_common_port
++
++# Command paths
++CTAGS		:= $(CTAGS)
++DOXYGEN		:= $(DOXYGEN)
++
++default: portlib
++	$(MAKE) -C$(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
++	
++install: default
++	$(MAKE) -C$(KDIR) M=$(PORTLIB) modules_install
++	$(MAKE) -C$(KDIR) M=$(PWD) modules_install	
++
++portlib:
++	$(MAKE) -C$(KDIR) M=$(PORTLIB) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
++	cp $(PORTLIB)/Module.symvers $(PWD)/
++	
++docs:	$(wildcard *.[hc]) doc/doxygen.cfg
++	$(DOXYGEN) doc/doxygen.cfg
++
++tags:	$(wildcard *.[hc])
++	$(CTAGS) -e $(wildcard *.[hc]) $(wildcard linux/*.[hc]) $(wildcard $(KDIR)/include/linux/usb*.h)
++
++
++clean:
++	rm -rf   *.o *.ko .*cmd *.mod.c .tmp_versions Module.symvers
++
++endif
+diff --git a/drivers/usb/host/fh_otg/fh_otg/README b/drivers/usb/host/fh_otg/fh_otg/README
+new file mode 100644
+index 00000000..93cfd3b4
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/README
+@@ -0,0 +1,17 @@
++Instructions for building the HSOTG driver
++(the portability library is built on the fly)
++---------------------------------------
++- Export the necessary environment variables or pass them on the make command line.
++
++# Path to the installed kernel directory
++  % export KDIR=/...
++# Architecture type - for HAPS x86_64/x86, for IPMATE ARM
++  % export ARCH=x86_64
++# If BUS_INTERFACE is not exported, PCI_INTERFACE is the default; for IPMATE use LM_INTERFACE
++  
++- Build the driver.
++  % make 
++
++- Install the driver (by default /lib/modules/x.x.xx.x/extra or export INSTALL_MOD_PATH for custom directory)
++  % make install
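++
++Example invocation (the paths and toolchain prefix below are illustrative):
++  % make KDIR=/path/to/kernel/build ARCH=arm \
++         CROSS_COMPILE=arm-openipc-linux-musleabi- BUS_INTERFACE=-DLM_INTERFACE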
++
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_cfi_common.h b/drivers/usb/host/fh_otg/fh_otg/fh_cfi_common.h
+new file mode 100644
+index 00000000..34b453fd
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_cfi_common.h
+@@ -0,0 +1,142 @@
++/* ==========================================================================
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ * 
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ * 
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++#if !defined(__FH_CFI_COMMON_H__)
++#define __FH_CFI_COMMON_H__
++
++//#include <linux/types.h>
++
++/**
++ * @file 
++ *
++ * This file contains the CFI specific common constants, interfaces
++ * (functions and macros) and structures for Linux. No PCD specific
++ * data structure or definition is to be included in this file.
++ *
++ */
++
++/** This is a request for all Core Features */
++#define VEN_CORE_GET_FEATURES		0xB1
++
++/** This is a request to get the value of a specific Core Feature */
++#define VEN_CORE_GET_FEATURE		0xB2
++
++/** This command allows the host to set the value of a specific Core Feature */
++#define VEN_CORE_SET_FEATURE		0xB3
++
++/** This command allows the host to set the default values of 
++ * either all or any specific Core Feature 
++ */
++#define VEN_CORE_RESET_FEATURES		0xB4
++
++/** This command forces the PCD to write the deferred values of the Core Features */
++#define VEN_CORE_ACTIVATE_FEATURES	0xB5
++
++/** This request reads a DWORD value from a register at the specified offset */
++#define VEN_CORE_READ_REGISTER		0xB6
++
++/** This request writes a DWORD value into a register at the specified offset */
++#define VEN_CORE_WRITE_REGISTER		0xB7
++
++/** This structure is the header of the Core Features dataset returned to 
++ *  the Host
++ */
++struct cfi_all_features_header {
++/** The features header structure length in bytes */
++#define CFI_ALL_FEATURES_HDR_LEN		8
++	/**
++	 * The total length of the features dataset returned to the Host 
++	 */
++	uint16_t wTotalLen;
++
++	/**
++	 * CFI version number in Binary-Coded Decimal (i.e., 1.00 is 100H).
++	 * This field identifies the version of the CFI Specification with which 
++	 * the device is compliant.
++	 */
++	uint16_t wVersion;
++
++	/** The ID of the Core */
++	uint16_t wCoreID;
++#define CFI_CORE_ID_UDC		1
++#define CFI_CORE_ID_OTG		2
++#define CFI_CORE_ID_WUDEV	3
++
++	/** Number of features returned by VEN_CORE_GET_FEATURES request */
++	uint16_t wNumFeatures;
++} UPACKED;
++
++typedef struct cfi_all_features_header cfi_all_features_header_t;
++
++/** This structure is a header of the Core Feature descriptor dataset returned to 
++ *  the Host after the VEN_CORE_GET_FEATURES request
++ */
++struct cfi_feature_desc_header {
++#define CFI_FEATURE_DESC_HDR_LEN	8
++
++	/** The feature ID */
++	uint16_t wFeatureID;
++
++	/** Length of this feature descriptor in bytes - including the
++	 * length of the feature name string
++	 */
++	uint16_t wLength;
++
++	/** The data length of this feature in bytes */
++	uint16_t wDataLength;
++
++	/** 
++	 * Attributes of this feature
++	 * D0: Access rights
++	 * 0 - Read/Write
++	 * 1 - Read only
++	 */
++	uint8_t bmAttributes;
++#define CFI_FEATURE_ATTR_RO		1
++#define CFI_FEATURE_ATTR_RW		0
++
++	/** Length of the feature name in bytes */
++	uint8_t bNameLen;
++
++	/** The feature name buffer */
++	//uint8_t *name;
++} UPACKED;
++
++typedef struct cfi_feature_desc_header cfi_feature_desc_header_t;
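++
++/* Illustrative layout (an assumption drawn from the definitions above, not
++ * from the CFI specification itself): the VEN_CORE_GET_FEATURES response is
++ * a cfi_all_features_header_t followed by wNumFeatures variable-length
++ * records, each a cfi_feature_desc_header_t plus bNameLen name bytes:
++ *
++ *   [all_features_header][desc_hdr|name][desc_hdr|name]...[desc_hdr|name]
++ */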
++
++/**
++ * This structure describes a NULL terminated string referenced by its id field.
++ * It is very similar to the usb_string structure, but its id field is 16 bits wide.
++ */
++struct cfi_string {
++	uint16_t id;
++	const uint8_t *s;
++};
++typedef struct cfi_string cfi_string_t;
++
++#endif
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.c
+new file mode 100644
+index 00000000..55f1e9d5
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.c
+@@ -0,0 +1,908 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_adp.c $
++ * $Revision: #16 $
++ * $Date: 2013/04/22 $
++ * $Change: 2211149 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++#include "../fh_common_port/fh_os.h"
++#include "fh_otg_regs.h"
++#include "fh_otg_cil.h"
++#include "fh_otg_adp.h"
++
++/** @file
++ *
++ * This file contains most of the Attach Detect Protocol implementation for
++ * the driver to support OTG Rev2.0.
++ *
++ */
++
++void fh_otg_adp_write_reg(fh_otg_core_if_t * core_if, uint32_t value)
++{
++	adpctl_data_t adpctl;
++
++	adpctl.d32 = value;
++	adpctl.b.ar = 0x2;
++
++	FH_WRITE_REG32(&core_if->core_global_regs->adpctl, adpctl.d32);
++
++	while (adpctl.b.ar) {
++		adpctl.d32 = FH_READ_REG32(&core_if->core_global_regs->adpctl);
++	}
++
++}
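++
++/* Note on the handshake above (inferred from this file, not from a
++ * databook): the ADPCTL register is accessed indirectly. ar == 0x2
++ * requests a write and ar == 0x1 requests a read (see
++ * fh_otg_adp_read_reg() below); the core clears the ar field once the
++ * access completes, which is what the polling loops wait for.
++ */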
++
++/**
++ * Function is called to read ADP registers
++ */
++uint32_t fh_otg_adp_read_reg(fh_otg_core_if_t * core_if)
++{
++	adpctl_data_t adpctl;
++
++	adpctl.d32 = 0;
++	adpctl.b.ar = 0x1;
++
++	FH_WRITE_REG32(&core_if->core_global_regs->adpctl, adpctl.d32);
++
++	while (adpctl.b.ar) {
++		adpctl.d32 = FH_READ_REG32(&core_if->core_global_regs->adpctl);
++	}
++
++	return adpctl.d32;
++}
++
++/**
++ * Function is called to read ADPCTL register and filter Write-clear bits
++ */
++uint32_t fh_otg_adp_read_reg_filter(fh_otg_core_if_t * core_if)
++{
++	adpctl_data_t adpctl;
++
++	adpctl.d32 = fh_otg_adp_read_reg(core_if);
++	adpctl.b.adp_tmout_int = 0;
++	adpctl.b.adp_prb_int = 0;
++	adpctl.b.adp_sns_int = 0;
++		
++	return adpctl.d32;
++}
++
++/**
++ * Function is called to modify ADP registers (read-modify-write)
++ */
++void fh_otg_adp_modify_reg(fh_otg_core_if_t * core_if, uint32_t clr,
++			    uint32_t set)
++{
++	fh_otg_adp_write_reg(core_if,
++			      (fh_otg_adp_read_reg(core_if) & (~clr)) | set);
++}
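++
++/* Illustrative use of the helper above (a sketch, not part of the original
++ * driver): clear the ADP probe interrupt mask bit with a single
++ * read-modify-write.
++ *
++ *	adpctl_data_t clr = {.d32 = 0};
++ *	clr.b.adp_prb_int_msk = 1;
++ *	fh_otg_adp_modify_reg(core_if, clr.d32, 0);
++ */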
++
++static void adp_probe_func(void * ptr)
++{
++	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;	
++	fh_otg_adp_probe_start(core_if);
++}
++
++static void adp_sense_timeout(void *ptr)
++{
++	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;
++	core_if->adp.sense_timer_started = 0;
++	FH_DEBUGPL(DBG_PCD, "ADP SENSE TIMEOUT\n");
++	if (core_if->adp_enable) {
++		fh_otg_adp_sense_stop(core_if);
++		FH_WORKQ_SCHEDULE_DELAYED(core_if->wq_otg, adp_probe_func,
++						core_if, 100 , "start probe");
++	}
++}
++
++/**
++ * This function is called when the ADP vbus timer expires. Timeout is 1.1s.
++ */
++static void adp_vbuson_timeout(void *ptr)
++{
++	gpwrdn_data_t gpwrdn;
++	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;
++	hprt0_data_t hprt0 = {.d32 = 0 };
++	pcgcctl_data_t pcgcctl = {.d32 = 0 };
++	FH_PRINTF("%s: 1.1 seconds expired after turning on VBUS\n", __FUNCTION__);
++	if (core_if) {
++		core_if->adp.vbuson_timer_started = 0;
++		if(fh_otg_is_host_mode(core_if)) {
++			/* Turn off vbus */
++			hprt0.b.prtpwr = 1;
++			FH_MODIFY_REG32(core_if->host_if->hprt0, hprt0.d32, 0);
++			cil_hcd_disconnect(core_if);
++		}
++		gpwrdn.d32 = 0;
++
++		/* Power off the core */
++		if (core_if->power_down == 2) {
++			/* Enable Wakeup Logic */
++//                      gpwrdn.b.wkupactiv = 1;
++			gpwrdn.b.pmuactv = 0;
++			gpwrdn.b.pwrdnrstn = 1;
++			gpwrdn.b.pwrdnclmp = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
++					 gpwrdn.d32);
++
++			/* Suspend the Phy Clock */
++			pcgcctl.b.stoppclk = 1;
++			FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
++
++			/* Switch on VDD */
++//                      gpwrdn.b.wkupactiv = 1;
++			gpwrdn.b.pmuactv = 1;
++			gpwrdn.b.pwrdnrstn = 1;
++			gpwrdn.b.pwrdnclmp = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
++					 gpwrdn.d32);
++		} else {
++			/* Enable Power Down Logic */
++			gpwrdn.b.pmuintsel = 1;
++			gpwrdn.b.pmuactv = 1;
++			if(fh_otg_is_host_mode(core_if))
++				gpwrdn.b.dis_vbus = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++		}
++
++		/* Power off the core */
++		if (core_if->power_down == 2) {
++			gpwrdn.d32 = 0;
++			gpwrdn.b.pwrdnswtch = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn,
++					 gpwrdn.d32, 0);
++		}
++
++		/* Unmask SRP detected interrupt from Power Down Logic */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.srp_det_msk = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++
++		fh_mdelay(220);
++		fh_otg_adp_probe_start(core_if);
++	}
++
++}
++
++/**
++ * Start the ADP Initial Probe timer to detect whether the Port Connected
++ * interrupt is asserted within 1.1 seconds.
++ *
++ * @param core_if the pointer to core_if structure.
++ */
++void fh_otg_adp_vbuson_timer_start(fh_otg_core_if_t * core_if)
++{
++	core_if->adp.vbuson_timer_started = 1;
++	if (core_if->adp.vbuson_timer)
++	{
++		FH_PRINTF("SCHEDULING VBUSON TIMER\n");
++		/* 1.1 secs + 60ms necessary for cil_hcd_start*/
++		FH_TIMER_SCHEDULE(core_if->adp.vbuson_timer, 1160);
++	} else {
++		FH_WARN("VBUSON_TIMER = %p\n",core_if->adp.vbuson_timer);
++	}
++}
++
++#if 0
++/**
++ * Masks all FH OTG core interrupts
++ *
++ */
++static void mask_all_interrupts(fh_otg_core_if_t * core_if)
++{
++	int i;
++	gahbcfg_data_t ahbcfg = {.d32 = 0 };
++
++	/* Mask Host Interrupts */
++
++	/* Clear and disable HCINTs */
++	for (i = 0; i < core_if->core_params->host_channels; i++) {
++		FH_WRITE_REG32(&core_if->host_if->hc_regs[i]->hcintmsk, 0);
++		FH_WRITE_REG32(&core_if->host_if->hc_regs[i]->hcint, 0xFFFFFFFF);
++
++	}
++
++	/* Clear and disable HAINT */
++	FH_WRITE_REG32(&core_if->host_if->host_global_regs->haintmsk, 0x0000);
++	FH_WRITE_REG32(&core_if->host_if->host_global_regs->haint, 0xFFFFFFFF);
++
++	/* Mask Device Interrupts */
++	if (!core_if->multiproc_int_enable) {
++		/* Clear and disable IN Endpoint interrupts */
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->diepmsk, 0);
++		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++			FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->
++					diepint, 0xFFFFFFFF);
++		}
++
++		/* Clear and disable OUT Endpoint interrupts */
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->doepmsk, 0);
++		for (i = 0; i <= core_if->dev_if->num_out_eps; i++) {
++			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->
++					doepint, 0xFFFFFFFF);
++		}
++
++		/* Clear and disable DAINT */
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->daint,
++				0xFFFFFFFF);
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->daintmsk, 0);
++	} else {
++		for (i = 0; i < core_if->dev_if->num_in_eps; ++i) {
++			FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->
++					diepeachintmsk[i], 0);
++			FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->
++					diepint, 0xFFFFFFFF);
++		}
++
++		for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
++			FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->
++					doepeachintmsk[i], 0);
++			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->
++					doepint, 0xFFFFFFFF);
++		}
++
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->deachintmsk,
++				0);
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->deachint,
++				0xFFFFFFFF);
++
++	}
++
++	/* Disable interrupts */
++	ahbcfg.b.glblintrmsk = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
++
++	/* Disable all interrupts. */
++	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, 0);
++
++	/* Clear any pending interrupts */
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
++
++	/* Clear any pending OTG Interrupts */
++	FH_WRITE_REG32(&core_if->core_global_regs->gotgint, 0xFFFFFFFF);
++}
++
++/**
++ * Unmask Port Connection Detected interrupt
++ *
++ */
++static void unmask_conn_det_intr(fh_otg_core_if_t * core_if)
++{
++	gintmsk_data_t gintmsk = {.d32 = 0,.b.portintr = 1 };
++
++	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
++}
++#endif
++
++/**
++ * Starts the ADP Probing
++ *
++ * @param core_if the pointer to core_if structure.
++ */
++uint32_t fh_otg_adp_probe_start(fh_otg_core_if_t * core_if)
++{
++
++	adpctl_data_t adpctl = {.d32 = 0};
++	gpwrdn_data_t gpwrdn;
++#if 0
++	adpctl_data_t adpctl_int = {.d32 = 0, .b.adp_prb_int = 1,
++				.b.adp_sns_int = 1, .b.adp_tmout_int = 1};
++#endif
++	if (core_if->stop_adpprb) {
++		core_if->stop_adpprb = 0;
++		return 0;
++	}
++	
++	fh_otg_disable_global_interrupts(core_if);
++	FH_DEBUGPL(DBG_ANY, "ADP Probe Start\n");
++	core_if->adp.probe_enabled = 1;
++
++	adpctl.b.adpres = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++
++	while (adpctl.b.adpres) {
++		adpctl.d32 = fh_otg_adp_read_reg(core_if);
++	}
++
++	adpctl.d32 = 0;
++	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++
++	/* In Host mode, unmask the SRP detected interrupt and change the
++	 * probe period accordingly */
++	if (!gpwrdn.b.idsts) {
++		gpwrdn.d32 = 0;
++		gpwrdn.b.srp_det_msk = 1;
++		adpctl.b.prb_per = 0;
++	}
++	else {
++		gpwrdn.d32 = 0;
++		gpwrdn.b.srp_det_msk = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, gpwrdn.d32, 0);
++		gpwrdn.d32 = 0;
++		gpwrdn.b.sts_chngint_msk = 1;
++		adpctl.b.prb_per = 1;
++	}
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++
++	adpctl.b.adp_tmout_int_msk = 1;
++	adpctl.b.adp_prb_int_msk = 1;
++	adpctl.b.prb_dschg = 1;
++	adpctl.b.prb_delta = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++
++	adpctl.b.adpen = 1;
++	adpctl.b.enaprb = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++	FH_DEBUGPL(DBG_ANY, "ADP Probe Finish\n");
++
++	return 0;
++}
++
++/**
++ * Starts the ADP Sense timer to detect whether the ADP Sense interrupt is
++ * asserted within 3.3 seconds.
++ *
++ * @param core_if the pointer to core_if structure.
++ */
++void fh_otg_adp_sense_timer_start(fh_otg_core_if_t * core_if)
++{
++	core_if->adp.sense_timer_started = 1;
++	FH_TIMER_SCHEDULE(core_if->adp.sense_timer, 3300 /* 3.3 secs */ );
++}
++
++/**
++ * Starts the ADP Sense
++ *
++ * @param core_if the pointer to core_if structure.
++ */
++uint32_t fh_otg_adp_sense_start(fh_otg_core_if_t * core_if)
++{
++	adpctl_data_t adpctl;
++
++	FH_DEBUGPL(DBG_PCD, "ADP Sense Start\n");
++
++	/* Set ADP reset bit*/
++	adpctl.d32 = fh_otg_adp_read_reg_filter(core_if);
++	adpctl.b.adpres = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++
++	while (adpctl.b.adpres) {
++		adpctl.d32 = fh_otg_adp_read_reg(core_if);
++	}
++
++	/* Unmask ADP sense interrupt and mask all other from the core */
++	adpctl.d32 = fh_otg_adp_read_reg_filter(core_if);
++	adpctl.b.adp_sns_int_msk = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++	fh_otg_disable_global_interrupts(core_if);
++	
++	adpctl.b.adpres = 0;
++	adpctl.b.adpen = 1;
++	adpctl.b.enasns = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++
++	fh_otg_adp_sense_timer_start(core_if);
++
++	return 0;
++}
++
++/**
++ * Stops the ADP Probing
++ *
++ * @param core_if the pointer to core_if structure.
++ */
++uint32_t fh_otg_adp_probe_stop(fh_otg_core_if_t * core_if)
++{
++
++	adpctl_data_t adpctl;
++	FH_DEBUGPL(DBG_ANY, "Stop ADP probe\n");
++	core_if->adp.probe_enabled = 0;
++	//core_if->adp.probe_counter = 0;
++	adpctl.d32 = fh_otg_adp_read_reg(core_if);
++
++	adpctl.b.adpen = 0;
++	adpctl.b.adp_prb_int = 1;
++	adpctl.b.adp_tmout_int = 1;
++	adpctl.b.adp_sns_int = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++
++	return 0;
++}
++
++/**
++ * Stops the ADP Sensing
++ *
++ * @param core_if the pointer to core_if structure.
++ */
++uint32_t fh_otg_adp_sense_stop(fh_otg_core_if_t * core_if)
++{
++	adpctl_data_t adpctl;
++
++	core_if->adp.sense_enabled = 0;
++
++	adpctl.d32 = fh_otg_adp_read_reg_filter(core_if);
++	adpctl.b.enasns = 0;
++	adpctl.b.adp_sns_int = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++
++	return 0;
++}
++
++/**
++ * Called to turn on the VBUS after initial ADP probe in host mode.
++ * If port power was already enabled in cil_hcd_start function then
++ * only schedule a timer.
++ *
++ * @param core_if the pointer to core_if structure.
++ */
++void fh_otg_adp_turnon_vbus(fh_otg_core_if_t * core_if)
++{
++	hprt0_data_t hprt0 = {.d32 = 0 };
++	hprt0.d32 = fh_otg_read_hprt0(core_if);
++	FH_PRINTF("Turn on VBUS for 1.1s, port power is %d\n", hprt0.b.prtpwr);
++
++	if (hprt0.b.prtpwr == 0) {
++		hprt0.b.prtpwr = 1;
++		//FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++	}
++	
++	fh_otg_adp_vbuson_timer_start(core_if);
++}
++
++/**
++ * Called right after driver is loaded
++ * to perform initial actions for ADP
++ *
++ * @param core_if the pointer to core_if structure.
++ * @param is_host - flag for current mode of operation either from GINTSTS or GPWRDN
++ */
++void fh_otg_adp_start(fh_otg_core_if_t * core_if, uint8_t is_host)
++{
++	gpwrdn_data_t gpwrdn;
++
++	FH_DEBUGPL(DBG_ANY, "ADP Initial Start\n");
++	core_if->adp.adp_started = 1;
++
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
++	fh_otg_disable_global_interrupts(core_if);
++	if (is_host) {
++		FH_PRINTF("HOST MODE\n");
++		//core_if->op_state = A_HOST; - vahrama, modified checking in hcd_start()
++		/* Enable Power Down Logic Interrupt*/
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuintsel = 1;
++		gpwrdn.b.pmuactv = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++		/* Initialize first ADP probe to obtain Ramp Time value */
++		core_if->adp.initial_probe = 1;
++		fh_otg_adp_probe_start(core_if);
++	} else {
++		gotgctl_data_t gotgctl;
++		gotgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++		FH_DEBUGPL(DBG_ANY, "DEVICE MODE\n");
++		//fh_otg_core_init(core_if);
++		if (gotgctl.b.bsesvld == 0) {
++			/* Enable Power Down Logic Interrupt*/
++			gpwrdn.d32 = 0;
++			FH_DEBUGPL(DBG_ANY, "VBUS is not valid - start ADP probe\n");
++			gpwrdn.b.pmuintsel = 1;
++			gpwrdn.b.pmuactv = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++			/* No need to return to the initial probe if we are coming back
++			 * to device mode after HNP */
++			if (core_if->op_state != B_HOST)
++				core_if->adp.initial_probe = 1;
++			fh_otg_adp_probe_start(core_if);
++		} else {
++			FH_PRINTF("VBUS is valid - initialize core as a Device\n");
++			core_if->op_state = B_PERIPHERAL;
++			//fh_otg_core_init(core_if);
++			fh_otg_enable_global_interrupts(core_if);
++			cil_pcd_start(core_if);
++			fh_otg_dump_global_registers(core_if);
++			fh_otg_dump_dev_registers(core_if);
++		}
++	}
++}
++
++void fh_otg_adp_init(fh_otg_core_if_t * core_if)
++{
++	core_if->adp.adp_started = 0;
++	core_if->adp.initial_probe = 0;
++	core_if->adp.probe_timer_values[0] = -1;
++	core_if->adp.probe_timer_values[1] = -1;
++	core_if->adp.probe_enabled = 0;
++	core_if->adp.sense_enabled = 0;
++	core_if->adp.sense_timer_started = 0;
++	core_if->adp.vbuson_timer_started = 0;
++	core_if->adp.probe_counter = 0;
++	core_if->adp.gpwrdn = 0;
++	core_if->adp.attached = FH_OTG_ADP_UNKOWN;
++	/* Initialize timers */
++	core_if->adp.sense_timer =
++	    FH_TIMER_ALLOC("ADP SENSE TIMER", adp_sense_timeout, core_if);
++	core_if->adp.vbuson_timer =
++	    FH_TIMER_ALLOC("ADP VBUS ON TIMER", adp_vbuson_timeout, core_if);
++	if (!core_if->adp.sense_timer || !core_if->adp.vbuson_timer)
++	{
++		FH_ERROR("Could not allocate memory for ADP timers\n");
++	}
++}
++
++void fh_otg_adp_remove(fh_otg_core_if_t * core_if)
++{
++	gpwrdn_data_t gpwrdn = { .d32 = 0 };
++	gpwrdn.b.pmuintsel = 1;
++	gpwrdn.b.pmuactv = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	if (core_if->adp.probe_enabled)		
++		fh_otg_adp_probe_stop(core_if);
++	if (core_if->adp.sense_enabled)		
++		fh_otg_adp_sense_stop(core_if);
++	if (core_if->adp.sense_timer_started)		
++		FH_TIMER_CANCEL(core_if->adp.sense_timer);
++	if (core_if->adp.vbuson_timer_started)		
++		FH_TIMER_CANCEL(core_if->adp.vbuson_timer);
++	FH_TIMER_FREE(core_if->adp.sense_timer);
++	FH_TIMER_FREE(core_if->adp.vbuson_timer);
++}
++
++/////////////////////////////////////////////////////////////////////
++////////////// ADP Interrupt Handlers ///////////////////////////////
++/////////////////////////////////////////////////////////////////////
++/**
++ * This function sets Ramp Timer values
++ */
++static uint32_t set_timer_value(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	if (core_if->adp.probe_timer_values[0] == -1) {
++		core_if->adp.probe_timer_values[0] = val;
++		core_if->adp.probe_timer_values[1] = -1;
++		return 1;
++	} else {
++		core_if->adp.probe_timer_values[1] =
++		    core_if->adp.probe_timer_values[0];
++		core_if->adp.probe_timer_values[0] = val;
++		return 0;
++	}
++}
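++
++/* Illustrative behavior of set_timer_value() (the numbers are made up):
++ * after probes report RTIM samples 100 then 103, probe_timer_values holds
++ * {103, 100}. The function returns 1 only for the first sample taken while
++ * slot 0 is still -1 (right after fh_otg_adp_init()); every later sample
++ * shifts the two-entry history and returns 0.
++ */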
++
++/**
++ * This function compares Ramp Timer values
++ */
++static uint32_t compare_timer_values(fh_otg_core_if_t * core_if)
++{
++	uint32_t diff;
++	uint32_t thres;
++	gpwrdn_data_t gpwrdn;
++	
++	/* The RTIM difference threshold differs for host and device modes */
++	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++	if (!gpwrdn.b.idsts)
++		thres = HOST_RTIM_THRESHOLD;
++	else
++		thres = DEVICE_RTIM_THRESHOLD;
++
++	FH_DEBUGPL(DBG_ANY, "timer value 0 %d timer value 1 %d\n", 
++		core_if->adp.probe_timer_values[0], core_if->adp.probe_timer_values[1]);
++	if (core_if->adp.probe_timer_values[0] >= core_if->adp.probe_timer_values[1])
++		diff = core_if->adp.probe_timer_values[0] - core_if->adp.probe_timer_values[1];
++	else
++		diff = core_if->adp.probe_timer_values[1] - core_if->adp.probe_timer_values[0];   	
++	if (diff < thres)
++		return 0;
++	else
++		return 1;
++}
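++
++/* Worked example (illustrative numbers): in host mode the threshold is
++ * HOST_RTIM_THRESHOLD == 5, so RTIM samples 100 and 103 give a difference
++ * of 3 and return 0 (no change detected), while samples 100 and 107 give 7
++ * and return 1, which the probe interrupt handler treats as a change on
++ * the port.
++ */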
++
++/**
++ * This function handles ADP Probe Interrupts
++ */
++static int32_t fh_otg_adp_handle_prb_intr(fh_otg_core_if_t * core_if,
++						 uint32_t val)
++{
++	adpctl_data_t adpctl = {.d32 = 0 };
++	gpwrdn_data_t gpwrdn, temp;
++	adpctl.d32 = val;
++
++	temp.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++	
++	core_if->adp.gpwrdn = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++	if (adpctl.b.rtim == 0 /*&& !temp.b.idsts*/){
++		FH_PRINTF("RTIM value is 0\n");	
++		goto exit;
++	}
++	core_if->adp.probe_counter++;
++	
++	if (set_timer_value(core_if, adpctl.b.rtim) &&
++	    core_if->adp.initial_probe) {
++		core_if->adp.initial_probe = 0;
++		fh_otg_adp_probe_stop(core_if);
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuactv = 1;
++		gpwrdn.b.pmuintsel = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
++
++		/* Check the ID pin state to tell host mode from device mode */
++		if (!temp.b.idsts) {	/* idsts == 0 means host mode */
++			/* Choose right op_state depending on previous one */
++			if (core_if->op_state == B_PERIPHERAL)
++				core_if->op_state = B_HOST;
++			else
++				core_if->op_state = A_HOST;
++			fh_otg_enable_global_interrupts(core_if);
++			/*
++			 * Turn on VBUS after initial ADP probe.
++			 */
++			FH_SPINUNLOCK(core_if->lock);
++			cil_hcd_start(core_if);
++			fh_otg_adp_turnon_vbus(core_if);
++			FH_SPINLOCK(core_if->lock);
++		} else {
++			/*
++			 * Initiate SRP after initial ADP probe.
++			 */
++			fh_otg_enable_global_interrupts(core_if);
++			fh_otg_initiate_srp(core_if);
++		}
++	} else if (core_if->adp.probe_counter > 2){
++		gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++		if (compare_timer_values(core_if)) {
++			FH_PRINTF("Difference in timer values !!! \n");
++//          core_if->adp.attached = FH_OTG_ADP_ATTACHED;
++			fh_otg_adp_probe_stop(core_if);
++
++			/* Power on the core */
++			if (core_if->power_down == 2) {
++				gpwrdn.b.pwrdnswtch = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++			}
++
++			/* Check the ID pin state to tell host mode from device mode */
++			if (!temp.b.idsts) {	/* idsts == 0 means host mode */
++				/* Disable Interrupt from Power Down Logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pmuintsel = 1;
++				gpwrdn.b.pmuactv = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, gpwrdn.d32, 0);
++
++				/*
++				 * Initialize the Core for Host mode.
++				 * Choose right op_state depending on previous one
++				 */
++				if (core_if->op_state == B_PERIPHERAL)
++					core_if->op_state = B_HOST;
++				else
++					core_if->op_state = A_HOST;
++
++				fh_otg_core_init(core_if);
++				fh_otg_enable_global_interrupts(core_if);
++				cil_hcd_start(core_if);
++				fh_otg_adp_turnon_vbus(core_if);
++			} else {
++				gotgctl_data_t gotgctl;
++				/* Mask SRP detected interrupt from Power Down Logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.srp_det_msk = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, gpwrdn.d32, 0);
++
++				/* Disable Power Down Logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pmuintsel = 1;
++				gpwrdn.b.pmuactv = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, gpwrdn.d32, 0);
++
++				/*
++				 * Initialize the Core for Device mode.
++				 */
++				core_if->op_state = B_PERIPHERAL;
++				//fh_otg_core_init(core_if);
++				cil_pcd_start(core_if);
++				fh_otg_enable_global_interrupts(core_if);
++
++				gotgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++				if (!gotgctl.b.bsesvld)
++					fh_otg_initiate_srp(core_if);
++			}
++		}
++		if (core_if->power_down == 2) {
++			if (gpwrdn.b.bsessvld) {
++				/* Mask SRP detected interrupt from Power Down Logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.srp_det_msk = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++				
++				/* Disable Power Down Logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pmuactv = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++				/*
++				 * Initialize the Core for Device mode.
++				 */
++				core_if->op_state = B_PERIPHERAL;
++				fh_otg_core_init(core_if);
++				fh_otg_enable_global_interrupts(core_if);
++				cil_pcd_start(core_if);
++			}
++		}
++	}
++exit:
++	/* Clear interrupt */
++	adpctl.d32 = fh_otg_adp_read_reg(core_if);
++	adpctl.b.adp_prb_int = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++
++	return 0;
++}
++
++/**
++ * This function handles the ADP Sense Interrupt
++ */
++static int32_t fh_otg_adp_handle_sns_intr(fh_otg_core_if_t * core_if)
++{
++	adpctl_data_t adpctl;
++	/* Stop ADP Sense timer */
++	FH_TIMER_CANCEL(core_if->adp.sense_timer);
++
++	/* Restart ADP Sense timer */
++	fh_otg_adp_sense_timer_start(core_if);
++	
++	/* Clear interrupt */
++	adpctl.d32 = fh_otg_adp_read_reg(core_if);
++	adpctl.b.adp_sns_int = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++
++	return 0;
++}
++
++/**
++ * This function handles the ADP Probe Timeout Interrupt
++ */
++static int32_t fh_otg_adp_handle_prb_tmout_intr(fh_otg_core_if_t * core_if,
++						 uint32_t val)
++{
++	adpctl_data_t adpctl = {.d32 = 0 };
++	adpctl.d32 = val;
++	set_timer_value(core_if, adpctl.b.rtim);
++	
++	/* Clear interrupt */
++	adpctl.d32 = fh_otg_adp_read_reg(core_if);
++	adpctl.b.adp_tmout_int = 1;
++	fh_otg_adp_write_reg(core_if, adpctl.d32);
++
++	return 0;
++}
++
++/**
++ * ADP Interrupt handler.
++ *
++ */
++int32_t fh_otg_adp_handle_intr(fh_otg_core_if_t * core_if)
++{
++	int retval = 0;
++	adpctl_data_t adpctl = {.d32 = 0};
++
++	adpctl.d32 = fh_otg_adp_read_reg(core_if);
++	FH_DEBUGPL(DBG_ANY, "ADPCTL = %08x RAMP TIME = %d\n", adpctl.d32, adpctl.b.rtim);
++
++	if (adpctl.b.adp_sns_int & adpctl.b.adp_sns_int_msk) {
++		FH_DEBUGPL(DBG_ANY, "ADP Sense interrupt\n");
++		retval |= fh_otg_adp_handle_sns_intr(core_if);
++	}
++	if (adpctl.b.adp_tmout_int & adpctl.b.adp_tmout_int_msk) {
++		FH_DEBUGPL(DBG_ANY, "ADP timeout interrupt\n");
++		retval |= fh_otg_adp_handle_prb_tmout_intr(core_if, adpctl.d32);
++	}
++	if (adpctl.b.adp_prb_int & adpctl.b.adp_prb_int_msk) {
++		FH_DEBUGPL(DBG_ANY, "ADP Probe interrupt\n");
++		adpctl.b.adp_prb_int = 1;	
++		retval |= fh_otg_adp_handle_prb_intr(core_if, adpctl.d32);
++	}
++
++//	fh_otg_adp_modify_reg(core_if, adpctl.d32, 0);
++	//fh_otg_adp_write_reg(core_if, adpctl.d32);
++	FH_DEBUGPL(DBG_ANY, "RETURN FROM ADP ISR\n");
++
++	return retval;
++}
++
++/**
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++int32_t fh_otg_adp_handle_srp_intr(fh_otg_core_if_t * core_if)
++{
++
++#ifndef FH_HOST_ONLY
++	hprt0_data_t hprt0;
++	gpwrdn_data_t gpwrdn;
++	FH_DEBUGPL(DBG_ANY, "++ Power Down Logic Session Request Interrupt++\n");
++
++	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++	/* Check the ID pin state to tell host mode from device mode */
++	if (!gpwrdn.b.idsts) {	/* idsts == 0 means host mode */
++		FH_PRINTF("SRP: Host mode\n");
++
++		if (core_if->adp_enable) {
++			fh_otg_adp_probe_stop(core_if);
++
++			/* Power on the core */
++			if (core_if->power_down == 2) {
++				gpwrdn.b.pwrdnswtch = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++			}
++
++			core_if->op_state = A_HOST;
++			fh_otg_core_init(core_if);
++			fh_otg_enable_global_interrupts(core_if);
++			cil_hcd_start(core_if);
++		}
++
++		/* Turn on the port power bit. */
++		hprt0.d32 = fh_otg_read_hprt0(core_if);
++		hprt0.b.prtpwr = 1;
++		FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++
++		/* Start the Connection timer. So a message can be displayed
++		 * if connect does not occur within 10 seconds. */
++		cil_hcd_session_start(core_if);
++	} else {
++		FH_DEBUGPL(DBG_PCD, "SRP: Device mode %s\n", __FUNCTION__);
++		if (core_if->adp_enable) {
++			fh_otg_adp_probe_stop(core_if);
++
++			/* Power on the core */
++			if (core_if->power_down == 2) {
++				gpwrdn.b.pwrdnswtch = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++			}
++
++			gpwrdn.d32 = 0;
++			gpwrdn.b.pmuactv = 0;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
++					 gpwrdn.d32);
++
++			core_if->op_state = B_PERIPHERAL;
++			fh_otg_core_init(core_if);
++			fh_otg_enable_global_interrupts(core_if);
++			cil_pcd_start(core_if);
++		}
++	}
++#endif
++	return 1;
++}
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.h
+new file mode 100644
+index 00000000..3ecc22ef
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_adp.h
+@@ -0,0 +1,82 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_adp.h $
++ * $Revision: #8 $
++ * $Date: 2013/04/09 $
++ * $Change: 2201932 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++#ifndef __FH_OTG_ADP_H__
++#define __FH_OTG_ADP_H__
++
++/**
++ * @file
++ *
++ * This file contains the Attach Detect Protocol interfaces, defines,
++ * functions and structures for Linux.
++ *
++ */
++
++#define FH_OTG_ADP_UNATTACHED	0
++#define FH_OTG_ADP_ATTACHED	1
++#define FH_OTG_ADP_UNKOWN	2
++#define HOST_RTIM_THRESHOLD 5
++#define DEVICE_RTIM_THRESHOLD 3
++
++typedef struct fh_otg_adp {
++	uint32_t adp_started;	
++	uint32_t initial_probe;
++	int32_t probe_timer_values[2];
++	uint32_t probe_enabled;
++	uint32_t sense_enabled;
++	fh_timer_t *sense_timer;
++	uint32_t sense_timer_started;
++	fh_timer_t *vbuson_timer;
++	uint32_t vbuson_timer_started;
++	uint32_t attached;
++	uint32_t probe_counter;
++	uint32_t gpwrdn;
++} fh_otg_adp_t;
++
++/**
++ * Attach Detect Protocol functions
++ */
++
++extern void fh_otg_adp_write_reg(fh_otg_core_if_t * core_if, uint32_t value);
++extern uint32_t fh_otg_adp_read_reg(fh_otg_core_if_t * core_if);
++extern uint32_t fh_otg_adp_probe_start(fh_otg_core_if_t * core_if);
++extern uint32_t fh_otg_adp_sense_start(fh_otg_core_if_t * core_if);
++extern uint32_t fh_otg_adp_probe_stop(fh_otg_core_if_t * core_if);
++extern uint32_t fh_otg_adp_sense_stop(fh_otg_core_if_t * core_if);
++extern void fh_otg_adp_start(fh_otg_core_if_t * core_if, uint8_t is_host);
++extern void fh_otg_adp_init(fh_otg_core_if_t * core_if);
++extern void fh_otg_adp_remove(fh_otg_core_if_t * core_if);
++extern int32_t fh_otg_adp_handle_intr(fh_otg_core_if_t * core_if);
++extern int32_t fh_otg_adp_handle_srp_intr(fh_otg_core_if_t * core_if);
++
++#endif //__FH_OTG_ADP_H__
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.c
+new file mode 100644
+index 00000000..648b08b9
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.c
+@@ -0,0 +1,1440 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_attr.c $
++ * $Revision: #47 $
++ * $Date: 2015/08/07 $
++ * $Change: 2913245 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++/** @file
++ *
++ * The diagnostic interface will provide access to the controller for
++ * bringing up the hardware and testing.  The Linux driver attributes
++ * feature will be used to provide the Linux Diagnostic
++ * Interface. These attributes are accessed through sysfs.
++ */
++
++/** @page "Linux Module Attributes"
++ *
++ * The Linux module attributes feature is used to provide the Linux
++ * Diagnostic Interface.  These attributes are accessed through sysfs.
++ * The diagnostic interface will provide access to the controller for
++ * bringing up the hardware and testing.
++
++ The following table shows the attributes.
++ <table>
++ <tr>
++ <td><b> Name</b></td>
++ <td><b> Description</b></td>
++ <td><b> Access</b></td>
++ </tr>
++
++ <tr>
++ <td> mode </td>
++ <td> Returns the current mode: 0 for device mode, 1 for host mode</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> hnpcapable </td>
++ <td> Gets or sets the "HNP-capable" bit in the Core USB Configuraton Register.
++ Read returns the current value.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> srpcapable </td>
++ <td> Gets or sets the "SRP-capable" bit in the Core USB Configuraton Register.
++ Read returns the current value.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> hsic_connect </td>
++ <td> Gets or sets the "HSIC-Connect" bit in the GLPMCFG Register.
++ Read returns the current value.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> inv_sel_hsic </td>
++ <td> Gets or sets the "Invert Select HSIC" bit in the GLPMFG Register.
++ Read returns the current value.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> hnp </td>
++ <td> Initiates the Host Negotiation Protocol.  Read returns the status.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> srp </td>
++ <td> Initiates the Session Request Protocol.  Read returns the status.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> buspower </td>
++ <td> Gets or sets the Power State of the bus (0 - Off or 1 - On)</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> bussuspend </td>
++ <td> Suspends the USB bus.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> busconnected </td>
++ <td> Gets the connection status of the bus</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> gotgctl </td>
++ <td> Gets or sets the Core Control Status Register.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> gusbcfg </td>
++ <td> Gets or sets the Core USB Configuration Register</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> grxfsiz </td>
++ <td> Gets or sets the Receive FIFO Size Register</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> gnptxfsiz </td>
++ <td> Gets or sets the non-periodic Transmit Size Register</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> gpvndctl </td>
++ <td> Gets or sets the PHY Vendor Control Register</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> ggpio </td>
++ <td> Gets the value in the lower 16-bits of the General Purpose IO Register
++ or sets the upper 16 bits.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> guid </td>
++ <td> Gets or sets the value of the User ID Register</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> gsnpsid </td>
++ <td> Gets the value of the Synopsys ID Register</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> devspeed </td>
++ <td> Gets or sets the device speed setting in the DCFG register</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> enumspeed </td>
++ <td> Gets the device enumeration Speed.</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> hptxfsiz </td>
++ <td> Gets the value of the Host Periodic Transmit FIFO</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> hprt0 </td>
++ <td> Gets or sets the value in the Host Port Control and Status Register</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> regoffset </td>
++ <td> Sets the register offset for the next Register Access</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> regvalue </td>
++ <td> Gets or sets the value of the register at the offset in the regoffset attribute.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> remote_wakeup </td>
++ <td> On read, shows the status of Remote Wakeup. On write, initiates a remote
++ wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote
++ Wakeup signalling bit in the Device Control Register is set for 1
++ milli-second.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> rem_wakeup_pwrdn </td>
++ <td> On read, shows the status of the core - hibernated or not. On write,
++ initiates a remote wakeup of the device from Hibernation. </td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> mode_ch_tim_en </td>
++ <td> Enables or disables waiting by the host core for 200 PHY clock cycles
++ at the end of Resume before changing the opmode signal to the PHY to 00
++ after Suspend or LPM. </td>
++ <td> Read/Write</td>
++ </tr>
++ 
++ <tr>
++ <td> fr_interval </td>
++ <td> On read, shows the value of the HFIR Frame Interval. On write,
++ dynamically reloads the HFIR register at runtime. The application can write
++ a value to this register only after the Port Enable bit of the Host Port
++ Control and Status register (HPRT.PrtEnaPort) has been set. </td>
++ <td> Read/Write</td>
++ </tr>
++ 
++ <tr>
++ <td> disconnect_us </td>
++ <td> On read, shows the status of disconnect_device_us. On write, sets disconnect_us
++ which causes soft disconnect for 100us. Applicable only for device mode of operation.</td>
++ <td> Read/Write</td>
++ </tr>
++
++ <tr>
++ <td> regdump </td>
++ <td> Dumps the contents of core registers.</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> spramdump </td>
++ <td> Dumps the contents of core registers.</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> hcddump </td>
++ <td> Dumps the current HCD state.</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> hcd_frrem </td>
++ <td> Shows the average value of the Frame Remaining
++ field in the Host Frame Number/Frame Remaining register when an SOF interrupt
++ occurs. This can be used to determine the average interrupt latency. Also
++ shows the average Frame Remaining value for start_transfer and the "a" and
++ "b" sample points. The "a" and "b" sample points may be used during debugging
++ to determine how long it takes to execute a section of the HCD code.</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> rd_reg_test </td>
++ <td> Displays the time required to read the GNPTXFSIZ register many times
++ (the output shows the number of times the register is read).</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> wr_reg_test </td>
++ <td> Displays the time required to write the GNPTXFSIZ register many times
++ (the output shows the number of times the register is written).</td>
++ <td> Read</td>
++ </tr>
++
++ <tr>
++ <td> lpm_response </td>
++ <td> Gets or sets lpm_response mode. Applicable only in device mode.</td>
++ <td> Write</td>
++ </tr>
++
++ <tr>
++ <td> sleep_status </td>
++ <td> Shows sleep status of device.</td>
++ <td> Read</td>
++ </tr>
++ 
++ <tr>
++ <td> hird_thres </td>
++ <td> Gets or sets the "HIRD_Thres[3:0]" bits in the Core LPM Configuration Register.
++ <td> Read/Write</td>
++ </tr>
++ 
++ <tr>
++ <td> besl_reject </td>
++ <td> Gets or sets the "besl_reject" bit in the Device Control Register.
++ <td> Read/Write</td>
++ </tr>
++
++ </table>
++
++ Example usage:
++ To get the current mode:
++ cat /sys/devices/lm0/mode
++
++ To power down the USB:
++ echo 0 > /sys/devices/lm0/buspower
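++
++ To read a core register through the raw register offset/value pair defined
++ below (the 0x0c offset is illustrative, not prescriptive):
++ echo 0x0c > /sys/devices/lm0/regoffset
++ cat /sys/devices/lm0/regvalue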
++ */
++#include <linux/platform_device.h>
++
++#include "fh_otg_os_dep.h"
++#include "../fh_common_port/fh_os.h"
++#include "fh_otg_driver.h"
++#include "fh_otg_attr.h"
++#include "fh_otg_core_if.h"
++#include "fh_otg_pcd_if.h"
++#include "fh_otg_hcd_if.h"
++
++/*
++ * MACROs for defining sysfs attribute
++ */
++#ifdef LM_INTERFACE
++
++#define FH_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_string_) \
++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
++{ \
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);		\
++	uint32_t val; \
++	val = fh_otg_get_##_otg_attr_name_ (otg_dev->core_if); \
++	return sprintf (buf, "%s = 0x%x\n", _string_, val); \
++}
++#define FH_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_string_) \
++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \
++					const char *buf, size_t count) \
++{ \
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \
++	uint32_t set = simple_strtoul(buf, NULL, 16); \
++	fh_otg_set_##_otg_attr_name_(otg_dev->core_if, set);\
++	return count; \
++}
++
++#elif defined(PCI_INTERFACE)
++
++#define FH_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_string_) \
++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
++{ \
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);	\
++	uint32_t val; \
++	val = fh_otg_get_##_otg_attr_name_ (otg_dev->core_if); \
++	return sprintf (buf, "%s = 0x%x\n", _string_, val); \
++}
++#define FH_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_string_) \
++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \
++					const char *buf, size_t count) \
++{ \
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);  \
++	uint32_t set = simple_strtoul(buf, NULL, 16); \
++	fh_otg_set_##_otg_attr_name_(otg_dev->core_if, set);\
++	return count; \
++}
++
++#endif
++
++/*
++ * MACROs for defining sysfs attribute for 32-bit registers
++ */
++#ifdef LM_INTERFACE
++#define FH_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_string_) \
++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
++{ \
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \
++	uint32_t val; \
++	val = fh_otg_get_##_otg_attr_name_ (otg_dev->core_if); \
++	return sprintf (buf, "%s = 0x%08x\n", _string_, val); \
++}
++#define FH_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_string_) \
++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \
++					const char *buf, size_t count) \
++{ \
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \
++	uint32_t val = simple_strtoul(buf, NULL, 16); \
++	fh_otg_set_##_otg_attr_name_ (otg_dev->core_if, val); \
++	return count; \
++}
++#elif defined(PCI_INTERFACE)
++#define FH_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_string_) \
++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
++{ \
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);  \
++	uint32_t val; \
++	val = fh_otg_get_##_otg_attr_name_ (otg_dev->core_if); \
++	return sprintf (buf, "%s = 0x%08x\n", _string_, val); \
++}
++#define FH_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_string_) \
++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \
++					const char *buf, size_t count) \
++{ \
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);  \
++	uint32_t val = simple_strtoul(buf, NULL, 16); \
++	fh_otg_set_##_otg_attr_name_ (otg_dev->core_if, val); \
++	return count; \
++}
++
++#endif
++
++#define FH_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_string_) \
++FH_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_string_) \
++FH_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_string_) \
++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store);
++
++#define FH_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_string_) \
++FH_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_string_) \
++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL);
++
++#define FH_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \
++FH_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_string_) \
++FH_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_string_) \
++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store);
++
++#define FH_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \
++FH_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_string_) \
++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL);
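++
++/* Illustrative expansion (a sketch, not generated output): the invocation
++ *   FH_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable, "HNPCapable");
++ * defines hnpcapable_show()/hnpcapable_store() wrappers around
++ * fh_otg_get_hnpcapable()/fh_otg_set_hnpcapable() on otg_dev->core_if and
++ * declares DEVICE_ATTR(hnpcapable, 0644, ...), i.e. a sysfs file named
++ * "hnpcapable" that is world-readable and root-writable.
++ */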
++
++/** @name Functions for Show/Store of Attributes */
++/**@{*/
++
++/**
++ * Show the register offset of the Register Access.
++ */
++static ssize_t regoffset_show(struct device *_dev,
++			      struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	return snprintf(buf, sizeof("0xFFFFFFFF\n") + 1, "0x%08x\n",
++			otg_dev->os_dep.reg_offset);
++}
++
++/**
++ * Set the register offset for the next Register Access.
++ */
++static ssize_t regoffset_store(struct device *_dev,
++			       struct device_attribute *attr,
++			       const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t offset = simple_strtoul(buf, NULL, 16);
++#ifdef LM_INTERFACE
++	if (offset < SZ_256K) {
++#elif  defined(PCI_INTERFACE)
++	if (offset < 0x00040000) {
++#endif
++		otg_dev->os_dep.reg_offset = offset;
++	} else {
++		dev_err(_dev, "invalid offset\n");
++	}
++
++	return count;
++}
++
++DEVICE_ATTR(regoffset, S_IRUGO | S_IWUSR, regoffset_show, regoffset_store);
++
++/**
++ * Show the value of the register at the offset in the reg_offset
++ * attribute.
++ */
++static ssize_t regvalue_show(struct device *_dev,
++			     struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t val;
++	volatile uint32_t *addr;
++
++	if (otg_dev->os_dep.reg_offset != 0xFFFFFFFF && 0 != otg_dev->os_dep.base) {
++		/* Calculate the address */
++		addr = (uint32_t *) (otg_dev->os_dep.reg_offset +
++				     (uint8_t *) otg_dev->os_dep.base);
++		val = FH_READ_REG32(addr);
++		return snprintf(buf,
++				sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n") + 1,
++				"Reg@0x%06x = 0x%08x\n", otg_dev->os_dep.reg_offset,
++				val);
++	} else {
++		dev_err(_dev, "Invalid offset (0x%0x)\n", otg_dev->os_dep.reg_offset);
++		return sprintf(buf, "invalid offset\n");
++	}
++}
++
++/**
++ * Store the value in the register at the offset in the reg_offset
++ * attribute.
++ *
++ */
++static ssize_t regvalue_store(struct device *_dev,
++			      struct device_attribute *attr,
++			      const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	volatile uint32_t *addr;
++	uint32_t val = simple_strtoul(buf, NULL, 16);
++	//dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val);
++	if (otg_dev->os_dep.reg_offset != 0xFFFFFFFF && 0 != otg_dev->os_dep.base) {
++		/* Calculate the address */
++		addr = (uint32_t *) (otg_dev->os_dep.reg_offset +
++				     (uint8_t *) otg_dev->os_dep.base);
++		FH_WRITE_REG32(addr, val);
++	} else {
++		dev_err(_dev, "Invalid Register Offset (0x%08x)\n",
++			otg_dev->os_dep.reg_offset);
++	}
++	return count;
++}
++
++DEVICE_ATTR(regvalue, S_IRUGO | S_IWUSR, regvalue_show, regvalue_store);
++
++/*
++ * Attributes
++ */
++FH_OTG_DEVICE_ATTR_BITFIELD_RO(mode, "Mode");
++FH_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable, "HNPCapable");
++FH_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable, "SRPCapable");
++FH_OTG_DEVICE_ATTR_BITFIELD_RW(hsic_connect, "HSIC Connect");
++FH_OTG_DEVICE_ATTR_BITFIELD_RW(inv_sel_hsic, "Invert Select HSIC");
++
++//FH_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode");
++//FH_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode");
++FH_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected, "Bus Connected");
++
++FH_OTG_DEVICE_ATTR_REG32_RW(gotgctl, 0, "GOTGCTL");
++FH_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,
++			     &(otg_dev->core_if->core_global_regs->gusbcfg),
++			     "GUSBCFG");
++FH_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,
++			     &(otg_dev->core_if->core_global_regs->grxfsiz),
++			     "GRXFSIZ");
++FH_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,
++			     &(otg_dev->core_if->core_global_regs->gnptxfsiz),
++			     "GNPTXFSIZ");
++FH_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,
++			     &(otg_dev->core_if->core_global_regs->gpvndctl),
++			     "GPVNDCTL");
++FH_OTG_DEVICE_ATTR_REG32_RW(ggpio,
++			     &(otg_dev->core_if->core_global_regs->ggpio),
++			     "GGPIO");
++FH_OTG_DEVICE_ATTR_REG32_RW(guid, &(otg_dev->core_if->core_global_regs->guid),
++			     "GUID");
++FH_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,
++			     &(otg_dev->core_if->core_global_regs->gsnpsid),
++			     "GSNPSID");
++FH_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed, "Device Speed");
++FH_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed, "Device Enumeration Speed");
++
++FH_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,
++			     &(otg_dev->core_if->core_global_regs->hptxfsiz),
++			     "HPTXFSIZ");
++FH_OTG_DEVICE_ATTR_REG32_RW(hprt0, otg_dev->core_if->host_if->hprt0, "HPRT0");
++
++/**
++ * @todo Add code to initiate the HNP.
++ */
++/**
++ * Show the HNP status bit
++ */
++static ssize_t hnp_show(struct device *_dev,
++			struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++	return sprintf(buf, "HstNegScs = 0x%x\n",
++		       fh_otg_get_hnpstatus(otg_dev->core_if));
++}
++
++/**
++ * Set the HNP Request bit
++ */
++static ssize_t hnp_store(struct device *_dev,
++			 struct device_attribute *attr,
++			 const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++	uint32_t in = simple_strtoul(buf, NULL, 16);
++	fh_otg_set_hnpreq(otg_dev->core_if, in);
++	return count;
++}
++
++DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store);
++
++/**
++ * @todo Add code to initiate the SRP.
++ */
++/**
++ * Show the SRP status bit
++ */
++static ssize_t srp_show(struct device *_dev,
++			struct device_attribute *attr, char *buf)
++{
++#ifndef FH_HOST_ONLY
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++	return sprintf(buf, "SesReqScs = 0x%x\n",
++		       fh_otg_get_srpstatus(otg_dev->core_if));
++#else
++	return sprintf(buf, "Host Only Mode!\n");
++#endif
++}
++
++/**
++ * Set the SRP Request bit
++ */
++static ssize_t srp_store(struct device *_dev,
++			 struct device_attribute *attr,
++			 const char *buf, size_t count)
++{
++#ifndef FH_HOST_ONLY
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++	fh_otg_pcd_initiate_srp(otg_dev->pcd);
++#endif
++	return count;
++}
++
++DEVICE_ATTR(srp, 0644, srp_show, srp_store);
++
++/**
++ * @todo Need to do more for power on/off?
++ */
++/**
++ * Show the Bus Power status
++ */
++static ssize_t buspower_show(struct device *_dev,
++			     struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++	return sprintf(buf, "Bus Power = 0x%x\n",
++		       fh_otg_get_prtpower(otg_dev->core_if));
++}
++
++/**
++ * Set the Bus Power status
++ */
++static ssize_t buspower_store(struct device *_dev,
++			      struct device_attribute *attr,
++			      const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++	uint32_t on = simple_strtoul(buf, NULL, 16);
++	fh_otg_set_prtpower(otg_dev->core_if, on);
++	return count;
++}
++
++DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store);
++
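++/*
++ * Usage sketch (hypothetical sysfs path): the value is parsed as hex and
++ * handed to fh_otg_set_prtpower().
++ *
++ *   echo 1 > /sys/devices/platform/<fh_otg-dev>/buspower   # port power on
++ *   cat /sys/devices/platform/<fh_otg-dev>/buspower        # -> Bus Power = 0x1
++ */
++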
++/**
++ * @todo Need to do more for suspend?
++ */
++/**
++ * Show the Bus Suspend status
++ */
++static ssize_t bussuspend_show(struct device *_dev,
++			       struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	return sprintf(buf, "Bus Suspend = 0x%x\n",
++		       fh_otg_get_prtsuspend(otg_dev->core_if));
++}
++
++/**
++ * Set the Bus Suspend status
++ */
++static ssize_t bussuspend_store(struct device *_dev,
++				struct device_attribute *attr,
++				const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t in = simple_strtoul(buf, NULL, 16);
++	fh_otg_set_prtsuspend(otg_dev->core_if, in);
++	return count;
++}
++
++DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store);
++
++/**
++ * Show the Mode Change Ready Timer status
++ */
++static ssize_t mode_ch_tim_en_show(struct device *_dev,
++				   struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	return sprintf(buf, "Mode Change Ready Timer Enable = 0x%x\n",
++		       fh_otg_get_mode_ch_tim(otg_dev->core_if));
++}
++
++/**
++ * Set the Mode Change Ready Timer status
++ */
++static ssize_t mode_ch_tim_en_store(struct device *_dev,
++				    struct device_attribute *attr,
++				    const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t in = simple_strtoul(buf, NULL, 16);
++	fh_otg_set_mode_ch_tim(otg_dev->core_if, in);
++	return count;
++}
++
++DEVICE_ATTR(mode_ch_tim_en, 0644, mode_ch_tim_en_show, mode_ch_tim_en_store);
++
++/**
++ * Show the value of HFIR Frame Interval bitfield
++ */
++static ssize_t fr_interval_show(struct device *_dev,
++				struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	return sprintf(buf, "Frame Interval = 0x%x\n",
++		       fh_otg_get_fr_interval(otg_dev->core_if));
++}
++
++/**
++ * Set the HFIR Frame Interval value
++ */
++static ssize_t fr_interval_store(struct device *_dev,
++				 struct device_attribute *attr,
++				 const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t in = simple_strtoul(buf, NULL, 10);
++	fh_otg_set_fr_interval(otg_dev->core_if, in);
++	return count;
++}
++
++DEVICE_ATTR(fr_interval, 0644, fr_interval_show, fr_interval_store);
++
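++/*
++ * Note: unlike most attributes in this file, which parse hex input,
++ * fr_interval_store() parses base 10. Usage sketch (hypothetical path):
++ *
++ *   echo 60000 > /sys/devices/platform/<fh_otg-dev>/fr_interval
++ *   cat /sys/devices/platform/<fh_otg-dev>/fr_interval  # -> Frame Interval = 0xea60
++ */
++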
++/**
++ * Show the status of Remote Wakeup.
++ */
++static ssize_t remote_wakeup_show(struct device *_dev,
++				  struct device_attribute *attr, char *buf)
++{
++#ifndef FH_HOST_ONLY
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	return sprintf(buf,
++		       "Remote Wakeup Sig = %d Enabled = %d LPM Remote Wakeup = %d\n",
++		       fh_otg_get_remotewakesig(otg_dev->core_if),
++		       fh_otg_pcd_get_rmwkup_enable(otg_dev->pcd),
++		       fh_otg_get_lpm_remotewakeenabled(otg_dev->core_if));
++#else
++	return sprintf(buf, "Host Only Mode!\n");
++#endif /* FH_HOST_ONLY */
++}
++
++/**
++ * Initiate a remote wakeup of the host.  The Device control register
++ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable
++ * flag is set.
++ *
++ */
++static ssize_t remote_wakeup_store(struct device *_dev,
++				   struct device_attribute *attr,
++				   const char *buf, size_t count)
++{
++#ifndef FH_HOST_ONLY
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t val = simple_strtoul(buf, NULL, 16);
++
++	if (val & 1) {
++		fh_otg_pcd_remote_wakeup(otg_dev->pcd, 1);
++	} else {
++		fh_otg_pcd_remote_wakeup(otg_dev->pcd, 0);
++	}
++#endif /* FH_HOST_ONLY */
++	return count;
++}
++
++DEVICE_ATTR(remote_wakeup, S_IRUGO | S_IWUSR, remote_wakeup_show,
++	    remote_wakeup_store);
++
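++/*
++ * Usage sketch (device mode only, hypothetical path): bit 0 of the written
++ * value starts remote-wakeup signalling; writing 0 stops it.
++ *
++ *   echo 1 > /sys/devices/platform/<fh_otg-dev>/remote_wakeup
++ */
++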
++/**
++ * Show whether the core is in hibernation or not.
++ */
++static ssize_t rem_wakeup_pwrdn_show(struct device *_dev,
++				     struct device_attribute *attr, char *buf)
++{
++#ifndef FH_HOST_ONLY
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++	if (fh_otg_get_core_state(otg_dev->core_if)) {
++		FH_PRINTF("Core is in hibernation\n");
++	} else {
++		FH_PRINTF("Core is not in hibernation\n");
++	}
++#endif /* FH_HOST_ONLY */
++	return 0;
++}
++
++extern int fh_otg_device_hibernation_restore(fh_otg_core_if_t * core_if,
++					      int rem_wakeup, int reset);
++
++/**
++ * Initiate a remote wakeup of the device to exit from hibernation.
++ */
++static ssize_t rem_wakeup_pwrdn_store(struct device *_dev,
++				      struct device_attribute *attr,
++				      const char *buf, size_t count)
++{
++#ifndef FH_HOST_ONLY
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++	fh_otg_device_hibernation_restore(otg_dev->core_if, 1, 0);
++#endif
++	return count;
++}
++
++DEVICE_ATTR(rem_wakeup_pwrdn, S_IRUGO | S_IWUSR, rem_wakeup_pwrdn_show,
++	    rem_wakeup_pwrdn_store);
++
++static ssize_t disconnect_us(struct device *_dev,
++			     struct device_attribute *attr,
++			     const char *buf, size_t count)
++{
++
++#ifndef FH_HOST_ONLY
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++	uint32_t val = simple_strtoul(buf, NULL, 16);
++	FH_PRINTF("The Passed value is %04x\n", val);
++
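++	/*
++	 * Note: the parsed value is only logged above; the duration passed
++	 * to fh_otg_pcd_disconnect_us() below is hardcoded to 50.
++	 */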
++	fh_otg_pcd_disconnect_us(otg_dev->pcd, 50);
++
++#endif /* FH_HOST_ONLY */
++	return count;
++}
++
++DEVICE_ATTR(disconnect_us, S_IWUSR, 0, disconnect_us);
++
++/**
++ * Dump global registers and either host or device registers (depending on the
++ * current mode of the core).
++ */
++static ssize_t regdump_show(struct device *_dev,
++			    struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	fh_otg_dump_global_registers(otg_dev->core_if);
++	if (fh_otg_is_host_mode(otg_dev->core_if)) {
++		fh_otg_dump_host_registers(otg_dev->core_if);
++	} else {
++		fh_otg_dump_dev_registers(otg_dev->core_if);
++	}
++	return sprintf(buf, "Register Dump\n");
++}
++
++DEVICE_ATTR(regdump, S_IRUGO, regdump_show, 0);
++
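++/*
++ * Note: the sysfs read itself returns only the marker string; the register
++ * contents are emitted by the fh_otg_dump_*() helpers, presumably to the
++ * kernel log. Usage sketch (hypothetical path):
++ *
++ *   cat /sys/devices/platform/<fh_otg-dev>/regdump && dmesg | tail
++ */
++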
++/**
++ * Dump global registers and either host or device registers (depending on the
++ * current mode of the core).
++ */
++static ssize_t spramdump_show(struct device *_dev,
++			      struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	fh_otg_dump_spram(otg_dev->core_if);
++
++	return sprintf(buf, "SPRAM Dump\n");
++}
++
++DEVICE_ATTR(spramdump, S_IRUGO, spramdump_show, 0);
++
++/**
++ * Dump the current hcd state.
++ */
++static ssize_t hcddump_show(struct device *_dev,
++			    struct device_attribute *attr, char *buf)
++{
++#ifndef FH_DEVICE_ONLY
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	fh_otg_hcd_dump_state(otg_dev->hcd);
++#endif /* FH_DEVICE_ONLY */
++	return sprintf(buf, "HCD Dump\n");
++}
++
++DEVICE_ATTR(hcddump, S_IRUGO, hcddump_show, 0);
++
++/**
++ * Dump the average frame remaining at SOF. This can be used to
++ * determine average interrupt latency. Frame remaining is also shown for
++ * start transfer and two additional sample points.
++ */
++static ssize_t hcd_frrem_show(struct device *_dev,
++			      struct device_attribute *attr, char *buf)
++{
++#ifndef FH_DEVICE_ONLY
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	fh_otg_hcd_dump_frrem(otg_dev->hcd);
++#endif /* FH_DEVICE_ONLY */
++	return sprintf(buf, "HCD Dump Frame Remaining\n");
++}
++
++DEVICE_ATTR(hcd_frrem, S_IRUGO, hcd_frrem_show, 0);
++
++/**
++ * Displays the time required to read the GNPTXFSIZ register many times (the
++ * output shows the number of times the register is read).
++ */
++#define RW_REG_COUNT 10000000
++#define MSEC_PER_JIFFIE (1000 / HZ)
++static ssize_t rd_reg_test_show(struct device *_dev,
++				struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	int i;
++	int time;
++	int start_jiffies;
++
++	printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
++	       HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
++	start_jiffies = jiffies;
++	for (i = 0; i < RW_REG_COUNT; i++) {
++		fh_otg_get_gnptxfsiz(otg_dev->core_if);
++	}
++	time = jiffies - start_jiffies;
++	return sprintf(buf,
++		       "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n",
++		       RW_REG_COUNT, time * MSEC_PER_JIFFIE, time);
++}
++
++DEVICE_ATTR(rd_reg_test, S_IRUGO, rd_reg_test_show, 0);
++
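++/*
++ * Worked example (assuming HZ=100, i.e. 10 ms per jiffy): if the loop of
++ * 10,000,000 reads takes 42 jiffies, the attribute reports
++ * "Time to read GNPTXFSIZ reg 10000000 times: 420 msecs (42 jiffies)".
++ * The measurement resolution is therefore one jiffy.
++ */
++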
++/**
++ * Displays the time required to write the GNPTXFSIZ register many times (the
++ * output shows the number of times the register is written).
++ */
++static ssize_t wr_reg_test_show(struct device *_dev,
++				struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t reg_val;
++	int i;
++	int time;
++	int start_jiffies;
++
++	printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
++	       HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
++	reg_val = fh_otg_get_gnptxfsiz(otg_dev->core_if);
++	start_jiffies = jiffies;
++	for (i = 0; i < RW_REG_COUNT; i++) {
++		fh_otg_set_gnptxfsiz(otg_dev->core_if, reg_val);
++	}
++	time = jiffies - start_jiffies;
++	return sprintf(buf,
++		       "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n",
++		       RW_REG_COUNT, time * MSEC_PER_JIFFIE, time);
++}
++
++DEVICE_ATTR(wr_reg_test, S_IRUGO, wr_reg_test_show, 0);
++
++#ifdef CONFIG_USB_FH_OTG_LPM
++
++/**
++* Show the lpm_response attribute.
++*/
++static ssize_t lpmresp_show(struct device *_dev,
++			    struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	if (!fh_otg_get_param_lpm_enable(otg_dev->core_if))
++		return sprintf(buf, "** LPM is DISABLED **\n");
++
++	if (!fh_otg_is_device_mode(otg_dev->core_if)) {
++		return sprintf(buf, "** Current mode is not device mode\n");
++	}
++	return sprintf(buf, "lpm_response = %d\n",
++		       fh_otg_get_lpmresponse(otg_dev->core_if));
++}
++
++/**
++* Store the lpm_response attribute.
++*/
++static ssize_t lpmresp_store(struct device *_dev,
++			     struct device_attribute *attr,
++			     const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t val = simple_strtoul(buf, NULL, 16);
++
++	if (!fh_otg_get_param_lpm_enable(otg_dev->core_if)) {
++		return 0;
++	}
++
++	if (!fh_otg_is_device_mode(otg_dev->core_if)) {
++		return 0;
++	}
++
++	fh_otg_set_lpmresponse(otg_dev->core_if, val);
++	return count;
++}
++
++DEVICE_ATTR(lpm_response, S_IRUGO | S_IWUSR, lpmresp_show, lpmresp_store);
++
++/**
++* Show the besl_reject attribute.
++*/
++static ssize_t beslreject_show(struct device *_dev,
++			    struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	if (!fh_otg_get_param_lpm_enable(otg_dev->core_if))
++		return sprintf(buf, "** LPM is DISABLED **\n");
++	if (!fh_otg_get_param_besl_enable(otg_dev->core_if))
++		return sprintf(buf, "** EnBesl is DISABLED **\n");
++
++	if (!fh_otg_is_device_mode(otg_dev->core_if)) {
++		return sprintf(buf, "** Current mode is not device mode\n");
++	}
++
++	return sprintf(buf, "besl_reject = %d\n",
++		       fh_otg_get_beslreject(otg_dev->core_if));
++}
++
++/**
++* Store the besl_reject attribute.
++*/
++static ssize_t beslreject_store(struct device *_dev,
++			     struct device_attribute *attr,
++			     const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t val = simple_strtoul(buf, NULL, 16);
++
++	if (!fh_otg_get_param_lpm_enable(otg_dev->core_if)) {
++		return 0;
++	}
++
++	if (!fh_otg_get_param_besl_enable(otg_dev->core_if)) {
++		return 0;
++	}
++
++	if (!fh_otg_is_device_mode(otg_dev->core_if)) {
++		return 0;
++	}
++
++	fh_otg_set_beslreject(otg_dev->core_if, val);
++
++	return count;
++}
++
++DEVICE_ATTR(besl_reject, S_IRUGO | S_IWUSR, beslreject_show, beslreject_store);
++
++/**
++* Show the hird_thresh attribute.
++*/
++static ssize_t hirdthresh_show(struct device *_dev,
++			    struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	if (!fh_otg_get_param_lpm_enable(otg_dev->core_if))
++		return sprintf(buf, "** LPM is DISABLED **\n");
++
++	if (!fh_otg_is_device_mode(otg_dev->core_if)) {
++		return sprintf(buf, "** Current mode is not device mode\n");
++	}
++
++	return sprintf(buf, "hirdthresh = 0x%x\n",
++		       fh_otg_get_hirdthresh(otg_dev->core_if));
++}
++
++/**
++* Store the hird_thresh attribute.
++*/
++static ssize_t hirdthresh_store(struct device *_dev,
++			     struct device_attribute *attr,
++			     const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	uint32_t val = simple_strtoul(buf, NULL, 16);
++
++	if (!fh_otg_get_param_lpm_enable(otg_dev->core_if)) {
++		return 0;
++	}
++
++	if (!fh_otg_is_device_mode(otg_dev->core_if)) {
++		return 0;
++	}
++
++	fh_otg_set_hirdthresh(otg_dev->core_if, val);
++
++	return count;
++}
++
++DEVICE_ATTR(hird_thres, S_IRUGO | S_IWUSR, hirdthresh_show, hirdthresh_store);
++
++/**
++* Show the sleep_status attribute.
++*/
++static ssize_t sleepstatus_show(struct device *_dev,
++				struct device_attribute *attr, char *buf)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	return sprintf(buf, "Sleep Status = %d\n",
++		       fh_otg_get_lpm_portsleepstatus(otg_dev->core_if));
++}
++
++/**
++ * Store the sleep_status attribute.
++ */
++static ssize_t sleepstatus_store(struct device *_dev,
++				 struct device_attribute *attr,
++				 const char *buf, size_t count)
++{
++#ifdef LM_INTERFACE
++	struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev);
++	fh_otg_device_t *otg_dev = lm_get_drvdata(lm_dev);
++#elif defined(PCI_INTERFACE)
++	fh_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++#endif
++
++	fh_otg_core_if_t *core_if = otg_dev->core_if;
++
++	if (fh_otg_get_lpm_portsleepstatus(otg_dev->core_if)) {
++		if (fh_otg_is_host_mode(core_if)) {
++
++			FH_PRINTF("Host initiated resume\n");
++			fh_otg_set_prtresume(otg_dev->core_if, 1);
++		}
++	}
++
++	return count;
++}
++
++DEVICE_ATTR(sleep_status, S_IRUGO | S_IWUSR, sleepstatus_show,
++	    sleepstatus_store);
++
++#endif /* CONFIG_USB_FH_OTG_LPM */
++
++/**@}*/
++
++/**
++ * Create the device files
++ */
++void fh_otg_attr_create(struct platform_device *dev)
++{
++	int error;
++
++	error = device_create_file(&dev->dev, &dev_attr_regoffset);
++	error = device_create_file(&dev->dev, &dev_attr_regvalue);
++	error = device_create_file(&dev->dev, &dev_attr_mode);
++	error = device_create_file(&dev->dev, &dev_attr_hnpcapable);
++	error = device_create_file(&dev->dev, &dev_attr_srpcapable);
++	error = device_create_file(&dev->dev, &dev_attr_hsic_connect);
++	error = device_create_file(&dev->dev, &dev_attr_inv_sel_hsic);
++	error = device_create_file(&dev->dev, &dev_attr_hnp);
++	error = device_create_file(&dev->dev, &dev_attr_srp);
++	error = device_create_file(&dev->dev, &dev_attr_buspower);
++	error = device_create_file(&dev->dev, &dev_attr_bussuspend);
++	error = device_create_file(&dev->dev, &dev_attr_mode_ch_tim_en);
++	error = device_create_file(&dev->dev, &dev_attr_fr_interval);
++	error = device_create_file(&dev->dev, &dev_attr_busconnected);
++	error = device_create_file(&dev->dev, &dev_attr_gotgctl);
++	error = device_create_file(&dev->dev, &dev_attr_gusbcfg);
++	error = device_create_file(&dev->dev, &dev_attr_grxfsiz);
++	error = device_create_file(&dev->dev, &dev_attr_gnptxfsiz);
++	error = device_create_file(&dev->dev, &dev_attr_gpvndctl);
++	error = device_create_file(&dev->dev, &dev_attr_ggpio);
++	error = device_create_file(&dev->dev, &dev_attr_guid);
++	error = device_create_file(&dev->dev, &dev_attr_gsnpsid);
++	error = device_create_file(&dev->dev, &dev_attr_devspeed);
++	error = device_create_file(&dev->dev, &dev_attr_enumspeed);
++	error = device_create_file(&dev->dev, &dev_attr_hptxfsiz);
++	error = device_create_file(&dev->dev, &dev_attr_hprt0);
++	error = device_create_file(&dev->dev, &dev_attr_remote_wakeup);
++	error = device_create_file(&dev->dev, &dev_attr_rem_wakeup_pwrdn);
++	error = device_create_file(&dev->dev, &dev_attr_disconnect_us);
++	error = device_create_file(&dev->dev, &dev_attr_regdump);
++	error = device_create_file(&dev->dev, &dev_attr_spramdump);
++	error = device_create_file(&dev->dev, &dev_attr_hcddump);
++	error = device_create_file(&dev->dev, &dev_attr_hcd_frrem);
++	error = device_create_file(&dev->dev, &dev_attr_rd_reg_test);
++	error = device_create_file(&dev->dev, &dev_attr_wr_reg_test);
++#ifdef CONFIG_USB_FH_OTG_LPM
++	error = device_create_file(&dev->dev, &dev_attr_lpm_response);
++	error = device_create_file(&dev->dev, &dev_attr_sleep_status);
++	error = device_create_file(&dev->dev, &dev_attr_besl_reject);
++	error = device_create_file(&dev->dev, &dev_attr_hird_thres);
++#endif
++}
++
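++/*
++ * Call-site sketch (an assumption based on the create/remove pairing, not
++ * taken from this patch): fh_otg_attr_create() is expected to run from the
++ * platform driver's probe() after drvdata is set, e.g.
++ *
++ *   fh_otg_attr_create(pdev);   // in probe()
++ *   fh_otg_attr_remove(pdev);   // in remove()
++ *
++ * Note that each device_create_file() result overwrites the previous one
++ * and is never checked, so a failed attribute is silently absent.
++ */
++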
++/**
++ * Remove the device files
++ */
++void fh_otg_attr_remove(struct platform_device *dev)
++{
++	device_remove_file(&dev->dev, &dev_attr_regoffset);
++	device_remove_file(&dev->dev, &dev_attr_regvalue);
++	device_remove_file(&dev->dev, &dev_attr_mode);
++	device_remove_file(&dev->dev, &dev_attr_hnpcapable);
++	device_remove_file(&dev->dev, &dev_attr_srpcapable);
++	device_remove_file(&dev->dev, &dev_attr_hsic_connect);
++	device_remove_file(&dev->dev, &dev_attr_inv_sel_hsic);
++	device_remove_file(&dev->dev, &dev_attr_hnp);
++	device_remove_file(&dev->dev, &dev_attr_srp);
++	device_remove_file(&dev->dev, &dev_attr_buspower);
++	device_remove_file(&dev->dev, &dev_attr_bussuspend);
++	device_remove_file(&dev->dev, &dev_attr_mode_ch_tim_en);
++	device_remove_file(&dev->dev, &dev_attr_fr_interval);
++	device_remove_file(&dev->dev, &dev_attr_busconnected);
++	device_remove_file(&dev->dev, &dev_attr_gotgctl);
++	device_remove_file(&dev->dev, &dev_attr_gusbcfg);
++	device_remove_file(&dev->dev, &dev_attr_grxfsiz);
++	device_remove_file(&dev->dev, &dev_attr_gnptxfsiz);
++	device_remove_file(&dev->dev, &dev_attr_gpvndctl);
++	device_remove_file(&dev->dev, &dev_attr_ggpio);
++	device_remove_file(&dev->dev, &dev_attr_guid);
++	device_remove_file(&dev->dev, &dev_attr_gsnpsid);
++	device_remove_file(&dev->dev, &dev_attr_devspeed);
++	device_remove_file(&dev->dev, &dev_attr_enumspeed);
++	device_remove_file(&dev->dev, &dev_attr_hptxfsiz);
++	device_remove_file(&dev->dev, &dev_attr_hprt0);
++	device_remove_file(&dev->dev, &dev_attr_remote_wakeup);
++	device_remove_file(&dev->dev, &dev_attr_rem_wakeup_pwrdn);
++	device_remove_file(&dev->dev, &dev_attr_disconnect_us);
++	device_remove_file(&dev->dev, &dev_attr_regdump);
++	device_remove_file(&dev->dev, &dev_attr_spramdump);
++	device_remove_file(&dev->dev, &dev_attr_hcddump);
++	device_remove_file(&dev->dev, &dev_attr_hcd_frrem);
++	device_remove_file(&dev->dev, &dev_attr_rd_reg_test);
++	device_remove_file(&dev->dev, &dev_attr_wr_reg_test);
++#ifdef CONFIG_USB_FH_OTG_LPM
++	device_remove_file(&dev->dev, &dev_attr_lpm_response);
++	device_remove_file(&dev->dev, &dev_attr_sleep_status);
++	device_remove_file(&dev->dev, &dev_attr_besl_reject);
++	device_remove_file(&dev->dev, &dev_attr_hird_thres);
++#endif
++}
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.h
+new file mode 100644
+index 00000000..cee86fd4
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_attr.h
+@@ -0,0 +1,76 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_attr.h $
++ * $Revision: #13 $
++ * $Date: 2010/06/21 $
++ * $Change: 1532021 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ * 
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ * 
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++#if !defined(__FH_OTG_ATTR_H__)
++#define __FH_OTG_ATTR_H__
++
++#include <linux/platform_device.h>
++
++/** @file
++ * This file contains the interface to the Linux device attributes.
++ */
++extern struct device_attribute dev_attr_regoffset;
++extern struct device_attribute dev_attr_regvalue;
++
++extern struct device_attribute dev_attr_mode;
++extern struct device_attribute dev_attr_hnpcapable;
++extern struct device_attribute dev_attr_srpcapable;
++extern struct device_attribute dev_attr_hnp;
++extern struct device_attribute dev_attr_srp;
++extern struct device_attribute dev_attr_buspower;
++extern struct device_attribute dev_attr_bussuspend;
++extern struct device_attribute dev_attr_mode_ch_tim_en;
++extern struct device_attribute dev_attr_fr_interval;
++extern struct device_attribute dev_attr_busconnected;
++extern struct device_attribute dev_attr_gotgctl;
++extern struct device_attribute dev_attr_gusbcfg;
++extern struct device_attribute dev_attr_grxfsiz;
++extern struct device_attribute dev_attr_gnptxfsiz;
++extern struct device_attribute dev_attr_gpvndctl;
++extern struct device_attribute dev_attr_ggpio;
++extern struct device_attribute dev_attr_guid;
++extern struct device_attribute dev_attr_gsnpsid;
++extern struct device_attribute dev_attr_devspeed;
++extern struct device_attribute dev_attr_enumspeed;
++extern struct device_attribute dev_attr_hptxfsiz;
++extern struct device_attribute dev_attr_hprt0;
++#ifdef CONFIG_USB_FH_OTG_LPM
++extern struct device_attribute dev_attr_lpm_response;
++extern struct device_attribute dev_attr_sleep_status;
++#endif
++
++void fh_otg_attr_create(struct platform_device *dev);
++
++void fh_otg_attr_remove(struct platform_device *dev);
++
++#endif
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.c
+new file mode 100644
+index 00000000..d4f3cb87
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.c
+@@ -0,0 +1,1876 @@
++/* ==========================================================================
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ * 
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ * 
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++/** @file
++ *
++ * This file contains most of the CFI (Core Feature Interface)
++ * implementation for the OTG.
++ */
++
++#ifdef FH_UTE_CFI
++
++#include "fh_otg_pcd.h"
++#include "fh_otg_cfi.h"
++
++/** This definition should actually migrate to the Portability Library */
++#define FH_CONSTANT_CPU_TO_LE16(x) (x)
++
++extern fh_otg_pcd_ep_t *get_ep_by_addr(fh_otg_pcd_t * pcd, u16 wIndex);
++
++static int cfi_core_features_buf(uint8_t * buf, uint16_t buflen);
++static int cfi_get_feature_value(uint8_t * buf, uint16_t buflen,
++				 struct fh_otg_pcd *pcd,
++				 struct cfi_usb_ctrlrequest *ctrl_req);
++static int cfi_set_feature_value(struct fh_otg_pcd *pcd);
++static int cfi_ep_get_sg_val(uint8_t * buf, struct fh_otg_pcd *pcd,
++			     struct cfi_usb_ctrlrequest *req);
++static int cfi_ep_get_concat_val(uint8_t * buf, struct fh_otg_pcd *pcd,
++				 struct cfi_usb_ctrlrequest *req);
++static int cfi_ep_get_align_val(uint8_t * buf, struct fh_otg_pcd *pcd,
++				struct cfi_usb_ctrlrequest *req);
++static int cfi_preproc_reset(struct fh_otg_pcd *pcd,
++			     struct cfi_usb_ctrlrequest *req);
++static void cfi_free_ep_bs_dyn_data(cfi_ep_t * cfiep);
++
++static uint16_t get_dfifo_size(fh_otg_core_if_t * core_if);
++static int32_t get_rxfifo_size(fh_otg_core_if_t * core_if, uint16_t wValue);
++static int32_t get_txfifo_size(struct fh_otg_pcd *pcd, uint16_t wValue);
++
++static uint8_t resize_fifos(fh_otg_core_if_t * core_if);
++
++/** This is the header of the all features descriptor */
++static cfi_all_features_header_t all_props_desc_header = {
++	.wVersion = FH_CONSTANT_CPU_TO_LE16(0x100),
++	.wCoreID = FH_CONSTANT_CPU_TO_LE16(CFI_CORE_ID_OTG),
++	.wNumFeatures = FH_CONSTANT_CPU_TO_LE16(9),
++};
++
++/** This is an array of statically allocated feature descriptors */
++static cfi_feature_desc_header_t prop_descs[] = {
++
++	/* FT_ID_DMA_MODE */
++	{
++	 .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_MODE),
++	 .bmAttributes = CFI_FEATURE_ATTR_RW,
++	 .wDataLength = FH_CONSTANT_CPU_TO_LE16(1),
++	 },
++
++	/* FT_ID_DMA_BUFFER_SETUP */
++	{
++	 .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_BUFFER_SETUP),
++	 .bmAttributes = CFI_FEATURE_ATTR_RW,
++	 .wDataLength = FH_CONSTANT_CPU_TO_LE16(6),
++	 },
++
++	/* FT_ID_DMA_BUFF_ALIGN */
++	{
++	 .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_BUFF_ALIGN),
++	 .bmAttributes = CFI_FEATURE_ATTR_RW,
++	 .wDataLength = FH_CONSTANT_CPU_TO_LE16(2),
++	 },
++
++	/* FT_ID_DMA_CONCAT_SETUP */
++	{
++	 .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_CONCAT_SETUP),
++	 .bmAttributes = CFI_FEATURE_ATTR_RW,
++	 //.wDataLength  = FH_CONSTANT_CPU_TO_LE16(6),
++	 },
++
++	/* FT_ID_DMA_CIRCULAR */
++	{
++	 .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DMA_CIRCULAR),
++	 .bmAttributes = CFI_FEATURE_ATTR_RW,
++	 .wDataLength = FH_CONSTANT_CPU_TO_LE16(6),
++	 },
++
++	/* FT_ID_THRESHOLD_SETUP */
++	{
++	 .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_THRESHOLD_SETUP),
++	 .bmAttributes = CFI_FEATURE_ATTR_RW,
++	 .wDataLength = FH_CONSTANT_CPU_TO_LE16(6),
++	 },
++
++	/* FT_ID_DFIFO_DEPTH */
++	{
++	 .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_DFIFO_DEPTH),
++	 .bmAttributes = CFI_FEATURE_ATTR_RO,
++	 .wDataLength = FH_CONSTANT_CPU_TO_LE16(2),
++	 },
++
++	/* FT_ID_TX_FIFO_DEPTH */
++	{
++	 .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_TX_FIFO_DEPTH),
++	 .bmAttributes = CFI_FEATURE_ATTR_RW,
++	 .wDataLength = FH_CONSTANT_CPU_TO_LE16(2),
++	 },
++
++	/* FT_ID_RX_FIFO_DEPTH */
++	{
++	 .wFeatureID = FH_CONSTANT_CPU_TO_LE16(FT_ID_RX_FIFO_DEPTH),
++	 .bmAttributes = CFI_FEATURE_ATTR_RW,
++	 .wDataLength = FH_CONSTANT_CPU_TO_LE16(2),
++	 }
++};
++
++/** The table of feature names */
++cfi_string_t prop_name_table[] = {
++	{FT_ID_DMA_MODE, "dma_mode"},
++	{FT_ID_DMA_BUFFER_SETUP, "buffer_setup"},
++	{FT_ID_DMA_BUFF_ALIGN, "buffer_align"},
++	{FT_ID_DMA_CONCAT_SETUP, "concat_setup"},
++	{FT_ID_DMA_CIRCULAR, "buffer_circular"},
++	{FT_ID_THRESHOLD_SETUP, "threshold_setup"},
++	{FT_ID_DFIFO_DEPTH, "dfifo_depth"},
++	{FT_ID_TX_FIFO_DEPTH, "txfifo_depth"},
++	{FT_ID_RX_FIFO_DEPTH, "rxfifo_depth"},
++	{}
++};
++
++/************************************************************************/
++
++/**
++ * Returns the name of the feature by its ID,
++ * or NULL if no feature ID matches.
++ */
++const uint8_t *get_prop_name(uint16_t prop_id, int *len)
++{
++	cfi_string_t *pstr;
++	*len = 0;
++
++	for (pstr = prop_name_table; pstr && pstr->s; pstr++) {
++		if (pstr->id == prop_id) {
++			*len = FH_STRLEN(pstr->s);
++			return pstr->s;
++		}
++	}
++	return NULL;
++}
++
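++/*
++ * Usage sketch: looking up a feature name for the descriptor buffer.
++ *
++ *   int len;
++ *   const uint8_t *name = get_prop_name(FT_ID_DMA_MODE, &len);
++ *   // name -> "dma_mode", len -> 8; NULL and len == 0 for an unknown ID
++ */
++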
++/**
++ * This function handles all CFI specific control requests.
++ * 
++ * Return a negative value to stall the DCE.
++ */
++int cfi_setup(struct fh_otg_pcd *pcd, struct cfi_usb_ctrlrequest *ctrl)
++{
++	int retval = 0;
++	fh_otg_pcd_ep_t *ep = NULL;
++	cfiobject_t *cfi = pcd->cfi;
++	struct fh_otg_core_if *coreif = GET_CORE_IF(pcd);
++	uint16_t wLen = FH_LE16_TO_CPU(&ctrl->wLength);
++	uint16_t wValue = FH_LE16_TO_CPU(&ctrl->wValue);
++	uint16_t wIndex = FH_LE16_TO_CPU(&ctrl->wIndex);
++	uint32_t regaddr = 0;
++	uint32_t regval = 0;
++
++	/* Save this Control Request in the CFI object. 
++	 * The data field will be assigned in the data stage completion CB function.
++	 */
++	cfi->ctrl_req = *ctrl;
++	cfi->ctrl_req.data = NULL;
++
++	cfi->need_gadget_att = 0;
++	cfi->need_status_in_complete = 0;
++
++	switch (ctrl->bRequest) {
++	case VEN_CORE_GET_FEATURES:
++		retval = cfi_core_features_buf(cfi->buf_in.buf, CFI_IN_BUF_LEN);
++		if (retval >= 0) {
++			//dump_msg(cfi->buf_in.buf, retval);
++			ep = &pcd->ep0;
++
++			retval = min((uint16_t) retval, wLen);
++			/* Transfer this buffer to the host through the EP0-IN EP */
++			ep->fh_ep.dma_addr = cfi->buf_in.addr;
++			ep->fh_ep.start_xfer_buff = cfi->buf_in.buf;
++			ep->fh_ep.xfer_buff = cfi->buf_in.buf;
++			ep->fh_ep.xfer_len = retval;
++			ep->fh_ep.xfer_count = 0;
++			ep->fh_ep.sent_zlp = 0;
++			ep->fh_ep.total_len = ep->fh_ep.xfer_len;
++
++			pcd->ep0_pending = 1;
++			fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
++		}
++		retval = 0;
++		break;
++
++	case VEN_CORE_GET_FEATURE:
++		CFI_INFO("VEN_CORE_GET_FEATURE\n");
++		retval = cfi_get_feature_value(cfi->buf_in.buf, CFI_IN_BUF_LEN,
++					       pcd, ctrl);
++		if (retval >= 0) {
++			ep = &pcd->ep0;
++
++			retval = min((uint16_t) retval, wLen);
++			/* Transfer this buffer to the host through the EP0-IN EP */
++			ep->fh_ep.dma_addr = cfi->buf_in.addr;
++			ep->fh_ep.start_xfer_buff = cfi->buf_in.buf;
++			ep->fh_ep.xfer_buff = cfi->buf_in.buf;
++			ep->fh_ep.xfer_len = retval;
++			ep->fh_ep.xfer_count = 0;
++			ep->fh_ep.sent_zlp = 0;
++			ep->fh_ep.total_len = ep->fh_ep.xfer_len;
++
++			pcd->ep0_pending = 1;
++			fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
++		}
++		CFI_INFO("VEN_CORE_GET_FEATURE=%d\n", retval);
++		dump_msg(cfi->buf_in.buf, retval);
++		break;
++
++	case VEN_CORE_SET_FEATURE:
++		CFI_INFO("VEN_CORE_SET_FEATURE\n");
++		/* Set up an XFER to get the data stage of the control request,
++		 * which is the new value of the feature to be modified.
++		 */
++		ep = &pcd->ep0;
++		ep->fh_ep.is_in = 0;
++		ep->fh_ep.dma_addr = cfi->buf_out.addr;
++		ep->fh_ep.start_xfer_buff = cfi->buf_out.buf;
++		ep->fh_ep.xfer_buff = cfi->buf_out.buf;
++		ep->fh_ep.xfer_len = wLen;
++		ep->fh_ep.xfer_count = 0;
++		ep->fh_ep.sent_zlp = 0;
++		ep->fh_ep.total_len = ep->fh_ep.xfer_len;
++
++		pcd->ep0_pending = 1;
++		/* Read the control write's data stage */
++		fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
++		retval = 0;
++		break;
++
++	case VEN_CORE_RESET_FEATURES:
++		CFI_INFO("VEN_CORE_RESET_FEATURES\n");
++		cfi->need_gadget_att = 1;
++		cfi->need_status_in_complete = 1;
++		retval = cfi_preproc_reset(pcd, ctrl);
++		CFI_INFO("VEN_CORE_RESET_FEATURES = (%d)\n", retval);
++		break;
++
++	case VEN_CORE_ACTIVATE_FEATURES:
++		CFI_INFO("VEN_CORE_ACTIVATE_FEATURES\n");
++		break;
++
++	case VEN_CORE_READ_REGISTER:
++		CFI_INFO("VEN_CORE_READ_REGISTER\n");
++		/* wValue optionally contains the HI WORD of the register offset and
++		 * wIndex contains the LOW WORD of the register offset 
++		 */
++		if (wValue == 0) {
++			/* @TODO - MAS - fix the access to the base field */
++			regaddr = 0;
++			//regaddr = (uint32_t) pcd->otg_dev->os_dep.base;
++			//GET_CORE_IF(pcd)->co
++			regaddr |= wIndex;
++		} else {
++			regaddr = (wValue << 16) | wIndex;
++		}
++
++		/* Read a 32-bit value of the memory at the regaddr */
++		regval = FH_READ_REG32((uint32_t *) regaddr);
++
++		ep = &pcd->ep0;
++		fh_memcpy(cfi->buf_in.buf, &regval, sizeof(uint32_t));
++		ep->fh_ep.is_in = 1;
++		ep->fh_ep.dma_addr = cfi->buf_in.addr;
++		ep->fh_ep.start_xfer_buff = cfi->buf_in.buf;
++		ep->fh_ep.xfer_buff = cfi->buf_in.buf;
++		ep->fh_ep.xfer_len = wLen;
++		ep->fh_ep.xfer_count = 0;
++		ep->fh_ep.sent_zlp = 0;
++		ep->fh_ep.total_len = ep->fh_ep.xfer_len;
++
++		pcd->ep0_pending = 1;
++		fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
++		cfi->need_gadget_att = 0;
++		retval = 0;
++		break;
++
++	case VEN_CORE_WRITE_REGISTER:
++		CFI_INFO("VEN_CORE_WRITE_REGISTER\n");
++		/* Set up an XFER to get the data stage of the control request,
++		 * which is the new value of the register to be modified.
++		 */
++		ep = &pcd->ep0;
++		ep->fh_ep.is_in = 0;
++		ep->fh_ep.dma_addr = cfi->buf_out.addr;
++		ep->fh_ep.start_xfer_buff = cfi->buf_out.buf;
++		ep->fh_ep.xfer_buff = cfi->buf_out.buf;
++		ep->fh_ep.xfer_len = wLen;
++		ep->fh_ep.xfer_count = 0;
++		ep->fh_ep.sent_zlp = 0;
++		ep->fh_ep.total_len = ep->fh_ep.xfer_len;
++
++		pcd->ep0_pending = 1;
++		/* Read the control write's data stage */
++		fh_otg_ep0_start_transfer(coreif, &ep->fh_ep);
++		retval = 0;
++		break;
++
++	default:
++		retval = -FH_E_NOT_SUPPORTED;
++		break;
++	}
++
++	return retval;
++}
++
++/**
++ * This function prepares the core feature descriptors and copies their
++ * raw representation into the buffer <buf>.
++ * 
++ * The buffer structure is as follows:
++ *	all_features_header (8 bytes)
++ *	features_#1 (8 bytes + feature name string length)
++ *	features_#2 (8 bytes + feature name string length)
++ *	.....
++ *	features_#n - where n=the total count of feature descriptors
++ */
++static int cfi_core_features_buf(uint8_t * buf, uint16_t buflen)
++{
++	cfi_feature_desc_header_t *prop_hdr = prop_descs;
++	cfi_feature_desc_header_t *prop;
++	cfi_all_features_header_t *all_props_hdr = &all_props_desc_header;
++	cfi_all_features_header_t *tmp;
++	uint8_t *tmpbuf = buf;
++	const uint8_t *pname = NULL;
++	int i, j, namelen = 0, totlen;
++
++	/* Prepare and copy the core features into the buffer */
++	CFI_INFO("%s:\n", __func__);
++
++	tmp = (cfi_all_features_header_t *) tmpbuf;
++	*tmp = *all_props_hdr;
++	tmpbuf += CFI_ALL_FEATURES_HDR_LEN;
++
++	j = sizeof(prop_descs) / sizeof(prop_descs[0]);
++	for (i = 0; i < j; i++, prop_hdr++) {
++		pname = get_prop_name(prop_hdr->wFeatureID, &namelen);
++		prop = (cfi_feature_desc_header_t *) tmpbuf;
++		*prop = *prop_hdr;
++
++		prop->bNameLen = namelen;
++		prop->wLength =
++		    FH_CONSTANT_CPU_TO_LE16(CFI_FEATURE_DESC_HDR_LEN +
++					     namelen);
++
++		tmpbuf += CFI_FEATURE_DESC_HDR_LEN;
++		fh_memcpy(tmpbuf, pname, namelen);
++		tmpbuf += namelen;
++	}
++
++	totlen = tmpbuf - buf;
++
++	if (totlen > 0) {
++		tmp = (cfi_all_features_header_t *) buf;
++		tmp->wTotalLen = FH_CONSTANT_CPU_TO_LE16(totlen);
++	}
++
++	return totlen;
++}
++
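++/*
++ * Worked size example (assuming CFI_ALL_FEATURES_HDR_LEN and
++ * CFI_FEATURE_DESC_HDR_LEN are both 8 bytes): with the 9 features in
++ * prop_descs and name lengths of 8+12+12+12+15+15+11+12+12 = 109 bytes,
++ * the function returns totlen = 8 + 9*8 + 109 = 189.
++ */
++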
++/**
++ * This function releases all the dynamic memory in the CFI object.
++ */
++static void cfi_release(cfiobject_t * cfiobj)
++{
++	cfi_ep_t *cfiep;
++	fh_list_link_t *tmp;
++
++	CFI_INFO("%s\n", __func__);
++
++	if (cfiobj->buf_in.buf) {
++		FH_DMA_FREE(CFI_IN_BUF_LEN, cfiobj->buf_in.buf,
++			     cfiobj->buf_in.addr);
++		cfiobj->buf_in.buf = NULL;
++	}
++
++	if (cfiobj->buf_out.buf) {
++		FH_DMA_FREE(CFI_OUT_BUF_LEN, cfiobj->buf_out.buf,
++			     cfiobj->buf_out.addr);
++		cfiobj->buf_out.buf = NULL;
++	}
++
++	/* Free the Buffer Setup values for each EP */
++	//list_for_each_entry(cfiep, &cfiobj->active_eps, lh) {
++	FH_LIST_FOREACH(tmp, &cfiobj->active_eps) {
++		cfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
++		cfi_free_ep_bs_dyn_data(cfiep);
++	}
++}
++
++/**
++ * This function frees the dynamically allocated EP buffer setup data.
++ */
++static void cfi_free_ep_bs_dyn_data(cfi_ep_t * cfiep)
++{
++	if (cfiep->bm_sg) {
++		FH_FREE(cfiep->bm_sg);
++		cfiep->bm_sg = NULL;
++	}
++
++	if (cfiep->bm_align) {
++		FH_FREE(cfiep->bm_align);
++		cfiep->bm_align = NULL;
++	}
++
++	if (cfiep->bm_concat) {
++		if (NULL != cfiep->bm_concat->wTxBytes) {
++			FH_FREE(cfiep->bm_concat->wTxBytes);
++			cfiep->bm_concat->wTxBytes = NULL;
++		}
++		FH_FREE(cfiep->bm_concat);
++		cfiep->bm_concat = NULL;
++	}
++}
++
++/**
++ * This function initializes the default values of the features
++ * for a specific endpoint and should be called only once when
++ * the EP is enabled first time.
++ */
++static int cfi_ep_init_defaults(struct fh_otg_pcd *pcd, cfi_ep_t * cfiep)
++{
++	int retval = 0;
++
++	cfiep->bm_sg = FH_ALLOC(sizeof(ddma_sg_buffer_setup_t));
++	if (NULL == cfiep->bm_sg) {
++		CFI_INFO("Failed to allocate memory for SG feature value\n");
++		return -FH_E_NO_MEMORY;
++	}
++	fh_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
++
++	/* For the Concatenation feature's default value we do not allocate
++	 * memory for the wTxBytes field - it will be done in the set_feature_value
++	 * request handler.
++	 */
++	cfiep->bm_concat = FH_ALLOC(sizeof(ddma_concat_buffer_setup_t));
++	if (NULL == cfiep->bm_concat) {
++		CFI_INFO
++		    ("Failed to allocate memory for CONCATENATION feature value\n");
++		FH_FREE(cfiep->bm_sg);
++		return -FH_E_NO_MEMORY;
++	}
++	fh_memset(cfiep->bm_concat, 0, sizeof(ddma_concat_buffer_setup_t));
++
++	cfiep->bm_align = FH_ALLOC(sizeof(ddma_align_buffer_setup_t));
++	if (NULL == cfiep->bm_align) {
++		CFI_INFO
++		    ("Failed to allocate memory for Alignment feature value\n");
++		FH_FREE(cfiep->bm_sg);
++		FH_FREE(cfiep->bm_concat);
++		return -FH_E_NO_MEMORY;
++	}
++	fh_memset(cfiep->bm_align, 0, sizeof(ddma_align_buffer_setup_t));
++
++	return retval;
++}
++
++/**
++ * The callback function that notifies the CFI on the activation of
++ * an endpoint in the PCD. The following steps are done in this function:
++ *
++ *	Create a dynamically allocated cfi_ep_t object (a CFI wrapper to the PCD's 
++ *		active endpoint)
++ *	Create MAX_DMA_DESCS_PER_EP count DMA Descriptors for the EP
++ *	Set the Buffer Mode to standard
++ *	Initialize the default values for all EP modes (SG, Circular, Concat, Align)
++ *	Add the cfi_ep_t object to the list of active endpoints in the CFI object
++ */
++static int cfi_ep_enable(struct cfiobject *cfi, struct fh_otg_pcd *pcd,
++			 struct fh_otg_pcd_ep *ep)
++{
++	cfi_ep_t *cfiep;
++	int retval = -FH_E_NOT_SUPPORTED;
++
++	CFI_INFO("%s: epname=%s; epnum=0x%02x\n", __func__,
++		 "EP_" /*ep->ep.name */ , ep->desc->bEndpointAddress);
++	/* MAS - Check whether this endpoint already is in the list */
++	cfiep = get_cfi_ep_by_pcd_ep(cfi, ep);
++
++	if (NULL == cfiep) {
++		/* Allocate a cfi_ep_t object */
++		cfiep = FH_ALLOC(sizeof(cfi_ep_t));
++		if (NULL == cfiep) {
++			CFI_INFO
++			    ("Unable to allocate memory for <cfiep> in function %s\n",
++			     __func__);
++			return -FH_E_NO_MEMORY;
++		}
++		fh_memset(cfiep, 0, sizeof(cfi_ep_t));
++
++		/* Save the fh_otg_pcd_ep pointer in the cfiep object */
++		cfiep->ep = ep;
++
++		/* Allocate the DMA Descriptors chain of MAX_DMA_DESCS_PER_EP count */
++		ep->fh_ep.descs =
++		    FH_DMA_ALLOC(MAX_DMA_DESCS_PER_EP *
++				  sizeof(fh_otg_dma_desc_t),
++				  &ep->fh_ep.descs_dma_addr);
++
++		if (NULL == ep->fh_ep.descs) {
++			FH_FREE(cfiep);
++			return -FH_E_NO_MEMORY;
++		}
++
++		FH_LIST_INIT(&cfiep->lh);
++
++		/* Set the buffer mode to BM_STANDARD. It will be modified 
++		 * when building descriptors for a specific buffer mode */
++		ep->fh_ep.buff_mode = BM_STANDARD;
++
++		/* Create and initialize the default values for this EP's Buffer modes */
++		if ((retval = cfi_ep_init_defaults(pcd, cfiep)) < 0)
++			return retval;
++
++		/* Add the cfi_ep_t object to the CFI object's list of active endpoints */
++		FH_LIST_INSERT_TAIL(&cfi->active_eps, &cfiep->lh);
++		retval = 0;
++	} else {		/* The sought EP is already in the list */
++		CFI_INFO("%s: The sought EP is already in the list\n",
++			 __func__);
++	}
++
++	return retval;
++}
++
++/**
++ * This function is called when the data stage of a 3-stage Control Write request
++ * is complete.
++ * 
++ */
++static int cfi_ctrl_write_complete(struct cfiobject *cfi,
++				   struct fh_otg_pcd *pcd)
++{
++	uint32_t addr, reg_value;
++	uint16_t wIndex, wValue;
++	uint8_t bRequest;
++	uint8_t *buf = cfi->buf_out.buf;
++	//struct usb_ctrlrequest *ctrl_req = &cfi->ctrl_req_saved;
++	struct cfi_usb_ctrlrequest *ctrl_req = &cfi->ctrl_req;
++	int retval = -FH_E_NOT_SUPPORTED;
++
++	CFI_INFO("%s\n", __func__);
++
++	bRequest = ctrl_req->bRequest;
++	wIndex = FH_CONSTANT_CPU_TO_LE16(ctrl_req->wIndex);
++	wValue = FH_CONSTANT_CPU_TO_LE16(ctrl_req->wValue);
++
++	/* 
++	 * Save the pointer to the data stage in the ctrl_req's <data> field.
++	 * The request should be already saved in the command stage by now.
++	 */
++	ctrl_req->data = cfi->buf_out.buf;
++	cfi->need_status_in_complete = 0;
++	cfi->need_gadget_att = 0;
++
++	switch (bRequest) {
++	case VEN_CORE_WRITE_REGISTER:
++		/* The buffer contains raw data of the new value for the register */
++		reg_value = *((uint32_t *) buf);
++		if (wValue == 0) {
++			addr = 0;
++			//addr = (uint32_t) pcd->otg_dev->os_dep.base;
++			addr += wIndex;
++		} else {
++			addr = (wValue << 16) | wIndex;
++		}
++
++		//writel(reg_value, addr);
++
++		retval = 0;
++		cfi->need_status_in_complete = 1;
++		break;
++
++	case VEN_CORE_SET_FEATURE:
++		/* The buffer contains raw data of the new value of the feature */
++		retval = cfi_set_feature_value(pcd);
++		if (retval < 0)
++			return retval;
++
++		cfi->need_status_in_complete = 1;
++		break;
++
++	default:
++		break;
++	}
++
++	return retval;
++}
++
++/**
++ * This function builds the DMA descriptors for the SG buffer mode.
++ */
++static void cfi_build_sg_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
++			       fh_otg_pcd_request_t * req)
++{
++	struct fh_otg_pcd_ep *ep = cfiep->ep;
++	ddma_sg_buffer_setup_t *sgval = cfiep->bm_sg;
++	struct fh_otg_dma_desc *desc = cfiep->ep->fh_ep.descs;
++	struct fh_otg_dma_desc *desc_last = cfiep->ep->fh_ep.descs;
++	dma_addr_t buff_addr = req->dma;
++	int i;
++	uint32_t txsize, off;
++
++	txsize = sgval->wSize;
++	off = sgval->bOffset;
++
++//      CFI_INFO("%s: %s TXSIZE=0x%08x; OFFSET=0x%08x\n", 
++//              __func__, cfiep->ep->ep.name, txsize, off);
++
++	for (i = 0; i < sgval->bCount; i++) {
++		desc->status.b.bs = BS_HOST_BUSY;
++		desc->buf = buff_addr;
++		desc->status.b.l = 0;
++		desc->status.b.ioc = 0;
++		desc->status.b.sp = 0;
++		desc->status.b.bytes = txsize;
++		desc->status.b.bs = BS_HOST_READY;
++
++		/* Set the next address of the buffer */
++		buff_addr += txsize + off;
++		desc_last = desc;
++		desc++;
++	}
++
++	/* Set the last, ioc and sp bits on the Last DMA Descriptor */
++	desc_last->status.b.l = 1;
++	desc_last->status.b.ioc = 1;
++	desc_last->status.b.sp = ep->fh_ep.sent_zlp;
++	/* Save the last DMA descriptor pointer */
++	cfiep->dma_desc_last = desc_last;
++	cfiep->desc_count = sgval->bCount;
++}
++
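++/*
++ * Worked layout example (hypothetical values): with wSize = 512,
++ * bOffset = 16 and bCount = 3, the descriptors point at req->dma,
++ * req->dma + 528 and req->dma + 1056 -- each buffer is followed by a
++ * 16-byte gap, and only the last descriptor carries the L and IOC bits.
++ */
++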
++/**
++ * This function builds the DMA descriptors for the Concatenation buffer mode.
++ */
++static void cfi_build_concat_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
++				   fh_otg_pcd_request_t * req)
++{
++	struct fh_otg_pcd_ep *ep = cfiep->ep;
++	ddma_concat_buffer_setup_t *concatval = cfiep->bm_concat;
++	struct fh_otg_dma_desc *desc = cfiep->ep->fh_ep.descs;
++	struct fh_otg_dma_desc *desc_last = cfiep->ep->fh_ep.descs;
++	dma_addr_t buff_addr = req->dma;
++	int i;
++	uint16_t *txsize;
++
++	txsize = concatval->wTxBytes;
++
++	for (i = 0; i < concatval->hdr.bDescCount; i++) {
++		desc->buf = buff_addr;
++		desc->status.b.bs = BS_HOST_BUSY;
++		desc->status.b.l = 0;
++		desc->status.b.ioc = 0;
++		desc->status.b.sp = 0;
++		desc->status.b.bytes = *txsize;
++		desc->status.b.bs = BS_HOST_READY;
++
++		txsize++;
++		/* Set the next address of the buffer */
++		buff_addr += UGETW(ep->desc->wMaxPacketSize);
++		desc_last = desc;
++		desc++;
++	}
++
++	/* Set the last, ioc and sp bits on the Last DMA Descriptor */
++	desc_last->status.b.l = 1;
++	desc_last->status.b.ioc = 1;
++	desc_last->status.b.sp = ep->fh_ep.sent_zlp;
++	cfiep->dma_desc_last = desc_last;
++	cfiep->desc_count = concatval->hdr.bDescCount;
++}
++
++/**
++ * This function builds the DMA descriptors for the Circular buffer mode
++ */
++static void cfi_build_circ_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
++				 fh_otg_pcd_request_t * req)
++{
++	/* @todo: MAS - add implementation when this feature needs to be tested */
++}
++
++/**
++ * This function builds the DMA descriptors for the Alignment buffer mode
++ */
++static void cfi_build_align_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
++				  fh_otg_pcd_request_t * req)
++{
++	struct fh_otg_pcd_ep *ep = cfiep->ep;
++	ddma_align_buffer_setup_t *alignval = cfiep->bm_align;
++	struct fh_otg_dma_desc *desc = cfiep->ep->fh_ep.descs;
++	dma_addr_t buff_addr = req->dma;
++
++	desc->status.b.bs = BS_HOST_BUSY;
++	desc->status.b.l = 1;
++	desc->status.b.ioc = 1;
++	desc->status.b.sp = ep->fh_ep.sent_zlp;
++	desc->status.b.bytes = req->length;
++	/* Adjust the buffer alignment */
++	desc->buf = (buff_addr + alignval->bAlign);
++	desc->status.b.bs = BS_HOST_READY;
++	cfiep->dma_desc_last = desc;
++	cfiep->desc_count = 1;
++}
++
++/**
++ * This function builds the DMA descriptors chain for different modes of the
++ * buffer setup of an endpoint.
++ */
++static void cfi_build_descriptors(struct cfiobject *cfi,
++				  struct fh_otg_pcd *pcd,
++				  struct fh_otg_pcd_ep *ep,
++				  fh_otg_pcd_request_t * req)
++{
++	cfi_ep_t *cfiep;
++
++	/* Get the cfiep by the fh_otg_pcd_ep */
++	cfiep = get_cfi_ep_by_pcd_ep(cfi, ep);
++	if (NULL == cfiep) {
++		CFI_INFO("%s: Unable to find a matching active endpoint\n",
++			 __func__);
++		return;
++	}
++
++	cfiep->xfer_len = req->length;
++
++	/* Iterate through all the DMA descriptors */
++	switch (cfiep->ep->fh_ep.buff_mode) {
++	case BM_SG:
++		cfi_build_sg_descs(cfi, cfiep, req);
++		break;
++
++	case BM_CONCAT:
++		cfi_build_concat_descs(cfi, cfiep, req);
++		break;
++
++	case BM_CIRCULAR:
++		cfi_build_circ_descs(cfi, cfiep, req);
++		break;
++
++	case BM_ALIGN:
++		cfi_build_align_descs(cfi, cfiep, req);
++		break;
++
++	default:
++		break;
++	}
++}
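++
++#if 0
++/*
++ * Minimal sketch (not part of the driver, name is hypothetical) of the
++ * descriptor-chain pattern shared by the builders above: each descriptor
++ * is written while marked BS_HOST_BUSY, handed over with BS_HOST_READY,
++ * and only the last one carries the L (last) and IOC bits.
++ */
++static void example_fill_chain(struct fh_otg_dma_desc *desc, dma_addr_t buf,
++			       uint32_t bytes, int count)
++{
++	int i;
++
++	for (i = 0; i < count; i++, desc++) {
++		desc->status.b.bs = BS_HOST_BUSY;
++		desc->buf = buf + i * bytes;
++		desc->status.b.bytes = bytes;
++		desc->status.b.l = (i == count - 1);
++		desc->status.b.ioc = (i == count - 1);
++		desc->status.b.bs = BS_HOST_READY;
++	}
++}
++#endif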
++
++/**
++ * Allocate DMA buffer for different Buffer modes.
++ */
++static void *cfi_ep_alloc_buf(struct cfiobject *cfi, struct fh_otg_pcd *pcd,
++			      struct fh_otg_pcd_ep *ep, dma_addr_t * dma,
++			      unsigned size, gfp_t flags)
++{
++	return FH_DMA_ALLOC(size, dma);
++}
++
++/**
++ * This function initializes the CFI object.
++ */
++int init_cfi(cfiobject_t * cfiobj)
++{
++	CFI_INFO("%s\n", __func__);
++
++	/* Allocate a buffer for IN XFERs */
++	cfiobj->buf_in.buf =
++	    FH_DMA_ALLOC(CFI_IN_BUF_LEN, &cfiobj->buf_in.addr);
++	if (NULL == cfiobj->buf_in.buf) {
++		CFI_INFO("Unable to allocate buffer for INs\n");
++		return -FH_E_NO_MEMORY;
++	}
++
++	/* Allocate a buffer for OUT XFERs */
++	cfiobj->buf_out.buf =
++	    FH_DMA_ALLOC(CFI_OUT_BUF_LEN, &cfiobj->buf_out.addr);
++	if (NULL == cfiobj->buf_out.buf) {
++		CFI_INFO("Unable to allocate buffer for OUT\n");
++		return -FH_E_NO_MEMORY;
++	}
++
++	/* Initialize the callback function pointers */
++	cfiobj->ops.release = cfi_release;
++	cfiobj->ops.ep_enable = cfi_ep_enable;
++	cfiobj->ops.ctrl_write_complete = cfi_ctrl_write_complete;
++	cfiobj->ops.build_descriptors = cfi_build_descriptors;
++	cfiobj->ops.ep_alloc_buf = cfi_ep_alloc_buf;
++
++	/* Initialize the list of active endpoints in the CFI object */
++	FH_LIST_INIT(&cfiobj->active_eps);
++
++	return 0;
++}
++
++/**
++ * This function reads the required feature's current value into the buffer
++ *
++ * @retval: Returns negative as error, or the data length of the feature  
++ */
++static int cfi_get_feature_value(uint8_t * buf, uint16_t buflen,
++				 struct fh_otg_pcd *pcd,
++				 struct cfi_usb_ctrlrequest *ctrl_req)
++{
++	int retval = -FH_E_NOT_SUPPORTED;
++	struct fh_otg_core_if *coreif = GET_CORE_IF(pcd);
++	uint16_t dfifo, rxfifo, txfifo;
++
++	switch (ctrl_req->wIndex) {
++		/* Whether the DDMA is enabled or not */
++	case FT_ID_DMA_MODE:
++		*buf = (coreif->dma_enable && coreif->dma_desc_enable) ? 1 : 0;
++		retval = 1;
++		break;
++
++	case FT_ID_DMA_BUFFER_SETUP:
++		retval = cfi_ep_get_sg_val(buf, pcd, ctrl_req);
++		break;
++
++	case FT_ID_DMA_BUFF_ALIGN:
++		retval = cfi_ep_get_align_val(buf, pcd, ctrl_req);
++		break;
++
++	case FT_ID_DMA_CONCAT_SETUP:
++		retval = cfi_ep_get_concat_val(buf, pcd, ctrl_req);
++		break;
++
++	case FT_ID_DMA_CIRCULAR:
++		CFI_INFO("GetFeature value (FT_ID_DMA_CIRCULAR)\n");
++		break;
++
++	case FT_ID_THRESHOLD_SETUP:
++		CFI_INFO("GetFeature value (FT_ID_THRESHOLD_SETUP)\n");
++		break;
++
++	case FT_ID_DFIFO_DEPTH:
++		dfifo = get_dfifo_size(coreif);
++		*((uint16_t *) buf) = dfifo;
++		retval = sizeof(uint16_t);
++		break;
++
++	case FT_ID_TX_FIFO_DEPTH:
++		retval = get_txfifo_size(pcd, ctrl_req->wValue);
++		if (retval >= 0) {
++			txfifo = retval;
++			*((uint16_t *) buf) = txfifo;
++			retval = sizeof(uint16_t);
++		}
++		break;
++
++	case FT_ID_RX_FIFO_DEPTH:
++		retval = get_rxfifo_size(coreif, ctrl_req->wValue);
++		if (retval >= 0) {
++			rxfifo = retval;
++			*((uint16_t *) buf) = rxfifo;
++			retval = sizeof(uint16_t);
++		}
++		break;
++	}
++
++	return retval;
++}
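++
++/*
++ * Illustrative note (derived from the FIFO helpers later in this file):
++ * for the FIFO depth features the high byte of wValue selects the source,
++ * 0 for the power-on depth (capped at 32768) and 1 for the currently
++ * programmed depth, while for FT_ID_TX_FIFO_DEPTH the low byte carries the
++ * IN endpoint address. The result is returned in buf as a 2-byte value.
++ */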
++
++/**
++ * This function resets the SG for the specified EP to its default value
++ */
++static int cfi_reset_sg_val(cfi_ep_t * cfiep)
++{
++	fh_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
++	return 0;
++}
++
++/**
++ * This function resets the Alignment for the specified EP to its default value
++ */
++static int cfi_reset_align_val(cfi_ep_t * cfiep)
++{
++	fh_memset(cfiep->bm_align, 0, sizeof(ddma_align_buffer_setup_t));
++	return 0;
++}
++
++/**
++ * This function resets the Concatenation for the specified EP to its default value
++ * This function will also set the value of the wTxBytes field to NULL after 
++ * freeing the memory previously allocated for this field.
++ */
++static int cfi_reset_concat_val(cfi_ep_t * cfiep)
++{
++	/* First we need to free the wTxBytes field */
++	if (cfiep->bm_concat->wTxBytes) {
++		FH_FREE(cfiep->bm_concat->wTxBytes);
++		cfiep->bm_concat->wTxBytes = NULL;
++	}
++
++	fh_memset(cfiep->bm_concat, 0, sizeof(ddma_concat_buffer_setup_t));
++	return 0;
++}
++
++/**
++ * This function resets all the buffer setups of the specified endpoint
++ */
++static int cfi_ep_reset_all_setup_vals(cfi_ep_t * cfiep)
++{
++	cfi_reset_sg_val(cfiep);
++	cfi_reset_align_val(cfiep);
++	cfi_reset_concat_val(cfiep);
++	return 0;
++}
++
++static int cfi_handle_reset_fifo_val(struct fh_otg_pcd *pcd, uint8_t ep_addr,
++				     uint8_t rx_rst, uint8_t tx_rst)
++{
++	int retval = -FH_E_INVALID;
++	uint16_t tx_siz[15];
++	uint16_t rx_siz = 0;
++	fh_otg_pcd_ep_t *ep = NULL;
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
++
++	if (rx_rst) {
++		rx_siz = params->dev_rx_fifo_size;
++		params->dev_rx_fifo_size = GET_CORE_IF(pcd)->init_rxfsiz;
++	}
++
++	if (tx_rst) {
++		if (ep_addr == 0) {
++			int i;
++
++			for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++				tx_siz[i] =
++				    core_if->core_params->dev_tx_fifo_size[i];
++				core_if->core_params->dev_tx_fifo_size[i] =
++				    core_if->init_txfsiz[i];
++			}
++		} else {
++
++			ep = get_ep_by_addr(pcd, ep_addr);
++
++			if (NULL == ep) {
++				CFI_INFO
++				    ("%s: Unable to get the endpoint addr=0x%02x\n",
++				     __func__, ep_addr);
++				return -FH_E_INVALID;
++			}
++
++			tx_siz[0] =
++			    params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num -
++						     1];
++			params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num - 1] =
++			    GET_CORE_IF(pcd)->init_txfsiz[ep->
++							  fh_ep.tx_fifo_num -
++							  1];
++		}
++	}
++
++	if (resize_fifos(GET_CORE_IF(pcd))) {
++		retval = 0;
++	} else {
++		CFI_INFO
++		    ("%s: Error resetting the feature Reset All(FIFO size)\n",
++		     __func__);
++		if (rx_rst) {
++			params->dev_rx_fifo_size = rx_siz;
++		}
++
++		if (tx_rst) {
++			if (ep_addr == 0) {
++				int i;
++				for (i = 0; i < core_if->hwcfg4.b.num_in_eps;
++				     i++) {
++					core_if->
++					    core_params->dev_tx_fifo_size[i] =
++					    tx_siz[i];
++				}
++			} else {
++				params->dev_tx_fifo_size[ep->
++							 fh_ep.tx_fifo_num -
++							 1] = tx_siz[0];
++			}
++		}
++		retval = -FH_E_INVALID;
++	}
++	return retval;
++}
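++
++/*
++ * Illustrative note (not in the original sources): the function above uses
++ * a save-try-rollback pattern. The current FIFO sizes are saved in rx_siz /
++ * tx_siz[], the initial (init_rxfsiz / init_txfsiz) values are programmed,
++ * and if resize_fifos() cannot apply them the saved sizes are written back.
++ */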
++
++static int cfi_handle_reset_all(struct fh_otg_pcd *pcd, uint8_t addr)
++{
++	int retval = 0;
++	cfi_ep_t *cfiep;
++	cfiobject_t *cfi = pcd->cfi;
++	fh_list_link_t *tmp;
++
++	retval = cfi_handle_reset_fifo_val(pcd, addr, 1, 1);
++	if (retval < 0) {
++		return retval;
++	}
++
++	/* If the EP address is known then reset the features for only that EP */
++	if (addr) {
++		cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
++		if (NULL == cfiep) {
++			CFI_INFO("%s: Error getting the EP address 0x%02x\n",
++				 __func__, addr);
++			return -FH_E_INVALID;
++		}
++		retval = cfi_ep_reset_all_setup_vals(cfiep);
++		cfiep->ep->fh_ep.buff_mode = BM_STANDARD;
++	}
++	/* Otherwise (wValue == 0), reset all features of all EP's */
++	else {
++		/* Traverse all the active EP's and reset the feature(s) value(s) */
++		//list_for_each_entry(cfiep, &cfi->active_eps, lh) {
++		FH_LIST_FOREACH(tmp, &cfi->active_eps) {
++			cfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
++			retval = cfi_ep_reset_all_setup_vals(cfiep);
++			cfiep->ep->fh_ep.buff_mode = BM_STANDARD;
++			if (retval < 0) {
++				CFI_INFO
++				    ("%s: Error resetting the feature Reset All\n",
++				     __func__);
++				return retval;
++			}
++		}
++	}
++	return retval;
++}
++
++static int cfi_handle_reset_dma_buff_setup(struct fh_otg_pcd *pcd,
++					   uint8_t addr)
++{
++	int retval = 0;
++	cfi_ep_t *cfiep;
++	cfiobject_t *cfi = pcd->cfi;
++	fh_list_link_t *tmp;
++
++	/* If the EP address is known then reset the features for only that EP */
++	if (addr) {
++		cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
++		if (NULL == cfiep) {
++			CFI_INFO("%s: Error getting the EP address 0x%02x\n",
++				 __func__, addr);
++			return -FH_E_INVALID;
++		}
++		retval = cfi_reset_sg_val(cfiep);
++	}
++	/* Otherwise (wValue == 0), reset all features of all EP's */
++	else {
++		/* Traverse all the active EP's and reset the feature(s) value(s) */
++		//list_for_each_entry(cfiep, &cfi->active_eps, lh) {
++		FH_LIST_FOREACH(tmp, &cfi->active_eps) {
++			cfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
++			retval = cfi_reset_sg_val(cfiep);
++			if (retval < 0) {
++				CFI_INFO
++				    ("%s: Error resetting the feature Buffer Setup\n",
++				     __func__);
++				return retval;
++			}
++		}
++	}
++	return retval;
++}
++
++static int cfi_handle_reset_concat_val(struct fh_otg_pcd *pcd, uint8_t addr)
++{
++	int retval = 0;
++	cfi_ep_t *cfiep;
++	cfiobject_t *cfi = pcd->cfi;
++	fh_list_link_t *tmp;
++
++	/* If the EP address is known then reset the features for only that EP */
++	if (addr) {
++		cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
++		if (NULL == cfiep) {
++			CFI_INFO("%s: Error getting the EP address 0x%02x\n",
++				 __func__, addr);
++			return -FH_E_INVALID;
++		}
++		retval = cfi_reset_concat_val(cfiep);
++	}
++	/* Otherwise (wValue == 0), reset all features of all EP's */
++	else {
++		/* Traverse all the active EP's and reset the feature(s) value(s) */
++		//list_for_each_entry(cfiep, &cfi->active_eps, lh) {
++		FH_LIST_FOREACH(tmp, &cfi->active_eps) {
++			cfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
++			retval = cfi_reset_concat_val(cfiep);
++			if (retval < 0) {
++				CFI_INFO
++				    ("%s: Error resetting the feature Concatenation Value\n",
++				     __func__);
++				return retval;
++			}
++		}
++	}
++	return retval;
++}
++
++static int cfi_handle_reset_align_val(struct fh_otg_pcd *pcd, uint8_t addr)
++{
++	int retval = 0;
++	cfi_ep_t *cfiep;
++	cfiobject_t *cfi = pcd->cfi;
++	fh_list_link_t *tmp;
++
++	/* If the EP address is known then reset the features for only that EP */
++	if (addr) {
++		cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
++		if (NULL == cfiep) {
++			CFI_INFO("%s: Error getting the EP address 0x%02x\n",
++				 __func__, addr);
++			return -FH_E_INVALID;
++		}
++		retval = cfi_reset_align_val(cfiep);
++	}
++	/* Otherwise (wValue == 0), reset all features of all EP's */
++	else {
++		/* Traverse all the active EP's and reset the feature(s) value(s) */
++		//list_for_each_entry(cfiep, &cfi->active_eps, lh) {
++		FH_LIST_FOREACH(tmp, &cfi->active_eps) {
++			cfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
++			retval = cfi_reset_align_val(cfiep);
++			if (retval < 0) {
++				CFI_INFO
++				    ("%s: Error resetting the feature Aliignment Value\n",
++				     __func__);
++				return retval;
++			}
++		}
++	}
++	return retval;
++
++}
++
++static int cfi_preproc_reset(struct fh_otg_pcd *pcd,
++			     struct cfi_usb_ctrlrequest *req)
++{
++	int retval = 0;
++
++	switch (req->wIndex) {
++	case 0:
++		/* Reset all features */
++		retval = cfi_handle_reset_all(pcd, req->wValue & 0xff);
++		break;
++
++	case FT_ID_DMA_BUFFER_SETUP:
++		/* Reset the SG buffer setup */
++		retval =
++		    cfi_handle_reset_dma_buff_setup(pcd, req->wValue & 0xff);
++		break;
++
++	case FT_ID_DMA_CONCAT_SETUP:
++		/* Reset the Concatenation buffer setup */
++		retval = cfi_handle_reset_concat_val(pcd, req->wValue & 0xff);
++		break;
++
++	case FT_ID_DMA_BUFF_ALIGN:
++		/* Reset the Alignment buffer setup */
++		retval = cfi_handle_reset_align_val(pcd, req->wValue & 0xff);
++		break;
++
++	case FT_ID_TX_FIFO_DEPTH:
++		retval =
++		    cfi_handle_reset_fifo_val(pcd, req->wValue & 0xff, 0, 1);
++		pcd->cfi->need_gadget_att = 0;
++		break;
++
++	case FT_ID_RX_FIFO_DEPTH:
++		retval = cfi_handle_reset_fifo_val(pcd, 0, 1, 0);
++		pcd->cfi->need_gadget_att = 0;
++		break;
++	default:
++		break;
++	}
++	return retval;
++}
++
++/**
++ * This function sets a new value for the SG buffer setup.
++ */
++static int cfi_ep_set_sg_val(uint8_t * buf, struct fh_otg_pcd *pcd)
++{
++	uint8_t inaddr, outaddr;
++	cfi_ep_t *epin, *epout;
++	ddma_sg_buffer_setup_t *psgval;
++	uint32_t desccount, size;
++
++	CFI_INFO("%s\n", __func__);
++
++	psgval = (ddma_sg_buffer_setup_t *) buf;
++	desccount = (uint32_t) psgval->bCount;
++	size = (uint32_t) psgval->wSize;
++
++	/* Check the DMA descriptor count */
++	if ((desccount > MAX_DMA_DESCS_PER_EP) || (desccount == 0)) {
++		CFI_INFO
++		    ("%s: The count of DMA Descriptors should be between 1 and %d\n",
++		     __func__, MAX_DMA_DESCS_PER_EP);
++		return -FH_E_INVALID;
++	}
++
++	/* Check the transfer size */
++	if (size == 0) {
++		CFI_INFO("%s: The transfer size should be at least 1 byte\n",
++			 __func__);
++		return -FH_E_INVALID;
++	}
++
++	inaddr = psgval->bInEndpointAddress;
++	outaddr = psgval->bOutEndpointAddress;
++
++	epin = get_cfi_ep_by_addr(pcd->cfi, inaddr);
++	epout = get_cfi_ep_by_addr(pcd->cfi, outaddr);
++
++	if (NULL == epin || NULL == epout) {
++		CFI_INFO
++		    ("%s: Unable to get the endpoints inaddr=0x%02x outaddr=0x%02x\n",
++		     __func__, inaddr, outaddr);
++		return -FH_E_INVALID;
++	}
++
++	epin->ep->fh_ep.buff_mode = BM_SG;
++	fh_memcpy(epin->bm_sg, psgval, sizeof(ddma_sg_buffer_setup_t));
++
++	epout->ep->fh_ep.buff_mode = BM_SG;
++	fh_memcpy(epout->bm_sg, psgval, sizeof(ddma_sg_buffer_setup_t));
++
++	return 0;
++}
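++
++#if 0
++/*
++ * Minimal sketch (not part of the driver): a 6-byte SetFeature payload as
++ * parsed by cfi_ep_set_sg_val() above. The function name and all values
++ * are hypothetical.
++ */
++static void example_sg_payload(struct fh_otg_pcd *pcd)
++{
++	ddma_sg_buffer_setup_t sg = {
++		.bOutEndpointAddress = 0x01,	/* OUT EP 1 */
++		.bInEndpointAddress = 0x81,	/* IN EP 1 */
++		.bOffset = 64,	/* DWORD-aligned gap between segments */
++		.bCount = 4,	/* one DMA descriptor per segment */
++		.wSize = 512,	/* bytes per segment */
++	};
++
++	cfi_ep_set_sg_val((uint8_t *)&sg, pcd);
++}
++#endif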
++
++/**
++ * This function sets a new value for the buffer Alignment setup.
++ */
++static int cfi_ep_set_alignment_val(uint8_t * buf, struct fh_otg_pcd *pcd)
++{
++	cfi_ep_t *ep;
++	uint8_t addr;
++	ddma_align_buffer_setup_t *palignval;
++
++	palignval = (ddma_align_buffer_setup_t *) buf;
++	addr = palignval->bEndpointAddress;
++
++	ep = get_cfi_ep_by_addr(pcd->cfi, addr);
++
++	if (NULL == ep) {
++		CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
++			 __func__, addr);
++		return -FH_E_INVALID;
++	}
++
++	ep->ep->fh_ep.buff_mode = BM_ALIGN;
++	fh_memcpy(ep->bm_align, palignval, sizeof(ddma_align_buffer_setup_t));
++
++	return 0;
++}
++
++/**
++ * This function sets a new value for the Concatenation buffer setup.
++ */
++static int cfi_ep_set_concat_val(uint8_t * buf, struct fh_otg_pcd *pcd)
++{
++	uint8_t addr;
++	cfi_ep_t *ep;
++	struct _ddma_concat_buffer_setup_hdr *pConcatValHdr;
++	uint16_t *pVals;
++	uint32_t desccount;
++	int i;
++	uint16_t mps;
++
++	pConcatValHdr = (struct _ddma_concat_buffer_setup_hdr *)buf;
++	desccount = (uint32_t) pConcatValHdr->bDescCount;
++	pVals = (uint16_t *) (buf + BS_CONCAT_VAL_HDR_LEN);
++
++	/* Check the DMA descriptor count */
++	if (desccount > MAX_DMA_DESCS_PER_EP) {
++		CFI_INFO("%s: Maximum DMA Descriptor count should be %d\n",
++			 __func__, MAX_DMA_DESCS_PER_EP);
++		return -FH_E_INVALID;
++	}
++
++	addr = pConcatValHdr->bEndpointAddress;
++	ep = get_cfi_ep_by_addr(pcd->cfi, addr);
++	if (NULL == ep) {
++		CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
++			 __func__, addr);
++		return -FH_E_INVALID;
++	}
++
++	mps = UGETW(ep->ep->desc->wMaxPacketSize);
++
++#if 0
++	for (i = 0; i < desccount; i++) {
++		CFI_INFO("%s: wTxSize[%d]=0x%04x\n", __func__, i, pVals[i]);
++	}
++	CFI_INFO("%s: epname=%s; mps=%d\n", __func__, ep->ep->ep.name, mps);
++#endif
++
++	/* Check the wTxSizes to be less than or equal to the mps */
++	for (i = 0; i < desccount; i++) {
++		if (pVals[i] > mps) {
++			CFI_INFO
++			    ("%s: ERROR - the wTxSize[%d] should be <= MPS (wTxSize=%d)\n",
++			     __func__, i, pVals[i]);
++			return -FH_E_INVALID;
++		}
++	}
++
++	ep->ep->fh_ep.buff_mode = BM_CONCAT;
++	fh_memcpy(ep->bm_concat, pConcatValHdr, BS_CONCAT_VAL_HDR_LEN);
++
++	/* Free the previously allocated storage for the wTxBytes */
++	if (ep->bm_concat->wTxBytes) {
++		FH_FREE(ep->bm_concat->wTxBytes);
++	}
++
++	/* Allocate a new storage for the wTxBytes field */
++	ep->bm_concat->wTxBytes =
++	    FH_ALLOC(sizeof(uint16_t) * pConcatValHdr->bDescCount);
++	if (NULL == ep->bm_concat->wTxBytes) {
++		CFI_INFO("%s: Unable to allocate memory\n", __func__);
++		return -FH_E_NO_MEMORY;
++	}
++
++	/* Copy the new values into the wTxBytes field */
++	fh_memcpy(ep->bm_concat->wTxBytes, buf + BS_CONCAT_VAL_HDR_LEN,
++		   sizeof(uint16_t) * pConcatValHdr->bDescCount);
++
++	return 0;
++}
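++
++/*
++ * Illustrative note (not in the original sources): the SetFeature payload
++ * parsed above is a 4-byte header (bEndpointAddress, bDescCount, wSize)
++ * followed by bDescCount 16-bit wTxBytes entries, each of which must not
++ * exceed the endpoint's wMaxPacketSize.
++ */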
++
++/**
++ * This function calculates the total of all FIFO sizes
++ * 
++ * @param core_if Programming view of FH_otg controller
++ *
++ * @return The total of data FIFO sizes.
++ *
++ */
++static uint16_t get_dfifo_size(fh_otg_core_if_t * core_if)
++{
++	fh_otg_core_params_t *params = core_if->core_params;
++	uint16_t dfifo_total = 0;
++	int i;
++
++	/* The shared RxFIFO size */
++	dfifo_total =
++	    params->dev_rx_fifo_size + params->dev_nperio_tx_fifo_size;
++
++	/* Add up each TxFIFO size to the total */
++	for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++		dfifo_total += params->dev_tx_fifo_size[i];
++	}
++
++	return dfifo_total;
++}
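++
++/*
++ * Worked example (illustrative): with dev_rx_fifo_size = 1024,
++ * dev_nperio_tx_fifo_size = 1024 and three IN endpoints with TxFIFOs of
++ * 256 each, the function returns 1024 + 1024 + 3 * 256 = 2816.
++ */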
++
++/**
++ * This function returns Rx FIFO size
++ * 
++ * @param core_if Programming view of FH_otg controller
++ *
++ * @return The Rx FIFO size.
++ *
++ */
++static int32_t get_rxfifo_size(fh_otg_core_if_t * core_if, uint16_t wValue)
++{
++	switch (wValue >> 8) {
++	case 0:
++		return (core_if->pwron_rxfsiz <
++			32768) ? core_if->pwron_rxfsiz : 32768;
++		break;
++	case 1:
++		return core_if->core_params->dev_rx_fifo_size;
++		break;
++	default:
++		return -FH_E_INVALID;
++		break;
++	}
++}
++
++/**
++ * This function returns Tx FIFO size for IN EP
++ * 
++ * @param pcd A pointer to the PCD object
++ * @param wValue The low byte holds the IN EP address, the high byte
++ *               selects the power-on (0) or current (1) depth
++ *
++ * @return The Tx FIFO size for the IN endpoint.
++ *
++ */
++static int32_t get_txfifo_size(struct fh_otg_pcd *pcd, uint16_t wValue)
++{
++	fh_otg_pcd_ep_t *ep;
++
++	ep = get_ep_by_addr(pcd, wValue & 0xff);
++
++	if (NULL == ep) {
++		CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
++			 __func__, wValue & 0xff);
++		return -FH_E_INVALID;
++	}
++
++	if (!ep->fh_ep.is_in) {
++		CFI_INFO
++		    ("%s: No Tx FIFO assingned to the Out endpoint addr=0x%02x\n",
++		     __func__, wValue & 0xff);
++		return -FH_E_INVALID;
++	}
++
++	switch (wValue >> 8) {
++	case 0:
++		return (GET_CORE_IF(pcd)->pwron_txfsiz
++			[ep->fh_ep.tx_fifo_num - 1] <
++			768) ? GET_CORE_IF(pcd)->pwron_txfsiz[ep->
++							      fh_ep.tx_fifo_num
++							      - 1] : 32768;
++		break;
++	case 1:
++		return GET_CORE_IF(pcd)->core_params->
++		    dev_tx_fifo_size[ep->fh_ep.num - 1];
++		break;
++	default:
++		return -FH_E_INVALID;
++		break;
++	}
++}
++
++/**
++ * This function checks if the submitted combination of 
++ * device mode FIFO sizes is possible or not.
++ * 
++ * @param core_if Programming view of FH_otg controller
++ *
++ * @return 1 if possible, 0 otherwise.
++ *
++ */
++static uint8_t check_fifo_sizes(fh_otg_core_if_t * core_if)
++{
++	uint16_t dfifo_actual = 0;
++	fh_otg_core_params_t *params = core_if->core_params;
++	uint16_t start_addr = 0;
++	int i;
++
++	dfifo_actual =
++	    params->dev_rx_fifo_size + params->dev_nperio_tx_fifo_size;
++
++	for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++		dfifo_actual += params->dev_tx_fifo_size[i];
++	}
++
++	if (dfifo_actual > core_if->total_fifo_size) {
++		return 0;
++	}
++
++	if (params->dev_rx_fifo_size > 32768 || params->dev_rx_fifo_size < 16)
++		return 0;
++
++	if (params->dev_nperio_tx_fifo_size > 32768
++	    || params->dev_nperio_tx_fifo_size < 16)
++		return 0;
++
++	for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++
++		if (params->dev_tx_fifo_size[i] > 768
++		    || params->dev_tx_fifo_size[i] < 4)
++			return 0;
++	}
++
++	if (params->dev_rx_fifo_size > core_if->pwron_rxfsiz)
++		return 0;
++	start_addr = params->dev_rx_fifo_size;
++
++	if (params->dev_nperio_tx_fifo_size > core_if->pwron_gnptxfsiz)
++		return 0;
++	start_addr += params->dev_nperio_tx_fifo_size;
++
++	for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++
++		if (params->dev_tx_fifo_size[i] > core_if->pwron_txfsiz[i])
++			return 0;
++		start_addr += params->dev_tx_fifo_size[i];
++	}
++
++	return 1;
++}
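++
++/*
++ * Illustrative summary (not in the original sources) of the checks above:
++ * RxFIFO + NPTxFIFO + sum(TxFIFO[i]) must fit in total_fifo_size,
++ * 16 <= RxFIFO <= 32768, 16 <= NPTxFIFO <= 32768, 4 <= TxFIFO[i] <= 768,
++ * and no FIFO may grow beyond its power-on (pwron_*) depth.
++ */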
++
++/**
++ * This function resizes Device mode FIFOs
++ * 
++ * @param core_if Programming view of FH_otg controller
++ *
++ * @return 1 if successful, 0 otherwise
++ *
++ */
++static uint8_t resize_fifos(fh_otg_core_if_t * core_if)
++{
++	int i = 0;
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	fh_otg_core_params_t *params = core_if->core_params;
++	uint32_t rx_fifo_size;
++	fifosize_data_t nptxfifosize;
++	fifosize_data_t txfifosize[15];
++
++	uint32_t rx_fsz_bak;
++	uint32_t nptxfsz_bak;
++	uint32_t txfsz_bak[15];
++
++	uint16_t start_address;
++	uint8_t retval = 1;
++
++	if (!check_fifo_sizes(core_if)) {
++		return 0;
++	}
++
++	/* Configure data FIFO sizes */
++	if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
++		rx_fsz_bak = FH_READ_REG32(&global_regs->grxfsiz);
++		rx_fifo_size = params->dev_rx_fifo_size;
++		FH_WRITE_REG32(&global_regs->grxfsiz, rx_fifo_size);
++
++		/*
++		 * Tx FIFOs. These FIFOs are numbered from 1 to 15.
++		 * Indexes of the FIFO size module parameters in the
++		 * dev_tx_fifo_size array and the FIFO size registers in
++		 * the dtxfsiz array run from 0 to 14.
++		 */
++
++		/* Non-periodic Tx FIFO */
++		nptxfsz_bak = FH_READ_REG32(&global_regs->gnptxfsiz);
++		nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
++		start_address = params->dev_rx_fifo_size;
++		nptxfifosize.b.startaddr = start_address;
++
++		FH_WRITE_REG32(&global_regs->gnptxfsiz, nptxfifosize.d32);
++
++		start_address += nptxfifosize.b.depth;
++
++		for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++			txfsz_bak[i] = FH_READ_REG32(&global_regs->dtxfsiz[i]);
++
++			txfifosize[i].b.depth = params->dev_tx_fifo_size[i];
++			txfifosize[i].b.startaddr = start_address;
++			FH_WRITE_REG32(&global_regs->dtxfsiz[i],
++					txfifosize[i].d32);
++
++			start_address += txfifosize[i].b.depth;
++		}
++
++		/** Check if register values are set correctly */
++		if (rx_fifo_size != FH_READ_REG32(&global_regs->grxfsiz)) {
++			retval = 0;
++		}
++
++		if (nptxfifosize.d32 != FH_READ_REG32(&global_regs->gnptxfsiz)) {
++			retval = 0;
++		}
++
++		for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++			if (txfifosize[i].d32 !=
++			    FH_READ_REG32(&global_regs->dtxfsiz[i])) {
++				retval = 0;
++			}
++		}
++
++		/** If register values are not set correctly, reset old values */
++		if (retval == 0) {
++			FH_WRITE_REG32(&global_regs->grxfsiz, rx_fsz_bak);
++
++			/* Non-periodic Tx FIFO */
++			FH_WRITE_REG32(&global_regs->gnptxfsiz, nptxfsz_bak);
++
++			for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++				FH_WRITE_REG32(&global_regs->dtxfsiz[i],
++						txfsz_bak[i]);
++			}
++		}
++	} else {
++		return 0;
++	}
++
++	/* Flush the FIFOs */
++	fh_otg_flush_tx_fifo(core_if, 0x10);	/* all Tx FIFOs */
++	fh_otg_flush_rx_fifo(core_if);
++
++	return retval;
++}
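++
++/*
++ * Illustrative layout (not in the original sources) as programmed by
++ * resize_fifos(); the FIFOs are packed back to back in FIFO RAM:
++ *
++ *   GRXFSIZ              depth = rx
++ *   GNPTXFSIZ            start = rx,         depth = nptx
++ *   DTXFSIZ[0]           start = rx + nptx,  depth = tx[0]
++ *   DTXFSIZ[i]           start = previous start + previous depth
++ */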
++
++/**
++ * This function sets a new value for the Tx FIFO depth of an IN endpoint.
++ */
++static int cfi_ep_set_tx_fifo_val(uint8_t * buf, fh_otg_pcd_t * pcd)
++{
++	int retval;
++	uint32_t fsiz;
++	uint16_t size;
++	uint16_t ep_addr;
++	fh_otg_pcd_ep_t *ep;
++	fh_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
++	tx_fifo_size_setup_t *ptxfifoval;
++
++	ptxfifoval = (tx_fifo_size_setup_t *) buf;
++	ep_addr = ptxfifoval->bEndpointAddress;
++	size = ptxfifoval->wDepth;
++
++	ep = get_ep_by_addr(pcd, ep_addr);
++	if (NULL == ep) {
++		CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
++			 __func__, ep_addr);
++		return -FH_E_INVALID;
++	}
++
++	CFI_INFO
++	    ("%s: Set Tx FIFO size: endpoint addr=0x%02x, depth=%d, FIFO Num=%d\n",
++	     __func__, ep_addr, size, ep->fh_ep.tx_fifo_num);
++
++	fsiz = params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num - 1];
++	params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num - 1] = size;
++
++	if (resize_fifos(GET_CORE_IF(pcd))) {
++		retval = 0;
++	} else {
++		CFI_INFO
++		    ("%s: Error setting the feature Tx FIFO Size for EP%d\n",
++		     __func__, ep_addr);
++		params->dev_tx_fifo_size[ep->fh_ep.tx_fifo_num - 1] = fsiz;
++		retval = -FH_E_INVALID;
++	}
++
++	return retval;
++}
++
++/**
++ * This function sets a new value for the Rx FIFO depth.
++ */
++static int cfi_set_rx_fifo_val(uint8_t * buf, fh_otg_pcd_t * pcd)
++{
++	int retval;
++	uint32_t fsiz;
++	uint16_t size;
++	fh_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
++	rx_fifo_size_setup_t *prxfifoval;
++
++	prxfifoval = (rx_fifo_size_setup_t *) buf;
++	size = prxfifoval->wDepth;
++
++	fsiz = params->dev_rx_fifo_size;
++	params->dev_rx_fifo_size = size;
++
++	if (resize_fifos(GET_CORE_IF(pcd))) {
++		retval = 0;
++	} else {
++		CFI_INFO("%s: Error setting the feature Rx FIFO Size\n",
++			 __func__);
++		params->dev_rx_fifo_size = fsiz;
++		retval = -FH_E_INVALID;
++	}
++
++	return retval;
++}
++
++/**
++ * This function reads the SG of an EP's buffer setup into the buffer buf
++ */
++static int cfi_ep_get_sg_val(uint8_t * buf, struct fh_otg_pcd *pcd,
++			     struct cfi_usb_ctrlrequest *req)
++{
++	int retval = -FH_E_INVALID;
++	uint8_t addr;
++	cfi_ep_t *ep;
++
++	/* The Low Byte of the wValue contains a non-zero address of the endpoint */
++	addr = req->wValue & 0xFF;
++	if (addr == 0)		/* The address should be non-zero */
++		return retval;
++
++	ep = get_cfi_ep_by_addr(pcd->cfi, addr);
++	if (NULL == ep) {
++		CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
++			 __func__, addr);
++		return retval;
++	}
++
++	fh_memcpy(buf, ep->bm_sg, BS_SG_VAL_DESC_LEN);
++	retval = BS_SG_VAL_DESC_LEN;
++	return retval;
++}
++
++/**
++ * This function reads the Concatenation value of an EP's buffer mode into 
++ * the buffer buf
++ */
++static int cfi_ep_get_concat_val(uint8_t * buf, struct fh_otg_pcd *pcd,
++				 struct cfi_usb_ctrlrequest *req)
++{
++	int retval = -FH_E_INVALID;
++	uint8_t addr;
++	cfi_ep_t *ep;
++	uint8_t desc_count;
++
++	/* The Low Byte of the wValue contains a non-zero address of the endpoint */
++	addr = req->wValue & 0xFF;
++	if (addr == 0)		/* The address should be non-zero */
++		return retval;
++
++	ep = get_cfi_ep_by_addr(pcd->cfi, addr);
++	if (NULL == ep) {
++		CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
++			 __func__, addr);
++		return retval;
++	}
++
++	/* Copy the header to the buffer */
++	fh_memcpy(buf, ep->bm_concat, BS_CONCAT_VAL_HDR_LEN);
++	/* Advance the buffer pointer by the header size */
++	buf += BS_CONCAT_VAL_HDR_LEN;
++
++	desc_count = ep->bm_concat->hdr.bDescCount;
++	/* Copy all the wTxBytes to the buffer */
++	fh_memcpy(buf, ep->bm_concat->wTxBytes, sizeof(uint16_t) * desc_count);
++
++	retval = BS_CONCAT_VAL_HDR_LEN + sizeof(uint16_t) * desc_count;
++	return retval;
++}
++
++/**
++ * This function reads the buffer Alignment value of an EP's buffer mode into 
++ * the buffer buf
++ *
++ * @return The total number of bytes copied to the buffer or negative error code.
++ */
++static int cfi_ep_get_align_val(uint8_t * buf, struct fh_otg_pcd *pcd,
++				struct cfi_usb_ctrlrequest *req)
++{
++	int retval = -FH_E_INVALID;
++	uint8_t addr;
++	cfi_ep_t *ep;
++
++	/* The Low Byte of the wValue contains a non-zero address of the endpoint */
++	addr = req->wValue & 0xFF;
++	if (addr == 0)		/* The address should be non-zero */
++		return retval;
++
++	ep = get_cfi_ep_by_addr(pcd->cfi, addr);
++	if (NULL == ep) {
++		CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
++			 __func__, addr);
++		return retval;
++	}
++
++	fh_memcpy(buf, ep->bm_align, BS_ALIGN_VAL_HDR_LEN);
++	retval = BS_ALIGN_VAL_HDR_LEN;
++
++	return retval;
++}
++
++/**
++ * This function sets a new value for the specified feature
++ * 
++ * @param	pcd	A pointer to the PCD object
++ * 
++ * @return 0 if successful, negative error code otherwise to stall the DCE.
++ */
++static int cfi_set_feature_value(struct fh_otg_pcd *pcd)
++{
++	int retval = -FH_E_NOT_SUPPORTED;
++	uint16_t wIndex, wValue;
++	uint8_t bRequest;
++	struct fh_otg_core_if *coreif;
++	cfiobject_t *cfi = pcd->cfi;
++	struct cfi_usb_ctrlrequest *ctrl_req;
++	uint8_t *buf;
++	ctrl_req = &cfi->ctrl_req;
++
++	buf = pcd->cfi->ctrl_req.data;
++
++	coreif = GET_CORE_IF(pcd);
++	bRequest = ctrl_req->bRequest;
++	wIndex = FH_CONSTANT_CPU_TO_LE16(ctrl_req->wIndex);
++	wValue = FH_CONSTANT_CPU_TO_LE16(ctrl_req->wValue);
++
++	/* See which feature is to be modified */
++	switch (wIndex) {
++	case FT_ID_DMA_BUFFER_SETUP:
++		/* Modify the feature */
++		if ((retval = cfi_ep_set_sg_val(buf, pcd)) < 0)
++			return retval;
++
++		/* And send this request to the gadget */
++		cfi->need_gadget_att = 1;
++		break;
++
++	case FT_ID_DMA_BUFF_ALIGN:
++		if ((retval = cfi_ep_set_alignment_val(buf, pcd)) < 0)
++			return retval;
++		cfi->need_gadget_att = 1;
++		break;
++
++	case FT_ID_DMA_CONCAT_SETUP:
++		/* Modify the feature */
++		if ((retval = cfi_ep_set_concat_val(buf, pcd)) < 0)
++			return retval;
++		cfi->need_gadget_att = 1;
++		break;
++
++	case FT_ID_DMA_CIRCULAR:
++		CFI_INFO("FT_ID_DMA_CIRCULAR\n");
++		break;
++
++	case FT_ID_THRESHOLD_SETUP:
++		CFI_INFO("FT_ID_THRESHOLD_SETUP\n");
++		break;
++
++	case FT_ID_DFIFO_DEPTH:
++		CFI_INFO("FT_ID_DFIFO_DEPTH\n");
++		break;
++
++	case FT_ID_TX_FIFO_DEPTH:
++		CFI_INFO("FT_ID_TX_FIFO_DEPTH\n");
++		if ((retval = cfi_ep_set_tx_fifo_val(buf, pcd)) < 0)
++			return retval;
++		cfi->need_gadget_att = 0;
++		break;
++
++	case FT_ID_RX_FIFO_DEPTH:
++		CFI_INFO("FT_ID_RX_FIFO_DEPTH\n");
++		if ((retval = cfi_set_rx_fifo_val(buf, pcd)) < 0)
++			return retval;
++		cfi->need_gadget_att = 0;
++		break;
++	}
++
++	return retval;
++}
++
++#endif //FH_UTE_CFI
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.h
+new file mode 100644
+index 00000000..97a05fa5
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cfi.h
+@@ -0,0 +1,320 @@
++/* ==========================================================================
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++#if !defined(__FH_OTG_CFI_H__)
++#define __FH_OTG_CFI_H__
++
++#include "fh_otg_pcd.h"
++#include "fh_cfi_common.h"
++
++/**
++ * @file
++ * This file contains the CFI-related OTG PCD specific common constants,
++ * interfaces (functions and macros) and data structures. The CFI Protocol is
++ * an optional interface for internal testing purposes that a DUT may
++ * implement to support testing of configurable features.
++ *
++ */
++
++struct fh_otg_pcd;
++struct fh_otg_pcd_ep;
++
++/** OTG CFI Features (properties) ID constants */
++/** This is a request for all Core Features */
++#define FT_ID_DMA_MODE					0x0001
++#define FT_ID_DMA_BUFFER_SETUP			0x0002
++#define FT_ID_DMA_BUFF_ALIGN			0x0003
++#define FT_ID_DMA_CONCAT_SETUP			0x0004
++#define FT_ID_DMA_CIRCULAR				0x0005
++#define FT_ID_THRESHOLD_SETUP			0x0006
++#define FT_ID_DFIFO_DEPTH				0x0007
++#define FT_ID_TX_FIFO_DEPTH				0x0008
++#define FT_ID_RX_FIFO_DEPTH				0x0009
++
++/**********************************************************/
++#define CFI_INFO_DEF
++
++#ifdef CFI_INFO_DEF
++#define CFI_INFO(fmt...)	FH_PRINTF("CFI: " fmt);
++#else
++#define CFI_INFO(fmt...)
++#endif
++
++#define min(x,y) ({ \
++	(x) < (y) ? (x) : (y); })
++
++#define max(x,y) ({ \
++	(x) > (y) ? (x) : (y); })
++
++/**
++ * Descriptor DMA SG Buffer setup structure (SG buffer). This structure is
++ * also used for setting up a buffer for Circular DDMA.
++ */
++struct _ddma_sg_buffer_setup {
++#define BS_SG_VAL_DESC_LEN	6
++	/* The OUT EP address */
++	uint8_t bOutEndpointAddress;
++	/* The IN EP address */
++	uint8_t bInEndpointAddress;
++	/* Number of bytes to put between transfer segments (must be DWORD-aligned) */
++	uint8_t bOffset;
++	/* The number of transfer segments (one DMA descriptor per segment) */
++	uint8_t bCount;
++	/* Size (in bytes) of each transfer segment */
++	uint16_t wSize;
++} __attribute__ ((packed));
++typedef struct _ddma_sg_buffer_setup ddma_sg_buffer_setup_t;
++
++/** Descriptor DMA Concatenation Buffer setup structure */
++struct _ddma_concat_buffer_setup_hdr {
++#define BS_CONCAT_VAL_HDR_LEN	4
++	/* The endpoint for which the buffer is to be set up */
++	uint8_t bEndpointAddress;
++	/* The count of descriptors to be used */
++	uint8_t bDescCount;
++	/* The total size of the transfer */
++	uint16_t wSize;
++} __attribute__ ((packed));
++typedef struct _ddma_concat_buffer_setup_hdr ddma_concat_buffer_setup_hdr_t;
++
++/** Descriptor DMA Concatenation Buffer setup structure */
++struct _ddma_concat_buffer_setup {
++	/* The SG header */
++	ddma_concat_buffer_setup_hdr_t hdr;
++
++	/* The XFER sizes pointer (allocated dynamically) */
++	uint16_t *wTxBytes;
++} __attribute__ ((packed));
++typedef struct _ddma_concat_buffer_setup ddma_concat_buffer_setup_t;
++
++/** Descriptor DMA Alignment Buffer setup structure */
++struct _ddma_align_buffer_setup {
++#define BS_ALIGN_VAL_HDR_LEN	2
++	uint8_t bEndpointAddress;
++	uint8_t bAlign;
++} __attribute__ ((packed));
++typedef struct _ddma_align_buffer_setup ddma_align_buffer_setup_t;
++
++/** Transmit FIFO Size setup structure */
++struct _tx_fifo_size_setup {
++	uint8_t bEndpointAddress;
++	uint16_t wDepth;
++} __attribute__ ((packed));
++typedef struct _tx_fifo_size_setup tx_fifo_size_setup_t;
++
++/** Transmit FIFO Size setup structure */
++struct _rx_fifo_size_setup {
++	uint16_t wDepth;
++} __attribute__ ((packed));
++typedef struct _rx_fifo_size_setup rx_fifo_size_setup_t;
++
++/**
++ * struct cfi_usb_ctrlrequest - the CFI implementation of the struct usb_ctrlrequest
++ * This structure encapsulates the standard usb_ctrlrequest and adds a pointer
++ * to the data returned in the data stage of a 3-stage Control Write requests.
++ */
++struct cfi_usb_ctrlrequest {
++	uint8_t bRequestType;
++	uint8_t bRequest;
++	uint16_t wValue;
++	uint16_t wIndex;
++	uint16_t wLength;
++	uint8_t *data;
++} UPACKED;
++
++/*---------------------------------------------------------------------------*/
++
++/**
++ * The CFI wrapper of the enabled and activated fh_otg_pcd_ep structures.
++ * This structure is used to store the buffer setup data for any
++ * enabled endpoint in the PCD.
++ */
++struct cfi_ep {
++	/* Entry for the list container */
++	fh_list_link_t lh;
++	/* Pointer to the active PCD endpoint structure */
++	struct fh_otg_pcd_ep *ep;
++	/* The last descriptor in the chain of DMA descriptors of the endpoint */
++	struct fh_otg_dma_desc *dma_desc_last;
++	/* The SG feature value */
++	ddma_sg_buffer_setup_t *bm_sg;
++	/* The Circular feature value */
++	ddma_sg_buffer_setup_t *bm_circ;
++	/* The Concatenation feature value */
++	ddma_concat_buffer_setup_t *bm_concat;
++	/* The Alignment feature value */
++	ddma_align_buffer_setup_t *bm_align;
++	/* XFER length */
++	uint32_t xfer_len;
++	/*
++	 * Count of DMA descriptors currently used.
++	 * The total should not exceed the MAX_DMA_DESCS_PER_EP value
++	 * defined in the fh_otg_cil.h
++	 */
++	uint32_t desc_count;
++};
++typedef struct cfi_ep cfi_ep_t;
++
++typedef struct cfi_dma_buff {
++#define CFI_IN_BUF_LEN	1024
++#define CFI_OUT_BUF_LEN	1024
++	dma_addr_t addr;
++	uint8_t *buf;
++} cfi_dma_buff_t;
++
++struct cfiobject;
++
++/**
++ * This is the interface for the CFI operations.
++ *
++ * @param	ep_enable			Called when any endpoint is enabled and activated.
++ * @param	release				Called when the CFI object is released and it needs to correctly
++ *								deallocate the dynamic memory
++ * @param	ctrl_write_complete	Called when the data stage of the request is complete
++ */
++typedef struct cfi_ops {
++	int (*ep_enable) (struct cfiobject * cfi, struct fh_otg_pcd * pcd,
++			  struct fh_otg_pcd_ep * ep);
++	void *(*ep_alloc_buf) (struct cfiobject * cfi, struct fh_otg_pcd * pcd,
++			       struct fh_otg_pcd_ep * ep, dma_addr_t * dma,
++			       unsigned size, gfp_t flags);
++	void (*release) (struct cfiobject * cfi);
++	int (*ctrl_write_complete) (struct cfiobject * cfi,
++				    struct fh_otg_pcd * pcd);
++	void (*build_descriptors) (struct cfiobject * cfi,
++				   struct fh_otg_pcd * pcd,
++				   struct fh_otg_pcd_ep * ep,
++				   fh_otg_pcd_request_t * req);
++} cfi_ops_t;
++
++struct cfiobject {
++	cfi_ops_t ops;
++	struct fh_otg_pcd *pcd;
++	struct usb_gadget *gadget;
++
++	/* Buffers used to send/receive CFI-related request data */
++	cfi_dma_buff_t buf_in;
++	cfi_dma_buff_t buf_out;
++
++	/* CFI specific Control request wrapper */
++	struct cfi_usb_ctrlrequest ctrl_req;
++
++	/* The list of active EP's in the PCD of type cfi_ep_t */
++	fh_list_link_t active_eps;
++
++	/* This flag shall control the propagation of a specific request
++	 * to the gadget's processing routines.
++	 * 0 - no gadget handling
++	 * 1 - the gadget needs to know about this request (w/o completing a status
++	 * phase - just return a 0 to the _setup callback)
++	 */
++	uint8_t need_gadget_att;
++
++	/* Flag indicating whether the status IN phase needs to be
++	 * completed by the PCD
++	 */
++	uint8_t need_status_in_complete;
++};
++typedef struct cfiobject cfiobject_t;
++
++#define DUMP_MSG
++
++#if defined(DUMP_MSG)
++static inline void dump_msg(const u8 * buf, unsigned int length)
++{
++	unsigned int start, num, i;
++	char line[52], *p;
++
++	if (length >= 512)
++		return;
++
++	start = 0;
++	while (length > 0) {
++		num = min(length, 16u);
++		p = line;
++		for (i = 0; i < num; ++i) {
++			if (i == 8)
++				*p++ = ' ';
++			FH_SPRINTF(p, " %02x", buf[i]);
++			p += 3;
++		}
++		*p = 0;
++		FH_DEBUG("%6x: %s\n", start, line);
++		buf += num;
++		start += num;
++		length -= num;
++	}
++}
++#else
++static inline void dump_msg(const u8 * buf, unsigned int length)
++{
++}
++#endif
++
++/**
++ * This function returns a pointer to cfi_ep_t object with the addr address.
++ */
++static inline struct cfi_ep *get_cfi_ep_by_addr(struct cfiobject *cfi,
++						uint8_t addr)
++{
++	struct cfi_ep *pcfiep;
++	fh_list_link_t *tmp;
++
++	FH_LIST_FOREACH(tmp, &cfi->active_eps) {
++		pcfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
++
++		if (pcfiep->ep->desc->bEndpointAddress == addr) {
++			return pcfiep;
++		}
++	}
++
++	return NULL;
++}
++
++/**
++ * This function returns a pointer to cfi_ep_t object that matches
++ * the fh_otg_pcd_ep object.
++ */
++static inline struct cfi_ep *get_cfi_ep_by_pcd_ep(struct cfiobject *cfi,
++						  struct fh_otg_pcd_ep *ep)
++{
++	struct cfi_ep *pcfiep = NULL;
++	fh_list_link_t *tmp;
++
++	FH_LIST_FOREACH(tmp, &cfi->active_eps) {
++		pcfiep = FH_LIST_ENTRY(tmp, struct cfi_ep, lh);
++		if (pcfiep->ep == ep) {
++			return pcfiep;
++		}
++	}
++	return NULL;
++}
++
++int cfi_setup(struct fh_otg_pcd *pcd, struct cfi_usb_ctrlrequest *ctrl);
++
++#endif /* (__FH_OTG_CFI_H__) */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.c
+new file mode 100644
+index 00000000..64a3d762
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.c
+@@ -0,0 +1,7487 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_cil.c $
++ * $Revision: #216 $
++ * $Date: 2015/10/12 $
++ * $Change: 2972621 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++/** @file
++ *
++ * The Core Interface Layer provides basic services for accessing and
++ * managing the FH_otg hardware. These services are used by both the
++ * Host Controller Driver and the Peripheral Controller Driver.
++ *
++ * The CIL manages the memory map for the core so that the HCD and PCD
++ * don't have to do this separately. It also handles basic tasks like
++ * reading/writing the registers and data FIFOs in the controller.
++ * Some of the data access functions provide encapsulation of several
++ * operations required to perform a task, such as writing multiple
++ * registers to start a transfer. Finally, the CIL performs basic
++ * services that are not specific to either the host or device modes
++ * of operation. These services include management of the OTG Host
++ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
++ * Diagnostic API is also provided to allow testing of the controller
++ * hardware.
++ *
++ * The Core Interface Layer has the following requirements:
++ * - Provides basic controller operations.
++ * - Minimal use of OS services. 
++ * - The OS services used will be abstracted by using inline functions
++ *	 or macros.
++ *
++ */
++
++#include <linux/kernel.h>
++#include "../fh_common_port/fh_os.h"
++#include "fh_otg_regs.h"
++#include "fh_otg_cil.h"
++
++static int fh_otg_setup_params(fh_otg_core_if_t * core_if);
++
++/**
++ * This function is called to initialize the FH_otg CSR data
++ * structures. The register addresses in the device and host
++ * structures are initialized from the base address supplied by the
++ * caller. The calling function must make the OS calls to get the
++ * base address of the FH_otg controller registers. The core_params
++ * argument holds the parameters that specify how the core should be
++ * configured.
++ *
++ * @param reg_base_addr Base address of FH_otg core registers
++ *
++ */
++fh_otg_core_if_t *fh_otg_cil_init(const uint32_t * reg_base_addr)
++{
++	fh_otg_core_if_t *core_if = 0;
++	fh_otg_dev_if_t *dev_if = 0;
++	fh_otg_host_if_t *host_if = 0;
++	uint8_t *reg_base = (uint8_t *) reg_base_addr;
++	int i = 0;
++
++	FH_DEBUGPL(DBG_CILV, "%s(%p)\n", __func__, reg_base_addr);
++
++	core_if = FH_ALLOC(sizeof(fh_otg_core_if_t));
++
++	if (core_if == NULL) {
++		FH_DEBUGPL(DBG_CIL,
++			    "Allocation of fh_otg_core_if_t failed\n");
++		return 0;
++	}
++	core_if->core_global_regs = (fh_otg_core_global_regs_t *) reg_base;
++
++	/*
++	 * Allocate the Device Mode structures.
++	 */
++	dev_if = FH_ALLOC(sizeof(fh_otg_dev_if_t));
++
++	if (dev_if == NULL) {
++		FH_DEBUGPL(DBG_CIL, "Allocation of fh_otg_dev_if_t failed\n");
++		FH_FREE(core_if);
++		return 0;
++	}
++
++	dev_if->dev_global_regs =
++	    (fh_otg_device_global_regs_t *) (reg_base +
++					      FH_DEV_GLOBAL_REG_OFFSET);
++
++	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
++		dev_if->in_ep_regs[i] = (fh_otg_dev_in_ep_regs_t *)
++		    (reg_base + FH_DEV_IN_EP_REG_OFFSET +
++		     (i * FH_EP_REG_OFFSET));
++
++		dev_if->out_ep_regs[i] = (fh_otg_dev_out_ep_regs_t *)
++		    (reg_base + FH_DEV_OUT_EP_REG_OFFSET +
++		     (i * FH_EP_REG_OFFSET));
++		FH_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n",
++			    i, &dev_if->in_ep_regs[i]->diepctl);
++		FH_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n",
++			    i, &dev_if->out_ep_regs[i]->doepctl);
++	}
++
++	dev_if->speed = 0;	// unknown
++
++	core_if->dev_if = dev_if;
++
++	/*
++	 * Allocate the Host Mode structures.
++	 */
++	host_if = FH_ALLOC(sizeof(fh_otg_host_if_t));
++
++	if (host_if == NULL) {
++		FH_DEBUGPL(DBG_CIL,
++			    "Allocation of fh_otg_host_if_t failed\n");
++		FH_FREE(dev_if);
++		FH_FREE(core_if);
++		return 0;
++	}
++
++	host_if->host_global_regs = (fh_otg_host_global_regs_t *)
++	    (reg_base + FH_OTG_HOST_GLOBAL_REG_OFFSET);
++
++	host_if->hprt0 =
++	    (uint32_t *) (reg_base + FH_OTG_HOST_PORT_REGS_OFFSET);
++
++	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
++		host_if->hc_regs[i] = (fh_otg_hc_regs_t *)
++		    (reg_base + FH_OTG_HOST_CHAN_REGS_OFFSET +
++		     (i * FH_OTG_CHAN_REGS_OFFSET));
++		FH_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
++			    i, &host_if->hc_regs[i]->hcchar);
++	}
++
++	host_if->num_host_channels = MAX_EPS_CHANNELS;
++	core_if->host_if = host_if;
++
++	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
++		core_if->data_fifo[i] =
++		    (uint32_t *) (reg_base + FH_OTG_DATA_FIFO_OFFSET +
++				  (i * FH_OTG_DATA_FIFO_SIZE));
++		FH_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08lx\n",
++			    i, (unsigned long)core_if->data_fifo[i]);
++	}
++
++	core_if->pcgcctl = (uint32_t *) (reg_base + FH_OTG_PCGCCTL_OFFSET);
++
++	/* Initiate lx_state to L3 disconnected state */
++	core_if->lx_state = FH_OTG_L3;
++	/*
++	 * Store the contents of the hardware configuration registers here for
++	 * easy access later.
++	 */
++	core_if->hwcfg1.d32 =
++	    FH_READ_REG32(&core_if->core_global_regs->ghwcfg1);
++	core_if->hwcfg2.d32 =
++	    FH_READ_REG32(&core_if->core_global_regs->ghwcfg2);
++	core_if->hwcfg3.d32 =
++	    FH_READ_REG32(&core_if->core_global_regs->ghwcfg3);
++	core_if->hwcfg4.d32 =
++	    FH_READ_REG32(&core_if->core_global_regs->ghwcfg4);
++
++	/* Force host mode to get HPTXFSIZ exact power on value */
++	{
++		gusbcfg_data_t gusbcfg = {.d32 = 0 };
++		gusbcfg.d32 =  FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++		gusbcfg.b.force_host_mode = 1;
++		FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
++		fh_mdelay(100);
++		core_if->hptxfsiz.d32 =
++		    FH_READ_REG32(&core_if->core_global_regs->hptxfsiz);
++		gusbcfg.d32 =  FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++		gusbcfg.b.force_host_mode = 0;
++		FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
++		fh_mdelay(100);
++	}
++
++	FH_DEBUGPL(DBG_CILV, "hwcfg1=%08x\n", core_if->hwcfg1.d32);
++	FH_DEBUGPL(DBG_CILV, "hwcfg2=%08x\n", core_if->hwcfg2.d32);
++	FH_DEBUGPL(DBG_CILV, "hwcfg3=%08x\n", core_if->hwcfg3.d32);
++	FH_DEBUGPL(DBG_CILV, "hwcfg4=%08x\n", core_if->hwcfg4.d32);
++
++	core_if->hcfg.d32 =
++	    FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
++	core_if->dcfg.d32 =
++	    FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
++
++	FH_DEBUGPL(DBG_CILV, "hcfg=%08x\n", core_if->hcfg.d32);
++	FH_DEBUGPL(DBG_CILV, "dcfg=%08x\n", core_if->dcfg.d32);
++
++	FH_DEBUGPL(DBG_CILV, "op_mode=%0x\n", core_if->hwcfg2.b.op_mode);
++	FH_DEBUGPL(DBG_CILV, "arch=%0x\n", core_if->hwcfg2.b.architecture);
++	FH_DEBUGPL(DBG_CILV, "num_dev_ep=%d\n", core_if->hwcfg2.b.num_dev_ep);
++	FH_DEBUGPL(DBG_CILV, "num_host_chan=%d\n",
++		    core_if->hwcfg2.b.num_host_chan);
++	FH_DEBUGPL(DBG_CILV, "nonperio_tx_q_depth=0x%0x\n",
++		    core_if->hwcfg2.b.nonperio_tx_q_depth);
++	FH_DEBUGPL(DBG_CILV, "host_perio_tx_q_depth=0x%0x\n",
++		    core_if->hwcfg2.b.host_perio_tx_q_depth);
++	FH_DEBUGPL(DBG_CILV, "dev_token_q_depth=0x%0x\n",
++		    core_if->hwcfg2.b.dev_token_q_depth);
++
++	FH_DEBUGPL(DBG_CILV, "Total FIFO SZ=%d\n",
++		    core_if->hwcfg3.b.dfifo_depth);
++	FH_DEBUGPL(DBG_CILV, "xfer_size_cntr_width=%0x\n",
++		    core_if->hwcfg3.b.xfer_size_cntr_width);
++
++	/*
++	 * Set the SRP success bit for FS-I2c
++	 */
++	core_if->srp_success = 0;
++	core_if->srp_timer_started = 0;
++
++	/*
++	 * Create new workqueue and init works
++	 */
++	core_if->wq_otg = FH_WORKQ_ALLOC("fh_otg");
++	if (core_if->wq_otg == 0) {
++		FH_WARN("FH_WORKQ_ALLOC failed\n");
++		FH_FREE(host_if);
++		FH_FREE(dev_if);
++		FH_FREE(core_if);
++		return 0;
++	}
++	
++	/*
++	 * Allocates hibernation backup registers
++	 */	 
++	if (core_if->hwcfg4.b.hiber==1 || core_if->hwcfg4.b.xhiber==1) {
++		if(!core_if->gr_backup){
++			core_if->gr_backup = FH_ALLOC(sizeof(*core_if->gr_backup));
++			if(!core_if->gr_backup){
++				FH_WARN("can't alloc mem for gr_backup register \n");
++				FH_FREE(host_if);
++				FH_FREE(dev_if);
++				FH_WORKQ_FREE(core_if->wq_otg);
++				FH_FREE(core_if);
++				return 0;
++			}
++		}
++		if(!core_if->dr_backup){
++			core_if->dr_backup = FH_ALLOC(sizeof(*core_if->dr_backup));
++			if(!core_if->dr_backup){
++				FH_WARN("can't alloc mem for dr_backup register \n");
++				FH_FREE(host_if);
++				FH_FREE(dev_if);
++				FH_WORKQ_FREE(core_if->wq_otg);
++				FH_FREE(core_if->gr_backup);
++				FH_FREE(core_if);
++				return 0;
++			}
++		}
++		if(!core_if->hr_backup){
++			core_if->hr_backup = FH_ALLOC(sizeof(*core_if->hr_backup));
++			if(!core_if->hr_backup){
++				FH_WARN("can't alloc mem for hr_backup register \n");
++				FH_FREE(host_if);
++				FH_FREE(dev_if);
++				FH_WORKQ_FREE(core_if->wq_otg);
++				FH_FREE(core_if->gr_backup);
++				FH_FREE(core_if->dr_backup);
++				FH_FREE(core_if);
++				return 0;
++			}
++		}
++	} 
++
++	core_if->snpsid = FH_READ_REG32(&core_if->core_global_regs->gsnpsid);
++
++	FH_PRINTF("Core Release: %x.%x%x%x\n",
++		   (core_if->snpsid >> 12 & 0xF),
++		   (core_if->snpsid >> 8 & 0xF),
++		   (core_if->snpsid >> 4 & 0xF), (core_if->snpsid & 0xF));
++
++	core_if->wkp_timer = FH_TIMER_ALLOC("Wake Up Timer",
++					     w_wakeup_detected, core_if);
++	if (core_if->wkp_timer == 0) {
++		FH_WARN("FH_TIMER_ALLOC failed\n");
++		FH_FREE(host_if);
++		FH_FREE(dev_if);
++		FH_WORKQ_FREE(core_if->wq_otg);
++		FH_FREE(core_if);
++		return 0;
++	}
++
++	if (fh_otg_setup_params(core_if)) {
++		FH_WARN("Error while setting core params\n");
++	}
++
++	core_if->hibernation_suspend = 0;
++	if (core_if->otg_ver)
++		core_if->test_mode = 0;
++
++	/** ADP initialization */
++	fh_otg_adp_init(core_if);
++	
++	return core_if;
++}
++
++/**
++ * This function frees the structures allocated by fh_otg_cil_init().
++ *
++ * @param core_if The core interface pointer returned from
++ * 		  fh_otg_cil_init().
++ *
++ */
++void fh_otg_cil_remove(fh_otg_core_if_t * core_if)
++{
++	dctl_data_t dctl = {.d32 = 0 };
++	/* Disable all interrupts */
++	FH_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, 1, 0);
++	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, 0);
++
++	dctl.b.sftdiscon = 1;
++	if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0,
++				 dctl.d32);
++	}
++
++	if (core_if->wq_otg) {
++		FH_WORKQ_WAIT_WORK_DONE(core_if->wq_otg, 500);
++		FH_WORKQ_FREE(core_if->wq_otg);
++	}
++	if (core_if->dev_if) {
++		FH_FREE(core_if->dev_if);
++	}
++	if (core_if->host_if) {
++		FH_FREE(core_if->host_if);
++	}
++
++	/** Remove hibernation recovery registers **/
++	if(core_if->gr_backup){
++		FH_FREE(core_if->gr_backup);
++	}
++	if(core_if->dr_backup){
++		FH_FREE(core_if->dr_backup);
++	}
++	if(core_if->hr_backup){
++		FH_FREE(core_if->hr_backup);
++	}	
++
++	/** Remove ADP Stuff  */
++	fh_otg_adp_remove(core_if);
++	if (core_if->core_params) {
++		FH_FREE(core_if->core_params);
++	}
++	if (core_if->wkp_timer) {
++		FH_TIMER_FREE(core_if->wkp_timer);
++	}
++	if (core_if->srp_timer) {
++		FH_TIMER_FREE(core_if->srp_timer);
++	}
++	FH_FREE(core_if);
++}
++
++/**
++ * This function enables the controller's Global Interrupt in the AHB Config
++ * register.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++void fh_otg_enable_global_interrupts(fh_otg_core_if_t * core_if)
++{
++	gahbcfg_data_t ahbcfg = {.d32 = 0 };
++	ahbcfg.b.glblintrmsk = 1;	/* Enable interrupts */
++	FH_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
++}
++
++/**
++ * This function disables the controller's Global Interrupt in the AHB Config
++ * register.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++void fh_otg_disable_global_interrupts(fh_otg_core_if_t * core_if)
++{
++	gahbcfg_data_t ahbcfg = {.d32 = 0 };
++	ahbcfg.b.glblintrmsk = 1;	/* Disable interrupts */
++	FH_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
++}
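++
++/*
++ * Note (assumption inferred from the usage above): FH_MODIFY_REG32(reg,
++ * clr, set) performs a read-modify-write, so the enable path passes the
++ * GlblIntrMsk bit as the set mask and the disable path passes the same bit
++ * as the clear mask, toggling only that bit of GAHBCFG.
++ */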
++
++/**
++ * This function initializes the common interrupts, used in both
++ * device and host modes.
++ *
++ * @param core_if Programming view of the FH_otg controller
++ *
++ */
++static void fh_otg_enable_common_interrupts(fh_otg_core_if_t * core_if)
++{
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	/* Clear any pending OTG Interrupts */
++	FH_WRITE_REG32(&global_regs->gotgint, 0xFFFFFFFF);
++
++	/* Clear any pending interrupts */
++	FH_WRITE_REG32(&global_regs->gintsts, 0xFFFFFFFF);
++
++	/*
++	 * Enable the interrupts in the GINTMSK.
++	 */
++	if (!core_if->core_params->otg_ver)
++	/* To avoid system hang during OTG 2.0 role switch */
++		intr_mask.b.modemismatch = 1;
++	intr_mask.b.otgintr = 1;
++
++	if (!core_if->dma_enable) {
++		intr_mask.b.rxstsqlvl = 1;
++	}
++
++	intr_mask.b.conidstschng = 1;
++	intr_mask.b.wkupintr = 1;
++	intr_mask.b.disconnect = 0;
++	intr_mask.b.usbsuspend = 1;
++	intr_mask.b.sessreqintr = 1;
++#ifdef CONFIG_USB_FH_OTG_LPM
++	if (core_if->core_params->lpm_enable) {
++		intr_mask.b.lpmtranrcvd = 1;
++	}
++#endif
++	FH_WRITE_REG32(&global_regs->gintmsk, intr_mask.d32);
++}
++
++/*
++ * The restore operation is modified to support Synopsys Emulated Powerdown and
++ * Hibernation. This function is for exiting from Device mode hibernation by
++ * Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
++ * @param core_if Programming view of FH_otg controller.
++ * @param rem_wakeup - indicates whether resume is initiated by Device or Host.
++ * @param reset - indicates whether resume is initiated by Reset.
++ */
++int fh_otg_device_hibernation_restore(fh_otg_core_if_t * core_if,
++				       int rem_wakeup, int reset)
++{
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	pcgcctl_data_t pcgcctl = {.d32 = 0 };
++	dctl_data_t dctl = {.d32 = 0 };
++
++	int timeout = 2000;
++
++	if (!core_if->hibernation_suspend) {
++		FH_PRINTF("Already exited from Hibernation\n");
++		return 1;
++	}
++
++	FH_DEBUGPL(DBG_PCD, "%s called\n", __FUNCTION__);
++	/* Switch-on voltage to the core */
++	gpwrdn.b.pwrdnswtch = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Reset core */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnrstn = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Assert Restore signal */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.restore = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++	fh_udelay(10);
++
++	/* Disable power clamps */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnclmp = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	if (rem_wakeup) {
++		fh_udelay(70);
++	}
++
++	/* Deassert Reset core */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnrstn = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++	fh_udelay(10);
++
++	/* Disable PMU interrupt */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pmuintsel = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	/* Mask interrupts from gpwrdn */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.connect_det_msk = 1;
++	gpwrdn.b.srp_det_msk = 1;
++	gpwrdn.b.disconn_det_msk = 1;
++	gpwrdn.b.rst_det_msk = 1;
++	gpwrdn.b.lnstchng_msk = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	/* Indicate that we are exiting hibernation */
++	core_if->hibernation_suspend = 0;
++
++	/*
++	 * Set Restore Essential Regs bit in PCGCCTL register, restore_mode = 1
++	 * indicates restore from remote_wakeup
++	 */
++	restore_essential_regs(core_if, rem_wakeup, 0);
++
++	/*
++	 * Wait a little so the new value of hibernation_suspend is visible in
++	 * case the Restore Done interrupt was received before polling starts.
++	 */
++	fh_udelay(10);
++
++	if (core_if->hibernation_suspend == 0) {
++		/*
++		 * Wait for the Restore Done interrupt. The interrupt is polled
++		 * here to avoid any possible race conditions.
++		 */
++		do {
++			gintsts_data_t gintsts;
++			gintsts.d32 =
++			    FH_READ_REG32(&core_if->core_global_regs->gintsts);
++			if (gintsts.b.restoredone) {
++				gintsts.d32 = 0;
++				gintsts.b.restoredone = 1;
++				FH_WRITE_REG32(&core_if->core_global_regs->
++						gintsts, gintsts.d32);
++				FH_PRINTF("Restore Done Interrupt seen\n");
++				break;
++			}
++			fh_udelay(10);
++		} while (--timeout);
++		if (!timeout) {
++			FH_PRINTF("Restore Done interrupt wasn't generated here\n");
++		}
++	}
++	/* Clear all pending interrupts */
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
++
++	/* De-assert Restore */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.restore = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	if (!rem_wakeup) {
++		pcgcctl.d32 = 0;
++		pcgcctl.b.rstpdwnmodule = 1;
++		FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
++	}
++
++	/* Restore GUSBCFG, DCFG and DCTL */
++	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
++			core_if->gr_backup->gusbcfg_local);
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg,
++			core_if->dr_backup->dcfg);
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
++			core_if->dr_backup->dctl);
++
++	/* De-assert Wakeup Logic */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pmuactv = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	if (!rem_wakeup) {
++		/* Set Device programming done bit */
++		dctl.b.pwronprgdone = 1;
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
++	} else {
++		/* Start Remote Wakeup Signaling */
++		dctl.d32 = core_if->dr_backup->dctl;
++		dctl.b.rmtwkupsig = 1;
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
++	}
++
++	fh_mdelay(2);
++	/* Clear all pending interrupts */
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
++
++	/* Restore global registers */
++	fh_otg_restore_global_regs(core_if);
++	/* Restore device global registers */
++	fh_otg_restore_dev_regs(core_if, rem_wakeup);
++
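++	/*
++	 * Stop driving remote-wakeup signaling after ~7 ms; USB 2.0 allows a
++	 * device to drive resume signaling for 1-15 ms, so this presumably
++	 * targets the middle of that window.
++	 */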
++	if (rem_wakeup) {
++		fh_mdelay(7);
++		dctl.d32 = 0;
++		dctl.b.rmtwkupsig = 1;
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
++	}
++
++	core_if->hibernation_suspend = 0;
++	/* The core will be in ON STATE */
++	core_if->lx_state = FH_OTG_L0;
++	FH_PRINTF("Hibernation recovery completes here\n");
++
++	return 1;
++}
++
++/*
++ * The restore operation is modified to support Synopsys Emulated Powerdown and
++ * Hibernation. This function is for exiting from Host mode hibernation by
++ * Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
++ * @param core_if Programming view of FH_otg controller.
++ * @param rem_wakeup - indicates whether resume is initiated by Device or Host.
++ * @param reset - indicates whether resume is initiated by Reset.
++ */
++int fh_otg_host_hibernation_restore(fh_otg_core_if_t * core_if,
++				     int rem_wakeup, int reset)
++{
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	hprt0_data_t hprt0 = {.d32 = 0 };
++
++	int timeout = 2000;
++
++	FH_DEBUGPL(DBG_HCD, "%s called\n", __FUNCTION__);
++	/* Switch-on voltage to the core */
++	gpwrdn.b.pwrdnswtch = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Reset core */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnrstn = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Assert Restore signal */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.restore = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++	fh_udelay(10);
++
++	/* Disable power clamps */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnclmp = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	if (!rem_wakeup) {
++		fh_udelay(50);
++	}
++
++	/* Deassert Reset core */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnrstn = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++	fh_udelay(10);
++
++	/* Disable PMU interrupt */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pmuintsel = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	gpwrdn.d32 = 0;
++	gpwrdn.b.connect_det_msk = 1;
++	gpwrdn.b.srp_det_msk = 1;
++	gpwrdn.b.disconn_det_msk = 1;
++	gpwrdn.b.rst_det_msk = 1;
++	gpwrdn.b.lnstchng_msk = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	/* Indicate that we are exiting hibernation */
++	core_if->hibernation_suspend = 0;
++
++	/* Set Restore Essential Regs bit in PCGCCTL register */
++	restore_essential_regs(core_if, rem_wakeup, 1);
++
++	/* Wait a little so the new value of hibernation_suspend is visible in
++	 * case the Restore Done interrupt was received before polling starts */
++	fh_udelay(10);
++
++	if (core_if->hibernation_suspend == 0) {
++		/* Wait for the Restore Done interrupt. The interrupt is polled
++		 * here to avoid any possible race conditions.
++		 */
++		do {
++			gintsts_data_t gintsts;
++			gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++			if (gintsts.b.restoredone) {
++				gintsts.d32 = 0;
++				gintsts.b.restoredone = 1;
++				FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++				FH_DEBUGPL(DBG_HCD, "Restore Done Interrupt seen\n");
++				break;
++			}
++			fh_udelay(10);
++		} while (--timeout);
++		if (!timeout) {
++			FH_WARN("Restore Done interrupt wasn't generated\n");
++		}
++	}
++
++	/* Set the flag's value to 0 again after receiving restore done interrupt */
++	core_if->hibernation_suspend = 0;
++
++	/* This step is not described in the functional spec, but without this
++	 * delay mode-mismatch interrupts occur, because just after restore the
++	 * core is still in Device mode (gintsts.curmode == 0) */
++	fh_mdelay(100);
++
++	/* Clear all pending interrupts */
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
++
++	/* De-assert Restore */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.restore = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Restore GUSBCFG and HCFG */
++	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
++			core_if->gr_backup->gusbcfg_local);
++	FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg,
++			core_if->hr_backup->hcfg_local);
++
++	/* De-assert Wakeup Logic */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pmuactv = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Start the Resume operation by programming HPRT0 */
++	hprt0.d32 = core_if->hr_backup->hprt0_local;
++	hprt0.b.prtpwr = 1;
++	hprt0.b.prtena = 0;
++	hprt0.b.prtsusp = 0;
++	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++
++	FH_PRINTF("Resume Starts Now\n");
++	if (!reset) {		// Indicates it is Resume Operation
++		hprt0.d32 = core_if->hr_backup->hprt0_local;
++		hprt0.b.prtres = 1;
++		hprt0.b.prtpwr = 1;
++		hprt0.b.prtena = 0;
++		hprt0.b.prtsusp = 0;
++		FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++
++		if (!rem_wakeup)
++			hprt0.b.prtres = 0;
++		/* Wait for Resume time and then program HPRT again */
++		fh_mdelay(100);
++		FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++
++	} else {		// Indicates it is Reset Operation
++		hprt0.d32 = core_if->hr_backup->hprt0_local;
++		hprt0.b.prtrst = 1;
++		hprt0.b.prtpwr = 1;
++		hprt0.b.prtena = 0;
++		hprt0.b.prtsusp = 0;
++		FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++		/* Wait for Reset time and then program HPRT again */
++		fh_mdelay(60);
++		hprt0.b.prtrst = 0;
++		FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++	}
++	/* Clear all interrupt status */
++	hprt0.d32 = fh_otg_read_hprt0(core_if);
++	hprt0.b.prtconndet = 1;
++	hprt0.b.prtenchng = 1;
++	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++
++	/* Clear all pending interrupts */
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
++
++	/* Restore global registers */
++	fh_otg_restore_global_regs(core_if);
++	/* Restore host global registers */
++	fh_otg_restore_host_regs(core_if, reset);
++
++	/* The core will be in ON STATE */
++	core_if->lx_state = FH_OTG_L0;
++	FH_PRINTF("Hibernation recovery is complete here\n");
++	return 0;
++}
++
++/** Saves the core's global register values into system memory. */
++int fh_otg_save_global_regs(fh_otg_core_if_t * core_if)
++{
++	struct fh_otg_global_regs_backup *gr;
++	int i;
++
++	gr = core_if->gr_backup;
++	if (!gr) {
++			FH_WARN("gr_backup is not allocated!\n");
++			return -FH_E_NO_MEMORY;
++	}
++
++	gr->gotgctl_local = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++	gr->gintmsk_local = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
++	gr->gahbcfg_local = FH_READ_REG32(&core_if->core_global_regs->gahbcfg);
++	gr->gusbcfg_local = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++	gr->grxfsiz_local = FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
++	gr->gnptxfsiz_local = FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz);
++	gr->hptxfsiz_local = FH_READ_REG32(&core_if->core_global_regs->hptxfsiz);
++#ifdef CONFIG_USB_FH_OTG_LPM
++	gr->glpmcfg_local = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++#endif
++	gr->gi2cctl_local = FH_READ_REG32(&core_if->core_global_regs->gi2cctl);
++	gr->pcgcctl_local = FH_READ_REG32(core_if->pcgcctl);
++	gr->gdfifocfg_local =
++	    FH_READ_REG32(&core_if->core_global_regs->gdfifocfg);
++	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
++		gr->dtxfsiz_local[i] =
++		    FH_READ_REG32(&(core_if->core_global_regs->dtxfsiz[i]));
++	}
++
++	FH_DEBUGPL(DBG_ANY, "===========Backing Global registers==========\n");
++	FH_DEBUGPL(DBG_ANY, "Backed up gotgctl   = %08x\n", gr->gotgctl_local);
++	FH_DEBUGPL(DBG_ANY, "Backed up gintmsk   = %08x\n", gr->gintmsk_local);
++	FH_DEBUGPL(DBG_ANY, "Backed up gahbcfg   = %08x\n", gr->gahbcfg_local);
++	FH_DEBUGPL(DBG_ANY, "Backed up gusbcfg   = %08x\n", gr->gusbcfg_local);
++	FH_DEBUGPL(DBG_ANY, "Backed up grxfsiz   = %08x\n", gr->grxfsiz_local);
++	FH_DEBUGPL(DBG_ANY, "Backed up gnptxfsiz = %08x\n",
++		    gr->gnptxfsiz_local);
++	FH_DEBUGPL(DBG_ANY, "Backed up hptxfsiz  = %08x\n",
++		    gr->hptxfsiz_local);
++#ifdef CONFIG_USB_FH_OTG_LPM
++	FH_DEBUGPL(DBG_ANY, "Backed up glpmcfg   = %08x\n", gr->glpmcfg_local);
++#endif
++	FH_DEBUGPL(DBG_ANY, "Backed up gi2cctl   = %08x\n", gr->gi2cctl_local);
++	FH_DEBUGPL(DBG_ANY, "Backed up pcgcctl   = %08x\n", gr->pcgcctl_local);
++	FH_DEBUGPL(DBG_ANY,"Backed up gdfifocfg   = %08x\n",gr->gdfifocfg_local);
++
++	return 0;
++}
++
++/** Saves GINTMSK register before setting the msk bits. */
++int fh_otg_save_gintmsk_reg(fh_otg_core_if_t * core_if)
++{
++	struct fh_otg_global_regs_backup *gr;
++
++	gr = core_if->gr_backup;
++	if (!gr) {
++			FH_WARN("gr_backup is not allocated!\n");
++			return -FH_E_NO_MEMORY;
++	}
++
++	gr->gintmsk_local = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
++
++	FH_DEBUGPL(DBG_ANY,"=============Backing GINTMSK registers============\n");
++	FH_DEBUGPL(DBG_ANY, "Backed up gintmsk   = %08x\n", gr->gintmsk_local);
++
++	return 0;
++}
++
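++/** Saves the device-mode registers into system memory. */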
++int fh_otg_save_dev_regs(fh_otg_core_if_t * core_if)
++{
++	struct fh_otg_dev_regs_backup *dr;
++	int i;
++
++	dr = core_if->dr_backup;
++	if (!dr) {
++			FH_WARN("dr_backup is not allocated!\n");
++			return -FH_E_NO_MEMORY;		
++	}
++
++	dr->dcfg = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
++	dr->dctl = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
++	dr->daintmsk =
++	    FH_READ_REG32(&core_if->dev_if->dev_global_regs->daintmsk);
++	dr->diepmsk =
++	    FH_READ_REG32(&core_if->dev_if->dev_global_regs->diepmsk);
++	dr->doepmsk =
++	    FH_READ_REG32(&core_if->dev_if->dev_global_regs->doepmsk);
++
++	for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
++		dr->diepctl[i] =
++		    FH_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
++		dr->dieptsiz[i] =
++		    FH_READ_REG32(&core_if->dev_if->in_ep_regs[i]->dieptsiz);
++		dr->diepdma[i] =
++		    FH_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepdma);
++	}
++	
++	for (i = 0; i <= core_if->dev_if->num_out_eps; ++i) {
++		dr->doepctl[i] =
++		    FH_READ_REG32(&core_if->dev_if->out_ep_regs[i]->doepctl);
++		dr->doeptsiz[i] =
++		    FH_READ_REG32(&core_if->dev_if->out_ep_regs[i]->doeptsiz);
++		dr->doepdma[i] =
++		    FH_READ_REG32(&core_if->dev_if->out_ep_regs[i]->doepdma);
++	}
++
++	FH_DEBUGPL(DBG_ANY,
++		    "=============Backing Device registers==============\n");
++	FH_DEBUGPL(DBG_ANY, "Backed up dcfg            = %08x\n", dr->dcfg);
++	FH_DEBUGPL(DBG_ANY, "Backed up dctl        = %08x\n", dr->dctl);
++	FH_DEBUGPL(DBG_ANY, "Backed up daintmsk            = %08x\n",
++		    dr->daintmsk);
++	FH_DEBUGPL(DBG_ANY, "Backed up diepmsk        = %08x\n", dr->diepmsk);
++	FH_DEBUGPL(DBG_ANY, "Backed up doepmsk        = %08x\n", dr->doepmsk);
++	for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
++		FH_DEBUGPL(DBG_ANY, "Backed up diepctl[%d]        = %08x\n", i,
++			    dr->diepctl[i]);
++		FH_DEBUGPL(DBG_ANY, "Backed up dieptsiz[%d]        = %08x\n",
++			    i, dr->dieptsiz[i]);
++		FH_DEBUGPL(DBG_ANY, "Backed up diepdma[%d]        = %08x\n", i,
++			    dr->diepdma[i]);
++	}
++	
++	for (i = 0; i <= core_if->dev_if->num_out_eps; ++i) {
++		FH_DEBUGPL(DBG_ANY, "Backed up doepctl[%d]        = %08x\n", i,
++			    dr->doepctl[i]);
++		FH_DEBUGPL(DBG_ANY, "Backed up doeptsiz[%d]        = %08x\n",
++			    i, dr->doeptsiz[i]);
++		FH_DEBUGPL(DBG_ANY, "Backed up doepdma[%d]        = %08x\n", i,
++			    dr->doepdma[i]);
++	}
++
++	return 0;
++}
++
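++/** Saves the host-mode registers into system memory. */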
++int fh_otg_save_host_regs(fh_otg_core_if_t * core_if)
++{
++	struct fh_otg_host_regs_backup *hr;
++	int i;
++
++	hr = core_if->hr_backup;
++	if (!hr) {
++		FH_WARN("hr_backup is not allocated!\n");
++		return -FH_E_NO_MEMORY;
++	}
++
++	hr->hcfg_local =
++	    FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
++	hr->haintmsk_local =
++	    FH_READ_REG32(&core_if->host_if->host_global_regs->haintmsk);
++	for (i = 0; i < fh_otg_get_param_host_channels(core_if); ++i) {
++		hr->hcintmsk_local[i] =
++		    FH_READ_REG32(&core_if->host_if->hc_regs[i]->hcintmsk);
++	}
++	hr->hprt0_local = FH_READ_REG32(core_if->host_if->hprt0);
++	hr->hfir_local =
++	    FH_READ_REG32(&core_if->host_if->host_global_regs->hfir);
++
++	FH_DEBUGPL(DBG_ANY,
++		    "=============Backing Host registers===============\n");
++	FH_DEBUGPL(DBG_ANY, "Backed up hcfg		= %08x\n",
++		    hr->hcfg_local);
++	FH_DEBUGPL(DBG_ANY, "Backed up haintmsk = %08x\n", hr->haintmsk_local);
++	for (i = 0; i <= fh_otg_get_param_host_channels(core_if); ++i) {
++		FH_DEBUGPL(DBG_ANY, "Backed up hcintmsk[%02d]=%08x\n", i,
++			    hr->hcintmsk_local[i]);
++	}
++	FH_DEBUGPL(DBG_ANY, "Backed up hprt0           = %08x\n",
++		    hr->hprt0_local);
++	FH_DEBUGPL(DBG_ANY, "Backed up hfir           = %08x\n",
++		    hr->hfir_local);
++
++	return 0;
++}
++
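++/** Restores the global core registers from the backup taken before
++ *  hibernation. */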
++int fh_otg_restore_global_regs(fh_otg_core_if_t * core_if)
++{
++	struct fh_otg_global_regs_backup *gr;
++	int i;
++
++	gr = core_if->gr_backup;
++	if (!gr) {
++		return -FH_E_INVALID;
++	}
++
++	FH_WRITE_REG32(&core_if->core_global_regs->gotgctl, gr->gotgctl_local);
++	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gr->gintmsk_local);
++	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gr->gusbcfg_local);
++	FH_WRITE_REG32(&core_if->core_global_regs->gahbcfg, gr->gahbcfg_local);
++	FH_WRITE_REG32(&core_if->core_global_regs->grxfsiz, gr->grxfsiz_local);
++	FH_WRITE_REG32(&core_if->core_global_regs->gnptxfsiz, gr->gnptxfsiz_local);
++	FH_WRITE_REG32(&core_if->core_global_regs->hptxfsiz, gr->hptxfsiz_local);
++	FH_WRITE_REG32(&core_if->core_global_regs->gdfifocfg, gr->gdfifocfg_local);
++	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
++		FH_WRITE_REG32(&core_if->core_global_regs->dtxfsiz[i], gr->dtxfsiz_local[i]);
++	}
++
++	/* FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF); */
++	/* FH_WRITE_REG32(core_if->host_if->hprt0, 0x0000100A); -- HPRT0 is
++	 * programmed in fh_otg_host_hibernation_restore(), so there is no
++	 * need to restore it here - mvardan */
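++	/* GAHBCFG is written again last, presumably so that the global
++	 * interrupt enable takes effect only after all other global
++	 * registers have been restored. */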
++	FH_WRITE_REG32(&core_if->core_global_regs->gahbcfg, (gr->gahbcfg_local));
++	return 0;
++}
++
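++/** Restores the device-mode registers. DCTL is skipped on remote wakeup,
++ *  presumably because it was already programmed to start resume signaling. */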
++int fh_otg_restore_dev_regs(fh_otg_core_if_t * core_if, int rem_wakeup)
++{
++	struct fh_otg_dev_regs_backup *dr;
++	int i;
++
++	dr = core_if->dr_backup;
++
++	if (!dr) {
++		return -FH_E_INVALID;
++	}
++
++	if (!rem_wakeup) {
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
++				dr->dctl);
++	}
++	
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->daintmsk, dr->daintmsk);
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->diepmsk, dr->diepmsk);
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->doepmsk, dr->doepmsk);
++
++	for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
++		FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->dieptsiz, dr->dieptsiz[i]);
++		FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->diepdma, dr->diepdma[i]);
++		FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl, dr->diepctl[i]);
++	}
++	
++	for (i = 0; i <= core_if->dev_if->num_out_eps; ++i) {
++		FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doeptsiz, dr->doeptsiz[i]);
++		FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doepdma, dr->doepdma[i]);
++		FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doepctl, dr->doepctl[i]);
++	}
++
++	return 0;
++}
++
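++/** Restores the host-mode registers from the backup taken before
++ *  hibernation. */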
++int fh_otg_restore_host_regs(fh_otg_core_if_t * core_if, int reset)
++{
++	struct fh_otg_host_regs_backup *hr;
++	int i;
++	hr = core_if->hr_backup;
++
++	if (!hr) {
++		return -FH_E_INVALID;
++	}
++
++	FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg, hr->hcfg_local);
++	//if (!reset)
++	//{
++	//      FH_WRITE_REG32(&core_if->host_if->host_global_regs->hfir, hr->hfir_local);
++	//}
++
++	FH_WRITE_REG32(&core_if->host_if->host_global_regs->haintmsk,
++			hr->haintmsk_local);
++	for (i = 0; i < fh_otg_get_param_host_channels(core_if); ++i) {
++		FH_WRITE_REG32(&core_if->host_if->hc_regs[i]->hcintmsk,
++				hr->hcintmsk_local[i]);
++	}
++
++	return 0;
++}
++
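++/** Restores the LPM and I2C control registers from the global backup. */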
++int restore_lpm_i2c_regs(fh_otg_core_if_t * core_if)
++{
++	struct fh_otg_global_regs_backup *gr;
++
++	gr = core_if->gr_backup;
++
++	/* Restore values for LPM and I2C */
++#ifdef CONFIG_USB_FH_OTG_LPM
++	FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, gr->glpmcfg_local);
++#endif
++	FH_WRITE_REG32(&core_if->core_global_regs->gi2cctl, gr->gi2cctl_local);
++
++	return 0;
++}
++
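++/** Restores the registers essential for exiting hibernation. rmode selects
++ *  the remote-wakeup restore path; is_host selects the host-mode path. */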
++int restore_essential_regs(fh_otg_core_if_t * core_if, int rmode, int is_host)
++{
++	struct fh_otg_global_regs_backup *gr;
++	pcgcctl_data_t pcgcctl = {.d32 = 0 };
++	gahbcfg_data_t gahbcfg = {.d32 = 0 };
++	gusbcfg_data_t gusbcfg = {.d32 = 0 };
++	gintmsk_data_t gintmsk = {.d32 = 0 };
++
++	/* Restore LPM and I2C registers */
++	restore_lpm_i2c_regs(core_if);
++
++	/* Set PCGCCTL to 0 */
++	FH_WRITE_REG32(core_if->pcgcctl, 0x00000000);
++
++	gr = core_if->gr_backup;
++	/* Load restore values for [31:14] bits */
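++	/* 0xffffc000 keeps bits [31:14] of the saved PCGCCTL; 0x00020000
++	 * additionally sets bit 17 (value carried over from the original
++	 * sequence; its role is not documented in this file). */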
++	FH_WRITE_REG32(core_if->pcgcctl,
++			((gr->pcgcctl_local & 0xffffc000) | 0x00020000));
++
++	/* Unmask the global interrupt in GAHBCFG and restore it */
++	gahbcfg.d32 = gr->gahbcfg_local;
++	gahbcfg.b.glblintrmsk = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gahbcfg, gahbcfg.d32);
++
++	/* Clear all pending interrupts */
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
++
++	/* Unmask restore done interrupt */
++	gintmsk.b.restoredone = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
++
++	/* Restore GUSBCFG and HCFG/DCFG */
++	gusbcfg.d32 = core_if->gr_backup->gusbcfg_local;
++	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
++
++	if (is_host) {
++		hcfg_data_t hcfg = {.d32 = 0 };
++		hcfg.d32 = core_if->hr_backup->hcfg_local;
++		FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg,
++				hcfg.d32);
++
++		/* Load restore values for [31:14] bits */
++		pcgcctl.d32 = gr->pcgcctl_local & 0xffffc000;
++		pcgcctl.d32 = gr->pcgcctl_local | 0x00020000;
++
++		if (rmode)
++			pcgcctl.b.restoremode = 1;
++		FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
++		fh_udelay(10);
++
++		/* Load restore values for [31:14] bits and set EssRegRestored bit */
++		pcgcctl.d32 = gr->pcgcctl_local & 0xffffc000;
++		pcgcctl.b.ess_reg_restored = 1;
++		if (rmode)
++			pcgcctl.b.restoremode = 1;
++		FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
++	} else {
++		dcfg_data_t dcfg = {.d32 = 0 };
++		dcfg.d32 = core_if->dr_backup->dcfg;
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
++
++		/* Load restore values for [31:14] bits */
++		pcgcctl.d32 = gr->pcgcctl_local & 0xffffc000;
++		pcgcctl.d32 = gr->pcgcctl_local | 0x00020000;
++		if (!rmode) {
++			pcgcctl.d32 |= 0x208;
++		}
++		FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
++		fh_udelay(10);
++
++		/* Load restore values for [31:14] bits */
++		pcgcctl.d32 = gr->pcgcctl_local & 0xffffc000;
++		pcgcctl.d32 = gr->pcgcctl_local | 0x00020000;
++		pcgcctl.b.ess_reg_restored = 1;
++		if (!rmode)
++			pcgcctl.d32 |= 0x208;
++		FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
++	}
++
++	return 0;
++}
++
++/**
++ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY
++ * type.
++ */
++static void init_fslspclksel(fh_otg_core_if_t * core_if)
++{
++	uint32_t val;
++	hcfg_data_t hcfg;
++
++	if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
++	     (core_if->hwcfg2.b.fs_phy_type == 1) &&
++	     (core_if->core_params->ulpi_fs_ls)) ||
++	    (core_if->core_params->phy_type == FH_PHY_TYPE_PARAM_FS)) {
++		/* Full speed PHY */
++		val = FH_HCFG_48_MHZ;
++	} else {
++		/* High speed PHY running at full speed or high speed */
++		val = FH_HCFG_30_60_MHZ;
++	}
++
++	FH_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val);
++	hcfg.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
++	hcfg.b.fslspclksel = val;
++	FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32);
++}
++
++/**
++ * Initializes the DevSpd field of the DCFG register depending on the PHY type
++ * and the enumeration speed of the device.
++ */
++static void init_devspd(fh_otg_core_if_t * core_if)
++{
++	uint32_t val;
++	dcfg_data_t dcfg;
++
++	if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
++	     (core_if->hwcfg2.b.fs_phy_type == 1) &&
++	     (core_if->core_params->ulpi_fs_ls)) ||
++	    (core_if->core_params->phy_type == FH_PHY_TYPE_PARAM_FS)) {
++		/* Full speed PHY */
++		val = 0x3;
++	} else if (core_if->core_params->speed == FH_SPEED_PARAM_FULL) {
++		/* High speed PHY running at full speed */
++		val = 0x1;
++	} else {
++		/* High speed PHY running at high speed */
++		val = 0x0;
++	}
++
++	FH_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
++
++	dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
++	dcfg.b.devspd = val;
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
++}
++
++/**
++ * This function calculates the number of IN EPS
++ * using GHWCFG1 and GHWCFG2 registers values
++ *
++ * @param core_if Programming view of the FH_otg controller
++ */
++static uint32_t calc_num_in_eps(fh_otg_core_if_t * core_if)
++{
++	uint32_t num_in_eps = 0;
++	uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
++	uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3;
++	uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps;
++	int i;
++
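++	/*
++	 * GHWCFG1 appears to encode each endpoint's direction in a 2-bit
++	 * field (0 = bidirectional, 1 = IN only, 2 = OUT only), EP0 first.
++	 * Shifting by 3 aligns the high bit of EP1's field, so a clear bit
++	 * means the endpoint is IN-capable; calc_num_out_eps() shifts by 2
++	 * to test the low bit instead.
++	 */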
++	for (i = 0; i < num_eps; ++i) {
++		if (!(hwcfg1 & 0x1))
++			num_in_eps++;
++
++		hwcfg1 >>= 2;
++	}
++
++	if (core_if->hwcfg4.b.ded_fifo_en) {
++		num_in_eps =
++		    (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps;
++	}
++
++	return num_in_eps;
++}
++
++/**
++ * This function calculates the number of OUT EPS
++ * using GHWCFG1 and GHWCFG2 registers values
++ *
++ * @param core_if Programming view of the FH_otg controller
++ */
++static uint32_t calc_num_out_eps(fh_otg_core_if_t * core_if)
++{
++	uint32_t num_out_eps = 0;
++	uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
++	uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2;
++	int i;
++
++	for (i = 0; i < num_eps; ++i) {
++		if (!(hwcfg1 & 0x1))
++			num_out_eps++;
++
++		hwcfg1 >>= 2;
++	}
++	return num_out_eps;
++}
++
++/**
++ * This function initializes the FH_otg controller registers and
++ * prepares the core for device mode or host mode operation.
++ *
++ * @param core_if Programming view of the FH_otg controller
++ *
++ */
++void fh_otg_core_init(fh_otg_core_if_t * core_if)
++{
++	int i = 0;
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	gahbcfg_data_t ahbcfg = {.d32 = 0 };
++	gusbcfg_data_t usbcfg = {.d32 = 0 };
++	gi2cctl_data_t i2cctl = {.d32 = 0 };
++
++	FH_DEBUGPL(DBG_CILV, "fh_otg_core_init(%p)\n", core_if);
++
++	/* Common Initialization */
++	usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
++
++	/* Program the ULPI External VBUS bit if needed */
++	usbcfg.b.ulpi_ext_vbus_drv =
++	    (core_if->core_params->phy_ulpi_ext_vbus ==
++	     FH_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
++
++	/* Set external TS Dline pulsing */
++	usbcfg.b.term_sel_dl_pulse =
++	    (core_if->core_params->ts_dline == 1) ? 1 : 0;
++	FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
++
++	/* Reset the Controller */
++	fh_otg_core_reset(core_if);
++
++	core_if->adp_enable = core_if->core_params->adp_supp_enable;
++	core_if->power_down = core_if->core_params->power_down;
++
++	/* Initialize parameters from Hardware configuration registers. */
++	dev_if->num_in_eps = calc_num_in_eps(core_if);
++	dev_if->num_out_eps = calc_num_out_eps(core_if);
++
++	FH_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",
++		    core_if->hwcfg4.b.num_dev_perio_in_ep);
++
++	for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
++		dev_if->perio_tx_fifo_size[i] =
++		    FH_READ_REG32(&global_regs->dtxfsiz[i]) >> 16;
++		FH_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n",
++			    i, dev_if->perio_tx_fifo_size[i]);
++	}
++
++	for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++		dev_if->tx_fifo_size[i] =
++		    FH_READ_REG32(&global_regs->dtxfsiz[i]) >> 16;
++		FH_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n",
++			    i, dev_if->tx_fifo_size[i]);
++	}
++
++	core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth;
++	core_if->rx_fifo_size = FH_READ_REG32(&global_regs->grxfsiz);
++	core_if->nperio_tx_fifo_size =
++	    FH_READ_REG32(&global_regs->gnptxfsiz) >> 16;
++
++	FH_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size);
++	FH_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size);
++	FH_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n",
++		    core_if->nperio_tx_fifo_size);
++
++	/* This programming sequence needs to happen in FS mode before any other
++	 * programming occurs */
++	if ((core_if->core_params->speed == FH_SPEED_PARAM_FULL) &&
++	    (core_if->core_params->phy_type == FH_PHY_TYPE_PARAM_FS)) {
++		/* If FS mode with FS PHY */
++
++		/* core_init() is now called on every switch so only call the
++		 * following for the first time through. */
++		if (!core_if->phy_init_done) {
++			core_if->phy_init_done = 1;
++			FH_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
++			usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
++			usbcfg.b.physel = 1;
++			FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
++
++			/* Reset after a PHY select */
++			fh_otg_core_reset(core_if);
++		}
++
++		/* Program DCFG.DevSpd or HCFG.FSLSPClkSel to 48 MHz in FS. Also
++		 * do this on HNP Dev/Host mode switches (done in dev_init and
++		 * host_init). */
++		if (fh_otg_is_host_mode(core_if)) {
++			init_fslspclksel(core_if);
++		} else {
++			init_devspd(core_if);
++		}
++
++		if (core_if->core_params->i2c_enable) {
++			FH_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
++			/* Program GUSBCFG.OtgUtmifsSel to I2C */
++			usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
++			usbcfg.b.otgutmifssel = 1;
++			FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
++
++			/* Program GI2CCTL.I2CEn */
++			i2cctl.d32 = FH_READ_REG32(&global_regs->gi2cctl);
++			i2cctl.b.i2cdevaddr = 1;
++			i2cctl.b.i2cen = 0;
++			FH_WRITE_REG32(&global_regs->gi2cctl, i2cctl.d32);
++			i2cctl.b.i2cen = 1;
++			FH_WRITE_REG32(&global_regs->gi2cctl, i2cctl.d32);
++		}
++
++	} /* endif speed == FH_SPEED_PARAM_FULL */
++	else {
++		/* High speed PHY. */
++		if (!core_if->phy_init_done) {
++			core_if->phy_init_done = 1;
++			/* HS PHY parameters.  These parameters are preserved
++			 * during soft reset so only program the first time.  Do
++			 * a soft reset immediately after setting phyif.  */
++
++			if (core_if->core_params->phy_type == 2) {
++				/* ULPI interface */
++				usbcfg.b.ulpi_utmi_sel = 1;
++				usbcfg.b.phyif = 0;
++				usbcfg.b.ddrsel =
++				    core_if->core_params->phy_ulpi_ddr;
++			} else if (core_if->core_params->phy_type == 1) {
++				/* UTMI+ interface */
++				usbcfg.b.ulpi_utmi_sel = 0;
++				if (core_if->core_params->phy_utmi_width == 16) {
++					usbcfg.b.phyif = 1;
++
++				} else {
++					usbcfg.b.phyif = 0;
++				}
++			} else {
++				FH_ERROR("FS PHY TYPE\n");
++			}
++			FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
++			/* Reset after setting the PHY parameters */
++			fh_otg_core_reset(core_if);
++		}
++	}
++
++	if ((core_if->hwcfg2.b.hs_phy_type == 2) &&
++	    (core_if->hwcfg2.b.fs_phy_type == 1) &&
++	    (core_if->core_params->ulpi_fs_ls)) {
++		FH_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
++		usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
++		usbcfg.b.ulpi_fsls = 1;
++		usbcfg.b.ulpi_clk_sus_m = 1;
++		FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
++	} else {
++		usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
++		usbcfg.b.ulpi_fsls = 0;
++		usbcfg.b.ulpi_clk_sus_m = 0;
++		FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
++	}
++
++	/* Program the GAHBCFG Register. */
++	switch (core_if->hwcfg2.b.architecture) {
++
++	case FH_SLAVE_ONLY_ARCH:
++		FH_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
++		ahbcfg.b.nptxfemplvl_txfemplvl =
++		    FH_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
++		ahbcfg.b.ptxfemplvl = FH_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
++		core_if->dma_enable = 0;
++		core_if->dma_desc_enable = 0;
++		break;
++
++	case FH_EXT_DMA_ARCH:
++		FH_DEBUGPL(DBG_CIL, "External DMA Mode\n");
++		{
++			uint8_t brst_sz = core_if->core_params->dma_burst_size;
++			ahbcfg.b.hburstlen = 0;
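++			/* Encode hburstlen as log2(dma_burst_size); the burst
++			 * size is expected to be a power of two. */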
++			while (brst_sz > 1) {
++				ahbcfg.b.hburstlen++;
++				brst_sz >>= 1;
++			}
++		}
++		core_if->dma_enable = (core_if->core_params->dma_enable != 0);
++		core_if->dma_desc_enable =
++		    (core_if->core_params->dma_desc_enable != 0);
++		break;
++
++	case FH_INT_DMA_ARCH:
++		FH_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
++		/* The old value was FH_GAHBCFG_INT_DMA_BURST_INCR; changed to
++		   INCR4 as a fix for a Host mode ISOC issue - vahrama */
++		ahbcfg.b.hburstlen = FH_GAHBCFG_INT_DMA_BURST_INCR4;
++		core_if->dma_enable = (core_if->core_params->dma_enable != 0);
++		core_if->dma_desc_enable =
++		    (core_if->core_params->dma_desc_enable != 0);
++		break;
++
++	}
++	if (core_if->dma_enable) {
++		if (core_if->dma_desc_enable) {
++			FH_PRINTF("Using Descriptor DMA mode\n");
++		} else {
++			FH_PRINTF("Using Buffer DMA mode\n");
++		}
++	} else {
++		FH_PRINTF("Using Slave mode\n");
++		core_if->dma_desc_enable = 0;
++	}
++
++	if (core_if->core_params->ahb_single) {
++		ahbcfg.b.ahbsingle = 1;
++	}
++
++	ahbcfg.b.dmaenable = core_if->dma_enable;
++	FH_WRITE_REG32(&global_regs->gahbcfg, ahbcfg.d32);
++
++	core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en;
++
++	core_if->pti_enh_enable = core_if->core_params->pti_enable != 0;
++	core_if->multiproc_int_enable = core_if->core_params->mpi_enable;
++	FH_PRINTF("Periodic Transfer Interrupt Enhancement - %s\n",
++		   ((core_if->pti_enh_enable) ? "enabled" : "disabled"));
++	FH_PRINTF("Multiprocessor Interrupt Enhancement - %s\n",
++		   ((core_if->multiproc_int_enable) ? "enabled" : "disabled"));
++
++	/*
++	 * Program the GUSBCFG register.
++	 */
++	usbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
++
++	switch (core_if->hwcfg2.b.op_mode) {
++	case FH_MODE_HNP_SRP_CAPABLE:
++		usbcfg.b.hnpcap = (core_if->core_params->otg_cap ==
++				   FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
++		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
++				   FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
++		break;
++
++	case FH_MODE_SRP_ONLY_CAPABLE:
++		usbcfg.b.hnpcap = 0;
++		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
++				   FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
++		break;
++
++	case FH_MODE_NO_HNP_SRP_CAPABLE:
++		usbcfg.b.hnpcap = 0;
++		usbcfg.b.srpcap = 0;
++		break;
++
++	case FH_MODE_SRP_CAPABLE_DEVICE:
++		usbcfg.b.hnpcap = 0;
++		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
++				   FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
++		break;
++
++	case FH_MODE_NO_SRP_CAPABLE_DEVICE:
++		usbcfg.b.hnpcap = 0;
++		usbcfg.b.srpcap = 0;
++		break;
++
++	case FH_MODE_SRP_CAPABLE_HOST:
++		usbcfg.b.hnpcap = 0;
++		usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
++				   FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
++		break;
++
++	case FH_MODE_NO_SRP_CAPABLE_HOST:
++		usbcfg.b.hnpcap = 0;
++		usbcfg.b.srpcap = 0;
++		break;
++	}
++
++	FH_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
++
++#ifdef CONFIG_USB_FH_OTG_LPM
++	if (core_if->core_params->lpm_enable) {
++		glpmcfg_data_t lpmcfg = {.d32 = 0 };
++
++		/* To enable LPM support set lpm_cap_en bit */
++		lpmcfg.b.lpm_cap_en = 1;
++
++		/* Make AppL1Res ACK */
++		lpmcfg.b.appl_resp = 1;
++
++		/* Retry 3 times */
++		lpmcfg.b.retry_count = 3;
++
++		FH_MODIFY_REG32(&core_if->core_global_regs->glpmcfg,
++				 0, lpmcfg.d32);
++
++	}
++#endif
++	if (core_if->core_params->ic_usb_cap) {
++		gusbcfg_data_t gusbcfg = {.d32 = 0 };
++		gusbcfg.b.ic_usb_cap = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gusbcfg,
++				 0, gusbcfg.d32);
++	}
++	{
++		gotgctl_data_t gotgctl = {.d32 = 0 };
++		gotgctl.b.otgver = core_if->core_params->otg_ver;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gotgctl, 0,
++				 gotgctl.d32);
++		/* Set OTG version supported */
++		core_if->otg_ver = core_if->core_params->otg_ver;
++		FH_PRINTF("OTG VER PARAM: %d, OTG VER FLAG: %d\n",
++			   core_if->core_params->otg_ver, core_if->otg_ver);
++	}
++
++	/* Enable common interrupts */
++	fh_otg_enable_common_interrupts(core_if);
++	
++	/* Do device or host initialization based on mode during PCD
++	 * and HCD initialization */
++	if (fh_otg_is_host_mode(core_if)) {
++		FH_DEBUGPL(DBG_ANY, "Host Mode\n");
++		core_if->op_state = A_HOST;
++	} else {
++		FH_DEBUGPL(DBG_ANY, "Device Mode\n");
++		core_if->op_state = B_PERIPHERAL;
++#ifdef FH_DEVICE_ONLY
++		fh_otg_core_dev_init(core_if);
++#endif
++	}
++}
++
++
++/**
++ * This function enables the Device mode interrupts.
++ *
++ * @param core_if Programming view of FH_otg controller
++ */
++void fh_otg_enable_device_interrupts(fh_otg_core_if_t * core_if)
++{
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++
++	FH_DEBUGPL(DBG_CIL, "%s()\n", __func__);
++
++	/* Disable all interrupts. */
++	FH_WRITE_REG32(&global_regs->gintmsk, 0);
++
++	/* Clear any pending interrupts */
++	FH_WRITE_REG32(&global_regs->gintsts, 0xFFFFFFFF);
++
++	/* Enable the common interrupts */
++	fh_otg_enable_common_interrupts(core_if);
++
++	/* Enable interrupts */
++	intr_mask.b.usbreset = 1;
++	intr_mask.b.enumdone = 1;
++	/* Disable Disconnect interrupt in Device mode */
++	intr_mask.b.disconnect = 0;
++
++	if (!core_if->multiproc_int_enable) {
++		intr_mask.b.inepintr = 1;
++		intr_mask.b.outepintr = 1;
++	}
++
++	intr_mask.b.erlysuspend = 1;
++
++	if (core_if->en_multiple_tx_fifo == 0) {
++		intr_mask.b.epmismatch = 1;
++	}
++
++	//intr_mask.b.incomplisoout = 1;
++	if (!core_if->dma_desc_enable)
++		intr_mask.b.incomplisoin = 1;
++
++/* Enable the ignore frame number for ISOC xfers - MAS */
++/* Disabled to support high bandwidth ISOC transfers - manukz */
++#if 0
++#ifdef FH_UTE_PER_IO
++	if (core_if->dma_enable) {
++		if (core_if->dma_desc_enable) {
++			dctl_data_t dctl1 = {.d32 = 0 };
++			dctl1.b.ifrmnum = 1;
++			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
++					 dctl, 0, dctl1.d32);
++			FH_DEBUG("----Enabled Ignore frame number (0x%08x)",
++				  FH_READ_REG32(&core_if->dev_if->
++						 dev_global_regs->dctl));
++		}
++	}
++#endif
++#endif
++#ifdef FH_EN_ISOC
++	if (core_if->dma_enable) {
++		if (core_if->dma_desc_enable == 0) {
++			if (core_if->pti_enh_enable) {
++				dctl_data_t dctl = {.d32 = 0 };
++				dctl.b.ifrmnum = 1;
++				FH_MODIFY_REG32(&core_if->
++						 dev_if->dev_global_regs->dctl,
++						 0, dctl.d32);
++			} else {
++				intr_mask.b.incomplisoin = 1;
++				intr_mask.b.incomplisoout = 1;
++			}
++		}
++	} else {
++		intr_mask.b.incomplisoin = 1;
++		intr_mask.b.incomplisoout = 1;
++	}
++#endif /* FH_EN_ISOC */
++
++	/** @todo NGS: Should this be a module parameter? */
++#ifdef USE_PERIODIC_EP
++	intr_mask.b.isooutdrop = 1;
++	intr_mask.b.eopframe = 1;
++	intr_mask.b.incomplisoin = 1;
++	intr_mask.b.incomplisoout = 1;
++#endif
++
++	FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
++
++	FH_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
++		    FH_READ_REG32(&global_regs->gintmsk));
++}
++
++/**
++ * This function initializes the FH_otg controller registers for
++ * device mode.
++ *
++ * @param core_if Programming view of FH_otg controller
++ *
++ */
++void fh_otg_core_dev_init(fh_otg_core_if_t * core_if)
++{
++	int i;
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	fh_otg_core_params_t *params = core_if->core_params;
++	dcfg_data_t dcfg = {.d32 = 0 };
++	depctl_data_t diepctl = {.d32 = 0 };
++	grstctl_t resetctl = {.d32 = 0 };
++	uint32_t rx_fifo_size;
++	fifosize_data_t nptxfifosize;
++	fifosize_data_t txfifosize;
++	dthrctl_data_t dthrctl;
++	fifosize_data_t ptxfifosize;
++	uint16_t rxfsiz, nptxfsiz;
++	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
++	hwcfg3_data_t hwcfg3 = {.d32 = 0 };
++	gotgctl_data_t gotgctl = {.d32 = 0 };
++
++	pcgcctl_data_t pcgcctl = {.d32 = 0 };
++
++	/* Restart the Phy Clock */
++	pcgcctl.b.stoppclk = 1;
++	FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
++	fh_udelay(10);
++
++	/* Device configuration register */
++	init_devspd(core_if);
++	dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
++	dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0;
++	dcfg.b.perfrint = FH_DCFG_FRAME_INTERVAL_80;
++	/* Enable Device OUT NAK in case of DDMA mode */
++	if (core_if->core_params->dev_out_nak) {
++		dcfg.b.endevoutnak = 1;
++	}
++
++	if (core_if->core_params->cont_on_bna) {
++		dctl_data_t dctl = {.d32 = 0 };
++		dctl.b.encontonbna = 1;
++		FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
++	}
++	/** should be done before every reset */
++	if (core_if->otg_ver) {
++		core_if->otg_sts = 0;
++		gotgctl.b.devhnpen = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gotgctl, gotgctl.d32, 0);
++	}
++	
++	FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
++
++	/* Configure data FIFO sizes */
++	if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
++		FH_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
++			    core_if->total_fifo_size);
++		FH_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n",
++			    params->dev_rx_fifo_size);
++		FH_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",
++			    params->dev_nperio_tx_fifo_size);
++
++		/* Rx FIFO */
++		FH_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
++			    FH_READ_REG32(&global_regs->grxfsiz));
++
++#ifdef FH_UTE_CFI
++		core_if->pwron_rxfsiz = FH_READ_REG32(&global_regs->grxfsiz);
++		core_if->init_rxfsiz = params->dev_rx_fifo_size;
++#endif
++		rx_fifo_size = params->dev_rx_fifo_size;
++		FH_WRITE_REG32(&global_regs->grxfsiz, rx_fifo_size);
++
++		FH_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
++			    FH_READ_REG32(&global_regs->grxfsiz));
++
++		/** Set Periodic Tx FIFO Mask all bits 0 */
++		core_if->p_tx_msk = 0;
++
++		/** Set Tx FIFO Mask all bits 0 */
++		core_if->tx_msk = 0;
++
++		if (core_if->en_multiple_tx_fifo == 0) {
++			/* Non-periodic Tx FIFO */
++			FH_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
++				    FH_READ_REG32(&global_regs->gnptxfsiz));
++
++			nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
++			nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
++
++			FH_WRITE_REG32(&global_regs->gnptxfsiz,
++					nptxfifosize.d32);
++
++			FH_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
++				    FH_READ_REG32(&global_regs->gnptxfsiz));
++
++			/**@todo NGS: Fix Periodic FIFO Sizing! */
++			/*
++			 * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15.
++			 * Indexes of the FIFO size module parameters in the
++			 * dev_perio_tx_fifo_size array and the FIFO size registers in
++			 * the dptxfsiz array run from 0 to 14.
++			 */
++			/** @todo Finish debug of this */
++			ptxfifosize.b.startaddr =
++			    nptxfifosize.b.startaddr + nptxfifosize.b.depth;
++			for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
++				ptxfifosize.b.depth =
++				    params->dev_perio_tx_fifo_size[i];
++				FH_DEBUGPL(DBG_CIL,
++					    "initial dtxfsiz[%d]=%08x\n", i,
++					    FH_READ_REG32(&global_regs->dtxfsiz
++							   [i]));
++				FH_WRITE_REG32(&global_regs->dtxfsiz[i],
++						ptxfifosize.d32);
++				FH_DEBUGPL(DBG_CIL, "new dtxfsiz[%d]=%08x\n",
++					    i,
++					    FH_READ_REG32(&global_regs->dtxfsiz
++							   [i]));
++				ptxfifosize.b.startaddr += ptxfifosize.b.depth;
++			}
++		} else {
++			/*
++			 * Tx FIFOs These FIFOs are numbered from 1 to 15.
++			 * Indexes of the FIFO size module parameters in the
++			 * dev_tx_fifo_size array and the FIFO size registers in
++			 * the dtxfsiz array run from 0 to 14.
++			 */
++
++			/* Non-periodic Tx FIFO */
++			FH_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
++				    FH_READ_REG32(&global_regs->gnptxfsiz));
++
++#ifdef FH_UTE_CFI
++			core_if->pwron_gnptxfsiz =
++			    (FH_READ_REG32(&global_regs->gnptxfsiz) >> 16);
++			core_if->init_gnptxfsiz =
++			    params->dev_nperio_tx_fifo_size;
++#endif
++			nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
++			nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
++
++			FH_WRITE_REG32(&global_regs->gnptxfsiz,
++					nptxfifosize.d32);
++
++			FH_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
++				    FH_READ_REG32(&global_regs->gnptxfsiz));
++
++			txfifosize.b.startaddr =
++			    nptxfifosize.b.startaddr + nptxfifosize.b.depth;
++
++			for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
++
++				txfifosize.b.depth =
++				    params->dev_tx_fifo_size[i];
++
++#ifdef FH_UTE_CFI
++				core_if->pwron_txfsiz[i] =
++				    (FH_READ_REG32
++				     (&global_regs->dtxfsiz[i]) >> 16);
++				core_if->init_txfsiz[i] =
++				    params->dev_tx_fifo_size[i];
++#endif
++				FH_WRITE_REG32(&global_regs->dtxfsiz[i],
++						txfifosize.d32);
++
++				FH_DEBUGPL(DBG_CIL,
++					    "new dtxfsiz[%d]=%08x\n",
++					    i,
++					    FH_READ_REG32(&global_regs->dtxfsiz
++							   [i]));
++
++				txfifosize.b.startaddr += txfifosize.b.depth;
++			}
++			
++			/* Calculating DFIFOCFG for Device mode to include RxFIFO and NPTXFIFO 
++			 * Before 3.00a EpInfoBase was being configured in ep enable/disable 
++			 * routine as well. Starting from 3.00a it will be set to the end of
++			 * allocated FIFO space here due to ep 0 OUT always keeping enabled
++			 */
++			gdfifocfg.d32 = FH_READ_REG32(&global_regs->gdfifocfg);
++			hwcfg3.d32 = FH_READ_REG32(&global_regs->ghwcfg3);
++			gdfifocfg.b.gdfifocfg = (hwcfg3.d32 >> 16);
++			FH_WRITE_REG32(&global_regs->gdfifocfg, gdfifocfg.d32);
++			if (core_if->snpsid <= OTG_CORE_REV_2_94a) {
++				rxfsiz = (FH_READ_REG32(&global_regs->grxfsiz) & 0x0000ffff);
++				nptxfsiz = (FH_READ_REG32(&global_regs->gnptxfsiz) >> 16);
++				gdfifocfg.b.epinfobase = rxfsiz + nptxfsiz;
++			} else {
++				gdfifocfg.b.epinfobase = txfifosize.b.startaddr;
++			}
++			FH_WRITE_REG32(&global_regs->gdfifocfg, gdfifocfg.d32);
++		}
++	}
++
++	/* Flush the FIFOs */
++	fh_otg_flush_tx_fifo(core_if, 0x10);	/* all Tx FIFOs */
++	fh_otg_flush_rx_fifo(core_if);
++
++	/* Flush the Learning Queue. */
++	resetctl.b.intknqflsh = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
++
++	if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable) {
++		core_if->start_predict = 0;
++		for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
++			core_if->nextep_seq[i] = 0xff;	// 0xff - EP not active
++		}
++		core_if->nextep_seq[0] = 0;
++		core_if->first_in_nextep_seq = 0;
++		diepctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl);
++		diepctl.b.nextep = 0;
++		FH_WRITE_REG32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
++
++		/* Update IN Endpoint Mismatch Count by active IN NP EP count + 1 */
++		dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
++		dcfg.b.epmscnt = 2;
++		FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
++
++		FH_DEBUGPL(DBG_CILV,
++			    "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
++			    __func__, core_if->first_in_nextep_seq);
++		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++			FH_DEBUGPL(DBG_CILV, "%2d ", core_if->nextep_seq[i]);
++		}
++		FH_DEBUGPL(DBG_CILV, "\n");
++	}
++
++	/* Clear all pending Device Interrupts */
++	/** @todo - does this condition need to be checked, or should all
++	 *  pending interrupts be cleared in any case?
++	 */
++	if (core_if->multiproc_int_enable) {
++		for (i = 0; i < core_if->dev_if->num_in_eps; ++i) {
++			FH_WRITE_REG32(&dev_if->dev_global_regs->
++					diepeachintmsk[i], 0);
++		}
++
++		for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
++			FH_WRITE_REG32(&dev_if->dev_global_regs->
++					doepeachintmsk[i], 0);
++		}
++
++		FH_WRITE_REG32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF);
++		FH_WRITE_REG32(&dev_if->dev_global_regs->deachintmsk, 0);
++	} else {
++		FH_WRITE_REG32(&dev_if->dev_global_regs->diepmsk, 0);
++		FH_WRITE_REG32(&dev_if->dev_global_regs->doepmsk, 0);
++		FH_WRITE_REG32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF);
++		FH_WRITE_REG32(&dev_if->dev_global_regs->daintmsk, 0);
++	}
++
++	for (i = 0; i <= dev_if->num_in_eps; i++) {
++		depctl_data_t depctl;
++		depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
++		if (depctl.b.epena) {
++			depctl.d32 = 0;
++			depctl.b.epdis = 1;
++			depctl.b.snak = 1;
++		} else {
++			depctl.d32 = 0;
++		}
++
++		FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
++
++		FH_WRITE_REG32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
++		FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepdma, 0);
++		FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
++	}
++
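++	/*
++	 * Disabling an enabled OUT endpoint requires the global-OUT-NAK
++	 * handshake: set DCTL.SGOUTNak, wait for GINTSTS.GOUTNakEff, disable
++	 * the endpoint with DOEPCTL.EPDis/SNAK, wait for DOEPINT.EPDisabled,
++	 * then clear the NAK with DCTL.CGOUTNak (sequence as implemented
++	 * below).
++	 */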
++	for (i = 1; i <= dev_if->num_out_eps; i++) {
++		depctl_data_t depctl;
++		depctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
++		if (depctl.b.epena) {
++			int j = 0;
++			dctl_data_t dctl = {.d32 = 0 };
++			gintmsk_data_t gintsts = {.d32 = 0 };
++			doepint_data_t doepint = {.d32 = 0 };
++			device_grxsts_data_t status;
++			dctl.b.sgoutnak = 1;
++			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
++			if (!core_if->dma_enable) {
++				do {
++					j++;
++					fh_udelay(10);
++					gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++					if (j == 100000) {
++						FH_ERROR("SNAK is not set within 1s\n");
++						break;
++					}
++				} while (!gintsts.b.rxstsqlvl);
++				status.d32 = FH_READ_REG32(&global_regs->grxstsp);
++				if (status.b.pktsts == FH_DSTS_GOUT_NAK)
++					FH_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
++				gintsts.d32 = 0;
++				gintsts.b.rxstsqlvl = 1;
++				FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++			}
++			j = 0;
++			do {
++				j++;
++				fh_udelay(10);
++				gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++				if (j == 100000) {
++					FH_ERROR("SNAK is not set within 1s\n");
++					break;
++				}
++			} while (!gintsts.b.goutnakeff);
++			gintsts.d32 = 0;
++			gintsts.b.goutnakeff = 1;
++			FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++			depctl.d32 = 0;
++			depctl.b.epdis = 1;
++			depctl.b.snak = 1;
++			j = 0;
++			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doepctl, depctl.d32);
++			do {
++				j++;
++				fh_udelay(10);
++				doepint.d32 = FH_READ_REG32(&core_if->dev_if->
++					out_ep_regs[i]->doepint);
++				if (j == 100000) {
++					FH_ERROR("EPDIS was not set within 1s\n");
++					break;
++				}
++			} while (!doepint.b.epdisabled);
++
++			doepint.b.epdisabled = 1;
++			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[i]->doepint, doepint.d32);
++
++			dctl.d32 = 0;
++			dctl.b.cgoutnak = 1;
++			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
++		} else {
++			depctl.d32 = 0;
++		}
++
++		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32);
++		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doeptsiz, 0);
++		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepdma, 0);
++		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepint, 0xFF);
++	}
++
++	if (core_if->en_multiple_tx_fifo && core_if->dma_enable) {
++		dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1;
++		dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1;
++		dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1;
++
++		dev_if->rx_thr_length = params->rx_thr_length;
++		dev_if->tx_thr_length = params->tx_thr_length;
++
++		dev_if->setup_desc_index = 0;
++
++		dthrctl.d32 = 0;
++		dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
++		dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
++		dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
++		dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
++		dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
++		dthrctl.b.ahb_thr_ratio = params->ahb_thr_ratio;
++
++		FH_WRITE_REG32(&dev_if->dev_global_regs->dtknqr3_dthrctl,
++				dthrctl.d32);
++
++		FH_DEBUGPL(DBG_CIL,
++			    "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
++			    dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en,
++			    dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len,
++			    dthrctl.b.rx_thr_len);
++
++	}
++
++	fh_otg_enable_device_interrupts(core_if);
++
++	{
++		diepmsk_data_t msk = {.d32 = 0 };
++		msk.b.txfifoundrn = 1;
++		if (core_if->multiproc_int_enable) {
++			FH_MODIFY_REG32(&dev_if->dev_global_regs->
++					 diepeachintmsk[0], msk.d32, msk.d32);
++		} else {
++			FH_MODIFY_REG32(&dev_if->dev_global_regs->diepmsk,
++					 msk.d32, msk.d32);
++		}
++	}
++
++	if (core_if->multiproc_int_enable) {
++		/* Set NAK on Babble */
++		dctl_data_t dctl = {.d32 = 0 };
++		dctl.b.nakonbble = 1;
++		FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
++	}
++
++	if (core_if->snpsid >= OTG_CORE_REV_2_94a) {
++		dctl_data_t dctl = {.d32 = 0 };
++		dctl.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dctl);
++		dctl.b.sftdiscon = 0;
++		FH_WRITE_REG32(&dev_if->dev_global_regs->dctl, dctl.d32);
++	}
++}
++
++/**
++ * This function enables the Host mode interrupts.
++ *
++ * @param core_if Programming view of FH_otg controller
++ */
++void fh_otg_enable_host_interrupts(fh_otg_core_if_t * core_if)
++{
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_DEBUGPL(DBG_CIL, "%s()\n", __func__);
++
++	/* Disable all interrupts. */
++	FH_WRITE_REG32(&global_regs->gintmsk, 0);
++
++	/* Clear any pending interrupts. */
++	FH_WRITE_REG32(&global_regs->gintsts, 0xFFFFFFFF);
++
++	/* Enable the common interrupts */
++	fh_otg_enable_common_interrupts(core_if);
++
++	/*
++	 * Enable host mode interrupts without disturbing common
++	 * interrupts.
++	 */
++
++	intr_mask.b.disconnect = 1;
++	intr_mask.b.portintr = 1;
++	intr_mask.b.hcintr = 1;
++
++	FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
++}
++
++/**
++ * This function disables the Host Mode interrupts.
++ *
++ * @param core_if Programming view of FH_otg controller
++ */
++void fh_otg_disable_host_interrupts(fh_otg_core_if_t * core_if)
++{
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_DEBUGPL(DBG_CILV, "%s()\n", __func__);
++
++	/*
++	 * Disable host mode interrupts without disturbing common
++	 * interrupts.
++	 */
++	intr_mask.b.sofintr = 1;
++	intr_mask.b.portintr = 1;
++	intr_mask.b.hcintr = 1;
++	intr_mask.b.ptxfempty = 1;
++	intr_mask.b.nptxfempty = 1;
++
++	FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32, 0);
++}
++
++/**
++ * This function initializes the FH_otg controller registers for
++ * host mode.
++ *
++ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the
++ * request queues. Host channels are reset to ensure that they are ready for
++ * performing transfers.
++ *
++ * @param core_if Programming view of FH_otg controller
++ *
++ */
++void fh_otg_core_host_init(fh_otg_core_if_t * core_if)
++{
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	fh_otg_host_if_t *host_if = core_if->host_if;
++	fh_otg_core_params_t *params = core_if->core_params;
++	hprt0_data_t hprt0 = {.d32 = 0 };
++	fifosize_data_t nptxfifosize;
++	fifosize_data_t ptxfifosize;
++	uint16_t rxfsiz, nptxfsiz, hptxfsiz;
++	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
++	int i;
++	hcchar_data_t hcchar;
++	hcfg_data_t hcfg;
++	hfir_data_t hfir;
++	fh_otg_hc_regs_t *hc_regs;
++	int num_channels;
++	gotgctl_data_t gotgctl = {.d32 = 0 };
++	pcgcctl_data_t pcgcctl = {.d32 = 0 };
++	gintsts_data_t gintsts;
++
++	FH_DEBUGPL(DBG_CILV, "%s(%p)\n", __func__, core_if);
++
++	/* Restart the Phy Clock */
++	pcgcctl.b.stoppclk = 1;
++	FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
++	fh_udelay(10);
++	
++	if ((core_if->otg_ver == 1) && (core_if->op_state == A_HOST)) {
++		FH_PRINTF("Init: Port Power? op_state=%d\n", core_if->op_state);
++		hprt0.d32 = fh_otg_read_hprt0(core_if);
++		FH_PRINTF("Init: Power Port (%d)\n", hprt0.b.prtpwr);
++		if (hprt0.b.prtpwr == 0) {
++			hprt0.b.prtpwr = 1;
++			FH_WRITE_REG32(host_if->hprt0, hprt0.d32);
++		}
++	}
++
++	/* Initialize Host Configuration Register */
++	init_fslspclksel(core_if);
++	if (core_if->core_params->speed == FH_SPEED_PARAM_FULL) {
++		hcfg.d32 = FH_READ_REG32(&host_if->host_global_regs->hcfg);
++		hcfg.b.fslssupp = 1;
++		FH_WRITE_REG32(&host_if->host_global_regs->hcfg, hcfg.d32);
++
++	}
++
++	/* This bit allows dynamic reloading of the HFIR register during
++	 * runtime. It needs to be programmed during initial configuration
++	 * and its value must not be changed during runtime. */
++	if (core_if->core_params->reload_ctl == 1) {
++		hfir.d32 = FH_READ_REG32(&host_if->host_global_regs->hfir);
++		hfir.b.hfirrldctrl = 1;
++		FH_WRITE_REG32(&host_if->host_global_regs->hfir, hfir.d32);
++	}
++
++	if (core_if->core_params->dma_desc_enable) {
++		uint8_t op_mode = core_if->hwcfg2.b.op_mode;
++		if (!
++		    (core_if->hwcfg4.b.desc_dma
++		     && (core_if->snpsid >= OTG_CORE_REV_2_90a)
++		     && ((op_mode == FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
++			 || (op_mode == FH_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
++			 || (op_mode ==
++			     FH_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG)
++			 || (op_mode == FH_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)
++			 || (op_mode ==
++			     FH_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST)))) {
++
++			FH_ERROR("Host can't operate in Descriptor DMA mode.\n"
++				  "Either core version is below 2.90a or "
++				  "GHWCFG2, GHWCFG4 registers' values do not allow Descriptor DMA in host mode.\n"
++				  "To run the driver in Buffer DMA host mode set dma_desc_enable "
++				  "module parameter to 0.\n");
++			return;
++		}
++		hcfg.d32 = FH_READ_REG32(&host_if->host_global_regs->hcfg);
++		hcfg.b.descdma = 1;
++		FH_WRITE_REG32(&host_if->host_global_regs->hcfg, hcfg.d32);
++	}
++
++	/* Configure data FIFO sizes */
++	if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
++		FH_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
++			    core_if->total_fifo_size);
++		FH_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n",
++			    params->host_rx_fifo_size);
++		FH_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",
++			    params->host_nperio_tx_fifo_size);
++		FH_DEBUGPL(DBG_CIL, "P Tx FIFO Size=%d\n",
++			    params->host_perio_tx_fifo_size);
++
++		/* Rx FIFO */
++		FH_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
++			    FH_READ_REG32(&global_regs->grxfsiz));
++		FH_WRITE_REG32(&global_regs->grxfsiz,
++				params->host_rx_fifo_size);
++		FH_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
++			    FH_READ_REG32(&global_regs->grxfsiz));
++
++		/* Non-periodic Tx FIFO */
++		FH_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
++			    FH_READ_REG32(&global_regs->gnptxfsiz));
++		nptxfifosize.b.depth = params->host_nperio_tx_fifo_size;
++		nptxfifosize.b.startaddr = params->host_rx_fifo_size;
++		FH_WRITE_REG32(&global_regs->gnptxfsiz, nptxfifosize.d32);
++		FH_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
++			    FH_READ_REG32(&global_regs->gnptxfsiz));
++
++		/* Periodic Tx FIFO */
++		FH_DEBUGPL(DBG_CIL, "initial hptxfsiz=%08x\n",
++			    FH_READ_REG32(&global_regs->hptxfsiz));
++		ptxfifosize.b.depth = params->host_perio_tx_fifo_size;
++		ptxfifosize.b.startaddr =
++		    nptxfifosize.b.startaddr + nptxfifosize.b.depth;
++		FH_WRITE_REG32(&global_regs->hptxfsiz, ptxfifosize.d32);
++		FH_DEBUGPL(DBG_CIL, "new hptxfsiz=%08x\n",
++			    FH_READ_REG32(&global_regs->hptxfsiz));
++
++		if (core_if->en_multiple_tx_fifo) {
++			/* Global DFIFOCFG calculation for Host mode - include RxFIFO, NPTXFIFO and HPTXFIFO */
++			gdfifocfg.d32 = FH_READ_REG32(&global_regs->gdfifocfg);
++			rxfsiz = (FH_READ_REG32(&global_regs->grxfsiz) & 0x0000ffff);
++			nptxfsiz = (FH_READ_REG32(&global_regs->gnptxfsiz) >> 16);
++			hptxfsiz = (FH_READ_REG32(&global_regs->hptxfsiz) >> 16);
++			gdfifocfg.b.epinfobase = rxfsiz + nptxfsiz + hptxfsiz;
++			FH_WRITE_REG32(&global_regs->gdfifocfg, gdfifocfg.d32);
++		}
++	}
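++
++	/*
++	 * Illustrative FIFO layout (example sizes, not the driver defaults):
++	 * with host_rx_fifo_size = 512, host_nperio_tx_fifo_size = 256 and
++	 * host_perio_tx_fifo_size = 256 (all in 32-bit words), the shared
++	 * FIFO RAM is carved up as:
++	 *
++	 *	Rx FIFO:    words   0..511
++	 *	NP Tx FIFO: words 512..767   (startaddr = 512, depth = 256)
++	 *	P Tx FIFO:  words 768..1023  (startaddr = 768, depth = 256)
++	 *
++	 * which is exactly the startaddr arithmetic performed above.
++	 */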
++
++	/* TODO - check this */
++	/* Clear Host Set HNP Enable in the OTG Control Register */
++	gotgctl.b.hstsethnpen = 1;
++	FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
++	/* Make sure the FIFOs are flushed. */
++	fh_otg_flush_tx_fifo(core_if, 0x10 /* all TX FIFOs */ );
++	fh_otg_flush_rx_fifo(core_if);
++
++	/* Clear Host Set HNP Enable in the OTG Control Register */
++	gotgctl.b.hstsethnpen = 1;
++	FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
++
++	if (!core_if->core_params->dma_desc_enable) {
++		/* Flush out any leftover queued requests. */
++		num_channels = core_if->core_params->host_channels;
++
++		for (i = 0; i < num_channels; i++) {
++			hc_regs = core_if->host_if->hc_regs[i];
++			hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++			hcchar.b.chen = 0;
++			hcchar.b.chdis = 1;
++			hcchar.b.epdir = 0;
++			FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++		}
++
++		
++		/* Halt all channels to put them into a known state. */
++		for (i = 0; i < num_channels; i++) {
++			int count = 0;
++			hc_regs = core_if->host_if->hc_regs[i];
++			hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++			hcchar.b.chen = 1;
++			hcchar.b.chdis = 1;
++			hcchar.b.epdir = 0;
++			FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++			
++			if (!core_if->core_params->dma_enable) {
++				do {
++					gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++					if (++count > 1000) {
++						FH_ERROR
++							("%s: RxSTSQLVL interrupt wasn't seen for channel %d\n",
++							__func__, i);
++						break;
++					}
++					fh_udelay(1);
++				} while (!gintsts.b.rxstsqlvl);
++				
++				if (count <= 1000)
++					FH_READ_REG32(&core_if->core_global_regs->grxstsp);
++				count = 0;
++			}
++			
++			FH_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
++			do {
++				hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++				if (++count > 1000) {
++					FH_ERROR
++					    ("%s: Unable to clear halt on channel %d\n",
++					     __func__, i);
++					break;
++				}
++				fh_udelay(1);
++			} while (hcchar.b.chen);
++		}
++		
++	}
++
++	/* Turn on the vbus power. */
++	if ((core_if->otg_ver == 0) && (core_if->op_state == A_HOST)) {
++		hprt0.d32 = fh_otg_read_hprt0(core_if);
++		FH_PRINTF("Init: Power Port (%d)\n", hprt0.b.prtpwr);
++		if (hprt0.b.prtpwr == 0) {
++			hprt0.b.prtpwr = 1;
++			FH_WRITE_REG32(host_if->hprt0, hprt0.d32);
++		}
++	}
++
++	fh_otg_enable_host_interrupts(core_if);
++}
++
++/**
++ * Prepares a host channel for transferring packets to/from a specific
++ * endpoint. The HCCHARn register is set up with the characteristics specified
++ * in _hc. Host channel interrupts that may need to be serviced while this
++ * transfer is in progress are enabled.
++ *
++ * @param core_if Programming view of FH_otg controller
++ * @param hc Information needed to initialize the host channel
++ */
++void fh_otg_hc_init(fh_otg_core_if_t * core_if, fh_hc_t * hc)
++{
++	uint32_t intr_enable;
++	hcintmsk_data_t hc_intr_mask;
++	gintmsk_data_t gintmsk = {.d32 = 0 };
++	hcchar_data_t hcchar;
++	hcsplt_data_t hcsplt;
++
++	uint8_t hc_num = hc->hc_num;
++	fh_otg_host_if_t *host_if = core_if->host_if;
++	fh_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num];
++
++	/* Clear old interrupt conditions for this host channel. */
++	hc_intr_mask.d32 = 0xFFFFFFFF;
++	hc_intr_mask.b.reserved14_31 = 0;
++	FH_WRITE_REG32(&hc_regs->hcint, hc_intr_mask.d32);
++
++	/* Enable channel interrupts required for this transfer. */
++	hc_intr_mask.d32 = 0;
++	hc_intr_mask.b.chhltd = 1;
++	if (core_if->dma_enable) {
++		/* For Descriptor DMA mode core halts the channel on AHB error. Interrupt is not required */
++		if (!core_if->dma_desc_enable)
++			hc_intr_mask.b.ahberr = 1;
++		else {
++			if (hc->ep_type == FH_OTG_EP_TYPE_ISOC)
++				hc_intr_mask.b.xfercompl = 1;
++		}
++
++		if (hc->error_state && !hc->do_split &&
++		    hc->ep_type != FH_OTG_EP_TYPE_ISOC) {
++			hc_intr_mask.b.ack = 1;
++			if (hc->ep_is_in) {
++				hc_intr_mask.b.datatglerr = 1;
++				if (hc->ep_type != FH_OTG_EP_TYPE_INTR) {
++					hc_intr_mask.b.nak = 1;
++				}
++			}
++		}
++	} else {
++		switch (hc->ep_type) {
++		case FH_OTG_EP_TYPE_CONTROL:
++		case FH_OTG_EP_TYPE_BULK:
++			hc_intr_mask.b.xfercompl = 1;
++			hc_intr_mask.b.stall = 1;
++			hc_intr_mask.b.xacterr = 1;
++			hc_intr_mask.b.datatglerr = 1;
++			if (hc->ep_is_in) {
++				hc_intr_mask.b.bblerr = 1;
++			} else {
++				hc_intr_mask.b.nak = 1;
++				hc_intr_mask.b.nyet = 1;
++				if (hc->do_ping) {
++					hc_intr_mask.b.ack = 1;
++				}
++			}
++
++			if (hc->do_split) {
++				hc_intr_mask.b.nak = 1;
++				if (hc->complete_split) {
++					hc_intr_mask.b.nyet = 1;
++				} else {
++					hc_intr_mask.b.ack = 1;
++				}
++			}
++
++			if (hc->error_state) {
++				hc_intr_mask.b.ack = 1;
++			}
++			break;
++		case FH_OTG_EP_TYPE_INTR:
++			hc_intr_mask.b.xfercompl = 1;
++			hc_intr_mask.b.nak = 1;
++			hc_intr_mask.b.stall = 1;
++			hc_intr_mask.b.xacterr = 1;
++			hc_intr_mask.b.datatglerr = 1;
++			hc_intr_mask.b.frmovrun = 1;
++
++			if (hc->ep_is_in) {
++				hc_intr_mask.b.bblerr = 1;
++			}
++			if (hc->error_state) {
++				hc_intr_mask.b.ack = 1;
++			}
++			if (hc->do_split) {
++				if (hc->complete_split) {
++					hc_intr_mask.b.nyet = 1;
++				} else {
++					hc_intr_mask.b.ack = 1;
++				}
++			}
++			break;
++		case FH_OTG_EP_TYPE_ISOC:
++			hc_intr_mask.b.xfercompl = 1;
++			hc_intr_mask.b.frmovrun = 1;
++			hc_intr_mask.b.ack = 1;
++
++			if (hc->ep_is_in) {
++				hc_intr_mask.b.xacterr = 1;
++				hc_intr_mask.b.bblerr = 1;
++			}
++			break;
++		}
++	}
++	FH_WRITE_REG32(&hc_regs->hcintmsk, hc_intr_mask.d32);
++
++	/* Enable the top level host channel interrupt. */
++	intr_enable = (1 << hc_num);
++	FH_MODIFY_REG32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
++
++	/* Make sure host channel interrupts are enabled. */
++	gintmsk.b.hcintr = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
++
++	/*
++	 * Program the HCCHARn register with the endpoint characteristics for
++	 * the current transfer.
++	 */
++	hcchar.d32 = 0;
++	hcchar.b.devaddr = hc->dev_addr;
++	hcchar.b.epnum = hc->ep_num;
++	hcchar.b.epdir = hc->ep_is_in;
++	hcchar.b.lspddev = (hc->speed == FH_OTG_EP_SPEED_LOW);
++	hcchar.b.eptype = hc->ep_type;
++	hcchar.b.mps = hc->max_packet;
++
++	FH_WRITE_REG32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
++
++	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
++	FH_DEBUGPL(DBG_HCDV, "	 Dev Addr: %d\n", hcchar.b.devaddr);
++	FH_DEBUGPL(DBG_HCDV, "	 Ep Num: %d\n", hcchar.b.epnum);
++	FH_DEBUGPL(DBG_HCDV, "	 Is In: %d\n", hcchar.b.epdir);
++	FH_DEBUGPL(DBG_HCDV, "	 Is Low Speed: %d\n", hcchar.b.lspddev);
++	FH_DEBUGPL(DBG_HCDV, "	 Ep Type: %d\n", hcchar.b.eptype);
++	FH_DEBUGPL(DBG_HCDV, "	 Max Pkt: %d\n", hcchar.b.mps);
++	FH_DEBUGPL(DBG_HCDV, "	 Multi Cnt: %d\n", hcchar.b.multicnt);
++
++	/*
++	 * Program the HCSPLIT register for SPLITs
++	 */
++	hcsplt.d32 = 0;
++	if (hc->do_split) {
++		FH_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n",
++			    hc->hc_num,
++			    hc->complete_split ? "CSPLIT" : "SSPLIT");
++		hcsplt.b.compsplt = hc->complete_split;
++		hcsplt.b.xactpos = hc->xact_pos;
++		hcsplt.b.hubaddr = hc->hub_addr;
++		hcsplt.b.prtaddr = hc->port_addr;
++		FH_DEBUGPL(DBG_HCDV, "	  comp split %d\n", hc->complete_split);
++		FH_DEBUGPL(DBG_HCDV, "	  xact pos %d\n", hc->xact_pos);
++		FH_DEBUGPL(DBG_HCDV, "	  hub addr %d\n", hc->hub_addr);
++		FH_DEBUGPL(DBG_HCDV, "	  port addr %d\n", hc->port_addr);
++		FH_DEBUGPL(DBG_HCDV, "	  is_in %d\n", hc->ep_is_in);
++		FH_DEBUGPL(DBG_HCDV, "	  Max Pkt: %d\n", hcchar.b.mps);
++		FH_DEBUGPL(DBG_HCDV, "	  xferlen: %d\n", hc->xfer_len);
++	}
++	FH_WRITE_REG32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
++
++}
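++
++/*
++ * Usage sketch (illustrative only; the field values are hypothetical): a
++ * host controller driver typically fills in a fh_hc_t for a free channel
++ * and then calls fh_otg_hc_init() followed by fh_otg_hc_start_transfer():
++ *
++ *	hc->dev_addr   = 1;
++ *	hc->ep_num     = 2;
++ *	hc->ep_is_in   = 1;
++ *	hc->ep_type    = FH_OTG_EP_TYPE_BULK;
++ *	hc->max_packet = 512;
++ *	fh_otg_hc_init(core_if, hc);
++ *	fh_otg_hc_start_transfer(core_if, hc);
++ */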
++
++/**
++ * Attempts to halt a host channel. This function should only be called in
++ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
++ * normal circumstances in DMA mode, the controller halts the channel when the
++ * transfer is complete or a condition occurs that requires application
++ * intervention.
++ *
++ * In slave mode, checks for a free request queue entry, then sets the Channel
++ * Enable and Channel Disable bits of the Host Channel Characteristics
++ * register of the specified channel to initiate the halt. If there is no free
++ * request queue entry, sets only the Channel Disable bit of the HCCHARn
++ * register to flush requests for this channel. In the latter case, sets a
++ * flag to indicate that the host channel needs to be halted when a request
++ * queue slot is open.
++ *
++ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
++ * HCCHARn register. The controller ensures there is space in the request
++ * queue before submitting the halt request.
++ *
++ * Some time may elapse before the core flushes any posted requests for this
++ * host channel and halts. The Channel Halted interrupt handler completes the
++ * deactivation of the host channel.
++ *
++ * @param core_if Controller register interface.
++ * @param hc Host channel to halt.
++ * @param halt_status Reason for halting the channel.
++ */
++void fh_otg_hc_halt(fh_otg_core_if_t * core_if,
++		     fh_hc_t * hc, fh_otg_halt_status_e halt_status)
++{
++	gnptxsts_data_t nptxsts;
++	hptxsts_data_t hptxsts;
++	hcchar_data_t hcchar;
++	fh_otg_hc_regs_t *hc_regs;
++	fh_otg_core_global_regs_t *global_regs;
++	fh_otg_host_global_regs_t *host_global_regs;
++
++	hc_regs = core_if->host_if->hc_regs[hc->hc_num];
++	global_regs = core_if->core_global_regs;
++	host_global_regs = core_if->host_if->host_global_regs;
++
++	FH_ASSERT(!(halt_status == FH_OTG_HC_XFER_NO_HALT_STATUS),
++		   "halt_status = %d\n", halt_status);
++
++	if (halt_status == FH_OTG_HC_XFER_URB_DEQUEUE ||
++	    halt_status == FH_OTG_HC_XFER_AHB_ERR) {
++		/*
++		 * Disable all channel interrupts except Ch Halted. The QTD
++		 * and QH state associated with this transfer has been cleared
++		 * (in the case of URB_DEQUEUE), so the channel needs to be
++		 * shut down carefully to prevent crashes.
++		 */
++		int wtd = 10000;
++		hcintmsk_data_t hcintmsk;
++		hcintmsk.d32 = 0;
++		hcintmsk.b.chhltd = 1;
++		FH_WRITE_REG32(&hc_regs->hcintmsk, hcintmsk.d32);
++
++		/*
++		 * Make sure no other interrupts besides halt are currently
++		 * pending. Handling another interrupt could cause a crash due
++		 * to the QTD and QH state.
++		 */
++		FH_WRITE_REG32(&hc_regs->hcint, ~hcintmsk.d32);
++
++		/*
++		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
++		 * even if the channel was already halted for some other
++		 * reason.
++		 */
++		hc->halt_status = halt_status;
++
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++		while (wtd--) {
++			hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++			if (hcchar.b.chen == 0)
++				break;
++		}
++
++		if (hcchar.b.chen == 0) {
++			/*
++			 * The channel is either already halted or it hasn't
++			 * started yet. In DMA mode, the transfer may halt if
++			 * it finishes normally or a condition occurs that
++			 * requires driver intervention. Don't want to halt
++			 * the channel again. In either Slave or DMA mode,
++			 * it's possible that the transfer has been assigned
++			 * to a channel, but not started yet when an URB is
++			 * dequeued. Don't want to halt a channel that hasn't
++			 * started yet.
++			 */
++			return;
++		}
++	}
++	if (hc->halt_pending) {
++		/*
++		 * A halt has already been issued for this channel. This might
++		 * happen when a transfer is aborted by a higher level in
++		 * the stack.
++		 */
++#ifdef DEBUG
++		FH_PRINTF
++		    ("*** %s: Channel %d, _hc->halt_pending already set ***\n",
++		     __func__, hc->hc_num);
++
++#endif
++		return;
++	}
++
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++	/* No need to set the bit in DDMA for disabling the channel */
++	/* TODO: check this everywhere the channel is disabled */
++	if (!core_if->core_params->dma_desc_enable)
++		hcchar.b.chen = 1;
++	hcchar.b.chdis = 1;
++
++	if (!core_if->dma_enable) {
++		/* Check for space in the request queue to issue the halt. */
++		if (hc->ep_type == FH_OTG_EP_TYPE_CONTROL ||
++		    hc->ep_type == FH_OTG_EP_TYPE_BULK) {
++			nptxsts.d32 = FH_READ_REG32(&global_regs->gnptxsts);
++			if (nptxsts.b.nptxqspcavail == 0) {
++				hcchar.b.chen = 0;
++			}
++		} else {
++			hptxsts.d32 =
++			    FH_READ_REG32(&host_global_regs->hptxsts);
++			if ((hptxsts.b.ptxqspcavail == 0)
++			    || (core_if->queuing_high_bandwidth)) {
++				hcchar.b.chen = 0;
++			}
++		}
++	}
++	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++
++	hc->halt_status = halt_status;
++
++	if (hcchar.b.chen) {
++		hc->halt_pending = 1;
++		hc->halt_on_queue = 0;
++	} else {
++		hc->halt_on_queue = 1;
++	}
++
++	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
++	FH_DEBUGPL(DBG_HCDV, "	 hcchar: 0x%08x\n", hcchar.d32);
++	FH_DEBUGPL(DBG_HCDV, "	 halt_pending: %d\n", hc->halt_pending);
++	FH_DEBUGPL(DBG_HCDV, "	 halt_on_queue: %d\n", hc->halt_on_queue);
++	FH_DEBUGPL(DBG_HCDV, "	 halt_status: %d\n", hc->halt_status);
++
++	return;
++}
++
++/**
++ * Clears the transfer state for a host channel. This function is normally
++ * called after a transfer is done and the host channel is being released.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param hc Identifies the host channel to clean up.
++ */
++void fh_otg_hc_cleanup(fh_otg_core_if_t * core_if, fh_hc_t * hc)
++{
++	fh_otg_hc_regs_t *hc_regs;
++
++	hc->xfer_started = 0;
++
++	/*
++	 * Clear channel interrupt enables and any unhandled channel interrupt
++	 * conditions.
++	 */
++	hc_regs = core_if->host_if->hc_regs[hc->hc_num];
++	FH_WRITE_REG32(&hc_regs->hcintmsk, 0);
++	FH_WRITE_REG32(&hc_regs->hcint, 0xFFFFFFFF);
++#ifdef DEBUG
++	FH_TIMER_CANCEL(core_if->hc_xfer_timer[hc->hc_num]);
++#endif
++}
++
++/**
++ * Sets the channel property that indicates in which frame a periodic transfer
++ * should occur. This is always set to the _next_ frame. This function has no
++ * effect on non-periodic transfers.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param hc Identifies the host channel to set up and its properties.
++ * @param hcchar Current value of the HCCHAR register for the specified host
++ * channel.
++ */
++static inline void hc_set_even_odd_frame(fh_otg_core_if_t * core_if,
++					 fh_hc_t * hc, hcchar_data_t * hcchar)
++{
++	if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
++	    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
++		hfnum_data_t hfnum;
++		hfnum.d32 =
++		    FH_READ_REG32(&core_if->host_if->host_global_regs->hfnum);
++
++		/* 1 if _next_ frame is odd, 0 if it's even */
++		hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
++#ifdef DEBUG
++		if (hc->ep_type == FH_OTG_EP_TYPE_INTR && hc->do_split
++		    && !hc->complete_split) {
++			switch (hfnum.b.frnum & 0x7) {
++			case 7:
++				core_if->hfnum_7_samples++;
++				core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
++				break;
++			case 0:
++				core_if->hfnum_0_samples++;
++				core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
++				break;
++			default:
++				core_if->hfnum_other_samples++;
++				core_if->hfnum_other_frrem_accum +=
++				    hfnum.b.frrem;
++				break;
++			}
++		}
++#endif
++	}
++}
++
++#ifdef DEBUG
++void hc_xfer_timeout(void *ptr)
++{
++	hc_xfer_info_t *xfer_info = NULL;
++	int hc_num = 0;
++
++	if (ptr)
++		xfer_info = (hc_xfer_info_t *) ptr;
++
++	if (!xfer_info || !xfer_info->hc) {
++		FH_ERROR("%s: invalid xfer_info\n", __func__);
++		return;
++	}
++
++	hc_num = xfer_info->hc->hc_num;
++	FH_WARN("%s: timeout on channel %d\n", __func__, hc_num);
++	FH_WARN("	start_hcchar_val 0x%08x\n",
++		 xfer_info->core_if->start_hcchar_val[hc_num]);
++}
++#endif
++
++void ep_xfer_timeout(void *ptr)
++{
++	ep_xfer_info_t *xfer_info = NULL;
++	int ep_num = 0;
++	dctl_data_t dctl = {.d32 = 0 };
++	gintsts_data_t gintsts = {.d32 = 0 };
++	gintmsk_data_t gintmsk = {.d32 = 0 };
++
++	if (ptr)
++		xfer_info = (ep_xfer_info_t *) ptr;
++
++	if (!xfer_info || !xfer_info->ep) {
++		FH_ERROR("%s: invalid xfer_info\n", __func__);
++		return;
++	}
++
++	ep_num = xfer_info->ep->num;
++	FH_WARN("%s: timeout on endpoit %d\n", __func__, ep_num);
++	/* Put the sate to 2 as it was time outed */
++	xfer_info->state = 2;
++
++	dctl.d32 =
++	    FH_READ_REG32(&xfer_info->core_if->dev_if->dev_global_regs->dctl);
++	gintsts.d32 =
++	    FH_READ_REG32(&xfer_info->core_if->core_global_regs->gintsts);
++	gintmsk.d32 =
++	    FH_READ_REG32(&xfer_info->core_if->core_global_regs->gintmsk);
++
++	if (!gintmsk.b.goutnakeff) {
++		/* Unmask it */
++		gintmsk.b.goutnakeff = 1;
++		FH_WRITE_REG32(&xfer_info->core_if->core_global_regs->gintmsk,
++				gintmsk.d32);
++
++	}
++
++	if (!gintsts.b.goutnakeff) {
++		dctl.b.sgoutnak = 1;
++	}
++	FH_WRITE_REG32(&xfer_info->core_if->dev_if->dev_global_regs->dctl,
++			dctl.d32);
++
++}
++
++void set_pid_isoc(fh_hc_t * hc)
++{
++	/* Set up the initial PID for the transfer. */
++	if (hc->speed == FH_OTG_EP_SPEED_HIGH) {
++		if (hc->ep_is_in) {
++			if (hc->multi_count == 1) {
++				hc->data_pid_start = FH_OTG_HC_PID_DATA0;
++			} else if (hc->multi_count == 2) {
++				hc->data_pid_start = FH_OTG_HC_PID_DATA1;
++			} else {
++				hc->data_pid_start = FH_OTG_HC_PID_DATA2;
++			}
++		} else {
++			if (hc->multi_count == 1) {
++				hc->data_pid_start = FH_OTG_HC_PID_DATA0;
++			} else {
++				hc->data_pid_start = FH_OTG_HC_PID_MDATA;
++			}
++		}
++	} else {
++		hc->data_pid_start = FH_OTG_HC_PID_DATA0;
++	}
++}
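++
++/*
++ * Note: for high-bandwidth high-speed isochronous endpoints (up to three
++ * transactions per microframe), the starting PID encodes the per-microframe
++ * packet count: IN endpoints start with DATA0/DATA1/DATA2 for one/two/three
++ * packets, while OUT endpoints start with MDATA whenever more than one
++ * packet is sent; the core then advances the PID sequence on its own.
++ */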
++
++/**
++ * This function does the setup for a data transfer for a host channel and
++ * starts the transfer. May be called in either Slave mode or DMA mode. In
++ * Slave mode, the caller must ensure that there is sufficient space in the
++ * request queue and Tx Data FIFO.
++ *
++ * For an OUT transfer in Slave mode, it loads a data packet into the
++ * appropriate FIFO. If necessary, additional data packets will be loaded in
++ * the Host ISR.
++ *
++ * For an IN transfer in Slave mode, a data packet is requested. The data
++ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
++ * additional data packets are requested in the Host ISR.
++ *
++ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
++ * register along with a packet count of 1 and the channel is enabled. This
++ * causes a single PING transaction to occur. Other fields in HCTSIZ are
++ * simply set to 0 since no data transfer occurs in this case.
++ *
++ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
++ * all the information required to perform the subsequent data transfer. In
++ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
++ * controller performs the entire PING protocol, then starts the data
++ * transfer.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param hc Information needed to initialize the host channel. The xfer_len
++ * value may be reduced to accommodate the max widths of the XferSize and
++ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed
++ * to reflect the final xfer_len value.
++ */
++void fh_otg_hc_start_transfer(fh_otg_core_if_t * core_if, fh_hc_t * hc)
++{
++	hcchar_data_t hcchar;
++	hctsiz_data_t hctsiz;
++	uint16_t num_packets;
++	uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size;
++	uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count;
++	fh_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
++
++	hctsiz.d32 = 0;
++
++	if (hc->do_ping) {
++		if (!core_if->dma_enable) {
++			fh_otg_hc_do_ping(core_if, hc);
++			hc->xfer_started = 1;
++			return;
++		} else {
++			hctsiz.b.dopng = 1;
++		}
++	}
++
++	if (hc->do_split) {
++		num_packets = 1;
++
++		if (hc->complete_split && !hc->ep_is_in) {
++			/* For CSPLIT OUT Transfer, set the size to 0 so the
++			 * core doesn't expect any data written to the FIFO */
++			hc->xfer_len = 0;
++		} else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) {
++			hc->xfer_len = hc->max_packet;
++		} else if (!hc->ep_is_in && (hc->xfer_len > 188)) {
++			hc->xfer_len = 188;
++		}
++
++		hctsiz.b.xfersize = hc->xfer_len;
++	} else {
++		/*
++		 * Ensure that the transfer length and packet count will fit
++		 * in the widths allocated for them in the HCTSIZn register.
++		 */
++		if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
++		    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
++			/*
++			 * Make sure the transfer size is no larger than one
++			 * (micro)frame's worth of data. (A check was done
++			 * when the periodic transfer was accepted to ensure
++			 * that a (micro)frame's worth of data can be
++			 * programmed into a channel.)
++			 */
++			uint32_t max_periodic_len =
++			    hc->multi_count * hc->max_packet;
++			if (hc->xfer_len > max_periodic_len) {
++				hc->xfer_len = max_periodic_len;
++			}
++		} else if (hc->xfer_len > max_hc_xfer_size) {
++			/* Make sure that xfer_len is a multiple of max packet size. */
++			hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1;
++		}
++
++		if (hc->xfer_len > 0) {
++			num_packets =
++			    (hc->xfer_len + hc->max_packet -
++			     1) / hc->max_packet;
++			if (num_packets > max_hc_pkt_count) {
++				num_packets = max_hc_pkt_count;
++				hc->xfer_len = num_packets * hc->max_packet;
++			}
++		} else {
++			/* Need 1 packet for transfer length of 0. */
++			num_packets = 1;
++		}
++
++		if (hc->ep_is_in) {
++			/* Always program an integral # of max packets for IN transfers. */
++			hc->xfer_len = num_packets * hc->max_packet;
++		}
++
++		if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
++		    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
++			/*
++			 * Make sure that the multi_count field matches the
++			 * actual transfer length.
++			 */
++			hc->multi_count = num_packets;
++		}
++
++		if (hc->ep_type == FH_OTG_EP_TYPE_ISOC)
++			set_pid_isoc(hc);
++
++		hctsiz.b.xfersize = hc->xfer_len;
++	}
++
++	hc->start_pkt_count = num_packets;
++	hctsiz.b.pktcnt = num_packets;
++	hctsiz.b.pid = hc->data_pid_start;
++	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
++
++	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
++	FH_DEBUGPL(DBG_HCDV, "	 Xfer Size: %d\n", hctsiz.b.xfersize);
++	FH_DEBUGPL(DBG_HCDV, "	 Num Pkts: %d\n", hctsiz.b.pktcnt);
++	FH_DEBUGPL(DBG_HCDV, "	 Start PID: %d\n", hctsiz.b.pid);
++
++	if (core_if->dma_enable) {
++		fh_dma_t dma_addr;
++		if (hc->align_buff) {
++			dma_addr = hc->align_buff;
++		} else {
++			dma_addr = ((unsigned long)hc->xfer_buff & 0xffffffff);
++		}
++		FH_WRITE_REG32(&hc_regs->hcdma, dma_addr);
++	}
++
++	/* Start the split */
++	if (hc->do_split) {
++		hcsplt_data_t hcsplt;
++		hcsplt.d32 = FH_READ_REG32(&hc_regs->hcsplt);
++		hcsplt.b.spltena = 1;
++		FH_WRITE_REG32(&hc_regs->hcsplt, hcsplt.d32);
++	}
++
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	hcchar.b.multicnt = hc->multi_count;
++	hc_set_even_odd_frame(core_if, hc, &hcchar);
++#ifdef DEBUG
++	core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
++	if (hcchar.b.chdis) {
++		FH_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
++			 __func__, hc->hc_num, hcchar.d32);
++	}
++#endif
++
++	/* Set host channel enable after all other setup is complete. */
++	hcchar.b.chen = 1;
++	hcchar.b.chdis = 0;
++	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++
++	hc->xfer_started = 1;
++	hc->requests++;
++
++	if (!core_if->dma_enable && !hc->ep_is_in && hc->xfer_len > 0) {
++		/* Load OUT packet into the appropriate Tx FIFO. */
++		fh_otg_hc_write_packet(core_if, hc);
++	}
++#ifdef DEBUG
++	if (hc->ep_type != FH_OTG_EP_TYPE_INTR) {
++		core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
++		core_if->hc_xfer_info[hc->hc_num].hc = hc;
++
++		/* Start a timer for this transfer. */
++		FH_TIMER_SCHEDULE(core_if->hc_xfer_timer[hc->hc_num], 10000);
++	}
++#endif
++}
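++
++/*
++ * Worked example (hypothetical values): a bulk IN transfer with
++ * xfer_len = 3000 and max_packet = 512 gives
++ *
++ *	num_packets = (3000 + 512 - 1) / 512 = 6
++ *	xfer_len    = 6 * 512 = 3072	(IN lengths are rounded up to
++ *					 whole packets)
++ *
++ * so HCTSIZ is programmed with PktCnt = 6 and XferSize = 3072; the core
++ * finishes early if the device returns a short packet.
++ */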
++
++/**
++ * This function does the setup for a data transfer for a host channel
++ * and starts the transfer in Descriptor DMA mode.
++ *
++ * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
++ * Sets PID and NTD values. For periodic transfers
++ * initializes SCHED_INFO field with micro-frame bitmap.
++ *
++ * Initializes HCDMA register with descriptor list address and CTD value
++ * then starts the transfer via enabling the channel.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param hc Information needed to initialize the host channel.
++ */
++void fh_otg_hc_start_transfer_ddma(fh_otg_core_if_t * core_if, fh_hc_t * hc)
++{
++	fh_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
++	hcchar_data_t hcchar;
++	hctsiz_data_t hctsiz;
++	hcdma_data_t hcdma;
++
++	hctsiz.d32 = 0;
++
++	if (hc->do_ping)
++		hctsiz.b_ddma.dopng = 1;
++
++	if (hc->ep_type == FH_OTG_EP_TYPE_ISOC)
++		set_pid_isoc(hc);
++
++	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
++	hctsiz.b_ddma.pid = hc->data_pid_start;
++	hctsiz.b_ddma.ntd = hc->ntd - 1;	/* 0 - 1 descriptor, 1 - 2 descriptors, etc. */
++	hctsiz.b_ddma.schinfo = hc->schinfo;	/* Non-zero only for high-speed interrupt endpoints */
++
++	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
++	FH_DEBUGPL(DBG_HCDV, "	 Start PID: %d\n", hctsiz.b.pid);
++	FH_DEBUGPL(DBG_HCDV, "	 NTD: %d\n", hctsiz.b_ddma.ntd);
++
++	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
++
++	hcdma.d32 = 0;
++	hcdma.b.dma_addr = ((uint32_t) hc->desc_list_addr) >> 11;
++
++	/* Always start from first descriptor. */
++	hcdma.b.ctd = 0;
++	FH_WRITE_REG32(&hc_regs->hcdma, hcdma.d32);
++
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	hcchar.b.multicnt = hc->multi_count;
++
++#ifdef DEBUG
++	core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
++	if (hcchar.b.chdis) {
++		FH_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
++			 __func__, hc->hc_num, hcchar.d32);
++	}
++#endif
++
++	/* Set host channel enable after all other setup is complete. */
++	hcchar.b.chen = 1;
++	hcchar.b.chdis = 0;
++
++	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++
++	hc->xfer_started = 1;
++	hc->requests++;
++
++#ifdef DEBUG
++	if ((hc->ep_type != FH_OTG_EP_TYPE_INTR)
++	    && (hc->ep_type != FH_OTG_EP_TYPE_ISOC)) {
++		core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
++		core_if->hc_xfer_info[hc->hc_num].hc = hc;
++		/* Start a timer for this transfer. */
++		FH_TIMER_SCHEDULE(core_if->hc_xfer_timer[hc->hc_num], 10000);
++	}
++#endif
++
++}
++
++/**
++ * This function continues a data transfer that was started by previous call
++ * to <code>fh_otg_hc_start_transfer</code>. The caller must ensure there is
++ * sufficient space in the request queue and Tx Data FIFO. This function
++ * should only be called in Slave mode. In DMA mode, the controller acts
++ * autonomously to complete transfers programmed to a host channel.
++ *
++ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
++ * if there is any data remaining to be queued. For an IN transfer, another
++ * data packet is always requested. For the SETUP phase of a control transfer,
++ * this function does nothing.
++ *
++ * @return 1 if a new request is queued, 0 if no more requests are required
++ * for this transfer.
++ */
++int fh_otg_hc_continue_transfer(fh_otg_core_if_t * core_if, fh_hc_t * hc)
++{
++	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
++
++	if (hc->do_split) {
++		/* SPLITs always queue just once per channel */
++		return 0;
++	} else if (hc->data_pid_start == FH_OTG_HC_PID_SETUP) {
++		/* SETUPs are queued only once since they can't be NAKed. */
++		return 0;
++	} else if (hc->ep_is_in) {
++		/*
++		 * Always queue another request for other IN transfers. If
++		 * back-to-back INs are issued and NAKs are received for both,
++		 * the driver may still be processing the first NAK when the
++		 * second NAK is received. When the interrupt handler clears
++		 * the NAK interrupt for the first NAK, the second NAK will
++		 * not be seen. So we can't depend on the NAK interrupt
++		 * handler to requeue a NAKed request. Instead, IN requests
++		 * are issued each time this function is called. When the
++		 * transfer completes, the extra requests for the channel will
++		 * be flushed.
++		 */
++		hcchar_data_t hcchar;
++		fh_otg_hc_regs_t *hc_regs =
++		    core_if->host_if->hc_regs[hc->hc_num];
++
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++		hc_set_even_odd_frame(core_if, hc, &hcchar);
++		hcchar.b.chen = 1;
++		hcchar.b.chdis = 0;
++		FH_DEBUGPL(DBG_HCDV, "	 IN xfer: hcchar = 0x%08x\n",
++			    hcchar.d32);
++		FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++		hc->requests++;
++		return 1;
++	} else {
++		/* OUT transfers. */
++		if (hc->xfer_count < hc->xfer_len) {
++			if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
++			    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
++				hcchar_data_t hcchar;
++				fh_otg_hc_regs_t *hc_regs;
++				hc_regs = core_if->host_if->hc_regs[hc->hc_num];
++				hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++				hc_set_even_odd_frame(core_if, hc, &hcchar);
++			}
++
++			/* Load OUT packet into the appropriate Tx FIFO. */
++			fh_otg_hc_write_packet(core_if, hc);
++			hc->requests++;
++			return 1;
++		} else {
++			return 0;
++		}
++	}
++}
++
++/**
++ * Starts a PING transfer. This function should only be called in Slave mode.
++ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled.
++ */
++void fh_otg_hc_do_ping(fh_otg_core_if_t * core_if, fh_hc_t * hc)
++{
++	hcchar_data_t hcchar;
++	hctsiz_data_t hctsiz;
++	fh_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
++
++	FH_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
++
++	hctsiz.d32 = 0;
++	hctsiz.b.dopng = 1;
++	hctsiz.b.pktcnt = 1;
++	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
++
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	hcchar.b.chen = 1;
++	hcchar.b.chdis = 0;
++	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++}
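++
++/*
++ * Background: PING is the USB 2.0 high-speed flow-control mechanism for
++ * bulk and control OUT endpoints. The host sends PING tokens until the
++ * device answers ACK (buffer space available) and only then resumes
++ * sending OUT data, instead of wasting bus time on NAKed data packets.
++ */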
++
++/*
++ * This function writes a packet into the Tx FIFO associated with the Host
++ * Channel. For a channel associated with a non-periodic EP, the non-periodic
++ * Tx FIFO is written. For a channel associated with a periodic EP, the
++ * periodic Tx FIFO is written. This function should only be called in Slave
++ * mode.
++ *
++ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by
++ * the number of bytes written to the Tx FIFO.
++ */
++void fh_otg_hc_write_packet(fh_otg_core_if_t * core_if, fh_hc_t * hc)
++{
++	uint32_t i;
++	uint32_t remaining_count;
++	uint32_t byte_count;
++	uint32_t dword_count;
++
++	uint32_t *data_buff = (uint32_t *) (hc->xfer_buff);
++	uint32_t *data_fifo = core_if->data_fifo[hc->hc_num];
++
++	remaining_count = hc->xfer_len - hc->xfer_count;
++	if (remaining_count > hc->max_packet) {
++		byte_count = hc->max_packet;
++	} else {
++		byte_count = remaining_count;
++	}
++
++	dword_count = (byte_count + 3) / 4;
++
++	if ((((unsigned long)data_buff) & 0x3) == 0) {
++		/* xfer_buff is DWORD aligned. */
++		for (i = 0; i < dword_count; i++, data_buff++) {
++			FH_WRITE_REG32(data_fifo, *data_buff);
++		}
++	} else {
++		/* xfer_buff is not DWORD aligned. Assemble each DWORD from
++		 * individual bytes so that no unaligned 32-bit load is
++		 * issued. */
++		uint8_t *byte_buff = (uint8_t *) data_buff;
++		for (i = 0; i < dword_count; i++, byte_buff += 4) {
++			uint32_t data;
++			data =
++			    (byte_buff[0] | byte_buff[1] << 8 | byte_buff[2] <<
++			     16 | byte_buff[3] << 24);
++			FH_WRITE_REG32(data_fifo, data);
++		}
++	}
++
++	hc->xfer_count += byte_count;
++	hc->xfer_buff += byte_count;
++}
++
++/**
++ * Gets the current USB frame number. This is the frame number from the last
++ * SOF packet.
++ */
++uint32_t fh_otg_get_frame_number(fh_otg_core_if_t * core_if)
++{
++	dsts_data_t dsts;
++	dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
++
++	/* read current frame/microframe number from DSTS register */
++	return dsts.b.soffn;
++}
++
++/**
++ * Calculates the frame interval value for the HFIR register according to the
++ * PHY type and speed. The application may modify the HFIR register value only
++ * after the Port Enable bit of the Host Port Control and Status register
++ * (HPRT.PrtEnaPort) has been set.
++ */
++
++uint32_t calc_frame_interval(fh_otg_core_if_t * core_if)
++{
++	gusbcfg_data_t usbcfg;
++	hwcfg2_data_t hwcfg2;
++	hprt0_data_t hprt0;
++	int clock = 60;		// default value
++	usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++	hwcfg2.d32 = FH_READ_REG32(&core_if->core_global_regs->ghwcfg2);
++	hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
++	if (!usbcfg.b.physel && usbcfg.b.ulpi_utmi_sel && !usbcfg.b.phyif)
++		clock = 60;
++	if (usbcfg.b.physel && hwcfg2.b.fs_phy_type == 3)
++		clock = 48;
++	if (!usbcfg.b.phylpwrclksel && !usbcfg.b.physel &&
++	    !usbcfg.b.ulpi_utmi_sel && usbcfg.b.phyif)
++		clock = 30;
++	if (!usbcfg.b.phylpwrclksel && !usbcfg.b.physel &&
++	    !usbcfg.b.ulpi_utmi_sel && !usbcfg.b.phyif)
++		clock = 60;
++	if (usbcfg.b.phylpwrclksel && !usbcfg.b.physel &&
++	    !usbcfg.b.ulpi_utmi_sel && usbcfg.b.phyif)
++		clock = 48;
++	if (usbcfg.b.physel && !usbcfg.b.phyif && hwcfg2.b.fs_phy_type == 2)
++		clock = 48;
++	if (usbcfg.b.physel && hwcfg2.b.fs_phy_type == 1)
++		clock = 48;
++	if (hprt0.b.prtspd == 0)
++		/* High speed case */
++		return 125 * clock - 1;
++	else
++		/* FS/LS case */
++		return 1000 * clock - 1;
++}
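++
++/*
++ * Example: with a 60 MHz UTMI+ PHY clock and a high-speed port
++ * (hprt0.b.prtspd == 0) the returned interval is 125 * 60 - 1 = 7499 PHY
++ * clocks per 125 us microframe; a 48 MHz full-speed PHY yields
++ * 1000 * 48 - 1 = 47999 PHY clocks per 1 ms frame.
++ */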
++
++/**
++ * This function reads a setup packet from the Rx FIFO into the destination
++ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl)
++ * Interrupt routine when a SETUP packet has been received in Slave mode.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param dest Destination buffer for packet data.
++ */
++void fh_otg_read_setup_packet(fh_otg_core_if_t * core_if, uint32_t * dest)
++{
++	device_grxsts_data_t status;
++	/* Get the 8 bytes of a setup transaction data */
++
++	/* Pop 2 DWORDS off the receive data FIFO into memory */
++	dest[0] = FH_READ_REG32(core_if->data_fifo[0]);
++	dest[1] = FH_READ_REG32(core_if->data_fifo[0]);
++	if (core_if->snpsid >= OTG_CORE_REV_3_00a && core_if->snpsid < OTG_CORE_REV_3_30a) {
++		status.d32 =
++		    FH_READ_REG32(&core_if->core_global_regs->grxstsp);
++		FH_DEBUGPL(DBG_ANY,
++			    "EP:%d BCnt:%d " "pktsts:%x Frame:%d(0x%0x)\n",
++			    status.b.epnum, status.b.bcnt, status.b.pktsts,
++			    status.b.fn, status.b.fn);
++	}
++}
++
++/**
++ * This function enables EP0 OUT to receive SETUP packets and configures EP0
++ * IN for transmitting packets. It is normally called when the
++ * "Enumeration Done" interrupt occurs.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP0 data.
++ */
++void fh_otg_ep0_activate(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	dsts_data_t dsts;
++	depctl_data_t diepctl;
++	depctl_data_t doepctl;
++	dctl_data_t dctl = {.d32 = 0 };
++
++	ep->stp_rollover = 0;
++	/* Read the Device Status and Endpoint 0 Control registers */
++	dsts.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dsts);
++	diepctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl);
++	doepctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl);
++
++	/* Set the MPS of the IN EP based on the enumeration speed */
++	switch (dsts.b.enumspd) {
++	case FH_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
++	case FH_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
++	case FH_DSTS_ENUMSPD_FS_PHY_48MHZ:
++		diepctl.b.mps = FH_DEP0CTL_MPS_64;
++		break;
++	case FH_DSTS_ENUMSPD_LS_PHY_6MHZ:
++		diepctl.b.mps = FH_DEP0CTL_MPS_8;
++		break;
++	}
++
++	FH_WRITE_REG32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
++
++	/* Enable OUT EP for receive */
++	if (core_if->snpsid <= OTG_CORE_REV_2_94a) {
++		doepctl.b.epena = 1;
++		FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
++	}
++#ifdef VERBOSE
++	FH_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
++		    FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
++	FH_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
++		    FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl));
++#endif
++	dctl.b.cgnpinnak = 1;
++
++	FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
++	FH_DEBUGPL(DBG_PCDV, "dctl=%0x\n",
++		    FH_READ_REG32(&dev_if->dev_global_regs->dctl));
++
++}
++
++/**
++ * This function activates an EP.  The Device EP control register for
++ * the EP is configured as defined in the ep structure. Note: This
++ * function is not used for EP0.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to activate.
++ */
++void fh_otg_ep_activate(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	depctl_data_t depctl;
++	volatile uint32_t *addr;
++	daint_data_t daintmsk = {.d32 = 0 };
++	dcfg_data_t dcfg;
++	uint8_t i;
++
++	FH_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num,
++		    (ep->is_in ? "IN" : "OUT"));
++
++#ifdef FH_UTE_PER_IO
++	ep->xiso_frame_num = 0xFFFFFFFF;
++	ep->xiso_active_xfers = 0;
++	ep->xiso_queued_xfers = 0;
++#endif
++	/* Read DEPCTLn register */
++	if (ep->is_in == 1) {
++		addr = &dev_if->in_ep_regs[ep->num]->diepctl;
++		daintmsk.ep.in = 1 << ep->num;
++	} else {
++		addr = &dev_if->out_ep_regs[ep->num]->doepctl;
++		daintmsk.ep.out = 1 << ep->num;
++	}
++
++	/* If the EP is already active don't change the EP Control
++	 * register. */
++	depctl.d32 = FH_READ_REG32(addr);
++	if (!depctl.b.usbactep) {
++		depctl.b.mps = ep->maxpacket;
++		depctl.b.eptype = ep->type;
++		depctl.b.txfnum = ep->tx_fifo_num;
++
++		/* Both ISOC and non-ISOC endpoints start with DATA0. */
++		depctl.b.setd0pid = 1;
++		depctl.b.usbactep = 1;
++
++		/* Update nextep_seq array and EPMSCNT in DCFG */
++		if (!(depctl.b.eptype & 1) && (ep->is_in == 1)) {	// NP IN EP
++			for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++				if (core_if->nextep_seq[i] == core_if->first_in_nextep_seq)
++					break;
++			}
++			core_if->nextep_seq[i] = ep->num;
++			core_if->nextep_seq[ep->num] = core_if->first_in_nextep_seq;
++			depctl.b.nextep = core_if->nextep_seq[ep->num];
++			dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
++			dcfg.b.epmscnt++;
++			FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
++
++			FH_DEBUGPL(DBG_PCDV,
++				    "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
++				    __func__, core_if->first_in_nextep_seq);
++			for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++				FH_DEBUGPL(DBG_PCDV, "%2d\n",
++					    core_if->nextep_seq[i]);
++			}
++
++		}
++
++
++		FH_WRITE_REG32(addr, depctl.d32);
++		FH_DEBUGPL(DBG_PCDV, "DEPCTL=%08x\n", FH_READ_REG32(addr));
++	}
++
++	/* Enable the Interrupt for this EP */
++	if (core_if->multiproc_int_enable) {
++		if (ep->is_in == 1) {
++			diepmsk_data_t diepmsk = {.d32 = 0 };
++			diepmsk.b.xfercompl = 1;
++			diepmsk.b.timeout = 1;
++			diepmsk.b.epdisabled = 1;
++			diepmsk.b.ahberr = 1;
++			diepmsk.b.intknepmis = 1;
++			if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
++				diepmsk.b.intknepmis = 0;
++			diepmsk.b.txfifoundrn = 1;	/* TxFIFO underrun */
++			if (ep->type == FH_OTG_EP_TYPE_ISOC) {
++				diepmsk.b.nak = 1;
++			}
++
++/*
++			if (core_if->dma_desc_enable) {
++				diepmsk.b.bna = 1;
++			}
++*/
++/*			
++			if (core_if->dma_enable) {
++				doepmsk.b.nak = 1;
++			}
++*/
++			FH_WRITE_REG32(&dev_if->dev_global_regs->
++					diepeachintmsk[ep->num], diepmsk.d32);
++
++		} else {
++			doepmsk_data_t doepmsk = {.d32 = 0 };
++			doepmsk.b.xfercompl = 1;
++			doepmsk.b.ahberr = 1;
++			doepmsk.b.epdisabled = 1;
++			if (ep->type == FH_OTG_EP_TYPE_ISOC)
++				doepmsk.b.outtknepdis = 1;
++
++/*			
++
++			if (core_if->dma_desc_enable) {
++				doepmsk.b.bna = 1;
++			}
++*/
++/*			
++			doepmsk.b.babble = 1;
++			doepmsk.b.nyet = 1;
++			doepmsk.b.nak = 1;
++*/
++			FH_WRITE_REG32(&dev_if->dev_global_regs->
++					doepeachintmsk[ep->num], doepmsk.d32);
++		}
++		FH_MODIFY_REG32(&dev_if->dev_global_regs->deachintmsk,
++				 0, daintmsk.d32);
++	} else {
++		if (ep->type == FH_OTG_EP_TYPE_ISOC) {
++			if (ep->is_in) {
++				diepmsk_data_t diepmsk = {.d32 = 0 };
++				diepmsk.b.nak = 1;
++				FH_MODIFY_REG32(&dev_if->dev_global_regs->diepmsk, 0, diepmsk.d32);
++			} else {
++				doepmsk_data_t doepmsk = {.d32 = 0 };
++				doepmsk.b.outtknepdis = 1;
++				FH_MODIFY_REG32(&dev_if->dev_global_regs->doepmsk, 0, doepmsk.d32);
++			}
++		}
++		FH_MODIFY_REG32(&dev_if->dev_global_regs->daintmsk,
++				 0, daintmsk.d32);
++	}
++
++	FH_DEBUGPL(DBG_PCDV, "DAINTMSK=%0x\n",
++		    FH_READ_REG32(&dev_if->dev_global_regs->daintmsk));
++
++	ep->stall_clear_flag = 0;
++
++	return;
++}
++
++/**
++ * This function deactivates an EP. This is done by clearing the USB Active
++ * EP bit in the Device EP control register. Note: This function is not used
++ * for EP0. EP0 cannot be deactivated.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to deactivate.
++ */
++void fh_otg_ep_deactivate(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	depctl_data_t depctl = {.d32 = 0 };
++	volatile uint32_t *addr;
++	daint_data_t daintmsk = {.d32 = 0 };
++	dcfg_data_t dcfg;
++	uint8_t i = 0;
++
++#ifdef FH_UTE_PER_IO
++	ep->xiso_frame_num = 0xFFFFFFFF;
++	ep->xiso_active_xfers = 0;
++	ep->xiso_queued_xfers = 0;
++#endif
++
++	/* Read DEPCTLn register */
++	if (ep->is_in == 1) {
++		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
++		daintmsk.ep.in = 1 << ep->num;
++	} else {
++		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
++		daintmsk.ep.out = 1 << ep->num;
++	}
++
++	depctl.d32 = FH_READ_REG32(addr);
++
++	depctl.b.usbactep = 0;
++
++	/* Update nextep_seq array and EPMSCNT in DCFG */
++	if (!(depctl.b.eptype & 1) && ep->is_in == 1) {	// NP EP IN
++		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++			if (core_if->nextep_seq[i] == ep->num)
++				break;
++		}
++		core_if->nextep_seq[i] = core_if->nextep_seq[ep->num];
++		if (core_if->first_in_nextep_seq == ep->num)
++			core_if->first_in_nextep_seq = i;
++		core_if->nextep_seq[ep->num] = 0xff;
++		depctl.b.nextep = 0;
++		dcfg.d32 =
++		    FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
++		dcfg.b.epmscnt--;
++		FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg,
++				dcfg.d32);
++
++		FH_DEBUGPL(DBG_PCDV,
++			    "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
++			    __func__, core_if->first_in_nextep_seq);
++		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++			FH_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
++		}
++	}
++		
++	if (ep->is_in == 1)
++		depctl.b.txfnum = 0;
++
++	if (core_if->dma_desc_enable)
++		depctl.b.epdis = 1;
++
++	FH_WRITE_REG32(addr, depctl.d32);
++	depctl.d32 = FH_READ_REG32(addr);
++	if (core_if->dma_enable && ep->type == FH_OTG_EP_TYPE_ISOC
++	    && depctl.b.epena) {
++		depctl_data_t depctl = {.d32 = 0 };
++		if (ep->is_in) {
++			diepint_data_t diepint = {.d32 = 0 };
++
++			depctl.b.snak = 1;
++			FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
++					diepctl, depctl.d32);
++			do {
++				fh_udelay(10);
++				diepint.d32 =
++				    FH_READ_REG32(&core_if->
++						   dev_if->in_ep_regs[ep->num]->
++						   diepint);
++			} while (!diepint.b.inepnakeff);
++			diepint.b.inepnakeff = 1;
++			FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
++					diepint, diepint.d32);
++			depctl.d32 = 0;
++			depctl.b.epdis = 1;
++			FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
++					diepctl, depctl.d32);
++			do {
++				fh_udelay(10);
++				diepint.d32 =
++				    FH_READ_REG32(&core_if->
++						   dev_if->in_ep_regs[ep->num]->
++						   diepint);
++			} while (!diepint.b.epdisabled);
++			diepint.b.epdisabled = 1;
++			FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
++					diepint, diepint.d32);
++		} else {
++			dctl_data_t dctl = {.d32 = 0};
++			gintsts_data_t gintsts = {.d32 = 0};
++			doepint_data_t doepint = {.d32 = 0};
++			dctl.b.sgoutnak = 1;
++			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
++					 dctl, 0, dctl.d32);
++			do {
++				fh_udelay(10);
++				gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++			} while (!gintsts.b.goutnakeff); 
++			gintsts.d32 = 0;
++			gintsts.b.goutnakeff = 1;
++			FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++			depctl.d32 = 0;
++			depctl.b.epdis = 1;
++			depctl.b.snak = 1;
++			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->doepctl, depctl.d32);
++			do {
++				fh_udelay(10);
++				doepint.d32 =
++				    FH_READ_REG32(&core_if->dev_if->
++						   out_ep_regs[ep->num]->doepint);
++			} while (!doepint.b.epdisabled);
++
++			doepint.b.epdisabled = 1;
++			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->doepint, doepint.d32);
++
++			dctl.d32 = 0;
++			dctl.b.cgoutnak = 1;
++			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
++		}		
++	}
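++
++	/*
++	 * Note: the shutdown sequence above follows the usual NAK-then-disable
++	 * flow: an IN endpoint sets SNAK and waits for INEPNAKEFF, then sets
++	 * EPDIS and waits for EPDISABLED; an OUT endpoint sets the global
++	 * SGOUTNAK, disables the endpoint, and finally clears the NAK with
++	 * CGOUTNAK.
++	 */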
++
++	/* Disable the Interrupt for this EP */
++	if (core_if->multiproc_int_enable) {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->deachintmsk,
++				 daintmsk.d32, 0);
++
++		if (ep->is_in == 1) {
++			FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->
++					diepeachintmsk[ep->num], 0);
++		} else {
++			FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->
++					doepeachintmsk[ep->num], 0);
++		}
++	} else {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->daintmsk,
++				 daintmsk.d32, 0);
++	}
++
++}
++
++/**
++ * This function initializes dma descriptor chain.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ */
++static void init_dma_desc_chain(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	fh_otg_dev_dma_desc_t *dma_desc;
++	uint32_t offset;
++	uint32_t xfer_est;
++	int i;
++	unsigned maxxfer_local, total_len;
++
++	if (!ep->is_in && ep->type == FH_OTG_EP_TYPE_INTR &&
++	    (ep->maxpacket % 4)) {
++		maxxfer_local = ep->maxpacket;
++		total_len = ep->xfer_len;
++	} else {
++		maxxfer_local = ep->maxxfer;
++		total_len = ep->total_len;
++	}
++
++	ep->desc_cnt = (total_len / maxxfer_local) +
++	    ((total_len % maxxfer_local) ? 1 : 0);
++
++	if (!ep->desc_cnt)
++		ep->desc_cnt = 1;
++
++	if (ep->desc_cnt > MAX_DMA_DESC_CNT)
++		ep->desc_cnt = MAX_DMA_DESC_CNT;
++
++	dma_desc = ep->desc_addr;
++	if (maxxfer_local == ep->maxpacket) {
++		if ((total_len % maxxfer_local) &&
++		    (total_len / maxxfer_local < MAX_DMA_DESC_CNT)) {
++			xfer_est = (ep->desc_cnt - 1) * maxxfer_local +
++			    (total_len % maxxfer_local);
++		} else
++			xfer_est = ep->desc_cnt * maxxfer_local;
++	} else
++		xfer_est = total_len;
++	offset = 0;
++	for (i = 0; i < ep->desc_cnt; ++i) {
++		/** DMA Descriptor Setup */
++		if (xfer_est > maxxfer_local) {
++			dma_desc->status.b.bs = BS_HOST_BUSY;
++			dma_desc->status.b.l = 0;
++			dma_desc->status.b.ioc = 0;
++			dma_desc->status.b.sp = 0;
++			dma_desc->status.b.bytes = maxxfer_local;
++			dma_desc->buf = ep->dma_addr + offset;
++			dma_desc->status.b.sts = 0;
++			dma_desc->status.b.bs = BS_HOST_READY;
++
++			xfer_est -= maxxfer_local;
++			offset += maxxfer_local;
++		} else {
++			dma_desc->status.b.bs = BS_HOST_BUSY;
++			dma_desc->status.b.l = 1;
++			dma_desc->status.b.ioc = 1;
++			if (ep->is_in) {
++				dma_desc->status.b.sp =
++				    (xfer_est %
++				     ep->maxpacket) ? 1 : ((ep->
++							    sent_zlp) ? 1 : 0);
++				dma_desc->status.b.bytes = xfer_est;
++			} else {
++				if (maxxfer_local == ep->maxpacket)
++					dma_desc->status.b.bytes = xfer_est;
++				else	
++					dma_desc->status.b.bytes =
++				    		xfer_est + ((4 - (xfer_est & 0x3)) & 0x3);
++			}
++
++			dma_desc->buf = ep->dma_addr + offset;
++			dma_desc->status.b.sts = 0;
++			dma_desc->status.b.bs = BS_HOST_READY;
++		}
++		dma_desc++;
++	}
++}
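++
++/*
++ * Worked example (hypothetical sizes): a bulk OUT transfer with
++ * total_len = 10000 and maxxfer = 4096 gives desc_cnt = 3, and the chain
++ * is programmed as 4096 + 4096 + 1808 bytes, with the L (last) and IOC
++ * bits set only on the final descriptor.
++ */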
++
++/**
++ * This function is called to write ISOC data into the appropriate dedicated
++ * periodic FIFO.
++ */
++static int32_t write_isoc_tx_fifo(fh_otg_core_if_t * core_if, fh_ep_t * fh_ep)
++{
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	fh_otg_dev_in_ep_regs_t *ep_regs;
++	dtxfsts_data_t txstatus = {.d32 = 0 };
++	uint32_t len = 0;
++	int epnum = fh_ep->num;
++	int dwords;
++
++	FH_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %d \n", epnum);
++
++	ep_regs = core_if->dev_if->in_ep_regs[epnum];
++
++	len = fh_ep->xfer_len - fh_ep->xfer_count;
++
++	if (len > fh_ep->maxpacket) {
++		len = fh_ep->maxpacket;
++	}
++
++	dwords = (len + 3) / 4;
++
++	/* While there is space in the queue, space in the FIFO, and
++	 * more data to transfer, write packets to the Tx FIFO. */
++	txstatus.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
++	FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
++
++	while (txstatus.b.txfspcavail >= dwords &&
++	       fh_ep->xfer_count < fh_ep->xfer_len && fh_ep->xfer_len != 0) {
++		/* Write the FIFO */
++		fh_otg_ep_write_packet(core_if, fh_ep, 0);
++
++		len = fh_ep->xfer_len - fh_ep->xfer_count;
++		if (len > fh_ep->maxpacket) {
++			len = fh_ep->maxpacket;
++		}
++
++		dwords = (len + 3) / 4;
++		txstatus.d32 =
++		    FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
++		FH_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
++			    txstatus.d32);
++	}
++
++	FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum,
++		    FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts));
++
++	return 1;
++}
++
++/**
++ * This function does the setup for a data transfer for an EP and
++ * starts the transfer. For an IN transfer, the packets will be
++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
++ * the packets are unloaded from the Rx FIFO in the ISR.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ */
++
++void fh_otg_ep_start_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	depctl_data_t depctl;
++	deptsiz_data_t deptsiz;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
++	FH_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
++		    "xfer_buff=%p start_xfer_buff=%p, total_len = %d\n",
++		    ep->num, (ep->is_in ? "IN" : "OUT"), ep->xfer_len,
++		    ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff,
++		    ep->total_len);
++
++	/* IN endpoint */
++	if (ep->is_in == 1) {
++		fh_otg_dev_in_ep_regs_t *in_regs =
++		    core_if->dev_if->in_ep_regs[ep->num];
++
++		gnptxsts_data_t gtxstatus;
++
++		gtxstatus.d32 =
++		    FH_READ_REG32(&core_if->core_global_regs->gnptxsts);
++
++		if (core_if->en_multiple_tx_fifo == 0
++		    && gtxstatus.b.nptxqspcavail == 0 && !core_if->dma_enable) {
++#ifdef DEBUG
++			FH_PRINTF("TX Queue Full (0x%0x)\n", gtxstatus.d32);
++#endif
++			return;
++		}
++
++		depctl.d32 = FH_READ_REG32(&(in_regs->diepctl));
++		deptsiz.d32 = FH_READ_REG32(&(in_regs->dieptsiz));
++
++		if (ep->maxpacket > ep->maxxfer / MAX_PKT_CNT)
++			ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
++		    		ep->maxxfer : (ep->total_len - ep->xfer_len);
++		else 
++			ep->xfer_len += (MAX_PKT_CNT * ep->maxpacket < (ep->total_len - ep->xfer_len)) ?
++				 MAX_PKT_CNT * ep->maxpacket : (ep->total_len - ep->xfer_len);
++
++		/* Zero Length Packet? */
++		if ((ep->xfer_len - ep->xfer_count) == 0) {
++			deptsiz.b.xfersize = 0;
++			deptsiz.b.pktcnt = 1;
++		} else {
++			/* Program the transfer size and packet count
++			 *      as follows: xfersize = N * maxpacket +
++			 *      short_packet pktcnt = N + (short_packet
++			 *      exist ? 1 : 0) 
++			 */
++			deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
++			deptsiz.b.pktcnt =
++			    (ep->xfer_len - ep->xfer_count - 1 +
++			     ep->maxpacket) / ep->maxpacket;
++			if (deptsiz.b.pktcnt > MAX_PKT_CNT) {
++				deptsiz.b.pktcnt = MAX_PKT_CNT;
++				deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
++			} 
++			if (ep->type == FH_OTG_EP_TYPE_ISOC) 
++				deptsiz.b.mc = deptsiz.b.pktcnt;
++		}
++
++		/* Write the DMA register */
++		if (core_if->dma_enable) {
++			if (core_if->dma_desc_enable == 0) {
++				if (ep->type != FH_OTG_EP_TYPE_ISOC)
++					deptsiz.b.mc = 1;
++				FH_WRITE_REG32(&in_regs->dieptsiz,
++						deptsiz.d32);
++				FH_WRITE_REG32(&(in_regs->diepdma),
++						(uint32_t) ep->dma_addr);
++			} else {
++#ifdef FH_UTE_CFI
++				/* The descriptor chain should be already initialized by now */
++				if (ep->buff_mode != BM_STANDARD) {
++					FH_WRITE_REG32(&in_regs->diepdma,
++							ep->descs_dma_addr);
++				} else {
++#endif
++					init_dma_desc_chain(core_if, ep);
++				/** DIEPDMAn Register write */
++					FH_WRITE_REG32(&in_regs->diepdma,
++							ep->dma_desc_addr);
++#ifdef FH_UTE_CFI
++				}
++#endif
++			}
++		} else {
++			FH_WRITE_REG32(&in_regs->dieptsiz, deptsiz.d32);
++			if (ep->type != FH_OTG_EP_TYPE_ISOC) {
++				/**
++				 * Enable the Non-Periodic Tx FIFO empty interrupt,
++				 * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode;
++				 * the data will be written into the FIFO by the ISR.
++				 */
++				if (core_if->en_multiple_tx_fifo == 0) {
++					intr_mask.b.nptxfempty = 1;
++					FH_MODIFY_REG32
++					    (&core_if->core_global_regs->gintmsk,
++					     intr_mask.d32, intr_mask.d32);
++				} else {
++					/* Enable the Tx FIFO Empty Interrupt for this EP */
++					if (ep->xfer_len > 0) {
++						uint32_t fifoemptymsk = 0;
++						fifoemptymsk = 1 << ep->num;
++						FH_MODIFY_REG32
++						    (&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
++						     0, fifoemptymsk);
++
++					}
++				}
++			} 
++		}
++		if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable)
++			depctl.b.nextep = core_if->nextep_seq[ep->num];
++
++		if (ep->type == FH_OTG_EP_TYPE_ISOC) {
++			dsts_data_t dsts = {.d32 = 0 };
++			if (ep->bInterval == 1) {
++				dsts.d32 =
++				    FH_READ_REG32(&core_if->dev_if->
++						   dev_global_regs->dsts);
++				ep->frame_num = dsts.b.soffn + ep->bInterval;
++				if (ep->frame_num > 0x3FFF) {
++					ep->frm_overrun = 1;
++					ep->frame_num &= 0x3FFF;
++				} else
++					ep->frm_overrun = 0;
++				if (ep->frame_num & 0x1) {
++					depctl.b.setd1pid = 1;
++				} else {
++					depctl.b.setd0pid = 1;
++				}
++			}
++		}
++		/* EP enable, IN data in FIFO */
++		depctl.b.cnak = 1;
++		depctl.b.epena = 1;
++		FH_WRITE_REG32(&in_regs->diepctl, depctl.d32);
++
++		if (!core_if->dma_enable && ep->type == FH_OTG_EP_TYPE_ISOC) {
++			write_isoc_tx_fifo(core_if, ep);
++		}
++
++	} else {
++		/* OUT endpoint */
++		fh_otg_dev_out_ep_regs_t *out_regs =
++		    core_if->dev_if->out_ep_regs[ep->num];
++
++		depctl.d32 = FH_READ_REG32(&(out_regs->doepctl));
++		deptsiz.d32 = FH_READ_REG32(&(out_regs->doeptsiz));
++
++		if (!core_if->dma_desc_enable) {
++			if (ep->maxpacket > ep->maxxfer / MAX_PKT_CNT)
++				ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
++					ep->maxxfer : (ep->total_len - ep->xfer_len);
++			else
++				ep->xfer_len += (MAX_PKT_CNT * ep->maxpacket < (ep->total_len - ep->xfer_len)) ?
++					MAX_PKT_CNT * ep->maxpacket : (ep->total_len - ep->xfer_len);
++		}
++
++		/* Program the transfer size and packet count as follows:
++		 *
++		 *      pktcnt = N
++		 *      xfersize = N * maxpacket
++		 */
++		if ((ep->xfer_len - ep->xfer_count) == 0) {
++			/* Zero Length Packet */
++			deptsiz.b.xfersize = ep->maxpacket;
++			deptsiz.b.pktcnt = 1;
++		} else {
++			deptsiz.b.pktcnt =
++			    (ep->xfer_len - ep->xfer_count +
++			     (ep->maxpacket - 1)) / ep->maxpacket;
++			if (deptsiz.b.pktcnt > MAX_PKT_CNT) {
++				deptsiz.b.pktcnt = MAX_PKT_CNT;
++			}
++			if (!core_if->dma_desc_enable) {
++				ep->xfer_len =
++				    deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count;
++			}
++			deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
++		}
++
++		FH_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n",
++			    ep->num, deptsiz.b.xfersize, deptsiz.b.pktcnt);
++
++		if (core_if->dma_enable) {
++			if (!core_if->dma_desc_enable) {
++				FH_WRITE_REG32(&out_regs->doeptsiz,
++						deptsiz.d32);
++
++				FH_WRITE_REG32(&(out_regs->doepdma),
++						(uint32_t) ep->dma_addr);
++			} else {
++#ifdef FH_UTE_CFI
++				/* The descriptor chain should be already initialized by now */
++				if (ep->buff_mode != BM_STANDARD) {
++					FH_WRITE_REG32(&out_regs->doepdma,
++							ep->descs_dma_addr);
++				} else {
++#endif
++					/** This is used for interrupt OUT transfers */
++					if (!ep->xfer_len)
++						ep->xfer_len = ep->total_len;
++					init_dma_desc_chain(core_if, ep);
++
++					if (core_if->core_params->dev_out_nak) {
++						if (ep->type == FH_OTG_EP_TYPE_BULK) {
++							deptsiz.b.pktcnt = (ep->total_len +
++								(ep->maxpacket - 1)) / ep->maxpacket;
++							deptsiz.b.xfersize = ep->total_len;
++							/* Remember initial value of doeptsiz */
++							core_if->start_doeptsiz_val[ep->num] = deptsiz.d32;
++							FH_WRITE_REG32(&out_regs->doeptsiz,
++								deptsiz.d32);													
++						}
++					}
++					/** DOEPDMAn Register write */
++					FH_WRITE_REG32(&out_regs->doepdma,
++							ep->dma_desc_addr);
++#ifdef FH_UTE_CFI
++				}
++#endif
++			}
++		} else {
++			FH_WRITE_REG32(&out_regs->doeptsiz, deptsiz.d32);
++		}
++
++		if (ep->type == FH_OTG_EP_TYPE_ISOC) {
++			dsts_data_t dsts = {.d32 = 0 };
++			if (ep->bInterval == 1) {
++				dsts.d32 =
++				    FH_READ_REG32(&core_if->dev_if->
++						   dev_global_regs->dsts);
++				ep->frame_num = dsts.b.soffn + ep->bInterval;
++				if (ep->frame_num > 0x3FFF) {
++					ep->frm_overrun = 1;
++					ep->frame_num &= 0x3FFF;
++				} else
++					ep->frm_overrun = 0;
++
++				if (ep->frame_num & 0x1) {
++					depctl.b.setd1pid = 1;
++				} else {
++					depctl.b.setd0pid = 1;
++				}
++			}
++		}
++
++		/* EP enable */
++		depctl.b.cnak = 1;
++		depctl.b.epena = 1;
++
++		FH_WRITE_REG32(&out_regs->doepctl, depctl.d32);
++
++		FH_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
++			    FH_READ_REG32(&out_regs->doepctl),
++			    FH_READ_REG32(&out_regs->doeptsiz));
++		FH_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
++			    FH_READ_REG32(&core_if->dev_if->dev_global_regs->
++					   daintmsk),
++			    FH_READ_REG32(&core_if->core_global_regs->
++					   gintmsk));
++
++		/* The timer is scheduled only for OUT bulk transfers, as part
++		 * of the "Device DDMA OUT NAK Enhancement" feature, to inform
++		 * the user about the received data payload in case of a
++		 * timeout.
++		 */
++		if (core_if->core_params->dev_out_nak) {
++			if (ep->type == FH_OTG_EP_TYPE_BULK) {
++				core_if->ep_xfer_info[ep->num].core_if = core_if;
++				core_if->ep_xfer_info[ep->num].ep = ep;
++				core_if->ep_xfer_info[ep->num].state = 1;
++
++				/* Start a timer for this transfer. */
++				FH_TIMER_SCHEDULE(core_if->ep_xfer_timer[ep->num], 10000);
++			}
++		}
++	}
++}
++
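++/*
++ * Illustrative sketch (not part of the original driver): the DEPTSIZ
++ * programming above boils down to "pktcnt = ceil(len / mps)", clamped
++ * to the core's MAX_PKT_CNT, with xfersize carrying the byte count and
++ * a zero-length request still costing one packet. The helper name is
++ * hypothetical and exists only to restate that arithmetic in isolation.
++ */
++static inline void fh_otg_deptsiz_sketch(uint32_t len, uint32_t mps,
++					 uint32_t *xfersize, uint32_t *pktcnt)
++{
++	if (len == 0) {
++		/* Zero-length packet: one empty packet on the bus */
++		*xfersize = 0;
++		*pktcnt = 1;
++		return;
++	}
++	*pktcnt = (len + mps - 1) / mps;	/* ceil(len / mps) */
++	if (*pktcnt > MAX_PKT_CNT) {
++		/* Clamp, exactly as the IN path above does */
++		*pktcnt = MAX_PKT_CNT;
++		len = MAX_PKT_CNT * mps;
++	}
++	*xfersize = len;
++}
++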
++/**
++ * This function sets up a zero-length transfer in Buffer DMA and
++ * Slave modes for USB requests with the zero flag set.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ *
++ */
++void fh_otg_ep_start_zl_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++
++	depctl_data_t depctl;
++	deptsiz_data_t deptsiz;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
++	FH_PRINTF("zero-length transfer started\n");
++
++	/* IN endpoint */
++	if (ep->is_in == 1) {
++		fh_otg_dev_in_ep_regs_t *in_regs =
++		    core_if->dev_if->in_ep_regs[ep->num];
++
++		depctl.d32 = FH_READ_REG32(&(in_regs->diepctl));
++		deptsiz.d32 = FH_READ_REG32(&(in_regs->dieptsiz));
++
++		deptsiz.b.xfersize = 0;
++		deptsiz.b.pktcnt = 1;
++
++		/* Write the DMA register */
++		if (core_if->dma_enable) {
++			if (core_if->dma_desc_enable == 0) {
++				deptsiz.b.mc = 1;
++				FH_WRITE_REG32(&in_regs->dieptsiz,
++						deptsiz.d32);
++				FH_WRITE_REG32(&(in_regs->diepdma),
++						(uint32_t) ep->dma_addr);
++			}
++		} else {
++			FH_WRITE_REG32(&in_regs->dieptsiz, deptsiz.d32);
++			/**
++			 * Enable the Non-Periodic Tx FIFO empty interrupt,
++			 * or the Tx FIFO empty interrupt in dedicated Tx FIFO
++			 * mode; the data will be written into the FIFO by the
++			 * ISR.
++			 */
++			if (core_if->en_multiple_tx_fifo == 0) {
++				intr_mask.b.nptxfempty = 1;
++				FH_MODIFY_REG32(&core_if->
++						 core_global_regs->gintmsk,
++						 intr_mask.d32, intr_mask.d32);
++			} else {
++				/* Enable the Tx FIFO Empty Interrupt for this EP */
++				if (ep->xfer_len > 0) {
++					uint32_t fifoemptymsk = 0;
++					fifoemptymsk = 1 << ep->num;
++					FH_MODIFY_REG32(&core_if->
++							 dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
++							 0, fifoemptymsk);
++				}
++			}
++		}
++
++		if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable)
++			depctl.b.nextep = core_if->nextep_seq[ep->num];
++		/* EP enable, IN data in FIFO */
++		depctl.b.cnak = 1;
++		depctl.b.epena = 1;
++		FH_WRITE_REG32(&in_regs->diepctl, depctl.d32);
++
++	} else {
++		/* OUT endpoint */
++		fh_otg_dev_out_ep_regs_t *out_regs =
++		    core_if->dev_if->out_ep_regs[ep->num];
++
++		depctl.d32 = FH_READ_REG32(&(out_regs->doepctl));
++		deptsiz.d32 = FH_READ_REG32(&(out_regs->doeptsiz));
++
++		/* Zero Length Packet */
++		deptsiz.b.xfersize = ep->maxpacket;
++		deptsiz.b.pktcnt = 1;
++
++		if (core_if->dma_enable) {
++			if (!core_if->dma_desc_enable) {
++				FH_WRITE_REG32(&out_regs->doeptsiz,
++						deptsiz.d32);
++
++				FH_WRITE_REG32(&(out_regs->doepdma),
++						(uint32_t) ep->dma_addr);
++			}
++		} else {
++			FH_WRITE_REG32(&out_regs->doeptsiz, deptsiz.d32);
++		}
++
++		/* EP enable */
++		depctl.b.cnak = 1;
++		depctl.b.epena = 1;
++
++		FH_WRITE_REG32(&out_regs->doepctl, depctl.d32);
++
++	}
++}
++
++/**
++ * This function does the setup for a data transfer for EP0 and starts
++ * the transfer.  For an IN transfer, the packets will be loaded into
++ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are
++ * unloaded from the Rx FIFO in the ISR.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP0 data.
++ */
++void fh_otg_ep0_start_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	depctl_data_t depctl;
++	deptsiz0_data_t deptsiz;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++	fh_otg_dev_dma_desc_t *dma_desc;
++
++	FH_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
++		    "xfer_buff=%p start_xfer_buff=%p \n",
++		    ep->num, (ep->is_in ? "IN" : "OUT"), ep->xfer_len,
++		    ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff);
++
++	ep->total_len = ep->xfer_len;
++
++	/* IN endpoint */
++	if (ep->is_in == 1) {
++		fh_otg_dev_in_ep_regs_t *in_regs =
++		    core_if->dev_if->in_ep_regs[0];
++
++		gnptxsts_data_t gtxstatus;
++
++		if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
++			depctl.d32 = FH_READ_REG32(&in_regs->diepctl);
++			if (depctl.b.epena)
++				return;
++		}
++
++		gtxstatus.d32 =
++		    FH_READ_REG32(&core_if->core_global_regs->gnptxsts);
++
++		/* In dedicated FIFO mode, always flush the Tx FIFO before enabling the EP */
++		if (core_if->en_multiple_tx_fifo && core_if->snpsid >= OTG_CORE_REV_3_00a)
++			fh_otg_flush_tx_fifo(core_if, ep->tx_fifo_num);
++
++		if (core_if->en_multiple_tx_fifo == 0
++		    && gtxstatus.b.nptxqspcavail == 0
++		    && !core_if->dma_enable) {
++#ifdef DEBUG
++			deptsiz.d32 = FH_READ_REG32(&in_regs->dieptsiz);
++			FH_DEBUGPL(DBG_PCD, "DIEPCTL0=%0x\n",
++				    FH_READ_REG32(&in_regs->diepctl));
++			FH_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n",
++				    deptsiz.d32,
++				    deptsiz.b.xfersize, deptsiz.b.pktcnt);
++			FH_PRINTF("TX Queue or FIFO Full (0x%0x)\n",
++				   gtxstatus.d32);
++#endif
++			return;
++		}
++
++		depctl.d32 = FH_READ_REG32(&in_regs->diepctl);
++		deptsiz.d32 = FH_READ_REG32(&in_regs->dieptsiz);
++
++		/* Zero Length Packet? */
++		if (ep->xfer_len == 0) {
++			deptsiz.b.xfersize = 0;
++			deptsiz.b.pktcnt = 1;
++		} else {
++			/* Program the transfer size and packet count
++			 * as follows:
++			 *      xfersize = N * maxpacket + short_packet
++			 *      pktcnt = N + (short_packet exists ? 1 : 0)
++			 */
++			if (ep->xfer_len > ep->maxpacket) {
++				ep->xfer_len = ep->maxpacket;
++				deptsiz.b.xfersize = ep->maxpacket;
++			} else {
++				deptsiz.b.xfersize = ep->xfer_len;
++			}
++			deptsiz.b.pktcnt = 1;
++
++		}
++		FH_DEBUGPL(DBG_PCDV,
++			    "IN len=%d  xfersize=%d pktcnt=%d [%08x]\n",
++			    ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt,
++			    deptsiz.d32);
++
++		/* Write the DMA register */
++		if (core_if->dma_enable) {
++			if (core_if->dma_desc_enable == 0) {
++				FH_WRITE_REG32(&in_regs->dieptsiz,
++						deptsiz.d32);
++
++				FH_WRITE_REG32(&(in_regs->diepdma),
++						(uint32_t) ep->dma_addr);
++			} else {
++				dma_desc = core_if->dev_if->in_desc_addr;
++
++				/** DMA Descriptor Setup */
++				dma_desc->status.b.bs = BS_HOST_BUSY;
++				dma_desc->status.b.l = 1;
++				dma_desc->status.b.ioc = 1;
++				dma_desc->status.b.sp =
++				    (ep->xfer_len == ep->maxpacket) ? 0 : 1;
++				dma_desc->status.b.bytes = ep->xfer_len;
++				dma_desc->buf = ep->dma_addr;
++				dma_desc->status.b.sts = 0;
++				dma_desc->status.b.bs = BS_HOST_READY;
++
++				/** DIEPDMA0 Register write */
++				FH_WRITE_REG32(&in_regs->diepdma,
++						core_if->
++						dev_if->dma_in_desc_addr);
++			}
++		} else {
++			FH_WRITE_REG32(&in_regs->dieptsiz, deptsiz.d32);
++		}
++
++		if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable)
++			depctl.b.nextep = core_if->nextep_seq[ep->num];
++				
++		/* Workaround for the Unexpected Back-to-Back SETUP Interrupt
++		 * Issue: RxFIFO thresholding enabled (thr_ctl >= 4) and a
++		 * control IN EP in the status or data stage. Affected core
++		 * versions: v3.00a, v3.10a and v3.20a.
++		 */
++		if ((core_if->core_params->thr_ctl & 4) && (ep->is_in) &&
++		    ((core_if->snpsid == OTG_CORE_REV_3_00a) ||
++		     (core_if->snpsid == OTG_CORE_REV_3_10a) ||
++		     (core_if->snpsid == OTG_CORE_REV_3_20a))) {
++			int j = 0;
++			dctl_data_t dctl = {.d32 = 0};
++			gintmsk_data_t gintmsk = {.d32 = 0};
++			gintsts_data_t gintsts = {.d32 = 0};
++
++			gintmsk.d32 = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
++			if (gintmsk.b.goutnakeff) {
++				gintmsk.b.goutnakeff = 0;
++				FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
++				/* restore initial value of gintmsk.b.goutnakeff */
++				gintmsk.b.goutnakeff = 1;
++			}
++
++			dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
++			dctl.b.sgoutnak = 1;
++			FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
++
++			j = 0;
++			do {
++				j++;
++				fh_udelay(100);
++				gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++				if (j == 100000) {
++					FH_ERROR("GOUTNAKEFF is not set during 10s\n");
++					break;
++				}
++			} while (!gintsts.b.goutnakeff);	/* while not set */
++
++			dctl.b.cgoutnak = 1;
++			dctl.b.sgoutnak = 0;
++			FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
++
++			j = 0;
++			do {
++				j++;
++				fh_udelay(100);
++				gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++				if (j == 100000) {
++					FH_ERROR("GOUTNAKEFF is not cleared during 10s\n");
++					break;
++				}
++			} while (gintsts.b.goutnakeff);	/* while not cleared */
++
++			/* restore the saved gintmsk */
++			FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
++		}
++		/* End of WA for the Unexpected Back-to-Back SETUP Interrupt Issue */
++		
++		/* EP enable, IN data in FIFO */
++		depctl.b.cnak = 1;
++		depctl.b.epena = 1;
++		FH_WRITE_REG32(&in_regs->diepctl, depctl.d32);
++
++		/**
++		 * Enable the Non-Periodic Tx FIFO empty interrupt, the
++		 * data will be written into the fifo by the ISR.
++		 */
++		if (!core_if->dma_enable) {
++			if (core_if->en_multiple_tx_fifo == 0) {
++				intr_mask.b.nptxfempty = 1;
++				FH_MODIFY_REG32(&core_if->
++						 core_global_regs->gintmsk,
++						 intr_mask.d32, intr_mask.d32);
++			} else {
++				/* Enable the Tx FIFO Empty Interrupt for this EP */
++				if (ep->xfer_len > 0) {
++					uint32_t fifoemptymsk = 0;
++					fifoemptymsk |= 1 << ep->num;
++					FH_MODIFY_REG32(&core_if->
++							 dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
++							 0, fifoemptymsk);
++				}
++			}
++		}
++	} else {
++		/* OUT endpoint */
++		fh_otg_dev_out_ep_regs_t *out_regs =
++		    core_if->dev_if->out_ep_regs[0];
++
++		depctl.d32 = FH_READ_REG32(&out_regs->doepctl);
++		deptsiz.d32 = FH_READ_REG32(&out_regs->doeptsiz);
++
++		/* Program the transfer size and packet count as follows:
++		 *      xfersize = N * (maxpacket + 4 - (maxpacket % 4))
++		 *      pktcnt = N
++		 */
++		/* Zero Length Packet */
++		deptsiz.b.xfersize = ep->maxpacket;
++		deptsiz.b.pktcnt = 1;
++		if (core_if->snpsid >= OTG_CORE_REV_3_00a)
++			deptsiz.b.supcnt = 3;
++
++		FH_DEBUGPL(DBG_PCDV, "len=%d  xfersize=%d pktcnt=%d\n",
++			    ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt);
++
++		if (core_if->dma_enable) {
++			if (!core_if->dma_desc_enable) {
++				FH_WRITE_REG32(&out_regs->doeptsiz,
++						deptsiz.d32);
++
++				FH_WRITE_REG32(&(out_regs->doepdma),
++						(uint32_t) ep->dma_addr);
++			} else {
++				dma_desc = core_if->dev_if->out_desc_addr;
++
++				/** DMA Descriptor Setup */
++				dma_desc->status.b.bs = BS_HOST_BUSY;
++				if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
++					dma_desc->status.b.mtrf = 0;
++					dma_desc->status.b.sr = 0;
++				}
++				dma_desc->status.b.l = 1;
++				dma_desc->status.b.ioc = 1;
++				dma_desc->status.b.bytes = ep->maxpacket;
++				dma_desc->buf = ep->dma_addr;
++				dma_desc->status.b.sts = 0;
++				dma_desc->status.b.bs = BS_HOST_READY;
++
++				/** DOEPDMA0 Register write */
++				FH_WRITE_REG32(&out_regs->doepdma,
++						core_if->dev_if->
++						dma_out_desc_addr);
++			}
++		} else {
++			FH_WRITE_REG32(&out_regs->doeptsiz, deptsiz.d32);
++		}
++
++		/* EP enable */
++		depctl.b.cnak = 1;
++		depctl.b.epena = 1;
++		FH_WRITE_REG32(&(out_regs->doepctl), depctl.d32);
++	}
++}
++
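++/*
++ * Worked example (illustrative): EP0 IN transfers are clamped above to
++ * a single packet per programming round. With maxpacket = 64 and a
++ * 100-byte request, the first round programs xfersize = 64, pktcnt = 1;
++ * the remaining 36 bytes are handled later by
++ * fh_otg_ep0_continue_transfer().
++ */
++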
++/**
++ * This function continues control IN transfers started by
++ * fh_otg_ep0_start_transfer, when the transfer does not fit in a
++ * single packet.  NOTE: The DIEPCTL0/DOEPCTL0 registers only have one
++ * bit for the packet count.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP0 data.
++ */
++void fh_otg_ep0_continue_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	depctl_data_t depctl;
++	deptsiz0_data_t deptsiz;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++	fh_otg_dev_dma_desc_t *dma_desc;
++
++	if (ep->is_in == 1) {
++		fh_otg_dev_in_ep_regs_t *in_regs =
++		    core_if->dev_if->in_ep_regs[0];
++		gnptxsts_data_t tx_status = {.d32 = 0 };
++
++		tx_status.d32 =
++		    FH_READ_REG32(&core_if->core_global_regs->gnptxsts);
++		/** @todo Should there be a check for room in the Tx
++		 * Status Queue? If not, remove the read above this comment. */
++
++		depctl.d32 = FH_READ_REG32(&in_regs->diepctl);
++		deptsiz.d32 = FH_READ_REG32(&in_regs->dieptsiz);
++
++		/* Program the transfer size and packet count
++		 *      as follows: xfersize = N * maxpacket +
++		 *      short_packet pktcnt = N + (short_packet
++		 *      exist ? 1 : 0) 
++		 */
++
++		if (core_if->dma_desc_enable == 0) {
++			deptsiz.b.xfersize =
++			    (ep->total_len - ep->xfer_count) >
++			    ep->maxpacket ? ep->maxpacket : (ep->total_len -
++							     ep->xfer_count);
++			deptsiz.b.pktcnt = 1;
++			if (core_if->dma_enable == 0) {
++				ep->xfer_len += deptsiz.b.xfersize;
++			} else {
++				ep->xfer_len = deptsiz.b.xfersize;
++			}
++			FH_WRITE_REG32(&in_regs->dieptsiz, deptsiz.d32);
++		} else {
++			ep->xfer_len =
++			    (ep->total_len - ep->xfer_count) >
++			    ep->maxpacket ? ep->maxpacket : (ep->total_len -
++							     ep->xfer_count);
++
++			dma_desc = core_if->dev_if->in_desc_addr;
++
++			/** DMA Descriptor Setup */
++			dma_desc->status.b.bs = BS_HOST_BUSY;
++			dma_desc->status.b.l = 1;
++			dma_desc->status.b.ioc = 1;
++			dma_desc->status.b.sp =
++			    (ep->xfer_len == ep->maxpacket) ? 0 : 1;
++			dma_desc->status.b.bytes = ep->xfer_len;
++			dma_desc->buf = ep->dma_addr;
++			dma_desc->status.b.sts = 0;
++			dma_desc->status.b.bs = BS_HOST_READY;
++
++			/** DIEPDMA0 Register write */
++			FH_WRITE_REG32(&in_regs->diepdma,
++					core_if->dev_if->dma_in_desc_addr);
++		}
++
++		FH_DEBUGPL(DBG_PCDV,
++			    "IN len=%d  xfersize=%d pktcnt=%d [%08x]\n",
++			    ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt,
++			    deptsiz.d32);
++
++		/* Write the DMA register */
++		if (core_if->hwcfg2.b.architecture == FH_INT_DMA_ARCH) {
++			if (core_if->dma_desc_enable == 0)
++				FH_WRITE_REG32(&(in_regs->diepdma),
++						(uint32_t) ep->dma_addr);
++		}
++		if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable)
++			depctl.b.nextep = core_if->nextep_seq[ep->num];
++		/* EP enable, IN data in FIFO */
++		depctl.b.cnak = 1;
++		depctl.b.epena = 1;
++		FH_WRITE_REG32(&in_regs->diepctl, depctl.d32);
++
++		/**
++		 * Enable the Non-Periodic Tx FIFO empty interrupt, the
++		 * data will be written into the fifo by the ISR.
++		 */
++		if (!core_if->dma_enable) {
++			if (core_if->en_multiple_tx_fifo == 0) {
++				/* First clear it from GINTSTS */
++				intr_mask.b.nptxfempty = 1;
++				FH_MODIFY_REG32(&core_if->
++						 core_global_regs->gintmsk,
++						 intr_mask.d32, intr_mask.d32);
++
++			} else {
++				/* Enable the Tx FIFO Empty Interrupt for this EP */
++				if (ep->xfer_len > 0) {
++					uint32_t fifoemptymsk = 0;
++					fifoemptymsk |= 1 << ep->num;
++					FH_MODIFY_REG32(&core_if->
++							 dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
++							 0, fifoemptymsk);
++				}
++			}
++		}
++	} else {
++		fh_otg_dev_out_ep_regs_t *out_regs =
++		    core_if->dev_if->out_ep_regs[0];
++
++		depctl.d32 = FH_READ_REG32(&out_regs->doepctl);
++		deptsiz.d32 = FH_READ_REG32(&out_regs->doeptsiz);
++
++		/* Program the transfer size and packet count
++		 *      as follows: xfersize = N * maxpacket +
++		 *      short_packet pktcnt = N + (short_packet
++		 *      exist ? 1 : 0) 
++		 */
++		deptsiz.b.xfersize = ep->maxpacket;
++		deptsiz.b.pktcnt = 1;
++
++		if (core_if->dma_desc_enable == 0) {
++			FH_WRITE_REG32(&out_regs->doeptsiz, deptsiz.d32);
++		} else {
++			dma_desc = core_if->dev_if->out_desc_addr;
++
++			/** DMA Descriptor Setup */
++			dma_desc->status.b.bs = BS_HOST_BUSY;
++			dma_desc->status.b.l = 1;
++			dma_desc->status.b.ioc = 1;
++			dma_desc->status.b.bytes = ep->maxpacket;
++			dma_desc->buf = ep->dma_addr;
++			dma_desc->status.b.sts = 0;
++			dma_desc->status.b.bs = BS_HOST_READY;
++
++			/** DOEPDMA0 Register write */
++			FH_WRITE_REG32(&out_regs->doepdma,
++					core_if->dev_if->dma_out_desc_addr);
++		}
++
++		FH_DEBUGPL(DBG_PCDV,
++			    "OUT len=%d  xfersize=%d pktcnt=%d [%08x]\n",
++			    ep->xfer_len, deptsiz.b.xfersize, deptsiz.b.pktcnt,
++			    deptsiz.d32);
++
++		/* Write the DMA register */
++		if (core_if->hwcfg2.b.architecture == FH_INT_DMA_ARCH) {
++			if (core_if->dma_desc_enable == 0)
++				FH_WRITE_REG32(&(out_regs->doepdma),
++						(uint32_t) ep->dma_addr);
++
++		}
++
++		/* EP enable */
++		depctl.b.cnak = 1;
++		depctl.b.epena = 1;
++		FH_WRITE_REG32(&out_regs->doepctl, depctl.d32);
++
++	}
++}
++
++#ifdef DEBUG
++void dump_msg(const u8 * buf, unsigned int length)
++{
++	unsigned int start, num, i;
++	char line[52], *p;
++
++	if (length >= 512)
++		return;
++	start = 0;
++	while (length > 0) {
++		num = length < 16u ? length : 16u;
++		p = line;
++		for (i = 0; i < num; ++i) {
++			if (i == 8)
++				*p++ = ' ';
++			FH_SPRINTF(p, " %02x", buf[i]);
++			p += 3;
++		}
++		*p = 0;
++		FH_PRINTF("%6x: %s\n", start, line);
++		buf += num;
++		start += num;
++		length -= num;
++	}
++}
++#else
++static inline void dump_msg(const u8 * buf, unsigned int length)
++{
++}
++#endif
++
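++/*
++ * Usage sketch (illustrative only): dump_msg() prints up to 511 bytes
++ * as a 16-bytes-per-line hex dump with a gap after the eighth byte,
++ * e.g. for a received GET_DESCRIPTOR SETUP packet:
++ *
++ *	dump_msg(ep->xfer_buff, 8);
++ *
++ * which prints something of the form:
++ *
++ *	     0:  80 06 00 01 00 00 40 00
++ */
++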
++/**
++ * This function writes a packet into the Tx FIFO associated with the
++ * EP. For non-periodic EPs the non-periodic Tx FIFO is written.  For
++ * periodic EPs the periodic Tx FIFO associated with the EP is written
++ * with all packets for the next micro-frame.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to write packet for.
++ * @param dma Indicates if DMA is being used.
++ */
++void fh_otg_ep_write_packet(fh_otg_core_if_t * core_if, fh_ep_t * ep,
++			     int dma)
++{
++	/**
++	 * The buffer is padded to DWORD on a per packet basis in
++	 * slave/dma mode if the MPS is not DWORD aligned. The last
++	 * packet, if short, is also padded to a multiple of DWORD.
++	 *
++	 * ep->xfer_buff always starts DWORD aligned in memory and is a
++	 * multiple of DWORD in length
++	 *
++	 * ep->xfer_len can be any number of bytes
++	 *
++	 * ep->xfer_count is a multiple of ep->maxpacket until the last
++	 *	packet
++	 *
++	 * FIFO access is DWORD */
++
++	uint32_t i;
++	uint32_t byte_count;
++	uint32_t dword_count;
++	uint32_t *fifo;
++	uint32_t *data_buff = (uint32_t *) ep->xfer_buff;
++
++	FH_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if,
++		    ep);
++	if (ep->xfer_count >= ep->xfer_len) {
++		FH_WARN("%s() No data for EP%d!!!\n", __func__, ep->num);
++		return;
++	}
++
++	/* Find the byte length of the packet either short packet or MPS */
++	if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) {
++		byte_count = ep->xfer_len - ep->xfer_count;
++	} else {
++		byte_count = ep->maxpacket;
++	}
++
++	/* Find the DWORD length, padded by extra bytes as necessary if MPS
++	 * is not a multiple of DWORD */
++	dword_count = (byte_count + 3) / 4;
++
++#ifdef VERBOSE
++	dump_msg(ep->xfer_buff, byte_count);
++#endif
++
++	/**@todo NGS Where are the Periodic Tx FIFO addresses
++	 * initialized? What should this be? */
++
++	fifo = core_if->data_fifo[ep->num];
++
++	FH_DEBUGPL((DBG_PCDV | DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n",
++		    fifo, data_buff, *data_buff, byte_count);
++
++	if (!dma) {
++		for (i = 0; i < dword_count; i++, data_buff++) {
++			FH_WRITE_REG32(fifo, *data_buff);
++		}
++	}
++
++	ep->xfer_count += byte_count;
++	ep->xfer_buff += byte_count;
++	ep->dma_addr += byte_count;
++}
++
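++/*
++ * Worked example (illustrative): with maxpacket = 64 and 13 bytes left
++ * to send, byte_count = 13 and dword_count = (13 + 3) / 4 = 4, so four
++ * 32-bit FIFO writes push the short packet and the last word carries up
++ * to three padding bytes, matching the padding rules described above.
++ */
++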
++/**
++ * Set the EP STALL.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to set the stall on.
++ */
++void fh_otg_ep_set_stall(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	depctl_data_t depctl;
++	volatile uint32_t *depctl_addr;
++
++	FH_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
++		    (ep->is_in ? "IN" : "OUT"));
++
++	if (ep->is_in == 1) {
++		depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
++		depctl.d32 = FH_READ_REG32(depctl_addr);
++
++		/* set the disable and stall bits */
++		if (depctl.b.epena) {
++			depctl.b.epdis = 1;
++		}
++		depctl.b.stall = 1;
++		FH_WRITE_REG32(depctl_addr, depctl.d32);
++	} else {
++		depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
++		depctl.d32 = FH_READ_REG32(depctl_addr);
++
++		/* set the stall bit */
++		depctl.b.stall = 1;
++		FH_WRITE_REG32(depctl_addr, depctl.d32);
++	}
++
++	FH_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", FH_READ_REG32(depctl_addr));
++
++	return;
++}
++
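++/*
++ * Usage sketch (illustrative; the ep0 naming is hypothetical): a
++ * protocol STALL in response to an unsupported SETUP request would be
++ * issued as
++ *
++ *	fh_otg_ep_set_stall(core_if, ep0);
++ *
++ * Note the asymmetry above: an enabled IN endpoint is also disabled
++ * (epdis) before stalling, while an OUT endpoint only gets the stall
++ * bit set.
++ */
++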
++/**
++ * Clear the EP STALL.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to clear stall from.
++ */
++void fh_otg_ep_clear_stall(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	depctl_data_t depctl;
++	volatile uint32_t *depctl_addr;
++
++	FH_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
++		    (ep->is_in ? "IN" : "OUT"));
++
++	if (ep->is_in == 1) {
++		depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
++	} else {
++		depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
++	}
++
++	depctl.d32 = FH_READ_REG32(depctl_addr);
++
++	/* clear the stall bits */
++	depctl.b.stall = 0;
++
++	/*
++	 * USB Spec 9.4.5: For endpoints using data toggle, regardless
++	 * of whether an endpoint has the Halt feature set, a
++	 * ClearFeature(ENDPOINT_HALT) request always results in the
++	 * data toggle being reinitialized to DATA0.
++	 */
++	if (ep->type == FH_OTG_EP_TYPE_INTR ||
++	    ep->type == FH_OTG_EP_TYPE_BULK) {
++		depctl.b.setd0pid = 1;	/* DATA0 */
++	}
++
++	FH_WRITE_REG32(depctl_addr, depctl.d32);
++	FH_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", FH_READ_REG32(depctl_addr));
++	return;
++}
++
++/**
++ * This function reads a packet from the Rx FIFO into the destination
++ * buffer. To read SETUP data use fh_otg_read_setup_packet.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param dest	  Destination buffer for the packet.
++ * @param bytes  Number of bytes to copy to the destination.
++ */
++void fh_otg_read_packet(fh_otg_core_if_t * core_if,
++			 uint8_t * dest, uint16_t bytes)
++{
++	int i;
++	int word_count = (bytes + 3) / 4;
++
++	volatile uint32_t *fifo = core_if->data_fifo[0];
++	uint32_t *data_buff = (uint32_t *) dest;
++
++	/**
++	 * @todo Account for the case where _dest is not dword aligned. This
++	 * requires reading data from the FIFO into a uint32_t temp buffer,
++	 * then moving it into the data buffer.
++	 */
++
++	FH_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__,
++		    core_if, dest, bytes);
++
++	for (i = 0; i < word_count; i++, data_buff++) {
++		*data_buff = FH_READ_REG32(fifo);
++	}
++
++	return;
++}
++
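++/*
++ * Sketch for the @todo above (illustrative, not wired into the driver):
++ * a destination that is not DWORD aligned can be served by bouncing
++ * each FIFO word through a temporary and copying it out bytewise. The
++ * helper name is hypothetical; memcpy() is assumed to be available in
++ * this build environment.
++ */
++static inline void fh_otg_read_packet_unaligned(fh_otg_core_if_t * core_if,
++						uint8_t * dest, uint16_t bytes)
++{
++	volatile uint32_t *fifo = core_if->data_fifo[0];
++	uint32_t temp;
++
++	while (bytes > 0) {
++		uint16_t chunk = (bytes < 4) ? bytes : 4;
++		temp = FH_READ_REG32(fifo);	/* pop one FIFO word */
++		memcpy(dest, &temp, chunk);	/* bytewise, any alignment */
++		dest += chunk;
++		bytes -= chunk;
++	}
++}
++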
++/**
++ * This function reads the device registers and prints them
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++void fh_otg_dump_dev_registers(fh_otg_core_if_t * core_if)
++{
++	int i;
++	volatile uint32_t *addr;
++
++	FH_PRINTF("Device Global Registers\n");
++	addr = &core_if->dev_if->dev_global_regs->dcfg;
++	FH_PRINTF("DCFG		 @0x%08lX : 0x%08X\n",
++		   (unsigned long)addr, FH_READ_REG32(addr));
++	addr = &core_if->dev_if->dev_global_regs->dctl;
++	FH_PRINTF("DCTL		 @0x%08lX : 0x%08X\n",
++		   (unsigned long)addr, FH_READ_REG32(addr));
++	addr = &core_if->dev_if->dev_global_regs->dsts;
++	FH_PRINTF("DSTS		 @0x%08lX : 0x%08X\n",
++		   (unsigned long)addr, FH_READ_REG32(addr));
++	addr = &core_if->dev_if->dev_global_regs->diepmsk;
++	FH_PRINTF("DIEPMSK	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->dev_if->dev_global_regs->doepmsk;
++	FH_PRINTF("DOEPMSK	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->dev_if->dev_global_regs->daint;
++	FH_PRINTF("DAINT	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->dev_if->dev_global_regs->daintmsk;
++	FH_PRINTF("DAINTMSK	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->dev_if->dev_global_regs->dtknqr1;
++	FH_PRINTF("DTKNQR1	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	if (core_if->hwcfg2.b.dev_token_q_depth > 6) {
++		addr = &core_if->dev_if->dev_global_regs->dtknqr2;
++		FH_PRINTF("DTKNQR2	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++	}
++
++	addr = &core_if->dev_if->dev_global_regs->dvbusdis;
++	FH_PRINTF("DVBUSDIS	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++
++	addr = &core_if->dev_if->dev_global_regs->dvbuspulse;
++	FH_PRINTF("DVBUSPULSE	@0x%08lX : 0x%08X\n",
++		   (unsigned long)addr, FH_READ_REG32(addr));
++
++	addr = &core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
++	FH_PRINTF("DTKNQR3_DTHRCTL	 @0x%08lX : 0x%08X\n",
++		   (unsigned long)addr, FH_READ_REG32(addr));
++
++	if (core_if->hwcfg2.b.dev_token_q_depth > 22) {
++		addr = &core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
++		FH_PRINTF("DTKNQR4	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++	}
++
++	addr = &core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
++	FH_PRINTF("FIFOEMPMSK	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++
++	if (core_if->hwcfg2.b.multi_proc_int) {
++
++		addr = &core_if->dev_if->dev_global_regs->deachint;
++		FH_PRINTF("DEACHINT	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->dev_if->dev_global_regs->deachintmsk;
++		FH_PRINTF("DEACHINTMSK	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++
++		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++			addr =
++			    &core_if->dev_if->
++			    dev_global_regs->diepeachintmsk[i];
++			FH_PRINTF("DIEPEACHINTMSK[%d]	 @0x%08lX : 0x%08X\n",
++				   i, (unsigned long)addr,
++				   FH_READ_REG32(addr));
++		}
++
++		for (i = 0; i <= core_if->dev_if->num_out_eps; i++) {
++			addr =
++			    &core_if->dev_if->
++			    dev_global_regs->doepeachintmsk[i];
++			FH_PRINTF("DOEPEACHINTMSK[%d]	 @0x%08lX : 0x%08X\n",
++				   i, (unsigned long)addr,
++				   FH_READ_REG32(addr));
++		}
++	}
++
++	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++		FH_PRINTF("Device IN EP %d Registers\n", i);
++		addr = &core_if->dev_if->in_ep_regs[i]->diepctl;
++		FH_PRINTF("DIEPCTL	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->dev_if->in_ep_regs[i]->diepint;
++		FH_PRINTF("DIEPINT	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->dev_if->in_ep_regs[i]->dieptsiz;
++		FH_PRINTF("DIEPTSIZ	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->dev_if->in_ep_regs[i]->diepdma;
++		FH_PRINTF("DIEPDMA	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->dev_if->in_ep_regs[i]->dtxfsts;
++		FH_PRINTF("DTXFSTS	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->dev_if->in_ep_regs[i]->diepdmab;
++		FH_PRINTF("DIEPDMAB	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, 0 /*FH_READ_REG32(addr) */ );
++	}
++
++	for (i = 0; i <= core_if->dev_if->num_out_eps; i++) {
++		FH_PRINTF("Device OUT EP %d Registers\n", i);
++		addr = &core_if->dev_if->out_ep_regs[i]->doepctl;
++		FH_PRINTF("DOEPCTL	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->dev_if->out_ep_regs[i]->doepint;
++		FH_PRINTF("DOEPINT	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->dev_if->out_ep_regs[i]->doeptsiz;
++		FH_PRINTF("DOEPTSIZ	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->dev_if->out_ep_regs[i]->doepdma;
++		FH_PRINTF("DOEPDMA	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		if (core_if->dma_enable) {	/* Don't access this register in SLAVE mode */
++			addr = &core_if->dev_if->out_ep_regs[i]->doepdmab;
++			FH_PRINTF("DOEPDMAB	 @0x%08lX : 0x%08X\n",
++				   (unsigned long)addr, FH_READ_REG32(addr));
++		}
++
++	}
++}
++
++/**
++ * This function reads the SPRAM and prints its contents
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++void fh_otg_dump_spram(fh_otg_core_if_t * core_if)
++{
++	volatile uint8_t *addr, *start_addr, *end_addr;
++
++	FH_PRINTF("SPRAM Data:\n");
++	start_addr = (void *)core_if->core_global_regs;
++	FH_PRINTF("Base Address: 0x%8lX\n", (unsigned long)start_addr);
++	start_addr += 0x00028000;
++	end_addr = (void *)core_if->core_global_regs;
++	end_addr += 0x000280e0;
++
++	for (addr = start_addr; addr < end_addr; addr += 16) {
++		FH_PRINTF
++		    ("0x%8lX:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n",
++		     (unsigned long)addr, addr[0], addr[1], addr[2], addr[3],
++		     addr[4], addr[5], addr[6], addr[7], addr[8], addr[9],
++		     addr[10], addr[11], addr[12], addr[13], addr[14], addr[15]
++		    );
++	}
++
++	return;
++}
++
++/**
++ * This function reads the host registers and prints them
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++void fh_otg_dump_host_registers(fh_otg_core_if_t * core_if)
++{
++	int i;
++	volatile uint32_t *addr;
++
++	FH_PRINTF("Host Global Registers\n");
++	addr = &core_if->host_if->host_global_regs->hcfg;
++	FH_PRINTF("HCFG		 @0x%08lX : 0x%08X\n",
++		   (unsigned long)addr, FH_READ_REG32(addr));
++	addr = &core_if->host_if->host_global_regs->hfir;
++	FH_PRINTF("HFIR		 @0x%08lX : 0x%08X\n",
++		   (unsigned long)addr, FH_READ_REG32(addr));
++	addr = &core_if->host_if->host_global_regs->hfnum;
++	FH_PRINTF("HFNUM	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->host_if->host_global_regs->hptxsts;
++	FH_PRINTF("HPTXSTS	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->host_if->host_global_regs->haint;
++	FH_PRINTF("HAINT	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->host_if->host_global_regs->haintmsk;
++	FH_PRINTF("HAINTMSK	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	if (core_if->dma_desc_enable) {
++		addr = &core_if->host_if->host_global_regs->hflbaddr;
++		FH_PRINTF("HFLBADDR	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++	}
++
++	addr = core_if->host_if->hprt0;
++	FH_PRINTF("HPRT0	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++
++	for (i = 0; i < core_if->core_params->host_channels; i++) {
++		FH_PRINTF("Host Channel %d Specific Registers\n", i);
++		addr = &core_if->host_if->hc_regs[i]->hcchar;
++		FH_PRINTF("HCCHAR	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->host_if->hc_regs[i]->hcsplt;
++		FH_PRINTF("HCSPLT	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->host_if->hc_regs[i]->hcint;
++		FH_PRINTF("HCINT	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->host_if->hc_regs[i]->hcintmsk;
++		FH_PRINTF("HCINTMSK	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->host_if->hc_regs[i]->hctsiz;
++		FH_PRINTF("HCTSIZ	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		addr = &core_if->host_if->hc_regs[i]->hcdma;
++		FH_PRINTF("HCDMA	 @0x%08lX : 0x%08X\n",
++			   (unsigned long)addr, FH_READ_REG32(addr));
++		if (core_if->dma_desc_enable) {
++			addr = &core_if->host_if->hc_regs[i]->hcdmab;
++			FH_PRINTF("HCDMAB	 @0x%08lX : 0x%08X\n",
++				   (unsigned long)addr, FH_READ_REG32(addr));
++		}
++
++	}
++	return;
++}
++
++/**
++ * This function reads the core global registers and prints them
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++void fh_otg_dump_global_registers(fh_otg_core_if_t * core_if)
++{
++	int i, ep_num;
++	volatile uint32_t *addr;
++	char *txfsiz;
++
++	FH_PRINTF("Core Global Registers\n");
++	addr = &core_if->core_global_regs->gotgctl;
++	FH_PRINTF("GOTGCTL	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gotgint;
++	FH_PRINTF("GOTGINT	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gahbcfg;
++	FH_PRINTF("GAHBCFG	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gusbcfg;
++	FH_PRINTF("GUSBCFG	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->grstctl;
++	FH_PRINTF("GRSTCTL	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gintsts;
++	FH_PRINTF("GINTSTS	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gintmsk;
++	FH_PRINTF("GINTMSK	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->grxstsr;
++	FH_PRINTF("GRXSTSR	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->grxfsiz;
++	FH_PRINTF("GRXFSIZ	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gnptxfsiz;
++	FH_PRINTF("GNPTXFSIZ @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gnptxsts;
++	FH_PRINTF("GNPTXSTS	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gi2cctl;
++	FH_PRINTF("GI2CCTL	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gpvndctl;
++	FH_PRINTF("GPVNDCTL	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->ggpio;
++	FH_PRINTF("GGPIO	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->guid;
++	FH_PRINTF("GUID		 @0x%08lX : 0x%08X\n",
++		   (unsigned long)addr, FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gsnpsid;
++	FH_PRINTF("GSNPSID	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->ghwcfg1;
++	FH_PRINTF("GHWCFG1	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->ghwcfg2;
++	FH_PRINTF("GHWCFG2	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->ghwcfg3;
++	FH_PRINTF("GHWCFG3	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->ghwcfg4;
++	FH_PRINTF("GHWCFG4	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->glpmcfg;
++	FH_PRINTF("GLPMCFG	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gpwrdn;
++	FH_PRINTF("GPWRDN	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->gdfifocfg;
++	FH_PRINTF("GDFIFOCFG	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++	addr = &core_if->core_global_regs->adpctl;
++	FH_PRINTF("ADPCTL	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   fh_otg_adp_read_reg(core_if));
++	addr = &core_if->core_global_regs->hptxfsiz;
++	FH_PRINTF("HPTXFSIZ	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));	
++
++	if (core_if->en_multiple_tx_fifo == 0) {
++		ep_num = core_if->hwcfg4.b.num_dev_perio_in_ep;
++		txfsiz = "DPTXFSIZ";
++	} else {
++		ep_num = core_if->hwcfg4.b.num_in_eps;
++		txfsiz = "DIENPTXF";
++	}
++	for (i = 0; i < ep_num; i++) {
++		addr = &core_if->core_global_regs->dtxfsiz[i];
++		FH_PRINTF("%s[%d] @0x%08lX : 0x%08X\n", txfsiz, i + 1,
++			   (unsigned long)addr, FH_READ_REG32(addr));
++	}
++	addr = core_if->pcgcctl;
++	FH_PRINTF("PCGCCTL	 @0x%08lX : 0x%08X\n", (unsigned long)addr,
++		   FH_READ_REG32(addr));
++}
++
++/**
++ * Flush a Tx FIFO.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param num Tx FIFO to flush.
++ */
++void fh_otg_flush_tx_fifo(fh_otg_core_if_t * core_if, const int num)
++{
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	volatile grstctl_t greset = {.d32 = 0 };
++	int count = 0;
++
++	FH_DEBUGPL((DBG_CIL | DBG_PCDV), "Flush Tx FIFO %d\n", num);
++
++	greset.b.txfflsh = 1;
++	greset.b.txfnum = num;
++	FH_WRITE_REG32(&global_regs->grstctl, greset.d32);
++
++	do {
++		greset.d32 = FH_READ_REG32(&global_regs->grstctl);
++		if (++count > 10000) {
++			FH_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
++				 __func__, greset.d32,
++				 FH_READ_REG32(&global_regs->gnptxsts));
++			break;
++		}
++		fh_udelay(1);
++	} while (greset.b.txfflsh == 1);
++
++	/* Wait for 3 PHY Clocks */
++	fh_udelay(1);
++}
++
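++/*
++ * Usage note (illustrative): in the Synopsys GRSTCTL layout this core
++ * derives from, TxFNum = 0x10 requests a flush of all Tx FIFOs at once,
++ * so - assuming that numbering convention carries over here - flushing
++ * everything is a single call:
++ *
++ *	fh_otg_flush_tx_fifo(core_if, 0x10);
++ */
++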
++/**
++ * Flush Rx FIFO.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++void fh_otg_flush_rx_fifo(fh_otg_core_if_t * core_if)
++{
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	volatile grstctl_t greset = {.d32 = 0 };
++	int count = 0;
++
++	FH_DEBUGPL((DBG_CIL | DBG_PCDV), "%s\n", __func__);
++	greset.b.rxfflsh = 1;
++	FH_WRITE_REG32(&global_regs->grstctl, greset.d32);
++
++	do {
++		greset.d32 = FH_READ_REG32(&global_regs->grstctl);
++		if (++count > 10000) {
++			FH_WARN("%s() HANG! GRSTCTL=%0x\n", __func__,
++				 greset.d32);
++			break;
++		}
++		fh_udelay(1);
++	} while (greset.b.rxfflsh == 1);
++
++	/* Wait for 3 PHY Clocks */
++	fh_udelay(1);
++}
++
++/**
++ * Do a soft reset of the core. Be careful with this because it
++ * resets all the internal state machines of the core.
++ */
++void fh_otg_core_reset(fh_otg_core_if_t * core_if)
++{
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	volatile grstctl_t greset = {.d32 = 0 };
++	int count = 0;
++
++	FH_DEBUGPL(DBG_CILV, "%s\n", __func__);
++	/* Wait for AHB master IDLE state. */
++	do {
++		fh_udelay(10);
++		greset.d32 = FH_READ_REG32(&global_regs->grstctl);
++		if (++count > 100000) {
++			FH_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__,
++				 greset.d32);
++			return;
++		}
++	}
++	while (greset.b.ahbidle == 0);
++
++	/* Core Soft Reset */
++	count = 0;
++	greset.b.csftrst = 1;
++	FH_WRITE_REG32(&global_regs->grstctl, greset.d32);
++	do {
++		greset.d32 = FH_READ_REG32(&global_regs->grstctl);
++		if (++count > 10000) {
++			FH_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n",
++				 __func__, greset.d32);
++			break;
++		}
++		fh_udelay(1);
++	}
++	while (greset.b.csftrst == 1);
++
++	/* Wait for at least 3 PHY clocks; the 100 ms delay is far more than enough */
++	fh_mdelay(100);
++}
++
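++/*
++ * Illustrative sketch: the two loops above share a poll-with-timeout
++ * shape. A generic helper (hypothetical, not part of the driver) built
++ * only on FH_READ_REG32 and fh_udelay could look like this:
++ */
++static inline int fh_otg_wait_bit_sketch(volatile uint32_t *reg,
++					 uint32_t mask, int want_set,
++					 int max_iter)
++{
++	int count = 0;
++	uint32_t val;
++
++	do {
++		fh_udelay(1);
++		val = FH_READ_REG32(reg);
++		if (++count > max_iter)
++			return -1;	/* timed out */
++	} while (want_set ? !(val & mask) : (val & mask));
++	return 0;	/* bit reached the requested state */
++}
++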
++uint8_t fh_otg_is_device_mode(fh_otg_core_if_t * _core_if)
++{
++	return (fh_otg_mode(_core_if) != FH_HOST_MODE);
++}
++
++uint8_t fh_otg_is_host_mode(fh_otg_core_if_t * _core_if)
++{
++	return (fh_otg_mode(_core_if) == FH_HOST_MODE);
++}
++
++/**
++ * Register HCD callbacks. The callbacks are used to start and stop
++ * the HCD for interrupt processing.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param cb the HCD callback structure.
++ * @param p pointer to be passed to callback function (usb_hcd*).
++ */
++void fh_otg_cil_register_hcd_callbacks(fh_otg_core_if_t * core_if,
++					fh_otg_cil_callbacks_t * cb, void *p)
++{
++	core_if->hcd_cb = cb;
++	cb->p = p;
++}
++
++/**
++ * Register PCD callbacks. The callbacks are used to start and stop
++ * the PCD for interrupt processing.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param cb the PCD callback structure.
++ * @param p pointer to be passed to callback function (pcd*).
++ */
++void fh_otg_cil_register_pcd_callbacks(fh_otg_core_if_t * core_if,
++					fh_otg_cil_callbacks_t * cb, void *p)
++{
++	core_if->pcd_cb = cb;
++	cb->p = p;
++}
++
++#ifdef FH_EN_ISOC
++
++/**
++ * This function writes one (micro)frame's worth of isochronous data into the Tx FIFO
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ *
++ */
++void write_isoc_frame_data(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	fh_otg_dev_in_ep_regs_t *ep_regs;
++	dtxfsts_data_t txstatus = {.d32 = 0 };
++	uint32_t len = 0;
++	uint32_t dwords;
++
++	ep->xfer_len = ep->data_per_frame;
++	ep->xfer_count = 0;
++
++	ep_regs = core_if->dev_if->in_ep_regs[ep->num];
++
++	len = ep->xfer_len - ep->xfer_count;
++
++	if (len > ep->maxpacket) {
++		len = ep->maxpacket;
++	}
++
++	dwords = (len + 3) / 4;
++
++	/* While there is space in the queue, space in the FIFO, and
++	 * more data to transfer, write packets to the Tx FIFO */
++	txstatus.d32 =
++	    FH_READ_REG32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts);
++	FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32);
++
++	while (txstatus.b.txfspcavail > dwords &&
++	       ep->xfer_count < ep->xfer_len && ep->xfer_len != 0) {
++		/* Write the FIFO */
++		fh_otg_ep_write_packet(core_if, ep, 0);
++
++		len = ep->xfer_len - ep->xfer_count;
++		if (len > ep->maxpacket) {
++			len = ep->maxpacket;
++		}
++
++		dwords = (len + 3) / 4;
++		txstatus.d32 =
++		    FH_READ_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
++				   dtxfsts);
++		FH_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", ep->num,
++			    txstatus.d32);
++	}
++}
++
++/**
++ * This function initializes a descriptor chain for Isochronous transfer
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ *
++ */
++void fh_otg_iso_ep_start_frm_transfer(fh_otg_core_if_t * core_if,
++				       fh_ep_t * ep)
++{
++	deptsiz_data_t deptsiz = {.d32 = 0 };
++	depctl_data_t depctl = {.d32 = 0 };
++	dsts_data_t dsts = {.d32 = 0 };
++	volatile uint32_t *addr;
++
++	if (ep->is_in) {
++		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
++	} else {
++		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
++	}
++
++	ep->xfer_len = ep->data_per_frame;
++	ep->xfer_count = 0;
++	ep->xfer_buff = ep->cur_pkt_addr;
++	ep->dma_addr = ep->cur_pkt_dma_addr;
++
++	if (ep->is_in) {
++		/* Program the transfer size and packet count
++		 * as follows:
++		 *      xfersize = N * maxpacket + short_packet
++		 *      pktcnt = N + (short_packet exists ? 1 : 0)
++		 */
++		deptsiz.b.xfersize = ep->xfer_len;
++		deptsiz.b.pktcnt =
++		    (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
++		deptsiz.b.mc = deptsiz.b.pktcnt;
++		FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz,
++				deptsiz.d32);
++
++		/* Write the DMA register */
++		if (core_if->dma_enable) {
++			FH_WRITE_REG32(&
++					(core_if->dev_if->in_ep_regs[ep->num]->
++					 diepdma), (uint32_t) ep->dma_addr);
++		}
++	} else {
++		deptsiz.b.pktcnt =
++		    (ep->xfer_len + (ep->maxpacket - 1)) / ep->maxpacket;
++		deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
++
++		FH_WRITE_REG32(&core_if->dev_if->
++				out_ep_regs[ep->num]->doeptsiz, deptsiz.d32);
++
++		if (core_if->dma_enable) {
++			FH_WRITE_REG32(&
++					(core_if->dev_if->
++					 out_ep_regs[ep->num]->doepdma),
++					(uint32_t) ep->dma_addr);
++		}
++	}
++
++	/** Enable endpoint, clear nak  */
++
++	depctl.d32 = 0;
++	if (ep->bInterval == 1) {
++		dsts.d32 =
++		    FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
++		ep->next_frame = dsts.b.soffn + ep->bInterval;
++
++		if (ep->next_frame & 0x1) {
++			depctl.b.setd1pid = 1;
++		} else {
++			depctl.b.setd0pid = 1;
++		}
++	} else {
++		ep->next_frame += ep->bInterval;
++
++		if (ep->next_frame & 0x1) {
++			depctl.b.setd1pid = 1;
++		} else {
++			depctl.b.setd0pid = 1;
++		}
++	}
++	depctl.b.epena = 1;
++	depctl.b.cnak = 1;
++
++	FH_MODIFY_REG32(addr, 0, depctl.d32);
++	depctl.d32 = FH_READ_REG32(addr);
++
++	if (ep->is_in && core_if->dma_enable == 0) {
++		write_isoc_frame_data(core_if, ep);
++	}
++
++}
++#endif /* FH_EN_ISOC */
++
++static void fh_otg_set_uninitialized(int32_t * p, int size)
++{
++	int i;
++	for (i = 0; i < size; i++) {
++		p[i] = -1;
++	}
++}
++
++static int fh_otg_param_initialized(int32_t val)
++{
++	return val != -1;
++}
++
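++/*
++ * Minimal sketch of the sentinel pattern above (illustrative): every
++ * int32_t slot in core_params is pre-filled with -1 by
++ * fh_otg_set_uninitialized(), so a setter can distinguish "never
++ * configured" from any legal value:
++ *
++ *	fh_otg_set_uninitialized((int32_t *) core_if->core_params,
++ *				  sizeof(*core_if->core_params) /
++ *				  sizeof(int32_t));
++ *	...
++ *	if (!fh_otg_param_initialized(core_if->core_params->otg_cap))
++ *		... still -1, so no override was ever applied ...
++ */
++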
++static int fh_otg_setup_params(fh_otg_core_if_t * core_if)
++{
++	int i;
++	gintsts_data_t gintsts;
++	gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++
++	core_if->core_params = FH_ALLOC(sizeof(*core_if->core_params));
++	if (!core_if->core_params) {
++		return -FH_E_NO_MEMORY;
++	}
++	fh_otg_set_uninitialized((int32_t *) core_if->core_params,
++				  sizeof(*core_if->core_params) /
++				  sizeof(int32_t));
++	FH_PRINTF("Setting default values for core params\n");
++	fh_otg_set_param_otg_cap(core_if, fh_param_otg_cap_default);
++	fh_otg_set_param_dma_enable(core_if, fh_param_dma_enable_default);
++	fh_otg_set_param_dma_desc_enable(core_if,
++					  fh_param_dma_desc_enable_default);
++	fh_otg_set_param_opt(core_if, fh_param_opt_default);
++	fh_otg_set_param_dma_burst_size(core_if,
++					 fh_param_dma_burst_size_default);
++	fh_otg_set_param_host_support_fs_ls_low_power(core_if,
++						       fh_param_host_support_fs_ls_low_power_default);
++	fh_otg_set_param_enable_dynamic_fifo(core_if,
++					      fh_param_enable_dynamic_fifo_default);
++	fh_otg_set_param_data_fifo_size(core_if,
++					 fh_param_data_fifo_size_default);
++	fh_otg_set_param_dev_rx_fifo_size(core_if,
++					   fh_param_dev_rx_fifo_size_default);
++	fh_otg_set_param_dev_nperio_tx_fifo_size(core_if,
++						  fh_param_dev_nperio_tx_fifo_size_default);
++	fh_otg_set_param_host_rx_fifo_size(core_if,
++					    fh_param_host_rx_fifo_size_default);
++	fh_otg_set_param_host_nperio_tx_fifo_size(core_if,
++						   fh_param_host_nperio_tx_fifo_size_default);
++	fh_otg_set_param_host_perio_tx_fifo_size(core_if,
++						  fh_param_host_perio_tx_fifo_size_default);
++	fh_otg_set_param_max_transfer_size(core_if,
++					    fh_param_max_transfer_size_default);
++	fh_otg_set_param_max_packet_count(core_if,
++					   fh_param_max_packet_count_default);
++	fh_otg_set_param_host_channels(core_if,
++					fh_param_host_channels_default);
++	fh_otg_set_param_dev_endpoints(core_if,
++					fh_param_dev_endpoints_default);
++	fh_otg_set_param_phy_type(core_if, fh_param_phy_type_default);
++	fh_otg_set_param_speed(core_if, fh_param_speed_default);
++	fh_otg_set_param_host_ls_low_power_phy_clk(core_if,
++						    fh_param_host_ls_low_power_phy_clk_default);
++	fh_otg_set_param_phy_ulpi_ddr(core_if, fh_param_phy_ulpi_ddr_default);
++	fh_otg_set_param_phy_ulpi_ext_vbus(core_if,
++					    fh_param_phy_ulpi_ext_vbus_default);
++	fh_otg_set_param_phy_utmi_width(core_if,
++					 fh_param_phy_utmi_width_default);
++	fh_otg_set_param_ts_dline(core_if, fh_param_ts_dline_default);
++	fh_otg_set_param_i2c_enable(core_if, fh_param_i2c_enable_default);
++	fh_otg_set_param_ulpi_fs_ls(core_if, fh_param_ulpi_fs_ls_default);
++	fh_otg_set_param_en_multiple_tx_fifo(core_if,
++					      fh_param_en_multiple_tx_fifo_default);
++	
++	if (gintsts.b.curmode) {
++		/* Force device mode to get power-on values of device FIFOs */
++		gusbcfg_data_t gusbcfg = {.d32 = 0 };
++		gusbcfg.d32 =  FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++		gusbcfg.b.force_dev_mode = 1;
++		FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
++		fh_mdelay(100);
++		for (i = 0; i < 15; i++) {
++			fh_otg_set_param_dev_perio_tx_fifo_size(core_if,
++								fh_param_dev_perio_tx_fifo_size_default, i);
++		}
++		for (i = 0; i < 15; i++) {
++			fh_otg_set_param_dev_tx_fifo_size(core_if,
++							   fh_param_dev_tx_fifo_size_default, i);
++		}
++		gusbcfg.d32 =  FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++		gusbcfg.b.force_dev_mode = 0;
++		FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
++		fh_mdelay(100);
++	} else {
++		for (i = 0; i < 15; i++) {
++			fh_otg_set_param_dev_perio_tx_fifo_size(core_if,
++				fh_param_dev_perio_tx_fifo_size_default, i);
++		}
++		for (i = 0; i < 15; i++) {
++			fh_otg_set_param_dev_tx_fifo_size(core_if,
++				fh_param_dev_tx_fifo_size_default, i);
++		}
++	}
++
++	fh_otg_set_param_thr_ctl(core_if, fh_param_thr_ctl_default);
++	fh_otg_set_param_mpi_enable(core_if, fh_param_mpi_enable_default);
++	fh_otg_set_param_pti_enable(core_if, fh_param_pti_enable_default);
++	fh_otg_set_param_lpm_enable(core_if, fh_param_lpm_enable_default);
++		
++	fh_otg_set_param_besl_enable(core_if, fh_param_besl_enable_default);
++	fh_otg_set_param_baseline_besl(core_if, fh_param_baseline_besl_default);
++	fh_otg_set_param_deep_besl(core_if, fh_param_deep_besl_default);
++	
++	fh_otg_set_param_ic_usb_cap(core_if, fh_param_ic_usb_cap_default);
++	fh_otg_set_param_tx_thr_length(core_if,
++					fh_param_tx_thr_length_default);
++	fh_otg_set_param_rx_thr_length(core_if,
++					fh_param_rx_thr_length_default);
++	fh_otg_set_param_ahb_thr_ratio(core_if,
++					fh_param_ahb_thr_ratio_default);
++	fh_otg_set_param_power_down(core_if, fh_param_power_down_default);
++	fh_otg_set_param_reload_ctl(core_if, fh_param_reload_ctl_default);
++	fh_otg_set_param_dev_out_nak(core_if, fh_param_dev_out_nak_default);
++	fh_otg_set_param_cont_on_bna(core_if, fh_param_cont_on_bna_default);
++	fh_otg_set_param_ahb_single(core_if, fh_param_ahb_single_default);
++	fh_otg_set_param_otg_ver(core_if, fh_param_otg_ver_default);
++	fh_otg_set_param_adp_enable(core_if, fh_param_adp_enable_default);
++	return 0;
++}
++
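++/*
++ * Why the force_dev_mode sequence above exists (illustrative summary):
++ * the device Tx FIFO size registers expose their power-on defaults only
++ * in device mode, so when the core comes up in host mode the code
++ * briefly sets GUSBCFG.force_dev_mode, samples the defaults, clears the
++ * bit again and waits for the mode switch to settle (the two 100 ms
++ * delays).
++ */
++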
++uint8_t fh_otg_is_dma_enable(fh_otg_core_if_t * core_if)
++{
++	return core_if->dma_enable;
++}
++
++/* Checks if the parameter is outside of its valid range of values */
++#define FH_OTG_PARAM_TEST(_param_, _low_, _high_) \
++		(((_param_) < (_low_)) || \
++		((_param_) > (_high_)))
++
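++/*
++ * Example (illustrative): FH_OTG_PARAM_TEST(val, 0, 2) expands to
++ * ((val < 0) || (val > 2)), i.e. it is non-zero exactly when val lies
++ * outside [0, 2] - the shape every setter below relies on.
++ */
++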
++/* Parameter access functions */
++int fh_otg_set_param_otg_cap(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int valid;
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 2)) {
++		FH_WARN("Wrong value for otg_cap parameter\n");
++		FH_WARN("otg_cap parameter must be 0, 1 or 2\n");
++		retval = -FH_E_INVALID;
++		goto out;
++	}
++
++	valid = 1;
++	switch (val) {
++	case FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE:
++		if (core_if->hwcfg2.b.op_mode !=
++		    FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
++			valid = 0;
++		break;
++	case FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE:
++		if ((core_if->hwcfg2.b.op_mode !=
++		     FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
++		    && (core_if->hwcfg2.b.op_mode !=
++			FH_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
++		    && (core_if->hwcfg2.b.op_mode !=
++			FH_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE)
++		    && (core_if->hwcfg2.b.op_mode !=
++			FH_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) {
++			valid = 0;
++		}
++		break;
++	case FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE:
++		/* always valid */
++		break;
++	}
++	if (!valid) {
++		if (fh_otg_param_initialized(core_if->core_params->otg_cap)) {
++			FH_ERROR
++			    ("%d invalid for otg_cap parameter. Check HW configuration.\n",
++			     val);
++		}
++		val =
++		    (((core_if->hwcfg2.b.op_mode ==
++		       FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
++		      || (core_if->hwcfg2.b.op_mode ==
++			  FH_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
++		      || (core_if->hwcfg2.b.op_mode ==
++			  FH_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE)
++		      || (core_if->hwcfg2.b.op_mode ==
++			  FH_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ?
++		     FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE :
++		     FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->otg_cap = val;
++out:
++	return retval;
++}
++
++int32_t fh_otg_get_param_otg_cap(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->otg_cap;
++}
++
++int fh_otg_set_param_opt(fh_otg_core_if_t * core_if, int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong value for opt parameter\n");
++		return -FH_E_INVALID;
++	}
++	core_if->core_params->opt = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_opt(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->opt;
++}
++
++int fh_otg_set_param_dma_enable(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong value for dma enable\n");
++		return -FH_E_INVALID;
++	}
++
++	if ((val == 1) && (core_if->hwcfg2.b.architecture == 0)) {
++		if (fh_otg_param_initialized(core_if->core_params->dma_enable)) {
++			FH_ERROR
++			    ("%d invalid for dma_enable paremter. Check HW configuration.\n",
++			     val);
++		}
++		val = 0;
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->dma_enable = val;
++	if (val == 0) {
++		fh_otg_set_param_dma_desc_enable(core_if, 0);
++	}
++	return retval;
++}
++
++int32_t fh_otg_get_param_dma_enable(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->dma_enable;
++}
++
++int fh_otg_set_param_dma_desc_enable(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong value for dma_enable\n");
++		FH_WARN("dma_desc_enable must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if ((val == 1)
++	    && ((fh_otg_get_param_dma_enable(core_if) == 0)
++		|| (core_if->hwcfg4.b.desc_dma == 0))) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->dma_desc_enable)) {
++			FH_ERROR
++			    ("%d invalid for dma_desc_enable paremter. Check HW configuration.\n",
++			     val);
++		}
++		val = 0;
++		retval = -FH_E_INVALID;
++	}
++	core_if->core_params->dma_desc_enable = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_dma_desc_enable(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->dma_desc_enable;
++}
++
++int fh_otg_set_param_host_support_fs_ls_low_power(fh_otg_core_if_t * core_if,
++						   int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong value for host_support_fs_low_power\n");
++		FH_WARN("host_support_fs_low_power must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++	core_if->core_params->host_support_fs_ls_low_power = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_host_support_fs_ls_low_power(fh_otg_core_if_t *
++						       core_if)
++{
++	return core_if->core_params->host_support_fs_ls_low_power;
++}
++
++int fh_otg_set_param_enable_dynamic_fifo(fh_otg_core_if_t * core_if,
++					  int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong value for enable_dynamic_fifo\n");
++		FH_WARN("enable_dynamic_fifo must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if ((val == 1) && (core_if->hwcfg2.b.dynamic_fifo == 0)) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->enable_dynamic_fifo)) {
++			FH_ERROR
++			    ("%d invalid for enable_dynamic_fifo paremter. Check HW configuration.\n",
++			     val);
++		}
++		val = 0;
++		retval = -FH_E_INVALID;
++	}
++	core_if->core_params->enable_dynamic_fifo = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_enable_dynamic_fifo(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->enable_dynamic_fifo;
++}
++
++int fh_otg_set_param_data_fifo_size(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 32, 32768)) {
++		FH_WARN("Wrong value for data_fifo_size\n");
++		FH_WARN("data_fifo_size must be 32-32768\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val > core_if->hwcfg3.b.dfifo_depth) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->data_fifo_size)) {
++			FH_ERROR
++			    ("%d invalid for data_fifo_size parameter. Check HW configuration.%d\n",
++			     val, core_if->hwcfg3.b.dfifo_depth);
++		}
++		val = core_if->hwcfg3.b.dfifo_depth;
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->data_fifo_size = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_data_fifo_size(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->data_fifo_size;
++}
++
++int fh_otg_set_param_dev_rx_fifo_size(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
++		FH_WARN("Wrong value for dev_rx_fifo_size\n");
++		FH_WARN("dev_rx_fifo_size must be 16-32768\n");
++		return -FH_E_INVALID;
++	}
++
++	/*
++	if (val > FH_READ_REG32(&core_if->core_global_regs->grxfsiz)) {
++		if (fh_otg_param_initialized(core_if->core_params->dev_rx_fifo_size)) {
++		FH_WARN("%d invalid for dev_rx_fifo_size parameter\n", val);
++		}
++		val = FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
++		retval = -FH_E_INVALID;
++	}
++	*/
++
++	core_if->core_params->dev_rx_fifo_size = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_dev_rx_fifo_size(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->dev_rx_fifo_size;
++}
++
++int fh_otg_set_param_dev_nperio_tx_fifo_size(fh_otg_core_if_t * core_if,
++					      int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
++		FH_WARN("Wrong value for dev_nperio_tx_fifo\n");
++		FH_WARN("dev_nperio_tx_fifo must be 16-32768\n");
++		return -FH_E_INVALID;
++	}
++
++	/*
++	if (val > (FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz) >> 16)) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->dev_nperio_tx_fifo_size)) {
++			FH_ERROR
++			    ("%d invalid for dev_nperio_tx_fifo_size. Check HW configuration.\n",
++			     val);
++		}
++		val =
++		    (FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz) >>
++		     16);
++		retval = -FH_E_INVALID;
++	}*/
++
++	core_if->core_params->dev_nperio_tx_fifo_size = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_dev_nperio_tx_fifo_size(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->dev_nperio_tx_fifo_size;
++}
++
++int fh_otg_set_param_host_rx_fifo_size(fh_otg_core_if_t * core_if,
++					int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
++		FH_WARN("Wrong value for host_rx_fifo_size\n");
++		FH_WARN("host_rx_fifo_size must be 16-32768\n");
++		return -FH_E_INVALID;
++	}
++
++	/*
++	if (val > FH_READ_REG32(&core_if->core_global_regs->grxfsiz)) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->host_rx_fifo_size)) {
++			FH_ERROR
++			    ("%d invalid for host_rx_fifo_size. Check HW configuration.\n",
++			     val);
++		}
++		val = FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
++		retval = -FH_E_INVALID;
++	}
++	*/
++
++	core_if->core_params->host_rx_fifo_size = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_host_rx_fifo_size(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->host_rx_fifo_size;
++}
++
++int fh_otg_set_param_host_nperio_tx_fifo_size(fh_otg_core_if_t * core_if,
++					       int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
++		FH_WARN("Wrong value for host_nperio_tx_fifo_size\n");
++		FH_WARN("host_nperio_tx_fifo_size must be 16-32768\n");
++		return -FH_E_INVALID;
++	}
++
++	/*
++	if (val > (FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz) >> 16)) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->host_nperio_tx_fifo_size)) {
++			FH_ERROR
++			    ("%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
++			     val);
++		}
++		val =
++		    (FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz) >>
++		     16);
++		retval = -FH_E_INVALID;
++	}*/
++
++	core_if->core_params->host_nperio_tx_fifo_size = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_host_nperio_tx_fifo_size(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->host_nperio_tx_fifo_size;
++}
++
++int fh_otg_set_param_host_perio_tx_fifo_size(fh_otg_core_if_t * core_if,
++					      int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
++		FH_WARN("Wrong value for host_perio_tx_fifo_size\n");
++		FH_WARN("host_perio_tx_fifo_size must be 16-32768\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val > ((core_if->hptxfsiz.d32) >> 16)) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->host_perio_tx_fifo_size)) {
++			FH_ERROR
++			    ("%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
++			     val);
++		}
++		val = (core_if->hptxfsiz.d32) >> 16;
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->host_perio_tx_fifo_size = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_host_perio_tx_fifo_size(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->host_perio_tx_fifo_size;
++}
++
++int fh_otg_set_param_max_transfer_size(fh_otg_core_if_t * core_if,
++					int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 2047, 524288)) {
++		FH_WARN("Wrong value for max_transfer_size\n");
++		FH_WARN("max_transfer_size must be 2047-524288\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val >= (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->max_transfer_size)) {
++			FH_ERROR
++			    ("%d invalid for max_transfer_size. Check HW configuration.\n",
++			     val);
++		}
++		val =
++		    ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 11)) -
++		     1);
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->max_transfer_size = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_max_transfer_size(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->max_transfer_size;
++}
++
++int fh_otg_set_param_max_packet_count(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 15, 511)) {
++		FH_WARN("Wrong value for max_packet_count\n");
++		FH_WARN("max_packet_count must be 15-511\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val > (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->max_packet_count)) {
++			FH_ERROR
++			    ("%d invalid for max_packet_count. Check HW configuration.\n",
++			     val);
++		}
++		val =
++		    ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1);
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->max_packet_count = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_max_packet_count(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->max_packet_count;
++}
++
++int fh_otg_set_param_host_channels(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 1, 16)) {
++		FH_WARN("Wrong value for host_channels\n");
++		FH_WARN("host_channels must be 1-16\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val > (core_if->hwcfg2.b.num_host_chan + 1)) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->host_channels)) {
++			FH_ERROR
++			    ("%d invalid for host_channels. Check HW configurations.\n",
++			     val);
++		}
++		val = (core_if->hwcfg2.b.num_host_chan + 1);
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->host_channels = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_host_channels(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->host_channels;
++}
++
++int fh_otg_set_param_dev_endpoints(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 1, 15)) {
++		FH_WARN("Wrong value for dev_endpoints\n");
++		FH_WARN("dev_endpoints must be 1-15\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val > (core_if->hwcfg2.b.num_dev_ep)) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->dev_endpoints)) {
++			FH_ERROR
++			    ("%d invalid for dev_endpoints. Check HW configurations.\n",
++			     val);
++		}
++		val = core_if->hwcfg2.b.num_dev_ep;
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->dev_endpoints = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_dev_endpoints(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->dev_endpoints;
++}
++
++int fh_otg_set_param_phy_type(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	int valid = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 2)) {
++		FH_WARN("Wrong value for phy_type\n");
++		FH_WARN("phy_type must be 0,1 or 2\n");
++		return -FH_E_INVALID;
++	}
++#ifndef NO_FS_PHY_HW_CHECKS
++	if ((val == FH_PHY_TYPE_PARAM_UTMI) &&
++	    ((core_if->hwcfg2.b.hs_phy_type == 1) ||
++	     (core_if->hwcfg2.b.hs_phy_type == 3))) {
++		valid = 1;
++	} else if ((val == FH_PHY_TYPE_PARAM_ULPI) &&
++		   ((core_if->hwcfg2.b.hs_phy_type == 2) ||
++		    (core_if->hwcfg2.b.hs_phy_type == 3))) {
++		valid = 1;
++	} else if ((val == FH_PHY_TYPE_PARAM_FS) &&
++		   (core_if->hwcfg2.b.fs_phy_type == 1)) {
++		valid = 1;
++	}
++	if (!valid) {
++		if (fh_otg_param_initialized(core_if->core_params->phy_type)) {
++			FH_ERROR
++			    ("%d invalid for phy_type. Check HW configurations.\n",
++			     val);
++		}
++		if (core_if->hwcfg2.b.hs_phy_type) {
++			if ((core_if->hwcfg2.b.hs_phy_type == 3) ||
++			    (core_if->hwcfg2.b.hs_phy_type == 1)) {
++				val = FH_PHY_TYPE_PARAM_UTMI;
++			} else {
++				val = FH_PHY_TYPE_PARAM_ULPI;
++			}
++		}
++		retval = -FH_E_INVALID;
++	}
++#endif
++	core_if->core_params->phy_type = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_phy_type(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->phy_type;
++}
++
++int fh_otg_set_param_speed(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong value for speed parameter\n");
++		FH_WARN("max_speed parameter must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++	if ((val == 0)
++	    && fh_otg_get_param_phy_type(core_if) == FH_PHY_TYPE_PARAM_FS) {
++		if (fh_otg_param_initialized(core_if->core_params->speed)) {
++			FH_ERROR
++			    ("%d invalid for speed paremter. Check HW configuration.\n",
++			     val);
++		}
++		val =
++		    (fh_otg_get_param_phy_type(core_if) ==
++		     FH_PHY_TYPE_PARAM_FS ? 1 : 0);
++		retval = -FH_E_INVALID;
++	}
++	core_if->core_params->speed = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_speed(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->speed;
++}
++
++int fh_otg_set_param_host_ls_low_power_phy_clk(fh_otg_core_if_t * core_if,
++						int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN
++		    ("Wrong value for host_ls_low_power_phy_clk parameter\n");
++		FH_WARN("host_ls_low_power_phy_clk must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if ((val == FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ)
++	    && (fh_otg_get_param_phy_type(core_if) == FH_PHY_TYPE_PARAM_FS)) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->host_ls_low_power_phy_clk)) {
++			FH_ERROR
++			    ("%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
++			     val);
++		}
++		val =
++		    (fh_otg_get_param_phy_type(core_if) ==
++		     FH_PHY_TYPE_PARAM_FS) ?
++		    FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ :
++		    FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->host_ls_low_power_phy_clk = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_host_ls_low_power_phy_clk(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->host_ls_low_power_phy_clk;
++}
++
++int fh_otg_set_param_phy_ulpi_ddr(fh_otg_core_if_t * core_if, int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong value for phy_ulpi_ddr\n");
++		FH_WARN("phy_upli_ddr must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->phy_ulpi_ddr = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_phy_ulpi_ddr(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->phy_ulpi_ddr;
++}
++
++int fh_otg_set_param_phy_ulpi_ext_vbus(fh_otg_core_if_t * core_if,
++					int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong valaue for phy_ulpi_ext_vbus\n");
++		FH_WARN("phy_ulpi_ext_vbus must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->phy_ulpi_ext_vbus = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_phy_ulpi_ext_vbus(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->phy_ulpi_ext_vbus;
++}
++
++int fh_otg_set_param_phy_utmi_width(fh_otg_core_if_t * core_if, int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 8, 8) && FH_OTG_PARAM_TEST(val, 16, 16)) {
++		FH_WARN("Wrong valaue for phy_utmi_width\n");
++		FH_WARN("phy_utmi_width must be 8 or 16\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->phy_utmi_width = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_phy_utmi_width(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->phy_utmi_width;
++}
++
++int fh_otg_set_param_ulpi_fs_ls(fh_otg_core_if_t * core_if, int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong valaue for ulpi_fs_ls\n");
++		FH_WARN("ulpi_fs_ls must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->ulpi_fs_ls = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_ulpi_fs_ls(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->ulpi_fs_ls;
++}
++
++int fh_otg_set_param_ts_dline(fh_otg_core_if_t * core_if, int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong valaue for ts_dline\n");
++		FH_WARN("ts_dline must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->ts_dline = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_ts_dline(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->ts_dline;
++}
++
++int fh_otg_set_param_i2c_enable(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong valaue for i2c_enable\n");
++		FH_WARN("i2c_enable must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++#ifndef NO_FS_PHY_HW_CHECK
++	if (val == 1 && core_if->hwcfg3.b.i2c == 0) {
++		if (fh_otg_param_initialized(core_if->core_params->i2c_enable)) {
++			FH_ERROR
++			    ("%d invalid for i2c_enable. Check HW configuration.\n",
++			     val);
++		}
++		val = 0;
++		retval = -FH_E_INVALID;
++	}
++#endif
++
++	core_if->core_params->i2c_enable = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_i2c_enable(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->i2c_enable;
++}
++
++int fh_otg_set_param_dev_perio_tx_fifo_size(fh_otg_core_if_t * core_if,
++					     int32_t val, int fifo_num)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 4, 768)) {
++		FH_WARN("Wrong value for dev_perio_tx_fifo_size\n");
++		FH_WARN("dev_perio_tx_fifo_size must be 4-768\n");
++		return -FH_E_INVALID;
++	}
++
++	/*
++	if (val >
++	    (FH_READ_REG32(&core_if->core_global_regs->dtxfsiz[fifo_num]) >> 16)) {
++		FH_WARN("Value is larger then power-on FIFO size\n");
++		if (fh_otg_param_initialized
++		    (core_if->core_params->dev_perio_tx_fifo_size[fifo_num])) {
++			FH_ERROR
++			    ("`%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n",
++			     val, fifo_num);
++		}
++		val = (FH_READ_REG32(&core_if->core_global_regs->dtxfsiz[fifo_num]) >> 16);
++		retval = -FH_E_INVALID;
++	}
++	*/
++
++	core_if->core_params->dev_perio_tx_fifo_size[fifo_num] = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_dev_perio_tx_fifo_size(fh_otg_core_if_t * core_if,
++						 int fifo_num)
++{
++	return core_if->core_params->dev_perio_tx_fifo_size[fifo_num];
++}
++
++int fh_otg_set_param_en_multiple_tx_fifo(fh_otg_core_if_t * core_if,
++					  int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong valaue for en_multiple_tx_fifo,\n");
++		FH_WARN("en_multiple_tx_fifo must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val == 1 && core_if->hwcfg4.b.ded_fifo_en == 0) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->en_multiple_tx_fifo)) {
++			FH_ERROR
++			    ("%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
++			     val);
++		}
++		val = 0;
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->en_multiple_tx_fifo = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_en_multiple_tx_fifo(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->en_multiple_tx_fifo;
++}
++
++int fh_otg_set_param_dev_tx_fifo_size(fh_otg_core_if_t * core_if, int32_t val,
++				       int fifo_num)
++{
++	int retval = 0;
++	fifosize_data_t txfifosize;
++	txfifosize.d32 = FH_READ_REG32(&core_if->core_global_regs->dtxfsiz[fifo_num]);
++
++	if (FH_OTG_PARAM_TEST(val, 16, 32768)) {
++		FH_WARN("Wrong value for dev_tx_fifo_size\n");
++		FH_WARN("dev_tx_fifo_size must be 16-32768\n");
++		return -FH_E_INVALID;
++	}
++
++	/*
++	if (val > txfifosize.b.depth) {
++		FH_WARN("Value is larger then power-on FIFO size\n");
++		if (fh_otg_param_initialized
++		    (core_if->core_params->dev_tx_fifo_size[fifo_num])) {
++			FH_ERROR
++			    ("`%d' invalid for parameter `dev_tx_fifo_size_%d'. Check HW configuration.\n",
++			     val, fifo_num);
++		}
++		val = txfifosize.b.depth;
++		retval = -FH_E_INVALID;
++	}
++	*/
++
++	core_if->core_params->dev_tx_fifo_size[fifo_num] = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_dev_tx_fifo_size(fh_otg_core_if_t * core_if,
++					   int fifo_num)
++{
++	return core_if->core_params->dev_tx_fifo_size[fifo_num];
++}
++
++int fh_otg_set_param_thr_ctl(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 7)) {
++		FH_WARN("Wrong value for thr_ctl\n");
++		FH_WARN("thr_ctl must be 0-7\n");
++		return -FH_E_INVALID;
++	}
++
++	if ((val != 0) &&
++	    (!fh_otg_get_param_dma_enable(core_if) ||
++	     !core_if->hwcfg4.b.ded_fifo_en)) {
++		if (fh_otg_param_initialized(core_if->core_params->thr_ctl)) {
++			FH_ERROR
++			    ("%d invalid for parameter thr_ctl. Check HW configuration.\n",
++			     val);
++		}
++		val = 0;
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->thr_ctl = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_thr_ctl(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->thr_ctl;
++}
++
++int fh_otg_set_param_lpm_enable(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong value for lpm_enable\n");
++		FH_WARN("lpm_enable must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val && !core_if->hwcfg3.b.otg_lpm_en) {
++		if (fh_otg_param_initialized(core_if->core_params->lpm_enable)) {
++			FH_ERROR
++			    ("%d invalid for parameter lpm_enable. Check HW configuration.\n",
++			     val);
++		}
++		val = 0;
++		retval = -FH_E_INVALID;
++	}
++
++	core_if->core_params->lpm_enable = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_lpm_enable(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->lpm_enable;
++}
++
++int fh_otg_set_param_besl_enable(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("Wrong value for besl_enable\n");
++		FH_WARN("besl_enable must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->besl_enable = val;
++
++	if (val)
++		retval += fh_otg_set_param_lpm_enable(core_if, val);
++
++	return retval;
++}
++
++int32_t fh_otg_get_param_besl_enable(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->besl_enable;
++}
++
++int fh_otg_set_param_baseline_besl(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 15)) {
++		FH_WARN("Wrong value for baseline_besl\n");
++		FH_WARN("baseline_besl must be 0-15\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->baseline_besl = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_baseline_besl(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->baseline_besl;
++}
++
++int fh_otg_set_param_deep_besl(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 15)) {
++		FH_WARN("Wrong value for deep_besl\n");
++		FH_WARN("deep_besl must be 0-15\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->deep_besl = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_deep_besl(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->deep_besl;
++}
++
++int fh_otg_set_param_tx_thr_length(fh_otg_core_if_t * core_if, int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 8, 128)) {
++		FH_WARN("Wrong valaue for tx_thr_length\n");
++		FH_WARN("tx_thr_length must be 8 - 128\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->tx_thr_length = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_tx_thr_length(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->tx_thr_length;
++}
++
++int fh_otg_set_param_rx_thr_length(fh_otg_core_if_t * core_if, int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 8, 128)) {
++		FH_WARN("Wrong valaue for rx_thr_length\n");
++		FH_WARN("rx_thr_length must be 8 - 128\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->rx_thr_length = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_rx_thr_length(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->rx_thr_length;
++}
++
++int fh_otg_set_param_dma_burst_size(fh_otg_core_if_t * core_if, int32_t val)
++{
++	if (FH_OTG_PARAM_TEST(val, 1, 1) &&
++	    FH_OTG_PARAM_TEST(val, 4, 4) &&
++	    FH_OTG_PARAM_TEST(val, 8, 8) &&
++	    FH_OTG_PARAM_TEST(val, 16, 16) &&
++	    FH_OTG_PARAM_TEST(val, 32, 32) &&
++	    FH_OTG_PARAM_TEST(val, 64, 64) &&
++	    FH_OTG_PARAM_TEST(val, 128, 128) &&
++	    FH_OTG_PARAM_TEST(val, 256, 256)) {
++		FH_WARN("`%d' invalid for parameter `dma_burst_size'\n", val);
++		return -FH_E_INVALID;
++	}
++	core_if->core_params->dma_burst_size = val;
++	return 0;
++}
++
++int32_t fh_otg_get_param_dma_burst_size(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->dma_burst_size;
++}
++
++int fh_otg_set_param_pti_enable(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("`%d' invalid for parameter `pti_enable'\n", val);
++		return -FH_E_INVALID;
++	}
++	if (val && (core_if->snpsid < OTG_CORE_REV_2_72a)) {
++		if (fh_otg_param_initialized(core_if->core_params->pti_enable)) {
++			FH_ERROR
++			    ("%d invalid for parameter pti_enable. Check HW configuration.\n",
++			     val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++	core_if->core_params->pti_enable = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_pti_enable(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->pti_enable;
++}
++
++int fh_otg_set_param_mpi_enable(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("`%d' invalid for parameter `mpi_enable'\n", val);
++		return -FH_E_INVALID;
++	}
++	if (val && (core_if->hwcfg2.b.multi_proc_int == 0)) {
++		if (fh_otg_param_initialized(core_if->core_params->mpi_enable)) {
++			FH_ERROR
++			    ("%d invalid for parameter mpi_enable. Check HW configuration.\n",
++			     val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++	core_if->core_params->mpi_enable = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_mpi_enable(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->mpi_enable;
++}
++
++int fh_otg_set_param_adp_enable(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("`%d' invalid for parameter `adp_enable'\n", val);
++		return -FH_E_INVALID;
++	}
++	if (val && (core_if->hwcfg3.b.adp_supp == 0)) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->adp_supp_enable)) {
++			FH_ERROR
++			    ("%d invalid for parameter adp_enable. Check HW configuration.\n",
++			     val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++	core_if->core_params->adp_supp_enable = val;
++	/* Set OTG version to 2.0 when enabling ADP */
++	if (val)
++		fh_otg_set_param_otg_ver(core_if, 1);
++
++	return retval;
++}
++
++int32_t fh_otg_get_param_adp_enable(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->adp_supp_enable;
++}
++
++int fh_otg_set_param_ic_usb_cap(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("`%d' invalid for parameter `ic_usb_cap'\n", val);
++		FH_WARN("ic_usb_cap must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val && (core_if->hwcfg2.b.otg_enable_ic_usb == 0)) {
++		if (fh_otg_param_initialized(core_if->core_params->ic_usb_cap)) {
++			FH_ERROR
++			    ("%d invalid for parameter ic_usb_cap. Check HW configuration.\n",
++			     val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++	core_if->core_params->ic_usb_cap = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_ic_usb_cap(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->ic_usb_cap;
++}
++
++int fh_otg_set_param_ahb_thr_ratio(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	int valid = 1;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 3)) {
++		FH_WARN("`%d' invalid for parameter `ahb_thr_ratio'\n", val);
++		FH_WARN("ahb_thr_ratio must be 0 - 3\n");
++		return -FH_E_INVALID;
++	}
++
++	if (val
++	    && (core_if->snpsid < OTG_CORE_REV_2_81a
++		|| !fh_otg_get_param_thr_ctl(core_if))) {
++		valid = 0;
++	} else if (val
++		   && ((fh_otg_get_param_tx_thr_length(core_if) / (1 << val)) <
++		       4)) {
++		valid = 0;
++	}
++	if (valid == 0) {
++		if (fh_otg_param_initialized
++		    (core_if->core_params->ahb_thr_ratio)) {
++			FH_ERROR
++			    ("%d invalid for parameter ahb_thr_ratio. Check HW configuration.\n",
++			     val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++
++	core_if->core_params->ahb_thr_ratio = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_ahb_thr_ratio(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->ahb_thr_ratio;
++}
++
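++/*
++ * power_down selects the power saving mode: 0 disables power saving, 1 is
++ * accepted only with GHWCFG4.power_optimiz set, 2 (hibernation) needs a
++ * 2.91a or newer core with GHWCFG4.hiber, and 3 (extended hibernation)
++ * needs a 3.00a or newer core with GHWCFG4.xhiber.
++ */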
++int fh_otg_set_param_power_down(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	int valid = 1;
++	hwcfg4_data_t hwcfg4 = {.d32 = 0 };
++	hwcfg4.d32 = FH_READ_REG32(&core_if->core_global_regs->ghwcfg4);
++
++	if (FH_OTG_PARAM_TEST(val, 0, 3)) {
++		FH_WARN("`%d' invalid for parameter `power_down'\n", val);
++		FH_WARN("power_down must be 0 - 3\n");
++		return -FH_E_INVALID;
++	}
++	if ((val == 1) && (hwcfg4.b.power_optimiz == 0)) {
++		val = 0;
++	}
++	if ((val == 2) && ((core_if->snpsid < OTG_CORE_REV_2_91a) || (hwcfg4.b.hiber == 0))) {
++		valid = 0;
++	}
++	if ((val == 3)
++	    && ((core_if->snpsid < OTG_CORE_REV_3_00a)
++		|| (hwcfg4.b.xhiber == 0))) {
++		valid = 0;
++	}
++	if (valid == 0) {
++		if (fh_otg_param_initialized(core_if->core_params->power_down)) {
++			FH_ERROR
++			    ("%d invalid for parameter power_down. Check HW configuration.\n",
++			     val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++	core_if->core_params->power_down = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_power_down(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->power_down;
++}
++
++int fh_otg_set_param_reload_ctl(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	int valid = 1;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("`%d' invalid for parameter `reload_ctl'\n", val);
++		FH_WARN("reload_ctl must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if ((val == 1) && (core_if->snpsid < OTG_CORE_REV_2_92a)) {
++		valid = 0;
++	}
++	if (valid == 0) {
++		if (fh_otg_param_initialized(core_if->core_params->reload_ctl)) {
++			FH_ERROR("%d invalid for parameter reload_ctl."
++				  "Check HW configuration.\n", val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++	core_if->core_params->reload_ctl = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_reload_ctl(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->reload_ctl;
++}
++
++int fh_otg_set_param_dev_out_nak(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	int valid = 1;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("`%d' invalid for parameter `dev_out_nak'\n", val);
++		FH_WARN("dev_out_nak must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if ((val == 1) && ((core_if->snpsid < OTG_CORE_REV_2_93a) ||
++			   !(core_if->core_params->dma_desc_enable))) {
++		valid = 0;
++	}
++	if (valid == 0) {
++		if (fh_otg_param_initialized(core_if->core_params->dev_out_nak)) {
++			FH_ERROR("%d invalid for parameter dev_out_nak."
++				  "Check HW configuration.\n", val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++	core_if->core_params->dev_out_nak = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_dev_out_nak(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->dev_out_nak;
++}
++
++int fh_otg_set_param_cont_on_bna(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	int valid = 1;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("`%d' invalid for parameter `cont_on_bna'\n", val);
++		FH_WARN("cont_on_bna must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if ((val == 1) && ((core_if->snpsid < OTG_CORE_REV_2_94a) ||
++			   !(core_if->core_params->dma_desc_enable))) {
++		valid = 0;
++	}
++	if (valid == 0) {
++		if (fh_otg_param_initialized(core_if->core_params->cont_on_bna)) {
++			FH_ERROR("%d invalid for parameter cont_on_bna."
++				"Check HW configuration.\n", val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++	core_if->core_params->cont_on_bna = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_cont_on_bna(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->cont_on_bna;
++}
++
++int fh_otg_set_param_ahb_single(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++	int valid = 1;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("`%d' invalid for parameter `ahb_single'\n", val);
++		FH_WARN("ahb_single must be 0 or 1\n");
++		return -FH_E_INVALID;
++	}
++
++	if ((val == 1) && (core_if->snpsid < OTG_CORE_REV_2_94a)) {
++		valid = 0;
++	}
++	if (valid == 0) {
++		if (fh_otg_param_initialized(core_if->core_params->ahb_single)) {
++			FH_ERROR("%d invalid for parameter ahb_single."
++				  "Check HW configuration.\n", val);
++		}
++		retval = -FH_E_INVALID;
++		val = 0;
++	}
++	core_if->core_params->ahb_single = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_ahb_single(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->ahb_single;
++}
++
++int fh_otg_set_param_otg_ver(fh_otg_core_if_t * core_if, int32_t val)
++{
++	int retval = 0;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 1)) {
++		FH_WARN("`%d' invalid for parameter `otg_ver'\n", val);
++		FH_WARN
++		    ("otg_ver must be 0(for OTG 1.3 support) or 1(for OTG 2.0 support)\n");
++		return -FH_E_INVALID;
++	}
++
++	core_if->core_params->otg_ver = val;
++	return retval;
++}
++
++int32_t fh_otg_get_param_otg_ver(fh_otg_core_if_t * core_if)
++{
++	return core_if->core_params->otg_ver;
++}
++
++uint32_t fh_otg_get_hnpstatus(fh_otg_core_if_t * core_if)
++{
++	gotgctl_data_t otgctl;
++	otgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++	return otgctl.b.hstnegscs;
++}
++
++uint32_t fh_otg_get_srpstatus(fh_otg_core_if_t * core_if)
++{
++	gotgctl_data_t otgctl;
++	otgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++	return otgctl.b.sesreqscs;
++}
++
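++/*
++ * For OTG 1.3 cores (otg_ver == 0) the HNP request is written directly to
++ * GOTGCTL.HNPReq; for OTG 2.0 it is only latched in otg_sts, presumably to
++ * be acted on by the OTG 2.0 session state machine elsewhere in the driver.
++ */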
++void fh_otg_set_hnpreq(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	if (core_if->otg_ver == 0) {
++		gotgctl_data_t otgctl;
++		otgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++		otgctl.b.hnpreq = val;
++		FH_WRITE_REG32(&core_if->core_global_regs->gotgctl, otgctl.d32);
++	} else {
++		core_if->otg_sts = val;
++	}
++}
++
++uint32_t fh_otg_get_gsnpsid(fh_otg_core_if_t * core_if)
++{
++	return core_if->snpsid;
++}
++
++uint32_t fh_otg_get_mode(fh_otg_core_if_t * core_if)
++{
++	gintsts_data_t gintsts;
++	gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++	return gintsts.b.curmode;
++}
++
++uint32_t fh_otg_get_hnpcapable(fh_otg_core_if_t * core_if)
++{
++	gusbcfg_data_t usbcfg;
++	usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++	return usbcfg.b.hnpcap;
++}
++
++void fh_otg_set_hnpcapable(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	gusbcfg_data_t usbcfg;
++	usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++	usbcfg.b.hnpcap = val;
++	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, usbcfg.d32);
++}
++
++uint32_t fh_otg_get_srpcapable(fh_otg_core_if_t * core_if)
++{
++	gusbcfg_data_t usbcfg;
++	usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++	return usbcfg.b.srpcap;
++}
++
++void fh_otg_set_srpcapable(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	gusbcfg_data_t usbcfg;
++	usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++	usbcfg.b.srpcap = val;
++	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, usbcfg.d32);
++}
++
++uint32_t fh_otg_get_devspeed(fh_otg_core_if_t * core_if)
++{
++	dcfg_data_t dcfg;
++	dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
++	return dcfg.b.devspd;
++}
++
++void fh_otg_set_devspeed(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	dcfg_data_t dcfg;
++	dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
++	dcfg.b.devspd = val;
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
++}
++
++uint32_t fh_otg_get_busconnected(fh_otg_core_if_t * core_if)
++{
++	hprt0_data_t hprt0;
++	hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
++	return hprt0.b.prtconnsts;
++}
++
++uint32_t fh_otg_get_enumspeed(fh_otg_core_if_t * core_if)
++{
++	dsts_data_t dsts;
++	dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
++	return dsts.b.enumspd;
++}
++
++uint32_t fh_otg_get_prtpower(fh_otg_core_if_t * core_if)
++{
++	hprt0_data_t hprt0;
++	hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
++	return hprt0.b.prtpwr;
++}
++
++uint32_t fh_otg_get_core_state(fh_otg_core_if_t * core_if)
++{
++	return core_if->hibernation_suspend;
++}
++
++void fh_otg_set_prtpower(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	hprt0_data_t hprt0;
++	hprt0.d32 = fh_otg_read_hprt0(core_if);
++	hprt0.b.prtpwr = val;
++	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++}
++
++uint32_t fh_otg_get_prtsuspend(fh_otg_core_if_t * core_if)
++{
++	hprt0_data_t hprt0;
++	hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
++	return hprt0.b.prtsusp;
++}
++
++void fh_otg_set_prtsuspend(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	hprt0_data_t hprt0;
++	hprt0.d32 = fh_otg_read_hprt0(core_if);
++	hprt0.b.prtsusp = val;
++	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++}
++
++uint32_t fh_otg_get_fr_interval(fh_otg_core_if_t * core_if)
++{
++	hfir_data_t hfir;
++	hfir.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hfir);
++	return hfir.b.frint;
++}
++
++void fh_otg_set_fr_interval(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	hfir_data_t hfir;
++	uint32_t fram_int;
++	fram_int = calc_frame_interval(core_if);
++	hfir.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hfir);
++	if (!core_if->core_params->reload_ctl) {
++		FH_WARN("\nCannot reload HFIR register.HFIR.HFIRRldCtrl bit is"
++			 "not set to 1.\nShould load driver with reload_ctl=1"
++			 " module parameter\n");
++		return;
++	}
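++	/*
++	 * Each window below is roughly +/-10.6% around the nominal frame
++	 * interval returned by calc_frame_interval(), e.g. 3350..4150
++	 * around 3750 for an HS core on a 30 MHz clock.
++	 */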
++	switch (fram_int) {
++	case 3750:
++		if ((val < 3350) || (val > 4150)) {
++			FH_WARN("HFIR interval for HS core and 30 MHz"
++				 "clock freq should be from 3350 to 4150\n");
++			return;
++		}
++		break;
++	case 30000:
++		if ((val < 26820) || (val > 33180)) {
++			FH_WARN("HFIR interval for FS/LS core and 30 MHz"
++				 "clock freq should be from 26820 to 33180\n");
++			return;
++		}
++		break;
++	case 6000:
++		if ((val < 5360) || (val > 6640)) {
++			FH_WARN("HFIR interval for HS core and 48 MHz"
++				 "clock freq should be from 5360 to 6640\n");
++			return;
++		}
++		break;
++	case 48000:
++		if ((val < 42912) || (val > 53088)) {
++			FH_WARN("HFIR interval for FS/LS core and 48 MHz"
++				 "clock freq should be from 42912 to 53088\n");
++			return;
++		}
++		break;
++	case 7500:
++		if ((val < 6700) || (val > 8300)) {
++			FH_WARN("HFIR interval for HS core and 60 MHz"
++				 "clock freq should be from 6700 to 8300\n");
++			return;
++		}
++		break;
++	case 60000:
++		if ((val < 53640) || (val > 65536)) {
++			FH_WARN("HFIR interval for FS/LS core and 60 MHz"
++				 "clock freq should be from 53640 to 65536\n");
++			return;
++		}
++		break;
++	default:
++		FH_WARN("Unknown frame interval\n");
++		return;
++	}
++	hfir.b.frint = val;
++	FH_WRITE_REG32(&core_if->host_if->host_global_regs->hfir, hfir.d32);
++}
++
++uint32_t fh_otg_get_mode_ch_tim(fh_otg_core_if_t * core_if)
++{
++	hcfg_data_t hcfg;
++	hcfg.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
++	return hcfg.b.modechtimen;
++}
++
++void fh_otg_set_mode_ch_tim(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	hcfg_data_t hcfg;
++	hcfg.d32 = FH_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
++	hcfg.b.modechtimen = val;
++	FH_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32);
++}
++
++void fh_otg_set_prtresume(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	hprt0_data_t hprt0;
++	hprt0.d32 = fh_otg_read_hprt0(core_if);
++	hprt0.b.prtres = val;
++	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++}
++
++uint32_t fh_otg_get_remotewakesig(fh_otg_core_if_t * core_if)
++{
++	dctl_data_t dctl;
++	dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
++	return dctl.b.rmtwkupsig;
++}
++
++uint32_t fh_otg_get_beslreject(fh_otg_core_if_t * core_if)
++{
++	dctl_data_t dctl;
++	dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
++	return dctl.b.besl_reject;
++}
++
++void fh_otg_set_beslreject(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	dctl_data_t dctl;
++	dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
++	dctl.b.besl_reject = val;
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
++}
++
++uint32_t fh_otg_get_hirdthresh(fh_otg_core_if_t * core_if)
++{
++	glpmcfg_data_t lpmcfg;
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	return lpmcfg.b.hird_thres;
++}
++
++void fh_otg_set_hirdthresh(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	glpmcfg_data_t lpmcfg;
++
++	if (FH_OTG_PARAM_TEST(val, 0, 15)) {
++		FH_WARN("Wrong value for hird_thres\n");
++		FH_WARN("hird_thres must be 0-15\n");
++		return;
++	}
++
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
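++	/* hird_thres is treated as a 5-bit field: bit 4 is preserved across
++	 * the update and the 4-bit threshold value goes into bits 3:0. */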
++	lpmcfg.b.hird_thres &= (1<<4);
++	lpmcfg.b.hird_thres |= val;
++	FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
++}
++
++uint32_t fh_otg_get_lpm_portsleepstatus(fh_otg_core_if_t * core_if)
++{
++	glpmcfg_data_t lpmcfg;
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++
++	FH_ASSERT(!
++		   ((core_if->lx_state == FH_OTG_L1) ^ lpmcfg.b.prt_sleep_sts),
++		   "lx_state = %d, lmpcfg.prt_sleep_sts = %d\n",
++		   core_if->lx_state, lpmcfg.b.prt_sleep_sts);
++
++	return lpmcfg.b.prt_sleep_sts;
++}
++
++uint32_t fh_otg_get_lpm_remotewakeenabled(fh_otg_core_if_t * core_if)
++{
++	glpmcfg_data_t lpmcfg;
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	return lpmcfg.b.rem_wkup_en;
++}
++
++uint32_t fh_otg_get_lpmresponse(fh_otg_core_if_t * core_if)
++{
++	glpmcfg_data_t lpmcfg;
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	return lpmcfg.b.appl_resp;
++}
++
++void fh_otg_set_lpmresponse(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	glpmcfg_data_t lpmcfg;
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	lpmcfg.b.appl_resp = val;
++	FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
++}
++
++uint32_t fh_otg_get_hsic_connect(fh_otg_core_if_t * core_if)
++{
++	glpmcfg_data_t lpmcfg;
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	return lpmcfg.b.hsic_connect;
++}
++
++void fh_otg_set_hsic_connect(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	glpmcfg_data_t lpmcfg;
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	lpmcfg.b.hsic_connect = val;
++	FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
++}
++
++uint32_t fh_otg_get_inv_sel_hsic(fh_otg_core_if_t * core_if)
++{
++	glpmcfg_data_t lpmcfg;
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	return lpmcfg.b.inv_sel_hsic;
++}
++
++void fh_otg_set_inv_sel_hsic(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	glpmcfg_data_t lpmcfg;
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	lpmcfg.b.inv_sel_hsic = val;
++	FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
++}
++
++uint32_t fh_otg_get_gotgctl(fh_otg_core_if_t * core_if)
++{
++	return FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++}
++
++void fh_otg_set_gotgctl(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	FH_WRITE_REG32(&core_if->core_global_regs->gotgctl, val);
++}
++
++uint32_t fh_otg_get_gusbcfg(fh_otg_core_if_t * core_if)
++{
++	return FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++}
++
++void fh_otg_set_gusbcfg(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, val);
++}
++
++uint32_t fh_otg_get_grxfsiz(fh_otg_core_if_t * core_if)
++{
++	return FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
++}
++
++void fh_otg_set_grxfsiz(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	FH_WRITE_REG32(&core_if->core_global_regs->grxfsiz, val);
++}
++
++uint32_t fh_otg_get_gnptxfsiz(fh_otg_core_if_t * core_if)
++{
++	return FH_READ_REG32(&core_if->core_global_regs->gnptxfsiz);
++}
++
++void fh_otg_set_gnptxfsiz(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	FH_WRITE_REG32(&core_if->core_global_regs->gnptxfsiz, val);
++}
++
++uint32_t fh_otg_get_gpvndctl(fh_otg_core_if_t * core_if)
++{
++	return FH_READ_REG32(&core_if->core_global_regs->gpvndctl);
++}
++
++void fh_otg_set_gpvndctl(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	FH_WRITE_REG32(&core_if->core_global_regs->gpvndctl, val);
++}
++
++uint32_t fh_otg_get_ggpio(fh_otg_core_if_t * core_if)
++{
++	return FH_READ_REG32(&core_if->core_global_regs->ggpio);
++}
++
++void fh_otg_set_ggpio(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	FH_WRITE_REG32(&core_if->core_global_regs->ggpio, val);
++}
++
++uint32_t fh_otg_get_hprt0(fh_otg_core_if_t * core_if)
++{
++	return FH_READ_REG32(core_if->host_if->hprt0);
++}
++
++void fh_otg_set_hprt0(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	FH_WRITE_REG32(core_if->host_if->hprt0, val);
++}
++
++uint32_t fh_otg_get_guid(fh_otg_core_if_t * core_if)
++{
++	return FH_READ_REG32(&core_if->core_global_regs->guid);
++}
++
++void fh_otg_set_guid(fh_otg_core_if_t * core_if, uint32_t val)
++{
++	FH_WRITE_REG32(&core_if->core_global_regs->guid, val);
++}
++
++uint32_t fh_otg_get_hptxfsiz(fh_otg_core_if_t * core_if)
++{
++	return FH_READ_REG32(&core_if->core_global_regs->hptxfsiz);
++}
++
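++/* Returns the supported OTG revision as a BCD value: 0x0200 for OTG 2.0
++ * when otg_ver is 1, otherwise 0x0103 for OTG 1.3. */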
++uint16_t fh_otg_get_otg_version(fh_otg_core_if_t * core_if)
++{
++	return ((core_if->otg_ver == 1) ? (uint16_t)0x0200 : (uint16_t)0x0103);
++}
++
++/**
++ * Start the SRP timer to detect when the SRP does not complete within
++ * 6 seconds.
++ *
++ * @param core_if the pointer to the core_if structure.
++ */
++void fh_otg_pcd_start_srp_timer(fh_otg_core_if_t * core_if)
++{
++	core_if->srp_timer_started = 1;
++	FH_TIMER_SCHEDULE(core_if->srp_timer, 6000 /* 6 secs */ );
++}
++
++void fh_otg_initiate_srp(void * p)
++{
++	fh_otg_core_if_t * core_if = p;
++	uint32_t *addr = (uint32_t *)&(core_if->core_global_regs->gotgctl);
++	gotgctl_data_t mem;
++	gotgctl_data_t val;
++
++	val.d32 = FH_READ_REG32(addr);
++	if (val.b.sesreq) {
++		FH_ERROR("Session Request Already active!\n");
++		return;
++	}
++
++	FH_INFO("Session Request Initated\n");	//NOTICE
++	mem.d32 = FH_READ_REG32(addr);
++	mem.b.sesreq = 1;
++	FH_WRITE_REG32(addr, mem.d32);
++
++	/* Start the SRP timer */
++	fh_otg_pcd_start_srp_timer(core_if);
++	return;
++}
++
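++/* A GSNPSID readback of all ones means the core registers cannot be
++ * reached, e.g. when the (HAPS) platform is powered down or disconnected. */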
++int fh_otg_check_haps_status(fh_otg_core_if_t * core_if)
++{
++	if (FH_READ_REG32(&core_if->core_global_regs->gsnpsid) == 0xffffffff)
++		return -1;
++
++	return 0;
++}
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.h
+new file mode 100644
+index 00000000..bf5e773a
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil.h
+@@ -0,0 +1,1503 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_cil.h $
++ * $Revision: #136 $
++ * $Date: 2015/10/12 $
++ * $Change: 2972621 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++#if !defined(__FH_CIL_H__)
++#define __FH_CIL_H__
++
++#include "../fh_common_port/fh_list.h"
++#include "fh_otg_dbg.h"
++#include "fh_otg_regs.h"
++
++#include "fh_otg_core_if.h"
++#include "fh_otg_adp.h"
++
++/**
++ * @file
++ * This file contains the interface to the Core Interface Layer.
++ */
++
++#ifdef FH_UTE_CFI
++
++#define MAX_DMA_DESCS_PER_EP	256
++
++/**
++ * Enumeration for the data buffer mode
++ */
++typedef enum _data_buffer_mode {
++	BM_STANDARD = 0,	/* data buffer is in normal mode */
++	BM_SG = 1,		/* data buffer uses the scatter/gather mode */
++	BM_CONCAT = 2,		/* data buffer uses the concatenation mode */
++	BM_CIRCULAR = 3,	/* data buffer uses the circular DMA mode */
++	BM_ALIGN = 4		/* data buffer is in buffer alignment mode */
++} data_buffer_mode_e;
++#endif //FH_UTE_CFI
++
++/** Macros defined for FH OTG HW Release version */
++
++#define OTG_CORE_REV_2_60a	0x4F54260A
++#define OTG_CORE_REV_2_71a	0x4F54271A
++#define OTG_CORE_REV_2_72a	0x4F54272A
++#define OTG_CORE_REV_2_80a	0x4F54280A
++#define OTG_CORE_REV_2_81a	0x4F54281A
++#define OTG_CORE_REV_2_90a	0x4F54290A
++#define OTG_CORE_REV_2_91a	0x4F54291A
++#define OTG_CORE_REV_2_92a	0x4F54292A
++#define OTG_CORE_REV_2_93a	0x4F54293A
++#define OTG_CORE_REV_2_94a	0x4F54294A
++#define OTG_CORE_REV_3_00a	0x4F54300A
++#define OTG_CORE_REV_3_10a	0x4F54310A
++#define OTG_CORE_REV_3_20a	0x4F54320A
++#define OTG_CORE_REV_3_30a	0x4F54330A
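++/*
++ * The release IDs encode ASCII "OT" (0x4F54) in the upper half-word and the
++ * version in the lower half-word, e.g. 0x260A for release 2.60a.
++ */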
++
++/**
++ * Information for each ISOC packet.
++ */
++typedef struct iso_pkt_info {
++	uint32_t offset;
++	uint32_t length;
++	int32_t status;
++} iso_pkt_info_t;
++
++/**
++ * The <code>fh_ep</code> structure represents the state of a single
++ * endpoint when acting in device mode. It contains the data items
++ * needed for an endpoint to be activated and transfer packets.
++ */
++typedef struct fh_ep {
++	/** EP number used for register address lookup */
++	uint8_t num;
++	/** EP direction 0 = OUT */
++	unsigned is_in:1;
++	/** EP active. */
++	unsigned active:1;
++
++	/**
++	 * Periodic Tx FIFO # for IN EPs. For INTR EPs, set to 0 to use the
++	 * non-periodic Tx FIFO. If dedicated Tx FIFOs are enabled, this is
++	 * the Tx FIFO # for IN EPs. */
++	unsigned tx_fifo_num:4;
++	/** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */
++	unsigned type:2;
++#define FH_OTG_EP_TYPE_CONTROL	   0
++#define FH_OTG_EP_TYPE_ISOC	   1
++#define FH_OTG_EP_TYPE_BULK	   2
++#define FH_OTG_EP_TYPE_INTR	   3
++
++	/** DATA start PID for INTR and BULK EP */
++	unsigned data_pid_start:1;
++	/** Frame (even/odd) for ISOC EP */
++	unsigned even_odd_frame:1;
++	/** Max Packet bytes */
++	unsigned maxpacket:11;
++
++	/** Max Transfer size */
++	uint32_t maxxfer;
++
++	/** @name Transfer state */
++	/** @{ */
++
++	/**
++	 * Pointer to the beginning of the transfer buffer -- do not modify
++	 * during transfer.
++	 */
++	fh_dma_t dma_addr;
++
++	fh_dma_t dma_desc_addr;
++	fh_otg_dev_dma_desc_t *desc_addr;
++
++	/* Additional desc chain for ISO transfers */
++	fh_dma_t dma_desc_addr1;
++	fh_otg_dev_dma_desc_t *desc_addr1;
++	/* Flag indicating which one of two ISO desc chains currently is in use */
++	uint8_t use_add_buf;
++
++	uint8_t *start_xfer_buff;
++	/** pointer to the transfer buffer */
++	uint8_t *xfer_buff;
++	/** Number of bytes to transfer */
++	unsigned xfer_len:19;
++	/** Number of bytes transferred. */
++	unsigned xfer_count:19;
++	/** Sent ZLP */
++	unsigned sent_zlp:1;
++	/** Total len for control transfer */
++	unsigned total_len:19;
++
++	/** stall clear flag */
++	unsigned stall_clear_flag:1;
++
++	/** SETUP pkt cnt rollover flag for EP0 OUT */
++	unsigned stp_rollover;
++
++#ifdef FH_UTE_CFI
++	/* The buffer mode */
++	data_buffer_mode_e buff_mode;
++
++	/* The chain of DMA descriptors.
++	 * MAX_DMA_DESCS_PER_EP will be allocated for each active EP.
++	 */
++	fh_otg_dma_desc_t *descs;
++
++	/* The DMA address of the descriptors chain start */
++	dma_addr_t descs_dma_addr;
++	/** This variable stores the length of the last enqueued request */
++	uint32_t cfi_req_len;
++#endif				//FH_UTE_CFI
++
++/** Max DMA Descriptor count for any EP */
++#define MAX_DMA_DESC_CNT 256
++	/** Allocated DMA Desc count */
++	uint32_t desc_cnt;
++
++	/** First ISO Desc in use in the first chain*/
++	uint32_t iso_desc_first;
++	/** Last ISO Desc in use in the second chain */
++	uint32_t iso_desc_second;
++	/** Flag indicated that iso transfers were started */
++	uint8_t iso_transfer_started;
++
++	/** bInterval */
++	uint32_t bInterval;
++	/** Next frame num to setup next ISOC transfer */
++	uint32_t frame_num;
++	/** Indicates SOF number overrun in DSTS */
++	uint8_t frm_overrun;
++
++#ifdef FH_UTE_PER_IO
++	/** Next frame num for which will be setup DMA Desc */
++	uint32_t xiso_frame_num;
++	/** bInterval */
++	uint32_t xiso_bInterval;
++	/** Count of currently active transfers - shall be either 0 or 1 */
++	int xiso_active_xfers;
++	int xiso_queued_xfers;
++#endif
++#ifdef FH_EN_ISOC
++	/**
++	 * Variables specific for ISOC EPs
++	 *
++	 */
++	/** DMA addresses of ISOC buffers */
++	fh_dma_t dma_addr0;
++	fh_dma_t dma_addr1;
++
++	fh_dma_t iso_dma_desc_addr;
++	fh_otg_dev_dma_desc_t *iso_desc_addr;
++
++	/** pointer to the transfer buffers */
++	uint8_t *xfer_buff0;
++	uint8_t *xfer_buff1;
++
++	/** number of ISOC Buffer is processing */
++	uint32_t proc_buf_num;
++	/** Interval of ISOC Buffer processing */
++	uint32_t buf_proc_intrvl;
++	/** Data size for regular frame */
++	uint32_t data_per_frame;
++
++	/* todo - pattern data support is to be implemented in the future */
++	/** Data size for pattern frame */
++	uint32_t data_pattern_frame;
++	/** Frame number of pattern data */
++	uint32_t sync_frame;
++
++	/** bInterval */
++	uint32_t bInterval;
++	/** ISO Packet number per frame */
++	uint32_t pkt_per_frm;
++	/** Next frame num for which will be setup DMA Desc */
++	uint32_t next_frame;
++	/** Number of packets per buffer processing */
++	uint32_t pkt_cnt;
++	/** Info for all isoc packets */
++	iso_pkt_info_t *pkt_info;
++	/** current pkt number */
++	uint32_t cur_pkt;
++	/** current pkt number */
++	uint8_t *cur_pkt_addr;
++	/** current pkt number */
++	uint32_t cur_pkt_dma_addr;
++#endif				/* FH_EN_ISOC */
++
++/** @} */
++} fh_ep_t;
++
++/*
++ * Reasons for halting a host channel.
++ */
++typedef enum fh_otg_halt_status {
++	FH_OTG_HC_XFER_NO_HALT_STATUS,
++	FH_OTG_HC_XFER_COMPLETE,
++	FH_OTG_HC_XFER_URB_COMPLETE,
++	FH_OTG_HC_XFER_ACK,
++	FH_OTG_HC_XFER_NAK,
++	FH_OTG_HC_XFER_NYET,
++	FH_OTG_HC_XFER_STALL,
++	FH_OTG_HC_XFER_XACT_ERR,
++	FH_OTG_HC_XFER_FRAME_OVERRUN,
++	FH_OTG_HC_XFER_BABBLE_ERR,
++	FH_OTG_HC_XFER_DATA_TOGGLE_ERR,
++	FH_OTG_HC_XFER_AHB_ERR,
++	FH_OTG_HC_XFER_PERIODIC_INCOMPLETE,
++	FH_OTG_HC_XFER_URB_DEQUEUE
++} fh_otg_halt_status_e;
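++
++/*
++ * Illustrative sketch (not part of the original sources): a helper such as
++ * the hypothetical one below could map a halt status to a printable name in
++ * debug builds.
++ *
++ *	static inline const char *halt_status_str(fh_otg_halt_status_e st)
++ *	{
++ *		switch (st) {
++ *		case FH_OTG_HC_XFER_COMPLETE:     return "complete";
++ *		case FH_OTG_HC_XFER_URB_COMPLETE: return "urb complete";
++ *		case FH_OTG_HC_XFER_STALL:        return "stall";
++ *		case FH_OTG_HC_XFER_XACT_ERR:     return "transaction error";
++ *		case FH_OTG_HC_XFER_URB_DEQUEUE:  return "urb dequeue";
++ *		default:                          return "other";
++ *		}
++ *	}
++ */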
++
++/**
++ * Host channel descriptor. This structure represents the state of a single
++ * host channel when acting in host mode. It contains the data items needed to
++ * transfer packets to an endpoint via a host channel.
++ */
++typedef struct fh_hc {
++	/** Host channel number used for register address lookup */
++	uint8_t hc_num;
++
++	/** Device to access */
++	unsigned dev_addr:7;
++
++	/** EP to access */
++	unsigned ep_num:4;
++
++	/** EP direction. 0: OUT, 1: IN */
++	unsigned ep_is_in:1;
++
++	/**
++	 * EP speed.
++	 * One of the following values:
++	 *	- FH_OTG_EP_SPEED_LOW
++	 *	- FH_OTG_EP_SPEED_FULL
++	 *	- FH_OTG_EP_SPEED_HIGH
++	 */
++	unsigned speed:2;
++#define FH_OTG_EP_SPEED_LOW	0
++#define FH_OTG_EP_SPEED_FULL	1
++#define FH_OTG_EP_SPEED_HIGH	2
++
++	/**
++	 * Endpoint type.
++	 * One of the following values:
++	 *	- FH_OTG_EP_TYPE_CONTROL: 0
++	 *	- FH_OTG_EP_TYPE_ISOC: 1
++	 *	- FH_OTG_EP_TYPE_BULK: 2
++	 *	- FH_OTG_EP_TYPE_INTR: 3
++	 */
++	unsigned ep_type:2;
++
++	/** Max packet size in bytes */
++	unsigned max_packet:11;
++
++	/**
++	 * PID for initial transaction.
++	 * 0: DATA0,<br>
++	 * 1: DATA2,<br>
++	 * 2: DATA1,<br>
++	 * 3: MDATA (non-Control EP),
++	 *	  SETUP (Control EP)
++	 */
++	unsigned data_pid_start:2;
++#define FH_OTG_HC_PID_DATA0 0
++#define FH_OTG_HC_PID_DATA2 1
++#define FH_OTG_HC_PID_DATA1 2
++#define FH_OTG_HC_PID_MDATA 3
++#define FH_OTG_HC_PID_SETUP 3
++
++	/** Number of periodic transactions per (micro)frame */
++	unsigned multi_count:2;
++
++	/** @name Transfer State */
++	/** @{ */
++
++	/** Pointer to the current transfer buffer position. */
++	uint8_t *xfer_buff;
++	/**
++	 * In Buffer DMA mode this buffer will be used
++	 * if xfer_buff is not DWORD aligned.
++	 */
++	fh_dma_t align_buff;
++	/** Total number of bytes to transfer. */
++	uint32_t xfer_len;
++	/** Number of bytes transferred so far. */
++	uint32_t xfer_count;
++	/** Packet count at start of transfer.*/
++	uint16_t start_pkt_count;
++
++	/**
++	 * Flag to indicate whether the transfer has been started. Set to 1 if
++	 * it has been started, 0 otherwise.
++	 */
++	uint8_t xfer_started;
++
++	/**
++	 * Set to 1 to indicate that a PING request should be issued on this
++	 * channel. If 0, process normally.
++	 */
++	uint8_t do_ping;
++
++	/**
++	 * Set to 1 to indicate that the error count for this transaction is
++	 * non-zero. Set to 0 if the error count is 0.
++	 */
++	uint8_t error_state;
++
++	/**
++	 * Set to 1 to indicate that this channel should be halted the next
++	 * time a request is queued for the channel. This is necessary in
++	 * slave mode if no request queue space is available when an attempt
++	 * is made to halt the channel.
++	 */
++	uint8_t halt_on_queue;
++
++	/**
++	 * Set to 1 if the host channel has been halted, but the core is not
++	 * finished flushing queued requests. Otherwise 0.
++	 */
++	uint8_t halt_pending;
++
++	/**
++	 * Reason for halting the host channel.
++	 */
++	fh_otg_halt_status_e halt_status;
++
++	/*
++	 * Split settings for the host channel
++	 */
++	uint8_t do_split;		   /**< Enable split for the channel */
++	uint8_t complete_split;	   /**< Enable complete split */
++	uint8_t hub_addr;		   /**< Address of high speed hub */
++
++	uint8_t port_addr;		   /**< Port of the low/full speed device */
++	/** Split transaction position
++	 * One of the following values:
++	 *	  - FH_HCSPLIT_XACTPOS_MID
++	 *	  - FH_HCSPLIT_XACTPOS_BEGIN
++	 *	  - FH_HCSPLIT_XACTPOS_END
++	 *	  - FH_HCSPLIT_XACTPOS_ALL */
++	uint8_t xact_pos;
++
++	/** Set when the host channel does a short read. */
++	uint8_t short_read;
++
++	/**
++	 * Number of requests issued for this channel since it was assigned to
++	 * the current transfer (not counting PINGs).
++	 */
++	uint8_t requests;
++
++	/**
++	 * Queue Head for the transfer being processed by this channel.
++	 */
++	struct fh_otg_qh *qh;
++
++	/** @} */
++
++	/** Entry in list of host channels. */
++	FH_CIRCLEQ_ENTRY(fh_hc) hc_list_entry;
++
++	/** @name Descriptor DMA support */
++	/** @{ */
++
++	/** Number of Transfer Descriptors */
++	uint16_t ntd;
++
++	/** Descriptor List DMA address */
++	fh_dma_t desc_list_addr;
++
++	/** Scheduling micro-frame bitmap. */
++	uint8_t schinfo;
++
++	/** @} */
++} fh_hc_t;
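++
++/*
++ * Illustrative sketch (assumption, not from the original sources): minimal
++ * population of a fh_hc_t for a high-speed bulk IN transfer before it is
++ * passed to fh_otg_hc_init() and fh_otg_hc_start_transfer(). "buf" and "len"
++ * are hypothetical; ep_type 2 is FH_OTG_EP_TYPE_BULK per the list above.
++ *
++ *	fh_hc_t hc = { 0 };
++ *	hc.hc_num = 0;
++ *	hc.dev_addr = 1;
++ *	hc.ep_num = 2;
++ *	hc.ep_is_in = 1;
++ *	hc.speed = FH_OTG_EP_SPEED_HIGH;
++ *	hc.ep_type = 2;
++ *	hc.max_packet = 512;
++ *	hc.data_pid_start = FH_OTG_HC_PID_DATA0;
++ *	hc.multi_count = 1;
++ *	hc.xfer_buff = buf;
++ *	hc.xfer_len = len;
++ */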
++
++/**
++ * The following parameters may be specified when starting the module. These
++ * parameters define how the FH_otg controller should be configured.
++ */
++typedef struct fh_otg_core_params {
++	int32_t opt;
++
++	/**
++	 * Specifies the OTG capabilities. The driver will automatically
++	 * detect the value for this parameter if none is specified.
++	 * 0 - HNP and SRP capable (default)
++	 * 1 - SRP Only capable
++	 * 2 - No HNP/SRP capable
++	 */
++	int32_t otg_cap;
++
++	/**
++	 * Specifies whether to use slave or DMA mode for accessing the data
++	 * FIFOs. The driver will automatically detect the value for this
++	 * parameter if none is specified.
++	 * 0 - Slave
++	 * 1 - DMA (default, if available)
++	 */
++	int32_t dma_enable;
++
++	/**
++	 * When DMA mode is enabled, specifies whether to use Address DMA or DMA
++	 * Descriptor mode for accessing the data FIFOs in device mode. The driver
++	 * will automatically detect the value for this parameter if none is specified.
++	 * 0 - Address DMA
++	 * 1 - DMA Descriptor (default, if available)
++	 */
++	int32_t dma_desc_enable;
++	/** The DMA Burst size (applicable only for External DMA
++	 * Mode). 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
++	 */
++	int32_t dma_burst_size;	/* Translate this to GAHBCFG values */
++
++	/**
++	 * Specifies the maximum speed of operation in host and device mode.
++	 * The actual speed depends on the speed of the attached device and
++	 * the value of phy_type.
++	 * 0 - High Speed (default)
++	 * 1 - Full Speed
++	 */
++	int32_t speed;
++	/** Specifies whether low power mode is supported when attached
++	 *	to a Full Speed or Low Speed device in host mode.
++	 * 0 - Don't support low power mode (default)
++	 * 1 - Support low power mode
++	 */
++	int32_t host_support_fs_ls_low_power;
++
++	/** Specifies the PHY clock rate in low power mode when connected to a
++	 * Low Speed device in host mode. This parameter is applicable only if
++	 * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
++	 * then defaults to 6 MHz, otherwise 48 MHz.
++	 *
++	 * 0 - 48 MHz
++	 * 1 - 6 MHz
++	 */
++	int32_t host_ls_low_power_phy_clk;
++
++	/**
++	 * 0 - Use coreConsultant (cC) FIFO size parameters
++	 * 1 - Allow dynamic FIFO sizing (default)
++	 */
++	int32_t enable_dynamic_fifo;
++
++	/** Total number of 4-byte words in the data FIFO memory. This
++	 * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
++	 * Tx FIFOs.
++	 * 32 to 32768 (default 8192)
++	 * Note: The total FIFO memory depth in the FPGA configuration is 8192.
++	 */
++	int32_t data_fifo_size;
++
++	/** Number of 4-byte words in the Rx FIFO in device mode when dynamic
++	 * FIFO sizing is enabled.
++	 * 16 to 32768 (default 1064)
++	 */
++	int32_t dev_rx_fifo_size;
++
++	/** Number of 4-byte words in the non-periodic Tx FIFO in device mode
++	 * when dynamic FIFO sizing is enabled.
++	 * 16 to 32768 (default 1024)
++	 */
++	int32_t dev_nperio_tx_fifo_size;
++
++	/** Number of 4-byte words in each of the periodic Tx FIFOs in device
++	 * mode when dynamic FIFO sizing is enabled.
++	 * 4 to 768 (default 256)
++	 */
++	uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS];
++
++	/** Number of 4-byte words in the Rx FIFO in host mode when dynamic
++	 * FIFO sizing is enabled.
++	 * 16 to 32768 (default 1024)
++	 */
++	int32_t host_rx_fifo_size;
++
++	/** Number of 4-byte words in the non-periodic Tx FIFO in host mode
++	 * when Dynamic FIFO sizing is enabled in the core.
++	 * 16 to 32768 (default 1024)
++	 */
++	int32_t host_nperio_tx_fifo_size;
++
++	/** Number of 4-byte words in the host periodic Tx FIFO when dynamic
++	 * FIFO sizing is enabled.
++	 * 16 to 32768 (default 1024)
++	 */
++	int32_t host_perio_tx_fifo_size;
++
++	/** The maximum transfer size supported in bytes.
++	 * 2047 to 65,535 (default 65,535)
++	 */
++	int32_t max_transfer_size;
++
++	/** The maximum number of packets in a transfer.
++	 * 15 to 511 (default 511)
++	 */
++	int32_t max_packet_count;
++
++	/** The number of host channel registers to use.
++	 * 1 to 16 (default 12)
++	 * Note: The FPGA configuration supports a maximum of 12 host channels.
++	 */
++	int32_t host_channels;
++
++	/** The number of endpoints in addition to EP0 available for device
++	 * mode operations.
++	 * 1 to 15 (default 6 IN and OUT)
++	 * Note: The FPGA configuration supports a maximum of 6 IN and OUT
++	 * endpoints in addition to EP0.
++	 */
++	int32_t dev_endpoints;
++
++	/**
++	 * Specifies the type of PHY interface to use. By default, the driver
++	 * will automatically detect the phy_type.
++	 *
++	 * 0 - Full Speed PHY
++	 * 1 - UTMI+ (default)
++	 * 2 - ULPI
++	 */
++	int32_t phy_type;
++
++	/**
++	 * Specifies the UTMI+ Data Width. This parameter is
++	 * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
++	 * PHY_TYPE, this parameter indicates the data width between
++	 * the MAC and the ULPI Wrapper.) Also, this parameter is
++	 * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
++	 * to "8 and 16 bits", meaning that the core has been
++	 * configured to work at either data path width.
++	 *
++	 * 8 or 16 bits (default 16)
++	 */
++	int32_t phy_utmi_width;
++
++	/**
++	 * Specifies whether the ULPI operates at double or single
++	 * data rate. This parameter is only applicable if PHY_TYPE is
++	 * ULPI.
++	 *
++	 * 0 - single data rate ULPI interface with 8 bit wide data
++	 * bus (default)
++	 * 1 - double data rate ULPI interface with 4 bit wide data
++	 * bus
++	 */
++	int32_t phy_ulpi_ddr;
++
++	/**
++	 * Specifies whether to use the internal or external supply to
++	 * drive the vbus with a ULPI phy.
++	 */
++	int32_t phy_ulpi_ext_vbus;
++
++	/**
++	 * Specifies whether to use the I2C interface for full speed PHY. This
++	 * parameter is only applicable if PHY_TYPE is FS.
++	 * 0 - No (default)
++	 * 1 - Yes
++	 */
++	int32_t i2c_enable;
++
++	/** ULPI interface FS/LS serial mode select */
++	int32_t ulpi_fs_ls;
++
++	/** TermSel D-Line pulsing enable */
++	int32_t ts_dline;
++
++	/**
++	 * Specifies whether dedicated transmit FIFOs are
++	 * enabled for non-periodic IN endpoints in device mode
++	 * 0 - No
++	 * 1 - Yes
++	 */
++	int32_t en_multiple_tx_fifo;
++
++	/** Number of 4-byte words in each of the Tx FIFOs in device
++	 * mode when dynamic FIFO sizing is enabled.
++	 * 4 to 768 (default 256)
++	 */
++	uint32_t dev_tx_fifo_size[MAX_TX_FIFOS];
++
++	/** Thresholding enable flags:
++	 * bit 0 - enable non-ISO Tx thresholding
++	 * bit 1 - enable ISO Tx thresholding
++	 * bit 2 - enable Rx thresholding
++	 */
++	uint32_t thr_ctl;
++
++	/** Thresholding length for Tx FIFOs, in 32-bit DWORDs */
++	uint32_t tx_thr_length;
++
++	/** Thresholding length for Rx FIFOs, in 32-bit DWORDs */
++	uint32_t rx_thr_length;
++
++	/**
++	 * Specifies whether LPM (Link Power Management) support is enabled
++	 */
++	int32_t lpm_enable;
++
++	/**
++	 * Specifies whether LPM Errata (BESL) support is enabled
++	 */
++	int32_t besl_enable;
++
++	/**
++	 * Specifies the baseline BESL value
++	 */
++	int32_t baseline_besl;
++
++	/**
++	 * Specifies the deep BESL value
++	 */
++	int32_t deep_besl;
++	/** Per Transfer Interrupt mode enable flag
++	 * 1 - Enabled
++	 * 0 - Disabled
++	 */
++	int32_t pti_enable;
++
++	/** Multi Processor Interrupt mode enable flag
++	 * 1 - Enabled
++	 * 0 - Disabled
++	 */
++	int32_t mpi_enable;
++
++	/** IS_USB Capability
++	 * 1 - Enabled
++	 * 0 - Disabled
++	 */
++	int32_t ic_usb_cap;
++
++	/** AHB Threshold Ratio
++	 * 2'b00 AHB Threshold =     MAC Threshold
++	 * 2'b01 AHB Threshold = 1/2 MAC Threshold
++	 * 2'b10 AHB Threshold = 1/4 MAC Threshold
++	 * 2'b11 AHB Threshold = 1/8 MAC Threshold
++	 */
++	int32_t ahb_thr_ratio;
++
++	/** ADP Support
++	 * 1 - Enabled
++	 * 0 - Disabled
++	 */
++	int32_t adp_supp_enable;
++
++	/** HFIR Reload Control
++	 * 0 - The HFIR cannot be reloaded dynamically.
++	 * 1 - Allow dynamic reloading of the HFIR register during runtime.
++	 */
++	int32_t reload_ctl;
++
++	/** DCFG: Enable device OUT NAK
++	 * 0 - The core does not set NAK after a Bulk OUT transfer completes.
++	 * 1 - The core sets NAK after a Bulk OUT transfer completes.
++	 */
++	int32_t dev_out_nak;
++
++	/** DCFG: Enable Continue on BNA
++	 * After receiving a BNA interrupt the core disables the endpoint; when the
++	 * endpoint is re-enabled by the application, the core starts processing
++	 * 0 - from the DOEPDMA descriptor
++	 * 1 - from the descriptor which received the BNA.
++	 */
++	int32_t cont_on_bna;
++
++	/** GAHBCFG: AHB Single Support
++	 * When programmed, this bit enables SINGLE transfers for the remainder
++	 * data of a transfer in DMA mode of operation.
++	 * 0 - the remainder data is sent using an INCR burst size.
++	 * 1 - the remainder data is sent using a SINGLE burst size.
++	 */
++	int32_t ahb_single;
++
++	/** Core Power down mode
++	 * 0 - No Power Down is enabled
++	 * 1 - Reserved
++	 * 2 - Complete Power Down (Hibernation)
++	 */
++	int32_t power_down;
++
++	/** OTG revision supported
++	 * 0 - OTG 1.3 revision
++	 * 1 - OTG 2.0 revision
++	 */
++	int32_t otg_ver;
++			
++} fh_otg_core_params_t;
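++
++/*
++ * Illustrative sketch (assumption): how platform code might override a few
++ * of the documented defaults before core initialization. In practice the
++ * driver normally fills this structure from module parameters.
++ *
++ *	fh_otg_core_params_t params = { 0 };
++ *	params.otg_cap = 2;		// no HNP/SRP capable
++ *	params.dma_enable = 1;		// DMA mode (default, if available)
++ *	params.dma_desc_enable = 0;	// Address DMA
++ *	params.speed = 0;		// High Speed (default)
++ *	params.host_channels = 12;	// default / FPGA maximum
++ *	params.dev_endpoints = 6;	// default, in addition to EP0
++ */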
++
++#ifdef DEBUG
++struct fh_otg_core_if;
++typedef struct hc_xfer_info {
++	struct fh_otg_core_if *core_if;
++	fh_hc_t *hc;
++} hc_xfer_info_t;
++#endif
++
++typedef struct ep_xfer_info {
++	struct fh_otg_core_if *core_if;
++	fh_ep_t *ep;
++	uint8_t state;
++} ep_xfer_info_t;
++/*
++ * Device States
++ */
++typedef enum fh_otg_lx_state {
++	/** On state */
++	FH_OTG_L0,
++	/** LPM sleep state */
++	FH_OTG_L1,
++	/** USB suspend state */
++	FH_OTG_L2,
++	/** Off state */
++	FH_OTG_L3
++} fh_otg_lx_state_e;
++
++struct fh_otg_global_regs_backup {
++	uint32_t gotgctl_local;
++	uint32_t gintmsk_local;
++	uint32_t gahbcfg_local;
++	uint32_t gusbcfg_local;
++	uint32_t grxfsiz_local;
++	uint32_t gnptxfsiz_local;
++#ifdef CONFIG_USB_FH_OTG_LPM
++	uint32_t glpmcfg_local;
++#endif
++	uint32_t gi2cctl_local;
++	uint32_t hptxfsiz_local;
++	uint32_t pcgcctl_local;
++	uint32_t gdfifocfg_local;
++	uint32_t dtxfsiz_local[MAX_EPS_CHANNELS];
++	uint32_t gpwrdn_local;
++	uint32_t xhib_pcgcctl;
++	uint32_t xhib_gpwrdn;
++};
++
++struct fh_otg_host_regs_backup {
++	uint32_t hcfg_local;
++	uint32_t haintmsk_local;
++	uint32_t hcintmsk_local[MAX_EPS_CHANNELS];
++	uint32_t hprt0_local;
++	uint32_t hfir_local;
++};
++
++struct fh_otg_dev_regs_backup {
++	uint32_t dcfg;
++	uint32_t dctl;
++	uint32_t daintmsk;
++	uint32_t diepmsk;
++	uint32_t doepmsk;
++	uint32_t diepctl[MAX_EPS_CHANNELS];
++	uint32_t dieptsiz[MAX_EPS_CHANNELS];
++	uint32_t diepdma[MAX_EPS_CHANNELS];
++	uint32_t doepctl[MAX_EPS_CHANNELS];
++	uint32_t doeptsiz[MAX_EPS_CHANNELS];
++	uint32_t doepdma[MAX_EPS_CHANNELS];
++};
++/**
++ * The <code>fh_otg_core_if</code> structure contains information needed to manage
++ * the FH_otg controller acting in either host or device mode. It
++ * represents the programming view of the controller as a whole.
++ */
++struct fh_otg_core_if {
++	/** Parameters that define how the core should be configured.*/
++	fh_otg_core_params_t *core_params;
++
++	/** Core Global registers starting at offset 000h. */
++	fh_otg_core_global_regs_t *core_global_regs;
++
++	/** Device-specific information */
++	fh_otg_dev_if_t *dev_if;
++	/** Host-specific information */
++	fh_otg_host_if_t *host_if;
++
++	/** Value from SNPSID register */
++	uint32_t snpsid;
++
++	/*
++	 * Set to 1 if the core PHY interface bits in USBCFG have been
++	 * initialized.
++	 */
++	uint8_t phy_init_done;
++
++	/*
++	 * SRP Success flag, set by srp success interrupt in FS I2C mode
++	 */
++	uint8_t srp_success;
++	uint8_t srp_timer_started;
++	/** Timer for SRP. If it expires before SRP is successful
++	 * clear the SRP. */
++	fh_timer_t *srp_timer;
++
++#ifdef FH_DEV_SRPCAP
++	/* This timer is needed to power on the hibernated host core if SRP is not
++	 * initiated by a connected SRP-capable device within a limited period of time
++	 */
++	uint8_t pwron_timer_started;
++	fh_timer_t *pwron_timer;
++#endif
++	/* Common configuration information */
++	/** Power and Clock Gating Control Register */
++	volatile uint32_t *pcgcctl;
++#define FH_OTG_PCGCCTL_OFFSET 0xE00
++
++	/** Push/pop addresses for endpoints or host channels.*/
++	uint32_t *data_fifo[MAX_EPS_CHANNELS];
++#define FH_OTG_DATA_FIFO_OFFSET 0x1000
++#define FH_OTG_DATA_FIFO_SIZE 0x1000
++
++	/** Total RAM for FIFOs (Bytes) */
++	uint16_t total_fifo_size;
++	/** Size of Rx FIFO (Bytes) */
++	uint16_t rx_fifo_size;
++	/** Size of Non-periodic Tx FIFO (Bytes) */
++	uint16_t nperio_tx_fifo_size;
++
++	/** 1 if DMA is enabled, 0 otherwise. */
++	uint8_t dma_enable;
++
++	/** 1 if DMA descriptor is enabled, 0 otherwise. */
++	uint8_t dma_desc_enable;
++
++	/** 1 if PTI Enhancement mode is enabled, 0 otherwise. */
++	uint8_t pti_enh_enable;
++
++	/** 1 if MPI Enhancement mode is enabled, 0 otherwise. */
++	uint8_t multiproc_int_enable;
++
++	/** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */
++	uint8_t en_multiple_tx_fifo;
++
++	/** Set to 1 if multiple packets of a high-bandwidth transfer are in
++	 * the process of being queued */
++	uint8_t queuing_high_bandwidth;
++
++	/** Hardware Configuration -- stored here for convenience.*/
++	hwcfg1_data_t hwcfg1;
++	hwcfg2_data_t hwcfg2;
++	hwcfg3_data_t hwcfg3;
++	hwcfg4_data_t hwcfg4;
++	fifosize_data_t hptxfsiz;
++
++	/** Host and Device Configuration -- stored here for convenience.*/
++	hcfg_data_t hcfg;
++	dcfg_data_t dcfg;
++
++	/** The operational state. During transitions
++	 * (a_host -> a_peripheral and b_device -> b_host) this may not
++	 * match the core, but allows the software to determine
++	 * transitions.
++	 */
++	uint8_t op_state;
++
++	/** Test mode for PET testing */
++	uint8_t test_mode;
++
++	/**
++	 * Set to 1 if the HCD needs to be restarted on a session request
++	 * interrupt. This is required if no connector ID status change has
++	 * occurred since the HCD was last disconnected.
++	 */
++	uint8_t restart_hcd_on_session_req;
++
++	/** HCD callbacks */
++	/** A-Device is a_host */
++#define A_HOST		(1)
++	/** A-Device is a_suspend */
++#define A_SUSPEND	(2)
++	/** A-Device is a_peripheral */
++#define A_PERIPHERAL	(3)
++	/** B-Device is operating as a Peripheral. */
++#define B_PERIPHERAL	(4)
++	/** B-Device is operating as a Host. */
++#define B_HOST		(5)
++
++	/** HCD callbacks */
++	struct fh_otg_cil_callbacks *hcd_cb;
++	/** PCD callbacks */
++	struct fh_otg_cil_callbacks *pcd_cb;
++
++	/** Device mode Periodic Tx FIFO Mask */
++	uint32_t p_tx_msk;
++	/** Device mode Tx FIFO Mask */
++	uint32_t tx_msk;
++
++	/** Workqueue object used for handling several interrupts */
++	fh_workq_t *wq_otg;
++
++	/** Timer object used for handling "Wakeup Detected" Interrupt */
++	fh_timer_t *wkp_timer;
++	/** These arrays are used for debug purposes for the DEV OUT NAK enhancement */
++	uint32_t start_doeptsiz_val[MAX_EPS_CHANNELS];
++	ep_xfer_info_t ep_xfer_info[MAX_EPS_CHANNELS];
++	fh_timer_t *ep_xfer_timer[MAX_EPS_CHANNELS];
++#ifdef DEBUG
++	uint32_t start_hcchar_val[MAX_EPS_CHANNELS];
++
++	hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS];
++	fh_timer_t *hc_xfer_timer[MAX_EPS_CHANNELS];
++
++	uint32_t hfnum_7_samples;
++	uint64_t hfnum_7_frrem_accum;
++	uint32_t hfnum_0_samples;
++	uint64_t hfnum_0_frrem_accum;
++	uint32_t hfnum_other_samples;
++	uint64_t hfnum_other_frrem_accum;
++#endif
++
++#ifdef FH_UTE_CFI
++	uint16_t pwron_rxfsiz;
++	uint16_t pwron_gnptxfsiz;
++	uint16_t pwron_txfsiz[15];
++
++	uint16_t init_rxfsiz;
++	uint16_t init_gnptxfsiz;
++	uint16_t init_txfsiz[15];
++#endif
++
++	/** Lx state of device */
++	fh_otg_lx_state_e lx_state;
++
++	/** Saved Core Global registers */
++	struct fh_otg_global_regs_backup *gr_backup;
++	/** Saved Host registers */
++	struct fh_otg_host_regs_backup *hr_backup;
++	/** Saved Device registers */
++	struct fh_otg_dev_regs_backup *dr_backup;
++
++	/** Power Down Enable */
++	uint32_t power_down;
++
++	/** ADP support Enable */
++	uint32_t adp_enable;
++
++	/** ADP structure object */
++	fh_otg_adp_t adp;
++
++	/** hibernation/suspend flag */
++	int hibernation_suspend;
++
++	/** Device mode extended hibernation flag */
++	int xhib;
++
++	/** OTG revision supported */
++	uint32_t otg_ver;
++
++	/** OTG status flag used for HNP polling */
++	uint8_t otg_sts;
++
++	/** Pointer to either hcd->lock or pcd->lock */
++	fh_spinlock_t *lock;
++
++	/** Start predicting NextEP based on the Learning Queue if equal to 1;
++	 * also used as a counter of disabled non-periodic IN EPs */
++	uint8_t start_predict;
++
++	/** NextEp sequence, including EP0: nextep_seq[] = EP if non-periodic and 
++	 * active, 0xff otherwise */
++	uint8_t nextep_seq[MAX_EPS_CHANNELS];
++
++	/** Index of the first EP in the nextep_seq array which should be re-enabled */
++	uint8_t first_in_nextep_seq;
++
++	/** Frame number on entering the ISR - needed for ISOCs */
++	uint32_t frame_num; 
++
++	/** Flag to not perform ADP probing if IDSTS event happened */
++	uint8_t stop_adpprb;
++
++};
++
++#ifdef DEBUG
++/*
++ * This function is called when transfer is timed out.
++ */
++extern void hc_xfer_timeout(void *ptr);
++#endif
++
++/*
++ * This function is called when transfer is timed out on endpoint.
++ */
++extern void ep_xfer_timeout(void *ptr);
++
++/*
++ * The following are work functions used while
++ * handling certain interrupts.
++ */
++extern void w_conn_id_status_change(void *p);
++
++extern void w_wakeup_detected(void *p);
++
++/** Saves global register values into system memory. */
++extern int fh_otg_save_global_regs(fh_otg_core_if_t * core_if);
++/** Saves device register values into system memory. */
++extern int fh_otg_save_dev_regs(fh_otg_core_if_t * core_if);
++/** Saves host register values into system memory. */
++extern int fh_otg_save_host_regs(fh_otg_core_if_t * core_if);
++/** Restore global register values. */
++extern int fh_otg_restore_global_regs(fh_otg_core_if_t * core_if);
++/** Restore host register values. */
++extern int fh_otg_restore_host_regs(fh_otg_core_if_t * core_if, int reset);
++/** Restore device register values. */
++extern int fh_otg_restore_dev_regs(fh_otg_core_if_t * core_if,
++				    int rem_wakeup);
++extern int restore_lpm_i2c_regs(fh_otg_core_if_t * core_if);
++extern int restore_essential_regs(fh_otg_core_if_t * core_if, int rmode,
++				  int is_host);
++
++extern int fh_otg_host_hibernation_restore(fh_otg_core_if_t * core_if,
++					    int restore_mode, int reset);
++extern int fh_otg_device_hibernation_restore(fh_otg_core_if_t * core_if,
++					      int rem_wakeup, int reset);
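++
++/*
++ * Illustrative sequence (sketch, assumption): a device-mode hibernation
++ * entry/exit path would typically pair the save and restore helpers above,
++ * e.g.:
++ *
++ *	fh_otg_save_global_regs(core_if);
++ *	fh_otg_save_dev_regs(core_if);
++ *	// ... gate power to the core ...
++ *	fh_otg_restore_global_regs(core_if);
++ *	fh_otg_restore_dev_regs(core_if, rem_wakeup);
++ */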
++
++/*
++ * The following functions support initialization of the CIL driver component
++ * and the FH_otg controller.
++ */
++extern void fh_otg_core_host_init(fh_otg_core_if_t * _core_if);
++extern void fh_otg_core_dev_init(fh_otg_core_if_t * _core_if);
++
++/** @name Device CIL Functions
++ * The following functions support managing the FH_otg controller in device
++ * mode.
++ */
++/**@{*/
++extern void fh_otg_wakeup(fh_otg_core_if_t * _core_if);
++extern void fh_otg_read_setup_packet(fh_otg_core_if_t * _core_if,
++				      uint32_t * _dest);
++extern uint32_t fh_otg_get_frame_number(fh_otg_core_if_t * _core_if);
++extern void fh_otg_ep0_activate(fh_otg_core_if_t * _core_if, fh_ep_t * _ep);
++extern void fh_otg_ep_activate(fh_otg_core_if_t * _core_if, fh_ep_t * _ep);
++extern void fh_otg_ep_deactivate(fh_otg_core_if_t * _core_if, fh_ep_t * _ep);
++extern void fh_otg_ep_start_transfer(fh_otg_core_if_t * _core_if,
++				      fh_ep_t * _ep);
++extern void fh_otg_ep_start_zl_transfer(fh_otg_core_if_t * _core_if,
++					 fh_ep_t * _ep);
++extern void fh_otg_ep0_start_transfer(fh_otg_core_if_t * _core_if,
++				       fh_ep_t * _ep);
++extern void fh_otg_ep0_continue_transfer(fh_otg_core_if_t * _core_if,
++					  fh_ep_t * _ep);
++extern void fh_otg_ep_write_packet(fh_otg_core_if_t * _core_if,
++				    fh_ep_t * _ep, int _dma);
++extern void fh_otg_ep_set_stall(fh_otg_core_if_t * _core_if, fh_ep_t * _ep);
++extern void fh_otg_ep_clear_stall(fh_otg_core_if_t * _core_if,
++				   fh_ep_t * _ep);
++extern void fh_otg_enable_device_interrupts(fh_otg_core_if_t * _core_if);
++
++#ifdef FH_EN_ISOC
++extern void fh_otg_iso_ep_start_frm_transfer(fh_otg_core_if_t * core_if,
++					      fh_ep_t * ep);
++extern void fh_otg_iso_ep_start_buf_transfer(fh_otg_core_if_t * core_if,
++					      fh_ep_t * ep);
++#endif /* FH_EN_ISOC */
++/**@}*/
++
++/** @name Host CIL Functions
++ * The following functions support managing the FH_otg controller in host
++ * mode.
++ */
++/**@{*/
++extern void fh_otg_hc_init(fh_otg_core_if_t * _core_if, fh_hc_t * _hc);
++extern void fh_otg_hc_halt(fh_otg_core_if_t * _core_if,
++			    fh_hc_t * _hc, fh_otg_halt_status_e _halt_status);
++extern void fh_otg_hc_cleanup(fh_otg_core_if_t * _core_if, fh_hc_t * _hc);
++extern void fh_otg_hc_start_transfer(fh_otg_core_if_t * _core_if,
++				      fh_hc_t * _hc);
++extern int fh_otg_hc_continue_transfer(fh_otg_core_if_t * _core_if,
++					fh_hc_t * _hc);
++extern void fh_otg_hc_do_ping(fh_otg_core_if_t * _core_if, fh_hc_t * _hc);
++extern void fh_otg_hc_write_packet(fh_otg_core_if_t * _core_if,
++				    fh_hc_t * _hc);
++extern void fh_otg_enable_host_interrupts(fh_otg_core_if_t * _core_if);
++extern void fh_otg_disable_host_interrupts(fh_otg_core_if_t * _core_if);
++
++extern void fh_otg_hc_start_transfer_ddma(fh_otg_core_if_t * core_if,
++					   fh_hc_t * hc);
++
++extern uint32_t calc_frame_interval(fh_otg_core_if_t * core_if);
++extern int fh_otg_check_haps_status(fh_otg_core_if_t * core_if);
++
++/* Macro used to clear one channel interrupt */
++#define clear_hc_int(_hc_regs_, _intr_) \
++do { \
++	hcint_data_t hcint_clear = {.d32 = 0}; \
++	hcint_clear.b._intr_ = 1; \
++	FH_WRITE_REG32(&(_hc_regs_)->hcint, hcint_clear.d32); \
++} while (0)
++
++/*
++ * Macro used to disable one channel interrupt. Channel interrupts are
++ * disabled when the channel is halted or released by the interrupt handler.
++ * There is no need to handle further interrupts of that type until the
++ * channel is re-assigned. In fact, subsequent handling may cause crashes
++ * because the channel structures are cleaned up when the channel is released.
++ */
++#define disable_hc_int(_hc_regs_, _intr_) \
++do { \
++	hcintmsk_data_t hcintmsk = {.d32 = 0}; \
++	hcintmsk.b._intr_ = 1; \
++	FH_MODIFY_REG32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \
++} while (0)
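++
++/*
++ * Illustrative usage of the two macros above (sketch): after servicing a
++ * Channel Halted interrupt the handler would acknowledge it and mask further
++ * interrupts from the released channel. The "_intr_" argument is pasted into
++ * the hcint/hcintmsk bit-field name, so it must be a valid field (e.g.
++ * chhltd, assuming the register layout in fh_otg_regs.h).
++ *
++ *	clear_hc_int(hc_regs, chhltd);
++ *	disable_hc_int(hc_regs, chhltd);
++ */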
++
++/**
++ * This function reads HPRT0 in preparation for modifying it. It keeps the
++ * write-clear (WC) bits at 0 so that if they are read as 1, they won't be
++ * cleared when the value is written back.
++ */
++static inline uint32_t fh_otg_read_hprt0(fh_otg_core_if_t * _core_if)
++{
++	hprt0_data_t hprt0;
++	hprt0.d32 = FH_READ_REG32(_core_if->host_if->hprt0);
++	hprt0.b.prtena = 0;
++	hprt0.b.prtconndet = 0;
++	hprt0.b.prtenchng = 0;
++	hprt0.b.prtovrcurrchng = 0;
++	return hprt0.d32;
++}
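++
++/*
++ * Illustrative usage (sketch): drive port resume signaling without
++ * accidentally clearing the write-clear status bits, using the helper above.
++ *
++ *	hprt0_data_t hprt0;
++ *	hprt0.d32 = fh_otg_read_hprt0(core_if);
++ *	hprt0.b.prtres = 1;
++ *	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++ */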
++
++/**@}*/
++
++/** @name Common CIL Functions
++ * The following functions support managing the FH_otg controller in either
++ * device or host mode.
++ */
++/**@{*/
++
++extern void fh_otg_read_packet(fh_otg_core_if_t * core_if,
++				uint8_t * dest, uint16_t bytes);
++
++extern void fh_otg_flush_tx_fifo(fh_otg_core_if_t * _core_if, const int _num);
++extern void fh_otg_flush_rx_fifo(fh_otg_core_if_t * _core_if);
++extern void fh_otg_core_reset(fh_otg_core_if_t * _core_if);
++
++/**
++ * This function returns the Core Interrupt register.
++ */
++static inline uint32_t fh_otg_read_core_intr(fh_otg_core_if_t * core_if)
++{
++	return (FH_READ_REG32(&core_if->core_global_regs->gintsts) &
++		FH_READ_REG32(&core_if->core_global_regs->gintmsk));
++}
++
++/**
++ * This function returns the OTG Interrupt register.
++ */
++static inline uint32_t fh_otg_read_otg_intr(fh_otg_core_if_t * core_if)
++{
++	return (FH_READ_REG32(&core_if->core_global_regs->gotgint));
++}
++
++/**
++ * This function reads the Device All Endpoints Interrupt register and
++ * returns the IN endpoint interrupt bits.
++ */
++static inline uint32_t fh_otg_read_dev_all_in_ep_intr(fh_otg_core_if_t *
++						       core_if)
++{
++
++	uint32_t v;
++
++	if (core_if->multiproc_int_enable) {
++		v = FH_READ_REG32(&core_if->dev_if->
++				   dev_global_regs->deachint) &
++		    FH_READ_REG32(&core_if->
++				   dev_if->dev_global_regs->deachintmsk);
++	} else {
++		v = FH_READ_REG32(&core_if->dev_if->dev_global_regs->daint) &
++		    FH_READ_REG32(&core_if->dev_if->dev_global_regs->daintmsk);
++	}
++	return (v & 0xffff);
++}
++
++/**
++ * This function reads the Device All Endpoints Interrupt register and
++ * returns the OUT endpoint interrupt bits.
++ */
++static inline uint32_t fh_otg_read_dev_all_out_ep_intr(fh_otg_core_if_t *
++							core_if)
++{
++	uint32_t v;
++
++	if (core_if->multiproc_int_enable) {
++		v = FH_READ_REG32(&core_if->dev_if->
++				   dev_global_regs->deachint) &
++		    FH_READ_REG32(&core_if->
++				   dev_if->dev_global_regs->deachintmsk);
++	} else {
++		v = FH_READ_REG32(&core_if->dev_if->dev_global_regs->daint) &
++		    FH_READ_REG32(&core_if->dev_if->dev_global_regs->daintmsk);
++	}
++
++	return ((v & 0xffff0000) >> 16);
++}
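++
++/*
++ * Illustrative sketch (assumption): callers typically walk the returned
++ * bitmap, one bit per endpoint number. handle_out_ep() is hypothetical.
++ *
++ *	uint32_t ep_intr = fh_otg_read_dev_all_out_ep_intr(core_if);
++ *	int epnum = 0;
++ *	while (ep_intr) {
++ *		if (ep_intr & 0x1)
++ *			handle_out_ep(epnum);
++ *		epnum++;
++ *		ep_intr >>= 1;
++ *	}
++ */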
++
++/**
++ * This function returns the Device IN EP Interrupt register
++ */
++static inline uint32_t fh_otg_read_dev_in_ep_intr(fh_otg_core_if_t * core_if,
++						   fh_ep_t * ep)
++{
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	uint32_t v, msk, emp;
++
++	if (core_if->multiproc_int_enable) {
++		msk =
++		    FH_READ_REG32(&dev_if->
++				   dev_global_regs->diepeachintmsk[ep->num]);
++		emp =
++		    FH_READ_REG32(&dev_if->
++				   dev_global_regs->dtknqr4_fifoemptymsk);
++		msk |= ((emp >> ep->num) & 0x1) << 7;
++		v = FH_READ_REG32(&dev_if->in_ep_regs[ep->num]->diepint) & msk;
++	} else {
++		msk = FH_READ_REG32(&dev_if->dev_global_regs->diepmsk);
++		emp =
++		    FH_READ_REG32(&dev_if->
++				   dev_global_regs->dtknqr4_fifoemptymsk);
++		msk |= ((emp >> ep->num) & 0x1) << 7;
++		v = FH_READ_REG32(&dev_if->in_ep_regs[ep->num]->diepint) & msk;
++	}
++
++	return v;
++}
++
++/**
++ * This function returns the Device OUT EP Interrupt register
++ */
++static inline uint32_t fh_otg_read_dev_out_ep_intr(fh_otg_core_if_t *
++						    _core_if, fh_ep_t * _ep)
++{
++	fh_otg_dev_if_t *dev_if = _core_if->dev_if;
++	uint32_t v;
++	doepmsk_data_t msk = {.d32 = 0 };
++
++	if (_core_if->multiproc_int_enable) {
++		msk.d32 =
++		    FH_READ_REG32(&dev_if->
++				   dev_global_regs->doepeachintmsk[_ep->num]);
++		if (_core_if->pti_enh_enable) {
++			msk.b.pktdrpsts = 1;
++		}
++		v = FH_READ_REG32(&dev_if->
++				   out_ep_regs[_ep->num]->doepint) & msk.d32;
++	} else {
++		msk.d32 = FH_READ_REG32(&dev_if->dev_global_regs->doepmsk);
++		if (_core_if->pti_enh_enable) {
++			msk.b.pktdrpsts = 1;
++		}
++		v = FH_READ_REG32(&dev_if->
++				   out_ep_regs[_ep->num]->doepint) & msk.d32;
++	}
++	return v;
++}
++
++/**
++ * This function returns the Host All Channel Interrupt register
++ */
++static inline uint32_t fh_otg_read_host_all_channels_intr(fh_otg_core_if_t *
++							   _core_if)
++{
++	return (FH_READ_REG32(&_core_if->host_if->host_global_regs->haint));
++}
++
++static inline uint32_t fh_otg_read_host_channel_intr(fh_otg_core_if_t *
++						      _core_if, fh_hc_t * _hc)
++{
++	return (FH_READ_REG32
++		(&_core_if->host_if->hc_regs[_hc->hc_num]->hcint));
++}
++
++/**
++ * This function returns the mode of the operation, host or device.
++ *
++ * @return 0 - Device Mode, 1 - Host Mode
++ */
++static inline uint32_t fh_otg_mode(fh_otg_core_if_t * _core_if)
++{
++	return (FH_READ_REG32(&_core_if->core_global_regs->gintsts) & 0x1);
++}
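++
++/*
++ * Example (sketch): fh_otg_mode() returns the current-mode bit of GINTSTS,
++ * so callers can branch on the active role:
++ *
++ *	if (fh_otg_mode(core_if)) {
++ *		// host-mode path
++ *	} else {
++ *		// device-mode path
++ *	}
++ */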
++
++/**@}*/
++
++/**
++ * FH_otg CIL callback structure. This structure allows the HCD and
++ * PCD to register functions used for starting and stopping the PCD
++ * and HCD for role changes on a DRD (dual-role device).
++ */
++typedef struct fh_otg_cil_callbacks {
++	/** Start function for role change */
++	int (*start) (void *_p);
++	/** Stop Function for role change */
++	int (*stop) (void *_p);
++	/** Disconnect Function for role change */
++	int (*disconnect) (void *_p);
++	/** Resume/Remote wakeup Function */
++	int (*resume_wakeup) (void *_p);
++	/** Suspend function */
++	int (*suspend) (void *_p);
++	/** Session Start (SRP) */
++	int (*session_start) (void *_p);
++#ifdef CONFIG_USB_FH_OTG_LPM
++	/** Sleep (switch to L1 state) */
++	int (*sleep) (void *_p);
++#endif
++	/** Pointer passed to start() and stop() */
++	void *p;
++} fh_otg_cil_callbacks_t;
++
++extern void fh_otg_cil_register_pcd_callbacks(fh_otg_core_if_t * _core_if,
++					       fh_otg_cil_callbacks_t * _cb,
++					       void *_p);
++extern void fh_otg_cil_register_hcd_callbacks(fh_otg_core_if_t * _core_if,
++					       fh_otg_cil_callbacks_t * _cb,
++					       void *_p);
++
++void fh_otg_initiate_srp(void * core_if);
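++
++/*
++ * Illustrative sketch (assumption): how a PCD could register its callbacks.
++ * The my_pcd_* functions and the pcd pointer are hypothetical.
++ *
++ *	static fh_otg_cil_callbacks_t pcd_callbacks = {
++ *		.start = my_pcd_start,
++ *		.stop = my_pcd_stop,
++ *		.suspend = my_pcd_suspend,
++ *		.resume_wakeup = my_pcd_resume,
++ *	};
++ *
++ *	fh_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
++ */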
++
++//////////////////////////////////////////////////////////////////////
++/** Start the HCD.  Helper function for using the HCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_hcd_start(fh_otg_core_if_t * core_if)
++{
++	if (core_if->hcd_cb && core_if->hcd_cb->start) {
++		core_if->hcd_cb->start(core_if->hcd_cb->p);
++	}
++}
++
++/** Stop the HCD.  Helper function for using the HCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_hcd_stop(fh_otg_core_if_t * core_if)
++{
++	if (core_if->hcd_cb && core_if->hcd_cb->stop) {
++		core_if->hcd_cb->stop(core_if->hcd_cb->p);
++	}
++}
++
++/** Disconnect the HCD.  Helper function for using the HCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_hcd_disconnect(fh_otg_core_if_t * core_if)
++{
++	if (core_if->hcd_cb && core_if->hcd_cb->disconnect) {
++		core_if->hcd_cb->disconnect(core_if->hcd_cb->p);
++	}
++}
++
++/** Inform the HCD that a new session has begun.  Helper function for
++ * using the HCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_hcd_session_start(fh_otg_core_if_t * core_if)
++{
++	if (core_if->hcd_cb && core_if->hcd_cb->session_start) {
++		core_if->hcd_cb->session_start(core_if->hcd_cb->p);
++	}
++}
++
++#ifdef CONFIG_USB_FH_OTG_LPM
++/**
++ * Inform the HCD about LPM sleep.
++ * Helper function for using the HCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_hcd_sleep(fh_otg_core_if_t * core_if)
++{
++	if (core_if->hcd_cb && core_if->hcd_cb->sleep) {
++		core_if->hcd_cb->sleep(core_if->hcd_cb->p);
++	}
++}
++#endif
++
++/** Resume the HCD.  Helper function for using the HCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_hcd_resume(fh_otg_core_if_t * core_if)
++{
++	if (core_if->hcd_cb && core_if->hcd_cb->resume_wakeup) {
++		core_if->hcd_cb->resume_wakeup(core_if->hcd_cb->p);
++	}
++}
++
++/** Start the PCD.  Helper function for using the PCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_pcd_start(fh_otg_core_if_t * core_if)
++{
++	if (core_if->pcd_cb && core_if->pcd_cb->start) {
++		core_if->pcd_cb->start(core_if->pcd_cb->p);
++	}
++}
++
++/** Stop the PCD.  Helper function for using the PCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_pcd_stop(fh_otg_core_if_t * core_if)
++{
++	if (core_if->pcd_cb && core_if->pcd_cb->stop) {
++		core_if->pcd_cb->stop(core_if->pcd_cb->p);
++	}
++}
++
++/** Suspend the PCD.  Helper function for using the PCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_pcd_suspend(fh_otg_core_if_t * core_if)
++{
++	if (core_if->pcd_cb && core_if->pcd_cb->suspend) {
++		core_if->pcd_cb->suspend(core_if->pcd_cb->p);
++	}
++}
++
++/** Resume the PCD.  Helper function for using the PCD callbacks.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static inline void cil_pcd_resume(fh_otg_core_if_t * core_if)
++{
++	if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
++		core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
++	}
++}
++
++//////////////////////////////////////////////////////////////////////
++
++#endif
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil_intr.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil_intr.c
+new file mode 100644
+index 00000000..4b1cd2e4
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_cil_intr.c
+@@ -0,0 +1,1739 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_cil_intr.c $
++ * $Revision: #40 $
++ * $Date: 2015/10/12 $
++ * $Change: 2972621 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++/** @file
++ *
++ * The Core Interface Layer provides basic services for accessing and
++ * managing the FH_otg hardware. These services are used by both the
++ * Host Controller Driver and the Peripheral Controller Driver.
++ *
++ * This file contains the Common Interrupt handlers.
++ */
++#include "../fh_common_port/fh_os.h"
++#include "fh_otg_regs.h"
++#include "fh_otg_cil.h"
++#include "fh_otg_driver.h"
++#include "fh_otg_pcd.h"
++#include "fh_otg_hcd.h"
++
++#ifdef DEBUG
++inline const char *op_state_str(fh_otg_core_if_t * core_if)
++{
++	return (core_if->op_state == A_HOST ? "a_host" :
++		(core_if->op_state == A_SUSPEND ? "a_suspend" :
++		 (core_if->op_state == A_PERIPHERAL ? "a_peripheral" :
++		  (core_if->op_state == B_PERIPHERAL ? "b_peripheral" :
++		   (core_if->op_state == B_HOST ? "b_host" : "unknown")))));
++}
++#endif
++
++/** This function logs a warning for the Mode Mismatch Interrupt and clears it
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++int32_t fh_otg_handle_mode_mismatch_intr(fh_otg_core_if_t * core_if)
++{
++	gintsts_data_t gintsts;
++	FH_WARN("Mode Mismatch Interrupt: currently in %s mode\n",
++		 fh_otg_mode(core_if) ? "Host" : "Device");
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.modemismatch = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++	return 1;
++}
++
++/**
++ * This function handles the OTG Interrupts. It reads the OTG
++ * Interrupt Register (GOTGINT) to determine what interrupt has
++ * occurred.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++int32_t fh_otg_handle_otg_intr(fh_otg_core_if_t * core_if)
++{
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	gotgint_data_t gotgint;
++	gotgctl_data_t gotgctl;
++	gintmsk_data_t gintmsk;
++	gpwrdn_data_t gpwrdn;
++
++	gotgint.d32 = FH_READ_REG32(&global_regs->gotgint);
++	gotgctl.d32 = FH_READ_REG32(&global_regs->gotgctl);
++	FH_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint.d32,
++		    op_state_str(core_if));
++
++	if (gotgint.b.sesenddet) {
++		FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
++			    "Session End Detected++ (%s)\n",
++			    op_state_str(core_if));
++		gotgctl.d32 = FH_READ_REG32(&global_regs->gotgctl);
++
++		if (core_if->op_state == B_HOST) {
++			if (core_if->adp_enable && FH_WORKQ_PENDING(core_if->wq_otg)) {
++
++				/* During the ST_B_ADP test, after HNP the HSOTG tries to go to
++				 * B_HOST mode, but the PET is not expecting a fully functional
++				 * host at that point and switches off VBUS, expecting an
++				 * immediate ADP probe */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pmuintsel = 1;
++				gpwrdn.b.pmuactv = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++				fh_mdelay(20);
++				fh_otg_adp_probe_start(core_if);
++				goto exit_interrupt;
++			}
++			cil_pcd_start(core_if);
++			core_if->op_state = B_PERIPHERAL;
++		} else {
++			/* If not B_HOST and Device HNP still set. HNP
++			 * Did not succeed!*/
++			if (gotgctl.b.devhnpen) {
++				FH_DEBUGPL(DBG_ANY, "Session End Detected\n");
++				__FH_ERROR("Device Not Connected/Responding!\n");
++			}
++
++			/* If Session End Detected the B-Cable has
++			 * been disconnected. */
++			/* Reset PCD and Gadget driver to a
++			 * clean state. */
++			core_if->lx_state = FH_OTG_L0;
++			FH_SPINUNLOCK(core_if->lock);
++			cil_pcd_stop(core_if);
++			FH_SPINLOCK(core_if->lock);
++
++			if (core_if->otg_ver) {
++				/** PET testing*/
++				gotgctl.d32 = 0;
++				gotgctl.b.devhnpen = 1;
++				FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
++				if (core_if->test_mode == 6) {
++					FH_WORKQ_SCHEDULE_DELAYED(core_if->wq_otg, fh_otg_initiate_srp,
++								core_if, 3000, "initiate SRP"); //manukz: old value was 50
++					core_if->test_mode = 0;
++				} else	if (core_if->adp_enable) {
++					if (core_if->power_down == 2) {
++						gpwrdn.d32 = 0;
++						gpwrdn.b.pwrdnswtch = 1;
++						FH_MODIFY_REG32(&core_if->
++								 core_global_regs->
++								 gpwrdn, gpwrdn.d32, 0);
++					}
++
++					gpwrdn.d32 = 0;
++					gpwrdn.b.pmuintsel = 1;
++					gpwrdn.b.pmuactv = 1;
++					FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++					fh_otg_adp_sense_start(core_if);
++				} 
++			}
++		}
++exit_interrupt:
++		if (core_if->otg_ver == 0) {
++			gotgctl.d32 = 0;
++			gotgctl.b.devhnpen = 1;
++			FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
++		}
++	}
++	if (gotgint.b.sesreqsucstschng) {
++		FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
++			    "Session Reqeust Success Status Change++\n");
++		gotgctl.d32 = FH_READ_REG32(&global_regs->gotgctl);
++		if (gotgctl.b.sesreqscs) {
++
++			if ((core_if->core_params->phy_type ==
++			     FH_PHY_TYPE_PARAM_FS) && (core_if->core_params->i2c_enable)) {
++				core_if->srp_success = 1;
++			} else {
++				FH_SPINUNLOCK(core_if->lock);
++				cil_pcd_resume(core_if);
++				FH_SPINLOCK(core_if->lock);
++				/* Clear Session Request */
++				gotgctl.d32 = 0;
++				gotgctl.b.sesreq = 1;
++				FH_MODIFY_REG32(&global_regs->gotgctl,
++						 gotgctl.d32, 0);
++			}
++		}
++	}
++	if (gotgint.b.hstnegsucstschng) {
++		/* Print statements during the HNP interrupt handling
++		 * can cause it to fail.*/
++		gotgctl.d32 = FH_READ_REG32(&global_regs->gotgctl);
++		/* Workaround for 3.00a - HW is not setting cur_mode; even this
++		 * sometimes does not help */
++		if (core_if->snpsid >= OTG_CORE_REV_3_00a)
++			fh_udelay(100);
++		if (gotgctl.b.hstnegscs) {
++			if (fh_otg_is_host_mode(core_if)) {
++				core_if->op_state = B_HOST;
++				/*
++				 * Need to disable SOF interrupt immediately.
++				 * When switching from device to host, the PCD
++				 * interrupt handler won't handle the
++				 * interrupt if host mode is already set. The
++				 * HCD interrupt handler won't get called if
++				 * the HCD state is HALT. This means that the
++				 * interrupt does not get handled and Linux
++				 * complains loudly.
++				 */
++				gintmsk.d32 = 0;
++				gintmsk.b.sofintr = 1;
++				/* To avoid multiple USB Suspend interrupts during
++				 * OTG 2.0 role change */
++				if (core_if->otg_ver)
++					gintmsk.b.usbsuspend = 1;
++				FH_MODIFY_REG32(&global_regs->gintmsk,
++						 gintmsk.d32, 0);
++				/* Call callback function with spin lock released */
++				FH_SPINUNLOCK(core_if->lock);
++				cil_pcd_stop(core_if);
++				/*
++				 * Initialize the Core for Host mode.
++				 */
++				if (core_if->otg_ver) {
++					fh_mdelay(100);
++					cil_hcd_start(core_if);
++					cil_hcd_session_start(core_if);
++				} else {
++					cil_hcd_start(core_if);
++				}
++				FH_SPINLOCK(core_if->lock);
++			}
++		} else {
++			gotgctl.d32 = 0;
++			gotgctl.b.hnpreq = 1;
++			gotgctl.b.devhnpen = 1;
++			FH_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
++			FH_DEBUGPL(DBG_ANY, "HNP Failed\n");
++			__FH_ERROR("Device Not Connected/Responding\n");
++		}
++	}
++	if (gotgint.b.hstnegdet) {
++		/* The disconnect interrupt is set at the same time as
++		 * Host Negotiation Detected.  During the mode
++		 * switch all interrupts are cleared so the disconnect
++		 * interrupt handler will not get executed.
++		 */
++		FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
++			    "Host Negotiation Detected++ (%s)\n",
++			    (fh_otg_is_host_mode(core_if) ? "Host" :
++			     "Device"));
++		if (fh_otg_is_device_mode(core_if)) {
++			FH_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n",
++				    core_if->op_state);
++			FH_SPINUNLOCK(core_if->lock);
++			cil_hcd_disconnect(core_if);
++			cil_pcd_start(core_if);
++			FH_SPINLOCK(core_if->lock);
++			core_if->op_state = A_PERIPHERAL;
++		} else {
++			/*
++			 * Need to disable SOF interrupt immediately. When
++			 * switching from device to host, the PCD interrupt
++			 * handler won't handle the interrupt if host mode is
++			 * already set. The HCD interrupt handler won't get
++			 * called if the HCD state is HALT. This means that
++			 * the interrupt does not get handled and Linux
++			 * complains loudly.
++			 */
++			gintmsk.d32 = 0;
++			gintmsk.b.sofintr = 1;
++			FH_MODIFY_REG32(&global_regs->gintmsk, gintmsk.d32, 0);
++			FH_SPINUNLOCK(core_if->lock);
++			cil_pcd_stop(core_if);
++			cil_hcd_start(core_if);
++			FH_SPINLOCK(core_if->lock);
++			core_if->op_state = A_HOST;
++		}
++	}
++	if (gotgint.b.adevtoutchng) {
++		FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
++			    "A-Device Timeout Change++\n");
++	}
++	if (gotgint.b.debdone) {
++		FH_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " "Debounce Done++\n");
++		/* Need to power off VBUS after 10s if OTG2 non-HNP-capable host */
++		if (core_if->otg_ver && core_if->op_state == A_PERIPHERAL) {
++			FH_DEBUGPL(DBG_ANY, "a_peripheral->a_host\n");
++			/* Clear the a_peripheral flag, back to a_host. */
++			FH_SPINUNLOCK(core_if->lock);
++			cil_pcd_stop(core_if);
++			cil_hcd_start(core_if);
++			FH_SPINLOCK(core_if->lock);
++			core_if->op_state = A_HOST;
++		}
++
++		//if(core_if->otg_ver == 1)
++			//cil_hcd_session_start(core_if); mvardan (for ADP issue)
++	}
++
++	/* Clear GOTGINT */
++	FH_WRITE_REG32(&core_if->core_global_regs->gotgint, gotgint.d32);
++
++	return 1;
++}
++
++void w_conn_id_status_change(void *p)
++{
++	fh_otg_core_if_t *core_if = p;
++	uint32_t count = 0;
++	gotgctl_data_t gotgctl = {.d32 = 0 };
++
++	gotgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++	FH_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32);
++	FH_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts);
++
++	/* B-Device connector (Device Mode) */
++	if (gotgctl.b.conidsts) {
++		gotgctl_data_t gotgctl_local;
++		/* Wait for switch to device mode. */
++		while (!fh_otg_is_device_mode(core_if)) {
++			gotgctl_local.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++			FH_DEBUGPL(DBG_ANY, "Waiting for Peripheral Mode, Mode=%s count = %d gotgctl=%08x\n",
++				   (fh_otg_is_host_mode(core_if) ? "Host" :
++				    "Peripheral"), count, gotgctl_local.d32);
++			fh_mdelay(1); //vahrama previous value was 100
++			if(!gotgctl_local.b.conidsts)
++				goto host;
++			if (++count > 10000)
++				break;
++		}
++		FH_ASSERT(++count < 10000,
++			   "Connection id status change timed out");
++		core_if->op_state = B_PERIPHERAL;
++		if(core_if->otg_ver == 0)
++			fh_otg_core_init(core_if);
++		fh_otg_enable_global_interrupts(core_if);
++		cil_pcd_start(core_if);
++	} else {
++host:
++		/* A-Device connector (Host Mode) */
++		while (!fh_otg_is_host_mode(core_if)) {
++		FH_DEBUGPL(DBG_ANY,"Waiting for Host Mode, Mode=%s\n",
++				   (fh_otg_is_host_mode(core_if) ? "Host" :
++				    "Peripheral"));
++			fh_mdelay(1);	//vahrama previously was 100
++			if (++count > 10000)
++				break;
++		}
++		FH_ASSERT(++count < 10000,
++			   "Connection id status change timed out");
++		core_if->op_state = A_HOST;
++		/*
++		 * Initialize the Core for Host mode.
++		 */
++		if (core_if->otg_ver)
++			/* To power off the bus 10s from the beginning of the
++			 * test while debounce has not come yet */
++			cil_hcd_session_start(core_if);
++		else
++			fh_otg_core_init(core_if);
++		fh_otg_enable_global_interrupts(core_if);
++		cil_hcd_start(core_if);
++	}
++}
++
++/**
++ * This function handles the Connector ID Status Change Interrupt.  It
++ * reads the OTG Control and Status Register (GOTGCTL) to determine whether
++ * this is a Device to Host Mode transition or a Host Mode to Device
++ * Transition.
++ *
++ * This only occurs when the cable is connected/removed from the PHY
++ * connector.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++int32_t fh_otg_handle_conn_id_status_change_intr(fh_otg_core_if_t * core_if)
++{
++
++	/*
++	 * Need to disable SOF interrupt immediately. If switching from device
++	 * to host, the PCD interrupt handler won't handle the interrupt if
++	 * host mode is already set. The HCD interrupt handler won't get
++	 * called if the HCD state is HALT. This means that the interrupt does
++	 * not get handled and Linux complains loudly.
++	 */
++	gintmsk_data_t gintmsk = {.d32 = 0 };
++	gintsts_data_t gintsts = {.d32 = 0 };
++
++	gintmsk.b.sofintr = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
++
++	FH_DEBUGPL(DBG_CIL,
++		    " ++Connector ID Status Change Interrupt++  (%s)\n",
++		    (fh_otg_is_host_mode(core_if) ? "Host" : "Device"));
++
++	FH_SPINUNLOCK(core_if->lock);
++
++	/* Needed to avoid conn_id_status change duplication */
++	//if (core_if->otg_ver)
++		//fh_mdelay(50);
++	/*
++	 * Need to schedule a work item, as there are possible DELAY function calls.
++	 * Release the lock before scheduling the workq, as it holds the spinlock during scheduling.
++	 */
++
++	FH_WORKQ_SCHEDULE(core_if->wq_otg, w_conn_id_status_change,
++			   core_if, "connection id status change");
++	FH_SPINLOCK(core_if->lock);
++
++	/* Set flag and clear interrupt */
++	gintsts.b.conidstschng = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * This interrupt indicates that a device is initiating the Session
++ * Request Protocol to request the host to turn on bus power so a new
++ * session can begin. The handler responds by turning on bus power. If
++ * the FH_otg controller is in low power mode, the handler brings the
++ * controller out of low power mode before turning on bus power.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++int32_t fh_otg_handle_session_req_intr(fh_otg_core_if_t * core_if)
++{
++	gintsts_data_t gintsts;
++
++#ifndef FH_HOST_ONLY
++	FH_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n");
++
++	if (fh_otg_is_device_mode(core_if)) {
++		gotgctl_data_t gotgctl = {.d32 = 0 };
++		FH_DEBUGPL(DBG_PCD, "SRP: Device mode\n");
++		gotgctl.d32 =
++			FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++		if (gotgctl.b.sesreqscs)
++			FH_PRINTF("SRP Success\n");
++		else
++			FH_PRINTF("SRP Fail\n");
++		if (core_if->otg_ver) {
++			gotgctl.d32 = 0 ;	
++			gotgctl.b.devhnpen = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gotgctl, gotgctl.d32, 0);
++		}
++	} else {
++		hprt0_data_t hprt0;
++		FH_PRINTF("SRP: Host mode\n");
++
++		/* Turn on the port power bit. */
++		hprt0.d32 = fh_otg_read_hprt0(core_if);
++		hprt0.b.prtpwr = 1;
++		FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++
++		/* Start the Connection timer. So a message can be displayed
++		 * if connect does not occur within 10 seconds. */
++		cil_hcd_session_start(core_if);
++	}
++#endif
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.sessreqintr = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
++
++void w_wakeup_detected(void *p)
++{
++	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) p;
++	/*
++	 * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
++	 * so that OPT tests pass with all PHYs).
++	 */
++	hprt0_data_t hprt0 = {.d32 = 0 };
++#if 0
++	pcgcctl_data_t pcgcctl = {.d32 = 0 };
++	/* Restart the Phy Clock */
++	pcgcctl.b.stoppclk = 1;
++	FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
++	fh_udelay(10);
++#endif //0
++	hprt0.d32 = fh_otg_read_hprt0(core_if);
++	FH_DEBUGPL(DBG_ANY, "Resume: HPRT0=%0x\n", hprt0.d32);
++//      fh_mdelay(70);
++	hprt0.b.prtres = 0;	/* Resume */
++	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++	FH_DEBUGPL(DBG_ANY, "Clear Resume: HPRT0=%0x\n",
++		    FH_READ_REG32(core_if->host_if->hprt0));
++
++	cil_hcd_resume(core_if);
++
++	/** Change to L0 state*/
++	core_if->lx_state = FH_OTG_L0;
++}
++
++/**
++ * This interrupt indicates that the FH_otg controller has detected a
++ * resume or remote wakeup sequence. If the FH_otg controller is in
++ * low power mode, the handler must bring the controller out of low
++ * power mode. The controller automatically begins resume
++ * signaling. The handler schedules a time to stop resume signaling.
++ */
++int32_t fh_otg_handle_wakeup_detected_intr(fh_otg_core_if_t * core_if)
++{
++	gintsts_data_t gintsts;
++
++	FH_DEBUGPL(DBG_ANY,
++		    "++Resume and Remote Wakeup Detected Interrupt++\n");
++
++	FH_PRINTF("%s lxstate = %d\n", __func__, core_if->lx_state);
++
++	if (fh_otg_is_device_mode(core_if)) {
++		dctl_data_t dctl = {.d32 = 0 };
++		FH_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n",
++			    FH_READ_REG32(&core_if->dev_if->dev_global_regs->
++					   dsts));
++		if (core_if->lx_state == FH_OTG_L2) {
++#ifdef PARTIAL_POWER_DOWN
++			if (core_if->hwcfg4.b.power_optimiz) {
++				pcgcctl_data_t power = {.d32 = 0 };
++
++				power.d32 = FH_READ_REG32(core_if->pcgcctl);
++				FH_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n",
++					    power.d32);
++
++				power.b.stoppclk = 0;
++				FH_WRITE_REG32(core_if->pcgcctl, power.d32);
++
++				power.b.pwrclmp = 0;
++				FH_WRITE_REG32(core_if->pcgcctl, power.d32);
++
++				power.b.rstpdwnmodule = 0;
++				FH_WRITE_REG32(core_if->pcgcctl, power.d32);
++			}
++#endif
++			/* Clear the Remote Wakeup Signaling */
++			dctl.b.rmtwkupsig = 1;
++			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
++					 dctl, dctl.d32, 0);
++
++			FH_SPINUNLOCK(core_if->lock);
++			if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
++				core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
++			}
++			FH_SPINLOCK(core_if->lock);
++		} else {
++			glpmcfg_data_t lpmcfg;
++			pcgcctl_data_t pcgcctl = {.d32 = 0 };
++
++			lpmcfg.d32 =
++			    FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++			lpmcfg.b.hird_thres &= ~(1 << 4);
++			lpmcfg.b.en_utmi_sleep = 0;
++
++			/* Clear Enbl_L1Gating bit. */
++			pcgcctl.b.enbl_sleep_gating = 1;
++			FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32,0);
++
++			FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg,
++					lpmcfg.d32);
++		}
++		/** Change to L0 state*/
++		core_if->lx_state = FH_OTG_L0;
++	} else {
++		if (core_if->lx_state != FH_OTG_L1) {
++			pcgcctl_data_t pcgcctl = {.d32 = 0 };
++
++			/* Restart the Phy Clock */
++			pcgcctl.b.stoppclk = 1;
++			FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
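++			/* w_wakeup_detected() will deassert resume signaling in ~70 ms */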
++			FH_TIMER_SCHEDULE(core_if->wkp_timer, 71);
++		} else {
++			/** Change to L0 state */
++			core_if->lx_state = FH_OTG_L0;
++		}
++	}
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.wkupintr = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * This interrupt indicates that the Wakeup Logic has detected a
++ * Device disconnect.
++ */
++static int32_t fh_otg_handle_pwrdn_disconnect_intr(fh_otg_core_if_t * core_if)
++{
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	gpwrdn_data_t gpwrdn_temp = {.d32 = 0 };
++	gpwrdn_temp.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++
++	FH_PRINTF("%s called\n", __FUNCTION__);
++
++	if (!core_if->hibernation_suspend) {
++		FH_PRINTF("Already exited from Hibernation\n");
++		return 1;
++	}
++
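++	/* Exit-from-hibernation sequence: switch core power on, assert core
++	 * reset, remove the power clamps, release the reset, then turn the
++	 * PMU interrupt and the PMU itself off. */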
++	/* Switch on the voltage to the core */
++	gpwrdn.b.pwrdnswtch = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Reset the core */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnrstn = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Disable power clamps */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnclmp = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	/* Release the core reset signal */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnrstn = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++	fh_udelay(10);
++
++	/* Disable PMU interrupt */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pmuintsel = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	core_if->hibernation_suspend = 0;
++
++	/* Disable PMU */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pmuactv = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	if (gpwrdn_temp.b.idsts) {
++		core_if->op_state = B_PERIPHERAL;
++		fh_otg_core_init(core_if);
++		fh_otg_enable_global_interrupts(core_if);
++		cil_pcd_start(core_if);
++	} else {
++		core_if->op_state = A_HOST;
++		fh_otg_core_init(core_if);
++		fh_otg_enable_global_interrupts(core_if);
++		cil_hcd_start(core_if);
++	}
++
++	return 1;
++}
++
++/**
++ * This interrupt indicates that the Wakeup Logic has detected a
++ * remote wakeup sequence.
++ */
++static int32_t fh_otg_handle_pwrdn_wakeup_detected_intr(fh_otg_core_if_t * core_if)
++{
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	FH_DEBUGPL(DBG_ANY,
++		    "++Powerdown Remote Wakeup Detected Interrupt++\n");
++
++	if (!core_if->hibernation_suspend) {
++		FH_PRINTF("Already exited from Hibernation\n");
++		return 1;
++	}
++
++	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++	if (gpwrdn.b.idsts) {	// Device Mode
++		if ((core_if->power_down == 2)
++		    && (core_if->hibernation_suspend == 1)) {
++			fh_otg_device_hibernation_restore(core_if, 0, 0);
++		}
++	} else {
++		if ((core_if->power_down == 2)
++		    && (core_if->hibernation_suspend == 1)) {
++			fh_otg_host_hibernation_restore(core_if, 1, 0);
++		}
++	}
++	return 1;
++}
++
++static int32_t fh_otg_handle_pwrdn_idsts_change(fh_otg_device_t * otg_dev)
++{
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	gpwrdn_data_t gpwrdn_temp = {.d32 = 0 };
++	fh_otg_core_if_t *core_if = otg_dev->core_if;
++
++	FH_DEBUGPL(DBG_ANY, "%s called\n", __FUNCTION__);
++	gpwrdn_temp.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++	if (core_if->power_down == 2) {
++		if (!core_if->hibernation_suspend) {
++			FH_PRINTF("Already exited from Hibernation\n");
++			return 1;
++		}
++		FH_DEBUGPL(DBG_ANY, "Exit from hibernation on ID sts change\n");
++		/* Switch on the voltage to the core */
++		gpwrdn.b.pwrdnswtch = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		/* Reset the core */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnrstn = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		/* Disable power clamps */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnclmp = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++		/* Release the core reset signal */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnrstn = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++		fh_udelay(10);
++
++		/* Disable PMU interrupt */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuintsel = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++		/* Indicates that we are exiting from hibernation */
++		core_if->hibernation_suspend = 0;
++
++		/* Disable PMU */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuactv = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		gpwrdn.d32 = core_if->gr_backup->gpwrdn_local;
++		if (gpwrdn.b.dis_vbus == 1) {
++			gpwrdn.d32 = 0;
++			gpwrdn.b.dis_vbus = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		}
++
++		if (gpwrdn_temp.b.idsts) {
++			core_if->op_state = B_PERIPHERAL;
++			fh_otg_core_init(core_if);
++			fh_otg_enable_global_interrupts(core_if);
++			cil_pcd_start(core_if);
++		} else {
++			core_if->op_state = A_HOST;
++			fh_otg_core_init(core_if);
++			fh_otg_enable_global_interrupts(core_if);
++			cil_hcd_start(core_if);
++		}
++	}
++
++	if (core_if->adp_enable) {
++		uint8_t is_host = 0;
++		FH_SPINUNLOCK(core_if->lock);
++		/* Change the core_if's lock to hcd/pcd lock depend on mode? */
++#ifndef FH_HOST_ONLY
++		if (gpwrdn_temp.b.idsts)
++			core_if->lock = otg_dev->pcd->lock;
++#endif
++#ifndef FH_DEVICE_ONLY
++		if (!gpwrdn_temp.b.idsts) {
++			core_if->lock = otg_dev->hcd->lock;
++			is_host = 1;
++		}
++#endif
++		FH_DEBUGPL(DBG_ANY, "RESTART ADP\n");
++		if (core_if->adp.probe_enabled)
++			fh_otg_adp_probe_stop(core_if);
++		if (core_if->adp.sense_enabled)
++			fh_otg_adp_sense_stop(core_if);
++		if (core_if->adp.sense_timer_started)
++			FH_TIMER_CANCEL(core_if->adp.sense_timer);
++		if (core_if->adp.vbuson_timer_started)
++			FH_TIMER_CANCEL(core_if->adp.vbuson_timer);
++		/* Do not reset ADP if we are coming back to device mode
++		 * after HNP. This avoids performing SRP after the role
++		 * reversal; just run the ADP probe and compare the RTIM
++		 * values with the ones measured before HNP. */
++		if (core_if->op_state != B_HOST) {
++			core_if->adp.probe_timer_values[0] = -1;
++			core_if->adp.probe_timer_values[1] = -1;
++			core_if->adp.probe_counter = 0;
++			core_if->adp.gpwrdn = 0;
++		}
++		core_if->adp.sense_timer_started = 0;
++		core_if->adp.vbuson_timer_started = 0;
++
++		/* Disable PMU and restart ADP */
++		gpwrdn_temp.d32 = 0;
++		gpwrdn_temp.b.pmuactv = 1;
++		gpwrdn_temp.b.pmuintsel = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn_temp.d32, 0);
++		fh_mdelay(110);
++		fh_otg_adp_start(core_if, is_host);
++		FH_SPINLOCK(core_if->lock);
++	}
++
++	return 1;
++}
++
++static int32_t fh_otg_handle_pwrdn_session_change(fh_otg_core_if_t * core_if)
++{
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	int32_t otg_cap_param = core_if->core_params->otg_cap;
++	FH_DEBUGPL(DBG_ANY, "%s called\n", __FUNCTION__);
++
++	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++	if (core_if->power_down == 2) {
++		if (!core_if->hibernation_suspend) {
++			FH_PRINTF("Already exited from Hibernation\n");
++			return 1;
++		}
++
++		if ((otg_cap_param != FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE &&
++		     otg_cap_param != FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE) &&
++		    gpwrdn.b.bsessvld == 0) {
++			/* Save gpwrdn register for later use by the stschng
++			 * interrupt handler */
++			core_if->gr_backup->gpwrdn_local =
++			    FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++			/* Exit from the ISR and wait for a stschng interrupt
++			 * with bsessvld = 1 */
++			return 1;
++		}
++
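++		/* Same exit-from-hibernation register sequence as above. */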
++		/* Switch on the voltage to the core */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnswtch = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		/* Reset the core */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnrstn = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		/* Disable power clamps */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnclmp = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++		/* Release the core reset signal */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnrstn = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++		fh_udelay(10);
++
++		/* Disable PMU interrupt */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuintsel = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		/* Indicates that we are exiting from hibernation */
++		core_if->hibernation_suspend = 0;
++
++		/* Disable PMU */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuactv = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		core_if->op_state = B_PERIPHERAL;
++		fh_otg_core_init(core_if);
++		fh_otg_enable_global_interrupts(core_if);
++		cil_pcd_start(core_if);
++
++		if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE ||
++		    otg_cap_param == FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE) {
++			/*
++			 * Initiate SRP after initial ADP probe.
++			 */
++			fh_otg_initiate_srp(core_if);
++		}
++	} else if (core_if->adp_enable && core_if->op_state != A_HOST){
++		fh_otg_adp_probe_stop(core_if);
++		if (FH_WORKQ_PENDING(core_if->wq_otg))
++			core_if->stop_adpprb = 1;
++		/* Disable Power Down Logic */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuintsel = 1;
++		gpwrdn.b.pmuactv = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->
++				 gpwrdn, gpwrdn.d32, 0);
++
++		/*
++		 * Initialize the Core for Device mode.
++		 */
++		core_if->op_state = B_PERIPHERAL;
++		cil_pcd_start(core_if);
++		fh_otg_enable_global_interrupts(core_if);
++	}
++
++	return 1;
++}
++
++/**
++ * This interrupt indicates that the Wakeup Logic has detected a
++ * status change either on IDDIG or BSessVld.
++ */
++static uint32_t fh_otg_handle_pwrdn_stschng_intr(fh_otg_device_t * otg_dev)
++{
++	int retval = 0;
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	gpwrdn_data_t gpwrdn_temp = {.d32 = 0 };
++	fh_otg_core_if_t *core_if = otg_dev->core_if;
++
++	FH_DEBUGPL(DBG_CIL, "%s called\n", __FUNCTION__);
++
++	if (core_if->power_down == 2) {
++		if (core_if->hibernation_suspend <= 0) {
++			FH_PRINTF("Already exited from Hibernation\n");
++			return 1;
++		} else
++			gpwrdn_temp.d32 = core_if->gr_backup->gpwrdn_local;
++
++	} else {
++		gpwrdn_temp.d32 = core_if->adp.gpwrdn;
++	}
++
++	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++
++	if (gpwrdn.b.idsts ^ gpwrdn_temp.b.idsts) {
++		retval = fh_otg_handle_pwrdn_idsts_change(otg_dev);
++	} else if (gpwrdn.b.bsessvld ^ gpwrdn_temp.b.bsessvld) {
++		retval = fh_otg_handle_pwrdn_session_change(core_if);
++	}
++
++	return retval;
++}
++
++/**
++ * This interrupt indicates that the Wakeup Logic has detected a
++ * SRP.
++ */
++static int32_t fh_otg_handle_pwrdn_srp_intr(fh_otg_core_if_t * core_if)
++{
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++
++	FH_PRINTF("%s called\n", __FUNCTION__);
++
++	if (core_if->power_down == 2) {
++		if (!core_if->hibernation_suspend) {
++			FH_PRINTF("Already exited from Hibernation\n");
++			return 1;
++		}
++#ifdef FH_DEV_SRPCAP
++		if (core_if->pwron_timer_started) {
++			core_if->pwron_timer_started = 0;
++			FH_TIMER_CANCEL(core_if->pwron_timer);
++		}
++#endif
++
++		/* Switch on the voltage to the core */
++		gpwrdn.b.pwrdnswtch = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		/* Reset the core */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnrstn = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		/* Disable power clamps */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnclmp = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++		/* Release the core reset signal */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pwrdnrstn = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++		fh_udelay(10);
++
++		/* Disable PMU interrupt */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuintsel = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++		/* Indicates that we are exiting from hibernation */
++		core_if->hibernation_suspend = 0;
++
++		/* Disable PMU */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuactv = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++		fh_udelay(10);
++
++		/* Program Disable VBUS to 0 */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.dis_vbus = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++		/* Initialize the core as Host */
++		core_if->op_state = A_HOST;
++		fh_otg_core_init(core_if);
++		fh_otg_enable_global_interrupts(core_if);
++		cil_hcd_start(core_if);
++	}
++	/* No need to do anything if this is an "old" SRP and we are already
++	 * in the normal mode of operation */
++	if (core_if->adp_enable) {
++		gpwrdn.d32 =  FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++		if (!gpwrdn.b.pmuactv) {
++			return 1;
++		}
++		
++		fh_otg_adp_probe_stop(core_if);
++		/* Disable Interrupt from Power Down Logic */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.pmuintsel = 1;
++		gpwrdn.b.pmuactv = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->
++				 gpwrdn, gpwrdn.d32, 0);
++
++		/*
++		 * Initialize the Core for Host mode.
++		 */
++		core_if->op_state = A_HOST;
++		fh_otg_core_init(core_if);
++		fh_otg_enable_global_interrupts(core_if);
++		cil_hcd_start(core_if);
++		/* Start the connection timer, so a message can be displayed
++		 * if a connect does not occur within 10 seconds. */
++		cil_hcd_session_start(core_if);
++	}
++
++	return 1;
++}
++
++/** This interrupt indicates that the restore command issued after
++ * hibernation has been completed by the core. */
++int32_t fh_otg_handle_restore_done_intr(fh_otg_core_if_t * core_if)
++{
++	pcgcctl_data_t pcgcctl;
++	FH_DEBUGPL(DBG_ANY, "++Restore Done Interrupt++\n");
++
++	//TODO De-assert restore signal. 8.a
++	pcgcctl.d32 = FH_READ_REG32(core_if->pcgcctl);
++	if (pcgcctl.b.restoremode == 1) {
++		gintmsk_data_t gintmsk = {.d32 = 0 };
++		/*
++		 * If restore mode is Remote Wakeup,
++		 * unmask Remote Wakeup interrupt.
++		 */
++		gintmsk.b.wkupintr = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
++				 0, gintmsk.d32);
++	}
++
++	return 1;
++}
++
++/**
++ * This interrupt indicates that a device has been disconnected from
++ * the root port.
++ */
++int32_t fh_otg_handle_disconnect_intr(fh_otg_core_if_t * core_if)
++{
++	gintsts_data_t gintsts;
++
++	FH_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n",
++		    (fh_otg_is_host_mode(core_if) ? "Host" : "Device"),
++		    op_state_str(core_if));
++
++/** @todo Consolidate this if statement. */
++#ifndef FH_HOST_ONLY
++	if (core_if->op_state == B_HOST) {
++		/* If in device mode Disconnect and stop the HCD, then
++		 * start the PCD. */
++		FH_SPINUNLOCK(core_if->lock);
++		cil_hcd_disconnect(core_if);
++		cil_pcd_start(core_if);
++		FH_SPINLOCK(core_if->lock);
++		core_if->op_state = B_PERIPHERAL;
++	} else if (fh_otg_is_device_mode(core_if)) {
++		gotgctl_data_t gotgctl = {.d32 = 0 };
++		gotgctl.d32 =
++		    FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++		if (gotgctl.b.hstsethnpen == 1) {
++			/* Do nothing, if HNP in process the OTG
++			 * interrupt "Host Negotiation Detected"
++			 * interrupt will do the mode switch.
++			 */
++		} else if (gotgctl.b.devhnpen == 0) {
++			/* If in device mode Disconnect and stop the HCD, then
++			 * start the PCD. */
++			FH_SPINUNLOCK(core_if->lock);
++			cil_hcd_disconnect(core_if);
++			cil_pcd_start(core_if);
++			FH_SPINLOCK(core_if->lock);
++			core_if->op_state = B_PERIPHERAL;
++		} else {
++			FH_DEBUGPL(DBG_ANY, "!a_peripheral && !devhnpen\n");
++		}
++	} else {
++		if (core_if->op_state == A_HOST) {
++			/* A-Cable still connected but device disconnected. */
++			cil_hcd_disconnect(core_if);
++			if (core_if->adp_enable) {
++				gpwrdn_data_t gpwrdn = {.d32 = 0 };
++				cil_hcd_stop(core_if);
++				/* Enable Power Down Logic */
++				gpwrdn.b.pmuintsel = 1;
++				gpwrdn.b.pmuactv = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++				fh_otg_adp_probe_start(core_if);
++
++				/* Power off the core */
++				if (core_if->power_down == 2) {
++					gpwrdn.d32 = 0;
++					gpwrdn.b.pwrdnswtch = 1;
++					FH_MODIFY_REG32
++					    (&core_if->core_global_regs->gpwrdn,
++					     gpwrdn.d32, 0);
++				}
++			}
++		}
++	}
++#endif
++	/* Change to L3(OFF) state */
++	core_if->lx_state = FH_OTG_L3;
++
++	gintsts.d32 = 0;
++	gintsts.b.disconnect = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++	return 1;
++}
++
++/**
++ * This interrupt indicates that SUSPEND state has been detected on
++ * the USB.
++ *
++ * For HNP the USB Suspend interrupt signals the change from
++ * "a_peripheral" to "a_host".
++ *
++ * When power management is enabled the core will be put in low power
++ * mode.
++ */
++int32_t fh_otg_handle_usb_suspend_intr(fh_otg_core_if_t * core_if)
++{
++	dsts_data_t dsts;
++	gintsts_data_t gintsts;
++	dcfg_data_t dcfg;
++
++	FH_DEBUGPL(DBG_ANY, "USB SUSPEND\n");
++
++	if ((core_if->otg_ver == 1) && (core_if->op_state == A_PERIPHERAL)) {
++		core_if->lx_state = FH_OTG_L2;
++
++		/* Clear interrupt */
++		gintsts.d32 = 0;
++		gintsts.b.usbsuspend = 1;
++		FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++		return 1;
++	}
++
++	if (fh_otg_is_device_mode(core_if)) {
++		/* Check the Device status register to determine if the Suspend
++		 * state is active. */
++		dsts.d32 =
++		    FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
++		FH_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32);
++		FH_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d "
++			    "HWCFG4.power Optimize=%d\n",
++			    dsts.b.suspsts, core_if->hwcfg4.b.power_optimiz);
++
++#ifdef PARTIAL_POWER_DOWN
++/** @todo Add a module parameter for power management. */
++
++		if (dsts.b.suspsts && core_if->hwcfg4.b.power_optimiz) {
++			pcgcctl_data_t power = {.d32 = 0 };
++			FH_DEBUGPL(DBG_CIL, "suspend\n");
++
++			power.b.pwrclmp = 1;
++			FH_WRITE_REG32(core_if->pcgcctl, power.d32);
++
++			power.b.rstpdwnmodule = 1;
++			FH_MODIFY_REG32(core_if->pcgcctl, 0, power.d32);
++
++			power.b.stoppclk = 1;
++			FH_MODIFY_REG32(core_if->pcgcctl, 0, power.d32);
++
++		} else {
++			FH_DEBUGPL(DBG_ANY, "disconnect?\n");
++		}
++#endif
++		/* PCD callback for suspend. Release the lock inside of callback function */
++		cil_pcd_suspend(core_if);
++		if (core_if->power_down == 2) {
++			dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
++			FH_DEBUGPL(DBG_ANY,"lx_state = %08x\n",core_if->lx_state);
++			FH_DEBUGPL(DBG_ANY," device address = %08d\n",dcfg.b.devaddr);
++
++			if (core_if->lx_state != FH_OTG_L3 && dcfg.b.devaddr) {
++				pcgcctl_data_t pcgcctl = {.d32 = 0 };
++				gpwrdn_data_t gpwrdn = {.d32 = 0 };
++				gusbcfg_data_t gusbcfg = {.d32 = 0 };
++
++				/* Change to L2(suspend) state */
++				core_if->lx_state = FH_OTG_L2;
++
++				/* Clear interrupt in gintsts */
++				gintsts.d32 = 0;
++				gintsts.b.usbsuspend = 1;
++				FH_WRITE_REG32(&core_if->core_global_regs->
++						gintsts, gintsts.d32);
++				FH_PRINTF("Start of hibernation completed\n");
++				fh_otg_save_global_regs(core_if);
++				fh_otg_save_dev_regs(core_if);
++
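++				/* Ordering depends on the PHY interface:
++				 * with ULPI the PHY clock is stopped before
++				 * the PMU is activated; with UTMI+ the PMU
++				 * is activated first. */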
++				gusbcfg.d32 =
++				    FH_READ_REG32(&core_if->core_global_regs->
++						   gusbcfg);
++				if (gusbcfg.b.ulpi_utmi_sel == 1) {
++					/* ULPI interface */
++					/* Suspend the Phy Clock */
++					pcgcctl.d32 = 0;
++					pcgcctl.b.stoppclk = 1;
++					FH_MODIFY_REG32(core_if->pcgcctl, 0,
++							 pcgcctl.d32);
++					fh_udelay(10);
++					gpwrdn.b.pmuactv = 1;
++					FH_MODIFY_REG32(&core_if->
++							 core_global_regs->
++							 gpwrdn, 0, gpwrdn.d32);
++				} else {
++					/* UTMI+ Interface */
++					gpwrdn.b.pmuactv = 1;
++					FH_MODIFY_REG32(&core_if->
++							 core_global_regs->
++							 gpwrdn, 0, gpwrdn.d32);
++					fh_udelay(10);
++					pcgcctl.b.stoppclk = 1;
++					FH_MODIFY_REG32(core_if->pcgcctl, 0,
++							 pcgcctl.d32);
++					fh_udelay(10);
++				}
++
++				/* Set flag to indicate that we are in hibernation */
++				core_if->hibernation_suspend = 1;
++				/* Enable interrupts from wake up logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pmuintsel = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++				fh_udelay(10);
++
++				/* Unmask device mode interrupts in GPWRDN */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.rst_det_msk = 1;
++				gpwrdn.b.lnstchng_msk = 1;
++				gpwrdn.b.sts_chngint_msk = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++				fh_udelay(10);
++
++				/* Enable Power Down Clamp */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pwrdnclmp = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++				fh_udelay(10);
++
++				/* Switch off VDD */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pwrdnswtch = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++
++				/* Save gpwrdn register for later use by the stschng interrupt handler */
++				core_if->gr_backup->gpwrdn_local =
++							FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++				FH_PRINTF("Hibernation completed\n");
++
++				return 1;
++			}
++		} else if (core_if->power_down == 3) {
++			pcgcctl_data_t pcgcctl = {.d32 = 0 };
++			dcfg.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
++			FH_DEBUGPL(DBG_ANY, "lx_state = %08x\n",core_if->lx_state);
++			FH_DEBUGPL(DBG_ANY, " device address = %08d\n",dcfg.b.devaddr);
++
++			if (core_if->lx_state != FH_OTG_L3 && dcfg.b.devaddr) {
++				FH_DEBUGPL(DBG_ANY, "Start entering to extended hibernation\n");
++				core_if->xhib = 1;
++							
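++				/* Extended-hibernation entry: save the
++				 * registers, signal entry through GGPIO,
++				 * then enable xHib gating, the power clamp
++				 * and the power switch in PCGCCTL. */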
++				/* Clear interrupt in gintsts */
++				gintsts.d32 = 0;
++				gintsts.b.usbsuspend = 1;
++				FH_WRITE_REG32(&core_if->core_global_regs->
++					gintsts, gintsts.d32);
++
++				fh_otg_save_global_regs(core_if);
++				fh_otg_save_dev_regs(core_if);
++				
++				/* Wait for 10 PHY clocks */
++				fh_udelay(10);
++
++				/* Program GPIO register while entering to xHib */
++				FH_WRITE_REG32(&core_if->core_global_regs->ggpio, 0x1);
++
++				pcgcctl.b.enbl_extnd_hiber = 1;
++				FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
++				FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
++				
++				pcgcctl.d32 = 0;
++				pcgcctl.b.extnd_hiber_pwrclmp = 1;
++				FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
++
++				pcgcctl.d32 = 0;
++				pcgcctl.b.extnd_hiber_switch = 1;
++				core_if->gr_backup->xhib_gpwrdn = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++				core_if->gr_backup->xhib_pcgcctl = FH_READ_REG32(core_if->pcgcctl) | pcgcctl.d32;
++				FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
++
++				FH_DEBUGPL(DBG_ANY, "Finished entering to extended hibernation\n");
++				
++				return 1;
++			}
++		}
++		if ((core_if->otg_ver == 1) && (core_if->core_params->otg_cap == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE)) {
++			gotgctl_data_t gotgctl = {.d32 = 0 };
++			gotgctl.d32 = FH_READ_REG32(&core_if->core_global_regs->gotgctl);
++			if (gotgctl.b.devhnpen) {
++				fh_mdelay(5);
++				/**@todo Is the gotgctl.devhnpen cleared
++				 * by a USB Reset? */
++				gotgctl.d32 = 0;
++				gotgctl.b.devhnpen = 1;
++				gotgctl.b.hnpreq = 1;
++				FH_WRITE_REG32(&core_if->core_global_regs->gotgctl,
++						gotgctl.d32);
++			}
++		}
++	} else {
++		if (core_if->op_state == A_PERIPHERAL) {
++			FH_DEBUGPL(DBG_ANY, "a_peripheral->a_host\n");
++			/* Clear the a_peripheral flag, back to a_host. */
++			FH_SPINUNLOCK(core_if->lock);
++			cil_pcd_stop(core_if);
++			cil_hcd_start(core_if);
++			FH_SPINLOCK(core_if->lock);
++			core_if->op_state = A_HOST;
++		}
++	}
++
++	/* Change to L2(suspend) state */
++	core_if->lx_state = FH_OTG_L2;
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.usbsuspend = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
++
++static int32_t fh_otg_handle_xhib_exit_intr(fh_otg_core_if_t * core_if)
++{
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	pcgcctl_data_t pcgcctl = {.d32 = 0 };
++	gahbcfg_data_t gahbcfg = {.d32 = 0 };
++
++	fh_udelay(10);
++
++	/* Program GPIO register while entering to xHib */
++	FH_WRITE_REG32(&core_if->core_global_regs->ggpio, 0x0);
++
++	pcgcctl.d32 = core_if->gr_backup->xhib_pcgcctl;
++	pcgcctl.b.extnd_hiber_pwrclmp = 0;
++	FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
++	fh_udelay(10);
++
++	gpwrdn.d32 = core_if->gr_backup->xhib_gpwrdn;
++	gpwrdn.b.restore = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32);
++	fh_udelay(10);
++
++	restore_lpm_i2c_regs(core_if);
++
++	pcgcctl.d32 = core_if->gr_backup->pcgcctl_local & (0x3FFFF << 14);
++	pcgcctl.b.max_xcvrselect = 1;
++	pcgcctl.b.ess_reg_restored = 0;
++	pcgcctl.b.extnd_hiber_switch = 0;
++	pcgcctl.b.extnd_hiber_pwrclmp = 0;
++	pcgcctl.b.enbl_extnd_hiber = 1;
++	FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
++
++	gahbcfg.d32 = core_if->gr_backup->gahbcfg_local;
++	gahbcfg.b.glblintrmsk = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gahbcfg, gahbcfg.d32);
++
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
++	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, 0x1 << 16);
++
++	FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
++			core_if->gr_backup->gusbcfg_local);
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg,
++			core_if->dr_backup->dcfg);
++
++	pcgcctl.d32 = 0;
++	pcgcctl.d32 = core_if->gr_backup->pcgcctl_local & (0x3FFFF << 14);
++	pcgcctl.b.max_xcvrselect = 1;
++	pcgcctl.d32 |= 0x608;
++	FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
++	fh_udelay(10);
++
++	pcgcctl.d32 = 0;
++	pcgcctl.d32 = core_if->gr_backup->pcgcctl_local & (0x3FFFF << 14);
++	pcgcctl.b.max_xcvrselect = 1;
++	pcgcctl.b.ess_reg_restored = 1;
++	pcgcctl.b.enbl_extnd_hiber = 1;
++	pcgcctl.b.rstpdwnmodule = 1;
++	pcgcctl.b.restoremode = 1;
++	FH_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
++
++	FH_DEBUGPL(DBG_ANY, "%s called\n", __FUNCTION__);
++
++	return 1;
++}
++
++#ifdef CONFIG_USB_FH_OTG_LPM
++/**
++ * This function handles the LPM Transaction Received interrupt.
++ */
++static int32_t fh_otg_handle_lpm_intr(fh_otg_core_if_t * core_if)
++{
++	glpmcfg_data_t lpmcfg;
++	gintsts_data_t gintsts;
++
++	if (!core_if->core_params->lpm_enable) {
++		FH_PRINTF("Unexpected LPM interrupt\n");
++	}
++
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	FH_PRINTF("LPM config register = 0x%08x\n", lpmcfg.d32);
++
++	if (fh_otg_is_host_mode(core_if)) {
++		cil_hcd_sleep(core_if);
++	} else {
++
++		pcgcctl_data_t pcgcctl = {.d32 = 0 };
++
++		lpmcfg.b.hird_thres |= (1 << 4);
++		lpmcfg.b.en_utmi_sleep = 1;
++
++		pcgcctl.b.enbl_sleep_gating = 1;
++		FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
++
++		if (fh_otg_get_param_besl_enable(core_if)) {
++			lpmcfg.b.en_besl = 1;
++		}
++
++		FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg,
++				lpmcfg.d32);		
++	}
++
++	/* Examine prt_sleep_sts after the TL1TokenRetry period, max 10 us */
++	fh_udelay(10);
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	if (lpmcfg.b.prt_sleep_sts) {
++		/* Save the current state */
++		core_if->lx_state = FH_OTG_L1;
++	}
++
++	/* Clear interrupt  */
++	gintsts.d32 = 0;
++	gintsts.b.lpmtranrcvd = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++	return 1;
++}
++#endif /* CONFIG_USB_FH_OTG_LPM */
++
++/**
++ * This function returns the Core Interrupt register.
++ */
++static inline uint32_t fh_otg_read_common_intr(fh_otg_core_if_t * core_if)
++{
++	gahbcfg_data_t gahbcfg = {.d32 = 0 };
++	gintsts_data_t gintsts;
++	gintmsk_data_t gintmsk;
++	gintmsk_data_t gintmsk_common = {.d32 = 0 };
++	gintmsk_common.b.wkupintr = 1;
++	gintmsk_common.b.sessreqintr = 1;
++	gintmsk_common.b.conidstschng = 1;
++	gintmsk_common.b.otgintr = 1;
++	gintmsk_common.b.modemismatch = 1;
++	gintmsk_common.b.disconnect = 1;
++	gintmsk_common.b.usbsuspend = 1;
++#ifdef CONFIG_USB_FH_OTG_LPM
++	gintmsk_common.b.lpmtranrcvd = 1;
++#endif
++	gintmsk_common.b.restoredone = 1;
++	/** @todo The port interrupt occurs while in device
++	 * mode. Added code to CIL to clear the interrupt for now!
++	 */
++	gintmsk_common.b.portintr = 1;
++
++	gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++	gintmsk.d32 = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
++	gahbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gahbcfg);
++
++#ifdef DEBUG
++	/* if any common interrupts set */
++	if (gintsts.d32 & gintmsk_common.d32) {
++		FH_DEBUGPL(DBG_ANY, "gintsts=%08x  gintmsk=%08x\n",
++			    gintsts.d32, gintmsk.d32);
++	}
++#endif
++	if (gahbcfg.b.glblintrmsk)
++		return ((gintsts.d32 & gintmsk.d32) & gintmsk_common.d32);
++	else
++		return 0;
++
++}
++
++/* Macro for clearing interrupt bits in the GPWRDN register */
++#define CLEAR_GPWRDN_INTR(__core_if,__intr) \
++do { \
++		gpwrdn_data_t gpwrdn = {.d32=0}; \
++		gpwrdn.b.__intr = 1; \
++		FH_MODIFY_REG32(&__core_if->core_global_regs->gpwrdn, \
++		0, gpwrdn.d32); \
++} while (0)
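++/* The interrupt flags in GPWRDN appear to be write-1-to-clear: the macro
++ * acknowledges an interrupt by setting the selected bit through the set-mask
++ * (third) argument of FH_MODIFY_REG32, e.g. CLEAR_GPWRDN_INTR(core_if, srp_det). */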
++
++/**
++ * Common interrupt handler.
++ *
++ * The common interrupts are those that occur in both Host and Device mode.
++ * This handler handles the following interrupts:
++ * - Mode Mismatch Interrupt
++ * - Disconnect Interrupt
++ * - OTG Interrupt
++ * - Connector ID Status Change Interrupt
++ * - Session Request Interrupt
++ * - Resume / Remote Wakeup Detected Interrupt
++ * - LPM Transaction Received Interrupt
++ * - ADP Transaction Received Interrupt
++ * - Restore Done Interrupt
++ * - Port Interrupt (cleared here only when it occurs in device mode)
++ *
++ */
++int32_t fh_otg_handle_common_intr(void *dev)
++{
++	int retval = 0;
++	gintsts_data_t gintsts;
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	fh_otg_device_t *otg_dev = dev;
++	fh_otg_core_if_t *core_if = otg_dev->core_if;
++	gpwrdn.d32 = FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++	
++	if (fh_otg_check_haps_status(core_if) == -1) {
++		FH_WARN("HAPS is disconnected");
++		return retval;
++	}
++	
++	if (fh_otg_is_device_mode(core_if))
++		core_if->frame_num = fh_otg_get_frame_number(core_if);
++		
++	if (core_if->lock)
++		FH_SPINLOCK(core_if->lock);
++
++	if (core_if->power_down == 3 && core_if->xhib == 1) {
++		FH_DEBUGPL(DBG_ANY, "Exiting from xHIB state\n");
++		retval |= fh_otg_handle_xhib_exit_intr(core_if);
++		core_if->xhib = 2;
++		if (core_if->lock)
++			FH_SPINUNLOCK(core_if->lock);
++
++		return retval;
++	}
++
++	if (core_if->hibernation_suspend <= 0) {
++		gintsts.d32 = fh_otg_read_common_intr(core_if);
++
++		if (gintsts.b.modemismatch) {
++			retval |= fh_otg_handle_mode_mismatch_intr(core_if);
++		}
++		if (gintsts.b.otgintr) {
++			retval |= fh_otg_handle_otg_intr(core_if);
++		}
++		if (gintsts.b.conidstschng) {
++			retval |=
++			    fh_otg_handle_conn_id_status_change_intr(core_if);
++		}
++		if (gintsts.b.disconnect) {
++			retval |= fh_otg_handle_disconnect_intr(core_if);
++		}
++		if (gintsts.b.sessreqintr) {
++			retval |= fh_otg_handle_session_req_intr(core_if);
++		}
++		if (gintsts.b.wkupintr) {
++			retval |= fh_otg_handle_wakeup_detected_intr(core_if);
++		}
++		if (gintsts.b.usbsuspend) {
++			retval |= fh_otg_handle_usb_suspend_intr(core_if);
++		}
++#ifdef CONFIG_USB_FH_OTG_LPM
++		if (gintsts.b.lpmtranrcvd) {
++			retval |= fh_otg_handle_lpm_intr(core_if);
++		}
++#endif
++		if (gintsts.b.restoredone) {
++			gintsts.d32 = 0;
++			if (core_if->power_down == 2)
++				core_if->hibernation_suspend = -1;
++			else if (core_if->power_down == 3 && core_if->xhib == 2) {
++				gpwrdn_data_t gpwrdn = {.d32 = 0 };
++				pcgcctl_data_t pcgcctl = {.d32 = 0 };
++				dctl_data_t dctl = {.d32 = 0 };
++
++				FH_WRITE_REG32(&core_if->core_global_regs->
++						gintsts, 0xFFFFFFFF);
++
++				FH_DEBUGPL(DBG_ANY,
++					    "RESTORE DONE generated\n");
++
++				gpwrdn.b.restore = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++				fh_udelay(10);
++
++				pcgcctl.b.rstpdwnmodule = 1;
++				FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
++
++				FH_WRITE_REG32(&core_if->core_global_regs->gusbcfg, core_if->gr_backup->gusbcfg_local);
++				FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, core_if->dr_backup->dcfg);
++				FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, core_if->dr_backup->dctl);
++				fh_udelay(50);
++				
++				dctl.b.pwronprgdone = 1;
++				FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
++				fh_udelay(10);
++
++				fh_otg_restore_global_regs(core_if);
++				fh_otg_restore_dev_regs(core_if, 0);
++
++				dctl.d32 = 0;
++				dctl.b.pwronprgdone = 1;
++				FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
++				fh_udelay(10);
++
++				pcgcctl.d32 = 0;
++				pcgcctl.b.enbl_extnd_hiber = 1;
++				FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
++
++				/* The core will be in ON STATE */
++				core_if->lx_state = FH_OTG_L0;
++				core_if->xhib = 0;
++
++				FH_SPINUNLOCK(core_if->lock);
++				if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
++					core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
++				}
++				FH_SPINLOCK(core_if->lock);
++
++			}
++
++			gintsts.b.restoredone = 1;
++			FH_WRITE_REG32(&core_if->core_global_regs->gintsts,gintsts.d32);
++			FH_PRINTF(" --Restore done interrupt received-- \n");
++			retval |= 1;
++		}
++		if (gintsts.b.portintr && fh_otg_is_device_mode(core_if)) {
++			/* The port interrupt occurs while in device mode with HPRT0
++			 * Port Enable/Disable.
++			 */
++			gintsts.d32 = 0;
++			gintsts.b.portintr = 1;
++			FH_WRITE_REG32(&core_if->core_global_regs->gintsts,gintsts.d32);
++			retval |= 1;
++
++		}
++	} else {
++		FH_DEBUGPL(DBG_ANY, "gpwrdn=%08x\n", gpwrdn.d32);
++
++		if (gpwrdn.b.disconn_det && gpwrdn.b.disconn_det_msk) {
++			CLEAR_GPWRDN_INTR(core_if, disconn_det);
++			if (gpwrdn.b.linestate == 0) {
++				fh_otg_handle_pwrdn_disconnect_intr(core_if);
++			} else {
++				FH_PRINTF("Disconnect detected while linestate is not 0\n");
++			}
++
++			retval |= 1;
++		}
++		if (gpwrdn.b.lnstschng && gpwrdn.b.lnstchng_msk) {
++			CLEAR_GPWRDN_INTR(core_if, lnstschng);
++			/* remote wakeup from hibernation */
++			if (gpwrdn.b.linestate == 2 || gpwrdn.b.linestate == 1) {
++				fh_otg_handle_pwrdn_wakeup_detected_intr(core_if);
++			} else {
++				FH_PRINTF("gpwrdn.linestate = %d\n", gpwrdn.b.linestate);
++			}
++			retval |= 1;
++		}
++		if (gpwrdn.b.rst_det && gpwrdn.b.rst_det_msk) {
++			CLEAR_GPWRDN_INTR(core_if, rst_det);
++			if (gpwrdn.b.linestate == 0) {
++				FH_PRINTF("Reset detected\n");
++				retval |= fh_otg_device_hibernation_restore(core_if, 0, 1);
++			}
++		}
++		if (gpwrdn.b.srp_det && gpwrdn.b.srp_det_msk) {
++			CLEAR_GPWRDN_INTR(core_if, srp_det);
++			fh_otg_handle_pwrdn_srp_intr(core_if);
++			retval |= 1;
++		}
++	}
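++	/* The checks below use the GPWRDN snapshot taken at function entry
++	 * and run regardless of the hibernation state. */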
++	/* Handle ADP interrupt here */
++	if (gpwrdn.b.adp_int) {
++		CLEAR_GPWRDN_INTR(core_if, adp_int);
++		fh_otg_adp_handle_intr(core_if);
++		retval |= 1;
++	}
++	if (gpwrdn.b.sts_chngint && gpwrdn.b.sts_chngint_msk) {
++		CLEAR_GPWRDN_INTR(core_if, sts_chngint);
++		fh_otg_handle_pwrdn_stschng_intr(otg_dev);
++
++		retval |= 1;
++	}
++	if (gpwrdn.b.srp_det && gpwrdn.b.srp_det_msk) {
++		CLEAR_GPWRDN_INTR(core_if, srp_det);
++		fh_otg_handle_pwrdn_srp_intr(core_if);
++		retval |= 1;
++	}
++	if (core_if->lock)
++		FH_SPINUNLOCK(core_if->lock);
++
++	return retval;
++}
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_core_if.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_core_if.h
+new file mode 100644
+index 00000000..822b8d67
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_core_if.h
+@@ -0,0 +1,746 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_core_if.h $
++ * $Revision: #20 $
++ * $Date: 2015/10/12 $
++ * $Change: 2972621 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#if !defined(__FH_CORE_IF_H__)
++#define __FH_CORE_IF_H__
++
++#include "../fh_common_port/fh_os.h"
++
++/** @file
++ * This file defines the FH_OTG Core API.
++ */
++
++struct fh_otg_core_if;
++typedef struct fh_otg_core_if fh_otg_core_if_t;
++
++/** Maximum number of Periodic FIFOs */
++#define MAX_PERIO_FIFOS 15
++/** Maximum number of Tx FIFOs */
++#define MAX_TX_FIFOS 15
++
++/** Maximum number of Endpoints/HostChannels */
++#define MAX_EPS_CHANNELS 16
++
++extern fh_otg_core_if_t *fh_otg_cil_init(const uint32_t * _reg_base_addr);
++extern void fh_otg_core_init(fh_otg_core_if_t * _core_if);
++extern void fh_otg_cil_remove(fh_otg_core_if_t * _core_if);
++
++extern void fh_otg_enable_global_interrupts(fh_otg_core_if_t * _core_if);
++extern void fh_otg_disable_global_interrupts(fh_otg_core_if_t * _core_if);
++
++extern uint8_t fh_otg_is_device_mode(fh_otg_core_if_t * _core_if);
++extern uint8_t fh_otg_is_host_mode(fh_otg_core_if_t * _core_if);
++
++extern uint8_t fh_otg_is_dma_enable(fh_otg_core_if_t * core_if);
++
++/** This function should be called on every hardware interrupt. */
++extern int32_t fh_otg_handle_common_intr(void *otg_dev);
++
++
++/** @name OTG Core Parameters */
++/** @{ */
++
++/**
++ * Specifies the OTG capabilities. The driver will automatically
++ * detect the value for this parameter if none is specified.
++ * 0 - HNP and SRP capable (default)
++ * 1 - SRP Only capable
++ * 2 - No HNP/SRP capable
++ */
++extern int fh_otg_set_param_otg_cap(fh_otg_core_if_t * core_if, int32_t val);
++extern int32_t fh_otg_get_param_otg_cap(fh_otg_core_if_t * core_if);
++#define FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0
++#define FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1
++#define FH_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
++#define fh_param_otg_cap_default FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE
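++/* Illustrative use, assuming the set_param helpers return 0 on success and
++ * a nonzero error when the configured core cannot honor the request:
++ *
++ *   if (fh_otg_set_param_otg_cap(core_if, FH_OTG_CAP_PARAM_SRP_ONLY_CAPABLE))
++ *           FH_WARN("otg_cap value rejected by this core configuration\n");
++ */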
++
++extern int fh_otg_set_param_opt(fh_otg_core_if_t * core_if, int32_t val);
++extern int32_t fh_otg_get_param_opt(fh_otg_core_if_t * core_if);
++#define fh_param_opt_default 1
++
++/**
++ * Specifies whether to use slave or DMA mode for accessing the data
++ * FIFOs. The driver will automatically detect the value for this
++ * parameter if none is specified.
++ * 0 - Slave
++ * 1 - DMA (default, if available)
++ */
++extern int fh_otg_set_param_dma_enable(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_dma_enable(fh_otg_core_if_t * core_if);
++#define fh_param_dma_enable_default 1
++
++/**
++ * When DMA mode is enabled specifies whether to use
++ * address DMA or DMA Descriptor mode for accessing the data
++ * FIFOs in device mode. The driver will automatically detect
++ * the value for this parameter if none is specified.
++ * 0 - address DMA
++ * 1 - DMA Descriptor (default, if available)
++ */
++extern int fh_otg_set_param_dma_desc_enable(fh_otg_core_if_t * core_if,
++					     int32_t val);
++extern int32_t fh_otg_get_param_dma_desc_enable(fh_otg_core_if_t * core_if);
++#define fh_param_dma_desc_enable_default 1
++
++/** The DMA Burst size (applicable only for External DMA
++ * Mode). 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
++ */
++extern int fh_otg_set_param_dma_burst_size(fh_otg_core_if_t * core_if,
++					    int32_t val);
++extern int32_t fh_otg_get_param_dma_burst_size(fh_otg_core_if_t * core_if);
++#define fh_param_dma_burst_size_default 32
++
++/**
++ * Specifies the maximum speed of operation in host and device mode.
++ * The actual speed depends on the speed of the attached device and
++ * the value of phy_type.
++ * 0 - High Speed (default)
++ * 1 - Full Speed
++ */
++extern int fh_otg_set_param_speed(fh_otg_core_if_t * core_if, int32_t val);
++extern int32_t fh_otg_get_param_speed(fh_otg_core_if_t * core_if);
++#define fh_param_speed_default 0
++#define FH_SPEED_PARAM_HIGH 0
++#define FH_SPEED_PARAM_FULL 1
++
++/** Specifies whether low power mode is supported when attached
++ *	to a Full Speed or Low Speed device in host mode.
++ * 0 - Don't support low power mode (default)
++ * 1 - Support low power mode
++ */
++extern int fh_otg_set_param_host_support_fs_ls_low_power(fh_otg_core_if_t *
++							  core_if, int32_t val);
++extern int32_t fh_otg_get_param_host_support_fs_ls_low_power(fh_otg_core_if_t
++							      * core_if);
++#define fh_param_host_support_fs_ls_low_power_default 0
++
++/** Specifies the PHY clock rate in low power mode when connected to a
++ * Low Speed device in host mode. This parameter is applicable only if
++ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
++ * then it defaults to 6 MHz, otherwise 48 MHz.
++ *
++ * 0 - 48 MHz
++ * 1 - 6 MHz
++ */
++extern int fh_otg_set_param_host_ls_low_power_phy_clk(fh_otg_core_if_t *
++						       core_if, int32_t val);
++extern int32_t fh_otg_get_param_host_ls_low_power_phy_clk(fh_otg_core_if_t *
++							   core_if);
++#define fh_param_host_ls_low_power_phy_clk_default 0
++#define FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
++#define FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
++
++#define FH_CC_CLK_FREQ_30MHZ 0x7530
++
++/**
++ * 0 - Use cC FIFO size parameters
++ * 1 - Allow dynamic FIFO sizing (default)
++ */
++extern int fh_otg_set_param_enable_dynamic_fifo(fh_otg_core_if_t * core_if,
++						 int32_t val);
++extern int32_t fh_otg_get_param_enable_dynamic_fifo(fh_otg_core_if_t *
++						     core_if);
++#define fh_param_enable_dynamic_fifo_default 1
++
++/** Total number of 4-byte words in the data FIFO memory. This
++ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
++ * Tx FIFOs.
++ * 32 to 32768 (default 8192)
++ * Note: The total FIFO memory depth in the FPGA configuration is 8192.
++ */
++extern int fh_otg_set_param_data_fifo_size(fh_otg_core_if_t * core_if,
++					    int32_t val);
++extern int32_t fh_otg_get_param_data_fifo_size(fh_otg_core_if_t * core_if);
++#define fh_param_data_fifo_size_default 2560
++
++/** Number of 4-byte words in the Rx FIFO in device mode when dynamic
++ * FIFO sizing is enabled.
++ * 16 to 32768 (default 1064)
++ */
++extern int fh_otg_set_param_dev_rx_fifo_size(fh_otg_core_if_t * core_if,
++					      int32_t val);
++extern int32_t fh_otg_get_param_dev_rx_fifo_size(fh_otg_core_if_t * core_if);
++#define fh_param_dev_rx_fifo_size_default 1064
++
++/** Number of 4-byte words in the non-periodic Tx FIFO in device mode
++ * when dynamic FIFO sizing is enabled.
++ * 16 to 32768 (default 1024)
++ */
++extern int fh_otg_set_param_dev_nperio_tx_fifo_size(fh_otg_core_if_t *
++						     core_if, int32_t val);
++extern int32_t fh_otg_get_param_dev_nperio_tx_fifo_size(fh_otg_core_if_t *
++							 core_if);
++#define fh_param_dev_nperio_tx_fifo_size_default 128
++
++/** Number of 4-byte words in each of the periodic Tx FIFOs in device
++ * mode when dynamic FIFO sizing is enabled.
++ * 4 to 768 (default 256)
++ */
++extern int fh_otg_set_param_dev_perio_tx_fifo_size(fh_otg_core_if_t * core_if,
++						    int32_t val, int fifo_num);
++extern int32_t fh_otg_get_param_dev_perio_tx_fifo_size(fh_otg_core_if_t *
++							core_if, int fifo_num);
++#define fh_param_dev_perio_tx_fifo_size_default 256
++
++/** Number of 4-byte words in the Rx FIFO in host mode when dynamic
++ * FIFO sizing is enabled.
++ * 16 to 32768 (default 1024)
++ */
++extern int fh_otg_set_param_host_rx_fifo_size(fh_otg_core_if_t * core_if,
++					       int32_t val);
++extern int32_t fh_otg_get_param_host_rx_fifo_size(fh_otg_core_if_t * core_if);
++#define fh_param_host_rx_fifo_size_default 512
++
++/** Number of 4-byte words in the non-periodic Tx FIFO in host mode
++ * when Dynamic FIFO sizing is enabled in the core.
++ * 16 to 32768 (default 1024)
++ */
++extern int fh_otg_set_param_host_nperio_tx_fifo_size(fh_otg_core_if_t *
++						      core_if, int32_t val);
++extern int32_t fh_otg_get_param_host_nperio_tx_fifo_size(fh_otg_core_if_t *
++							  core_if);
++#define fh_param_host_nperio_tx_fifo_size_default 128
++
++/** Number of 4-byte words in the host periodic Tx FIFO when dynamic
++ * FIFO sizing is enabled.
++ * 16 to 32768 (default 1024)
++ */
++extern int fh_otg_set_param_host_perio_tx_fifo_size(fh_otg_core_if_t *
++						     core_if, int32_t val);
++extern int32_t fh_otg_get_param_host_perio_tx_fifo_size(fh_otg_core_if_t *
++							 core_if);
++#define fh_param_host_perio_tx_fifo_size_default 256
++
++/** The maximum transfer size supported in bytes.
++ * 2047 to 65,535  (default 65,535)
++ */
++extern int fh_otg_set_param_max_transfer_size(fh_otg_core_if_t * core_if,
++					       int32_t val);
++extern int32_t fh_otg_get_param_max_transfer_size(fh_otg_core_if_t * core_if);
++#define fh_param_max_transfer_size_default 65535
++
++/** The maximum number of packets in a transfer.
++ * 15 to 511  (default 511)
++ */
++extern int fh_otg_set_param_max_packet_count(fh_otg_core_if_t * core_if,
++					      int32_t val);
++extern int32_t fh_otg_get_param_max_packet_count(fh_otg_core_if_t * core_if);
++#define fh_param_max_packet_count_default 511
++
++/** The number of host channel registers to use.
++ * 1 to 16 (default 12)
++ * Note: The FPGA configuration supports a maximum of 12 host channels.
++ */
++extern int fh_otg_set_param_host_channels(fh_otg_core_if_t * core_if,
++					   int32_t val);
++extern int32_t fh_otg_get_param_host_channels(fh_otg_core_if_t * core_if);
++#define fh_param_host_channels_default 16
++
++/** The number of endpoints in addition to EP0 available for device
++ * mode operations.
++ * 1 to 15 (default 6 IN and OUT)
++ * Note: The FPGA configuration supports a maximum of 6 IN and OUT
++ * endpoints in addition to EP0.
++ */
++extern int fh_otg_set_param_dev_endpoints(fh_otg_core_if_t * core_if,
++					   int32_t val);
++extern int32_t fh_otg_get_param_dev_endpoints(fh_otg_core_if_t * core_if);
++#define fh_param_dev_endpoints_default 8
++
++/**
++ * Specifies the type of PHY interface to use. By default, the driver
++ * will automatically detect the phy_type.
++ *
++ * 0 - Full Speed PHY
++ * 1 - UTMI+ (default)
++ * 2 - ULPI
++ */
++extern int fh_otg_set_param_phy_type(fh_otg_core_if_t * core_if, int32_t val);
++extern int32_t fh_otg_get_param_phy_type(fh_otg_core_if_t * core_if);
++#define FH_PHY_TYPE_PARAM_FS 0
++#define FH_PHY_TYPE_PARAM_UTMI 1
++#define FH_PHY_TYPE_PARAM_ULPI 2
++#define fh_param_phy_type_default FH_PHY_TYPE_PARAM_UTMI
++
++/**
++ * Specifies the UTMI+ Data Width. This parameter is
++ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
++ * PHY_TYPE, this parameter indicates the data width between
++ * the MAC and the ULPI Wrapper.) Also, this parameter is
++ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
++ * to "8 and 16 bits", meaning that the core has been
++ * configured to work at either data path width.
++ *
++ * 8 or 16 bits (default 16)
++ */
++extern int fh_otg_set_param_phy_utmi_width(fh_otg_core_if_t * core_if,
++					    int32_t val);
++extern int32_t fh_otg_get_param_phy_utmi_width(fh_otg_core_if_t * core_if);
++#define fh_param_phy_utmi_width_default 16
++
++/**
++ * Specifies whether the ULPI operates at double or single
++ * data rate. This parameter is only applicable if PHY_TYPE is
++ * ULPI.
++ *
++ * 0 - single data rate ULPI interface with 8 bit wide data
++ * bus (default)
++ * 1 - double data rate ULPI interface with 4 bit wide data
++ * bus
++ */
++extern int fh_otg_set_param_phy_ulpi_ddr(fh_otg_core_if_t * core_if,
++					  int32_t val);
++extern int32_t fh_otg_get_param_phy_ulpi_ddr(fh_otg_core_if_t * core_if);
++#define fh_param_phy_ulpi_ddr_default 0
++
++/**
++ * Specifies whether to use the internal or external supply to
++ * drive the vbus with a ULPI phy.
++ */
++extern int fh_otg_set_param_phy_ulpi_ext_vbus(fh_otg_core_if_t * core_if,
++					       int32_t val);
++extern int32_t fh_otg_get_param_phy_ulpi_ext_vbus(fh_otg_core_if_t * core_if);
++#define FH_PHY_ULPI_INTERNAL_VBUS 0
++#define FH_PHY_ULPI_EXTERNAL_VBUS 1
++#define fh_param_phy_ulpi_ext_vbus_default FH_PHY_ULPI_INTERNAL_VBUS
++
++/**
++ * Specifies whether to use the I2C interface for full speed PHY. This
++ * parameter is only applicable if PHY_TYPE is FS.
++ * 0 - No (default)
++ * 1 - Yes
++ */
++extern int fh_otg_set_param_i2c_enable(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_i2c_enable(fh_otg_core_if_t * core_if);
++#define fh_param_i2c_enable_default 0
++
++extern int fh_otg_set_param_ulpi_fs_ls(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_ulpi_fs_ls(fh_otg_core_if_t * core_if);
++#define fh_param_ulpi_fs_ls_default 0
++
++extern int fh_otg_set_param_ts_dline(fh_otg_core_if_t * core_if, int32_t val);
++extern int32_t fh_otg_get_param_ts_dline(fh_otg_core_if_t * core_if);
++#define fh_param_ts_dline_default 0
++
++/**
++ * Specifies whether dedicated transmit FIFOs are
++ * enabled for non-periodic IN endpoints in device mode
++ * 0 - No
++ * 1 - Yes
++ */
++extern int fh_otg_set_param_en_multiple_tx_fifo(fh_otg_core_if_t * core_if,
++						 int32_t val);
++extern int32_t fh_otg_get_param_en_multiple_tx_fifo(fh_otg_core_if_t *
++						     core_if);
++#define fh_param_en_multiple_tx_fifo_default 1
++
++/** Number of 4-byte words in each of the Tx FIFOs in device
++ * mode when dynamic FIFO sizing is enabled.
++ * 4 to 768 (default 256)
++ */
++extern int fh_otg_set_param_dev_tx_fifo_size(fh_otg_core_if_t * core_if,
++					      int fifo_num, int32_t val);
++extern int32_t fh_otg_get_param_dev_tx_fifo_size(fh_otg_core_if_t * core_if,
++						  int fifo_num);
++#define fh_param_dev_tx_fifo_size_default 256
++
++/** Thresholding enable flag:
++ * bit 0 - enable non-ISO Tx thresholding
++ * bit 1 - enable ISO Tx thresholding
++ * bit 2 - enable Rx thresholding
++ */
++extern int fh_otg_set_param_thr_ctl(fh_otg_core_if_t * core_if, int32_t val);
++extern int32_t fh_otg_get_thr_ctl(fh_otg_core_if_t * core_if, int fifo_num);
++#define fh_param_thr_ctl_default 0
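++/* Example: thr_ctl = 0x5 (bits 0 and 2 set) enables non-ISO Tx and Rx
++ * thresholding while leaving ISO Tx thresholding disabled. */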
++
++/** Thresholding length for Tx
++ * FIFOs in 32 bit DWORDs
++ */
++extern int fh_otg_set_param_tx_thr_length(fh_otg_core_if_t * core_if,
++					   int32_t val);
++extern int32_t fh_otg_get_tx_thr_length(fh_otg_core_if_t * core_if);
++#define fh_param_tx_thr_length_default 64
++
++/** Thresholding length for Rx
++ *	FIFOs in 32 bit DWORDs
++ */
++extern int fh_otg_set_param_rx_thr_length(fh_otg_core_if_t * core_if,
++					   int32_t val);
++extern int32_t fh_otg_get_rx_thr_length(fh_otg_core_if_t * core_if);
++#define fh_param_rx_thr_length_default 64
++
++/**
++ * Specifies whether LPM (Link Power Management) support is enabled
++ */
++extern int fh_otg_set_param_lpm_enable(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_lpm_enable(fh_otg_core_if_t * core_if);
++#define fh_param_lpm_enable_default 1
++
++/**
++ * Specifies whether LPM Errata (Link Power Management) support is enabled
++ */
++extern int fh_otg_set_param_besl_enable(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_besl_enable(fh_otg_core_if_t * core_if);
++#define fh_param_besl_enable_default 0
++
++/**
++ * Specifies baseline_besl default value
++ */
++extern int fh_otg_set_param_baseline_besl(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_baseline_besl(fh_otg_core_if_t * core_if);
++#define fh_param_baseline_besl_default 0
++
++/**
++ * Specifies deep_besl default value
++ */
++extern int fh_otg_set_param_deep_besl(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_deep_besl(fh_otg_core_if_t * core_if);
++#define fh_param_deep_besl_default 15
++
++/**
++ * Specifies whether PTI enhancement is enabled
++ */
++extern int fh_otg_set_param_pti_enable(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_pti_enable(fh_otg_core_if_t * core_if);
++#define fh_param_pti_enable_default 0
++
++/**
++ * Specifies whether MPI enhancement is enabled
++ */
++extern int fh_otg_set_param_mpi_enable(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_mpi_enable(fh_otg_core_if_t * core_if);
++#define fh_param_mpi_enable_default 0
++
++/**
++ * Specifies whether ADP capability is enabled
++ */
++extern int fh_otg_set_param_adp_enable(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_adp_enable(fh_otg_core_if_t * core_if);
++#define fh_param_adp_enable_default 0
++
++/**
++ * Specifies whether IC_USB capability is enabled
++ */
++
++extern int fh_otg_set_param_ic_usb_cap(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_ic_usb_cap(fh_otg_core_if_t * core_if);
++#define fh_param_ic_usb_cap_default 0
++
++extern int fh_otg_set_param_ahb_thr_ratio(fh_otg_core_if_t * core_if,
++					   int32_t val);
++extern int32_t fh_otg_get_param_ahb_thr_ratio(fh_otg_core_if_t * core_if);
++#define fh_param_ahb_thr_ratio_default 0
++
++extern int fh_otg_set_param_power_down(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_power_down(fh_otg_core_if_t * core_if);
++#define fh_param_power_down_default 0
++
++extern int fh_otg_set_param_reload_ctl(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_reload_ctl(fh_otg_core_if_t * core_if);
++#define fh_param_reload_ctl_default 0
++
++extern int fh_otg_set_param_dev_out_nak(fh_otg_core_if_t * core_if,
++					 int32_t val);
++extern int32_t fh_otg_get_param_dev_out_nak(fh_otg_core_if_t * core_if);
++#define fh_param_dev_out_nak_default 0
++
++extern int fh_otg_set_param_cont_on_bna(fh_otg_core_if_t * core_if,
++					 int32_t val);
++extern int32_t fh_otg_get_param_cont_on_bna(fh_otg_core_if_t * core_if);
++#define fh_param_cont_on_bna_default 0
++
++extern int fh_otg_set_param_ahb_single(fh_otg_core_if_t * core_if,
++					int32_t val);
++extern int32_t fh_otg_get_param_ahb_single(fh_otg_core_if_t * core_if);
++#define fh_param_ahb_single_default 0
++
++extern int fh_otg_set_param_otg_ver(fh_otg_core_if_t * core_if, int32_t val);
++extern int32_t fh_otg_get_param_otg_ver(fh_otg_core_if_t * core_if);
++#define fh_param_otg_ver_default 0
++
++/** @} */
++
++/** @name Access to registers and bit-fields */
++
++/**
++ * Dump core registers and SPRAM
++ */
++extern void fh_otg_dump_dev_registers(fh_otg_core_if_t * _core_if);
++extern void fh_otg_dump_spram(fh_otg_core_if_t * _core_if);
++extern void fh_otg_dump_host_registers(fh_otg_core_if_t * _core_if);
++extern void fh_otg_dump_global_registers(fh_otg_core_if_t * _core_if);
++
++/**
++ * Get host negotiation status.
++ */
++extern uint32_t fh_otg_get_hnpstatus(fh_otg_core_if_t * core_if);
++
++/**
++ * Get srp status
++ */
++extern uint32_t fh_otg_get_srpstatus(fh_otg_core_if_t * core_if);
++
++/**
++ * Set hnpreq bit in the GOTGCTL register.
++ */
++extern void fh_otg_set_hnpreq(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get Content of SNPSID register.
++ */
++extern uint32_t fh_otg_get_gsnpsid(fh_otg_core_if_t * core_if);
++
++/**
++ * Get current mode.
++ * Returns 0 if in device mode, and 1 if in host mode.
++ */
++extern uint32_t fh_otg_get_mode(fh_otg_core_if_t * core_if);
++
++/**
++ * Get value of hnpcapable field in the GUSBCFG register
++ */
++extern uint32_t fh_otg_get_hnpcapable(fh_otg_core_if_t * core_if);
++/**
++ * Set value of hnpcapable field in the GUSBCFG register
++ */
++extern void fh_otg_set_hnpcapable(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get value of srpcapable field in the GUSBCFG register
++ */
++extern uint32_t fh_otg_get_srpcapable(fh_otg_core_if_t * core_if);
++/**
++ * Set value of srpcapable field in the GUSBCFG register
++ */
++extern void fh_otg_set_srpcapable(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get value of devspeed field in the DCFG register
++ */
++extern uint32_t fh_otg_get_devspeed(fh_otg_core_if_t * core_if);
++/**
++ * Set value of devspeed field in the DCFG register
++ */
++extern void fh_otg_set_devspeed(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get the value of busconnected field from the HPRT0 register
++ */
++extern uint32_t fh_otg_get_busconnected(fh_otg_core_if_t * core_if);
++
++/**
++ * Gets the device enumeration Speed.
++ */
++extern uint32_t fh_otg_get_enumspeed(fh_otg_core_if_t * core_if);
++
++/**
++ * Get value of prtpwr field from the HPRT0 register
++ */
++extern uint32_t fh_otg_get_prtpower(fh_otg_core_if_t * core_if);
++
++/**
++ * Get value of flag indicating core state - hibernated or not
++ */
++extern uint32_t fh_otg_get_core_state(fh_otg_core_if_t * core_if);
++
++/**
++ * Set value of prtpwr field from the HPRT0 register
++ */
++extern void fh_otg_set_prtpower(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get value of prtsusp field from the HPRT0 register
++ */
++extern uint32_t fh_otg_get_prtsuspend(fh_otg_core_if_t * core_if);
++/**
++ * Set value of prtsusp field from the HPRT0 register
++ */
++extern void fh_otg_set_prtsuspend(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get value of ModeChTimEn field from the HCFG register
++ */
++extern uint32_t fh_otg_get_mode_ch_tim(fh_otg_core_if_t * core_if);
++/**
++ * Set value of ModeChTimEn field from the HCFG register
++ */
++extern void fh_otg_set_mode_ch_tim(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get value of Frame Interval field from the HFIR register
++ */
++extern uint32_t fh_otg_get_fr_interval(fh_otg_core_if_t * core_if);
++/**
++ * Set value of Frame Interval field from the HFIR register
++ */
++extern void fh_otg_set_fr_interval(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Set value of prtres field from the HPRT0 register
++ *FIXME Remove?
++ */
++extern void fh_otg_set_prtresume(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get value of rmtwkupsig bit in DCTL register
++ */
++extern uint32_t fh_otg_get_remotewakesig(fh_otg_core_if_t * core_if);
++
++/**
++ * Get value of besl_reject bit in DCTL register
++ */
++extern uint32_t fh_otg_get_beslreject(fh_otg_core_if_t * core_if);
++
++/**
++ * Set value of besl_reject bit in DCTL register
++ */
++extern void fh_otg_set_beslreject(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get value of prt_sleep_sts field from the GLPMCFG register
++ */
++extern uint32_t fh_otg_get_lpm_portsleepstatus(fh_otg_core_if_t * core_if);
++
++/**
++ * Get value of rem_wkup_en field from the GLPMCFG register
++ */
++extern uint32_t fh_otg_get_lpm_remotewakeenabled(fh_otg_core_if_t * core_if);
++
++/**
++ * Get value of appl_resp field from the GLPMCFG register
++ */
++extern uint32_t fh_otg_get_lpmresponse(fh_otg_core_if_t * core_if);
++/**
++ * Set value of appl_resp field from the GLPMCFG register
++ */
++extern void fh_otg_set_lpmresponse(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get value of hsic_connect field from the GLPMCFG register
++ */
++extern uint32_t fh_otg_get_hsic_connect(fh_otg_core_if_t * core_if);
++/**
++ * Set value of hsic_connect field from the GLPMCFG register
++ */
++extern void fh_otg_set_hsic_connect(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * Get value of inv_sel_hsic field from the GLPMCFG register.
++ */
++extern uint32_t fh_otg_get_inv_sel_hsic(fh_otg_core_if_t * core_if);
++/**
++ * Set value of inv_sel_hsic field from the GLPMCFG register.
++ */
++extern void fh_otg_set_inv_sel_hsic(fh_otg_core_if_t * core_if, uint32_t val);
++/**
++ * Set value of hird_thresh field from the GLPMCFG register.
++ */
++extern void fh_otg_set_hirdthresh(fh_otg_core_if_t * core_if, uint32_t val);
++/**
++ * Get value of hird_thresh field from the GLPMCFG register.
++ */
++extern uint32_t fh_otg_get_hirdthresh(fh_otg_core_if_t * core_if);
++
++
++/*
++ * Some functions for accessing registers
++ */
++
++/**
++ *  GOTGCTL register
++ */
++extern uint32_t fh_otg_get_gotgctl(fh_otg_core_if_t * core_if);
++extern void fh_otg_set_gotgctl(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * GUSBCFG register
++ */
++extern uint32_t fh_otg_get_gusbcfg(fh_otg_core_if_t * core_if);
++extern void fh_otg_set_gusbcfg(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * GRXFSIZ register
++ */
++extern uint32_t fh_otg_get_grxfsiz(fh_otg_core_if_t * core_if);
++extern void fh_otg_set_grxfsiz(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * GNPTXFSIZ register
++ */
++extern uint32_t fh_otg_get_gnptxfsiz(fh_otg_core_if_t * core_if);
++extern void fh_otg_set_gnptxfsiz(fh_otg_core_if_t * core_if, uint32_t val);
++
++extern uint32_t fh_otg_get_gpvndctl(fh_otg_core_if_t * core_if);
++extern void fh_otg_set_gpvndctl(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * GGPIO register
++ */
++extern uint32_t fh_otg_get_ggpio(fh_otg_core_if_t * core_if);
++extern void fh_otg_set_ggpio(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * GUID register
++ */
++extern uint32_t fh_otg_get_guid(fh_otg_core_if_t * core_if);
++extern void fh_otg_set_guid(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * HPRT0 register
++ */
++extern uint32_t fh_otg_get_hprt0(fh_otg_core_if_t * core_if);
++extern void fh_otg_set_hprt0(fh_otg_core_if_t * core_if, uint32_t val);
++
++/**
++ * GHPTXFSIZE
++ */
++extern uint32_t fh_otg_get_hptxfsiz(fh_otg_core_if_t * core_if);
++
++/** @} */
++
++#endif /* __FH_CORE_IF_H__ */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_dbg.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_dbg.h
+new file mode 100644
+index 00000000..661bb020
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_dbg.h
+@@ -0,0 +1,113 @@
++/* ==========================================================================
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ * 
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ * 
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++#ifndef __FH_OTG_DBG_H__
++#define __FH_OTG_DBG_H__
++
++/** @file
++ * This file defines debug levels.
++ * Debugging support vanishes in non-debug builds.  
++ */
++
++/**
++ * The Debug Level bit-mask variable.
++ */
++extern uint32_t g_dbg_lvl;
++/**
++ * Set the Debug Level variable.
++ */
++static inline uint32_t SET_DEBUG_LEVEL(const uint32_t new)
++{
++	uint32_t old = g_dbg_lvl;
++	g_dbg_lvl = new;
++	return old;
++}
++
++/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */
++#define DBG_CIL		(0x2)
++/** When debug level has the DBG_CILV bit set, display CIL Verbose debug
++ * messages */
++#define DBG_CILV	(0x20)
++/**  When debug level has the DBG_PCD bit set, display PCD (Device) debug
++ *  messages */
++#define DBG_PCD		(0x4)
++/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug
++ * messages */
++#define DBG_PCDV	(0x40)
++/** When debug level has the DBG_HCD bit set, display Host debug messages */
++#define DBG_HCD		(0x8)
++/** When debug level has the DBG_HCDV bit set, display Verbose Host debug
++ * messages */
++#define DBG_HCDV	(0x80)
++/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host
++ *  mode. */
++#define DBG_HCD_URB	(0x800)
++
++/** When debug level has any bit set, display debug messages */
++#define DBG_ANY		(0xFF)
++
++/** All debug messages off */
++#define DBG_OFF		0
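++
++/*
++ * Illustrative usage (a sketch, not part of the driver proper): the masks
++ * above can be OR-ed together and installed at run time, for example
++ *
++ *	uint32_t old = SET_DEBUG_LEVEL(DBG_CIL | DBG_PCD);
++ *	...
++ *	SET_DEBUG_LEVEL(old);
++ *
++ * which prints CIL and PCD (device) messages while everything else stays
++ * quiet, then restores the previous mask.
++ */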
++
++/** Prefix string for FH_DEBUG print macros. */
++#define USB_FH "FH_otg: "
++
++/** 
++ * Print a debug message when the Global debug level variable contains
++ * the bit defined in <code>lvl</code>.
++ *
++ * @param[in] lvl - Debug level, use one of the DBG_ constants above.
++ * @param[in] x - like printf
++ *
++ *    Example:<p>
++ * <code>
++ *      FH_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr);
++ * </code>
++ * <br>
++ * results in:<br> 
++ * <code>
++ * FH_otg: fh_otg_cil_init(ca867000)
++ * </code>
++ */
++#ifdef DEBUG
++
++# define FH_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)__FH_DEBUG(USB_FH x ); }while(0)
++# define FH_DEBUGP(x...)	FH_DEBUGPL(DBG_ANY, x )
++
++# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl)
++
++#else
++
++# define FH_DEBUGPL(lvl, x...) do{}while(0)
++# define FH_DEBUGP(x...)
++
++# define CHK_DEBUG_LEVEL(level) (0)
++
++#endif /*DEBUG*/
++#endif
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.c
+new file mode 100644
+index 00000000..07a97e10
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.c
+@@ -0,0 +1,1463 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_driver.c $
++ * $Revision: #105 $
++ * $Date: 2015/10/13 $
++ * $Change: 2974245 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++/** @file
++ * The fh_otg_driver module provides the initialization and cleanup entry
++ * points for the FH_otg driver. This module will be dynamically installed
++ * after Linux is booted using the insmod command. When the module is
++ * installed, the fh_otg_driver_init function is called. When the module is
++ * removed (using rmmod), the fh_otg_driver_cleanup function is called.
++ *
++ * This module also defines a data structure for the fh_otg_driver, which is
++ * used in conjunction with the standard ARM lm_device structure. These
++ * structures allow the OTG driver to comply with the standard Linux driver
++ * model in which devices and drivers are registered with a bus driver. This
++ * has the benefit that Linux can expose attributes of the driver and device
++ * in its special sysfs file system. Users can then read or write files in
++ * this file system to perform diagnostics on the driver components or the
++ * device.
++ */
++
++#include <linux/platform_device.h>
++
++#include "fh_otg_os_dep.h"
++#include "../fh_common_port/fh_os.h"
++#include "fh_otg_dbg.h"
++#include "fh_otg_driver.h"
++#include "fh_otg_attr.h"
++#include "fh_otg_core_if.h"
++#include "fh_otg_pcd_if.h"
++#include "fh_otg_hcd_if.h"
++
++#define FH_DRIVER_VERSION	"3.30a 13-OCT-2015"
++#define FH_DRIVER_DESC		"HS OTG USB Controller driver"
++
++static const char fh_driver_name[] = "fh_otg";
++int g_irq;
++
++extern int pcd_init(struct platform_device *dev, int irq);
++
++extern int hcd_init(struct platform_device *dev, int irq);
++
++extern int pcd_remove(struct platform_device *dev, int irq);
++
++extern void hcd_remove(struct platform_device *dev);
++
++extern void fh_otg_adp_start(fh_otg_core_if_t * core_if, uint8_t is_host);
++
++/*-------------------------------------------------------------------------*/
++/* Encapsulate the module parameter settings */
++
++struct fh_otg_driver_module_params {
++	int32_t opt;
++	int32_t otg_cap;
++	int32_t dma_enable;
++	int32_t dma_desc_enable;
++	int32_t dma_burst_size;
++	int32_t speed;
++	int32_t host_support_fs_ls_low_power;
++	int32_t host_ls_low_power_phy_clk;
++	int32_t enable_dynamic_fifo;
++	int32_t data_fifo_size;
++	int32_t dev_rx_fifo_size;
++	int32_t dev_nperio_tx_fifo_size;
++	uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS];
++	int32_t host_rx_fifo_size;
++	int32_t host_nperio_tx_fifo_size;
++	int32_t host_perio_tx_fifo_size;
++	int32_t max_transfer_size;
++	int32_t max_packet_count;
++	int32_t host_channels;
++	int32_t dev_endpoints;
++	int32_t phy_type;
++	int32_t phy_utmi_width;
++	int32_t phy_ulpi_ddr;
++	int32_t phy_ulpi_ext_vbus;
++	int32_t i2c_enable;
++	int32_t ulpi_fs_ls;
++	int32_t ts_dline;
++	int32_t en_multiple_tx_fifo;
++	uint32_t dev_tx_fifo_size[MAX_TX_FIFOS];
++	uint32_t thr_ctl;
++	uint32_t tx_thr_length;
++	uint32_t rx_thr_length;
++	int32_t pti_enable;
++	int32_t mpi_enable;
++	int32_t lpm_enable;
++	int32_t besl_enable;
++	int32_t baseline_besl;
++	int32_t deep_besl;
++	int32_t ic_usb_cap;
++	int32_t ahb_thr_ratio;
++	int32_t power_down;
++	int32_t reload_ctl;
++	int32_t dev_out_nak;
++	int32_t cont_on_bna;
++	int32_t ahb_single;
++	int32_t otg_ver;
++	int32_t adp_enable;
++};
++
++static struct fh_otg_driver_module_params fh_otg_module_params = {
++	.opt = -1,
++	.otg_cap = 0,
++	.dma_enable = 1,
++	.dma_desc_enable = 1,
++	.dma_burst_size = -1,
++	.speed = 0,
++	.host_support_fs_ls_low_power = 0,
++	.host_ls_low_power_phy_clk = 0,
++	.enable_dynamic_fifo = 1,
++	.data_fifo_size = -1,
++	.dev_rx_fifo_size = 549,
++	.dev_nperio_tx_fifo_size = 256,
++	.dev_perio_tx_fifo_size = {
++				   /* dev_perio_tx_fifo_size_1 */
++				   32,
++				   256,
++				   256,
++				   256,
++				   256,
++				   256,
++				   16,
++				   16,
++				   16,
++				   16,
++				   16,
++				   16,
++				   16,
++				   16,
++				   16 
++				   /* 15 */
++				   },
++	.host_rx_fifo_size = 542,
++	.host_nperio_tx_fifo_size = 256,
++	.host_perio_tx_fifo_size = 512,
++	.max_transfer_size = 65535,
++	.max_packet_count = 511,
++	.host_channels = 16,
++	.dev_endpoints = 5,
++	.phy_type = 1,
++	.phy_utmi_width = 8,
++	.phy_ulpi_ddr = 0,
++	.phy_ulpi_ext_vbus = 0,
++	.i2c_enable = 0,
++	.ulpi_fs_ls = 0,
++	.ts_dline = 0,
++	.en_multiple_tx_fifo = 1,
++	.dev_tx_fifo_size = {
++			     /* dev_tx_fifo_size */
++			     32,
++			     256,
++			     256,
++			     256,
++			     256,
++			     256,
++			     16,
++			     16,
++			     16,
++			     16,
++			     16,
++			     16,
++			     16,
++			     16,
++			     16
++			     /* 15 */
++			     },
++	.thr_ctl = 0,
++	.tx_thr_length = -1,
++	.rx_thr_length = -1,
++	.pti_enable = 0,
++	.mpi_enable = 0,
++	.lpm_enable = 0,
++	.besl_enable = 0,
++	.baseline_besl = 0,
++	.deep_besl = -1,
++	.ic_usb_cap = 0,
++	.ahb_thr_ratio = 0,
++	.power_down = 0,
++	.reload_ctl = 0,
++	.dev_out_nak = 0,
++	.cont_on_bna = 0,
++	.ahb_single = 0,
++	.otg_ver = 0,
++	.adp_enable = -1,
++};
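++
++/*
++ * A value of -1 in the table above means "keep the core's auto-detected
++ * setting"; set_parameters() below skips such fields. Any field can be
++ * overridden at load time via the module parameters declared at the end
++ * of this file, e.g. (illustrative):
++ *
++ *	insmod fh_otg.ko dma_enable=0 dev_rx_fifo_size=512
++ */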
++
++
++/**
++ * Global Debug Level Mask.
++ */
++uint32_t g_dbg_lvl = 0;		/* OFF */
++
++
++/**
++ * This function is called during module initialization
++ * to pass module parameters to the FH_OTG CORE.
++ */
++static int set_parameters(fh_otg_core_if_t * core_if)
++{
++	int retval = 0;
++	int i;
++
++	if (fh_otg_module_params.otg_cap != -1) {
++		retval +=
++		    fh_otg_set_param_otg_cap(core_if,
++					      fh_otg_module_params.otg_cap);
++	}
++	printk(KERN_ERR "dma_enable :%d\n", fh_otg_module_params.dma_enable);
++	if (fh_otg_module_params.dma_enable != -1) {
++		retval +=
++		    fh_otg_set_param_dma_enable(core_if,
++						 fh_otg_module_params.
++						 dma_enable);
++	}
++	printk(KERN_ERR "dma_desc_enable :%d\n", fh_otg_module_params.dma_desc_enable);
++	if (fh_otg_module_params.dma_desc_enable != -1) {
++		retval +=
++		    fh_otg_set_param_dma_desc_enable(core_if,
++						      fh_otg_module_params.
++						      dma_desc_enable);
++	}
++	if (fh_otg_module_params.opt != -1) {
++		retval +=
++		    fh_otg_set_param_opt(core_if, fh_otg_module_params.opt);
++	}
++	if (fh_otg_module_params.dma_burst_size != -1) {
++		retval +=
++		    fh_otg_set_param_dma_burst_size(core_if,
++						     fh_otg_module_params.
++						     dma_burst_size);
++	}
++	if (fh_otg_module_params.host_support_fs_ls_low_power != -1) {
++		retval +=
++		    fh_otg_set_param_host_support_fs_ls_low_power(core_if,
++								   fh_otg_module_params.
++								   host_support_fs_ls_low_power);
++	}
++	if (fh_otg_module_params.enable_dynamic_fifo != -1) {
++		retval +=
++		    fh_otg_set_param_enable_dynamic_fifo(core_if,
++							  fh_otg_module_params.
++							  enable_dynamic_fifo);
++	}
++	if (fh_otg_module_params.data_fifo_size != -1) {
++		retval +=
++		    fh_otg_set_param_data_fifo_size(core_if,
++						     fh_otg_module_params.
++						     data_fifo_size);
++	}
++	if (fh_otg_module_params.dev_rx_fifo_size != -1) {
++		retval +=
++		    fh_otg_set_param_dev_rx_fifo_size(core_if,
++						       fh_otg_module_params.
++						       dev_rx_fifo_size);
++	}
++	if (fh_otg_module_params.dev_nperio_tx_fifo_size != -1) {
++		retval +=
++		    fh_otg_set_param_dev_nperio_tx_fifo_size(core_if,
++							      fh_otg_module_params.
++							      dev_nperio_tx_fifo_size);
++	}
++	if (fh_otg_module_params.host_rx_fifo_size != -1) {
++		retval +=
++		    fh_otg_set_param_host_rx_fifo_size(core_if,
++							fh_otg_module_params.host_rx_fifo_size);
++	}
++	if (fh_otg_module_params.host_nperio_tx_fifo_size != -1) {
++		retval +=
++		    fh_otg_set_param_host_nperio_tx_fifo_size(core_if,
++							       fh_otg_module_params.
++							       host_nperio_tx_fifo_size);
++	}
++	if (fh_otg_module_params.host_perio_tx_fifo_size != -1) {
++		retval +=
++		    fh_otg_set_param_host_perio_tx_fifo_size(core_if,
++							      fh_otg_module_params.
++							      host_perio_tx_fifo_size);
++	}
++	if (fh_otg_module_params.max_transfer_size != -1) {
++		retval +=
++		    fh_otg_set_param_max_transfer_size(core_if,
++							fh_otg_module_params.
++							max_transfer_size);
++	}
++	if (fh_otg_module_params.max_packet_count != -1) {
++		retval +=
++		    fh_otg_set_param_max_packet_count(core_if,
++						       fh_otg_module_params.
++						       max_packet_count);
++	}
++	if (fh_otg_module_params.host_channels != -1) {
++		retval +=
++		    fh_otg_set_param_host_channels(core_if,
++						    fh_otg_module_params.
++						    host_channels);
++	}
++	if (fh_otg_module_params.dev_endpoints != -1) {
++		retval +=
++		    fh_otg_set_param_dev_endpoints(core_if,
++						    fh_otg_module_params.
++						    dev_endpoints);
++	}
++	if (fh_otg_module_params.phy_type != -1) {
++		retval +=
++		    fh_otg_set_param_phy_type(core_if,
++					       fh_otg_module_params.phy_type);
++	}
++	if (fh_otg_module_params.speed != -1) {
++		retval +=
++		    fh_otg_set_param_speed(core_if,
++					    fh_otg_module_params.speed);
++	}
++	if (fh_otg_module_params.host_ls_low_power_phy_clk != -1) {
++		retval +=
++		    fh_otg_set_param_host_ls_low_power_phy_clk(core_if,
++								fh_otg_module_params.
++								host_ls_low_power_phy_clk);
++	}
++	if (fh_otg_module_params.phy_ulpi_ddr != -1) {
++		retval +=
++		    fh_otg_set_param_phy_ulpi_ddr(core_if,
++						   fh_otg_module_params.
++						   phy_ulpi_ddr);
++	}
++	if (fh_otg_module_params.phy_ulpi_ext_vbus != -1) {
++		retval +=
++		    fh_otg_set_param_phy_ulpi_ext_vbus(core_if,
++							fh_otg_module_params.
++							phy_ulpi_ext_vbus);
++	}
++	if (fh_otg_module_params.phy_utmi_width != -1) {
++		retval +=
++		    fh_otg_set_param_phy_utmi_width(core_if,
++						     fh_otg_module_params.
++						     phy_utmi_width);
++	}
++	if (fh_otg_module_params.ulpi_fs_ls != -1) {
++		retval +=
++		    fh_otg_set_param_ulpi_fs_ls(core_if,
++						 fh_otg_module_params.ulpi_fs_ls);
++	}
++	if (fh_otg_module_params.ts_dline != -1) {
++		retval +=
++		    fh_otg_set_param_ts_dline(core_if,
++					       fh_otg_module_params.ts_dline);
++	}
++	if (fh_otg_module_params.i2c_enable != -1) {
++		retval +=
++		    fh_otg_set_param_i2c_enable(core_if,
++						 fh_otg_module_params.
++						 i2c_enable);
++	}
++	if (fh_otg_module_params.en_multiple_tx_fifo != -1) {
++		retval +=
++		    fh_otg_set_param_en_multiple_tx_fifo(core_if,
++							  fh_otg_module_params.
++							  en_multiple_tx_fifo);
++	}
++	for (i = 0; i < 15; i++) {
++		if (fh_otg_module_params.dev_perio_tx_fifo_size[i] != -1) {
++			retval +=
++			    fh_otg_set_param_dev_perio_tx_fifo_size(core_if,
++								     fh_otg_module_params.
++								     dev_perio_tx_fifo_size
++								     [i], i);
++		}
++	}
++
++	for (i = 0; i < 15; i++) {
++		if (fh_otg_module_params.dev_tx_fifo_size[i] != -1) {
++			retval += fh_otg_set_param_dev_tx_fifo_size(core_if,
++								     fh_otg_module_params.
++								     dev_tx_fifo_size
++								     [i], i);
++		}
++	}
++	if (fh_otg_module_params.thr_ctl != -1) {
++		retval +=
++		    fh_otg_set_param_thr_ctl(core_if,
++					      fh_otg_module_params.thr_ctl);
++	}
++	if (fh_otg_module_params.mpi_enable != -1) {
++		retval +=
++		    fh_otg_set_param_mpi_enable(core_if,
++						 fh_otg_module_params.
++						 mpi_enable);
++	}
++	if (fh_otg_module_params.pti_enable != -1) {
++		retval +=
++		    fh_otg_set_param_pti_enable(core_if,
++						 fh_otg_module_params.
++						 pti_enable);
++	}
++	if (fh_otg_module_params.lpm_enable != -1) {
++		retval +=
++		    fh_otg_set_param_lpm_enable(core_if,
++						 fh_otg_module_params.
++						 lpm_enable);
++	}	
++	if (fh_otg_module_params.besl_enable != -1) {
++		retval +=
++		    fh_otg_set_param_besl_enable(core_if,
++						 fh_otg_module_params.
++						 besl_enable);
++	}
++	if (fh_otg_module_params.baseline_besl != -1) {
++		retval +=
++		    fh_otg_set_param_baseline_besl(core_if,
++						 fh_otg_module_params.
++						 baseline_besl);
++	}
++	if (fh_otg_module_params.deep_besl != -1) {
++		retval +=
++		    fh_otg_set_param_deep_besl(core_if,
++						 fh_otg_module_params.
++						 deep_besl);
++	}		
++	if (fh_otg_module_params.ic_usb_cap != -1) {
++		retval +=
++		    fh_otg_set_param_ic_usb_cap(core_if,
++						 fh_otg_module_params.
++						 ic_usb_cap);
++	}
++	if (fh_otg_module_params.tx_thr_length != -1) {
++		retval +=
++		    fh_otg_set_param_tx_thr_length(core_if,
++						    fh_otg_module_params.tx_thr_length);
++	}
++	if (fh_otg_module_params.rx_thr_length != -1) {
++		retval +=
++		    fh_otg_set_param_rx_thr_length(core_if,
++						    fh_otg_module_params.
++						    rx_thr_length);
++	}
++	if (fh_otg_module_params.ahb_thr_ratio != -1) {
++		retval +=
++		    fh_otg_set_param_ahb_thr_ratio(core_if,
++						    fh_otg_module_params.ahb_thr_ratio);
++	}
++	if (fh_otg_module_params.power_down != -1) {
++		retval +=
++		    fh_otg_set_param_power_down(core_if,
++						 fh_otg_module_params.power_down);
++	}
++	if (fh_otg_module_params.reload_ctl != -1) {
++		retval +=
++		    fh_otg_set_param_reload_ctl(core_if,
++						 fh_otg_module_params.reload_ctl);
++	}
++
++	if (fh_otg_module_params.dev_out_nak != -1) {
++		retval +=
++			fh_otg_set_param_dev_out_nak(core_if,
++			fh_otg_module_params.dev_out_nak);
++	}
++
++	if (fh_otg_module_params.cont_on_bna != -1) {
++		retval +=
++			fh_otg_set_param_cont_on_bna(core_if,
++			fh_otg_module_params.cont_on_bna);
++	}
++
++	if (fh_otg_module_params.ahb_single != -1) {
++		retval +=
++			fh_otg_set_param_ahb_single(core_if,
++			fh_otg_module_params.ahb_single);
++	}
++
++	if (fh_otg_module_params.otg_ver != -1) {
++		retval +=
++		    fh_otg_set_param_otg_ver(core_if,
++					      fh_otg_module_params.otg_ver);
++	}
++	if (fh_otg_module_params.adp_enable != -1) {
++		retval +=
++		    fh_otg_set_param_adp_enable(core_if,
++						 fh_otg_module_params.
++						 adp_enable);
++	}
++	return retval;
++}
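++
++/*
++ * Note: each fh_otg_set_param_*() call above returns non-zero when the
++ * requested value is rejected, so a non-zero sum in retval causes the
++ * probe routine to bail out with -EINVAL rather than run with a partially
++ * applied configuration.
++ */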
++
++
++/**
++ * This function is the top level interrupt handler for the Common
++ * (Device and host modes) interrupts.
++ */
++static irqreturn_t fh_otg_common_irq(int irq, void *dev)
++{
++	int32_t retval = IRQ_NONE;
++
++	retval = fh_otg_handle_common_intr(dev);
++	if (retval != 0) {
++		S3C2410X_CLEAR_EINTPEND();
++	}
++	return IRQ_RETVAL(retval);
++}
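++
++/*
++ * For reference: IRQ_RETVAL(x) evaluates to IRQ_HANDLED when x is non-zero
++ * and IRQ_NONE otherwise, so on this shared line the handler only claims
++ * the interrupt when fh_otg_handle_common_intr() actually serviced one.
++ */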
++
++/**
++ * This function is called when a lm_device is unregistered with the
++ * fh_otg_driver. This happens, for example, when the rmmod command is
++ * executed. The device may or may not be electrically present. If it is
++ * present, the driver stops device processing. Any resources used on behalf
++ * of this device are freed.
++ *
++ * @param _dev
++ */
++static int fh_otg_driver_remove(struct platform_device *dev)
++{
++	int retval = 0;
++	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
++
++
++	printk(KERN_ERR "%s (%p)\n", __func__, dev);
++
++	if (!otg_dev) {
++		/* Memory allocation for the fh_otg_device failed. */
++		FH_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__);
++		return -EINVAL;
++	}
++#ifndef FH_DEVICE_ONLY
++	if (otg_dev->hcd) {
++		hcd_remove(dev);
++	} else {
++		FH_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__);
++
++	}
++#endif
++
++#ifndef FH_HOST_ONLY
++	if (otg_dev->pcd) {
++		pcd_remove(dev, g_irq);
++	} else {
++		FH_DEBUGPL(DBG_ANY, "%s: otg_dev->pcd NULL!\n", __func__);
++
++	}
++#endif
++	/*
++	 * Free the IRQ
++	 */	 
++	if (otg_dev->common_irq_installed) {
++		free_irq(g_irq, otg_dev);
++	} else {
++		FH_DEBUGPL(DBG_ANY, "%s: There is no installed irq!\n", __func__);
++
++	}
++
++	if (otg_dev->core_if) {
++		fh_otg_cil_remove(otg_dev->core_if);
++	} else {
++		FH_DEBUGPL(DBG_ANY, "%s: otg_dev->core_if NULL!\n", __func__);
++
++	}
++
++	/*
++	 * Remove the device attributes
++	 */
++	//fh_otg_attr_remove(dev);
++
++	/*
++	 * Return the memory. Unmap the registers and release the memory
++	 * region before freeing otg_dev, since the resource range is
++	 * stored inside that structure.
++	 */
++	if (otg_dev->os_dep.base)
++		iounmap(otg_dev->os_dep.base);
++	release_mem_region(otg_dev->os_dep.rsrc_start,
++			   otg_dev->os_dep.rsrc_len);
++	FH_FREE(otg_dev);
++
++	/*
++	 * Clear the drvdata pointer.
++	 */
++	platform_set_drvdata(dev, 0);
++	g_irq = 0;
++	return retval;
++}
++
++/**
++ * This function is called when an lm_device is bound to a
++ * fh_otg_driver. It creates the driver components required to
++ * control the device (CIL, HCD, and PCD) and it initializes the
++ * device. The driver components are stored in a fh_otg_device
++ * structure. A reference to the fh_otg_device is saved in the
++ * lm_device. This allows the driver to access the fh_otg_device
++ * structure on subsequent calls to driver methods for this device.
++ *
++ * @param _dev Bus device
++ */
++static int fh_otg_driver_probe(struct platform_device *dev)
++{
++	int retval = 0;
++	fh_otg_device_t *fh_otg_device;
++	struct resource *res;
++	int irq;
++
++	//printk(KERN_ERR "fh_otg_driver_probe(%p)\n", dev);
++
++	fh_otg_device = FH_ALLOC(sizeof(fh_otg_device_t));
++
++	if (!fh_otg_device) {
++		dev_err(&dev->dev, "kmalloc of fh_otg_device failed\n");
++		return -ENOMEM;
++	}
++
++	memset(fh_otg_device, 0, sizeof(*fh_otg_device));
++	fh_otg_device->os_dep.reg_offset = 0xFFFFFFFF;
++
++	/*
++	 * Map the FH_otg Core memory into virtual address space.
++	 */
++	res = platform_get_resource(dev, IORESOURCE_IRQ, 0);
++	if (!res) {
++		dev_err(&dev->dev,
++			"Found HC with no IRQ. Check %s setup!\n",
++			dev_name(&dev->dev));
++		return -ENODEV;
++	}
++	irq = res->start;
++	g_irq = irq;
++	
++	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
++	if (!res) {
++		dev_err(&dev->dev,
++			"Found HC with no register addr. Check %s setup!\n",
++			dev_name(&dev->dev));
++		return -ENODEV;
++	}
++	fh_otg_device->os_dep.rsrc_start = res->start;
++	fh_otg_device->os_dep.rsrc_len = res->end - res->start + 1;
++
++
++	printk(KERN_ERR "resource: start=%08x, len=%08x\n",
++		    (unsigned)fh_otg_device->os_dep.rsrc_start,
++		    (unsigned)fh_otg_device->os_dep.rsrc_len);
++	
++	if (!request_mem_region
++	    (fh_otg_device->os_dep.rsrc_start, fh_otg_device->os_dep.rsrc_len,
++	     "fh_otg")) {
++		dev_dbg(&dev->dev, "error requesting memory\n");
++		FH_FREE(fh_otg_device);
++		return -EFAULT;
++	}
++
++	fh_otg_device->os_dep.base =
++	    ioremap_nocache(fh_otg_device->os_dep.rsrc_start,
++			    fh_otg_device->os_dep.rsrc_len);
++	if (fh_otg_device->os_dep.base == NULL) {
++		dev_dbg(&dev->dev, "error mapping memory\n");
++		release_mem_region(fh_otg_device->os_dep.rsrc_start,
++				   fh_otg_device->os_dep.rsrc_len);
++		FH_FREE(fh_otg_device);
++		return -EFAULT;
++	}
++	//printk(KERN_INFO "base=0x%p (before adjust) \n",
++	//	fh_otg_device->os_dep.base);
++	fh_otg_device->os_dep.base = (char *)fh_otg_device->os_dep.base;
++	printk(KERN_ERR "base=0x%p (after adjust) \n",
++		fh_otg_device->os_dep.base);
++	printk(KERN_INFO "%s: mapped PA 0x%x to VA 0x%p\n", __func__,
++		(unsigned)fh_otg_device->os_dep.rsrc_start,
++		fh_otg_device->os_dep.base);
++
++	/*
++	 * Initialize driver data to point to the global FH_otg
++	 * Device structure.
++	 */
++
++	dev_dbg(&dev->dev, "fh_otg_device=0x%p\n", fh_otg_device);
++
++	fh_otg_device->core_if = fh_otg_cil_init(fh_otg_device->os_dep.base);
++	if (!fh_otg_device->core_if) {
++		dev_err(&dev->dev, "CIL initialization failed!\n");
++		retval = -ENOMEM;
++		goto fail;
++	}
++
++	/*
++	 * Attempt to ensure this device is really a FH_otg Controller.
++	 * Read and verify the SNPSID register contents. The value should be
++	 * 0x4F542XXX, 0x4F543XXX or 0x4F544XXX, which corresponds to "OT2",
++	 * "OT3" or "OT4", as in "OTG version 2.XX", "3.XX" or "4.XX".
++	 */
++
++	if (((fh_otg_get_gsnpsid(fh_otg_device->core_if) & 0xFFFFF000) !=	0x4F542000) &&
++		((fh_otg_get_gsnpsid(fh_otg_device->core_if) & 0xFFFFF000) != 0x4F543000) && 
++		((fh_otg_get_gsnpsid(fh_otg_device->core_if) & 0xFFFFF000) != 0x4F544000)) {
++		dev_err(&dev->dev, "Bad value for SNPSID: 0x%08x\n",
++			fh_otg_get_gsnpsid(fh_otg_device->core_if));
++		retval = -EINVAL;
++		goto fail;
++	}
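++
++	/*
++	 * For reference: the upper two SNPSID bytes are the ASCII product
++	 * code (0x4F54 = "OT"), the next nibble is the major version checked
++	 * above (2, 3 or 4), and the remaining bits encode the release, so
++	 * e.g. 0x4F54300A would identify a 3.00a core.
++	 */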
++
++	/*
++	 * Validate parameter values.
++	 */
++	if (set_parameters(fh_otg_device->core_if)) {
++		retval = -EINVAL;
++		goto fail;
++	}
++	
++	/*
++	 * Create Device Attributes in sysfs
++	 */
++	/*fh_otg_attr_create(dev);*/
++
++	
++	/*
++	 * Disable the global interrupt until all the interrupt
++	 * handlers are installed.
++	 */
++	fh_otg_disable_global_interrupts(fh_otg_device->core_if);
++
++	/*
++	 * Install the interrupt handler for the common interrupts before
++	 * enabling common interrupts in core_init below.
++	 */
++	retval = request_irq(irq, fh_otg_common_irq,
++			     IRQF_SHARED | IRQF_DISABLED | IRQ_LEVEL, "fh_otg",
++			     fh_otg_device);
++	if (retval) {
++		FH_ERROR("request of irq%d failed\n", irq);
++		retval = -EBUSY;
++		goto fail;
++	} else {
++		fh_otg_device->common_irq_installed = 1;
++	}
++
++	/*
++	 * Initialize the FH_otg core.
++	 */
++	fh_otg_core_init(fh_otg_device->core_if);
++	fh_otg_device->os_dep.pdev = dev;
++	platform_set_drvdata(dev, fh_otg_device);
++
++#ifndef FH_HOST_ONLY
++	/*
++	 * Initialize the PCD
++	 */
++	retval = pcd_init(dev, irq);
++	if (retval != 0) {
++		FH_ERROR("pcd_init failed\n");
++		fh_otg_device->pcd = NULL;
++		goto fail;
++	}
++#endif
++
++#ifndef FH_DEVICE_ONLY
++	/*
++	 * Initialize the HCD
++	 */
++	retval = hcd_init(dev, irq);
++	if (retval != 0) {
++		FH_ERROR("hcd_init failed\n");
++		fh_otg_device->hcd = NULL;
++		goto fail;
++	}
++#endif
++
++	/*
++	 * Enable the global interrupt after all the interrupt
++	 * handlers are installed if there is no ADP support else 
++	 * perform initial actions required for Internal ADP logic.
++	 */
++	if (!fh_otg_get_param_adp_enable(fh_otg_device->core_if))	
++		fh_otg_enable_global_interrupts(fh_otg_device->core_if);
++	else
++		fh_otg_adp_start(fh_otg_device->core_if, 
++							fh_otg_is_host_mode(fh_otg_device->core_if));
++
++
++	return 0;
++
++fail:
++	fh_otg_driver_remove(dev);
++	return retval;
++}
++
++static const struct of_device_id fh_of_match_table[] = {
++	{ .compatible = "brcm,bcm2835-usb", .data = NULL },
++	{ .compatible = "rockchip,rk3066-usb", .data = NULL },
++	{ .compatible = "fullhan,fh2", .data = NULL },
++	{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
++	{},
++};
++MODULE_DEVICE_TABLE(of, fh_of_match_table);
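++
++/*
++ * A device-tree node matched by the table above would look roughly like
++ * the sketch below; the unit address, register window and interrupt number
++ * are placeholders that depend on the SoC:
++ *
++ *	usb@e0700000 {
++ *		compatible = "fullhan,fh2";
++ *		reg = <0xe0700000 0x40000>;
++ *		interrupts = <22>;
++ *	};
++ */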
++
++static struct platform_driver fh_otg_driver = {
++	.driver = {
++		.owner = THIS_MODULE,
++		.name = fh_driver_name,
++		.of_match_table = fh_of_match_table,
++	},
++	.probe = fh_otg_driver_probe,
++	.remove = fh_otg_driver_remove,
++};
++
++
++static int __init fh_otg_driver_init(void)
++{
++	return platform_driver_register(&fh_otg_driver);
++}
++module_init(fh_otg_driver_init);
++
++static void __exit fh_otg_driver_cleanup(void)
++{
++	platform_driver_unregister(&fh_otg_driver);
++}
++
++module_exit(fh_otg_driver_cleanup);
++
++MODULE_DESCRIPTION(FH_DRIVER_DESC);
++MODULE_AUTHOR("Synopsys Inc.");
++MODULE_LICENSE("GPL");
++
++module_param_named(otg_cap, fh_otg_module_params.otg_cap, int, 0444);
++MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None");
++module_param_named(opt, fh_otg_module_params.opt, int, 0444);
++MODULE_PARM_DESC(opt, "OPT Mode");
++module_param_named(dma_enable, fh_otg_module_params.dma_enable, int, 0444);
++MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled");
++
++module_param_named(dma_desc_enable, fh_otg_module_params.dma_desc_enable, int,
++		   0444);
++MODULE_PARM_DESC(dma_desc_enable,
++		 "DMA Desc Mode 0=Address DMA 1=DMA Descriptor enabled");
++
++module_param_named(dma_burst_size, fh_otg_module_params.dma_burst_size, int,
++		   0444);
++MODULE_PARM_DESC(dma_burst_size,
++		 "DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256");
++module_param_named(speed, fh_otg_module_params.speed, int, 0444);
++MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed");
++module_param_named(host_support_fs_ls_low_power,
++		   fh_otg_module_params.host_support_fs_ls_low_power, int,
++		   0444);
++MODULE_PARM_DESC(host_support_fs_ls_low_power,
++		 "Support Low Power w/FS or LS 0=Don't Support 1=Support");
++module_param_named(host_ls_low_power_phy_clk,
++		   fh_otg_module_params.host_ls_low_power_phy_clk, int, 0444);
++MODULE_PARM_DESC(host_ls_low_power_phy_clk,
++		 "Low Speed Low Power Clock 0=48Mhz 1=6Mhz");
++module_param_named(enable_dynamic_fifo,
++		   fh_otg_module_params.enable_dynamic_fifo, int, 0444);
++MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing");
++module_param_named(data_fifo_size, fh_otg_module_params.data_fifo_size, int,
++		   0444);
++MODULE_PARM_DESC(data_fifo_size,
++		 "Total number of words in the data FIFO memory 32-32768");
++module_param_named(dev_rx_fifo_size, fh_otg_module_params.dev_rx_fifo_size,
++		   int, 0444);
++MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
++module_param_named(dev_nperio_tx_fifo_size,
++		   fh_otg_module_params.dev_nperio_tx_fifo_size, int, 0444);
++MODULE_PARM_DESC(dev_nperio_tx_fifo_size,
++		 "Number of words in the non-periodic Tx FIFO 16-32768");
++module_param_named(dev_perio_tx_fifo_size_1,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_1,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_2,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_2,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_3,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_3,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_4,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_4,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_5,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_5,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_6,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_6,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_7,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_7,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_8,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_8,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_9,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_9,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_10,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_10,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_11,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[10], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_11,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_12,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[11], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_12,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_13,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[12], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_13,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_14,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[13], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_14,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(dev_perio_tx_fifo_size_15,
++		   fh_otg_module_params.dev_perio_tx_fifo_size[14], int, 0444);
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_15,
++		 "Number of words in the periodic Tx FIFO 4-768");
++module_param_named(host_rx_fifo_size, fh_otg_module_params.host_rx_fifo_size,
++		   int, 0444);
++MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
++module_param_named(host_nperio_tx_fifo_size,
++		   fh_otg_module_params.host_nperio_tx_fifo_size, int, 0444);
++MODULE_PARM_DESC(host_nperio_tx_fifo_size,
++		 "Number of words in the non-periodic Tx FIFO 16-32768");
++module_param_named(host_perio_tx_fifo_size,
++		   fh_otg_module_params.host_perio_tx_fifo_size, int, 0444);
++MODULE_PARM_DESC(host_perio_tx_fifo_size,
++		 "Number of words in the host periodic Tx FIFO 16-32768");
++module_param_named(max_transfer_size, fh_otg_module_params.max_transfer_size,
++		   int, 0444);
++/** @todo Set the max to 512K, modify checks */
++MODULE_PARM_DESC(max_transfer_size,
++		 "The maximum transfer size supported in bytes 2047-65535");
++module_param_named(max_packet_count, fh_otg_module_params.max_packet_count,
++		   int, 0444);
++MODULE_PARM_DESC(max_packet_count,
++		 "The maximum number of packets in a transfer 15-511");
++module_param_named(host_channels, fh_otg_module_params.host_channels, int,
++		   0444);
++MODULE_PARM_DESC(host_channels,
++		 "The number of host channel registers to use 1-16");
++module_param_named(dev_endpoints, fh_otg_module_params.dev_endpoints, int,
++		   0444);
++MODULE_PARM_DESC(dev_endpoints,
++		 "The number of endpoints in addition to EP0 available for device mode 1-15");
++module_param_named(phy_type, fh_otg_module_params.phy_type, int, 0444);
++MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI");
++module_param_named(phy_utmi_width, fh_otg_module_params.phy_utmi_width, int,
++		   0444);
++MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ Data Width 8 or 16 bits");
++module_param_named(phy_ulpi_ddr, fh_otg_module_params.phy_ulpi_ddr, int, 0444);
++MODULE_PARM_DESC(phy_ulpi_ddr,
++		 "ULPI at double or single data rate 0=Single 1=Double");
++module_param_named(phy_ulpi_ext_vbus, fh_otg_module_params.phy_ulpi_ext_vbus,
++		   int, 0444);
++MODULE_PARM_DESC(phy_ulpi_ext_vbus,
++		 "ULPI PHY using internal or external vbus 0=Internal 1=External");
++module_param_named(i2c_enable, fh_otg_module_params.i2c_enable, int, 0444);
++MODULE_PARM_DESC(i2c_enable, "FS PHY Interface");
++module_param_named(ulpi_fs_ls, fh_otg_module_params.ulpi_fs_ls, int, 0444);
++MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only");
++module_param_named(ts_dline, fh_otg_module_params.ts_dline, int, 0444);
++MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs");
++module_param_named(debug, g_dbg_lvl, int, 0444);
++MODULE_PARM_DESC(debug, "Debug level bit-mask (see DBG_* in fh_otg_dbg.h)");
++
++module_param_named(en_multiple_tx_fifo,
++		   fh_otg_module_params.en_multiple_tx_fifo, int, 0444);
++MODULE_PARM_DESC(en_multiple_tx_fifo,
++		 "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled");
++module_param_named(dev_tx_fifo_size_1,
++		   fh_otg_module_params.dev_tx_fifo_size[0], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_2,
++		   fh_otg_module_params.dev_tx_fifo_size[1], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_3,
++		   fh_otg_module_params.dev_tx_fifo_size[2], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_4,
++		   fh_otg_module_params.dev_tx_fifo_size[3], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_5,
++		   fh_otg_module_params.dev_tx_fifo_size[4], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_6,
++		   fh_otg_module_params.dev_tx_fifo_size[5], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_7,
++		   fh_otg_module_params.dev_tx_fifo_size[6], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_8,
++		   fh_otg_module_params.dev_tx_fifo_size[7], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_9,
++		   fh_otg_module_params.dev_tx_fifo_size[8], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_10,
++		   fh_otg_module_params.dev_tx_fifo_size[9], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_11,
++		   fh_otg_module_params.dev_tx_fifo_size[10], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_12,
++		   fh_otg_module_params.dev_tx_fifo_size[11], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_13,
++		   fh_otg_module_params.dev_tx_fifo_size[12], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_14,
++		   fh_otg_module_params.dev_tx_fifo_size[13], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768");
++module_param_named(dev_tx_fifo_size_15,
++		   fh_otg_module_params.dev_tx_fifo_size[14], int, 0444);
++MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768");
++
++module_param_named(thr_ctl, fh_otg_module_params.thr_ctl, int, 0444);
++MODULE_PARM_DESC(thr_ctl,
++		 "Thresholding enable flag bit 0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- bit 0=disabled 1=enabled");
++module_param_named(tx_thr_length, fh_otg_module_params.tx_thr_length, int,
++		   0444);
++MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs");
++module_param_named(rx_thr_length, fh_otg_module_params.rx_thr_length, int,
++		   0444);
++MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs");
++
++module_param_named(pti_enable, fh_otg_module_params.pti_enable, int, 0444);
++module_param_named(mpi_enable, fh_otg_module_params.mpi_enable, int, 0444);
++module_param_named(lpm_enable, fh_otg_module_params.lpm_enable, int, 0444);
++MODULE_PARM_DESC(lpm_enable, "LPM Enable 0=LPM Disabled 1=LPM Enabled");
++
++module_param_named(besl_enable, fh_otg_module_params.besl_enable, int, 0444);
++MODULE_PARM_DESC(besl_enable, "BESL Enable 0=BESL Disabled 1=BESL Enabled");
++module_param_named(baseline_besl, fh_otg_module_params.baseline_besl, int, 0444);
++MODULE_PARM_DESC(baseline_besl, "Set the baseline besl value");
++module_param_named(deep_besl, fh_otg_module_params.deep_besl, int, 0444);
++MODULE_PARM_DESC(deep_besl, "Set the deep besl value");
++
++module_param_named(ic_usb_cap, fh_otg_module_params.ic_usb_cap, int, 0444);
++MODULE_PARM_DESC(ic_usb_cap,
++		 "IC_USB Capability 0=IC_USB Disabled 1=IC_USB Enabled");
++module_param_named(ahb_thr_ratio, fh_otg_module_params.ahb_thr_ratio, int,
++		   0444);
++MODULE_PARM_DESC(ahb_thr_ratio, "AHB Threshold Ratio");
++module_param_named(power_down, fh_otg_module_params.power_down, int, 0444);
++MODULE_PARM_DESC(power_down, "Power Down Mode");
++module_param_named(reload_ctl, fh_otg_module_params.reload_ctl, int, 0444);
++MODULE_PARM_DESC(reload_ctl, "HFIR Reload Control");
++module_param_named(dev_out_nak, fh_otg_module_params.dev_out_nak, int, 0444);
++MODULE_PARM_DESC(dev_out_nak, "Enable Device OUT NAK");
++module_param_named(cont_on_bna, fh_otg_module_params.cont_on_bna, int, 0444);
++MODULE_PARM_DESC(cont_on_bna, "Enable Continue on BNA");
++module_param_named(ahb_single, fh_otg_module_params.ahb_single, int, 0444);
++MODULE_PARM_DESC(ahb_single, "Enable AHB Single Support");
++module_param_named(adp_enable, fh_otg_module_params.adp_enable, int, 0444);
++MODULE_PARM_DESC(adp_enable, "ADP Enable 0=ADP Disabled 1=ADP Enabled");
++module_param_named(otg_ver, fh_otg_module_params.otg_ver, int, 0444);
++MODULE_PARM_DESC(otg_ver, "OTG revision supported 0=OTG 1.3 1=OTG 2.0");
++
++/** @page "Module Parameters"
++ *
++ * The following parameters may be specified when starting the module.
++ * These parameters define how the FH_otg controller should be
++ * configured. Parameter values are passed to the CIL initialization
++ * function fh_otg_cil_init
++ *
++ * Example: <code>modprobe fh_otg speed=1 otg_cap=1</code>
++ *
++
++ <table>
++ <tr><td>Parameter Name</td><td>Meaning</td></tr>
++
++ <tr>
++ <td>otg_cap</td>
++ <td>Specifies the OTG capabilities. The driver will automatically detect the
++ value for this parameter if none is specified.
++ - 0: HNP and SRP capable (default, if available)
++ - 1: SRP Only capable
++ - 2: No HNP/SRP capable
++ </td></tr>
++
++ <tr>
++ <td>dma_enable</td>
++ <td>Specifies whether to use slave or DMA mode for accessing the data FIFOs.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: Slave
++ - 1: DMA (default, if available)
++ </td></tr>
++
++ <tr>
++ <td>dma_burst_size</td>
++ <td>The DMA Burst size (applicable only for External DMA Mode).
++ - Values: 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
++ </td></tr>
++
++ <tr>
++ <td>speed</td>
++ <td>Specifies the maximum speed of operation in host and device mode. The
++ actual speed depends on the speed of the attached device and the value of
++ phy_type.
++ - 0: High Speed (default)
++ - 1: Full Speed
++ </td></tr>
++
++ <tr>
++ <td>host_support_fs_ls_low_power</td>
++ <td>Specifies whether low power mode is supported when attached to a Full
++ Speed or Low Speed device in host mode.
++ - 0: Don't support low power mode (default)
++ - 1: Support low power mode
++ </td></tr>
++
++ <tr>
++ <td>host_ls_low_power_phy_clk</td>
++ <td>Specifies the PHY clock rate in low power mode when connected to a Low
++ Speed device in host mode. This parameter is applicable only if
++ HOST_SUPPORT_FS_LS_LOW_POWER is enabled.
++ - 0: 48 MHz (default)
++ - 1: 6 MHz
++ </td></tr>
++
++ <tr>
++ <td>enable_dynamic_fifo</td>
++ <td> Specifies whether FIFOs may be resized by the driver software.
++ - 0: Use cC FIFO size parameters
++ - 1: Allow dynamic FIFO sizing (default)
++ </td></tr>
++
++ <tr>
++ <td>data_fifo_size</td>
++ <td>Total number of 4-byte words in the data FIFO memory. This memory
++ includes the Rx FIFO, non-periodic Tx FIFO, and periodic Tx FIFOs.
++ - Values: 32 to 32768 (default 8192)
++
++ Note: The total FIFO memory depth in the FPGA configuration is 8192.
++ </td></tr>
++
++ <tr>
++ <td>dev_rx_fifo_size</td>
++ <td>Number of 4-byte words in the Rx FIFO in device mode when dynamic
++ FIFO sizing is enabled.
++ - Values: 16 to 32768 (default 1064)
++ </td></tr>
++
++ <tr>
++ <td>dev_nperio_tx_fifo_size</td>
++ <td>Number of 4-byte words in the non-periodic Tx FIFO in device mode when
++ dynamic FIFO sizing is enabled.
++ - Values: 16 to 32768 (default 1024)
++ </td></tr>
++
++ <tr>
++ <td>dev_perio_tx_fifo_size_n (n = 1 to 15)</td>
++ <td>Number of 4-byte words in each of the periodic Tx FIFOs in device mode
++ when dynamic FIFO sizing is enabled.
++ - Values: 4 to 768 (default 256)
++ </td></tr>
++
++ <tr>
++ <td>host_rx_fifo_size</td>
++ <td>Number of 4-byte words in the Rx FIFO in host mode when dynamic FIFO
++ sizing is enabled.
++ - Values: 16 to 32768 (default 1024)
++ </td></tr>
++
++ <tr>
++ <td>host_nperio_tx_fifo_size</td>
++ <td>Number of 4-byte words in the non-periodic Tx FIFO in host mode when
++ dynamic FIFO sizing is enabled in the core.
++ - Values: 16 to 32768 (default 1024)
++ </td></tr>
++
++ <tr>
++ <td>host_perio_tx_fifo_size</td>
++ <td>Number of 4-byte words in the host periodic Tx FIFO when dynamic FIFO
++ sizing is enabled.
++ - Values: 16 to 32768 (default 1024)
++ </td></tr>
++
++ <tr>
++ <td>max_transfer_size</td>
++ <td>The maximum transfer size supported in bytes.
++ - Values: 2047 to 65,535 (default 65,535)
++ </td></tr>
++
++ <tr>
++ <td>max_packet_count</td>
++ <td>The maximum number of packets in a transfer.
++ - Values: 15 to 511 (default 511)
++ </td></tr>
++
++ <tr>
++ <td>host_channels</td>
++ <td>The number of host channel registers to use.
++ - Values: 1 to 16 (default 12)
++
++ Note: The FPGA configuration supports a maximum of 12 host channels.
++ </td></tr>
++
++ <tr>
++ <td>dev_endpoints</td>
++ <td>The number of endpoints in addition to EP0 available for device mode
++ operations.
++ - Values: 1 to 15 (default 6 IN and OUT)
++
++ Note: The FPGA configuration supports a maximum of 6 IN and OUT endpoints in
++ addition to EP0.
++ </td></tr>
++
++ <tr>
++ <td>phy_type</td>
++ <td>Specifies the type of PHY interface to use. By default, the driver will
++ automatically detect the phy_type.
++ - 0: Full Speed
++ - 1: UTMI+ (default, if available)
++ - 2: ULPI
++ </td></tr>
++
++ <tr>
++ <td>phy_utmi_width</td>
++ <td>Specifies the UTMI+ Data Width. This parameter is applicable for a
++ phy_type of UTMI+. Also, this parameter is applicable only if the
++ OTG_HSPHY_WIDTH cC parameter was set to "8 and 16 bits", meaning that the
++ core has been configured to work at either data path width.
++ - Values: 8 or 16 bits (default 16)
++ </td></tr>
++
++ <tr>
++ <td>phy_ulpi_ddr</td>
++ <td>Specifies whether the ULPI operates at double or single data rate. This
++ parameter is only applicable if phy_type is ULPI.
++ - 0: single data rate ULPI interface with 8 bit wide data bus (default)
++ - 1: double data rate ULPI interface with 4 bit wide data bus
++ </td></tr>
++
++ <tr>
++ <td>i2c_enable</td>
++ <td>Specifies whether to use the I2C interface for full speed PHY. This
++ parameter is only applicable if PHY_TYPE is FS.
++ - 0: Disabled (default)
++ - 1: Enabled
++ </td></tr>
++
++ <tr>
++ <td>ulpi_fs_ls</td>
++ <td>Specifies whether to use ULPI FS/LS mode only.
++ - 0: Disabled (default)
++ - 1: Enabled
++ </td></tr>
++
++ <tr>
++ <td>ts_dline</td>
++ <td>Specifies whether term select D-Line pulsing for all PHYs is enabled.
++ - 0: Disabled (default)
++ - 1: Enabled
++ </td></tr>
++ 
++ <tr>
++ <td>en_multiple_tx_fifo</td>
++ <td>Specifies whether dedicated Tx FIFOs are enabled for non-periodic IN EPs.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: Disabled
++ - 1: Enabled (default, if available)
++ </td></tr>
++
++ <tr>
++ <td>dev_tx_fifo_size_n (n = 1 to 15)</td>
++ <td>Number of 4-byte words in each of the Tx FIFOs in device mode
++ when dynamic FIFO sizing is enabled.
++ - Values: 4 to 768 (default 256)
++ </td></tr>
++
++ <tr>
++ <td>tx_thr_length</td>
++ <td>Transmit Threshold length in 32 bit double words
++ - Values: 8 to 128 (default 64)
++ </td></tr>
++
++ <tr>
++ <td>rx_thr_length</td>
++ <td>Receive Threshold length in 32 bit double words
++ - Values: 8 to 128 (default 64)
++ </td></tr>
++
++<tr>
++ <td>thr_ctl</td>
++ <td>Specifies whether to enable Thresholding for Device mode. Bits 0, 1 and 2
++ of this parameter specify whether thresholding is enabled for non-ISO Tx,
++ ISO Tx and Rx transfers respectively (e.g. a value of 5, binary 101, enables
++ non-ISO Tx and Rx thresholding but not ISO Tx).
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - Values: 0 to 7 (default 0)
++ Bit values indicate:
++ - 0: Thresholding disabled
++ - 1: Thresholding enabled
++ </td></tr>
++
++ <tr>
++ <td>dma_desc_enable</td>
++ <td>Specifies whether to enable Descriptor DMA mode.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: Descriptor DMA disabled
++ - 1: Descriptor DMA enabled (default, if available)
++ </td></tr>
++
++ <tr>
++ <td>mpi_enable</td>
++ <td>Specifies whether to enable MPI enhancement mode.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: MPI disabled (default)
++ - 1: MPI enabled
++ </td></tr>
++
++ <tr>
++ <td>pti_enable</td>
++ <td>Specifies whether to enable PTI enhancement support.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: PTI disabled (default)
++ - 1: PTI enabled
++ </td></tr>
++
++ <tr>
++ <td>lpm_enable</td>
++ <td>Specifies whether to enable LPM support.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: LPM disabled
++ - 1: LPM enabled (default, if available)
++ </td></tr>
++
++ <tr>
++ <td>besl_enable</td>
++ <td>Specifies whether to enable LPM Errata support.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: LPM Errata disabled (default)
++ - 1: LPM Errata enabled
++ </td></tr>
++
++ <tr>
++ <td>baseline_besl</td>
++ <td>Specifies the baseline BESL value.
++ - Values: 0 to 15 (default 0)
++ </td></tr>
++
++ <tr>
++ <td>deep_besl</td>
++ <td>Specifies the deep BESL value.
++ - Values: 0 to 15 (default 15)
++ </td></tr>
++
++ <tr>
++ <td>ic_usb_cap</td>
++ <td>Specifies whether to enable IC_USB capability.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: IC_USB disabled (default, if available)
++ - 1: IC_USB enabled
++ </td></tr>
++
++ <tr>
++ <td>ahb_thr_ratio</td>
++ <td>Specifies the AHB Threshold ratio.
++ - Values: 0 to 3 (default 0)
++ </td></tr>
++
++ <tr>
++ <td>power_down</td>
++ <td>Specifies the Power Down (Hibernation) mode.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: Power Down disabled (default)
++ - 2: Power Down enabled
++ </td></tr>
++ 
++ <tr>
++ <td>reload_ctl</td>
++ <td>Specifies whether dynamic reloading of the HFIR register is allowed during
++ run time. The driver will automatically detect the value for this parameter if
++ none is specified. If the HFIR value is reloaded while HFIR.RldCtrl == 1'b0,
++ the core might misbehave.
++ - 0: Reload Control disabled (default)
++ - 1: Reload Control enabled
++ </td></tr>
++
++ <tr>
++ <td>dev_out_nak</td>
++ <td>Specifies whether the Device OUT NAK enhancement is enabled.
++ The driver will automatically detect the value for this parameter if
++ none is specified. This parameter is valid only when OTG_EN_DESC_DMA == 1'b1.
++ - 0: The core does not set NAK after Bulk OUT transfer complete (default)
++ - 1: The core sets NAK after Bulk OUT transfer complete
++ </td></tr>
++
++ <tr>
++ <td>cont_on_bna</td>
++ <td>Specifies whether Continue on BNA is enabled.
++ After receiving a BNA interrupt the core disables the endpoint; when the
++ endpoint is re-enabled by the application:
++ - 0: Core starts processing from the DOEPDMA descriptor (default)
++ - 1: Core starts processing from the descriptor which received the BNA.
++ This parameter is valid only when OTG_EN_DESC_DMA == 1'b1.
++ </td></tr>
++
++ <tr>
++ <td>ahb_single</td>
++ <td>When programmed, this bit enables SINGLE transfers for the remainder
++ data of a transfer in DMA mode of operation.
++ - 0: The remainder data will be sent using INCR burst size (default)
++ - 1: The remainder data will be sent using SINGLE burst size
++ </td></tr>
++
++ <tr>
++ <td>adp_enable</td>
++ <td>Specifies whether ADP feature is enabled.
++ The driver will automatically detect the value for this parameter if none is
++ specified.
++ - 0: ADP feature disabled (default)
++ - 1: ADP feature enabled
++ </td></tr>
++
++ <tr>
++ <td>otg_ver</td>
++ <td>Specifies whether the core operates as a USB OTG Revision 2.0 or a
++ Revision 1.3 USB OTG device.
++ - 0: OTG 2.0 support disabled (default)
++ - 1: OTG 2.0 support enabled
++ </td></tr>
++
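++ Example usage (a hypothetical modprobe invocation for illustration; the
++ module name "fh_otg" and the chosen values are assumptions, not taken
++ from this patch):
++
++   modprobe fh_otg phy_type=1 host_channels=12 lpm_enable=0
++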
++*/
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.h
+new file mode 100644
+index 00000000..cddb6347
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_driver.h
+@@ -0,0 +1,86 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_driver.h $
++ * $Revision: #21 $
++ * $Date: 2015/10/12 $
++ * $Change: 2972621 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ * 
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ * 
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++#ifndef __FH_OTG_DRIVER_H__
++#define __FH_OTG_DRIVER_H__
++
++/** @file
++ * This file contains the interface to the Linux driver.
++ */
++#include "fh_otg_os_dep.h"
++#include "fh_otg_core_if.h"
++
++/* Type declarations */
++struct fh_otg_pcd;
++struct fh_otg_hcd;
++
++/**
++ * This structure is a wrapper that encapsulates the driver components used to
++ * manage a single FH_otg controller.
++ */
++typedef struct fh_otg_device {
++	/** Structure containing OS-dependent stuff. KEEP THIS STRUCT AT THE
++	 * VERY BEGINNING OF THE DEVICE STRUCT. OSes such as FreeBSD and NetBSD
++	 * require this. */
++	struct os_dependent os_dep;
++
++	/** Pointer to the core interface structure. */
++	fh_otg_core_if_t *core_if;
++
++	/** Pointer to the PCD structure. */
++	struct fh_otg_pcd *pcd;
++
++	/** Pointer to the HCD structure. */
++	struct fh_otg_hcd *hcd;
++
++	/** Flag to indicate whether the common IRQ handler is installed. */
++	uint8_t common_irq_installed;
++
++} fh_otg_device_t;
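++
++/*
++ * A minimal sketch of how this wrapper is typically populated during probe
++ * (illustrative only; fh_otg_cil_init() is assumed to be the CIL entry
++ * point, mirroring the Synopsys dwc_otg driver, and is not declared here):
++ *
++ *	fh_otg_device_t *fh_dev = FH_ALLOC(sizeof(*fh_dev));
++ *	if (!fh_dev)
++ *		return -ENOMEM;
++ *	fh_dev->core_if = fh_otg_cil_init(reg_base);
++ *	if (!fh_dev->core_if)
++ *		return -EIO;
++ */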
++
++/* We must clear the S3C24XX_EINTPEND external interrupt register because,
++ * even after it is cleared, the IRQ triggered by the H/W core can occur
++ * again in the kernel before the OTG handlers have cleared all IRQ sources
++ * in the core registers, due to timing latencies and the low-level IRQ type.
++ */
++#ifdef CONFIG_MACH_IPMATE
++#define  S3C2410X_CLEAR_EINTPEND()   \
++do { \
++	__raw_writel(1UL << 11,S3C24XX_EINTPEND); \
++} while (0)
++#else
++#define  S3C2410X_CLEAR_EINTPEND()   do { } while (0)
++#endif
++
++#endif
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.c
+new file mode 100644
+index 00000000..ba135027
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.c
+@@ -0,0 +1,3450 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd.c $
++ * $Revision: #110 $
++ * $Date: 2013/05/19 $
++ * $Change: 2234022 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_DEVICE_ONLY
++
++/** @file
++ * This file implements the HCD core. All code in this file is portable and
++ * doesn't use any OS-specific functions.
++ * The interface provided by the HCD core is defined in the
++ * <code><hcd_if.h></code> header file.
++ */
++
++#include "fh_otg_hcd.h"
++#include "fh_otg_regs.h"
++
++fh_otg_hcd_t *fh_otg_hcd_alloc_hcd(void)
++{
++	return FH_ALLOC(sizeof(fh_otg_hcd_t));
++}
++
++/**
++ * Connection timeout function.  An OTG host is required to display a
++ * message if the device does not connect within 10 seconds.
++ */
++void fh_otg_hcd_connect_timeout(void *ptr)
++{
++	fh_otg_hcd_t *hcd;
++	gpwrdn_data_t gpwrdn;
++	FH_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, ptr);
++	FH_PRINTF("Connect Timeout\n");
++	__FH_ERROR("Device Not Connected/Responding\n");
++	/** Remove bus power after 10 s */
++	hcd = ptr;
++	if (hcd->core_if->otg_ver)
++		fh_otg_set_prtpower(hcd->core_if, 0);
++	if (hcd->core_if->adp_enable && !hcd->core_if->adp.probe_enabled) {
++		cil_hcd_disconnect(hcd->core_if);
++		gpwrdn.d32 = 0;
++		/* Enable Power Down Logic */
++		gpwrdn.b.pmuintsel = 1;
++		gpwrdn.b.pmuactv = 1;
++		gpwrdn.b.dis_vbus = 1;
++		FH_MODIFY_REG32(&hcd->core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++		
++		/* Unmask SRP detected interrupt from Power Down Logic */
++		gpwrdn.d32 = 0;
++		gpwrdn.b.srp_det_msk = 1;
++		FH_MODIFY_REG32(&hcd->core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++
++		fh_mdelay(220);
++		fh_otg_adp_probe_start(hcd->core_if);
++	}
++}
++
++#ifdef DEBUG
++static void dump_channel_info(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	if (qh->channel != NULL) {
++		fh_hc_t *hc = qh->channel;
++		fh_list_link_t *item;
++		fh_otg_qh_t *qh_item;
++		int num_channels = hcd->core_if->core_params->host_channels;
++		int i;
++
++		fh_otg_hc_regs_t *hc_regs;
++		hcchar_data_t hcchar;
++		hcsplt_data_t hcsplt;
++		hctsiz_data_t hctsiz;
++		uint32_t hcdma;
++
++		hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num];
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++		hcsplt.d32 = FH_READ_REG32(&hc_regs->hcsplt);
++		hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
++		hcdma = FH_READ_REG32(&hc_regs->hcdma);
++
++		FH_PRINTF("  Assigned to channel %p:\n", hc);
++		FH_PRINTF("    hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32,
++			   hcsplt.d32);
++		FH_PRINTF("    hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32,
++			   hcdma);
++		FH_PRINTF("    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
++			   hc->dev_addr, hc->ep_num, hc->ep_is_in);
++		FH_PRINTF("    ep_type: %d\n", hc->ep_type);
++		FH_PRINTF("    max_packet: %d\n", hc->max_packet);
++		FH_PRINTF("    data_pid_start: %d\n", hc->data_pid_start);
++		FH_PRINTF("    xfer_started: %d\n", hc->xfer_started);
++		FH_PRINTF("    halt_status: %d\n", hc->halt_status);
++		FH_PRINTF("    xfer_buff: %p\n", hc->xfer_buff);
++		FH_PRINTF("    xfer_len: %d\n", hc->xfer_len);
++		FH_PRINTF("    qh: %p\n", hc->qh);
++		FH_PRINTF("  NP inactive sched:\n");
++		FH_LIST_FOREACH(item, &hcd->non_periodic_sched_inactive) {
++			qh_item =
++			    FH_LIST_ENTRY(item, fh_otg_qh_t, qh_list_entry);
++			FH_PRINTF("    %p\n", qh_item);
++		}
++		FH_PRINTF("  NP active sched:\n");
++		FH_LIST_FOREACH(item, &hcd->non_periodic_sched_active) {
++			qh_item =
++			    FH_LIST_ENTRY(item, fh_otg_qh_t, qh_list_entry);
++			FH_PRINTF("    %p\n", qh_item);
++		}
++		FH_PRINTF("  Channels: \n");
++		for (i = 0; i < num_channels; i++) {
++			fh_hc_t *hc = hcd->hc_ptr_array[i];
++			FH_PRINTF("    %2d: %p\n", i, hc);
++		}
++	}
++}
++#endif /* DEBUG */
++
++/**
++ * Work queue function for starting the HCD when A-Cable is connected.
++ * The hcd_start() must be called in a process context.
++ */
++static void hcd_start_func(void *_vp)
++{
++	fh_otg_hcd_t *hcd = (fh_otg_hcd_t *) _vp;
++
++	FH_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, hcd);
++	if (hcd) {
++		hcd->fops->start(hcd);
++	}
++}
++
++static void del_xfer_timers(fh_otg_hcd_t * hcd)
++{
++#ifdef DEBUG
++	int i;
++	int num_channels = hcd->core_if->core_params->host_channels;
++	for (i = 0; i < num_channels; i++) {
++		FH_TIMER_CANCEL(hcd->core_if->hc_xfer_timer[i]);
++	}
++#endif
++}
++
++static void del_timers(fh_otg_hcd_t * hcd)
++{
++	del_xfer_timers(hcd);
++	FH_TIMER_CANCEL(hcd->conn_timer);
++}
++
++/**
++ * Processes all the URBs in a single list of QHs. Completes them with
++ * -ETIMEDOUT and frees the QTD.
++ */
++static void kill_urbs_in_qh_list(fh_otg_hcd_t * hcd, fh_list_link_t * qh_list)
++{
++	fh_list_link_t *qh_item;
++	fh_otg_qh_t *qh;
++	fh_otg_qtd_t *qtd, *qtd_tmp;
++
++	FH_LIST_FOREACH(qh_item, qh_list) {
++		qh = FH_LIST_ENTRY(qh_item, fh_otg_qh_t, qh_list_entry);
++		FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp,
++					 &qh->qtd_list, qtd_list_entry) {
++			qtd = FH_CIRCLEQ_FIRST(&qh->qtd_list);
++			if (qtd->urb != NULL) {
++				if(!qtd->urb->priv) {
++					FH_ERROR("urb->priv is NULL !!!!\n");
++					return;
++				}
++				if(!hcd->fops)
++					FH_ERROR("hcd->fops is NULL !!!!!\n");
++				if(!hcd->fops->complete)
++					FH_ERROR("fops->complete is NULL !!!!\n");
++				hcd->fops->complete(hcd, qtd->urb->priv,
++						    qtd->urb, -FH_E_TIMEOUT);
++				fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
++			}
++
++		}
++	}
++}
++
++/**
++ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
++ * and periodic schedules. The QTD associated with each URB is removed from
++ * the schedule and freed. This function may be called when a disconnect is
++ * detected or when the HCD is being stopped.
++ */
++static void kill_all_urbs(fh_otg_hcd_t * hcd)
++{
++	kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive);
++	kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active);
++	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive);
++	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready);
++	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned);
++	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued);
++}
++
++/**
++ * Start the connection timer.  An OTG host is required to display a
++ * message if the device does not connect within 10 seconds.  The
++ * timer is deleted if a port connect interrupt occurs before the
++ * timer expires.
++ */
++static void fh_otg_hcd_start_connect_timer(fh_otg_hcd_t * hcd)
++{
++	FH_TIMER_SCHEDULE(hcd->conn_timer, 10000 /* 10 secs */ );
++}
++
++/**
++ * HCD Callback function for session start of the HCD.
++ *
++ * @param p void pointer to the <code>struct usb_hcd</code>
++ */
++static int32_t fh_otg_hcd_session_start_cb(void *p)
++{
++	fh_otg_hcd_t *fh_otg_hcd;
++	FH_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
++	fh_otg_hcd = p;
++	fh_otg_hcd_start_connect_timer(fh_otg_hcd);
++	return 1;
++}
++
++/**
++ * HCD Callback function for starting the HCD when A-Cable is
++ * connected.
++ *
++ * @param p void pointer to the <code>struct usb_hcd</code>
++ */
++static int32_t fh_otg_hcd_start_cb(void *p)
++{
++	fh_otg_hcd_t *fh_otg_hcd = p;
++	fh_otg_core_if_t *core_if;
++	hprt0_data_t hprt0;
++	uint32_t timeout = 50;
++
++	core_if = fh_otg_hcd->core_if;
++	/**@todo vahrama: Check the timeout value for OTG 2.0 */
++	if (core_if->otg_ver)
++		timeout = 25;
++	if (core_if->op_state == B_HOST) {
++		/*
++		 * Reset the port.  During a HNP mode switch the reset
++		 * needs to occur within 1ms and have a duration of at
++		 * least 50ms.
++		 */
++		hprt0.d32 = fh_otg_read_hprt0(core_if);
++		hprt0.b.prtrst = 1;
++		FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++		if (core_if->otg_ver) {
++			fh_mdelay(60);
++			hprt0.d32 = fh_otg_read_hprt0(core_if);
++			hprt0.b.prtrst = 0;
++			FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++		}
++	}
++	FH_WORKQ_SCHEDULE_DELAYED(core_if->wq_otg,
++				hcd_start_func, fh_otg_hcd, timeout,
++				"start hcd");
++
++	return 1;
++}
++
++/**
++ * HCD Callback function for disconnect of the HCD.
++ *
++ * @param p void pointer to the <code>struct usb_hcd</code>
++ */
++static int32_t fh_otg_hcd_disconnect_cb(void *p)
++{
++	gintsts_data_t intr;
++	fh_otg_hcd_t *fh_otg_hcd = p;
++
++	/*
++	 * Set status flags for the hub driver.
++	 */
++	fh_otg_hcd->flags.b.port_connect_status_change = 1;
++	fh_otg_hcd->flags.b.port_connect_status = 0;
++
++	/*
++	 * Shutdown any transfers in process by clearing the Tx FIFO Empty
++	 * interrupt mask and status bits and disabling subsequent host
++	 * channel interrupts.
++	 */
++	intr.d32 = 0;
++	intr.b.nptxfempty = 1;
++	intr.b.ptxfempty = 1;
++	intr.b.hcintr = 1;
++	FH_MODIFY_REG32(&fh_otg_hcd->core_if->core_global_regs->gintmsk,
++			 intr.d32, 0);
++	FH_MODIFY_REG32(&fh_otg_hcd->core_if->core_global_regs->gintsts,
++			 intr.d32, 0);
++
++	/*
++	 * Turn off the vbus power only if the core has transitioned to device
++	 * mode. If still in host mode, need to keep power on to detect a
++	 * reconnection.
++	 */
++	if (fh_otg_is_device_mode(fh_otg_hcd->core_if)) {
++		if (fh_otg_hcd->core_if->op_state != A_SUSPEND) {
++			hprt0_data_t hprt0 = {.d32 = 0 };
++			FH_PRINTF("Disconnect: PortPower off\n");
++			hprt0.b.prtpwr = 0;
++			FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0,
++					hprt0.d32);
++		}
++		/** Delete timers if we become the device */
++		del_timers(fh_otg_hcd);
++		fh_otg_disable_host_interrupts(fh_otg_hcd->core_if);
++	}
++
++	/* Respond with an error status to all URBs in the schedule. */
++	kill_all_urbs(fh_otg_hcd);
++
++	if (fh_otg_is_host_mode(fh_otg_hcd->core_if)) {
++		/* Clean up any host channels that were in use. */
++		int num_channels;
++		int i;
++		fh_hc_t *channel;
++		fh_otg_hc_regs_t *hc_regs;
++		hcchar_data_t hcchar;
++
++		if (fh_otg_hcd->core_if->otg_ver == 1)
++			del_xfer_timers(fh_otg_hcd);
++		else
++			del_timers(fh_otg_hcd);
++
++		num_channels = fh_otg_hcd->core_if->core_params->host_channels;
++
++		if (!fh_otg_hcd->core_if->dma_enable) {
++			/* Flush out any channel requests in slave mode. */
++			for (i = 0; i < num_channels; i++) {
++				channel = fh_otg_hcd->hc_ptr_array[i];
++				if (FH_CIRCLEQ_EMPTY_ENTRY
++				    (channel, hc_list_entry)) {
++					hc_regs =
++					    fh_otg_hcd->core_if->
++					    host_if->hc_regs[i];
++					hcchar.d32 =
++					    FH_READ_REG32(&hc_regs->hcchar);
++					if (hcchar.b.chen) {
++						hcchar.b.chen = 0;
++						hcchar.b.chdis = 1;
++						hcchar.b.epdir = 0;
++						FH_WRITE_REG32
++						    (&hc_regs->hcchar,
++						     hcchar.d32);
++					}
++				}
++			}
++		}
++
++		for (i = 0; i < num_channels; i++) {
++			channel = fh_otg_hcd->hc_ptr_array[i];
++			if (FH_CIRCLEQ_EMPTY_ENTRY(channel, hc_list_entry)) {
++				hc_regs =
++				    fh_otg_hcd->core_if->host_if->hc_regs[i];
++				hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++				if (hcchar.b.chen) {
++					/* Halt the channel. */
++					hcchar.b.chdis = 1;
++					FH_WRITE_REG32(&hc_regs->hcchar,
++							hcchar.d32);
++				}
++
++				fh_otg_hc_cleanup(fh_otg_hcd->core_if,
++						   channel);
++				FH_CIRCLEQ_INSERT_TAIL
++				    (&fh_otg_hcd->free_hc_list, channel,
++				     hc_list_entry);
++				/*
++				 * Added for Descriptor DMA to prevent double channel cleanup
++				 * in release_channel_ddma(), which is called from ep_disable
++				 * when the device disconnects.
++				 */
++				channel->qh = NULL;
++			}
++		}
++	}
++
++	if (fh_otg_hcd->fops->disconnect) {
++		fh_otg_hcd->fops->disconnect(fh_otg_hcd);
++	}
++
++	return 1;
++}
++
++/**
++ * HCD Callback function for stopping the HCD.
++ *
++ * @param p void pointer to the <code>struct usb_hcd</code>
++ */
++static int32_t fh_otg_hcd_stop_cb(void *p)
++{
++	fh_otg_hcd_t *fh_otg_hcd = p;
++
++	FH_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
++	fh_otg_hcd_stop(fh_otg_hcd);
++	return 1;
++}
++
++#ifdef CONFIG_USB_FH_OTG_LPM
++/**
++ * HCD Callback function for sleep of HCD.
++ *
++ * @param p void pointer to the <code>struct usb_hcd</code>
++ */
++static int fh_otg_hcd_sleep_cb(void *p)
++{
++	fh_otg_hcd_t *hcd = p;
++
++	fh_otg_hcd_free_hc_from_lpm(hcd);
++
++	return 0;
++}
++#endif
++
++/**
++ * HCD Callback function for Remote Wakeup.
++ *
++ * @param p void pointer to the <code>struct usb_hcd</code>
++ */
++static int fh_otg_hcd_rem_wakeup_cb(void *p)
++{
++	fh_otg_hcd_t *hcd = p;
++
++	if (hcd->core_if->lx_state == FH_OTG_L2) {
++		hcd->flags.b.port_suspend_change = 1;
++	}
++#ifdef CONFIG_USB_FH_OTG_LPM
++	else {
++		hcd->flags.b.port_l1_change = 1;
++	}
++#endif
++	return 0;
++}
++
++/**
++ * Halts the FH_otg host mode operations in a clean manner. USB transfers are
++ * stopped.
++ */
++void fh_otg_hcd_stop(fh_otg_hcd_t * hcd)
++{
++	hprt0_data_t hprt0 = {.d32 = 0 };
++
++	FH_DEBUGPL(DBG_HCD, "FH OTG HCD STOP\n");
++
++	/*
++	 * The root hub should be disconnected before this function is called.
++	 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
++	 * and the QH lists (via ..._hcd_endpoint_disable).
++	 */
++
++	/* Turn off all host-specific interrupts. */
++	fh_otg_disable_host_interrupts(hcd->core_if);
++
++	/* Turn off the vbus power */
++	FH_PRINTF("PortPower off\n");
++	hprt0.b.prtpwr = 0;
++	FH_WRITE_REG32(hcd->core_if->host_if->hprt0, hprt0.d32);
++	fh_mdelay(1);
++}
++
++int fh_otg_hcd_urb_enqueue(fh_otg_hcd_t * hcd,
++			    fh_otg_hcd_urb_t * fh_otg_urb, void **ep_handle,
++			    int atomic_alloc)
++{
++	fh_irqflags_t flags;
++	int retval = 0;
++	fh_otg_qtd_t *qtd;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	if (!hcd->flags.b.port_connect_status) {
++		/* No longer connected. */
++		FH_ERROR("Not connected\n");
++		return -FH_E_NO_DEVICE;
++	}
++
++	qtd = fh_otg_hcd_qtd_create(fh_otg_urb, atomic_alloc);
++	if (qtd == NULL) {
++		FH_ERROR("FH OTG HCD URB Enqueue failed creating QTD\n");
++		return -FH_E_NO_MEMORY;
++	}
++
++	retval =
++	    fh_otg_hcd_qtd_add(qtd, hcd, (fh_otg_qh_t **) ep_handle, atomic_alloc);
++	if (retval < 0) {
++		FH_ERROR("FH OTG HCD URB Enqueue failed adding QTD. "
++			  "Error status %d\n", retval);
++		fh_otg_hcd_qtd_free(qtd);
++	} else {
++		qtd->qh = *ep_handle;
++	}
++	intr_mask.d32 = FH_READ_REG32(&hcd->core_if->core_global_regs->gintmsk);
++	if (!intr_mask.b.sofintr && retval == 0) {
++		fh_otg_transaction_type_e tr_type;
++		if ((qtd->qh->ep_type == UE_BULK)
++		    && !(qtd->urb->flags & URB_GIVEBACK_ASAP)) {
++			/* Do not schedule SG transactions until qtd has URB_GIVEBACK_ASAP set */
++			return 0;
++		}
++		FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
++		tr_type = fh_otg_hcd_select_transactions(hcd);
++		if (tr_type != FH_OTG_TRANSACTION_NONE) {
++			fh_otg_hcd_queue_transactions(hcd, tr_type);
++		}
++		FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
++	}
++
++	return retval;
++}
++
++int fh_otg_hcd_urb_dequeue(fh_otg_hcd_t * hcd,
++			    fh_otg_hcd_urb_t * fh_otg_urb)
++{
++	fh_otg_qh_t *qh;
++	fh_otg_qtd_t *urb_qtd;
++
++	urb_qtd = fh_otg_urb->qtd;
++	if (!urb_qtd) {
++		printk(KERN_ERR "## Urb QTD is NULL ##\n");
++		return -EINVAL;
++	}
++
++	/* Only dereference the QTD after the NULL check above. */
++	qh = urb_qtd->qh;
++	if (!qh) {
++		printk(KERN_ERR "## Urb QH is NULL ##\n");
++		return -EINVAL;
++	}
++#ifdef DEBUG
++	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
++		if (urb_qtd->in_process) {
++			dump_channel_info(hcd, qh);
++		}
++	}
++#endif
++	if (urb_qtd->in_process && qh->channel) {
++		/* The QTD is in process (it has been assigned to a channel). */
++		if (hcd->flags.b.port_connect_status) {
++			/*
++			 * If still connected (i.e. in host mode), halt the
++			 * channel so it can be used for other transfers. If
++			 * no longer connected, the host registers can't be
++			 * written to halt the channel since the core is in
++			 * device mode.
++			 */
++			fh_otg_hc_halt(hcd->core_if, qh->channel,
++					FH_OTG_HC_XFER_URB_DEQUEUE);
++		}
++	}
++
++	/*
++	 * Free the QTD and clean up the associated QH. Leave the QH in the
++	 * schedule if it has any remaining QTDs.
++	 */
++
++	if (!hcd->core_if->dma_desc_enable) {
++		uint8_t b = urb_qtd->in_process;
++		fh_otg_hcd_qtd_remove_and_free(hcd, urb_qtd, qh);
++		if (b) {
++			fh_otg_hcd_qh_deactivate(hcd, qh, 0);
++			qh->channel = NULL;
++		} else if (FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
++			fh_otg_hcd_qh_remove(hcd, qh);
++		}
++	} else {
++		fh_otg_hcd_qtd_remove_and_free(hcd, urb_qtd, qh);
++	}
++	return 0;
++}
++
++int fh_otg_hcd_endpoint_disable(fh_otg_hcd_t * hcd, void *ep_handle,
++				 int retry)
++{
++	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;
++	int retval = 0;
++	fh_irqflags_t flags;
++
++	if (retry < 0) {
++		retval = -FH_E_INVALID;
++		goto done;
++	}
++
++	if (!qh) {
++		retval = -FH_E_INVALID;
++		goto done;
++	}
++
++	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
++
++	while (!FH_CIRCLEQ_EMPTY(&qh->qtd_list) && retry) {
++		FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
++		retry--;
++		fh_msleep(5);
++		FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
++	}
++
++	fh_otg_hcd_qh_remove(hcd, qh);
++
++	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
++	/*
++	 * Split fh_otg_hcd_qh_remove_and_free() into qh_remove
++	 * and qh_free to prevent stack dump on FH_DMA_FREE() with
++	 * irq_disabled (spinlock_irqsave) in fh_otg_hcd_desc_list_free()
++	 * and fh_otg_hcd_frame_list_alloc().
++	 */
++	fh_otg_hcd_qh_free(hcd, qh);
++
++done:
++	return retval;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
++int fh_otg_hcd_endpoint_reset(fh_otg_hcd_t * hcd, void *ep_handle)
++{
++	int retval = 0;
++	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;
++	if (!qh)
++		return -FH_E_INVALID;
++
++	qh->data_toggle = FH_OTG_HC_PID_DATA0;
++	return retval;
++}
++#endif
++
++/**
++ * HCD Callback structure for handling mode switching.
++ */
++static fh_otg_cil_callbacks_t hcd_cil_callbacks = {
++	.start = fh_otg_hcd_start_cb,
++	.stop = fh_otg_hcd_stop_cb,
++	.disconnect = fh_otg_hcd_disconnect_cb,
++	.session_start = fh_otg_hcd_session_start_cb,
++	.resume_wakeup = fh_otg_hcd_rem_wakeup_cb,
++#ifdef CONFIG_USB_FH_OTG_LPM
++	.sleep = fh_otg_hcd_sleep_cb,
++#endif
++	.p = 0,
++};
++
++/**
++ * Reset tasklet function
++ */
++static void reset_tasklet_func(void *data)
++{
++	fh_otg_hcd_t *fh_otg_hcd = (fh_otg_hcd_t *) data;
++	fh_otg_core_if_t *core_if = fh_otg_hcd->core_if;
++	hprt0_data_t hprt0;
++
++	FH_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
++
++	hprt0.d32 = fh_otg_read_hprt0(core_if);
++	hprt0.b.prtrst = 1;
++	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++	fh_mdelay(60);
++
++	hprt0.b.prtrst = 0;
++	FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++	fh_otg_hcd->flags.b.port_reset_change = 1;
++}
++
++static void qh_list_free(fh_otg_hcd_t * hcd, fh_list_link_t * qh_list)
++{
++	fh_list_link_t *item;
++	fh_otg_qh_t *qh;
++	fh_irqflags_t flags;
++
++	if (!qh_list->next) {
++		/* The list hasn't been initialized yet. */
++		return;
++	}
++	/*
++	 * Hold the spinlock here. It is not needed if the function below
++	 * is being called from an ISR.
++	 */
++	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
++	/* Ensure there are no QTDs or URBs left. */
++	kill_urbs_in_qh_list(hcd, qh_list);
++	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
++
++	FH_LIST_FOREACH(item, qh_list) {
++		qh = FH_LIST_ENTRY(item, fh_otg_qh_t, qh_list_entry);
++		fh_otg_hcd_qh_remove_and_free(hcd, qh);
++	}
++}
++
++/**
++ * Exit from hibernation and power the host up if the host did not detect SRP
++ * from a connected SRP-capable device within the SRP time.
++ */
++void fh_otg_hcd_power_up(void *ptr)
++{
++	gpwrdn_data_t gpwrdn = {.d32 = 0 };
++	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;
++
++	FH_PRINTF("%s called\n", __FUNCTION__);
++
++	if (!core_if->hibernation_suspend) {
++		FH_PRINTF("Already exited from Hibernation\n");
++		return;
++	}
++
++	/* Switch on the voltage to the core */
++	gpwrdn.b.pwrdnswtch = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Reset the core */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnrstn = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Disable power clamps */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnclmp = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	/* Remove reset the core signal */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pwrdnrstn = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
++	fh_udelay(10);
++
++	/* Disable PMU interrupt */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pmuintsel = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	core_if->hibernation_suspend = 0;
++
++	/* Disable PMU */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.pmuactv = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++	fh_udelay(10);
++
++	/* Enable VBUS */
++	gpwrdn.d32 = 0;
++	gpwrdn.b.dis_vbus = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
++
++	core_if->op_state = A_HOST;
++	fh_otg_core_init(core_if);
++	fh_otg_enable_global_interrupts(core_if);
++	cil_hcd_start(core_if);
++}
++
++/**
++ * Frees secondary storage associated with the fh_otg_hcd structure contained
++ * in the struct usb_hcd field.
++ */
++static void fh_otg_hcd_free(fh_otg_hcd_t * fh_otg_hcd)
++{
++	int i;
++
++	FH_DEBUGPL(DBG_HCD, "FH OTG HCD FREE\n");
++
++	del_timers(fh_otg_hcd);
++
++	/* Free memory for QH/QTD lists */
++	qh_list_free(fh_otg_hcd, &fh_otg_hcd->non_periodic_sched_inactive);
++	qh_list_free(fh_otg_hcd, &fh_otg_hcd->non_periodic_sched_active);
++	qh_list_free(fh_otg_hcd, &fh_otg_hcd->periodic_sched_inactive);
++	qh_list_free(fh_otg_hcd, &fh_otg_hcd->periodic_sched_ready);
++	qh_list_free(fh_otg_hcd, &fh_otg_hcd->periodic_sched_assigned);
++	qh_list_free(fh_otg_hcd, &fh_otg_hcd->periodic_sched_queued);
++
++	/* Free memory for the host channels. */
++	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
++		fh_hc_t *hc = fh_otg_hcd->hc_ptr_array[i];
++
++#ifdef DEBUG
++		if (fh_otg_hcd->core_if->hc_xfer_timer[i]) {
++			FH_TIMER_FREE(fh_otg_hcd->core_if->hc_xfer_timer[i]);
++		}
++#endif
++		if (hc != NULL) {
++			FH_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n",
++				    i, hc);
++			FH_FREE(hc);
++		}
++	}
++
++	if (fh_otg_hcd->core_if->dma_enable) {
++		if (fh_otg_hcd->status_buf_dma) {
++			FH_DMA_FREE(FH_OTG_HCD_STATUS_BUF_SIZE,
++				     fh_otg_hcd->status_buf,
++				     fh_otg_hcd->status_buf_dma);
++		}
++	} else if (fh_otg_hcd->status_buf != NULL) {
++		FH_FREE(fh_otg_hcd->status_buf);
++	}
++	FH_SPINLOCK_FREE(fh_otg_hcd->lock);
++	/* Set core_if's lock pointer to NULL */
++	fh_otg_hcd->core_if->lock = NULL;
++
++	FH_TIMER_FREE(fh_otg_hcd->conn_timer);
++	FH_TASK_FREE(fh_otg_hcd->reset_tasklet);
++
++#ifdef FH_DEV_SRPCAP
++	if (fh_otg_hcd->core_if->power_down == 2 &&
++	    fh_otg_hcd->core_if->pwron_timer) {
++		FH_TIMER_FREE(fh_otg_hcd->core_if->pwron_timer);
++	}
++#endif
++	FH_FREE(fh_otg_hcd);
++}
++
++int fh_otg_hcd_init(fh_otg_hcd_t * hcd, fh_otg_core_if_t * core_if)
++{
++	int retval = 0;
++	int num_channels;
++	int i;
++	fh_hc_t *channel;
++
++	hcd->lock = FH_SPINLOCK_ALLOC();
++	if (!hcd->lock) {
++		FH_ERROR("Could not allocate lock for hcd");
++		FH_FREE(hcd);
++		retval = -FH_E_NO_MEMORY;
++		goto out;
++	}
++	hcd->core_if = core_if;
++
++	/* Register the HCD CIL Callbacks */
++	fh_otg_cil_register_hcd_callbacks(hcd->core_if,
++					   &hcd_cil_callbacks, hcd);
++
++	/* Initialize the non-periodic schedule. */
++	FH_LIST_INIT(&hcd->non_periodic_sched_inactive);
++	FH_LIST_INIT(&hcd->non_periodic_sched_active);
++
++	/* Initialize the periodic schedule. */
++	FH_LIST_INIT(&hcd->periodic_sched_inactive);
++	FH_LIST_INIT(&hcd->periodic_sched_ready);
++	FH_LIST_INIT(&hcd->periodic_sched_assigned);
++	FH_LIST_INIT(&hcd->periodic_sched_queued);
++
++	/*
++	 * Create a host channel descriptor for each host channel implemented
++	 * in the controller. Initialize the channel descriptor array.
++	 */
++	FH_CIRCLEQ_INIT(&hcd->free_hc_list);
++	num_channels = hcd->core_if->core_params->host_channels;
++	FH_MEMSET(hcd->hc_ptr_array, 0, sizeof(hcd->hc_ptr_array));
++	for (i = 0; i < num_channels; i++) {
++		channel = FH_ALLOC(sizeof(fh_hc_t));
++		if (channel == NULL) {
++			retval = -FH_E_NO_MEMORY;
++			FH_ERROR("%s: host channel allocation failed\n",
++				  __func__);
++			fh_otg_hcd_free(hcd);
++			goto out;
++		}
++		channel->hc_num = i;
++		hcd->hc_ptr_array[i] = channel;
++#ifdef DEBUG
++		hcd->core_if->hc_xfer_timer[i] =
++		    FH_TIMER_ALLOC("hc timer", hc_xfer_timeout,
++				    &hcd->core_if->hc_xfer_info[i]);
++#endif
++		FH_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i,
++			    channel);
++	}
++
++	/* Initialize the Connection timeout timer. */
++	hcd->conn_timer = FH_TIMER_ALLOC("Connection timer",
++					  fh_otg_hcd_connect_timeout, hcd);
++
++	/* Initialize reset tasklet. */
++	hcd->reset_tasklet = FH_TASK_ALLOC("reset_tasklet", reset_tasklet_func, hcd);
++#ifdef FH_DEV_SRPCAP
++	if (hcd->core_if->power_down == 2) {
++		/* Initialize the power-on timer for host power up in case of hibernation */
++		hcd->core_if->pwron_timer = FH_TIMER_ALLOC("PWRON TIMER",
++									fh_otg_hcd_power_up, core_if);
++	}
++#endif	
++
++	/*
++	 * Allocate space for storing data on status transactions. Normally no
++	 * data is sent, but this space acts as a bit bucket. This must be
++	 * done after usb_add_hcd since that function allocates the DMA buffer
++	 * pool.
++	 */
++	if (hcd->core_if->dma_enable) {
++		hcd->status_buf =
++		    FH_DMA_ALLOC(FH_OTG_HCD_STATUS_BUF_SIZE,
++				  &hcd->status_buf_dma);
++	} else {
++		hcd->status_buf = FH_ALLOC(FH_OTG_HCD_STATUS_BUF_SIZE);
++	}
++	if (!hcd->status_buf) {
++		retval = -FH_E_NO_MEMORY;
++		FH_ERROR("%s: status_buf allocation failed\n", __func__);
++		fh_otg_hcd_free(hcd);
++		goto out;
++	}
++
++	hcd->otg_port = 1;
++	hcd->frame_list = NULL;
++	hcd->frame_list_dma = 0;
++	hcd->periodic_qh_count = 0;
++out:
++	return retval;
++}
++
++void fh_otg_hcd_remove(fh_otg_hcd_t * hcd)
++{
++	/* Turn off all host-specific interrupts. */
++	fh_otg_disable_host_interrupts(hcd->core_if);
++
++	fh_otg_hcd_free(hcd);
++}
++
++/**
++ * Initializes dynamic portions of the FH_otg HCD state.
++ */
++static void fh_otg_hcd_reinit(fh_otg_hcd_t * hcd)
++{
++	int num_channels;
++	int i;
++	fh_hc_t *channel;
++	fh_hc_t *channel_tmp;
++
++	hcd->flags.d32 = 0;
++
++	hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
++	hcd->non_periodic_channels = 0;
++	hcd->periodic_channels = 0;
++
++	/*
++	 * Put all channels in the free channel list and clean up channel
++	 * states.
++	 */
++	FH_CIRCLEQ_FOREACH_SAFE(channel, channel_tmp,
++				 &hcd->free_hc_list, hc_list_entry) {
++		FH_CIRCLEQ_REMOVE(&hcd->free_hc_list, channel, hc_list_entry);
++	}
++
++	num_channels = hcd->core_if->core_params->host_channels;
++	for (i = 0; i < num_channels; i++) {
++		channel = hcd->hc_ptr_array[i];
++		FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, channel,
++					hc_list_entry);
++		fh_otg_hc_cleanup(hcd->core_if, channel);
++	}
++
++	/* Initialize the FH core for host mode operation. */
++	fh_otg_core_host_init(hcd->core_if);
++
++	/* Set core_if's lock pointer to the hcd->lock */
++	hcd->core_if->lock = hcd->lock;
++}
++
++/**
++ * Assigns transactions from a QTD to a free host channel and initializes the
++ * host channel to perform the transactions. The host channel is removed from
++ * the free list.
++ *
++ * @param hcd The HCD state structure.
++ * @param qh Transactions from the first QTD for this QH are selected and
++ * assigned to a free host channel.
++ */
++static int assign_and_init_hc(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	fh_hc_t *hc = NULL;
++	fh_otg_qtd_t *qtd;
++	fh_otg_hcd_urb_t *urb;
++	void* ptr = NULL;
++	hcchar_data_t hcchar;
++	int num_channels;
++	int i;
++
++	FH_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, hcd, qh);
++
++	num_channels = hcd->core_if->core_params->host_channels;
++
++	/* Workaround: do not select a channel with the chdis bit set; this
++	 * was observed after a role switch as part of OTG 2.0 HNP.
++	 */
++	for (i = 0; i < num_channels; i++) {
++		hc = FH_CIRCLEQ_FIRST(&hcd->free_hc_list);
++		hcchar.d32 = FH_READ_REG32(&hcd->core_if->host_if->hc_regs[hc->hc_num]->hcchar);
++		FH_DEBUGPL(DBG_HCDV, "HC num = %d HCCHAR %08x\n", hc->hc_num, hcchar.d32);
++		if(!hcchar.b.chdis && !hcchar.b.chen)
++			break;
++		FH_CIRCLEQ_REMOVE_INIT(&hcd->free_hc_list, hc, hc_list_entry);
++		FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
++		hc = NULL;
++	}
++	if (!hc) {
++		FH_ERROR("No free channel with en and dis bits 0\n");
++		return -ENOMEM;
++	}
++
++	/* Remove the host channel from the free list. */
++	FH_CIRCLEQ_REMOVE_INIT(&hcd->free_hc_list, hc, hc_list_entry);
++
++	qtd = FH_CIRCLEQ_FIRST(&qh->qtd_list);
++
++	urb = qtd->urb;
++	qh->channel = hc;
++
++	qtd->in_process = 1;
++
++	/*
++	 * Use usb_pipedevice to determine device address. This address is
++	 * 0 before the SET_ADDRESS command and the correct address afterward.
++	 */
++	hc->dev_addr = fh_otg_hcd_get_dev_addr(&urb->pipe_info);
++	hc->ep_num = fh_otg_hcd_get_ep_num(&urb->pipe_info);
++	hc->speed = qh->dev_speed;
++	hc->max_packet = fh_max_packet(qh->maxp);
++
++	hc->xfer_started = 0;
++	hc->halt_status = FH_OTG_HC_XFER_NO_HALT_STATUS;
++	hc->error_state = (qtd->error_count > 0);
++	hc->halt_on_queue = 0;
++	hc->halt_pending = 0;
++	hc->requests = 0;
++
++	/*
++	 * The following values may be modified in the transfer type section
++	 * below. The xfer_len value may be reduced when the transfer is
++	 * started to accommodate the max widths of the XferSize and PktCnt
++	 * fields in the HCTSIZn register.
++	 */
++
++	hc->ep_is_in = (fh_otg_hcd_is_pipe_in(&urb->pipe_info) != 0);
++	if (hc->ep_is_in) {
++		hc->do_ping = 0;
++	} else {
++		hc->do_ping = qh->ping_state;
++	}
++
++	hc->data_pid_start = qh->data_toggle;
++	hc->multi_count = 1;
++
++	if (urb->actual_length > urb->length &&
++		!fh_otg_hcd_is_pipe_in(&urb->pipe_info)) {
++		urb->actual_length = urb->length;
++	}
++
++	if (hcd->core_if->dma_enable) {
++		hc->xfer_buff = (uint8_t *) urb->dma + urb->actual_length;
++
++		/* For non-dword aligned case */
++		if (((unsigned long)hc->xfer_buff & 0x3)
++		    && !hcd->core_if->dma_desc_enable) {
++			ptr = (uint8_t *) urb->buf + urb->actual_length;
++		}
++	} else {
++		hc->xfer_buff = (uint8_t *) urb->buf + urb->actual_length;
++	}
++	hc->xfer_len = urb->length - urb->actual_length;
++	hc->xfer_count = 0;
++
++	/*
++	 * Set the split attributes
++	 */
++	hc->do_split = 0;
++	if (qh->do_split) {
++		uint32_t hub_addr, port_addr;
++		hc->do_split = 1;
++		hc->xact_pos = qtd->isoc_split_pos;
++		hc->complete_split = qtd->complete_split;
++		hcd->fops->hub_info(hcd, urb->priv, &hub_addr, &port_addr);
++		hc->hub_addr = (uint8_t) hub_addr;
++		hc->port_addr = (uint8_t) port_addr;
++	}
++
++	switch (fh_otg_hcd_get_pipe_type(&urb->pipe_info)) {
++	case UE_CONTROL:
++		hc->ep_type = FH_OTG_EP_TYPE_CONTROL;
++		switch (qtd->control_phase) {
++		case FH_OTG_CONTROL_SETUP:
++			FH_DEBUGPL(DBG_HCDV, "  Control setup transaction\n");
++			hc->do_ping = 0;
++			hc->ep_is_in = 0;
++			hc->data_pid_start = FH_OTG_HC_PID_SETUP;
++			if (hcd->core_if->dma_enable) {
++				hc->xfer_buff = (uint8_t *) urb->setup_dma;
++			} else {
++				hc->xfer_buff = (uint8_t *) urb->setup_packet;
++			}
++			hc->xfer_len = 8;
++			ptr = NULL;
++			break;
++		case FH_OTG_CONTROL_DATA:
++			FH_DEBUGPL(DBG_HCDV, "  Control data transaction\n");
++			hc->data_pid_start = qtd->data_toggle;
++			break;
++		case FH_OTG_CONTROL_STATUS:
++			/*
++			 * Direction is opposite of data direction or IN if no
++			 * data.
++			 */
++			FH_DEBUGPL(DBG_HCDV, "  Control status transaction\n");
++			if (urb->length == 0) {
++				hc->ep_is_in = 1;
++			} else {
++				hc->ep_is_in =
++				    fh_otg_hcd_is_pipe_out(&urb->pipe_info);
++			}
++			if (hc->ep_is_in) {
++				hc->do_ping = 0;
++			}
++
++			hc->data_pid_start = FH_OTG_HC_PID_DATA1;
++
++			hc->xfer_len = 0;
++			if (hcd->core_if->dma_enable) {
++				hc->xfer_buff = (uint8_t *) hcd->status_buf_dma;
++			} else {
++				hc->xfer_buff = (uint8_t *) hcd->status_buf;
++			}
++			ptr = NULL;
++			break;
++		}
++		break;
++	case UE_BULK:
++		hc->ep_type = FH_OTG_EP_TYPE_BULK;
++		break;
++	case UE_INTERRUPT:
++		hc->ep_type = FH_OTG_EP_TYPE_INTR;
++		break;
++	case UE_ISOCHRONOUS:
++		{
++			struct fh_otg_hcd_iso_packet_desc *frame_desc;
++
++			hc->ep_type = FH_OTG_EP_TYPE_ISOC;
++
++			if (hcd->core_if->dma_desc_enable)
++				break;
++
++			frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
++
++			frame_desc->status = 0;
++
++			if (hcd->core_if->dma_enable) {
++				hc->xfer_buff = (uint8_t *) urb->dma;
++			} else {
++				hc->xfer_buff = (uint8_t *) urb->buf;
++			}
++			hc->xfer_buff +=
++			    frame_desc->offset + qtd->isoc_split_offset;
++			hc->xfer_len =
++			    frame_desc->length - qtd->isoc_split_offset;
++
++			/* For non-dword aligned buffers */
++			if (((unsigned long)hc->xfer_buff & 0x3)
++			    && hcd->core_if->dma_enable) {
++				ptr =
++				    (uint8_t *) urb->buf + frame_desc->offset +
++				    qtd->isoc_split_offset;
++			} else
++				ptr = NULL;
++
++			if (hc->xact_pos == FH_HCSPLIT_XACTPOS_ALL) {
++				if (hc->xfer_len <= 188) {
++					hc->xact_pos = FH_HCSPLIT_XACTPOS_ALL;
++				} else {
++					hc->xact_pos =
++					    FH_HCSPLIT_XACTPOS_BEGIN;
++				}
++			}
++		}
++		break;
++	}
++	/* non DWORD-aligned buffer case */	
++	if (ptr) {
++		uint32_t buf_size;
++		if (hc->ep_type != FH_OTG_EP_TYPE_ISOC) {
++			buf_size = hcd->core_if->core_params->max_transfer_size;
++		} else {				
++			buf_size = 4096;
++		}
++		if (!qh->dw_align_buf) {
++			qh->dw_align_buf = FH_DMA_ALLOC_ATOMIC(buf_size,
++							 &qh->dw_align_buf_dma);
++			printk(KERN_ERR "FH_DMA_ALLOC_ATOMIC (%p)\n", qh->dw_align_buf);
++			if (!qh->dw_align_buf) {
++				FH_ERROR
++				    ("%s: Failed to allocate memory to handle "
++				     "non-dword aligned buffer case\n",
++				     __func__);
++				return -ENOMEM;
++			}
++		}
++		if (!hc->ep_is_in) {
++			fh_memcpy(qh->dw_align_buf, ptr, hc->xfer_len);
++		}
++		hc->align_buff = qh->dw_align_buf_dma;
++	} else {
++		hc->align_buff = 0;
++	}
++
++	if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
++	    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
++		/*
++		 * This value may be modified when the transfer is started to
++		 * reflect the actual transfer length.
++		 */
++		hc->multi_count = fh_hb_mult(qh->maxp);
++	}
++
++	if (hcd->core_if->dma_desc_enable)
++		hc->desc_list_addr = qh->desc_list_dma;
++
++	fh_otg_hc_init(hcd->core_if, hc);
++	hc->qh = qh;
++
++	return 0;
++}
++
++/**
++ * This function selects transactions from the HCD transfer schedule and
++ * assigns them to available host channels. It is called from HCD interrupt
++ * handler functions.
++ *
++ * @param hcd The HCD state structure.
++ *
++ * @return The types of new transactions that were assigned to host channels.
++ */
++fh_otg_transaction_type_e fh_otg_hcd_select_transactions(fh_otg_hcd_t * hcd)
++{
++	fh_list_link_t *qh_ptr;
++	fh_otg_qh_t *qh;
++	int num_channels;
++	fh_otg_transaction_type_e ret_val = FH_OTG_TRANSACTION_NONE;
++
++#ifdef DEBUG_SOF
++	FH_DEBUGPL(DBG_HCD, "  Select Transactions\n");
++#endif
++
++	/* Process entries in the periodic ready list. */
++	qh_ptr = FH_LIST_FIRST(&hcd->periodic_sched_ready);
++
++	while (qh_ptr != &hcd->periodic_sched_ready &&
++	       !FH_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
++
++		qh = FH_LIST_ENTRY(qh_ptr, fh_otg_qh_t, qh_list_entry);
++		if (assign_and_init_hc(hcd, qh))
++			break;
++
++		/*
++		 * Move the QH from the periodic ready schedule to the
++		 * periodic assigned schedule.
++		 */
++		qh_ptr = FH_LIST_NEXT(qh_ptr);
++		FH_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
++				   &qh->qh_list_entry);
++
++		ret_val = FH_OTG_TRANSACTION_PERIODIC;
++	}
++
++	/*
++	 * Process entries in the inactive portion of the non-periodic
++	 * schedule. Some free host channels may not be used if they are
++	 * reserved for periodic transfers.
++	 */
++	qh_ptr = hcd->non_periodic_sched_inactive.next;
++	num_channels = hcd->core_if->core_params->host_channels;
++	while (qh_ptr != &hcd->non_periodic_sched_inactive &&
++	       (hcd->non_periodic_channels <
++		num_channels - hcd->periodic_channels) &&
++	       !FH_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
++		qh = FH_LIST_ENTRY(qh_ptr, fh_otg_qh_t, qh_list_entry);
++
++		if (assign_and_init_hc(hcd, qh))
++			break;
++
++		/*
++		 * Move the QH from the non-periodic inactive schedule to the
++		 * non-periodic active schedule.
++		 */
++		qh_ptr = FH_LIST_NEXT(qh_ptr);
++		FH_LIST_MOVE_HEAD(&hcd->non_periodic_sched_active,
++				   &qh->qh_list_entry);
++
++		if (ret_val == FH_OTG_TRANSACTION_NONE) {
++			ret_val = FH_OTG_TRANSACTION_NON_PERIODIC;
++		} else {
++			ret_val = FH_OTG_TRANSACTION_ALL;
++		}
++
++		hcd->non_periodic_channels++;
++	}
++
++	return ret_val;
++}
++
++/**
++ * Attempts to queue a single transaction request for a host channel
++ * associated with either a periodic or non-periodic transfer. This function
++ * assumes that there is space available in the appropriate request queue. For
++ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
++ * is available in the appropriate Tx FIFO.
++ *
++ * @param hcd The HCD state structure.
++ * @param hc Host channel descriptor associated with either a periodic or
++ * non-periodic transfer.
++ * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx
++ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
++ * transfers.
++ *
++ * @return 1 if a request is queued and more requests may be needed to
++ * complete the transfer, 0 if no more requests are required for this
++ * transfer, -1 if there is insufficient space in the Tx FIFO.
++ */
++static int queue_transaction(fh_otg_hcd_t * hcd,
++			     fh_hc_t * hc, uint16_t fifo_dwords_avail)
++{
++	int retval;
++
++	if (hcd->core_if->dma_enable) {
++		if (hcd->core_if->dma_desc_enable) {
++			if (!hc->xfer_started
++			    || (hc->ep_type == FH_OTG_EP_TYPE_ISOC)) {
++				fh_otg_hcd_start_xfer_ddma(hcd, hc->qh);
++				hc->qh->ping_state = 0;
++			}
++		} else if (!hc->xfer_started) {
++			fh_otg_hc_start_transfer(hcd->core_if, hc);
++			hc->qh->ping_state = 0;
++		}
++		retval = 0;
++	} else if (hc->halt_pending) {
++		/* Don't queue a request if the channel has been halted. */
++		retval = 0;
++	} else if (hc->halt_on_queue) {
++		fh_otg_hc_halt(hcd->core_if, hc, hc->halt_status);
++		retval = 0;
++	} else if (hc->do_ping) {
++		if (!hc->xfer_started) {
++			fh_otg_hc_start_transfer(hcd->core_if, hc);
++		}
++		retval = 0;
++	} else if (!hc->ep_is_in || hc->data_pid_start == FH_OTG_HC_PID_SETUP) {
++		if ((fifo_dwords_avail * 4) >= hc->max_packet) {
++			if (!hc->xfer_started) {
++				fh_otg_hc_start_transfer(hcd->core_if, hc);
++				retval = 1;
++			} else {
++				retval =
++				    fh_otg_hc_continue_transfer(hcd->core_if,
++								 hc);
++			}
++		} else {
++			retval = -1;
++		}
++	} else {
++		if (!hc->xfer_started) {
++			fh_otg_hc_start_transfer(hcd->core_if, hc);
++			retval = 1;
++		} else {
++			retval = fh_otg_hc_continue_transfer(hcd->core_if, hc);
++		}
++	}
++
++	return retval;
++}
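++
++/*
++ * A sketch of the calling contract above (illustrative only; the real
++ * callers are process_periodic_channels() and
++ * process_non_periodic_channels() below):
++ *
++ *	status = queue_transaction(hcd, qh->channel, dwords_avail);
++ *	if (status < 0)
++ *		-- no Tx FIFO space: stop and enable the FIFO empty interrupt
++ *	else if (status > 0)
++ *		-- request queued, more are needed: revisit this channel
++ *	else
++ *		-- transfer fully queued: advance to the next QH
++ */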
++
++/**
++ * Processes periodic channels for the next frame and queues transactions for
++ * these channels to the FH_otg controller. After queueing transactions, the
++ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
++ * to queue as Periodic Tx FIFO or request queue space becomes available.
++ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
++ */
++static void process_periodic_channels(fh_otg_hcd_t * hcd)
++{
++	hptxsts_data_t tx_status;
++	fh_list_link_t *qh_ptr;
++	fh_otg_qh_t *qh;
++	int status;
++	int no_queue_space = 0;
++	int no_fifo_space = 0;
++
++	fh_otg_host_global_regs_t *host_regs;
++	host_regs = hcd->core_if->host_if->host_global_regs;
++
++	FH_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n");
++#ifdef DEBUG
++	tx_status.d32 = FH_READ_REG32(&host_regs->hptxsts);
++	FH_DEBUGPL(DBG_HCDV,
++		    "  P Tx Req Queue Space Avail (before queue): %d\n",
++		    tx_status.b.ptxqspcavail);
++	FH_DEBUGPL(DBG_HCDV, "  P Tx FIFO Space Avail (before queue): %d\n",
++		    tx_status.b.ptxfspcavail);
++#endif
++
++	qh_ptr = hcd->periodic_sched_assigned.next;
++	while (qh_ptr != &hcd->periodic_sched_assigned) {
++		tx_status.d32 = FH_READ_REG32(&host_regs->hptxsts);
++		if (tx_status.b.ptxqspcavail == 0) {
++			no_queue_space = 1;
++			break;
++		}
++
++		qh = FH_LIST_ENTRY(qh_ptr, fh_otg_qh_t, qh_list_entry);
++
++		/*
++		 * Set a flag if we're queuing high-bandwidth in slave mode.
++		 * The flag prevents any halts from getting into the request queue
++		 * while multiple high-bandwidth packets are being queued.
++		 */
++		if (!hcd->core_if->dma_enable && qh->channel->multi_count > 1) {
++			hcd->core_if->queuing_high_bandwidth = 1;
++		}
++		status =
++		    queue_transaction(hcd, qh->channel,
++				      tx_status.b.ptxfspcavail);
++		if (status < 0) {
++			no_fifo_space = 1;
++			break;
++		}
++
++		/*
++		 * In Slave mode, stay on the current transfer until there is
++		 * nothing more to do or the high-bandwidth request count is
++		 * reached. In DMA mode, only need to queue one request. The
++		 * controller automatically handles multiple packets for
++		 * high-bandwidth transfers.
++		 */
++		if (hcd->core_if->dma_enable || status == 0 ||
++		    qh->channel->requests == qh->channel->multi_count) {
++			qh_ptr = qh_ptr->next;
++			/*
++			 * Move the QH from the periodic assigned schedule to
++			 * the periodic queued schedule.
++			 */
++			FH_LIST_MOVE_HEAD(&hcd->periodic_sched_queued,
++					   &qh->qh_list_entry);
++
++			/* done queuing high bandwidth */
++			hcd->core_if->queuing_high_bandwidth = 0;
++		}
++	}
++
++	if (!hcd->core_if->dma_enable) {
++		fh_otg_core_global_regs_t *global_regs;
++		gintmsk_data_t intr_mask = {.d32 = 0 };
++
++		global_regs = hcd->core_if->core_global_regs;
++		intr_mask.b.ptxfempty = 1;
++#ifdef DEBUG
++		tx_status.d32 = FH_READ_REG32(&host_regs->hptxsts);
++		FH_DEBUGPL(DBG_HCDV,
++			    "  P Tx Req Queue Space Avail (after queue): %d\n",
++			    tx_status.b.ptxqspcavail);
++		FH_DEBUGPL(DBG_HCDV,
++			    "  P Tx FIFO Space Avail (after queue): %d\n",
++			    tx_status.b.ptxfspcavail);
++#endif
++		if (!FH_LIST_EMPTY(&hcd->periodic_sched_assigned) ||
++		    no_queue_space || no_fifo_space) {
++			/*
++			 * May need to queue more transactions as the request
++			 * queue or Tx FIFO empties. Enable the periodic Tx
++			 * FIFO empty interrupt. (Always use the half-empty
++			 * level to ensure that new requests are loaded as
++			 * soon as possible.)
++			 */
++			FH_MODIFY_REG32(&global_regs->gintmsk, 0,
++					 intr_mask.d32);
++		} else {
++			/*
++			 * Disable the Tx FIFO empty interrupt since there are
++			 * no more transactions that need to be queued right
++			 * now. This function is called from interrupt
++			 * handlers to queue more transactions as transfer
++			 * states change.
++			 */
++			FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32,
++					 0);
++		}
++	}
++}
++
++/**
++ * Processes active non-periodic channels and queues transactions for these
++ * channels to the FH_otg controller. After queueing transactions, the NP Tx
++ * FIFO Empty interrupt is enabled if there are more transactions to queue as
++ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
++ * FIFO Empty interrupt is disabled.
++ */
++static void process_non_periodic_channels(fh_otg_hcd_t * hcd)
++{
++	gnptxsts_data_t tx_status;
++	fh_list_link_t *orig_qh_ptr;
++	fh_otg_qh_t *qh;
++	int status;
++	int no_queue_space = 0;
++	int no_fifo_space = 0;
++	int more_to_do = 0;
++
++	fh_otg_core_global_regs_t *global_regs =
++	    hcd->core_if->core_global_regs;
++
++	FH_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
++#ifdef DEBUG
++	tx_status.d32 = FH_READ_REG32(&global_regs->gnptxsts);
++	FH_DEBUGPL(DBG_HCDV,
++		    "  NP Tx Req Queue Space Avail (before queue): %d\n",
++		    tx_status.b.nptxqspcavail);
++	FH_DEBUGPL(DBG_HCDV, "  NP Tx FIFO Space Avail (before queue): %d\n",
++		    tx_status.b.nptxfspcavail);
++#endif
++	/*
++	 * Keep track of the starting point. Skip over the start-of-list
++	 * entry.
++	 */
++	if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
++		hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
++	}
++	orig_qh_ptr = hcd->non_periodic_qh_ptr;
++
++	/*
++	 * Process once through the active list or until no more space is
++	 * available in the request queue or the Tx FIFO.
++	 */
++	do {
++		tx_status.d32 = FH_READ_REG32(&global_regs->gnptxsts);
++		if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) {
++			no_queue_space = 1;
++			break;
++		}
++
++		qh = FH_LIST_ENTRY(hcd->non_periodic_qh_ptr, fh_otg_qh_t,
++				    qh_list_entry);
++		status =
++		    queue_transaction(hcd, qh->channel,
++				      tx_status.b.nptxfspcavail);
++
++		if (status > 0) {
++			more_to_do = 1;
++		} else if (status < 0) {
++			no_fifo_space = 1;
++			break;
++		}
++
++		/* Advance to next QH, skipping start-of-list entry. */
++		hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
++		if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
++			hcd->non_periodic_qh_ptr =
++			    hcd->non_periodic_qh_ptr->next;
++		}
++
++	} while (hcd->non_periodic_qh_ptr != orig_qh_ptr);
++
++	if (!hcd->core_if->dma_enable) {
++		gintmsk_data_t intr_mask = {.d32 = 0 };
++		intr_mask.b.nptxfempty = 1;
++
++#ifdef DEBUG
++		tx_status.d32 = FH_READ_REG32(&global_regs->gnptxsts);
++		FH_DEBUGPL(DBG_HCDV,
++			    "  NP Tx Req Queue Space Avail (after queue): %d\n",
++			    tx_status.b.nptxqspcavail);
++		FH_DEBUGPL(DBG_HCDV,
++			    "  NP Tx FIFO Space Avail (after queue): %d\n",
++			    tx_status.b.nptxfspcavail);
++#endif
++		if (more_to_do || no_queue_space || no_fifo_space) {
++			/*
++			 * May need to queue more transactions as the request
++			 * queue or Tx FIFO empties. Enable the non-periodic
++			 * Tx FIFO empty interrupt. (Always use the half-empty
++			 * level to ensure that new requests are loaded as
++			 * soon as possible.)
++			 */
++			FH_MODIFY_REG32(&global_regs->gintmsk, 0,
++					 intr_mask.d32);
++		} else {
++			/*
++			 * Disable the Tx FIFO empty interrupt since there are
++			 * no more transactions that need to be queued right
++			 * now. This function is called from interrupt
++			 * handlers to queue more transactions as transfer
++			 * states change.
++			 */
++			FH_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32,
++					 0);
++		}
++	}
++}
++
++/**
++ * This function processes the currently active host channels and queues
++ * transactions for these channels to the FH_otg controller. It is called
++ * from HCD interrupt handler functions.
++ *
++ * @param hcd The HCD state structure.
++ * @param tr_type The type(s) of transactions to queue (non-periodic,
++ * periodic, or both).
++ */
++void fh_otg_hcd_queue_transactions(fh_otg_hcd_t * hcd,
++				    fh_otg_transaction_type_e tr_type)
++{
++#ifdef DEBUG_SOF
++	FH_DEBUGPL(DBG_HCD, "Queue Transactions\n");
++#endif
++	/* Process host channels associated with periodic transfers. */
++	if ((tr_type == FH_OTG_TRANSACTION_PERIODIC ||
++	     tr_type == FH_OTG_TRANSACTION_ALL) &&
++	    !FH_LIST_EMPTY(&hcd->periodic_sched_assigned)) {
++
++		process_periodic_channels(hcd);
++	}
++
++	/* Process host channels associated with non-periodic transfers. */
++	if (tr_type == FH_OTG_TRANSACTION_NON_PERIODIC ||
++	    tr_type == FH_OTG_TRANSACTION_ALL) {
++		if (!FH_LIST_EMPTY(&hcd->non_periodic_sched_active)) {
++			process_non_periodic_channels(hcd);
++		} else {
++			/*
++			 * Ensure NP Tx FIFO empty interrupt is disabled when
++			 * there are no non-periodic transfers to process.
++			 */
++			gintmsk_data_t gintmsk = {.d32 = 0 };
++			gintmsk.b.nptxfempty = 1;
++			FH_MODIFY_REG32(&hcd->core_if->
++					 core_global_regs->gintmsk, gintmsk.d32,
++					 0);
++		}
++	}
++}
++
++#ifdef FH_HS_ELECT_TST
++/*
++ * Quick and dirty hack to implement the HS Electrical Test
++ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
++ *
++ * This code was copied from our userspace app "hset". It sends a
++ * Get Device Descriptor control sequence in two parts, first the
++ * Setup packet by itself, followed some time later by the In and
++ * Ack packets. Rather than trying to figure out how to add this
++ * functionality to the normal driver code, we just hijack the
++ * hardware, using these two functions to drive the hardware
++ * directly.
++ */
++
++static fh_otg_core_global_regs_t *global_regs;
++static fh_otg_host_global_regs_t *hc_global_regs;
++static fh_otg_hc_regs_t *hc_regs;
++static uint32_t *data_fifo;
++
++static void do_setup(void)
++{
++	gintsts_data_t gintsts;
++	hctsiz_data_t hctsiz;
++	hcchar_data_t hcchar;
++	haint_data_t haint;
++	hcint_data_t hcint;
++
++	/* Enable HAINTs */
++	FH_WRITE_REG32(&hc_global_regs->haintmsk, 0x0001);
++
++	/* Enable HCINTs */
++	FH_WRITE_REG32(&hc_regs->hcintmsk, 0x04a3);
++
++	/* Read GINTSTS */
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/* Read HAINT */
++	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
++
++	/* Read HCINT */
++	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++
++	/* Read HCCHAR */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++	/* Clear HCINT */
++	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
++
++	/* Clear HAINT */
++	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
++
++	/* Clear GINTSTS */
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	/* Read GINTSTS */
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/*
++	 * Send Setup packet (Get Device Descriptor)
++	 */
++
++	/* Make sure channel is disabled */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	if (hcchar.b.chen) {
++		hcchar.b.chdis = 1;
++//              hcchar.b.chen = 1;
++		FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++		//sleep(1);
++		fh_mdelay(1000);
++
++		/* Read GINTSTS */
++		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++		/* Read HAINT */
++		haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
++
++		/* Read HCINT */
++		hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++
++		/* Read HCCHAR */
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++		/* Clear HCINT */
++		FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
++
++		/* Clear HAINT */
++		FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
++
++		/* Clear GINTSTS */
++		FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	}
++
++	/* Set HCTSIZ */
++	hctsiz.d32 = 0;
++	hctsiz.b.xfersize = 8;
++	hctsiz.b.pktcnt = 1;
++	hctsiz.b.pid = FH_OTG_HC_PID_SETUP;
++	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
++
++	/* Set HCCHAR */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	hcchar.b.eptype = FH_OTG_EP_TYPE_CONTROL;
++	hcchar.b.epdir = 0;
++	hcchar.b.epnum = 0;
++	hcchar.b.mps = 8;
++	hcchar.b.chen = 1;
++	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++
++	/* Fill FIFO with Setup data for Get Device Descriptor */
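++	/*
++	 * The two writes below are the little-endian image of the 8-byte
++	 * setup packet: bmRequestType=0x80, bRequest=0x06 (GET_DESCRIPTOR),
++	 * wValue=0x0100 (Device), wIndex=0x0000, wLength=0x0008.
++	 */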
++	data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
++	FH_WRITE_REG32(data_fifo++, 0x01000680);
++	FH_WRITE_REG32(data_fifo++, 0x00080000);
++
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/* Wait for host channel interrupt */
++	do {
++		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++	} while (gintsts.b.hcintr == 0);
++
++	/* Disable HCINTs */
++	FH_WRITE_REG32(&hc_regs->hcintmsk, 0x0000);
++
++	/* Disable HAINTs */
++	FH_WRITE_REG32(&hc_global_regs->haintmsk, 0x0000);
++
++	/* Read HAINT */
++	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
++
++	/* Read HCINT */
++	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++
++	/* Read HCCHAR */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++	/* Clear HCINT */
++	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
++
++	/* Clear HAINT */
++	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
++
++	/* Clear GINTSTS */
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	/* Read GINTSTS */
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++}
++
++static void do_in_ack(void)
++{
++	gintsts_data_t gintsts;
++	hctsiz_data_t hctsiz;
++	hcchar_data_t hcchar;
++	haint_data_t haint;
++	hcint_data_t hcint;
++	host_grxsts_data_t grxsts;
++
++	/* Enable HAINTs */
++	FH_WRITE_REG32(&hc_global_regs->haintmsk, 0x0001);
++
++	/* Enable HCINTs */
++	FH_WRITE_REG32(&hc_regs->hcintmsk, 0x04a3);
++
++	/* Read GINTSTS */
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/* Read HAINT */
++	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
++
++	/* Read HCINT */
++	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++
++	/* Read HCCHAR */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++	/* Clear HCINT */
++	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
++
++	/* Clear HAINT */
++	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
++
++	/* Clear GINTSTS */
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	/* Read GINTSTS */
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/*
++	 * Receive Control In packet
++	 */
++
++	/* Make sure channel is disabled */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	if (hcchar.b.chen) {
++		hcchar.b.chdis = 1;
++		hcchar.b.chen = 1;
++		FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++		//sleep(1);
++		fh_mdelay(1000);
++
++		/* Read GINTSTS */
++		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++		/* Read HAINT */
++		haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
++
++		/* Read HCINT */
++		hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++
++		/* Read HCCHAR */
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++		/* Clear HCINT */
++		FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
++
++		/* Clear HAINT */
++		FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
++
++		/* Clear GINTSTS */
++		FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	}
++
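++	/*
++	 * Program an 8-byte IN on EP0. The data stage of a control read
++	 * begins with DATA1, which is why the PID is set below.
++	 */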
++	/* Set HCTSIZ */
++	hctsiz.d32 = 0;
++	hctsiz.b.xfersize = 8;
++	hctsiz.b.pktcnt = 1;
++	hctsiz.b.pid = FH_OTG_HC_PID_DATA1;
++	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
++
++	/* Set HCCHAR */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	hcchar.b.eptype = FH_OTG_EP_TYPE_CONTROL;
++	hcchar.b.epdir = 1;
++	hcchar.b.epnum = 0;
++	hcchar.b.mps = 8;
++	hcchar.b.chen = 1;
++	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/* Wait for receive status queue interrupt */
++	do {
++		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++	} while (gintsts.b.rxstsqlvl == 0);
++
++	/* Read RXSTS */
++	grxsts.d32 = FH_READ_REG32(&global_regs->grxstsp);
++
++	/* Clear RXSTSQLVL in GINTSTS */
++	gintsts.d32 = 0;
++	gintsts.b.rxstsqlvl = 1;
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	switch (grxsts.b.pktsts) {
++	case FH_GRXSTS_PKTSTS_IN:
++		/* Read the data into the host buffer */
++		if (grxsts.b.bcnt > 0) {
++			int i;
++			int word_count = (grxsts.b.bcnt + 3) / 4;
++
++			data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
++
++			for (i = 0; i < word_count; i++) {
++				(void)FH_READ_REG32(data_fifo++);
++			}
++		}
++		break;
++
++	default:
++		break;
++	}
++
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/* Wait for receive status queue interrupt */
++	do {
++		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++	} while (gintsts.b.rxstsqlvl == 0);
++
++	/* Read RXSTS */
++	grxsts.d32 = FH_READ_REG32(&global_regs->grxstsp);
++
++	/* Clear RXSTSQLVL in GINTSTS */
++	gintsts.d32 = 0;
++	gintsts.b.rxstsqlvl = 1;
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	switch (grxsts.b.pktsts) {
++	case FH_GRXSTS_PKTSTS_IN_XFER_COMP:
++		break;
++
++	default:
++		break;
++	}
++
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/* Wait for host channel interrupt */
++	do {
++		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++	} while (gintsts.b.hcintr == 0);
++
++	/* Read HAINT */
++	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
++
++	/* Read HCINT */
++	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++
++	/* Read HCCHAR */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++	/* Clear HCINT */
++	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
++
++	/* Clear HAINT */
++	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
++
++	/* Clear GINTSTS */
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	/* Read GINTSTS */
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++//      usleep(100000);
++//      mdelay(100);
++	fh_mdelay(1);
++
++	/*
++	 * Send handshake packet
++	 */
++
++	/* Read HAINT */
++	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
++
++	/* Read HCINT */
++	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++
++	/* Read HCCHAR */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++	/* Clear HCINT */
++	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
++
++	/* Clear HAINT */
++	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
++
++	/* Clear GINTSTS */
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	/* Read GINTSTS */
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/* Make sure channel is disabled */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	if (hcchar.b.chen) {
++		hcchar.b.chdis = 1;
++		hcchar.b.chen = 1;
++		FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++		//sleep(1);
++		fh_mdelay(1000);
++
++		/* Read GINTSTS */
++		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++		/* Read HAINT */
++		haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
++
++		/* Read HCINT */
++		hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++
++		/* Read HCCHAR */
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++		/* Clear HCINT */
++		FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
++
++		/* Clear HAINT */
++		FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
++
++		/* Clear GINTSTS */
++		FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	}
++
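++	/*
++	 * The status stage of a control read is a zero-length OUT packet,
++	 * always sent with a DATA1 PID.
++	 */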
++	/* Set HCTSIZ */
++	hctsiz.d32 = 0;
++	hctsiz.b.xfersize = 0;
++	hctsiz.b.pktcnt = 1;
++	hctsiz.b.pid = FH_OTG_HC_PID_DATA1;
++	FH_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
++
++	/* Set HCCHAR */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	hcchar.b.eptype = FH_OTG_EP_TYPE_CONTROL;
++	hcchar.b.epdir = 0;
++	hcchar.b.epnum = 0;
++	hcchar.b.mps = 8;
++	hcchar.b.chen = 1;
++	FH_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
++
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++
++	/* Wait for host channel interrupt */
++	do {
++		gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++	} while (gintsts.b.hcintr == 0);
++
++	/* Disable HCINTs */
++	FH_WRITE_REG32(&hc_regs->hcintmsk, 0x0000);
++
++	/* Disable HAINTs */
++	FH_WRITE_REG32(&hc_global_regs->haintmsk, 0x0000);
++
++	/* Read HAINT */
++	haint.d32 = FH_READ_REG32(&hc_global_regs->haint);
++
++	/* Read HCINT */
++	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++
++	/* Read HCCHAR */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++
++	/* Clear HCINT */
++	FH_WRITE_REG32(&hc_regs->hcint, hcint.d32);
++
++	/* Clear HAINT */
++	FH_WRITE_REG32(&hc_global_regs->haint, haint.d32);
++
++	/* Clear GINTSTS */
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	/* Read GINTSTS */
++	gintsts.d32 = FH_READ_REG32(&global_regs->gintsts);
++}
++#endif
++
++/** Handles hub class-specific requests. */
++int fh_otg_hcd_hub_control(fh_otg_hcd_t * fh_otg_hcd,
++			    uint16_t typeReq,
++			    uint16_t wValue,
++			    uint16_t wIndex, uint8_t * buf, uint16_t wLength)
++{
++	int retval = 0;
++
++	fh_otg_core_if_t *core_if = fh_otg_hcd->core_if;
++	usb_hub_descriptor_t *hub_desc;
++	hprt0_data_t hprt0 = {.d32 = 0 };
++
++	uint32_t port_status;
++
++	switch (typeReq) {
++	case UCR_CLEAR_HUB_FEATURE:
++		FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++			    "ClearHubFeature 0x%x\n", wValue);
++		switch (wValue) {
++		case UHF_C_HUB_LOCAL_POWER:
++		case UHF_C_HUB_OVER_CURRENT:
++			/* Nothing required here */
++			break;
++		default:
++			retval = -FH_E_INVALID;
++			FH_ERROR("FH OTG HCD - "
++				  "ClearHubFeature request %xh unknown\n",
++				  wValue);
++		}
++		break;
++	case UCR_CLEAR_PORT_FEATURE:
++#ifdef CONFIG_USB_FH_OTG_LPM
++		if (wValue != UHF_PORT_L1)
++#endif
++			if (!wIndex || wIndex > 1)
++				goto error;
++
++		switch (wValue) {
++		case UHF_PORT_ENABLE:
++			FH_DEBUGPL(DBG_ANY, "FH OTG HCD HUB CONTROL - "
++				    "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
++			hprt0.d32 = fh_otg_read_hprt0(core_if);
++			hprt0.b.prtena = 1;
++			FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++			break;
++		case UHF_PORT_SUSPEND:
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
++
++			if (core_if->power_down == 2) {
++				fh_otg_host_hibernation_restore(core_if, 0, 0);
++			} else {
++				FH_WRITE_REG32(core_if->pcgcctl, 0);
++				fh_mdelay(5);
++
++				hprt0.d32 = fh_otg_read_hprt0(core_if);
++				hprt0.b.prtres = 1;
++				FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++				hprt0.b.prtsusp = 0;
++				/* Clear Resume bit */
++				fh_mdelay(100);
++				hprt0.b.prtres = 0;
++				FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++			}
++			break;
++#ifdef CONFIG_USB_FH_OTG_LPM
++		case UHF_PORT_L1:
++			{
++				pcgcctl_data_t pcgcctl = {.d32 = 0 };
++				glpmcfg_data_t lpmcfg = {.d32 = 0 };
++
++				lpmcfg.d32 =
++				    FH_READ_REG32(&core_if->
++						   core_global_regs->glpmcfg);
++				lpmcfg.b.en_utmi_sleep = 0;
++				lpmcfg.b.hird_thres &= (~(1 << 4));
++				lpmcfg.b.prt_sleep_sts = 1;
++				FH_WRITE_REG32(&core_if->
++						core_global_regs->glpmcfg,
++						lpmcfg.d32);
++
++				/* Clear Enbl_L1Gating bit. */
++				pcgcctl.b.enbl_sleep_gating = 1;
++				FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32,
++						 0);
++
++				fh_mdelay(5);
++
++				hprt0.d32 = fh_otg_read_hprt0(core_if);
++				hprt0.b.prtres = 1;
++				FH_WRITE_REG32(core_if->host_if->hprt0,
++						hprt0.d32);
++				/* This bit will be cleared in the wakeup interrupt handler */
++				break;
++			}
++#endif
++		case UHF_PORT_POWER:
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "ClearPortFeature USB_PORT_FEAT_POWER\n");
++			hprt0.d32 = fh_otg_read_hprt0(core_if);
++			hprt0.b.prtpwr = 0;
++			FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++			break;
++		case UHF_PORT_INDICATOR:
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
++			/* Port indicator not supported */
++			break;
++		case UHF_C_PORT_CONNECTION:
++			/* Clears the driver's internal connect status change
++			 * flag */
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
++			fh_otg_hcd->flags.b.port_connect_status_change = 0;
++			break;
++		case UHF_C_PORT_RESET:
++			/* Clears the driver's internal Port Reset Change
++			 * flag */
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
++			fh_otg_hcd->flags.b.port_reset_change = 0;
++			break;
++		case UHF_C_PORT_ENABLE:
++			/* Clears the driver's internal Port
++			 * Enable/Disable Change flag */
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
++			fh_otg_hcd->flags.b.port_enable_change = 0;
++			break;
++		case UHF_C_PORT_SUSPEND:
++			/* Clears the driver's internal Port Suspend
++			 * Change flag, which is set when resume signaling on
++			 * the host port is complete */
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
++			fh_otg_hcd->flags.b.port_suspend_change = 0;
++			break;
++#ifdef CONFIG_USB_FH_OTG_LPM
++		case UHF_C_PORT_L1:
++			fh_otg_hcd->flags.b.port_l1_change = 0;
++			break;
++#endif
++		case UHF_C_PORT_OVER_CURRENT:
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
++			fh_otg_hcd->flags.b.port_over_current_change = 0;
++			break;
++		default:
++			retval = -FH_E_INVALID;
++			FH_ERROR("FH OTG HCD - "
++				  "ClearPortFeature request %xh "
++				  "unknown or unsupported\n", wValue);
++		}
++		break;
++	case UCR_GET_HUB_DESCRIPTOR:
++		FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++			    "GetHubDescriptor\n");
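++		/*
++		 * Report a single-port root hub: descriptor type 0x29 (hub),
++		 * one downstream port, individual over-current reporting
++		 * (wHubCharacteristics = 0x08) and a power-on-to-power-good
++		 * delay of 1 unit (2 ms).
++		 */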
++		hub_desc = (usb_hub_descriptor_t *) buf;
++		hub_desc->bDescLength = 9;
++		hub_desc->bDescriptorType = 0x29;
++		hub_desc->bNbrPorts = 1;
++		USETW(hub_desc->wHubCharacteristics, 0x08);
++		hub_desc->bPwrOn2PwrGood = 1;
++		hub_desc->bHubContrCurrent = 0;
++		hub_desc->DeviceRemovable[0] = 0;
++		hub_desc->DeviceRemovable[1] = 0xff;
++		break;
++	case UCR_GET_HUB_STATUS:
++		FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++			    "GetHubStatus\n");
++		FH_MEMSET(buf, 0, 4);
++		break;
++	case UCR_GET_PORT_STATUS:
++		FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++			    "GetPortStatus wIndex = 0x%04x FLAGS=0x%08x\n",
++			    wIndex, fh_otg_hcd->flags.d32);
++		if (!wIndex || wIndex > 1)
++			goto error;
++
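++		/*
++		 * Assemble the 32-bit GetPortStatus reply: wPortStatus in the
++		 * low 16 bits, wPortChange in the high 16 bits (the UHF_C_*
++		 * feature numbers start at 16).
++		 */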
++		port_status = 0;
++
++		if (fh_otg_hcd->flags.b.port_connect_status_change)
++			port_status |= (1 << UHF_C_PORT_CONNECTION);
++
++		if (fh_otg_hcd->flags.b.port_enable_change)
++			port_status |= (1 << UHF_C_PORT_ENABLE);
++
++		if (fh_otg_hcd->flags.b.port_suspend_change)
++			port_status |= (1 << UHF_C_PORT_SUSPEND);
++
++		if (fh_otg_hcd->flags.b.port_l1_change)
++			port_status |= (1 << UHF_C_PORT_L1);
++
++		if (fh_otg_hcd->flags.b.port_reset_change) {
++			port_status |= (1 << UHF_C_PORT_RESET);
++		}
++
++		if (fh_otg_hcd->flags.b.port_over_current_change) {
++			FH_WARN("Overcurrent change detected\n");
++			port_status |= (1 << UHF_C_PORT_OVER_CURRENT);
++		}
++
++		if (!fh_otg_hcd->flags.b.port_connect_status) {
++			/*
++			 * The port is disconnected, which means the core is
++			 * either in device mode or it soon will be. Just
++			 * return 0's for the remainder of the port status
++			 * since the port register can't be read if the core
++			 * is in device mode.
++			 */
++			*((__le32 *) buf) = fh_cpu_to_le32(&port_status);
++			break;
++		}
++
++		hprt0.d32 = FH_READ_REG32(core_if->host_if->hprt0);
++		FH_DEBUGPL(DBG_HCDV, "  HPRT0: 0x%08x\n", hprt0.d32);
++
++		if (hprt0.b.prtconnsts)
++			port_status |= (1 << UHF_PORT_CONNECTION);
++
++		if (hprt0.b.prtena)
++			port_status |= (1 << UHF_PORT_ENABLE);
++
++		if (hprt0.b.prtsusp)
++			port_status |= (1 << UHF_PORT_SUSPEND);
++
++		if (hprt0.b.prtovrcurract)
++			port_status |= (1 << UHF_PORT_OVER_CURRENT);
++
++		if (hprt0.b.prtrst)
++			port_status |= (1 << UHF_PORT_RESET);
++
++		if (hprt0.b.prtpwr)
++			port_status |= (1 << UHF_PORT_POWER);
++
++		if (hprt0.b.prtspd == FH_HPRT0_PRTSPD_HIGH_SPEED)
++			port_status |= (1 << UHF_PORT_HIGH_SPEED);
++		else if (hprt0.b.prtspd == FH_HPRT0_PRTSPD_LOW_SPEED)
++			port_status |= (1 << UHF_PORT_LOW_SPEED);
++
++		if (hprt0.b.prttstctl)
++			port_status |= (1 << UHF_PORT_TEST);
++		if (fh_otg_get_lpm_portsleepstatus(fh_otg_hcd->core_if)) {
++			port_status |= (1 << UHF_PORT_L1);
++		}
++		/*
++		/*
++		 * For Synopsys HW emulation of power down, wkup_control
++		 * asserts hreset_n and prst_n on suspend, which causes HPRT0
++		 * to read as zero. We intentionally tell the software that
++		 * the port is in the L2/Suspend state. Only for STE.
++		 */
++		if ((core_if->power_down == 2)
++		    && (core_if->hibernation_suspend == 1)) {
++			port_status |= (1 << UHF_PORT_SUSPEND);
++		}
++		/* USB_PORT_FEAT_INDICATOR unsupported; always 0 */
++
++		*((__le32 *) buf) = fh_cpu_to_le32(&port_status);
++
++		break;
++	case UCR_SET_HUB_FEATURE:
++		FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++			    "SetHubFeature\n");
++		/* No HUB features supported */
++		break;
++	case UCR_SET_PORT_FEATURE:
++		if (wValue != UHF_PORT_TEST && (!wIndex || wIndex > 1))
++			goto error;
++
++		if (!fh_otg_hcd->flags.b.port_connect_status) {
++			/*
++			 * The port is disconnected, which means the core is
++			 * either in device mode or it soon will be. Just
++			 * return without doing anything since the port
++			 * register can't be written if the core is in device
++			 * mode.
++			 */
++			break;
++		}
++
++		switch (wValue) {
++		case UHF_PORT_SUSPEND:
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
++			if (fh_otg_hcd_otg_port(fh_otg_hcd) != wIndex) {
++				goto error;
++			}
++			if (core_if->power_down == 2) {
++				int timeout = 300;
++				fh_irqflags_t flags;
++				pcgcctl_data_t pcgcctl = {.d32 = 0 };
++				gpwrdn_data_t gpwrdn = {.d32 = 0 };
++				gusbcfg_data_t gusbcfg = {.d32 = 0 };
++#ifdef FH_DEV_SRPCAP
++				int32_t otg_cap_param = core_if->core_params->otg_cap;
++#endif
++				FH_PRINTF("Preparing for complete power-off\n");
++
++				/* Save registers before hibernation */
++				fh_otg_save_global_regs(core_if);
++				fh_otg_save_host_regs(core_if);
++
++				hprt0.d32 = fh_otg_read_hprt0(core_if);
++				hprt0.b.prtsusp = 1;
++				hprt0.b.prtena = 0;
++				FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++				/* Spin until hprt0.b.prtsusp becomes 1 */
++				do {
++					hprt0.d32 = fh_otg_read_hprt0(core_if);
++					if (hprt0.b.prtsusp) {
++						break;
++					}
++					fh_mdelay(1);
++				} while (--timeout);
++				if (!timeout) {
++					FH_WARN("Suspend wasn't generated\n");
++				}
++				fh_udelay(10);
++
++				/*
++				 * Disable interrupts to prevent servicing of
++				 * any IRQ while entering hibernation.
++				 */
++				FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);
++				core_if->lx_state = FH_OTG_L2;
++#ifdef FH_DEV_SRPCAP
++				hprt0.d32 = fh_otg_read_hprt0(core_if);
++				hprt0.b.prtpwr = 0;
++				hprt0.b.prtena = 0;
++				FH_WRITE_REG32(core_if->host_if->hprt0,
++						hprt0.d32);
++#endif
++				gusbcfg.d32 =
++				    FH_READ_REG32(&core_if->core_global_regs->
++						   gusbcfg);
++				if (gusbcfg.b.ulpi_utmi_sel == 1) {
++					/* ULPI interface */
++					/* Suspend the Phy Clock */
++					pcgcctl.d32 = 0;
++					pcgcctl.b.stoppclk = 1;
++					FH_MODIFY_REG32(core_if->pcgcctl, 0,
++							 pcgcctl.d32);
++					fh_udelay(10);
++					gpwrdn.b.pmuactv = 1;
++					FH_MODIFY_REG32(&core_if->
++							 core_global_regs->
++							 gpwrdn, 0, gpwrdn.d32);
++				} else {
++					/* UTMI+ Interface */
++					gpwrdn.b.pmuactv = 1;
++					FH_MODIFY_REG32(&core_if->
++							 core_global_regs->
++							 gpwrdn, 0, gpwrdn.d32);
++					fh_udelay(10);
++					pcgcctl.b.stoppclk = 1;
++					FH_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
++					fh_udelay(10);
++				}
++#ifdef FH_DEV_SRPCAP				
++				gpwrdn.d32 = 0;
++				gpwrdn.b.dis_vbus = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++#endif
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pmuintsel = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++				fh_udelay(10);
++
++				gpwrdn.d32 = 0;
++#ifdef FH_DEV_SRPCAP
++				gpwrdn.b.srp_det_msk = 1;
++#endif
++				gpwrdn.b.disconn_det_msk = 1;
++				gpwrdn.b.lnstchng_msk = 1;
++				gpwrdn.b.sts_chngint_msk = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++				fh_udelay(10);
++
++				/* Enable Power Down Clamp and all interrupts in GPWRDN */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pwrdnclmp = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++				fh_udelay(10);
++
++				/* Switch off VDD */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pwrdnswtch = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++
++#ifdef FH_DEV_SRPCAP
++				if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE)
++				{
++					core_if->pwron_timer_started = 1;
++					FH_TIMER_SCHEDULE(core_if->pwron_timer, 6000 /* 6 secs */ );
++				}
++#endif
++				/* Save gpwrdn register for later use by the stschng interrupt handler */
++				core_if->gr_backup->gpwrdn_local =
++						FH_READ_REG32(&core_if->core_global_regs->gpwrdn);
++
++				/* Set flag to indicate that we are in hibernation */
++				core_if->hibernation_suspend = 1;
++				FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock,flags);
++
++				FH_PRINTF("Host hibernation completed\n");
++				// Exit from case statement
++				break;
++
++			}
++			if (fh_otg_hcd_otg_port(fh_otg_hcd) == wIndex &&
++			    fh_otg_hcd->fops->get_b_hnp_enable(fh_otg_hcd)) {
++				gotgctl_data_t gotgctl = {.d32 = 0 };
++				gotgctl.b.hstsethnpen = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gotgctl, 0, gotgctl.d32);
++				core_if->op_state = A_SUSPEND;
++			}
++			hprt0.d32 = fh_otg_read_hprt0(core_if);
++			hprt0.b.prtsusp = 1;
++			FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++			{
++				fh_irqflags_t flags;
++				/* Update lx_state */
++				FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);
++				core_if->lx_state = FH_OTG_L2;
++				FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, flags);
++			}
++			/* Suspend the Phy Clock */
++			if (core_if->otg_ver == 0) {
++				pcgcctl_data_t pcgcctl = {.d32 = 0 };
++				pcgcctl.b.stoppclk = 1;
++				FH_MODIFY_REG32(core_if->pcgcctl, 0,
++						 pcgcctl.d32);
++				fh_udelay(10);
++			}
++
++			/* For HNP the bus must be suspended for at least 200ms. */
++			if (fh_otg_hcd->fops->get_b_hnp_enable(fh_otg_hcd)) {
++				if (core_if->otg_ver) {
++					pcgcctl_data_t pcgcctl = {.d32 = 0 };
++					pcgcctl.b.stoppclk = 1;	
++					FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
++				}
++				fh_mdelay(200);
++			}
++
++			/** @todo Check how software can wait for 1 second to check asesvld. */
++#if 0	/* vahrama */
++			if (core_if->adp_enable) {
++				gotgctl_data_t gotgctl = {.d32 = 0 };
++				gpwrdn_data_t gpwrdn;
++
++				while (gotgctl.b.asesvld == 1) {
++					gotgctl.d32 =
++					    FH_READ_REG32(&core_if->
++							   core_global_regs->
++							   gotgctl);
++					fh_mdelay(100);
++				}
++
++				/* Enable Power Down Logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pmuactv = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++
++				/* Unmask SRP detected interrupt from Power Down Logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.srp_det_msk = 1;
++				FH_MODIFY_REG32(&core_if->core_global_regs->
++						 gpwrdn, 0, gpwrdn.d32);
++
++				fh_otg_adp_probe_start(core_if);
++			}
++#endif
++			break;
++		case UHF_PORT_POWER:
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "SetPortFeature - USB_PORT_FEAT_POWER\n");
++			hprt0.d32 = fh_otg_read_hprt0(core_if);
++			hprt0.b.prtpwr = 1;
++			FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++			break;
++		case UHF_PORT_RESET:
++			if ((core_if->power_down == 2)
++			    && (core_if->hibernation_suspend == 1)) {
++				/* Exit from the hibernated state via USB
++				 * RESET. */
++				fh_otg_host_hibernation_restore(core_if, 0, 1);
++			} else {
++				hprt0.d32 = fh_otg_read_hprt0(core_if);
++
++				FH_DEBUGPL(DBG_HCD,
++					    "FH OTG HCD HUB CONTROL - "
++					    "SetPortFeature - USB_PORT_FEAT_RESET\n");
++				{
++					pcgcctl_data_t pcgcctl = {.d32 = 0 };
++					pcgcctl.b.enbl_sleep_gating = 1;
++					pcgcctl.b.stoppclk = 1;
++					FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
++					FH_WRITE_REG32(core_if->pcgcctl, 0);
++				}
++#ifdef CONFIG_USB_FH_OTG_LPM
++				{
++					glpmcfg_data_t lpmcfg;
++					lpmcfg.d32 =
++						FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++					if (lpmcfg.b.prt_sleep_sts) {
++						lpmcfg.b.en_utmi_sleep = 0;
++						lpmcfg.b.hird_thres &= (~(1 << 4));
++						FH_WRITE_REG32
++						    (&core_if->core_global_regs->glpmcfg,
++						     lpmcfg.d32);
++						fh_mdelay(1);
++					}
++				}
++#endif
++				hprt0.d32 = fh_otg_read_hprt0(core_if);
++				/* Clear suspend bit if resetting from suspended state. */
++				hprt0.b.prtsusp = 0;
++				/* When acting as B-Host, the port reset bit
++				 * is set in the Start HCD callback function
++				 * so that the reset starts within 1 ms of
++				 * the HNP success interrupt. */
++				if (!fh_otg_hcd_is_b_host(fh_otg_hcd)) {
++					hprt0.b.prtpwr = 1;
++					hprt0.b.prtrst = 1;
++					FH_PRINTF("Host mode confirmed, hprt0 = %08x\n", hprt0.d32);
++					FH_WRITE_REG32(core_if->host_if->hprt0,
++							hprt0.d32);
++				}
++				/* Hold reset at least 10 ms (FS/LS) or 50 ms (HS); 60 ms covers both */
++				fh_mdelay(60);
++				hprt0.b.prtrst = 0;
++				FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++				core_if->lx_state = FH_OTG_L0;	/* Now back to the on state */
++			}
++			break;
++#ifdef FH_HS_ELECT_TST
++		case UHF_PORT_TEST:
++			{
++				uint32_t t;
++				gintmsk_data_t gintmsk;
++
++				t = (wIndex >> 8);	/* Test selector is in the MSB of wIndex */
++				FH_DEBUGPL(DBG_HCD,
++					    "FH OTG HCD HUB CONTROL - "
++					    "SetPortFeature - USB_PORT_FEAT_TEST %d\n",
++					    t);
++				FH_WARN("USB_PORT_FEAT_TEST %d\n", t);
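++				/*
++				 * Selectors 1..5 (Test_J, Test_K,
++				 * Test_SE0_NAK, Test_Packet,
++				 * Test_Force_Enable) map directly onto
++				 * HPRT0.PrtTstCtl; 6..8 are the electrical
++				 * test procedures handled below.
++				 */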
++				if (t < 6) {
++					hprt0.d32 = fh_otg_read_hprt0(core_if);
++					hprt0.b.prttstctl = t;
++					FH_WRITE_REG32(core_if->host_if->hprt0,
++							hprt0.d32);
++				} else {
++					/* Setup global vars with reg addresses (quick and
++					 * dirty hack, should be cleaned up)
++					 */
++					global_regs = core_if->core_global_regs;
++					hc_global_regs =
++					    core_if->host_if->host_global_regs;
++					hc_regs =
++					    (fh_otg_hc_regs_t *) ((char *)
++								   global_regs +
++								   0x500);
++					data_fifo =
++					    (uint32_t *) ((char *)global_regs +
++							  0x1000);
++
++					if (t == 6) {	/* HS_HOST_PORT_SUSPEND_RESUME */
++						/* Save current interrupt mask */
++						gintmsk.d32 =
++						    FH_READ_REG32
++						    (&global_regs->gintmsk);
++
++						/* Disable all interrupts while we muck with
++						 * the hardware directly
++						 */
++						FH_WRITE_REG32(&global_regs->gintmsk, 0);
++
++						/* 15 second delay per the test spec */
++						fh_mdelay(15000);
++
++						/* Drive suspend on the root port */
++						hprt0.d32 =
++						    fh_otg_read_hprt0(core_if);
++						hprt0.b.prtsusp = 1;
++						hprt0.b.prtres = 0;
++						FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++
++						/* 15 second delay per the test spec */
++						fh_mdelay(15000);
++
++						/* Drive resume on the root port */
++						hprt0.d32 =
++						    fh_otg_read_hprt0(core_if);
++						hprt0.b.prtsusp = 0;
++						hprt0.b.prtres = 1;
++						FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++						fh_mdelay(100);
++
++						/* Clear the resume bit */
++						hprt0.b.prtres = 0;
++						FH_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
++
++						/* Restore interrupts */
++						FH_WRITE_REG32(&global_regs->gintmsk, gintmsk.d32);
++					} else if (t == 7) {	/* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
++						/* Save current interrupt mask */
++						gintmsk.d32 =
++						    FH_READ_REG32
++						    (&global_regs->gintmsk);
++
++						/* Disable all interrupts while we muck with
++						 * the hardware directly
++						 */
++						FH_WRITE_REG32(&global_regs->gintmsk, 0);
++
++						/* 15 second delay per the test spec */
++						fh_mdelay(15000);
++
++						/* Send the Setup packet */
++						do_setup();
++
++						/* 15 second delay so nothing else happens for a while */
++						fh_mdelay(15000);
++
++						/* Restore interrupts */
++						FH_WRITE_REG32(&global_regs->gintmsk, gintmsk.d32);
++					} else if (t == 8) {	/* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
++						/* Save current interrupt mask */
++						gintmsk.d32 =
++						    FH_READ_REG32
++						    (&global_regs->gintmsk);
++
++						/* Disable all interrupts while we muck with
++						 * the hardware directly
++						 */
++						FH_WRITE_REG32(&global_regs->gintmsk, 0);
++
++						/* Send the Setup packet */
++						do_setup();
++
++						/* 15 second delay so nothing else happens for a while */
++						fh_mdelay(15000);
++
++						/* Send the In and Ack packets */
++						do_in_ack();
++
++						/* 15 second delay so nothing else happens for a while */
++						fh_mdelay(15000);
++
++						/* Restore interrupts */
++						FH_WRITE_REG32(&global_regs->gintmsk, gintmsk.d32);
++					}
++				}
++				break;
++			}
++#endif /* FH_HS_ELECT_TST */
++
++		case UHF_PORT_INDICATOR:
++			FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB CONTROL - "
++				    "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
++			/* Not supported */
++			break;
++		default:
++			retval = -FH_E_INVALID;
++			FH_ERROR("FH OTG HCD - "
++				  "SetPortFeature request %xh "
++				  "unknown or unsupported\n", wValue);
++			break;
++		}
++		break;
++#ifdef CONFIG_USB_FH_OTG_LPM
++	case UCR_SET_AND_TEST_PORT_FEATURE:
++		if (wValue != UHF_PORT_L1) {
++			goto error;
++		}
++		{
++			int portnum, hird, devaddr, remwake;
++			glpmcfg_data_t lpmcfg;
++			uint32_t time_usecs;
++			gintsts_data_t gintsts;
++			gintmsk_data_t gintmsk;
++
++			if (!fh_otg_get_param_lpm_enable(core_if)) {
++				goto error;
++			}
++			if (wValue != UHF_PORT_L1 || wLength != 1) {
++				goto error;
++			}
++			/* Check if the port currently is in SLEEP state */
++			lpmcfg.d32 =
++			    FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++			if (lpmcfg.b.prt_sleep_sts) {
++				FH_INFO("Port is already in sleep mode\n");
++				buf[0] = 0;	/* Return success */
++				break;
++			}
++
++			portnum = wIndex & 0xf;
++			hird = (wIndex >> 4) & 0xf;
++			devaddr = (wIndex >> 8) & 0x7f;
++			remwake = (wIndex >> 15);
++
++			if (portnum != 1) {
++				retval = -FH_E_INVALID;
++				FH_WARN
++				    ("Wrong port number(%d) in SetandTestPortFeature request\n",
++				     portnum);
++				break;
++			}
++
++			FH_PRINTF
++			    ("SetandTestPortFeature request: portnum = %d, hird = %d, devaddr = %d, remwake = %d\n",
++			     portnum, hird, devaddr, remwake);
++			/* Disable LPM interrupt */
++			gintmsk.d32 = 0;
++			gintmsk.b.lpmtranrcvd = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
++					 gintmsk.d32, 0);
++
++			if (fh_otg_hcd_send_lpm
++			    (fh_otg_hcd, devaddr, hird, remwake)) {
++				retval = -FH_E_INVALID;
++				break;
++			}
++
++			time_usecs = 10 * (lpmcfg.b.retry_count + 1);
++			/* Consider it a timeout if time_usecs microseconds
++			 * pass without receiving LPM transaction status.
++			 * After a non-error response (ACK/NYET/STALL) from
++			 * the device, the core sets the lpmtranrcvd bit.
++			 */
++			do {
++				gintsts.d32 =
++				    FH_READ_REG32(&core_if->core_global_regs->gintsts);
++				if (gintsts.b.lpmtranrcvd) {
++					break;
++				}
++				fh_udelay(1);
++			} while (--time_usecs);
++			/* lpm_int bit will be cleared in LPM interrupt handler */
++
++			/* Now fill in the completion status
++			 * 0x0 - Success
++			 * 0x2 - NYET
++			 * 0x3 - Timeout
++			 */
++			if (!gintsts.b.lpmtranrcvd) {
++				buf[0] = 0x3;	/* Completion code is Timeout */
++				fh_otg_hcd_free_hc_from_lpm(fh_otg_hcd);
++			} else {
++				lpmcfg.d32 =
++				    FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++				if (lpmcfg.b.lpm_resp == 0x3) {
++					/* ACK response from the device */
++					buf[0] = 0x00;	/* Success */
++				} else if (lpmcfg.b.lpm_resp == 0x2) {
++					/* NYET response from the device */
++					buf[0] = 0x2;
++				} else {
++					/* Otherwise report Timeout */
++					buf[0] = 0x3;
++				}
++			}
++			FH_PRINTF("Device response to LPM transaction is %x\n",
++				   lpmcfg.b.lpm_resp);
++			FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0,
++					 gintmsk.d32);
++
++			break;
++		}
++#endif /* CONFIG_USB_FH_OTG_LPM */
++	default:
++error:
++		retval = -FH_E_INVALID;
++		FH_WARN("FH OTG HCD - "
++			 "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
++			 typeReq, wIndex, wValue);
++		break;
++	}
++
++	return retval;
++}
++
++#ifdef CONFIG_USB_FH_OTG_LPM
++/** Returns index of host channel to perform LPM transaction. */
++int fh_otg_hcd_get_hc_for_lpm_tran(fh_otg_hcd_t * hcd, uint8_t devaddr)
++{
++	fh_otg_core_if_t *core_if = hcd->core_if;
++	fh_hc_t *hc;
++	hcchar_data_t hcchar = {.d32 = 0 };	/* don't write stale stack bits to HCCHAR */
++	gintmsk_data_t gintmsk = {.d32 = 0 };
++
++	if (FH_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
++		FH_PRINTF("No free channel to select for LPM transaction\n");
++		return -1;
++	}
++
++	hc = FH_CIRCLEQ_FIRST(&hcd->free_hc_list);
++
++	/* Mask host channel interrupts. */
++	gintmsk.b.hcintr = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
++
++	/* Fill fields that core needs for LPM transaction */
++	hcchar.b.devaddr = devaddr;
++	hcchar.b.epnum = 0;
++	hcchar.b.eptype = FH_OTG_EP_TYPE_CONTROL;
++	hcchar.b.mps = 64;
++	hcchar.b.lspddev = (hc->speed == FH_OTG_EP_SPEED_LOW);
++	hcchar.b.epdir = 0;	/* OUT */
++	FH_WRITE_REG32(&core_if->host_if->hc_regs[hc->hc_num]->hcchar,
++			hcchar.d32);
++
++	/* Remove the host channel from the free list. */
++	FH_CIRCLEQ_REMOVE_INIT(&hcd->free_hc_list, hc, hc_list_entry);
++
++	FH_PRINTF("hcnum = %d devaddr = %d\n", hc->hc_num, devaddr);
++
++	return hc->hc_num;
++}
++
++/** Release hc after performing LPM transaction */
++void fh_otg_hcd_free_hc_from_lpm(fh_otg_hcd_t * hcd)
++{
++	fh_hc_t *hc;
++	glpmcfg_data_t lpmcfg;
++	uint8_t hc_num;
++
++	lpmcfg.d32 = FH_READ_REG32(&hcd->core_if->core_global_regs->glpmcfg);
++	hc_num = lpmcfg.b.lpm_chan_index;
++
++	hc = hcd->hc_ptr_array[hc_num];
++
++	FH_PRINTF("Freeing channel %d after LPM\n", hc_num);
++	/* Return host channel to free list */
++	FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
++}
++
++int fh_otg_hcd_send_lpm(fh_otg_hcd_t * hcd, uint8_t devaddr, uint8_t hird,
++			 uint8_t bRemoteWake)
++{
++	glpmcfg_data_t lpmcfg;
++	pcgcctl_data_t pcgcctl = {.d32 = 0 };
++	int channel;
++
++	channel = fh_otg_hcd_get_hc_for_lpm_tran(hcd, devaddr);
++	if (channel < 0) {
++		return channel;
++	}
++
++	pcgcctl.b.enbl_sleep_gating = 1;
++	FH_MODIFY_REG32(hcd->core_if->pcgcctl, 0, pcgcctl.d32);
++
++	/* Read LPM config register */
++	lpmcfg.d32 = FH_READ_REG32(&hcd->core_if->core_global_regs->glpmcfg);
++
++	/* Program LPM transaction fields */
++	lpmcfg.b.rem_wkup_en = bRemoteWake;
++	lpmcfg.b.hird = hird;
++	
++	if(fh_otg_get_param_besl_enable(hcd->core_if)) {
++		lpmcfg.b.hird_thres = 0x16;
++		lpmcfg.b.en_besl = 1;
++	} else {
++		lpmcfg.b.hird_thres = 0x1c;
++	}
++	
++	lpmcfg.b.lpm_chan_index = channel;
++	lpmcfg.b.en_utmi_sleep = 1;
++	/* Program LPM config register */
++	FH_WRITE_REG32(&hcd->core_if->core_global_regs->glpmcfg, lpmcfg.d32);
++
++	/* Send LPM transaction */
++	lpmcfg.b.send_lpm = 1;
++	FH_WRITE_REG32(&hcd->core_if->core_global_regs->glpmcfg, lpmcfg.d32);
++
++	return 0;
++}
++
++#endif /* CONFIG_USB_FH_OTG_LPM */
++
++int fh_otg_hcd_is_status_changed(fh_otg_hcd_t * hcd, int port)
++{
++	int retval;
++
++	if (port != 1) {
++		return -FH_E_INVALID;
++	}
++
++	retval = (hcd->flags.b.port_connect_status_change ||
++		  hcd->flags.b.port_reset_change ||
++		  hcd->flags.b.port_enable_change ||
++		  hcd->flags.b.port_suspend_change ||
++		  hcd->flags.b.port_over_current_change);
++#ifdef DEBUG
++	if (retval) {
++		FH_DEBUGPL(DBG_HCD, "FH OTG HCD HUB STATUS DATA:"
++			    " Root port status changed\n");
++		FH_DEBUGPL(DBG_HCDV, "  port_connect_status_change: %d\n",
++			    hcd->flags.b.port_connect_status_change);
++		FH_DEBUGPL(DBG_HCDV, "  port_reset_change: %d\n",
++			    hcd->flags.b.port_reset_change);
++		FH_DEBUGPL(DBG_HCDV, "  port_enable_change: %d\n",
++			    hcd->flags.b.port_enable_change);
++		FH_DEBUGPL(DBG_HCDV, "  port_suspend_change: %d\n",
++			    hcd->flags.b.port_suspend_change);
++		FH_DEBUGPL(DBG_HCDV, "  port_over_current_change: %d\n",
++			    hcd->flags.b.port_over_current_change);
++	}
++#endif
++	return retval;
++}
++
++int fh_otg_hcd_get_frame_number(fh_otg_hcd_t * fh_otg_hcd)
++{
++	hfnum_data_t hfnum;
++	hfnum.d32 =
++	    FH_READ_REG32(&fh_otg_hcd->core_if->host_if->host_global_regs->
++			   hfnum);
++
++#ifdef DEBUG_SOF
++	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD GET FRAME NUMBER %d\n",
++		    hfnum.b.frnum);
++#endif
++	return hfnum.b.frnum;
++}
++
++int fh_otg_hcd_start(fh_otg_hcd_t * hcd,
++		      struct fh_otg_hcd_function_ops *fops)
++{
++	int retval = 0;
++	hprt0_data_t hprt0;
++
++	hcd->fops = fops;
++	if (!fh_otg_is_device_mode(hcd->core_if) && 
++		(!hcd->core_if->adp_enable || hcd->core_if->adp.adp_started)) {
++		fh_otg_hcd_reinit(hcd);
++	} else {
++		if (hcd->core_if->adp_enable) {
++			/* Clear any interrupts pending in HPRT; sometimes
++			 * Port Connect Detected is not cleared otherwise. */
++			hprt0.d32 = FH_READ_REG32(hcd->core_if->host_if->hprt0);
++			FH_WRITE_REG32(hcd->core_if->host_if->hprt0, hprt0.d32);
++		}
++		retval = -FH_E_NO_DEVICE;
++	}
++
++	return retval;
++}
++
++void *fh_otg_hcd_get_priv_data(fh_otg_hcd_t * hcd)
++{
++	return hcd->priv;
++}
++
++void fh_otg_hcd_set_priv_data(fh_otg_hcd_t * hcd, void *priv_data)
++{
++	hcd->priv = priv_data;
++}
++
++uint32_t fh_otg_hcd_otg_port(fh_otg_hcd_t * hcd)
++{
++	return hcd->otg_port;
++}
++
++uint32_t fh_otg_hcd_is_b_host(fh_otg_hcd_t * hcd)
++{
++	uint32_t is_b_host;
++	if (hcd->core_if->op_state == B_HOST) {
++		is_b_host = 1;
++	} else {
++		is_b_host = 0;
++	}
++
++	return is_b_host;
++}
++
++fh_otg_hcd_urb_t *fh_otg_hcd_urb_alloc(fh_otg_hcd_t * hcd,
++					 int iso_desc_count, int atomic_alloc)
++{
++	fh_otg_hcd_urb_t *fh_otg_urb;
++	uint32_t size;
++
++	size =
++	    sizeof(*fh_otg_urb) +
++	    iso_desc_count * sizeof(struct fh_otg_hcd_iso_packet_desc);
++	if (atomic_alloc)
++		fh_otg_urb = FH_ALLOC_ATOMIC(size);
++	else
++		fh_otg_urb = FH_ALLOC(size);
++
++	if (!fh_otg_urb)
++		return NULL;
++
++	fh_otg_urb->packet_count = iso_desc_count;
++
++	return fh_otg_urb;
++}
++
++void fh_otg_hcd_urb_set_pipeinfo(fh_otg_hcd_urb_t * fh_otg_urb,
++				  uint8_t dev_addr, uint8_t ep_num,
++				  uint8_t ep_type, uint8_t ep_dir, uint16_t mps)
++{
++	fh_otg_hcd_fill_pipe(&fh_otg_urb->pipe_info, dev_addr, ep_num,
++			      ep_type, ep_dir, mps);
++#if 0
++	FH_PRINTF
++	    ("addr = %d, ep_num = %d, ep_dir = 0x%x, ep_type = 0x%x, mps = %d\n",
++	     dev_addr, ep_num, ep_dir, ep_type, mps);
++#endif
++}
++
++void fh_otg_hcd_urb_set_params(fh_otg_hcd_urb_t * fh_otg_urb,
++				void *urb_handle, void *buf, fh_dma_t dma,
++				uint32_t buflen, void *setup_packet,
++				fh_dma_t setup_dma, uint32_t flags,
++				uint16_t interval)
++{
++	fh_otg_urb->priv = urb_handle;
++	fh_otg_urb->buf = buf;
++	fh_otg_urb->dma = dma;
++	fh_otg_urb->length = buflen;
++	fh_otg_urb->setup_packet = setup_packet;
++	fh_otg_urb->setup_dma = setup_dma;
++	fh_otg_urb->flags = flags;
++	fh_otg_urb->interval = interval;
++	fh_otg_urb->status = -FH_E_IN_PROGRESS;
++}
++
++uint32_t fh_otg_hcd_urb_get_status(fh_otg_hcd_urb_t * fh_otg_urb)
++{
++	return fh_otg_urb->status;
++}
++
++uint32_t fh_otg_hcd_urb_get_actual_length(fh_otg_hcd_urb_t * fh_otg_urb)
++{
++	return fh_otg_urb->actual_length;
++}
++
++uint32_t fh_otg_hcd_urb_get_error_count(fh_otg_hcd_urb_t * fh_otg_urb)
++{
++	return fh_otg_urb->error_count;
++}
++
++void fh_otg_hcd_urb_set_iso_desc_params(fh_otg_hcd_urb_t * fh_otg_urb,
++					 int desc_num, uint32_t offset,
++					 uint32_t length)
++{
++	fh_otg_urb->iso_descs[desc_num].offset = offset;
++	fh_otg_urb->iso_descs[desc_num].length = length;
++}
++
++uint32_t fh_otg_hcd_urb_get_iso_desc_status(fh_otg_hcd_urb_t * fh_otg_urb,
++					     int desc_num)
++{
++	return fh_otg_urb->iso_descs[desc_num].status;
++}
++
++uint32_t fh_otg_hcd_urb_get_iso_desc_actual_length(fh_otg_hcd_urb_t *
++						    fh_otg_urb, int desc_num)
++{
++	return fh_otg_urb->iso_descs[desc_num].actual_length;
++}
++
++int fh_otg_hcd_is_bandwidth_allocated(fh_otg_hcd_t * hcd, void *ep_handle)
++{
++	int allocated = 0;
++	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;
++
++	if (qh) {
++		if (!FH_LIST_EMPTY(&qh->qh_list_entry)) {
++			allocated = 1;
++		}
++	}
++	return allocated;
++}
++
++int fh_otg_hcd_is_bandwidth_freed(fh_otg_hcd_t * hcd, void *ep_handle)
++{
++	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;
++	int freed = 0;
++	FH_ASSERT(qh, "qh is not allocated\n");
++
++	if (FH_LIST_EMPTY(&qh->qh_list_entry)) {
++		freed = 1;
++	}
++
++	return freed;
++}
++
++uint8_t fh_otg_hcd_get_ep_bandwidth(fh_otg_hcd_t * hcd, void *ep_handle)
++{
++	fh_otg_qh_t *qh = (fh_otg_qh_t *) ep_handle;
++	FH_ASSERT(qh, "qh is not allocated\n");
++	return qh->usecs;
++}
++
++void fh_otg_hcd_dump_state(fh_otg_hcd_t * hcd)
++{
++#ifdef DEBUG
++	int num_channels;
++	int i;
++	gnptxsts_data_t np_tx_status;
++	hptxsts_data_t p_tx_status;
++
++	num_channels = hcd->core_if->core_params->host_channels;
++	FH_PRINTF("\n");
++	FH_PRINTF
++	    ("************************************************************\n");
++	FH_PRINTF("HCD State:\n");
++	FH_PRINTF("  Num channels: %d\n", num_channels);
++	for (i = 0; i < num_channels; i++) {
++		fh_hc_t *hc = hcd->hc_ptr_array[i];
++		FH_PRINTF("  Channel %d:\n", i);
++		FH_PRINTF("    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
++			   hc->dev_addr, hc->ep_num, hc->ep_is_in);
++		FH_PRINTF("    speed: %d\n", hc->speed);
++		FH_PRINTF("    ep_type: %d\n", hc->ep_type);
++		FH_PRINTF("    max_packet: %d\n", hc->max_packet);
++		FH_PRINTF("    data_pid_start: %d\n", hc->data_pid_start);
++		FH_PRINTF("    multi_count: %d\n", hc->multi_count);
++		FH_PRINTF("    xfer_started: %d\n", hc->xfer_started);
++		FH_PRINTF("    xfer_buff: %p\n", hc->xfer_buff);
++		FH_PRINTF("    xfer_len: %d\n", hc->xfer_len);
++		FH_PRINTF("    xfer_count: %d\n", hc->xfer_count);
++		FH_PRINTF("    halt_on_queue: %d\n", hc->halt_on_queue);
++		FH_PRINTF("    halt_pending: %d\n", hc->halt_pending);
++		FH_PRINTF("    halt_status: %d\n", hc->halt_status);
++		FH_PRINTF("    do_split: %d\n", hc->do_split);
++		FH_PRINTF("    complete_split: %d\n", hc->complete_split);
++		FH_PRINTF("    hub_addr: %d\n", hc->hub_addr);
++		FH_PRINTF("    port_addr: %d\n", hc->port_addr);
++		FH_PRINTF("    xact_pos: %d\n", hc->xact_pos);
++		FH_PRINTF("    requests: %d\n", hc->requests);
++		FH_PRINTF("    qh: %p\n", hc->qh);
++		if (hc->xfer_started) {
++			hfnum_data_t hfnum;
++			hcchar_data_t hcchar;
++			hctsiz_data_t hctsiz;
++			hcint_data_t hcint;
++			hcintmsk_data_t hcintmsk;
++			hfnum.d32 =
++			    FH_READ_REG32(&hcd->core_if->
++					   host_if->host_global_regs->hfnum);
++			hcchar.d32 =
++			    FH_READ_REG32(&hcd->core_if->host_if->
++					   hc_regs[i]->hcchar);
++			hctsiz.d32 =
++			    FH_READ_REG32(&hcd->core_if->host_if->
++					   hc_regs[i]->hctsiz);
++			hcint.d32 =
++			    FH_READ_REG32(&hcd->core_if->host_if->
++					   hc_regs[i]->hcint);
++			hcintmsk.d32 =
++			    FH_READ_REG32(&hcd->core_if->host_if->
++					   hc_regs[i]->hcintmsk);
++			FH_PRINTF("    hfnum: 0x%08x\n", hfnum.d32);
++			FH_PRINTF("    hcchar: 0x%08x\n", hcchar.d32);
++			FH_PRINTF("    hctsiz: 0x%08x\n", hctsiz.d32);
++			FH_PRINTF("    hcint: 0x%08x\n", hcint.d32);
++			FH_PRINTF("    hcintmsk: 0x%08x\n", hcintmsk.d32);
++		}
++		if (hc->xfer_started && hc->qh) {
++			fh_otg_qtd_t *qtd;
++			fh_otg_hcd_urb_t *urb;
++			
++			FH_CIRCLEQ_FOREACH(qtd, &hc->qh->qtd_list, qtd_list_entry) {
++				if (!qtd->in_process)
++					break;
++
++				urb = qtd->urb;
++				FH_PRINTF("    URB Info:\n");
++				FH_PRINTF("      qtd: %p, urb: %p\n", qtd, urb);
++				if (urb) {
++					FH_PRINTF("      Dev: %d, EP: %d %s\n",
++						   fh_otg_hcd_get_dev_addr(&urb->pipe_info),
++						   fh_otg_hcd_get_ep_num(&urb->pipe_info),
++						   fh_otg_hcd_is_pipe_in(&urb->pipe_info) ?
++						   "IN" : "OUT");
++					FH_PRINTF("      Max packet size: %d\n",
++						   fh_otg_hcd_get_mps(&urb->pipe_info));
++					FH_PRINTF("      transfer_buffer: %p\n",
++						   urb->buf);
++					FH_PRINTF("      transfer_dma: %p\n",
++						   (void *)urb->dma);
++					FH_PRINTF("      transfer_buffer_length: %d\n",
++						   urb->length);
++					FH_PRINTF("      actual_length: %d\n",
++						   urb->actual_length);
++				}
++			}
++		}
++	}
++	FH_PRINTF("  non_periodic_channels: %d\n", hcd->non_periodic_channels);
++	FH_PRINTF("  periodic_channels: %d\n", hcd->periodic_channels);
++	FH_PRINTF("  periodic_usecs: %d\n", hcd->periodic_usecs);
++	np_tx_status.d32 =
++	    FH_READ_REG32(&hcd->core_if->core_global_regs->gnptxsts);
++	FH_PRINTF("  NP Tx Req Queue Space Avail: %d\n",
++		   np_tx_status.b.nptxqspcavail);
++	FH_PRINTF("  NP Tx FIFO Space Avail: %d\n",
++		   np_tx_status.b.nptxfspcavail);
++	p_tx_status.d32 =
++	    FH_READ_REG32(&hcd->core_if->host_if->host_global_regs->hptxsts);
++	FH_PRINTF("  P Tx Req Queue Space Avail: %d\n",
++		   p_tx_status.b.ptxqspcavail);
++	FH_PRINTF("  P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
++	fh_otg_hcd_dump_frrem(hcd);
++	fh_otg_dump_global_registers(hcd->core_if);
++	fh_otg_dump_host_registers(hcd->core_if);
++	FH_PRINTF
++	    ("************************************************************\n");
++	FH_PRINTF("\n");
++#endif
++}
++
++#ifdef DEBUG
++void fh_print_setup_data(uint8_t * setup)
++{
++	int i;
++	if (CHK_DEBUG_LEVEL(DBG_HCD)) {
++		FH_PRINTF("Setup Data = MSB ");
++		for (i = 7; i >= 0; i--)
++			FH_PRINTF("%02x ", setup[i]);
++		FH_PRINTF("\n");
++		FH_PRINTF("  bmRequestType Transfer = %s\n",
++			   (setup[0] & 0x80) ? "Device-to-Host" :
++			   "Host-to-Device");
++		FH_PRINTF("  bmRequestType Type = ");
++		switch ((setup[0] & 0x60) >> 5) {
++		case 0:
++			FH_PRINTF("Standard\n");
++			break;
++		case 1:
++			FH_PRINTF("Class\n");
++			break;
++		case 2:
++			FH_PRINTF("Vendor\n");
++			break;
++		case 3:
++			FH_PRINTF("Reserved\n");
++			break;
++		}
++		FH_PRINTF("  bmRequestType Recipient = ");
++		switch (setup[0] & 0x1f) {
++		case 0:
++			FH_PRINTF("Device\n");
++			break;
++		case 1:
++			FH_PRINTF("Interface\n");
++			break;
++		case 2:
++			FH_PRINTF("Endpoint\n");
++			break;
++		case 3:
++			FH_PRINTF("Other\n");
++			break;
++		default:
++			FH_PRINTF("Reserved\n");
++			break;
++		}
++		FH_PRINTF("  bRequest = 0x%0x\n", setup[1]);
++		FH_PRINTF("  wValue = 0x%0x\n", *((uint16_t *) & setup[2]));
++		FH_PRINTF("  wIndex = 0x%0x\n", *((uint16_t *) & setup[4]));
++		FH_PRINTF("  wLength = 0x%0x\n\n", *((uint16_t *) & setup[6]));
++	}
++}
++#endif
++
++void fh_otg_hcd_dump_frrem(fh_otg_hcd_t * hcd)
++{
++#if 0
++	FH_PRINTF("Frame remaining at SOF:\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->frrem_samples, hcd->frrem_accum,
++		   (hcd->frrem_samples > 0) ?
++		   hcd->frrem_accum / hcd->frrem_samples : 0);
++
++	FH_PRINTF("\n");
++	FH_PRINTF("Frame remaining at start_transfer (uframe 7):\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->core_if->hfnum_7_samples,
++		   hcd->core_if->hfnum_7_frrem_accum,
++		   (hcd->core_if->hfnum_7_samples >
++		    0) ? hcd->core_if->hfnum_7_frrem_accum /
++		   hcd->core_if->hfnum_7_samples : 0);
++	FH_PRINTF("Frame remaining at start_transfer (uframe 0):\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->core_if->hfnum_0_samples,
++		   hcd->core_if->hfnum_0_frrem_accum,
++		   (hcd->core_if->hfnum_0_samples >
++		    0) ? hcd->core_if->hfnum_0_frrem_accum /
++		   hcd->core_if->hfnum_0_samples : 0);
++	FH_PRINTF("Frame remaining at start_transfer (uframe 1-6):\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->core_if->hfnum_other_samples,
++		   hcd->core_if->hfnum_other_frrem_accum,
++		   (hcd->core_if->hfnum_other_samples >
++		    0) ? hcd->core_if->hfnum_other_frrem_accum /
++		   hcd->core_if->hfnum_other_samples : 0);
++
++	FH_PRINTF("\n");
++	FH_PRINTF("Frame remaining at sample point A (uframe 7):\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->hfnum_7_samples_a, hcd->hfnum_7_frrem_accum_a,
++		   (hcd->hfnum_7_samples_a > 0) ?
++		   hcd->hfnum_7_frrem_accum_a / hcd->hfnum_7_samples_a : 0);
++	FH_PRINTF("Frame remaining at sample point A (uframe 0):\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->hfnum_0_samples_a, hcd->hfnum_0_frrem_accum_a,
++		   (hcd->hfnum_0_samples_a > 0) ?
++		   hcd->hfnum_0_frrem_accum_a / hcd->hfnum_0_samples_a : 0);
++	FH_PRINTF("Frame remaining at sample point A (uframe 1-6):\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->hfnum_other_samples_a, hcd->hfnum_other_frrem_accum_a,
++		   (hcd->hfnum_other_samples_a > 0) ?
++		   hcd->hfnum_other_frrem_accum_a /
++		   hcd->hfnum_other_samples_a : 0);
++
++	FH_PRINTF("\n");
++	FH_PRINTF("Frame remaining at sample point B (uframe 7):\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->hfnum_7_samples_b, hcd->hfnum_7_frrem_accum_b,
++		   (hcd->hfnum_7_samples_b > 0) ?
++		   hcd->hfnum_7_frrem_accum_b / hcd->hfnum_7_samples_b : 0);
++	FH_PRINTF("Frame remaining at sample point B (uframe 0):\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->hfnum_0_samples_b, hcd->hfnum_0_frrem_accum_b,
++		   (hcd->hfnum_0_samples_b > 0) ?
++		   hcd->hfnum_0_frrem_accum_b / hcd->hfnum_0_samples_b : 0);
++	FH_PRINTF("Frame remaining at sample point B (uframe 1-6):\n");
++	FH_PRINTF("  samples %u, accum %llu, avg %llu\n",
++		   hcd->hfnum_other_samples_b, hcd->hfnum_other_frrem_accum_b,
++		   (hcd->hfnum_other_samples_b > 0) ?
++		   hcd->hfnum_other_frrem_accum_b /
++		   hcd->hfnum_other_samples_b : 0);
++#endif
++}
++
++#endif /* FH_DEVICE_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.h
+new file mode 100644
+index 00000000..bfc3944b
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd.h
+@@ -0,0 +1,803 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd.h $
++ * $Revision: #58 $
++ * $Date: 2011/09/15 $
++ * $Change: 1846647 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_DEVICE_ONLY
++#ifndef __FH_HCD_H__
++#define __FH_HCD_H__
++
++#include "fh_otg_os_dep.h"
++#include "../fh_common_port/usb.h"
++#include "../fh_common_port/fh_list.h"
++#include "fh_otg_hcd_if.h"
++#include "fh_otg_core_if.h"
++#include "fh_otg_cil.h"
++
++/**
++ * @file
++ *
++ * This file contains the structures, constants, and interfaces for
++ * the Host Controller Driver (HCD).
++ *
++ * The Host Controller Driver (HCD) is responsible for translating requests
++ * from the USB Driver into the appropriate actions on the FH_otg controller.
++ * It isolates the USBD from the specifics of the controller by providing an
++ * API to the USBD.
++ */
++
++struct fh_otg_hcd_pipe_info {
++	uint8_t dev_addr;
++	uint8_t ep_num;
++	uint8_t pipe_type;
++	uint8_t pipe_dir;
++	uint16_t mps;
++};
++
++struct fh_otg_hcd_iso_packet_desc {
++	uint32_t offset;
++	uint32_t length;
++	uint32_t actual_length;
++	uint32_t status;
++};
++
++struct fh_otg_qtd;
++
++struct fh_otg_hcd_urb {
++	void *priv;
++	struct fh_otg_qtd *qtd;
++	void *buf;
++	fh_dma_t dma;
++	void *setup_packet;
++	fh_dma_t setup_dma;
++	uint32_t length;
++	uint32_t actual_length;
++	uint32_t status;
++	uint32_t error_count;
++	uint32_t packet_count;
++	uint32_t flags;
++	uint16_t interval;
++	struct fh_otg_hcd_pipe_info pipe_info;
++	struct fh_otg_hcd_iso_packet_desc iso_descs[0];
++};
++
++static inline uint8_t fh_otg_hcd_get_ep_num(struct fh_otg_hcd_pipe_info *pipe)
++{
++	return pipe->ep_num;
++}
++
++static inline uint8_t fh_otg_hcd_get_pipe_type(struct fh_otg_hcd_pipe_info
++						*pipe)
++{
++	return pipe->pipe_type;
++}
++
++static inline uint16_t fh_otg_hcd_get_mps(struct fh_otg_hcd_pipe_info *pipe)
++{
++	return pipe->mps;
++}
++
++static inline uint8_t fh_otg_hcd_get_dev_addr(struct fh_otg_hcd_pipe_info
++					       *pipe)
++{
++	return pipe->dev_addr;
++}
++
++static inline uint8_t fh_otg_hcd_is_pipe_isoc(struct fh_otg_hcd_pipe_info
++					       *pipe)
++{
++	return (pipe->pipe_type == UE_ISOCHRONOUS);
++}
++
++static inline uint8_t fh_otg_hcd_is_pipe_int(struct fh_otg_hcd_pipe_info
++					      *pipe)
++{
++	return (pipe->pipe_type == UE_INTERRUPT);
++}
++
++static inline uint8_t fh_otg_hcd_is_pipe_bulk(struct fh_otg_hcd_pipe_info
++					       *pipe)
++{
++	return (pipe->pipe_type == UE_BULK);
++}
++
++static inline uint8_t fh_otg_hcd_is_pipe_control(struct fh_otg_hcd_pipe_info
++						  *pipe)
++{
++	return (pipe->pipe_type == UE_CONTROL);
++}
++
++static inline uint8_t fh_otg_hcd_is_pipe_in(struct fh_otg_hcd_pipe_info *pipe)
++{
++	return (pipe->pipe_dir == UE_DIR_IN);
++}
++
++static inline uint8_t fh_otg_hcd_is_pipe_out(struct fh_otg_hcd_pipe_info
++					      *pipe)
++{
++	return (!fh_otg_hcd_is_pipe_in(pipe));
++}
++
++static inline void fh_otg_hcd_fill_pipe(struct fh_otg_hcd_pipe_info *pipe,
++					 uint8_t devaddr, uint8_t ep_num,
++					 uint8_t pipe_type, uint8_t pipe_dir,
++					 uint16_t mps)
++{
++	pipe->dev_addr = devaddr;
++	pipe->ep_num = ep_num;
++	pipe->pipe_type = pipe_type;
++	pipe->pipe_dir = pipe_dir;
++	pipe->mps = mps;
++}
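/*
 * Editor's illustrative sketch, not part of the patch: how a caller might
 * describe a bulk IN endpoint with fh_otg_hcd_fill_pipe() and query it with
 * the accessors above. The struct and the UE_* encodings are duplicated here
 * (and assumed to match the usual USB values) so the sketch compiles standalone.
 */
#include <stdio.h>
#include <stdint.h>

struct pipe_info {
	uint8_t dev_addr, ep_num, pipe_type, pipe_dir;
	uint16_t mps;
};

enum { UE_BULK = 2, UE_DIR_IN = 0x80 }; /* assumed encodings */

static void fill_pipe(struct pipe_info *p, uint8_t devaddr, uint8_t ep_num,
		      uint8_t type, uint8_t dir, uint16_t mps)
{
	p->dev_addr = devaddr;
	p->ep_num = ep_num;
	p->pipe_type = type;
	p->pipe_dir = dir;
	p->mps = mps;
}

int main(void)
{
	struct pipe_info p;

	/* Device 3, endpoint 1 IN, bulk, 512-byte max packet (HS bulk) */
	fill_pipe(&p, 3, 1, UE_BULK, UE_DIR_IN, 512);
	printf("dev %d ep %d in=%d mps %d\n",
	       p.dev_addr, p.ep_num, p.pipe_dir == UE_DIR_IN, p.mps);
	return 0;
}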
++
++/**
++ * Phases for control transfers.
++ */
++typedef enum fh_otg_control_phase {
++	FH_OTG_CONTROL_SETUP,
++	FH_OTG_CONTROL_DATA,
++	FH_OTG_CONTROL_STATUS
++} fh_otg_control_phase_e;
++
++/** Transaction types. */
++typedef enum fh_otg_transaction_type {
++	FH_OTG_TRANSACTION_NONE,
++	FH_OTG_TRANSACTION_PERIODIC,
++	FH_OTG_TRANSACTION_NON_PERIODIC,
++	FH_OTG_TRANSACTION_ALL
++} fh_otg_transaction_type_e;
++
++struct fh_otg_qh;
++
++/**
++ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
++ * interrupt, or isochronous transfer. A single QTD is created for each URB
++ * (of one of these types) submitted to the HCD. The transfer associated with
++ * a QTD may require one or multiple transactions.
++ *
++ * A QTD is linked to a Queue Head, which is entered in either the
++ * non-periodic or periodic schedule for execution. When a QTD is chosen for
++ * execution, some or all of its transactions may be executed. After
++ * execution, the state of the QTD is updated. The QTD may be retired if all
++ * its transactions are complete or if an error occurred. Otherwise, it
++ * remains in the schedule so more transactions can be executed later.
++ */
++typedef struct fh_otg_qtd {
++	/**
++	 * Determines the PID of the next data packet for the data phase of
++	 * control transfers. Ignored for other transfer types.<br>
++	 * One of the following values:
++	 *	- FH_OTG_HC_PID_DATA0
++	 *	- FH_OTG_HC_PID_DATA1
++	 */
++	uint8_t data_toggle;
++
++	/** Current phase for control transfers (Setup, Data, or Status). */
++	fh_otg_control_phase_e control_phase;
++
++	/** Keep track of the current split type
++	 * for FS/LS endpoints on a HS Hub */
++	uint8_t complete_split;
++
++	/** How many bytes transferred during SSPLIT OUT */
++	uint32_t ssplit_out_xfer_count;
++
++	/**
++	 * Holds the number of bus errors that have occurred for a transaction
++	 * within this transfer.
++	 */
++	uint8_t error_count;
++
++	/**
++	 * Index of the next frame descriptor for an isochronous transfer. A
++	 * frame descriptor describes the buffer position and length of the
++	 * data to be transferred in the next scheduled (micro)frame of an
++	 * isochronous transfer. It also holds status for that transaction.
++	 * The frame index starts at 0.
++	 */
++	uint16_t isoc_frame_index;
++
++	/** Position of the ISOC split on full/low speed */
++	uint8_t isoc_split_pos;
++
++	/** Position of the ISOC split in the buffer for the current frame */
++	uint16_t isoc_split_offset;
++
++	/** URB for this transfer */
++	struct fh_otg_hcd_urb *urb;
++
++	struct fh_otg_qh *qh;
++
++	/** Entry in the QH's list of QTDs. */
++	FH_CIRCLEQ_ENTRY(fh_otg_qtd) qtd_list_entry;
++
++	/** Indicates if this QTD is currently processed by HW. */
++	uint8_t in_process;
++
++	/** Number of DMA descriptors for this QTD */
++	uint8_t n_desc;
++
++	/**
++	 * Last activated frame (packet) index.
++	 * Used in Descriptor DMA mode only.
++	 */
++	uint16_t isoc_frame_index_last;
++
++} fh_otg_qtd_t;
++
++FH_CIRCLEQ_HEAD(fh_otg_qtd_list, fh_otg_qtd);
++
++/**
++ * A Queue Head (QH) holds the static characteristics of an endpoint and
++ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
++ * be entered in either the non-periodic or periodic schedule.
++ */
++typedef struct fh_otg_qh {
++	/**
++	 * Endpoint type.
++	 * One of the following values:
++	 *	- UE_CONTROL
++	 *	- UE_BULK
++	 *	- UE_INTERRUPT
++	 *	- UE_ISOCHRONOUS
++	 */
++	uint8_t ep_type;
++	uint8_t ep_is_in;
++
++	/** wMaxPacketSize Field of Endpoint Descriptor. */
++	uint16_t maxp;
++
++	/**
++	 * Device speed.
++	 * One of the following values:
++	 *	- FH_OTG_EP_SPEED_LOW
++	 *	- FH_OTG_EP_SPEED_FULL
++	 *	- FH_OTG_EP_SPEED_HIGH
++	 */
++	uint8_t dev_speed;
++
++	/**
++	 * Determines the PID of the next data packet for non-control
++	 * transfers. Ignored for control transfers.<br>
++	 * One of the following values:
++	 *	- FH_OTG_HC_PID_DATA0
++	 *	- FH_OTG_HC_PID_DATA1
++	 */
++	uint8_t data_toggle;
++
++	/** Ping state if 1. */
++	uint8_t ping_state;
++
++	/**
++	 * List of QTDs for this QH.
++	 */
++	struct fh_otg_qtd_list qtd_list;
++
++	/** Host channel currently processing transfers for this QH. */
++	struct fh_hc *channel;
++
++	/** Full/low speed endpoint on high-speed hub requires split. */
++	uint8_t do_split;
++
++	/** @name Periodic schedule information */
++	/** @{ */
++
++	/** Bandwidth in microseconds per (micro)frame. */
++	uint16_t usecs;
++
++	/** Interval between transfers in (micro)frames. */
++	uint16_t interval;
++
++	/**
++	 * (micro)frame to initialize a periodic transfer. The transfer
++	 * executes in the following (micro)frame.
++	 */
++	uint16_t sched_frame;
++
++	/** (micro)frame at which last start split was initialized. */
++	uint16_t start_split_frame;
++
++	/** @} */
++
++	/**
++	 * Used instead of the original buffer if
++	 * its physical address is not dword-aligned.
++	 */
++	uint8_t *dw_align_buf;
++	fh_dma_t dw_align_buf_dma;
++
++	/** Entry for QH in either the periodic or non-periodic schedule. */
++	fh_list_link_t qh_list_entry;
++
++	/** @name Descriptor DMA support */
++	/** @{ */
++
++	/** Descriptor List. */
++	fh_otg_host_dma_desc_t *desc_list;
++
++	/** Descriptor List physical address. */
++	fh_dma_t desc_list_dma;
++
++	/**
++	 * Xfer Bytes array.
++	 * Each element corresponds to a descriptor and holds the
++	 * original XferSize value for that descriptor.
++	 */
++	uint32_t *n_bytes;
++
++	/** Actual number of transfer descriptors in a list. */
++	uint16_t ntd;
++
++	/** First activated isochronous transfer descriptor index. */
++	uint8_t td_first;
++	/** Last activated isochronous transfer descriptor index. */
++	uint8_t td_last;
++
++	/** @} */
++
++} fh_otg_qh_t;
++
++FH_CIRCLEQ_HEAD(hc_list, fh_hc);
++
++/**
++ * This structure holds the state of the HCD, including the non-periodic and
++ * periodic schedules.
++ */
++struct fh_otg_hcd {
++	/** The FH otg device pointer */
++	struct fh_otg_device *otg_dev;
++	/** FH OTG Core Interface Layer */
++	fh_otg_core_if_t *core_if;
++
++	/** Function HCD driver callbacks */
++	struct fh_otg_hcd_function_ops *fops;
++
++	/** Internal FH HCD Flags */
++	volatile union fh_otg_hcd_internal_flags {
++		uint32_t d32;
++		struct {
++			unsigned port_connect_status_change:1;
++			unsigned port_connect_status:1;
++			unsigned port_reset_change:1;
++			unsigned port_enable_change:1;
++			unsigned port_suspend_change:1;
++			unsigned port_over_current_change:1;
++			unsigned port_l1_change:1;
++			unsigned reserved:26;
++		} b;
++	} flags;
++
++	/**
++	 * Inactive items in the non-periodic schedule. This is a list of
++	 * Queue Heads. Transfers associated with these Queue Heads are not
++	 * currently assigned to a host channel.
++	 */
++	fh_list_link_t non_periodic_sched_inactive;
++
++	/**
++	 * Active items in the non-periodic schedule. This is a list of
++	 * Queue Heads. Transfers associated with these Queue Heads are
++	 * currently assigned to a host channel.
++	 */
++	fh_list_link_t non_periodic_sched_active;
++
++	/**
++	 * Pointer to the next Queue Head to process in the active
++	 * non-periodic schedule.
++	 */
++	fh_list_link_t *non_periodic_qh_ptr;
++
++	/**
++	 * Inactive items in the periodic schedule. This is a list of QHs for
++	 * periodic transfers that are _not_ scheduled for the next frame.
++	 * Each QH in the list has an interval counter that determines when it
++	 * needs to be scheduled for execution. This scheduling mechanism
++	 * allows only a simple calculation for periodic bandwidth used (i.e.
++	 * must assume that all periodic transfers may need to execute in the
++	 * same frame). However, it greatly simplifies scheduling and should
++	 * be sufficient for the vast majority of OTG hosts, which need to
++	 * connect to a small number of peripherals at one time.
++	 *
++	 * Items move from this list to periodic_sched_ready when the QH
++	 * interval counter is 0 at SOF.
++	 */
++	fh_list_link_t periodic_sched_inactive;
++
++	/**
++	 * List of periodic QHs that are ready for execution in the next
++	 * frame, but have not yet been assigned to host channels.
++	 *
++	 * Items move from this list to periodic_sched_assigned as host
++	 * channels become available during the current frame.
++	 */
++	fh_list_link_t periodic_sched_ready;
++
++	/**
++	 * List of periodic QHs to be executed in the next frame that are
++	 * assigned to host channels.
++	 *
++	 * Items move from this list to periodic_sched_queued as the
++	 * transactions for the QH are queued to the FH_otg controller.
++	 */
++	fh_list_link_t periodic_sched_assigned;
++
++	/**
++	 * List of periodic QHs that have been queued for execution.
++	 *
++	 * Items move from this list to either periodic_sched_inactive or
++	 * periodic_sched_ready when the channel associated with the transfer
++	 * is released. If the interval for the QH is 1, the item moves to
++	 * periodic_sched_ready because it must be rescheduled for the next
++	 * frame. Otherwise, the item moves to periodic_sched_inactive.
++	 */
++	fh_list_link_t periodic_sched_queued;
++
++	/**
++	 * Total bandwidth claimed so far for periodic transfers. This value
++	 * is in microseconds per (micro)frame. The assumption is that all
++	 * periodic transfers may occur in the same (micro)frame.
++	 */
++	uint16_t periodic_usecs;
++
++	/**
++	 * Frame number read from the core at SOF. The value ranges from 0 to
++	 * FH_HFNUM_MAX_FRNUM.
++	 */
++	uint16_t frame_number;
++
++	/**
++	 * Count of periodic QHs. Used for SOF enable/disable when several
++	 * endpoints are in use.
++	 */
++	uint16_t periodic_qh_count;
++
++	/**
++	 * Free host channels in the controller. This is a list of
++	 * fh_hc_t items.
++	 */
++	struct hc_list free_hc_list;
++	/**
++	 * Number of host channels assigned to periodic transfers. Currently
++	 * assuming that there is a dedicated host channel for each periodic
++	 * transaction and at least one host channel available for
++	 * non-periodic transactions.
++	 */
++	int periodic_channels;
++
++	/**
++	 * Number of host channels assigned to non-periodic transfers.
++	 */
++	int non_periodic_channels;
++
++	/**
++	 * Array of pointers to the host channel descriptors. Allows accessing
++	 * a host channel descriptor given the host channel number. This is
++	 * useful in interrupt handlers.
++	 */
++	struct fh_hc *hc_ptr_array[MAX_EPS_CHANNELS];
++
++	/**
++	 * Buffer to use for any data received during the status phase of a
++	 * control transfer. Normally no data is transferred during the status
++	 * phase. This buffer is used as a bit bucket.
++	 */
++	uint8_t *status_buf;
++
++	/**
++	 * DMA address for status_buf.
++	 */
++	dma_addr_t status_buf_dma;
++#define FH_OTG_HCD_STATUS_BUF_SIZE 64
++
++	/**
++	 * Connection timer. An OTG host must display a message if the device
++	 * does not connect. Started when the VBus power is turned on via
++	 * sysfs attribute "buspower".
++	 */
++	fh_timer_t *conn_timer;
++
++	/* Tasklet to do a reset */
++	fh_tasklet_t *reset_tasklet;
++
++	/* Spinlock protecting the HCD state */
++	fh_spinlock_t *lock;
++
++	/**
++	 * Private data that could be used by OS wrapper.
++	 */
++	void *priv;
++
++	uint8_t otg_port;
++
++	/** Frame List */
++	uint32_t *frame_list;
++
++	/** Frame List DMA address */
++	dma_addr_t frame_list_dma;
++
++#ifdef DEBUG
++	uint32_t frrem_samples;
++	uint64_t frrem_accum;
++
++	uint32_t hfnum_7_samples_a;
++	uint64_t hfnum_7_frrem_accum_a;
++	uint32_t hfnum_0_samples_a;
++	uint64_t hfnum_0_frrem_accum_a;
++	uint32_t hfnum_other_samples_a;
++	uint64_t hfnum_other_frrem_accum_a;
++
++	uint32_t hfnum_7_samples_b;
++	uint64_t hfnum_7_frrem_accum_b;
++	uint32_t hfnum_0_samples_b;
++	uint64_t hfnum_0_frrem_accum_b;
++	uint32_t hfnum_other_samples_b;
++	uint64_t hfnum_other_frrem_accum_b;
++#endif
++};
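/*
 * Editor's illustrative sketch, not part of the patch: the d32/bitfield
 * union pattern used for the internal flags above lets interrupt code clear
 * all port-change flags in one 32-bit store while keeping named access to
 * individual bits. The field layout below mirrors (a subset of) the struct
 * above; bit order within a C bitfield is compiler-defined, which is why the
 * driver only ever uses the union with itself.
 */
#include <stdio.h>
#include <stdint.h>

union hcd_flags {
	uint32_t d32;
	struct {
		unsigned port_connect_status_change:1;
		unsigned port_connect_status:1;
		unsigned port_reset_change:1;
		unsigned reserved:29;
	} b;
};

int main(void)
{
	union hcd_flags f = { .d32 = 0 };

	f.b.port_connect_status_change = 1; /* device plugged in */
	f.b.port_connect_status = 1;
	printf("raw flags = 0x%08x\n", (unsigned)f.d32);

	f.d32 = 0; /* one store clears every flag at once */
	printf("after clear = 0x%08x\n", (unsigned)f.d32);
	return 0;
}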
++
++/** @name Transaction Execution Functions */
++/** @{ */
++extern fh_otg_transaction_type_e fh_otg_hcd_select_transactions(fh_otg_hcd_t
++								  * hcd);
++extern void fh_otg_hcd_queue_transactions(fh_otg_hcd_t * hcd,
++					   fh_otg_transaction_type_e tr_type);
++
++/** @} */
++
++/** @name Interrupt Handler Functions */
++/** @{ */
++extern int32_t fh_otg_hcd_handle_intr(fh_otg_hcd_t * fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_sof_intr(fh_otg_hcd_t * fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_rx_status_q_level_intr(fh_otg_hcd_t *
++							 fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_np_tx_fifo_empty_intr(fh_otg_hcd_t *
++							fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_perio_tx_fifo_empty_intr(fh_otg_hcd_t *
++							   fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_incomplete_periodic_intr(fh_otg_hcd_t *
++							   fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_port_intr(fh_otg_hcd_t * fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_conn_id_status_change_intr(fh_otg_hcd_t *
++							     fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_disconnect_intr(fh_otg_hcd_t * fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_hc_intr(fh_otg_hcd_t * fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_hc_n_intr(fh_otg_hcd_t * fh_otg_hcd,
++					    uint32_t num);
++extern int32_t fh_otg_hcd_handle_session_req_intr(fh_otg_hcd_t * fh_otg_hcd);
++extern int32_t fh_otg_hcd_handle_wakeup_detected_intr(fh_otg_hcd_t *
++						       fh_otg_hcd);
++/** @} */
++
++/** @name Schedule Queue Functions */
++/** @{ */
++
++/* Implemented in fh_otg_hcd_queue.c */
++extern fh_otg_qh_t *fh_otg_hcd_qh_create(fh_otg_hcd_t * hcd,
++					   fh_otg_hcd_urb_t * urb, int atomic_alloc);
++extern void fh_otg_hcd_qh_free(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
++extern int fh_otg_hcd_qh_add(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
++extern void fh_otg_hcd_qh_remove(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
++extern void fh_otg_hcd_qh_deactivate(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh,
++				      int sched_csplit);
++
++/** Remove and free a QH */
++static inline void fh_otg_hcd_qh_remove_and_free(fh_otg_hcd_t * hcd,
++						  fh_otg_qh_t * qh)
++{
++	fh_irqflags_t flags;
++	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
++	fh_otg_hcd_qh_remove(hcd, qh);
++	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
++	fh_otg_hcd_qh_free(hcd, qh);
++}
++
++/** Allocates memory for a QH structure.
++ * @return Returns the allocated memory or NULL on error. */
++static inline fh_otg_qh_t *fh_otg_hcd_qh_alloc(int atomic_alloc)
++{
++	if (atomic_alloc)
++		return (fh_otg_qh_t *) FH_ALLOC_ATOMIC(sizeof(fh_otg_qh_t));
++	else
++		return (fh_otg_qh_t *) FH_ALLOC(sizeof(fh_otg_qh_t));
++}
++
++extern fh_otg_qtd_t *fh_otg_hcd_qtd_create(fh_otg_hcd_urb_t * urb,
++					     int atomic_alloc);
++extern void fh_otg_hcd_qtd_init(fh_otg_qtd_t * qtd, fh_otg_hcd_urb_t * urb);
++extern int fh_otg_hcd_qtd_add(fh_otg_qtd_t * qtd, fh_otg_hcd_t * fh_otg_hcd,
++			       fh_otg_qh_t ** qh, int atomic_alloc);
++
++/** Allocates memory for a QTD structure.
++ * @return Returns the allocated memory or NULL on error. */
++static inline fh_otg_qtd_t *fh_otg_hcd_qtd_alloc(int atomic_alloc)
++{
++	if (atomic_alloc)
++		return (fh_otg_qtd_t *) FH_ALLOC_ATOMIC(sizeof(fh_otg_qtd_t));
++	else
++		return (fh_otg_qtd_t *) FH_ALLOC(sizeof(fh_otg_qtd_t));
++}
++
++/** Frees the memory for a QTD structure.  QTD should already be removed from
++ * list.
++ * @param qtd QTD to free.*/
++static inline void fh_otg_hcd_qtd_free(fh_otg_qtd_t * qtd)
++{
++	FH_FREE(qtd);
++}
++
++/** Removes a QTD from list.
++ * @param hcd HCD instance.
++ * @param qtd QTD to remove from list.
++ * @param qh QH to which the QTD belongs.
++ */
++static inline void fh_otg_hcd_qtd_remove(fh_otg_hcd_t * hcd,
++					  fh_otg_qtd_t * qtd,
++					  fh_otg_qh_t * qh)
++{
++	FH_CIRCLEQ_REMOVE(&qh->qtd_list, qtd, qtd_list_entry);
++}
++
++/** Removes and frees a QTD.
++  * IRQs must be disabled and the HCD lock held when calling this function
++  * outside the interrupt servicing chain. */
++static inline void fh_otg_hcd_qtd_remove_and_free(fh_otg_hcd_t * hcd,
++						   fh_otg_qtd_t * qtd,
++						   fh_otg_qh_t * qh)
++{
++	fh_otg_hcd_qtd_remove(hcd, qtd, qh);
++	fh_otg_hcd_qtd_free(qtd);
++}
++
++/** @} */
++
++/** @name Descriptor DMA Supporting Functions */
++/** @{ */
++
++extern void fh_otg_hcd_start_xfer_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
++extern void fh_otg_hcd_complete_xfer_ddma(fh_otg_hcd_t * hcd,
++					   fh_hc_t * hc,
++					   fh_otg_hc_regs_t * hc_regs,
++					   fh_otg_halt_status_e halt_status);
++
++extern int fh_otg_hcd_qh_init_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
++extern void fh_otg_hcd_qh_free_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh);
++
++/** @} */
++
++/** @name Internal Functions */
++/** @{ */
++fh_otg_qh_t *fh_urb_to_qh(fh_otg_hcd_urb_t * urb);
++/** @} */
++
++#ifdef CONFIG_USB_FH_OTG_LPM
++extern int fh_otg_hcd_get_hc_for_lpm_tran(fh_otg_hcd_t * hcd,
++					   uint8_t devaddr);
++extern void fh_otg_hcd_free_hc_from_lpm(fh_otg_hcd_t * hcd);
++#endif
++
++/** Gets the QH that contains the list_head */
++#define fh_list_to_qh(_list_head_ptr_) container_of(_list_head_ptr_, fh_otg_qh_t, qh_list_entry)
++
++/** Gets the QTD that contains the list_head */
++#define fh_list_to_qtd(_list_head_ptr_) container_of(_list_head_ptr_, fh_otg_qtd_t, qtd_list_entry)
++
++/** Check if QH is non-periodic  */
++#define fh_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == UE_BULK) || \
++				     (_qh_ptr_->ep_type == UE_CONTROL))
++
++/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */
++#define fh_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
++
++/** Packet size for any kind of endpoint descriptor */
++#define fh_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
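/*
 * Editor's illustrative sketch, not part of the patch: decoding
 * wMaxPacketSize with the two macros above. For a high-speed, high-bandwidth
 * isochronous endpoint with wMaxPacketSize = 0x1400, bits 12:11 give 2 extra
 * transactions per microframe and bits 10:0 give a 1024-byte packet. The
 * value 0x1400 is a hypothetical example, not taken from the driver.
 */
#include <stdio.h>

#define fh_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
#define fh_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)

int main(void)
{
	unsigned short w = 0x1400; /* hypothetical endpoint descriptor value */

	/* Prints: mult=3 packet=1024 -> 3072 bytes per microframe */
	printf("mult=%d packet=%d -> %d bytes per microframe\n",
	       fh_hb_mult(w), fh_max_packet(w),
	       fh_hb_mult(w) * fh_max_packet(w));
	return 0;
}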
++
++/**
++ * Returns true if _frame1 is less than or equal to _frame2. The comparison is
++ * done modulo FH_HFNUM_MAX_FRNUM. This accounts for the rollover of the
++ * frame number when the max frame number is reached.
++ */
++static inline int fh_frame_num_le(uint16_t frame1, uint16_t frame2)
++{
++	return ((frame2 - frame1) & FH_HFNUM_MAX_FRNUM) <=
++	    (FH_HFNUM_MAX_FRNUM >> 1);
++}
++
++/**
++ * Returns true if _frame1 is greater than _frame2. The comparison is done
++ * modulo FH_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
++ * number when the max frame number is reached.
++ */
++static inline int fh_frame_num_gt(uint16_t frame1, uint16_t frame2)
++{
++	return (frame1 != frame2) &&
++	    (((frame1 - frame2) & FH_HFNUM_MAX_FRNUM) <
++	     (FH_HFNUM_MAX_FRNUM >> 1));
++}
++
++/**
++ * Increments _frame by the amount specified by _inc. The addition is done
++ * modulo FH_HFNUM_MAX_FRNUM. Returns the incremented value.
++ */
++static inline uint16_t fh_frame_num_inc(uint16_t frame, uint16_t inc)
++{
++	return (frame + inc) & FH_HFNUM_MAX_FRNUM;
++}
++
++static inline uint16_t fh_full_frame_num(uint16_t frame)
++{
++	return (frame & FH_HFNUM_MAX_FRNUM) >> 3;
++}
++
++static inline uint16_t fh_micro_frame_num(uint16_t frame)
++{
++	return frame & 0x7;
++}
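/*
 * Editor's illustrative sketch, not part of the patch: the modulo frame
 * arithmetic above, assuming FH_HFNUM_MAX_FRNUM is the usual 14-bit mask
 * 0x3FFF. Near rollover, frame 0x3FFC is still "less than or equal to"
 * frame 5 because the difference modulo 0x4000 is small.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_FRNUM 0x3FFF /* assumed value of FH_HFNUM_MAX_FRNUM */

static int frame_num_le(uint16_t f1, uint16_t f2)
{
	return ((f2 - f1) & MAX_FRNUM) <= (MAX_FRNUM >> 1);
}

static uint16_t frame_num_inc(uint16_t f, uint16_t inc)
{
	return (f + inc) & MAX_FRNUM;
}

int main(void)
{
	/* 0x3FFC <= 5 despite rollover: the difference is 9, under 0x2000 */
	printf("le(0x3FFC, 5) = %d\n", frame_num_le(0x3FFC, 5));
	/* Incrementing past the maximum wraps to 2 */
	printf("inc(0x3FFE, 4) = 0x%x\n", frame_num_inc(0x3FFE, 4));
	/* Frame 0x123: full frame 0x24, microframe 3 */
	printf("full=0x%x micro=%d\n", (0x123 & MAX_FRNUM) >> 3, 0x123 & 0x7);
	return 0;
}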
++
++void fh_otg_hcd_save_data_toggle(fh_hc_t * hc,
++				  fh_otg_hc_regs_t * hc_regs,
++				  fh_otg_qtd_t * qtd);
++
++#ifdef DEBUG
++/**
++ * Macro to sample the remaining PHY clocks left in the current frame. This
++ * may be used during debugging to determine the average time it takes to
++ * execute sections of code. There are two possible sample points, "a" and
++ * "b", so the _letter argument must be one of these values.
++ *
++ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For
++ * example, "cat /sys/devices/lm0/hcd_frrem".
++ */
++#define fh_sample_frrem(_hcd, _qh, _letter) \
++{ \
++	hfnum_data_t hfnum; \
++	fh_otg_qtd_t *qtd; \
++	qtd = list_entry(_qh->qtd_list.next, fh_otg_qtd_t, qtd_list_entry); \
++	if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \
++		hfnum.d32 = FH_READ_REG32(&_hcd->core_if->host_if->host_global_regs->hfnum); \
++		switch (hfnum.b.frnum & 0x7) { \
++		case 7: \
++			_hcd->hfnum_7_samples_##_letter++; \
++			_hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \
++			break; \
++		case 0: \
++			_hcd->hfnum_0_samples_##_letter++; \
++			_hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \
++			break; \
++		default: \
++			_hcd->hfnum_other_samples_##_letter++; \
++			_hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \
++			break; \
++		} \
++	} \
++}
++#else
++#define fh_sample_frrem(_hcd, _qh, _letter)
++#endif
++#endif
++#endif /* FH_DEVICE_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_ddma.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_ddma.c
+new file mode 100644
+index 00000000..aa65be42
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_ddma.c
+@@ -0,0 +1,1125 @@
++/*==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_ddma.c $
++ * $Revision: #11 $
++ * $Date: 2013/01/24 $
++ * $Change: 2150761 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_DEVICE_ONLY
++
++/** @file
++ * This file contains Descriptor DMA support implementation for host mode.
++ */
++
++#include "fh_otg_hcd.h"
++#include "fh_otg_regs.h"
++
++static inline uint8_t frame_list_idx(uint16_t frame)
++{
++	return (frame & (MAX_FRLIST_EN_NUM - 1));
++}
++
++static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
++{
++	return (idx + inc) &
++	    (((speed ==
++	       FH_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
++	      MAX_DMA_DESC_NUM_GENERIC) - 1);
++}
++
++static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
++{
++	return (idx - inc) &
++	    (((speed ==
++	       FH_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
++	      MAX_DMA_DESC_NUM_GENERIC) - 1);
++}
++
++static inline uint16_t max_desc_num(fh_otg_qh_t * qh)
++{
++	return (((qh->ep_type == UE_ISOCHRONOUS)
++		 && (qh->dev_speed == FH_OTG_EP_SPEED_HIGH))
++		? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
++}
++static inline uint16_t frame_incr_val(fh_otg_qh_t * qh)
++{
++	return ((qh->dev_speed == FH_OTG_EP_SPEED_HIGH)
++		? ((qh->interval + 8 - 1) / 8)
++		: qh->interval);
++}
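/*
 * Editor's illustrative sketch, not part of the patch: the modular index
 * helpers above, with the descriptor list sizes assumed to be the usual DWC
 * OTG values (64 generic descriptors, 256 for HS isoc). frame_incr_val()
 * converts a HS interval, expressed in microframes, into whole frames by
 * rounding up.
 */
#include <stdio.h>

#define MAX_DMA_DESC_NUM_GENERIC 64   /* assumed */
#define MAX_DMA_DESC_NUM_HS_ISOC 256  /* assumed */

static int idx_inc(int idx, int inc, int hs)
{
	int n = hs ? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;

	return (idx + inc) & (n - 1); /* n is a power of two */
}

int main(void)
{
	/* Wraps from 62 to 2 in a 64-entry full-speed list */
	printf("FS: 62 + 4 -> %d\n", idx_inc(62, 4, 0));
	/* The same step in a 256-entry HS isoc list does not wrap */
	printf("HS: 62 + 4 -> %d\n", idx_inc(62, 4, 1));
	/* HS interval of 12 microframes needs (12 + 8 - 1) / 8 = 2 frames */
	printf("frame_incr_val(12 uframes) = %d\n", (12 + 8 - 1) / 8);
	return 0;
}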
++
++static int desc_list_alloc(fh_otg_qh_t * qh)
++{
++	int retval = 0;
++
++	qh->desc_list = (fh_otg_host_dma_desc_t *)
++	    FH_DMA_ALLOC(sizeof(fh_otg_host_dma_desc_t) * max_desc_num(qh),
++			  &qh->desc_list_dma);
++
++	if (!qh->desc_list) {
++		retval = -FH_E_NO_MEMORY;
++		FH_ERROR("%s: DMA descriptor list allocation failed\n", __func__);
++		return retval;
++	}
++
++	fh_memset(qh->desc_list, 0x00,
++		   sizeof(fh_otg_host_dma_desc_t) * max_desc_num(qh));
++
++	qh->n_bytes =
++	    (uint32_t *) FH_ALLOC(sizeof(uint32_t) * max_desc_num(qh));
++
++	if (!qh->n_bytes) {
++		retval = -FH_E_NO_MEMORY;
++		FH_ERROR
++		    ("%s: Failed to allocate array of descriptor sizes\n",
++		     __func__);
++	}
++	return retval;
++
++}
++
++static void desc_list_free(fh_otg_qh_t * qh)
++{
++	if (qh->desc_list) {
++		FH_DMA_FREE(max_desc_num(qh), qh->desc_list,
++			     qh->desc_list_dma);
++		qh->desc_list = NULL;
++	}
++
++	if (qh->n_bytes) {
++		FH_FREE(qh->n_bytes);
++		qh->n_bytes = NULL;
++	}
++}
++
++static int frame_list_alloc(fh_otg_hcd_t * hcd)
++{
++	int retval = 0;
++	if (hcd->frame_list)
++		return 0;
++
++	hcd->frame_list = FH_DMA_ALLOC(4 * MAX_FRLIST_EN_NUM,
++					&hcd->frame_list_dma);
++	if (!hcd->frame_list) {
++		retval = -FH_E_NO_MEMORY;
++		FH_ERROR("%s: Frame List allocation failed\n", __func__);
++		return retval;
++	}
++
++	fh_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);
++
++	return retval;
++}
++
++static void frame_list_free(fh_otg_hcd_t * hcd)
++{
++	if (!hcd->frame_list)
++		return;
++	
++	FH_DMA_FREE(4 * MAX_FRLIST_EN_NUM, hcd->frame_list, hcd->frame_list_dma);
++	hcd->frame_list = NULL;
++}
++
++static void per_sched_enable(fh_otg_hcd_t * hcd, uint16_t fr_list_en)
++{
++
++	hcfg_data_t hcfg;
++
++	hcfg.d32 = FH_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
++
++	if (hcfg.b.perschedena) {
++		/* already enabled */
++		return;
++	}
++
++	FH_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
++			hcd->frame_list_dma);
++
++	switch (fr_list_en) {
++	case 64:
++		hcfg.b.frlisten = 3;
++		break;
++	case 32:
++		hcfg.b.frlisten = 2;
++		break;
++	case 16:
++		hcfg.b.frlisten = 1;
++		break;
++	case 8:
++		hcfg.b.frlisten = 0;
++		break;
++	default:
++		break;
++	}
++
++	hcfg.b.perschedena = 1;
++
++	FH_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
++	FH_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
++
++}
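/*
 * Editor's illustrative sketch, not part of the patch: the switch in
 * per_sched_enable() maps the frame-list length to the HCFG FrListEn
 * encoding 8->0, 16->1, 32->2, 64->3, i.e. log2(entries / 8).
 */
#include <stdio.h>

static int frlisten_encoding(int entries)
{
	int enc = 0;

	for (int n = entries / 8; n > 1; n >>= 1)
		enc++;
	return enc;
}

int main(void)
{
	for (int e = 8; e <= 64; e *= 2)
		printf("%2d entries -> frlisten %d\n", e, frlisten_encoding(e));
	return 0;
}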
++
++static void per_sched_disable(fh_otg_hcd_t * hcd)
++{
++	hcfg_data_t hcfg;
++
++	hcfg.d32 = FH_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
++	
++	if (!hcfg.b.perschedena) {
++		/* already disabled */
++		return;
++	}
++	hcfg.b.perschedena = 0;
++
++	FH_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
++	FH_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
++}
++
++/*
++ * Activates/deactivates FrameList entries for the channel,
++ * based on the endpoint servicing period.
++ */
++void update_frame_list(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh, uint8_t enable)
++{
++	uint16_t i, j, inc;
++	fh_hc_t *hc = NULL;
++
++	if (!qh->channel) {
++		FH_ERROR("qh->channel = %p", qh->channel);
++		return;
++	}
++
++	if (!hcd) {
++		FH_ERROR("------hcd = %p", hcd);
++		return;
++	}
++
++	if (!hcd->frame_list) {
++		FH_ERROR("-------hcd->frame_list = %p", hcd->frame_list);
++		return;
++	}
++
++	hc = qh->channel;
++	inc = frame_incr_val(qh);
++	if (qh->ep_type == UE_ISOCHRONOUS)
++		i = frame_list_idx(qh->sched_frame);
++	else
++		i = 0;
++
++	j = i;
++	do {
++		if (enable)
++			hcd->frame_list[j] |= (1 << hc->hc_num);
++		else
++			hcd->frame_list[j] &= ~(1 << hc->hc_num);
++		j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
++	}
++	while (j != i);
++	if (!enable)
++		return;
++	hc->schinfo = 0;
++	if (qh->channel->speed == FH_OTG_EP_SPEED_HIGH) {
++		j = 1;
++		/* TODO - check this */
++		inc = (8 + qh->interval - 1) / qh->interval;
++		for (i = 0; i < inc; i++) {
++			hc->schinfo |= j;
++			j = j << qh->interval;
++		}
++	} else {
++		hc->schinfo = 0xff;
++	}
++}
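/*
 * Editor's illustrative sketch, not part of the patch: how
 * update_frame_list() marks a periodic channel in the frame list. A channel
 * servicing an endpoint every `inc` frames sets its bit in every inc-th
 * entry, starting from the scheduled frame and wrapping modulo the list size
 * (assumed 64 entries, the usual MAX_FRLIST_EN_NUM).
 */
#include <stdio.h>
#include <stdint.h>

#define FRLIST_EN 64 /* assumed MAX_FRLIST_EN_NUM */

int main(void)
{
	uint32_t frame_list[FRLIST_EN] = { 0 };
	int hc_num = 5, start = 3, inc = 16;
	int j = start;

	do {
		frame_list[j] |= 1u << hc_num; /* enable channel in this frame */
		j = (j + inc) & (FRLIST_EN - 1);
	} while (j != start);

	/* Prints entries 3, 19, 35, 51 with bit 5 set */
	for (int i = 0; i < FRLIST_EN; i++)
		if (frame_list[i])
			printf("entry %2d: 0x%08x\n", i, (unsigned)frame_list[i]);
	return 0;
}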
++
++#if 1
++void dump_frame_list(fh_otg_hcd_t * hcd)
++{
++	int i = 0;
++	FH_PRINTF("--FRAME LIST (hex) --\n");
++	for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
++		FH_PRINTF("%x\t", hcd->frame_list[i]);
++		if (!(i % 8) && i)
++			FH_PRINTF("\n");
++	}
++	FH_PRINTF("\n----\n");
++
++}
++#endif
++
++static void release_channel_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	fh_hc_t *hc = qh->channel;
++	if (fh_qh_is_non_per(qh))
++		hcd->non_periodic_channels--;
++	else
++		update_frame_list(hcd, qh, 0);
++
++	/*
++	 * The condition is added to prevent a double cleanup attempt in case
++	 * of device disconnect. See the channel cleanup in
++	 * fh_otg_hcd_disconnect_cb().
++	 */
++	if (hc->qh) {
++		fh_otg_hc_cleanup(hcd->core_if, hc);
++		FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
++		hc->qh = NULL;
++	}
++
++	qh->channel = NULL;
++	qh->ntd = 0;
++
++	if (qh->desc_list) {
++		fh_memset(qh->desc_list, 0x00,
++			   sizeof(fh_otg_host_dma_desc_t) * max_desc_num(qh));
++	}
++}
++
++/** 
++ * Initializes a QH structure's Descriptor DMA related members.
++ * Allocates memory for descriptor list.
++ * On first periodic QH, allocates memory for FrameList 
++ * and enables periodic scheduling.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param qh The QH to init.
++ *
++ * @return 0 if successful, negative error code otherwise.
++ */
++int fh_otg_hcd_qh_init_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	int retval = 0;
++
++	if (qh->do_split) {
++		FH_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
++		return -1;
++	}
++
++	retval = desc_list_alloc(qh);
++
++	if ((retval == 0)
++	    && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
++		if (!hcd->frame_list) {
++			retval = frame_list_alloc(hcd);
++			/* Enable periodic schedule on first periodic QH */
++			if (retval == 0)
++				per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
++		}
++	}
++
++	qh->ntd = 0;
++
++	return retval;
++}
++
++/**
++ * Frees descriptor list memory associated with the QH.
++ * If the QH is the last periodic one, frees the FrameList memory
++ * and disables periodic scheduling.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param qh The QH to free.
++ */
++void fh_otg_hcd_qh_free_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	desc_list_free(qh);
++
++	/*
++	 * The channel may still be assigned for some reason.
++	 * Seen on Isoc URB dequeue: the channel was halted but no subsequent
++	 * ChHalted interrupt arrived to release it, so when we get here from
++	 * the endpoint disable routine the channel remains assigned.
++	 */
++	if (qh->channel)
++		release_channel_ddma(hcd, qh);
++
++	if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
++	    && !hcd->periodic_channels && hcd->frame_list) {
++
++		per_sched_disable(hcd);
++		frame_list_free(hcd);
++	}
++}
++
++static uint8_t frame_to_desc_idx(fh_otg_qh_t * qh, uint16_t frame_idx)
++{
++	if (qh->dev_speed == FH_OTG_EP_SPEED_HIGH) {
++		/*
++		 * Descriptor set (8 descriptors) index,
++		 * which is 8-aligned.
++		 */
++		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
++	} else {
++		return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
++	}
++}
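/*
 * Editor's illustrative sketch, not part of the patch: the HS branch of
 * frame_to_desc_idx() selects an 8-aligned descriptor *set* (one descriptor
 * per microframe), so consecutive frames are 8 descriptors apart in a
 * 256-entry HS isoc list (size assumed as elsewhere in these sketches).
 */
#include <stdio.h>
#include <stdint.h>

#define HS_ISOC_DESCS 256 /* assumed MAX_DMA_DESC_NUM_HS_ISOC */

static int hs_frame_to_desc_idx(uint16_t frame_idx)
{
	return (frame_idx & ((HS_ISOC_DESCS / 8) - 1)) * 8;
}

int main(void)
{
	for (uint16_t f = 0; f < 4; f++)
		printf("frame %d -> desc %d\n", f, hs_frame_to_desc_idx(f));
	/* Wraps every 32 frames: frame 33 lands on descriptor 8 again */
	printf("frame 33 -> desc %d\n", hs_frame_to_desc_idx(33));
	return 0;
}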
++
++/*
++ * Determines the starting frame for an Isochronous transfer.
++ * A few frames are skipped to prevent a race condition with the HC.
++ */
++static uint8_t calc_starting_frame(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh,
++				   uint8_t * skip_frames)
++{
++	uint16_t frame = 0;
++	hcd->frame_number = fh_otg_hcd_get_frame_number(hcd);
++	
++	/* sched_frame is always a frame number (not a uFrame), both in FS and HS! */
++	
++	/*
++	 * skip_frames is used to limit the number of activated descriptors,
++	 * to avoid the situation where the HC services the last activated
++	 * descriptor first.
++	 * Example for FS:
++	 * The current frame is 1 and the scheduled frame is 3. Since the HC
++	 * always fetches the descriptor corresponding to curr_frame+1, the
++	 * descriptor corresponding to frame 2 will be fetched. If the number
++	 * of descriptors is max=64 (or greater), the list will be fully
++	 * programmed with Active descriptors and it is possible (though rare)
++	 * that the latest descriptor (considering wrap-around) corresponding
++	 * to frame 2 will be serviced first. The HS case is more probable
++	 * because, in fact, up to 11 uframes (16 in the code) may be skipped.
++	 */
++	if (qh->dev_speed == FH_OTG_EP_SPEED_HIGH) {
++		/*
++		 * Consider the uframe counter as well, to start the transfer
++		 * ASAP. If half of the frame has elapsed, skip 2 frames,
++		 * otherwise just 1 frame.
++		 * The starting descriptor index must be 8-aligned, so if the
++		 * current frame is nearly complete the next one is skipped as
++		 * well.
++		 */
++
++		if (fh_micro_frame_num(hcd->frame_number) >= 5) {
++			*skip_frames = 2 * 8;
++		 	frame = fh_frame_num_inc(hcd->frame_number, *skip_frames);
++		} else {
++			*skip_frames = 1 * 8;
++			frame = fh_frame_num_inc(hcd->frame_number, *skip_frames);
++		}
++
++		frame = fh_full_frame_num(frame);
++	} else {
++		/*
++		 * Two frames are skipped for FS: the current one and the next.
++		 * But for descriptor programming, 1 frame (descriptor) is
++		 * enough; see the example above.
++		 */
++		*skip_frames = 1;
++		frame = fh_frame_num_inc(hcd->frame_number, 2);
++	}
++
++	return frame;
++}
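/*
 * Editor's illustrative sketch, not part of the patch: the HS branch of
 * calc_starting_frame(). If the current microframe is 5 or later, two full
 * frames (16 uframes) are skipped, otherwise one (8 uframes); the result is
 * then reduced to a full frame number. Frame numbering is assumed 14-bit as
 * elsewhere in these sketches.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_FRNUM 0x3FFF /* assumed FH_HFNUM_MAX_FRNUM */

static uint16_t hs_start_frame(uint16_t hw_frame_number, uint8_t *skip)
{
	*skip = ((hw_frame_number & 0x7) >= 5) ? 2 * 8 : 1 * 8;
	return ((hw_frame_number + *skip) & MAX_FRNUM) >> 3;
}

int main(void)
{
	uint8_t skip;

	/* Frame 0x100, uframe 2: only one frame is skipped */
	printf("start %#x skip %d\n", hs_start_frame(0x802, &skip), skip);
	/* Frame 0x100, uframe 6: frame nearly complete, skip two frames */
	printf("start %#x skip %d\n", hs_start_frame(0x806, &skip), skip);
	return 0;
}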
++
++/* 
++ * Calculate initial descriptor index for isochronous transfer
++ * based on scheduled frame. 
++ */
++static uint8_t recalc_initial_desc_idx(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	uint16_t frame = 0, fr_idx, fr_idx_tmp;
++	uint8_t skip_frames = 0;
++	/*
++	 * With the current ISOC processing algorithm the channel is released
++	 * when there are no more QTDs in the list (qh->ntd == 0).
++	 * Thus this function is called only when qh->ntd == 0 and qh->channel == 0.
++	 *
++	 * So the qh->channel != NULL branch is unused; it has simply not been
++	 * removed from the source file. It would be required for another
++	 * possible approach: do not disable and release the channel when the
++	 * ISOC session completes, just move the QH to the inactive schedule
++	 * until a new QTD arrives. On a new QTD, the QH moves back to the
++	 * 'ready' schedule and the starting frame, and therefore the starting
++	 * desc_index, are recalculated. In that case the channel is released
++	 * only on ep_disable.
++	 */
++
++	/* Calculate starting descriptor index. For INTERRUPT endpoint it is always 0. */
++	if (qh->channel) {
++		frame = calc_starting_frame(hcd, qh, &skip_frames);
++		/* 
++		 * Calculate initial descriptor index based on FrameList current bitmap
++		 * and servicing period.
++		 */
++		fr_idx_tmp = frame_list_idx(frame);
++		fr_idx =
++		    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
++		     fr_idx_tmp)
++		    % frame_incr_val(qh);
++		fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
++	} else {
++		qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
++		fr_idx = frame_list_idx(qh->sched_frame);
++	}
++
++	qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);
++
++	return skip_frames;
++}
++
++#define	ISOC_URB_GIVEBACK_ASAP
++
++#define MAX_ISOC_XFER_SIZE_FS 1023
++#define MAX_ISOC_XFER_SIZE_HS 3072
++#define DESCNUM_THRESHOLD 4
++
++static void init_isoc_dma_desc(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh,
++			       uint8_t skip_frames)
++{
++	struct fh_otg_hcd_iso_packet_desc *frame_desc;
++	fh_otg_qtd_t *qtd;
++	fh_otg_host_dma_desc_t *dma_desc;
++	uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;
++
++	idx = qh->td_last;
++	inc = qh->interval;
++	n_desc = 0;
++
++	ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
++	if (skip_frames && !qh->channel)
++		ntd_max = ntd_max - skip_frames / qh->interval;
++
++	max_xfer_size =
++	    (qh->dev_speed ==
++	     FH_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
++	    MAX_ISOC_XFER_SIZE_FS;
++
++	FH_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
++		while ((qh->ntd < ntd_max)
++		       && (qtd->isoc_frame_index_last <
++			   qtd->urb->packet_count)) {
++
++			dma_desc = &qh->desc_list[idx];
++			fh_memset(dma_desc, 0x00, sizeof(fh_otg_host_dma_desc_t));
++
++			frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
++
++			if (frame_desc->length > max_xfer_size)
++				qh->n_bytes[idx] = max_xfer_size;
++			else
++				qh->n_bytes[idx] = frame_desc->length;
++			dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
++			dma_desc->status.b_isoc.a = 1;
++			dma_desc->status.b_isoc.sts = 0;
++
++			dma_desc->buf = qtd->urb->dma + frame_desc->offset;
++
++			qh->ntd++;
++
++			qtd->isoc_frame_index_last++;
++
++#ifdef	ISOC_URB_GIVEBACK_ASAP
++			/* 
++			 * Set IOC for each descriptor corresponding to the 
++			 * last frame of the URB.
++			 */
++			if (qtd->isoc_frame_index_last ==
++			    qtd->urb->packet_count)
++				dma_desc->status.b_isoc.ioc = 1;
++
++#endif
++			idx = desclist_idx_inc(idx, inc, qh->dev_speed);
++			n_desc++;
++
++		}
++		qtd->in_process = 1;
++	}
++
++	qh->td_last = idx;
++
++#ifdef	ISOC_URB_GIVEBACK_ASAP
++	/* Set IOC for the last descriptor if descriptor list is full */
++	if (qh->ntd == ntd_max) {
++		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
++		qh->desc_list[idx].status.b_isoc.ioc = 1;
++	}
++#else
++	/*
++	 * Set the IOC bit for only one descriptor.
++	 * Always try to be ahead of HW processing, i.e. on IOC generation
++	 * the driver activates the next descriptors while the core continues
++	 * to process the descriptors following the one with IOC set.
++	 */
++
++	if (n_desc > DESCNUM_THRESHOLD) {
++		/*
++		 * Move the IOC "up". Required even if there is only one QTD
++		 * in the list, because QTDs might continue to be queued,
++		 * while during the activation only one was queued.
++		 * Actually more than one QTD might be in the list if this
++		 * function is called from XferCompletion: QTDs were queued
++		 * during HW processing of the previous descriptor chunk.
++		 */
++		idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
++	} else {
++		/*
++		 * Set the IOC for the latest descriptor if either the number
++		 * of descriptors is not greater than the threshold or no more
++		 * new descriptors were activated.
++		 */
++		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
++	}
++
++	qh->desc_list[idx].status.b_isoc.ioc = 1;
++#endif
++}
++
++static void init_non_isoc_dma_desc(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++
++	fh_hc_t *hc;
++	fh_otg_host_dma_desc_t *dma_desc;
++	fh_otg_qtd_t *qtd;
++	int num_packets, len, n_desc = 0;
++
++	hc = qh->channel;
++
++	/*
++	 * Start with hc->xfer_buff as initialized in assign_and_init_hc();
++	 * then, if an SG transfer consists of multiple URBs, this pointer is
++	 * re-assigned to the buffer of the currently processed QTD.
++	 * For a non-SG request there is always one QTD active.
++	 */
++
++	FH_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
++
++		if (n_desc) {
++			/* SG request - more than 1 QTDs */
++	 		hc->xfer_buff = (uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
++			hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
++		}
++
++		qtd->n_desc = 0;
++
++		do {
++			dma_desc = &qh->desc_list[n_desc];
++			len = hc->xfer_len;
++
++			if (len > MAX_DMA_DESC_SIZE)
++				len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;
++
++			if (hc->ep_is_in) {
++				if (len > 0) {
++					num_packets = (len + hc->max_packet - 1) / hc->max_packet;
++				} else {
++					/* Need 1 packet for transfer length of 0. */
++					num_packets = 1;
++				}
++				/* Always program an integral # of max packets for IN transfers. */
++				len = num_packets * hc->max_packet;
++			}
++
++			dma_desc->status.b.n_bytes = len;
++
++			qh->n_bytes[n_desc] = len;
++
++			if ((qh->ep_type == UE_CONTROL)
++			    && (qtd->control_phase == FH_OTG_CONTROL_SETUP))
++				dma_desc->status.b.sup = 1;	/* Setup Packet */
++
++			dma_desc->status.b.a = 1;	/* Active descriptor */
++			dma_desc->status.b.sts = 0;
++
++			dma_desc->buf =
++			    ((unsigned long)hc->xfer_buff & 0xffffffff);
++
++			/*
++			 * Last (or single) descriptor of an IN transfer
++			 * with an actual size less than MaxPacket.
++			 */
++			if (len > hc->xfer_len) {
++				hc->xfer_len = 0;
++			} else {
++				hc->xfer_buff += len;
++				hc->xfer_len -= len;
++			}
++
++			qtd->n_desc++;
++			n_desc++;
++		}
++		while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));
++		
++
++		qtd->in_process = 1;
++
++		if (qh->ep_type == UE_CONTROL)
++			break;
++
++		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
++			break;
++
++		if (qh->ep_is_in && (qh->ep_type == UE_BULK))
++			break;
++	}
++
++	if (n_desc) {
++		/* Request Transfer Complete interrupt for the last descriptor */
++		qh->desc_list[n_desc - 1].status.b.ioc = 1;
++		/* End of List indicator */
++		qh->desc_list[n_desc - 1].status.b.eol = 1;
++
++		hc->ntd = n_desc;
++	}
++}
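/*
 * Editor's illustrative sketch, not part of the patch: the IN-transfer
 * length rounding in init_non_isoc_dma_desc(). IN descriptors are always
 * programmed with an integral number of max packets, and a zero-length
 * transfer still needs one packet.
 */
#include <stdio.h>

static int in_desc_len(int xfer_len, int max_packet)
{
	int num_packets;

	if (xfer_len > 0)
		num_packets = (xfer_len + max_packet - 1) / max_packet; /* ceil */
	else
		num_packets = 1; /* a zero-length transfer still uses 1 packet */
	return num_packets * max_packet;
}

int main(void)
{
	/* 700 bytes on a 512-byte bulk IN endpoint -> 2 packets, 1024 bytes */
	printf("%d\n", in_desc_len(700, 512));
	/* Zero-length status-style transfer -> one 512-byte packet slot */
	printf("%d\n", in_desc_len(0, 512));
	return 0;
}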
++
++/**
++ * For Control and Bulk endpoints, initializes the descriptor list
++ * and starts the transfer.
++ *
++ * For Interrupt and Isochronous endpoints, initializes the descriptor list,
++ * then updates the FrameList, marking the appropriate entries as active.
++ * In the Isochronous case the starting descriptor index is calculated based
++ * on the scheduled frame, but only for the first transfer descriptor within
++ * a session. The transfer is then started by enabling the channel.
++ * For an Isochronous endpoint the channel is not halted on the XferComplete
++ * interrupt, so it remains assigned to the endpoint (QH) until the session
++ * is done.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param qh The QH to start.
++ */
++void fh_otg_hcd_start_xfer_ddma(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	/* Channel is already assigned */
++	fh_hc_t *hc = qh->channel;
++	uint8_t skip_frames = 0;
++
++	switch (hc->ep_type) {
++	case FH_OTG_EP_TYPE_CONTROL:
++	case FH_OTG_EP_TYPE_BULK:
++		init_non_isoc_dma_desc(hcd, qh);
++
++		fh_otg_hc_start_transfer_ddma(hcd->core_if, hc);
++		break;
++	case FH_OTG_EP_TYPE_INTR:
++		init_non_isoc_dma_desc(hcd, qh);
++
++		update_frame_list(hcd, qh, 1);
++
++		fh_otg_hc_start_transfer_ddma(hcd->core_if, hc);
++		break;
++	case FH_OTG_EP_TYPE_ISOC:
++
++		if (!qh->ntd)
++			skip_frames = recalc_initial_desc_idx(hcd, qh);
++
++		init_isoc_dma_desc(hcd, qh, skip_frames);
++
++		if (!hc->xfer_started) {
++
++			update_frame_list(hcd, qh, 1);
++
++			/*
++			 * Always set to the max, instead of the actual size.
++			 * Otherwise ntd would change while the channel is
++			 * enabled, which is not recommended.
++			 */
++			hc->ntd = max_desc_num(qh);
++			/* Enable channel only once for ISOC */
++			fh_otg_hc_start_transfer_ddma(hcd->core_if, hc);
++		}
++
++		break;
++	default:
++
++		break;
++	}
++}
++
++static void complete_isoc_xfer_ddma(fh_otg_hcd_t * hcd,
++				    fh_hc_t * hc,
++				    fh_otg_hc_regs_t * hc_regs,
++				    fh_otg_halt_status_e halt_status)
++{
++	struct fh_otg_hcd_iso_packet_desc *frame_desc;
++	fh_otg_qtd_t *qtd, *qtd_tmp;
++	fh_otg_qh_t *qh;
++	fh_otg_host_dma_desc_t *dma_desc;
++	uint16_t idx, remain;
++	uint8_t urb_compl;
++
++	qh = hc->qh;
++	idx = qh->td_first;
++
++	if (hc->halt_status == FH_OTG_HC_XFER_URB_DEQUEUE) {
++		FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
++		    qtd->in_process = 0;
++		return;
++	} else if ((halt_status == FH_OTG_HC_XFER_AHB_ERR) ||
++		   (halt_status == FH_OTG_HC_XFER_BABBLE_ERR)) {
++		/*
++		 * The channel is halted in these error cases, which are
++		 * considered serious issues.
++		 * Complete all URBs, marking all frames as failed,
++		 * irrespective of whether some of the descriptors (frames)
++		 * succeeded or not.
++		 * Pass the error code to the completion routine as well, to
++		 * update urb->status; some class drivers might use it to stop
++		 * queueing transfer requests.
++		 */
++		int err = (halt_status == FH_OTG_HC_XFER_AHB_ERR)
++		    ? (-FH_E_IO)
++		    : (-FH_E_OVERFLOW);
++						
++		FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
++			for (idx = 0; idx < qtd->urb->packet_count; idx++) {
++				frame_desc = &qtd->urb->iso_descs[idx];
++				frame_desc->status = err;
++			}
++			hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
++			fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
++		}
++		return;
++	}
++
++	FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
++
++		if (!qtd->in_process)
++			break;
++
++		urb_compl = 0;
++
++		do {
++
++			dma_desc = &qh->desc_list[idx];
++			
++			frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
++			remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;
++
++			if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
++				/*
++				 * XactError, or unable to complete all the
++				 * transactions in the scheduled
++				 * micro-frame/frame; both are indicated by
++				 * DMA_DESC_STS_PKTERR.
++				 */
++				qtd->urb->error_count++;
++				frame_desc->actual_length = qh->n_bytes[idx] - remain;
++				frame_desc->status = -FH_E_PROTOCOL;
++			} else {
++				/* Success */
++								
++				frame_desc->actual_length = qh->n_bytes[idx] - remain;
++				frame_desc->status = 0;
++			}
++
++			if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
++				/*
++				 * urb->status is not used for isoc transfers here.
++				 * The individual frame_desc status are used instead.
++				 */
++
++				hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
++				fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
++
++				/*
++				 * This check is necessary because urb_dequeue can be called
++				 * from the urb complete callback (sound driver example).
++				 * All pending URBs are dequeued there, so there is no need
++				 * for further processing.
++				 */
++				if (hc->halt_status == FH_OTG_HC_XFER_URB_DEQUEUE) {	
++					return;
++				}
++
++				urb_compl = 1;
++
++			}
++
++			qh->ntd--;
++
++			/* Stop if IOC requested descriptor reached */
++			if (dma_desc->status.b_isoc.ioc) {
++				idx = desclist_idx_inc(idx, qh->interval, hc->speed);	
++				goto stop_scan;
++			}
++
++			idx = desclist_idx_inc(idx, qh->interval, hc->speed);
++
++			if (urb_compl)
++				break;
++		}
++		while (idx != qh->td_first);
++	}
++stop_scan:
++	qh->td_first = idx;
++}
++
++uint8_t update_non_isoc_urb_state_ddma(fh_otg_hcd_t * hcd,
++				       fh_hc_t * hc,
++				       fh_otg_qtd_t * qtd,
++				       fh_otg_host_dma_desc_t * dma_desc,
++				       fh_otg_halt_status_e halt_status,
++				       uint32_t n_bytes, uint8_t * xfer_done)
++{
++
++	uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
++	fh_otg_hcd_urb_t *urb = qtd->urb;
++
++	if (halt_status == FH_OTG_HC_XFER_AHB_ERR) {
++		urb->status = -FH_E_IO;
++		return 1;
++	}
++	if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
++		switch (halt_status) {
++		case FH_OTG_HC_XFER_STALL:
++			urb->status = -FH_E_PIPE;
++			break;
++		case FH_OTG_HC_XFER_BABBLE_ERR:
++			urb->status = -FH_E_OVERFLOW;
++			break;
++		case FH_OTG_HC_XFER_XACT_ERR:
++			urb->status = -FH_E_PROTOCOL;
++			break;
++		default:	
++			FH_ERROR("%s: Unhandled descriptor error status (%d)\n", __func__,
++			  	  halt_status);
++			break;
++		}
++		return 1;
++	}
++
++	if (dma_desc->status.b.a == 1) {
++		FH_DEBUGPL(DBG_HCDV,
++			    "Active descriptor encountered on channel %d\n",
++			    hc->hc_num);
++		return 0;
++	}
++
++	if (hc->ep_type == FH_OTG_EP_TYPE_CONTROL) {
++		if (qtd->control_phase == FH_OTG_CONTROL_DATA) {
++			urb->actual_length += n_bytes - remain;
++			if (remain || urb->actual_length >= urb->length) {
++				/*
++				 * For the Control Data stage do not set urb->status = 0, to
++				 * prevent the URB callback. It is set when the Status phase
++				 * is done. See below.
++				 */
++				*xfer_done = 1;
++			}
++
++		} else if (qtd->control_phase == FH_OTG_CONTROL_STATUS) {
++			urb->status = 0;
++			*xfer_done = 1;
++		}
++		/* No handling for SETUP stage */
++	} else {
++		/* BULK and INTR */
++		urb->actual_length += n_bytes - remain;
++		if (remain || urb->actual_length >= urb->length) {
++			urb->status = 0;
++			*xfer_done = 1;
++		}
++	}
++
++	return 0;
++}
++
++static void complete_non_isoc_xfer_ddma(fh_otg_hcd_t * hcd,
++					fh_hc_t * hc,
++					fh_otg_hc_regs_t * hc_regs,
++					fh_otg_halt_status_e halt_status)
++{
++	fh_otg_hcd_urb_t *urb = NULL;
++	fh_otg_qtd_t *qtd, *qtd_tmp;
++	fh_otg_qh_t *qh;
++	fh_otg_host_dma_desc_t *dma_desc;
++	uint32_t n_bytes, n_desc, i;
++	uint8_t failed = 0, xfer_done;
++
++	n_desc = 0;
++
++	qh = hc->qh;
++
++	if (hc->halt_status == FH_OTG_HC_XFER_URB_DEQUEUE) {
++		FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
++			qtd->in_process = 0;
++		}
++		return;
++	}
++
++	FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
++
++		urb = qtd->urb;
++
++		n_bytes = 0;
++		xfer_done = 0;
++
++		for (i = 0; i < qtd->n_desc; i++) {
++			dma_desc = &qh->desc_list[n_desc];
++
++			n_bytes = qh->n_bytes[n_desc];
++
++			failed =
++			    update_non_isoc_urb_state_ddma(hcd, hc, qtd,
++							   dma_desc,
++							   halt_status, n_bytes,
++							   &xfer_done);
++
++			if (failed
++			    || (xfer_done
++				&& (urb->status != -FH_E_IN_PROGRESS))) {
++
++				hcd->fops->complete(hcd, urb->priv, urb,
++						    urb->status);
++				fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
++
++				if (failed)
++					goto stop_scan;
++			} else if (qh->ep_type == UE_CONTROL) {
++				if (qtd->control_phase == FH_OTG_CONTROL_SETUP) {
++					if (urb->length > 0) {
++						qtd->control_phase = FH_OTG_CONTROL_DATA;
++					} else {
++						qtd->control_phase = FH_OTG_CONTROL_STATUS;
++					}
++					FH_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
++				} else if (qtd->control_phase == FH_OTG_CONTROL_DATA) {
++					if (xfer_done) {
++						qtd->control_phase = FH_OTG_CONTROL_STATUS;
++						FH_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
++					} else if (i + 1 == qtd->n_desc) {
++						/* 
++						 * Last descriptor for Control data stage which is
++						 * not completed yet.
++						 */
++						fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
++					}
++				}
++			}
++
++			n_desc++;
++		}
++
++	}
++
++stop_scan:
++
++	if (qh->ep_type != UE_CONTROL) {
++		/* 
++		 * Resetting the data toggle for bulk
++		 * and interrupt endpoints in case of stall. See handle_hc_stall_intr() 
++		 */
++		if (halt_status == FH_OTG_HC_XFER_STALL)
++			qh->data_toggle = FH_OTG_HC_PID_DATA0;
++		else
++			fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
++	}
++
++	if (halt_status == FH_OTG_HC_XFER_COMPLETE) {
++		hcint_data_t hcint;
++		hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++		if (hcint.b.nyet) {
++			/*
++			 * Got a NYET on the last transaction of the transfer. It
++			 * means that the endpoint should be in the PING state at the
++			 * beginning of the next transfer.
++			 */
++			qh->ping_state = 1;
++			clear_hc_int(hc_regs, nyet);
++		}
++
++	}
++
++}
++
++/**
++ * This function is called from interrupt handlers.
++ * It scans the descriptor list, updates the URB's status and
++ * calls the completion routine for the URB if it is done.
++ * It releases the channel to be used by other transfers.
++ * In the case of an Isochronous endpoint the channel is not halted until
++ * the end of the session, i.e. until the QTD list is empty.
++ * If a periodic channel is released, the FrameList is updated accordingly.
++ *
++ * Calls transaction selection routines to activate pending transfers.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param hc Host channel, the transfer is completed on.
++ * @param hc_regs Host channel registers.
++ * @param halt_status Reason the channel is being halted, 
++ *		      or just XferComplete for isochronous transfer
++ */
++void fh_otg_hcd_complete_xfer_ddma(fh_otg_hcd_t * hcd,
++				    fh_hc_t * hc,
++				    fh_otg_hc_regs_t * hc_regs,
++				    fh_otg_halt_status_e halt_status)
++{
++	uint8_t continue_isoc_xfer = 0;
++	fh_otg_transaction_type_e tr_type;
++	fh_otg_qh_t *qh = hc->qh;
++
++	if (hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
++
++		complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);
++
++		/* Release the channel if halted or session completed */
++		if (halt_status != FH_OTG_HC_XFER_COMPLETE ||
++		    FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
++
++			/* Halt the channel if session completed */
++			if (halt_status == FH_OTG_HC_XFER_COMPLETE) {
++				fh_otg_hc_halt(hcd->core_if, hc, halt_status);
++			}
++
++			release_channel_ddma(hcd, qh);
++			fh_otg_hcd_qh_remove(hcd, qh);
++		} else {
++			/* Keep in assigned schedule to continue transfer */
++			FH_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
++					   &qh->qh_list_entry);
++			continue_isoc_xfer = 1;
++
++		}
++		/** @todo Consider the case when period exceeds FrameList size.
++		 *  Frame Rollover interrupt should be used. 
++		 */
++	} else {
++		/* Scan descriptor list to complete the URB(s), then release the channel */
++		complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);
++
++		release_channel_ddma(hcd, qh);
++		fh_otg_hcd_qh_remove(hcd, qh);
++
++		if (!FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
++			/* Add back to inactive non-periodic schedule on normal completion */
++			fh_otg_hcd_qh_add(hcd, qh);
++		}
++
++	}
++	tr_type = fh_otg_hcd_select_transactions(hcd);
++	if (tr_type != FH_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
++		if (continue_isoc_xfer) {
++			if (tr_type == FH_OTG_TRANSACTION_NONE) {
++				tr_type = FH_OTG_TRANSACTION_PERIODIC;
++			} else if (tr_type == FH_OTG_TRANSACTION_NON_PERIODIC) {
++				tr_type = FH_OTG_TRANSACTION_ALL;
++			}
++		}
++		fh_otg_hcd_queue_transactions(hcd, tr_type);
++	}
++}
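/*
 * Editor's illustrative sketch, not part of the patch: the escalation at the
 * end of fh_otg_hcd_complete_xfer_ddma(). When an isochronous session
 * continues, the periodic schedule must still be serviced even if the
 * selector found nothing (NONE -> PERIODIC) or found only non-periodic work
 * (NON_PERIODIC -> ALL). Enum values mirror the order declared in the header.
 */
#include <stdio.h>

enum tr_type { TR_NONE, TR_PERIODIC, TR_NON_PERIODIC, TR_ALL };

static enum tr_type escalate(enum tr_type t, int continue_isoc)
{
	if (!continue_isoc)
		return t;
	if (t == TR_NONE)
		return TR_PERIODIC;
	if (t == TR_NON_PERIODIC)
		return TR_ALL;
	return t;
}

int main(void)
{
	printf("%d %d %d\n",
	       escalate(TR_NONE, 1),         /* 1: periodic only */
	       escalate(TR_NON_PERIODIC, 1), /* 3: both schedules */
	       escalate(TR_PERIODIC, 0));    /* 1: unchanged */
	return 0;
}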
++
++#endif /* FH_DEVICE_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_if.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_if.h
+new file mode 100644
+index 00000000..55a0d658
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_if.h
+@@ -0,0 +1,412 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_if.h $
++ * $Revision: #12 $
++ * $Date: 2011/10/26 $
++ * $Change: 1873028 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_DEVICE_ONLY
++#ifndef __FH_HCD_IF_H__
++#define __FH_HCD_IF_H__
++
++#include "fh_otg_core_if.h"
++
++/** @file
++ * This file defines FH_OTG HCD Core API.
++ */
++
++struct fh_otg_hcd;
++typedef struct fh_otg_hcd fh_otg_hcd_t;
++
++struct fh_otg_hcd_urb;
++typedef struct fh_otg_hcd_urb fh_otg_hcd_urb_t;
++
++/** @name HCD Function Driver Callbacks */
++/** @{ */
++
++/** This function is called whenever the core switches to host mode. */
++typedef int (*fh_otg_hcd_start_cb_t) (fh_otg_hcd_t * hcd);
++
++/** This function is called when the device has been disconnected. */
++typedef int (*fh_otg_hcd_disconnect_cb_t) (fh_otg_hcd_t * hcd);
++
++/** The wrapper provides this function to the HCD core so it can obtain information about the hub to which the device is connected. */
++typedef int (*fh_otg_hcd_hub_info_from_urb_cb_t) (fh_otg_hcd_t * hcd,
++						   void *urb_handle,
++						   uint32_t * hub_addr,
++						   uint32_t * port_addr);
++/** Via this function the HCD core gets the device speed. */
++typedef int (*fh_otg_hcd_speed_from_urb_cb_t) (fh_otg_hcd_t * hcd,
++						void *urb_handle);
++
++/** This function is called when a URB is completed. */
++typedef int (*fh_otg_hcd_complete_urb_cb_t) (fh_otg_hcd_t * hcd,
++					      void *urb_handle,
++					      fh_otg_hcd_urb_t * fh_otg_urb,
++					      int32_t status);
++
++/** Via this function the HCD core gets the b_hnp_enable parameter. */
++typedef int (*fh_otg_hcd_get_b_hnp_enable) (fh_otg_hcd_t * hcd);
++
++struct fh_otg_hcd_function_ops {
++	fh_otg_hcd_start_cb_t start;
++	fh_otg_hcd_disconnect_cb_t disconnect;
++	fh_otg_hcd_hub_info_from_urb_cb_t hub_info;
++	fh_otg_hcd_speed_from_urb_cb_t speed;
++	fh_otg_hcd_complete_urb_cb_t complete;
++	fh_otg_hcd_get_b_hnp_enable get_b_hnp_enable;
++};
++/** @} */
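++
++/*
++ * A minimal sketch (added for illustration, not part of the original
++ * sources) of how a wrapper layer might populate these callbacks. The
++ * example_* names are hypothetical; only the fh_otg_hcd_function_ops
++ * layout and the callback signatures above are taken from this header:
++ *
++ *	static int example_start(fh_otg_hcd_t *hcd) { return 0; }
++ *	static int example_disconnect(fh_otg_hcd_t *hcd) { return 0; }
++ *
++ *	static struct fh_otg_hcd_function_ops example_fops = {
++ *		.start = example_start,
++ *		.disconnect = example_disconnect,
++ *		.hub_info = example_hub_info,
++ *		.speed = example_speed,
++ *		.complete = example_complete,
++ *		.get_b_hnp_enable = example_get_b_hnp_enable,
++ *	};
++ */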
++
++/** @name HCD Core API */
++/** @{ */
++/** This function allocates the fh_otg_hcd structure and returns a pointer to it. */
++extern fh_otg_hcd_t *fh_otg_hcd_alloc_hcd(void);
++
++/** This function should be called to initialize the HCD core.
++ *
++ * @param hcd The HCD
++ * @param core_if The FH_OTG Core
++ *
++ * Returns -FH_E_NO_MEMORY if there is not enough memory.
++ * Returns 0 on success.
++ */
++extern int fh_otg_hcd_init(fh_otg_hcd_t * hcd, fh_otg_core_if_t * core_if);
++
++/** Frees HCD
++ *
++ * @param hcd The HCD
++ */
++extern void fh_otg_hcd_remove(fh_otg_hcd_t * hcd);
++
++/** This function should be called on every hardware interrupt.
++ *
++ * @param fh_otg_hcd The HCD
++ *
++ * Returns a non-zero value if the interrupt is handled.
++ * Returns 0 if the interrupt is not handled.
++ */
++extern int32_t fh_otg_hcd_handle_intr(fh_otg_hcd_t * fh_otg_hcd);
++
++/**
++ * Returns private data set by
++ * fh_otg_hcd_set_priv_data function.
++ *
++ * @param hcd The HCD
++ */
++extern void *fh_otg_hcd_get_priv_data(fh_otg_hcd_t * hcd);
++
++/**
++ * Set private data.
++ *
++ * @param hcd The HCD
++ * @param priv_data pointer to be stored in private data
++ */
++extern void fh_otg_hcd_set_priv_data(fh_otg_hcd_t * hcd, void *priv_data);
++
++/**
++ * This function initializes the HCD Core.
++ *
++ * @param hcd The HCD
++ * @param fops The Function Driver Operations data structure containing pointers to all callbacks.
++ *
++ * Returns -FH_E_NO_DEVICE if the core is currently in device mode.
++ * Returns 0 on success
++ */
++extern int fh_otg_hcd_start(fh_otg_hcd_t * hcd,
++			     struct fh_otg_hcd_function_ops *fops);
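++
++/*
++ * Typical bring-up order using the API above, as a sketch (added for
++ * illustration; core_if, fops and wrapper_priv are assumed to exist in
++ * the caller, and error paths are abbreviated). fh_otg_hcd_init()
++ * returns -FH_E_NO_MEMORY on allocation failure and fh_otg_hcd_start()
++ * returns -FH_E_NO_DEVICE while the core is in device mode:
++ *
++ *	fh_otg_hcd_t *hcd = fh_otg_hcd_alloc_hcd();
++ *	if (!hcd || fh_otg_hcd_init(hcd, core_if))
++ *		goto err;
++ *	fh_otg_hcd_set_priv_data(hcd, wrapper_priv);
++ *	if (fh_otg_hcd_start(hcd, &fops))
++ *		goto err;
++ */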
++
++/**
++ * Halts the FH_otg host mode operations in a clean manner. USB transfers are
++ * stopped. 
++ *
++ * @param hcd The HCD
++ */
++extern void fh_otg_hcd_stop(fh_otg_hcd_t * hcd);
++
++/**
++ * Handles hub class-specific requests.
++ *
++ * @param fh_otg_hcd The HCD
++ * @param typeReq Request Type
++ * @param wValue wValue from control request
++ * @param wIndex wIndex from control request
++ * @param buf data buffer 
++ * @param wLength data buffer length
++ *
++ * Returns -FH_E_INVALID if an invalid argument is passed.
++ * Returns 0 on success
++ */
++extern int fh_otg_hcd_hub_control(fh_otg_hcd_t * fh_otg_hcd,
++				   uint16_t typeReq, uint16_t wValue,
++				   uint16_t wIndex, uint8_t * buf,
++				   uint16_t wLength);
++
++/**
++ * Returns the OTG port number.
++ *
++ * @param hcd The HCD
++ */
++extern uint32_t fh_otg_hcd_otg_port(fh_otg_hcd_t * hcd);
++
++/**
++ * Returns OTG version - either 1.3 or 2.0.
++ *
++ * @param core_if The core_if structure pointer
++ */
++extern uint16_t fh_otg_get_otg_version(fh_otg_core_if_t * core_if);
++
++/**
++ * Returns 1 if the core is currently acting as B-host, and 0 otherwise.
++ *
++ * @param hcd The HCD
++ */
++extern uint32_t fh_otg_hcd_is_b_host(fh_otg_hcd_t * hcd);
++
++/**
++ * Returns the current frame number.
++ *
++ * @param hcd The HCD
++ */
++extern int fh_otg_hcd_get_frame_number(fh_otg_hcd_t * hcd);
++
++/**
++ * Dumps the HCD state.
++ *
++ * @param hcd The HCD
++ */
++extern void fh_otg_hcd_dump_state(fh_otg_hcd_t * hcd);
++
++/**
++ * Dump the average frame remaining at SOF. This can be used to
++ * determine average interrupt latency. Frame remaining is also shown for
++ * start transfer and two additional sample points.
++ * Currently this function is not implemented.
++ *
++ * @param hcd The HCD
++ */
++extern void fh_otg_hcd_dump_frrem(fh_otg_hcd_t * hcd);
++
++/**
++ * Sends LPM transaction to the local device.
++ *
++ * @param hcd The HCD
++ * @param devaddr Device Address
++ * @param hird Host initiated resume duration
++ * @param bRemoteWake Value of bRemoteWake field in LPM transaction
++ *
++ * Returns a negative value if sending the LPM transaction failed.
++ * Returns 0 on success.
++ */
++extern int fh_otg_hcd_send_lpm(fh_otg_hcd_t * hcd, uint8_t devaddr,
++				uint8_t hird, uint8_t bRemoteWake);
++
++/* URB interface */
++
++/**
++ * Allocates memory for a fh_otg_hcd_urb structure.
++ * The allocated memory should be freed with a call to FH_FREE.
++ *
++ * @param hcd The HCD
++ * @param iso_desc_count Count of ISOC descriptors
++ * @param atomic_alloc Specifies whether to perform atomic allocation.
++ */
++extern fh_otg_hcd_urb_t *fh_otg_hcd_urb_alloc(fh_otg_hcd_t * hcd,
++						int iso_desc_count,
++						int atomic_alloc);
++
++/**
++ * Set pipe information in URB.
++ *
++ * @param hcd_urb FH_OTG URB
++ * @param devaddr Device Address
++ * @param ep_num Endpoint Number
++ * @param ep_type Endpoint Type
++ * @param ep_dir Endpoint Direction
++ * @param mps Max Packet Size
++ */
++extern void fh_otg_hcd_urb_set_pipeinfo(fh_otg_hcd_urb_t * hcd_urb,
++					 uint8_t devaddr, uint8_t ep_num,
++					 uint8_t ep_type, uint8_t ep_dir,
++					 uint16_t mps);
++
++/* Transfer flags */
++#define URB_GIVEBACK_ASAP 0x1
++#define URB_SEND_ZERO_PACKET 0x2
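++
++/* Note (added): URB_SEND_ZERO_PACKET requests a trailing zero-length
++ * packet when a bulk OUT transfer ends exactly on a max-packet boundary;
++ * update_urb_state_xfer_comp() in fh_otg_hcd_intr.c defers URB
++ * completion until that ZLP has been sent. */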
++
++/**
++ * Sets fh_otg_hcd_urb parameters.
++ *
++ * @param urb FH_OTG URB allocated by fh_otg_hcd_urb_alloc function.
++ * @param urb_handle Unique handle for the request; this will be passed back
++ * to the function driver in the completion callback.
++ * @param buf The buffer for the data
++ * @param dma The DMA buffer for the data
++ * @param buflen Transfer length
++ * @param sp Buffer for setup data
++ * @param sp_dma DMA address of setup data buffer
++ * @param flags Transfer flags
++ * @param interval Polling interval for interrupt or isochronous transfers.
++ */
++extern void fh_otg_hcd_urb_set_params(fh_otg_hcd_urb_t * urb,
++				       void *urb_handle, void *buf,
++				       fh_dma_t dma, uint32_t buflen, void *sp,
++				       fh_dma_t sp_dma, uint32_t flags,
++				       uint16_t interval);
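++
++/*
++ * Sketch (added for illustration) of submitting a bulk OUT URB with the
++ * helpers above. devaddr, ep_num, buf/buf_dma/len and urb_handle are
++ * assumed to come from the caller; ep_dir 0 is taken to mean OUT and
++ * UE_BULK is the pipe-type constant used elsewhere in this driver:
++ *
++ *	void *ep_handle;
++ *	fh_otg_hcd_urb_t *u = fh_otg_hcd_urb_alloc(hcd, 0, 1);
++ *	fh_otg_hcd_urb_set_pipeinfo(u, devaddr, ep_num, UE_BULK, 0, 512);
++ *	fh_otg_hcd_urb_set_params(u, urb_handle, buf, buf_dma, len,
++ *				   NULL, 0, URB_SEND_ZERO_PACKET, 0);
++ *	fh_otg_hcd_urb_enqueue(hcd, u, &ep_handle, 1);
++ */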
++
++/** Gets status from fh_otg_hcd_urb
++ *
++ * @param fh_otg_urb FH_OTG URB
++ */
++extern uint32_t fh_otg_hcd_urb_get_status(fh_otg_hcd_urb_t * fh_otg_urb);
++
++/** Gets actual length from fh_otg_hcd_urb
++ *
++ * @param fh_otg_urb FH_OTG URB
++ */
++extern uint32_t fh_otg_hcd_urb_get_actual_length(fh_otg_hcd_urb_t *
++						  fh_otg_urb);
++
++/** Gets error count from fh_otg_hcd_urb. Only for ISOC URBs
++ *
++ * @param fh_otg_urb FH_OTG URB
++ */
++extern uint32_t fh_otg_hcd_urb_get_error_count(fh_otg_hcd_urb_t *
++						fh_otg_urb);
++
++/** Set ISOC descriptor offset and length
++ *
++ * @param fh_otg_urb FH_OTG URB
++ * @param desc_num ISOC descriptor number
++ * @param offset Offset from the beginning of the buffer.
++ * @param length Transaction length
++ */
++extern void fh_otg_hcd_urb_set_iso_desc_params(fh_otg_hcd_urb_t * fh_otg_urb,
++						int desc_num, uint32_t offset,
++						uint32_t length);
++
++/** Get status of ISOC descriptor, specified by desc_num
++ *
++ * @param fh_otg_urb FH_OTG URB
++ * @param desc_num ISOC descriptor number 
++ */
++extern uint32_t fh_otg_hcd_urb_get_iso_desc_status(fh_otg_hcd_urb_t *
++						    fh_otg_urb, int desc_num);
++
++/** Get actual length of ISOC descriptor, specified by desc_num
++ *
++ * @param fh_otg_urb FH_OTG URB
++ * @param desc_num ISOC descriptor number
++ */
++extern uint32_t fh_otg_hcd_urb_get_iso_desc_actual_length(fh_otg_hcd_urb_t *
++							   fh_otg_urb,
++							   int desc_num);
++
++/** Queues a URB. After the transfer completes, the complete callback will be called with the URB status.
++ *
++ * @param fh_otg_hcd The HCD
++ * @param fh_otg_urb FH_OTG URB
++ * @param ep_handle Out parameter for returning endpoint handle
++ * @param atomic_alloc Flag to do atomic allocation if needed
++ *
++ * Returns -FH_E_NO_DEVICE if no device is connected.
++ * Returns -FH_E_NO_MEMORY if there is not enough memory.
++ * Returns 0 on success.
++ */
++extern int fh_otg_hcd_urb_enqueue(fh_otg_hcd_t * fh_otg_hcd,
++				   fh_otg_hcd_urb_t * fh_otg_urb,
++				   void **ep_handle, int atomic_alloc);
++
++/** Dequeues the specified URB.
++ *
++ * @param fh_otg_hcd The HCD
++ * @param fh_otg_urb FH_OTG URB
++ */
++extern int fh_otg_hcd_urb_dequeue(fh_otg_hcd_t * fh_otg_hcd,
++				   fh_otg_hcd_urb_t * fh_otg_urb);
++
++/** Frees resources in the FH_otg controller related to a given endpoint.
++ * Any URBs for the endpoint must already be dequeued.
++ *
++ * @param hcd The HCD
++ * @param ep_handle Endpoint handle, returned by fh_otg_hcd_urb_enqueue function
++ * @param retry Number of retries if there are queued transfers.
++ *
++ * Returns -FH_E_INVALID if invalid arguments are passed.
++ * Returns 0 on success
++ */
++extern int fh_otg_hcd_endpoint_disable(fh_otg_hcd_t * hcd, void *ep_handle,
++					int retry);
++
++/** Resets the data toggle in the qh structure. This function can be called from
++ * usb_clear_halt routine.
++ *
++ * @param hcd The HCD
++ * @param ep_handle Endpoint handle, returned by fh_otg_hcd_urb_enqueue function
++ *
++ * Returns -FH_E_INVALID if invalid arguments are passed.
++ * Returns 0 on success
++ */
++extern int fh_otg_hcd_endpoint_reset(fh_otg_hcd_t * hcd, void *ep_handle);
++
++/** Returns 1 if the status of the specified port has changed, and 0 otherwise.
++ *
++ * @param hcd The HCD
++ * @param port Port number
++ */
++extern int fh_otg_hcd_is_status_changed(fh_otg_hcd_t * hcd, int port);
++
++/** Call this function to check if bandwidth was allocated for the specified endpoint.
++ * Only for ISOC and INTERRUPT endpoints.
++ *
++ * @param hcd The HCD
++ * @param ep_handle Endpoint handle
++ */
++extern int fh_otg_hcd_is_bandwidth_allocated(fh_otg_hcd_t * hcd,
++					      void *ep_handle);
++
++/** Call this function to check if bandwidth was freed for the specified endpoint.
++ *
++ * @param hcd The HCD
++ * @param ep_handle Endpoint handle
++ */
++extern int fh_otg_hcd_is_bandwidth_freed(fh_otg_hcd_t * hcd, void *ep_handle);
++
++/** Returns the bandwidth allocated for the specified endpoint, in microseconds.
++ * Only for ISOC and INTERRUPT endpoints.
++ *
++ * @param hcd The HCD
++ * @param ep_handle Endpoint handle
++ */
++extern uint8_t fh_otg_hcd_get_ep_bandwidth(fh_otg_hcd_t * hcd,
++					    void *ep_handle);
++
++/** @} */
++
++#endif /* __FH_HCD_IF_H__ */
++#endif /* FH_DEVICE_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_intr.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_intr.c
+new file mode 100644
+index 00000000..e891950f
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_intr.c
+@@ -0,0 +1,2107 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_intr.c $
++ * $Revision: #94 $
++ * $Date: 2013/01/31 $
++ * $Change: 2155605 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_DEVICE_ONLY
++
++#include "fh_otg_hcd.h"
++#include "fh_otg_regs.h"
++
++/** @file
++ * This file contains the implementation of the HCD Interrupt handlers.
++ */
++
++/** This function handles interrupts for the HCD. */
++int32_t fh_otg_hcd_handle_intr(fh_otg_hcd_t * fh_otg_hcd)
++{
++	int retval = 0;
++
++	fh_otg_core_if_t *core_if = fh_otg_hcd->core_if;
++	gintsts_data_t gintsts;
++#ifdef DEBUG
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++#endif
++	
++	if (fh_otg_check_haps_status(core_if) == -1) {
++		FH_WARN("HAPS is disconnected");
++		return retval;
++	}
++
++	/* Exit from ISR if core is hibernated */
++	if (core_if->hibernation_suspend == 1) {
++		return retval;
++	}
++	FH_SPINLOCK(fh_otg_hcd->lock);
++	/* Check if HOST Mode */
++	if (fh_otg_is_host_mode(core_if)) {
++		gintsts.d32 = fh_otg_read_core_intr(core_if);
++		if (!gintsts.d32) {
++			FH_SPINUNLOCK(fh_otg_hcd->lock);
++			return 0;
++		}
++#ifdef DEBUG
++		/* Don't print debug message in the interrupt handler on SOF */
++#ifndef DEBUG_SOF
++		if (gintsts.d32 != FH_SOF_INTR_MASK)
++#endif
++			FH_DEBUGPL(DBG_HCD, "\n");
++#endif
++
++#ifdef DEBUG
++#ifndef DEBUG_SOF
++		if (gintsts.d32 != FH_SOF_INTR_MASK)
++#endif
++			FH_DEBUGPL(DBG_HCD,
++				    "FH OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
++				    gintsts.d32);
++#endif
++
++		if (gintsts.b.sofintr) {
++			retval |= fh_otg_hcd_handle_sof_intr(fh_otg_hcd);
++		}
++		if (gintsts.b.rxstsqlvl) {
++			retval |=
++			    fh_otg_hcd_handle_rx_status_q_level_intr
++			    (fh_otg_hcd);
++		}
++		if (gintsts.b.nptxfempty) {
++			retval |=
++			    fh_otg_hcd_handle_np_tx_fifo_empty_intr
++			    (fh_otg_hcd);
++		}
++		if (gintsts.b.i2cintr) {
++			/** @todo Implement i2cintr handler. */
++		}
++		if (gintsts.b.portintr) {
++			retval |= fh_otg_hcd_handle_port_intr(fh_otg_hcd);
++		}
++		if (gintsts.b.hcintr) {
++			retval |= fh_otg_hcd_handle_hc_intr(fh_otg_hcd);
++		}
++		if (gintsts.b.ptxfempty) {
++			retval |=
++			    fh_otg_hcd_handle_perio_tx_fifo_empty_intr
++			    (fh_otg_hcd);
++		}
++#ifdef DEBUG
++#ifndef DEBUG_SOF
++		if (gintsts.d32 != FH_SOF_INTR_MASK)
++#endif
++		{
++			FH_DEBUGPL(DBG_HCD,
++				    "FH OTG HCD Finished Servicing Interrupts\n");
++			FH_DEBUGPL(DBG_HCDV, "FH OTG HCD gintsts=0x%08x\n",
++				    FH_READ_REG32(&global_regs->gintsts));
++			FH_DEBUGPL(DBG_HCDV, "FH OTG HCD gintmsk=0x%08x\n",
++				    FH_READ_REG32(&global_regs->gintmsk));
++		}
++#endif
++
++#ifdef DEBUG
++#ifndef DEBUG_SOF
++		if (gintsts.d32 != FH_SOF_INTR_MASK)
++#endif
++			FH_DEBUGPL(DBG_HCD, "\n");
++#endif
++
++	}
++	FH_SPINUNLOCK(fh_otg_hcd->lock);
++	return retval;
++}
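++
++/*
++ * Sketch (added for illustration) of how a platform wrapper might hook
++ * this into an interrupt line. The Linux-style handler below is
++ * hypothetical; only fh_otg_hcd_handle_intr() is from this file:
++ *
++ *	static irqreturn_t example_hcd_irq(int irq, void *dev)
++ *	{
++ *		fh_otg_hcd_t *hcd = dev;
++ *
++ *		return fh_otg_hcd_handle_intr(hcd) ? IRQ_HANDLED : IRQ_NONE;
++ *	}
++ */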
++
++#ifdef FH_TRACK_MISSED_SOFS
++#warning Compiling code to track missed SOFs
++#define FRAME_NUM_ARRAY_SIZE 1000
++/**
++ * This function is for debug only.
++ */
++static inline void track_missed_sofs(uint16_t curr_frame_number)
++{
++	static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
++	static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
++	static int frame_num_idx = 0;
++	static uint16_t last_frame_num = FH_HFNUM_MAX_FRNUM;
++	static int dumped_frame_num_array = 0;
++
++	if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
++		if (((last_frame_num + 1) & FH_HFNUM_MAX_FRNUM) !=
++		    curr_frame_number) {
++			frame_num_array[frame_num_idx] = curr_frame_number;
++			last_frame_num_array[frame_num_idx++] = last_frame_num;
++		}
++	} else if (!dumped_frame_num_array) {
++		int i;
++		FH_PRINTF("Frame     Last Frame\n");
++		FH_PRINTF("-----     ----------\n");
++		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
++			FH_PRINTF("0x%04x    0x%04x\n",
++				   frame_num_array[i], last_frame_num_array[i]);
++		}
++		dumped_frame_num_array = 1;
++	}
++	last_frame_num = curr_frame_number;
++}
++#endif
++
++/**
++ * Handles the start-of-frame interrupt in host mode. Non-periodic
++ * transactions may be queued to the FH_otg controller for the current
++ * (micro)frame. Periodic transactions may be queued to the controller for the
++ * next (micro)frame.
++ */
++int32_t fh_otg_hcd_handle_sof_intr(fh_otg_hcd_t * hcd)
++{
++	hfnum_data_t hfnum;
++	fh_list_link_t *qh_entry;
++	fh_otg_qh_t *qh;
++	fh_otg_transaction_type_e tr_type;
++	gintsts_data_t gintsts = {.d32 = 0 };
++
++	hfnum.d32 =
++	    FH_READ_REG32(&hcd->core_if->host_if->host_global_regs->hfnum);
++
++#ifdef DEBUG_SOF
++	FH_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
++#endif
++	hcd->frame_number = hfnum.b.frnum;
++
++#ifdef DEBUG
++	hcd->frrem_accum += hfnum.b.frrem;
++	hcd->frrem_samples++;
++#endif
++
++#ifdef FH_TRACK_MISSED_SOFS
++	track_missed_sofs(hcd->frame_number);
++#endif
++	/* Determine whether any periodic QHs should be executed. */
++	qh_entry = FH_LIST_FIRST(&hcd->periodic_sched_inactive);
++	while (qh_entry != &hcd->periodic_sched_inactive) {
++		qh = FH_LIST_ENTRY(qh_entry, fh_otg_qh_t, qh_list_entry);
++		qh_entry = qh_entry->next;
++		if (fh_frame_num_le(qh->sched_frame, hcd->frame_number)) {
++			/*
++			 * Move QH to the ready list to be executed next
++			 * (micro)frame.
++			 */
++			FH_LIST_MOVE_HEAD(&hcd->periodic_sched_ready,
++					   &qh->qh_list_entry);
++		}
++	}
++	tr_type = fh_otg_hcd_select_transactions(hcd);
++	if (tr_type != FH_OTG_TRANSACTION_NONE) {
++		fh_otg_hcd_queue_transactions(hcd, tr_type);
++	}
++
++	/* Clear interrupt */
++	gintsts.b.sofintr = 1;
++	FH_WRITE_REG32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
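++
++/*
++ * Note (added): fh_frame_num_le() must compare frame numbers modulo the
++ * counter width, since hfnum.b.frnum wraps at FH_HFNUM_MAX_FRNUM.
++ * Assuming the usual 14-bit counter (FH_HFNUM_MAX_FRNUM == 0x3FFF), a
++ * QH with sched_frame == 0x3FFE is still due when frame_number has
++ * wrapped around to 0x0001, even though a plain <= would say otherwise.
++ */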
++
++/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
++ * least one packet in the Rx FIFO.  The packets are moved from the FIFO to
++ * memory if the FH_otg controller is operating in Slave mode. */
++int32_t fh_otg_hcd_handle_rx_status_q_level_intr(fh_otg_hcd_t * fh_otg_hcd)
++{
++	host_grxsts_data_t grxsts;
++	fh_hc_t *hc = NULL;
++
++	FH_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
++
++	grxsts.d32 =
++	    FH_READ_REG32(&fh_otg_hcd->core_if->core_global_regs->grxstsp);
++
++	hc = fh_otg_hcd->hc_ptr_array[grxsts.b.chnum];
++	if (!hc) {
++		FH_ERROR("Unable to get corresponding channel\n");
++		return 0;
++	}
++
++	/* Packet Status */
++	FH_DEBUGPL(DBG_HCDV, "    Ch num = %d\n", grxsts.b.chnum);
++	FH_DEBUGPL(DBG_HCDV, "    Count = %d\n", grxsts.b.bcnt);
++	FH_DEBUGPL(DBG_HCDV, "    DPID = %d, hc.dpid = %d\n", grxsts.b.dpid,
++		    hc->data_pid_start);
++	FH_DEBUGPL(DBG_HCDV, "    PStatus = %d\n", grxsts.b.pktsts);
++
++	switch (grxsts.b.pktsts) {
++	case FH_GRXSTS_PKTSTS_IN:
++		/* Read the data into the host buffer. */
++		if (grxsts.b.bcnt > 0) {
++			fh_otg_read_packet(fh_otg_hcd->core_if,
++					    hc->xfer_buff, grxsts.b.bcnt);
++
++			/* Update the HC fields for the next packet received. */
++			hc->xfer_count += grxsts.b.bcnt;
++			hc->xfer_buff += grxsts.b.bcnt;
++		}
++
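++		/* Intentional fall-through: once the data (if any) has been
++		 * read, these status values need no further handling here. */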
++	case FH_GRXSTS_PKTSTS_IN_XFER_COMP:
++	case FH_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
++	case FH_GRXSTS_PKTSTS_CH_HALTED:
++		/* Handled in interrupt, just ignore data */
++		break;
++	default:
++		FH_ERROR("RX_STS_Q Interrupt: Unknown status %d\n",
++			  grxsts.b.pktsts);
++		break;
++	}
++
++	return 1;
++}
++
++/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
++ * data packets may be written to the FIFO for OUT transfers. More requests
++ * may be written to the non-periodic request queue for IN transfers. This
++ * interrupt is enabled only in Slave mode. */
++int32_t fh_otg_hcd_handle_np_tx_fifo_empty_intr(fh_otg_hcd_t * fh_otg_hcd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
++	fh_otg_hcd_queue_transactions(fh_otg_hcd,
++				       FH_OTG_TRANSACTION_NON_PERIODIC);
++	return 1;
++}
++
++/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
++ * packets may be written to the FIFO for OUT transfers. More requests may be
++ * written to the periodic request queue for IN transfers. This interrupt is
++ * enabled only in Slave mode. */
++int32_t fh_otg_hcd_handle_perio_tx_fifo_empty_intr(fh_otg_hcd_t * fh_otg_hcd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
++	fh_otg_hcd_queue_transactions(fh_otg_hcd,
++				       FH_OTG_TRANSACTION_PERIODIC);
++	return 1;
++}
++
++/** There are multiple conditions that can cause a port interrupt. This function
++ * determines which interrupt conditions have occurred and handles them
++ * appropriately. */
++int32_t fh_otg_hcd_handle_port_intr(fh_otg_hcd_t * fh_otg_hcd)
++{
++	int retval = 0;
++	hprt0_data_t hprt0;
++	hprt0_data_t hprt0_modify;
++
++	hprt0.d32 = FH_READ_REG32(fh_otg_hcd->core_if->host_if->hprt0);
++	hprt0_modify.d32 = FH_READ_REG32(fh_otg_hcd->core_if->host_if->hprt0);
++
++	/* Clear appropriate bits in HPRT0 to clear the interrupt bit in
++	 * GINTSTS */
++
++	hprt0_modify.b.prtena = 0;
++	hprt0_modify.b.prtconndet = 0;
++	hprt0_modify.b.prtenchng = 0;
++	hprt0_modify.b.prtovrcurrchng = 0;
++
++	/* Port Connect Detected
++	 * Set flag and clear if detected */
++	if (fh_otg_hcd->core_if->hibernation_suspend == 1) {
++		/* Don't modify port status if we are in hibernation state */
++		hprt0_modify.b.prtconndet = 1;
++		hprt0_modify.b.prtenchng = 1;
++		FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
++		hprt0.d32 = FH_READ_REG32(fh_otg_hcd->core_if->host_if->hprt0);
++		return retval;
++	}
++
++	if (hprt0.b.prtconndet) {
++		/** @todo - check if the steps performed in the 'else' block should be performed regardless of ADP */
++		if (fh_otg_hcd->core_if->adp_enable && 	
++				fh_otg_hcd->core_if->adp.vbuson_timer_started == 1) {
++			FH_PRINTF("PORT CONNECT DETECTED ----------------\n");
++			FH_TIMER_CANCEL(fh_otg_hcd->core_if->adp.vbuson_timer);
++			fh_otg_hcd->core_if->adp.vbuson_timer_started = 0;
++			/* TODO - check if this is required, as
++			 * host initialization was already performed
++			 * after initial ADP probing
++			 */
++			/*fh_otg_hcd->core_if->adp.vbuson_timer_started = 0;
++			fh_otg_core_init(fh_otg_hcd->core_if);
++			fh_otg_enable_global_interrupts(fh_otg_hcd->core_if);
++			cil_hcd_start(fh_otg_hcd->core_if);*/
++		} else {
++			hprt0_data_t hprt0_local;
++			FH_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
++				    "Port Connect Detected--\n", hprt0.d32);
++			fh_otg_hcd->flags.b.port_connect_status_change = 1;
++			fh_otg_hcd->flags.b.port_connect_status = 1;
++			hprt0_modify.b.prtconndet = 1;
++			/* PET testing */
++			if (fh_otg_hcd->core_if->otg_ver && (fh_otg_hcd->core_if->test_mode == 7)) {
++				hprt0_local.d32 = fh_otg_read_hprt0(fh_otg_hcd->core_if);
++				hprt0_local.b.prtrst = 1;
++				FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0, hprt0_local.d32);
++				fh_mdelay(60);
++				hprt0.d32 = fh_otg_read_hprt0(fh_otg_hcd->core_if);
++				hprt0.b.prtrst = 0;
++				FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
++			}
++	
++			/* B-Device has connected, Delete the connection timer. */
++			FH_TIMER_CANCEL(fh_otg_hcd->conn_timer);
++		}
++		/* The Hub driver asserts a reset when it sees port connect
++		 * status change flag */
++		retval |= 1;
++	}
++
++	/* Port Enable Changed
++	 * Clear if detected - Set internal flag if disabled */
++	if (hprt0.b.prtenchng) {
++		FH_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
++			    "Port Enable Changed--\n", hprt0.d32);
++		hprt0_modify.b.prtenchng = 1;
++		if (hprt0.b.prtena == 1) {
++			hfir_data_t hfir;
++			int do_reset = 0;
++			fh_otg_core_params_t *params =
++			    fh_otg_hcd->core_if->core_params;
++			fh_otg_core_global_regs_t *global_regs =
++			    fh_otg_hcd->core_if->core_global_regs;
++			fh_otg_host_if_t *host_if =
++			    fh_otg_hcd->core_if->host_if;
++			    
++			/* Every time the port is enabled, recalculate
++			 * HFIR.FrInterval
++			 */
++			hfir.d32 = FH_READ_REG32(&host_if->host_global_regs->hfir);
++			hfir.b.frint = calc_frame_interval(fh_otg_hcd->core_if);
++			FH_WRITE_REG32(&host_if->host_global_regs->hfir, hfir.d32);
++
++			/* Check if we need to adjust the PHY clock speed for
++			 * low power, and adjust it if so */
++			if (params->host_support_fs_ls_low_power) {
++				gusbcfg_data_t usbcfg;
++
++				usbcfg.d32 =
++				    FH_READ_REG32(&global_regs->gusbcfg);
++
++				if (hprt0.b.prtspd == FH_HPRT0_PRTSPD_LOW_SPEED
++				    || hprt0.b.prtspd ==
++				    FH_HPRT0_PRTSPD_FULL_SPEED) {
++					/*
++					 * Low power
++					 */
++					hcfg_data_t hcfg;
++					if (usbcfg.b.phylpwrclksel == 0) {
++						/* Set PHY low power clock select for FS/LS devices */
++						usbcfg.b.phylpwrclksel = 1;
++						FH_WRITE_REG32
++						    (&global_regs->gusbcfg,
++						     usbcfg.d32);
++						do_reset = 1;
++					}
++
++					hcfg.d32 =
++					    FH_READ_REG32
++					    (&host_if->host_global_regs->hcfg);
++
++					if (hprt0.b.prtspd ==
++					    FH_HPRT0_PRTSPD_LOW_SPEED
++					    && params->host_ls_low_power_phy_clk
++					    ==
++					    FH_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)
++					{
++						/* 6 MHZ */
++						FH_DEBUGPL(DBG_CIL,
++							    "FS_PHY programming HCFG to 6 MHz (Low Power)\n");
++						if (hcfg.b.fslspclksel !=
++						    FH_HCFG_6_MHZ) {
++							hcfg.b.fslspclksel =
++							    FH_HCFG_6_MHZ;
++							FH_WRITE_REG32
++							    (&host_if->host_global_regs->hcfg,
++							     hcfg.d32);
++							do_reset = 1;
++						}
++					} else {
++						/* 48 MHZ */
++						FH_DEBUGPL(DBG_CIL,
++							    "FS_PHY programming HCFG to 48 MHz\n");
++						if (hcfg.b.fslspclksel !=
++						    FH_HCFG_48_MHZ) {
++							hcfg.b.fslspclksel =
++							    FH_HCFG_48_MHZ;
++							FH_WRITE_REG32
++							    (&host_if->host_global_regs->hcfg,
++							     hcfg.d32);
++							do_reset = 1;
++						}
++					}
++				} else {
++					/*
++					 * Not low power
++					 */
++					if (usbcfg.b.phylpwrclksel == 1) {
++						usbcfg.b.phylpwrclksel = 0;
++						FH_WRITE_REG32
++						    (&global_regs->gusbcfg,
++						     usbcfg.d32);
++						do_reset = 1;
++					}
++				}
++
++				if (do_reset) {
++					FH_TASK_SCHEDULE(fh_otg_hcd->reset_tasklet);
++				}
++			}
++
++			if (!do_reset) {
++				/* Port has been enabled; set the reset change flag */
++				fh_otg_hcd->flags.b.port_reset_change = 1;
++			}
++		} else {
++			fh_otg_hcd->flags.b.port_enable_change = 1;
++		}
++		retval |= 1;
++	}
++
++	/** Overcurrent Change Interrupt */
++	if (hprt0.b.prtovrcurrchng) {
++		FH_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
++			    "Port Overcurrent Changed--\n", hprt0.d32);
++		fh_otg_hcd->flags.b.port_over_current_change = 1;
++		hprt0_modify.b.prtovrcurrchng = 1;
++		retval |= 1;
++	}
++
++	/* Clear Port Interrupts */
++	FH_WRITE_REG32(fh_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
++
++	return retval;
++}
++
++/** This interrupt indicates that one or more host channels have a pending
++ * interrupt. There are multiple conditions that can cause each host channel
++ * interrupt. This function determines which conditions have occurred for each
++ * host channel interrupt and handles them appropriately. */
++int32_t fh_otg_hcd_handle_hc_intr(fh_otg_hcd_t * fh_otg_hcd)
++{
++	int i;
++	int retval = 0;
++	haint_data_t haint;
++
++	/* Clear appropriate bits in HCINTn to clear the interrupt bit in
++	 * GINTSTS */
++
++	haint.d32 = fh_otg_read_host_all_channels_intr(fh_otg_hcd->core_if);
++
++	for (i = 0; i < fh_otg_hcd->core_if->core_params->host_channels; i++) {
++		if (haint.b2.chint & (1 << i)) {
++			retval |= fh_otg_hcd_handle_hc_n_intr(fh_otg_hcd, i);
++		}
++	}
++
++	return retval;
++}
++
++/**
++ * Gets the actual length of a transfer after the transfer halts. _halt_status
++ * holds the reason for the halt.
++ *
++ * For IN transfers where halt_status is FH_OTG_HC_XFER_COMPLETE,
++ * *short_read is set to 1 upon return if less than the requested
++ * number of bytes were transferred. Otherwise, *short_read is set to 0 upon
++ * return. short_read may also be NULL on entry, in which case it remains
++ * unchanged.
++ */
++static uint32_t get_actual_xfer_length(fh_hc_t * hc,
++				       fh_otg_hc_regs_t * hc_regs,
++				       fh_otg_qtd_t * qtd,
++				       fh_otg_halt_status_e halt_status,
++				       int *short_read)
++{
++	hctsiz_data_t hctsiz;
++	uint32_t length;
++
++	if (short_read != NULL) {
++		*short_read = 0;
++	}
++	hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
++
++	if (halt_status == FH_OTG_HC_XFER_COMPLETE) {
++		if (hc->ep_is_in) {
++			length = hc->xfer_len - hctsiz.b.xfersize;
++			if (short_read != NULL) {
++				*short_read = (hctsiz.b.xfersize != 0);
++			}
++		} else if (hc->qh->do_split) {
++			length = qtd->ssplit_out_xfer_count;
++		} else {
++			length = hc->xfer_len;
++		}
++	} else {
++		/*
++		 * Must use the hctsiz.pktcnt field to determine how much data
++		 * has been transferred. This field reflects the number of
++		 * packets that have been transferred via the USB. This is
++		 * always an integral number of packets if the transfer was
++		 * halted before its normal completion. (Can't use the
++		 * hctsiz.xfersize field because that reflects the number of
++		 * bytes transferred via the AHB, not the USB).
++		 */
++		length =
++		    (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet;
++	}
++
++	return length;
++}
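++
++/*
++ * Worked example (added): an IN transfer programmed with
++ * hc->xfer_len = 512 that halts with FH_OTG_HC_XFER_COMPLETE and
++ * hctsiz.xfersize = 112 yields length = 512 - 112 = 400 bytes and
++ * *short_read = 1. A transfer halted before normal completion with
++ * start_pkt_count = 8, hctsiz.pktcnt = 5 and max_packet = 64 yields
++ * (8 - 5) * 64 = 192 bytes.
++ */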
++
++/**
++ * Updates the state of the URB after a Transfer Complete interrupt on the
++ * host channel. Updates the actual_length field of the URB based on the
++ * number of bytes transferred via the host channel. Sets the URB status
++ * if the data transfer is finished.
++ *
++ * @return 1 if the data transfer specified by the URB is completely finished,
++ * 0 otherwise.
++ */
++static int update_urb_state_xfer_comp(fh_hc_t * hc,
++				      fh_otg_hc_regs_t * hc_regs,
++				      fh_otg_hcd_urb_t * urb,
++				      fh_otg_qtd_t * qtd)
++{
++	int xfer_done = 0;
++	int short_read = 0;
++
++	int xfer_length;
++
++	xfer_length = get_actual_xfer_length(hc, hc_regs, qtd,
++					     FH_OTG_HC_XFER_COMPLETE,
++					     &short_read);
++
++
++	/* non DWORD-aligned buffer case handling. */
++	if (hc->align_buff && xfer_length && hc->ep_is_in) {
++		fh_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
++			   xfer_length);
++	}
++
++	urb->actual_length += xfer_length;
++
++	if (xfer_length && (hc->ep_type == FH_OTG_EP_TYPE_BULK) &&
++	    (urb->flags & URB_SEND_ZERO_PACKET)
++	    && (urb->actual_length >= urb->length)
++	    && !(urb->length % hc->max_packet)) {
++		xfer_done = 0;
++	} else if (short_read || urb->actual_length >= urb->length) {
++		xfer_done = 1;
++		urb->status = 0;
++	}
++	
++#ifdef DEBUG
++	{
++		hctsiz_data_t hctsiz;
++		hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
++		FH_DEBUGPL(DBG_HCDV, "FH_otg: %s: %s, channel %d\n",
++			    __func__, (hc->ep_is_in ? "IN" : "OUT"),
++			    hc->hc_num);
++		FH_DEBUGPL(DBG_HCDV, "  hc->xfer_len %d\n", hc->xfer_len);
++		FH_DEBUGPL(DBG_HCDV, "  hctsiz.xfersize %d\n",
++			    hctsiz.b.xfersize);
++		FH_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
++			    urb->length);
++		FH_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n",
++			    urb->actual_length);
++		FH_DEBUGPL(DBG_HCDV, "  short_read %d, xfer_done %d\n",
++			    short_read, xfer_done);
++	}
++#endif
++
++	return xfer_done;
++}
++
++/*
++ * Save the starting data toggle for the next transfer. The data toggle is
++ * saved in the QH for non-control transfers and it's saved in the QTD for
++ * control transfers.
++ */
++void fh_otg_hcd_save_data_toggle(fh_hc_t * hc,
++			     fh_otg_hc_regs_t * hc_regs, fh_otg_qtd_t * qtd)
++{
++	hctsiz_data_t hctsiz;
++	hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
++
++	if (hc->ep_type != FH_OTG_EP_TYPE_CONTROL) {
++		fh_otg_qh_t *qh = hc->qh;
++		if (hctsiz.b.pid == FH_HCTSIZ_DATA0) {
++			qh->data_toggle = FH_OTG_HC_PID_DATA0;
++		} else {
++			qh->data_toggle = FH_OTG_HC_PID_DATA1;
++		}
++	} else {
++		if (hctsiz.b.pid == FH_HCTSIZ_DATA0) {
++			qtd->data_toggle = FH_OTG_HC_PID_DATA0;
++		} else {
++			qtd->data_toggle = FH_OTG_HC_PID_DATA1;
++		}
++	}
++}
++
++/**
++ * Updates the state of an Isochronous URB when the transfer is stopped for
++ * any reason. The fields of the current entry in the frame descriptor array
++ * are set based on the transfer state and the input _halt_status. Completes
++ * the Isochronous URB if all the URB frames have been completed.
++ *
++ * @return FH_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
++ * transferred in the URB. Otherwise return FH_OTG_HC_XFER_URB_COMPLETE.
++ */
++static fh_otg_halt_status_e
++update_isoc_urb_state(fh_otg_hcd_t * hcd,
++		      fh_hc_t * hc,
++		      fh_otg_hc_regs_t * hc_regs,
++		      fh_otg_qtd_t * qtd, fh_otg_halt_status_e halt_status)
++{
++	fh_otg_hcd_urb_t *urb = qtd->urb;
++	fh_otg_halt_status_e ret_val = halt_status;
++	struct fh_otg_hcd_iso_packet_desc *frame_desc;
++
++	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
++	switch (halt_status) {
++	case FH_OTG_HC_XFER_COMPLETE:
++		frame_desc->status = 0;
++		frame_desc->actual_length =
++		    get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
++
++		/* non DWORD-aligned buffer case handling. */
++		if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
++			fh_memcpy(urb->buf + frame_desc->offset + qtd->isoc_split_offset,
++				   hc->qh->dw_align_buf, frame_desc->actual_length);
++		}
++		
++		break;
++	case FH_OTG_HC_XFER_FRAME_OVERRUN:
++		urb->error_count++;
++		if (hc->ep_is_in) {
++			frame_desc->status = -FH_E_NO_STREAM_RES;
++		} else {
++			frame_desc->status = -FH_E_COMMUNICATION;
++		}
++		frame_desc->actual_length = 0;
++		break;
++	case FH_OTG_HC_XFER_BABBLE_ERR:
++		urb->error_count++;
++		frame_desc->status = -FH_E_OVERFLOW;
++		/* Don't need to update actual_length in this case. */
++		break;
++	case FH_OTG_HC_XFER_XACT_ERR:
++		urb->error_count++;
++		frame_desc->status = -FH_E_PROTOCOL;
++		frame_desc->actual_length =
++		    get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
++
++		/* non DWORD-aligned buffer case handling. */
++		if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
++			fh_memcpy(urb->buf + frame_desc->offset + qtd->isoc_split_offset,
++				   hc->qh->dw_align_buf, frame_desc->actual_length);
++		}
++		/* Skip whole frame */
++		if (hc->qh->do_split && (hc->ep_type == FH_OTG_EP_TYPE_ISOC) &&
++		    hc->ep_is_in && hcd->core_if->dma_enable) {
++			qtd->complete_split = 0;
++			qtd->isoc_split_offset = 0;
++		}
++
++		break;
++	default:
++		FH_ASSERT(1, "Unhandled _halt_status (%d)\n", halt_status);
++		break;
++	}
++	if (++qtd->isoc_frame_index == urb->packet_count) {
++		/*
++		 * urb->status is not used for isoc transfers.
++		 * The individual frame_desc statuses are used instead.
++		 */
++		hcd->fops->complete(hcd, urb->priv, urb, 0);
++		ret_val = FH_OTG_HC_XFER_URB_COMPLETE;
++	} else {
++		ret_val = FH_OTG_HC_XFER_COMPLETE;
++	}
++	return ret_val;
++}
++
++/**
++ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
++ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
++ * still linked to the QH, the QH is added to the end of the inactive
++ * non-periodic schedule. For periodic QHs, removes the QH from the periodic
++ * schedule if no more QTDs are linked to the QH.
++ */
++static void deactivate_qh(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh, int free_qtd)
++{
++	int continue_split = 0;
++	fh_otg_qtd_t *qtd;
++
++	FH_DEBUGPL(DBG_HCDV, "  %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd);
++
++	qtd = FH_CIRCLEQ_FIRST(&qh->qtd_list);
++
++	if (qtd->complete_split) {
++		continue_split = 1;
++	} else if (qtd->isoc_split_pos == FH_HCSPLIT_XACTPOS_MID ||
++		   qtd->isoc_split_pos == FH_HCSPLIT_XACTPOS_END) {
++		continue_split = 1;
++	}
++
++	if (free_qtd) {
++		fh_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
++		continue_split = 0;
++	}
++
++	qh->channel = NULL;
++	fh_otg_hcd_qh_deactivate(hcd, qh, continue_split);
++}
++
++/**
++ * Releases a host channel for use by other transfers. Attempts to select and
++ * queue more transactions since at least one host channel is available.
++ *
++ * @param hcd The HCD state structure.
++ * @param hc The host channel to release.
++ * @param qtd The QTD associated with the host channel. This QTD may be freed
++ * if the transfer is complete or an error has occurred.
++ * @param halt_status Reason the channel is being released. This status
++ * determines the actions taken by this function.
++ */
++static void release_channel(fh_otg_hcd_t * hcd,
++			    fh_hc_t * hc,
++			    fh_otg_qtd_t * qtd,
++			    fh_otg_halt_status_e halt_status)
++{
++	fh_otg_transaction_type_e tr_type;
++	int free_qtd;
++
++	FH_DEBUGPL(DBG_HCDV, "  %s: channel %d, halt_status %d\n",
++		    __func__, hc->hc_num, halt_status);
++
++	switch (halt_status) {
++	case FH_OTG_HC_XFER_URB_COMPLETE:
++		free_qtd = 1;
++		break;
++	case FH_OTG_HC_XFER_AHB_ERR:
++	case FH_OTG_HC_XFER_STALL:
++	case FH_OTG_HC_XFER_BABBLE_ERR:
++		free_qtd = 1;
++		break;
++	case FH_OTG_HC_XFER_XACT_ERR:
++		if (qtd->error_count >= 3) {
++			FH_DEBUGPL(DBG_HCDV,
++				    "  Complete URB with transaction error\n");
++			free_qtd = 1;
++			qtd->urb->status = -FH_E_PROTOCOL;
++			hcd->fops->complete(hcd, qtd->urb->priv,
++					    qtd->urb, -FH_E_PROTOCOL);
++		} else {
++			free_qtd = 0;
++		}
++		break;
++	case FH_OTG_HC_XFER_URB_DEQUEUE:
++		/*
++		 * The QTD has already been removed and the QH has been
++		 * deactivated. Don't want to do anything except release the
++		 * host channel and try to queue more transfers.
++		 */
++		goto cleanup;
++	case FH_OTG_HC_XFER_NO_HALT_STATUS:
++		free_qtd = 0;
++		break;
++	case FH_OTG_HC_XFER_PERIODIC_INCOMPLETE:
++		FH_DEBUGPL(DBG_HCDV,
++			"  Complete URB with I/O error\n");
++		free_qtd = 1;
++		qtd->urb->status = -FH_E_IO;
++		hcd->fops->complete(hcd, qtd->urb->priv,
++			qtd->urb, -FH_E_IO);
++		break;
++	default:
++		free_qtd = 0;
++		break;
++	}
++
++	deactivate_qh(hcd, hc->qh, free_qtd);
++
++cleanup:
++	/*
++	 * Release the host channel for use by other transfers. The cleanup
++	 * function clears the channel interrupt enables and conditions, so
++	 * there's no need to clear the Channel Halted interrupt separately.
++	 */
++	fh_otg_hc_cleanup(hcd->core_if, hc);
++	FH_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
++
++	switch (hc->ep_type) {
++	case FH_OTG_EP_TYPE_CONTROL:
++	case FH_OTG_EP_TYPE_BULK:
++		hcd->non_periodic_channels--;
++		break;
++
++	default:
++		/*
++		 * Don't release reservations for periodic channels here.
++		 * That's done when a periodic transfer is descheduled (i.e.
++		 * when the QH is removed from the periodic schedule).
++		 */
++		break;
++	}
++
++	/* Try to queue more transfers now that there's a free channel. */
++	tr_type = fh_otg_hcd_select_transactions(hcd);
++	if (tr_type != FH_OTG_TRANSACTION_NONE) {
++		fh_otg_hcd_queue_transactions(hcd, tr_type);
++	}
++}
++
++/**
++ * Halts a host channel. If the channel cannot be halted immediately because
++ * the request queue is full, this function ensures that the FIFO empty
++ * interrupt for the appropriate queue is enabled so that the halt request can
++ * be queued when there is space in the request queue.
++ *
++ * This function may also be called in DMA mode. In that case, the channel is
++ * simply released since the core always halts the channel automatically in
++ * DMA mode.
++ */
++static void halt_channel(fh_otg_hcd_t * hcd,
++			 fh_hc_t * hc,
++			 fh_otg_qtd_t * qtd, fh_otg_halt_status_e halt_status)
++{
++	if (hcd->core_if->dma_enable) {
++		release_channel(hcd, hc, qtd, halt_status);
++		return;
++	}
++
++	/* Slave mode processing... */
++	fh_otg_hc_halt(hcd->core_if, hc, halt_status);
++
++	if (hc->halt_on_queue) {
++		gintmsk_data_t gintmsk = {.d32 = 0 };
++		fh_otg_core_global_regs_t *global_regs;
++		global_regs = hcd->core_if->core_global_regs;
++
++		if (hc->ep_type == FH_OTG_EP_TYPE_CONTROL ||
++		    hc->ep_type == FH_OTG_EP_TYPE_BULK) {
++			/*
++			 * Make sure the Non-periodic Tx FIFO empty interrupt
++			 * is enabled so that the non-periodic schedule will
++			 * be processed.
++			 */
++			gintmsk.b.nptxfempty = 1;
++			FH_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
++		} else {
++			/*
++			 * Move the QH from the periodic queued schedule to
++			 * the periodic assigned schedule. This allows the
++			 * halt to be queued when the periodic schedule is
++			 * processed.
++			 */
++			FH_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
++					   &hc->qh->qh_list_entry);
++
++			/*
++			 * Make sure the Periodic Tx FIFO Empty interrupt is
++			 * enabled so that the periodic schedule will be
++			 * processed.
++			 */
++			gintmsk.b.ptxfempty = 1;
++			FH_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
++		}
++	}
++}
++
++/**
++ * Performs common cleanup for non-periodic transfers after a Transfer
++ * Complete interrupt. This function should be called after any endpoint type
++ * specific handling is finished to release the host channel.
++ */
++static void complete_non_periodic_xfer(fh_otg_hcd_t * hcd,
++				       fh_hc_t * hc,
++				       fh_otg_hc_regs_t * hc_regs,
++				       fh_otg_qtd_t * qtd,
++				       fh_otg_halt_status_e halt_status)
++{
++	hcint_data_t hcint;
++
++	qtd->error_count = 0;
++
++	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++	if (hcint.b.nyet) {
++		/*
++		 * Got a NYET on the last transaction of the transfer. This
++		 * means that the endpoint should be in the PING state at the
++		 * beginning of the next transfer.
++		 */
++		hc->qh->ping_state = 1;
++		clear_hc_int(hc_regs, nyet);
++	}
++
++	/*
++	 * Always halt and release the host channel to make it available for
++	 * more transfers. There may still be more phases for a control
++	 * transfer or more data packets for a bulk transfer at this point,
++	 * but the host channel is still halted. A channel will be reassigned
++	 * to the transfer when the non-periodic schedule is processed after
++	 * the channel is released. This allows transactions to be queued
++	 * properly via fh_otg_hcd_queue_transactions, which also enables the
++	 * Tx FIFO Empty interrupt if necessary.
++	 */
++	if (hc->ep_is_in) {
++		/*
++		 * IN transfers in Slave mode require an explicit disable to
++		 * halt the channel. (In DMA mode, this call simply releases
++		 * the channel.)
++		 */
++		halt_channel(hcd, hc, qtd, halt_status);
++	} else {
++		/*
++		 * The channel is automatically disabled by the core for OUT
++		 * transfers in Slave mode.
++		 */
++		release_channel(hcd, hc, qtd, halt_status);
++	}
++}
++
++/**
++ * Performs common cleanup for periodic transfers after a Transfer Complete
++ * interrupt. This function should be called after any endpoint type specific
++ * handling is finished to release the host channel.
++ */
++static void complete_periodic_xfer(fh_otg_hcd_t * hcd,
++				   fh_hc_t * hc,
++				   fh_otg_hc_regs_t * hc_regs,
++				   fh_otg_qtd_t * qtd,
++				   fh_otg_halt_status_e halt_status)
++{
++	hctsiz_data_t hctsiz;
++	qtd->error_count = 0;
++
++	hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
++	if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) {
++		/* Core halts channel in these cases. */
++		release_channel(hcd, hc, qtd, halt_status);
++	} else {
++		/* Flush any outstanding requests from the Tx queue. */
++		halt_channel(hcd, hc, qtd, halt_status);
++	}
++}
++
++static int32_t handle_xfercomp_isoc_split_in(fh_otg_hcd_t * hcd,
++					     fh_hc_t * hc,
++					     fh_otg_hc_regs_t * hc_regs,
++					     fh_otg_qtd_t * qtd)
++{
++	uint32_t len;
++	struct fh_otg_hcd_iso_packet_desc *frame_desc;
++	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
++
++	len = get_actual_xfer_length(hc, hc_regs, qtd,
++				     FH_OTG_HC_XFER_COMPLETE, NULL);
++
++	if (!len) {
++		qtd->complete_split = 0;
++		qtd->isoc_split_offset = 0;
++		return 0;
++	}
++	frame_desc->actual_length += len;
++
++	if (hc->align_buff && len)
++		fh_memcpy(qtd->urb->buf + frame_desc->offset +
++			   qtd->isoc_split_offset, hc->qh->dw_align_buf, len);
++	qtd->isoc_split_offset += len;
++
++	if (frame_desc->length == frame_desc->actual_length) {
++		frame_desc->status = 0;
++		qtd->isoc_frame_index++;
++		qtd->complete_split = 0;
++		qtd->isoc_split_offset = 0;
++	}
++
++	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
++		hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
++		release_channel(hcd, hc, qtd, FH_OTG_HC_XFER_URB_COMPLETE);
++	} else {
++		release_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NO_HALT_STATUS);
++	}
++
++	return 1;		/* Indicates that channel released */
++}
++
++/**
++ * Handles a host channel Transfer Complete interrupt. This handler may be
++ * called in either DMA mode or Slave mode.
++ */
++static int32_t handle_hc_xfercomp_intr(fh_otg_hcd_t * hcd,
++				       fh_hc_t * hc,
++				       fh_otg_hc_regs_t * hc_regs,
++				       fh_otg_qtd_t * qtd)
++{
++	int urb_xfer_done;
++	fh_otg_halt_status_e halt_status = FH_OTG_HC_XFER_COMPLETE;
++	fh_otg_hcd_urb_t *urb = qtd->urb;
++	int pipe_type = fh_otg_hcd_get_pipe_type(&urb->pipe_info);
++
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "Transfer Complete--\n", hc->hc_num);
++
++	if (hcd->core_if->dma_desc_enable) {
++		fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, halt_status);
++		if (pipe_type == UE_ISOCHRONOUS) {
++			/* Do not disable the interrupt, just clear it */
++			clear_hc_int(hc_regs, xfercomp);
++			return 1;
++		}
++		goto handle_xfercomp_done;
++	}
++
++	/*
++	 * Handle xfer complete on CSPLIT.
++	 */
++
++	if (hc->qh->do_split) {
++		if ((hc->ep_type == FH_OTG_EP_TYPE_ISOC) && hc->ep_is_in
++		    && hcd->core_if->dma_enable) {
++			if (qtd->complete_split
++			    && handle_xfercomp_isoc_split_in(hcd, hc, hc_regs,
++							     qtd))
++				goto handle_xfercomp_done;
++		} else {
++			qtd->complete_split = 0;
++		}
++	}
++
++	/* Update the QTD and URB states. */
++	switch (pipe_type) {
++	case UE_CONTROL:
++		switch (qtd->control_phase) {
++		case FH_OTG_CONTROL_SETUP:
++			if (urb->length > 0) {
++				qtd->control_phase = FH_OTG_CONTROL_DATA;
++			} else {
++				qtd->control_phase = FH_OTG_CONTROL_STATUS;
++			}
++			FH_DEBUGPL(DBG_HCDV,
++				    "  Control setup transaction done\n");
++			halt_status = FH_OTG_HC_XFER_COMPLETE;
++			break;
++		case FH_OTG_CONTROL_DATA:{
++				urb_xfer_done =
++				    update_urb_state_xfer_comp(hc, hc_regs, urb,
++							       qtd);
++				if (urb_xfer_done) {
++					qtd->control_phase =
++					    FH_OTG_CONTROL_STATUS;
++					FH_DEBUGPL(DBG_HCDV,
++						    "  Control data transfer done\n");
++				} else {
++					fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
++				}
++				halt_status = FH_OTG_HC_XFER_COMPLETE;
++				break;
++			}
++		case FH_OTG_CONTROL_STATUS:
++			FH_DEBUGPL(DBG_HCDV, "  Control transfer complete\n");
++			if (urb->status == -FH_E_IN_PROGRESS) {
++				urb->status = 0;
++			}
++			hcd->fops->complete(hcd, urb->priv, urb, urb->status);
++			halt_status = FH_OTG_HC_XFER_URB_COMPLETE;
++			if (!hcd->core_if->dma_enable && hcd->core_if->otg_ver == 1)
++				qtd->urb = NULL;
++			break;
++		}
++
++		complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
++		break;
++	case UE_BULK:
++		FH_DEBUGPL(DBG_HCDV, "  Bulk transfer complete\n");
++		urb_xfer_done =
++		    update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
++		if (urb_xfer_done) {
++			hcd->fops->complete(hcd, urb->priv, urb, urb->status);
++			halt_status = FH_OTG_HC_XFER_URB_COMPLETE;
++		} else {
++			halt_status = FH_OTG_HC_XFER_COMPLETE;
++		}
++
++		fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
++		complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
++		break;
++	case UE_INTERRUPT:
++		FH_DEBUGPL(DBG_HCDV, "  Interrupt transfer complete\n");
++		urb_xfer_done =
++			update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
++
++		/*
++		 * Interrupt URB is done on the first transfer complete
++		 * interrupt.
++		 */
++		if (urb_xfer_done) {
++			hcd->fops->complete(hcd, urb->priv, urb, urb->status);
++			halt_status = FH_OTG_HC_XFER_URB_COMPLETE;
++		} else {
++			halt_status = FH_OTG_HC_XFER_COMPLETE;
++		}
++
++		fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
++		complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
++		break;
++	case UE_ISOCHRONOUS:
++		FH_DEBUGPL(DBG_HCDV, "  Isochronous transfer complete\n");
++		if (qtd->isoc_split_pos == FH_HCSPLIT_XACTPOS_ALL) {
++			halt_status =
++			    update_isoc_urb_state(hcd, hc, hc_regs, qtd,
++						  FH_OTG_HC_XFER_COMPLETE);
++		}
++		complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
++		break;
++	}
++
++handle_xfercomp_done:
++	disable_hc_int(hc_regs, xfercompl);
++
++	return 1;
++}
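++
++/*
++ * Summary (added): for control transfers the handler above advances the
++ * phase machine SETUP -> DATA (only when urb->length > 0) -> STATUS,
++ * completing the URB back to the function driver once the STATUS phase
++ * finishes.
++ */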
++
++/**
++ * Handles a host channel STALL interrupt. This handler may be called in
++ * either DMA mode or Slave mode.
++ */
++static int32_t handle_hc_stall_intr(fh_otg_hcd_t * hcd,
++				    fh_hc_t * hc,
++				    fh_otg_hc_regs_t * hc_regs,
++				    fh_otg_qtd_t * qtd)
++{
++	fh_otg_hcd_urb_t *urb = qtd->urb;
++	int pipe_type = fh_otg_hcd_get_pipe_type(&urb->pipe_info);
++
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "STALL Received--\n", hc->hc_num);
++
++	if (hcd->core_if->dma_desc_enable) {
++		fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, FH_OTG_HC_XFER_STALL);
++		goto handle_stall_done;
++	}
++
++	if (pipe_type == UE_CONTROL) {
++		hcd->fops->complete(hcd, urb->priv, urb, -FH_E_PIPE);
++	}
++
++	if (pipe_type == UE_BULK || pipe_type == UE_INTERRUPT) {
++		hcd->fops->complete(hcd, urb->priv, urb, -FH_E_PIPE);
++		/*
++		 * USB protocol requires resetting the data toggle for bulk
++		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
++		 * setup command is issued to the endpoint. Anticipate the
++		 * CLEAR_FEATURE command since a STALL has occurred and reset
++		 * the data toggle now.
++		 */
++		hc->qh->data_toggle = 0;
++	}
++
++	halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_STALL);
++
++handle_stall_done:
++	disable_hc_int(hc_regs, stall);
++
++	return 1;
++}
++
++/*
++ * Updates the state of the URB when a transfer has been stopped due to an
++ * abnormal condition before the transfer completes. Modifies the
++ * actual_length field of the URB to reflect the number of bytes that have
++ * actually been transferred via the host channel.
++ */
++static void update_urb_state_xfer_intr(fh_hc_t * hc,
++				       fh_otg_hc_regs_t * hc_regs,
++				       fh_otg_hcd_urb_t * urb,
++				       fh_otg_qtd_t * qtd,
++				       fh_otg_halt_status_e halt_status)
++{
++	uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd,
++							    halt_status, NULL);
++	/* non DWORD-aligned buffer case handling. */
++	if (hc->align_buff && bytes_transferred && hc->ep_is_in) {
++		fh_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
++			   bytes_transferred);
++	}
++
++	urb->actual_length += bytes_transferred;
++
++#ifdef DEBUG
++	{
++		hctsiz_data_t hctsiz;
++		hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
++		FH_DEBUGPL(DBG_HCDV, "FH_otg: %s: %s, channel %d\n",
++			    __func__, (hc->ep_is_in ? "IN" : "OUT"),
++			    hc->hc_num);
++		FH_DEBUGPL(DBG_HCDV, "  hc->start_pkt_count %d\n",
++			    hc->start_pkt_count);
++		FH_DEBUGPL(DBG_HCDV, "  hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
++		FH_DEBUGPL(DBG_HCDV, "  hc->max_packet %d\n", hc->max_packet);
++		FH_DEBUGPL(DBG_HCDV, "  bytes_transferred %d\n",
++			    bytes_transferred);
++		FH_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n",
++			    urb->actual_length);
++		FH_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
++			    urb->length);
++	}
++#endif
++}
++
++/**
++ * Handles a host channel NAK interrupt. This handler may be called in either
++ * DMA mode or Slave mode.
++ */
++static int32_t handle_hc_nak_intr(fh_otg_hcd_t * hcd,
++				  fh_hc_t * hc,
++				  fh_otg_hc_regs_t * hc_regs,
++				  fh_otg_qtd_t * qtd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "NAK Received--\n", hc->hc_num);
++
++	/*
++	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
++	 * interrupt.  Re-start the SSPLIT transfer.
++	 */
++	if (hc->do_split) {
++		if (hc->complete_split) {
++			qtd->error_count = 0;
++		}
++		qtd->complete_split = 0;
++		halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NAK);
++		goto handle_nak_done;
++	}
++
++	switch (fh_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
++	case UE_CONTROL:
++	case UE_BULK:
++		if (hcd->core_if->dma_enable && hc->ep_is_in) {
++			/*
++			 * NAK interrupts are enabled on bulk/control IN
++			 * transfers in DMA mode for the sole purpose of
++			 * resetting the error count after a transaction error
++			 * occurs. The core will continue transferring data.
++			 */
++			qtd->error_count = 0;
++			goto handle_nak_done;
++		}
++
++		/*
++		 * NAK interrupts normally occur during OUT transfers in DMA
++		 * or Slave mode. For IN transfers, more requests will be
++		 * queued as request queue space is available.
++		 */
++		qtd->error_count = 0;
++
++		if (!hc->qh->ping_state) {
++			update_urb_state_xfer_intr(hc, hc_regs,
++						   qtd->urb, qtd,
++						   FH_OTG_HC_XFER_NAK);
++			fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
++
++			if (hc->speed == FH_OTG_EP_SPEED_HIGH)
++				hc->qh->ping_state = 1;
++		}
++
++		/*
++		 * Halt the channel so the transfer can be re-started from
++		 * the appropriate point or the PING protocol will
++		 * start/continue.
++		 */
++		halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NAK);
++		break;
++	case UE_INTERRUPT:
++		qtd->error_count = 0;
++		halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NAK);
++		break;
++	case UE_ISOCHRONOUS:
++		/* Should never get called for isochronous transfers. */
++		FH_ASSERT(1, "NACK interrupt for ISOC transfer\n");
++		break;
++	}
++
++handle_nak_done:
++	disable_hc_int(hc_regs, nak);
++
++	return 1;
++}
++
++/**
++ * Handles a host channel ACK interrupt. This interrupt is enabled when
++ * performing the PING protocol in Slave mode, when errors occur during
++ * either Slave mode or DMA mode, and during Start Split transactions.
++ */
++static int32_t handle_hc_ack_intr(fh_otg_hcd_t * hcd,
++				  fh_hc_t * hc,
++				  fh_otg_hc_regs_t * hc_regs,
++				  fh_otg_qtd_t * qtd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "ACK Received--\n", hc->hc_num);
++
++	if (hc->do_split) {
++		/*
++		 * Handle ACK on SSPLIT.
++		 * ACK should not occur in CSPLIT.
++		 */
++		if (!hc->ep_is_in && hc->data_pid_start != FH_OTG_HC_PID_SETUP) {
++			qtd->ssplit_out_xfer_count = hc->xfer_len;
++		}
++		if (!(hc->ep_type == FH_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) {
++			/* Don't need complete for isochronous out transfers. */
++			qtd->complete_split = 1;
++		}
++
++		/* ISOC OUT */
++		if (hc->ep_type == FH_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
++			switch (hc->xact_pos) {
++			case FH_HCSPLIT_XACTPOS_ALL:
++				break;
++			case FH_HCSPLIT_XACTPOS_END:
++				qtd->isoc_split_pos = FH_HCSPLIT_XACTPOS_ALL;
++				qtd->isoc_split_offset = 0;
++				break;
++			case FH_HCSPLIT_XACTPOS_BEGIN:
++			case FH_HCSPLIT_XACTPOS_MID:
++				/*
++				 * For BEGIN or MID, calculate the length for
++				 * the next microframe to determine the correct
++				 * SSPLIT token, either MID or END.
++				 */
++				{
++					struct fh_otg_hcd_iso_packet_desc
++					*frame_desc;
++
++					frame_desc =
++					    &qtd->urb->
++					    iso_descs[qtd->isoc_frame_index];
++					qtd->isoc_split_offset += 188;
++
++					if ((frame_desc->length -
++					     qtd->isoc_split_offset) <= 188) {
++						qtd->isoc_split_pos =
++						    FH_HCSPLIT_XACTPOS_END;
++					} else {
++						qtd->isoc_split_pos =
++						    FH_HCSPLIT_XACTPOS_MID;
++					}
++
++				}
++				break;
++			}
++		} else {
++			halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_ACK);
++		}
++	} else {
++		qtd->error_count = 0;
++
++		if (hc->qh->ping_state) {
++			hc->qh->ping_state = 0;
++			/*
++			 * Halt the channel so the transfer can be re-started
++			 * from the appropriate point. This only happens in
++			 * Slave mode. In DMA mode, the ping_state is cleared
++			 * when the transfer is started because the core
++			 * automatically executes the PING, then the transfer.
++			 */
++			halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_ACK);
++		}
++	}
++
++	/*
++	 * If the ACK occurred when _not_ in the PING state, let the channel
++	 * continue transferring data after clearing the error count.
++	 */
++
++	disable_hc_int(hc_regs, ack);
++
++	return 1;
++}
++
++/**
++ * Handles a host channel NYET interrupt. This interrupt should only occur on
++ * Bulk and Control OUT endpoints and for complete split transactions. If a
++ * NYET occurs at the same time as a Transfer Complete interrupt, it is
++ * handled in the xfercomp interrupt handler, not here. This handler may be
++ * called in either DMA mode or Slave mode.
++ */
++static int32_t handle_hc_nyet_intr(fh_otg_hcd_t * hcd,
++				   fh_hc_t * hc,
++				   fh_otg_hc_regs_t * hc_regs,
++				   fh_otg_qtd_t * qtd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "NYET Received--\n", hc->hc_num);
++
++	/*
++	 * NYET on CSPLIT
++	 * re-do the CSPLIT immediately on non-periodic
++	 */
++	if (hc->do_split && hc->complete_split) {
++		if (hc->ep_is_in && (hc->ep_type == FH_OTG_EP_TYPE_ISOC)
++		    && hcd->core_if->dma_enable) {
++			qtd->complete_split = 0;
++			qtd->isoc_split_offset = 0;
++			if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
++				hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
++				release_channel(hcd, hc, qtd, FH_OTG_HC_XFER_URB_COMPLETE);
++			} else {
++				release_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NO_HALT_STATUS);
++			}
++			goto handle_nyet_done;
++		}
++
++		if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
++		    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
++			int frnum = fh_otg_hcd_get_frame_number(hcd);
++
++			if (fh_full_frame_num(frnum) !=
++			    fh_full_frame_num(hc->qh->sched_frame)) {
++				/*
++				 * No longer in the same full speed frame.
++				 * Treat this as a transaction error.
++				 */
++#if 0
++				/** @todo Fix system performance so this can
++				 * be treated as an error. Right now complete
++				 * splits cannot be scheduled precisely enough
++				 * due to other system activity, so this error
++				 * occurs regularly in Slave mode.
++				 */
++				qtd->error_count++;
++#endif
++				qtd->complete_split = 0;
++				halt_channel(hcd, hc, qtd,
++					     FH_OTG_HC_XFER_XACT_ERR);
++				/** @todo add support for isoc release */
++				goto handle_nyet_done;
++			}
++		}
++
++		halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NYET);
++		goto handle_nyet_done;
++	}
++
++	hc->qh->ping_state = 1;
++	qtd->error_count = 0;
++
++	update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd,
++				   FH_OTG_HC_XFER_NYET);
++	fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
++
++	/*
++	 * Halt the channel and re-start the transfer so the PING
++	 * protocol will start.
++	 */
++	halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_NYET);
++
++handle_nyet_done:
++	disable_hc_int(hc_regs, nyet);
++	return 1;
++}
++
++/**
++ * Handles a host channel babble interrupt. This handler may be called in
++ * either DMA mode or Slave mode.
++ */
++static int32_t handle_hc_babble_intr(fh_otg_hcd_t * hcd,
++				     fh_hc_t * hc,
++				     fh_otg_hc_regs_t * hc_regs,
++				     fh_otg_qtd_t * qtd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "Babble Error--\n", hc->hc_num);
++
++	if (hcd->core_if->dma_desc_enable) {
++		fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
++					       FH_OTG_HC_XFER_BABBLE_ERR);
++		goto handle_babble_done;
++	}
++
++	if (hc->ep_type != FH_OTG_EP_TYPE_ISOC) {
++		hcd->fops->complete(hcd, qtd->urb->priv,
++				    qtd->urb, -FH_E_OVERFLOW);
++		halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_BABBLE_ERR);
++	} else {
++		fh_otg_halt_status_e halt_status;
++		halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
++						    FH_OTG_HC_XFER_BABBLE_ERR);
++		halt_channel(hcd, hc, qtd, halt_status);
++	}
++
++handle_babble_done:
++	disable_hc_int(hc_regs, bblerr);
++	return 1;
++}
++
++/**
++ * Handles a host channel AHB error interrupt. This handler is only called in
++ * DMA mode.
++ */
++static int32_t handle_hc_ahberr_intr(fh_otg_hcd_t * hcd,
++				     fh_hc_t * hc,
++				     fh_otg_hc_regs_t * hc_regs,
++				     fh_otg_qtd_t * qtd)
++{
++	hcchar_data_t hcchar;
++	hcsplt_data_t hcsplt;
++	hctsiz_data_t hctsiz;
++	uint32_t hcdma;
++	char *pipetype, *speed;
++
++	fh_otg_hcd_urb_t *urb = qtd->urb;
++
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "AHB Error--\n", hc->hc_num);
++
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	hcsplt.d32 = FH_READ_REG32(&hc_regs->hcsplt);
++	hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
++	hcdma = FH_READ_REG32(&hc_regs->hcdma);
++
++	FH_ERROR("AHB ERROR, Channel %d\n", hc->hc_num);
++	FH_ERROR("AHB ERROR, Xfer size %d\n", hc->xfer_len);
++	FH_ERROR("  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
++	FH_ERROR("  hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
++
++	FH_DEBUGPL(DBG_HCD, "FH OTG HCD URB Enqueue\n");
++	FH_ERROR("  Device address: %d\n",
++		  fh_otg_hcd_get_dev_addr(&urb->pipe_info));
++	FH_ERROR("  Endpoint: %d, %s\n",
++		  fh_otg_hcd_get_ep_num(&urb->pipe_info),
++		  (fh_otg_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT"));
++
++	switch (fh_otg_hcd_get_pipe_type(&urb->pipe_info)) {
++	case UE_CONTROL:
++		pipetype = "CONTROL";
++		break;
++	case UE_BULK:
++		pipetype = "BULK";
++		break;
++	case UE_INTERRUPT:
++		pipetype = "INTERRUPT";
++		break;
++	case UE_ISOCHRONOUS:
++		pipetype = "ISOCHRONOUS";
++		break;
++	default:
++		pipetype = "UNKNOWN";
++		break;
++	}
++
++	FH_ERROR("  Endpoint type: %s\n", pipetype);
++
++	switch (hc->speed) {
++	case FH_OTG_EP_SPEED_HIGH:
++		speed = "HIGH";
++		break;
++	case FH_OTG_EP_SPEED_FULL:
++		speed = "FULL";
++		break;
++	case FH_OTG_EP_SPEED_LOW:
++		speed = "LOW";
++		break;
++	default:
++		speed = "UNKNOWN";
++		break;
++	}
++
++	FH_ERROR("  Speed: %s\n", speed);
++
++	FH_ERROR("  Max packet size: %d\n",
++		  fh_otg_hcd_get_mps(&urb->pipe_info));
++	FH_ERROR("  Data buffer length: %d\n", urb->length);
++	FH_ERROR("  Transfer buffer: %p, Transfer DMA: %p\n",
++		  urb->buf, (void *)urb->dma);
++	FH_ERROR("  Setup buffer: %p, Setup DMA: %p\n",
++		  urb->setup_packet, (void *)urb->setup_dma);
++	FH_ERROR("  Interval: %d\n", urb->interval);
++
++	/* The core halts the channel in Descriptor DMA mode */
++	if (hcd->core_if->dma_desc_enable) {
++		fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
++					       FH_OTG_HC_XFER_AHB_ERR);
++		goto handle_ahberr_done;
++	}
++
++	hcd->fops->complete(hcd, urb->priv, urb, -FH_E_IO);
++
++	/*
++	 * Force a channel halt. Don't call halt_channel because that won't
++	 * write to the HCCHARn register in DMA mode to force the halt.
++	 */
++	fh_otg_hc_halt(hcd->core_if, hc, FH_OTG_HC_XFER_AHB_ERR);
++handle_ahberr_done:
++	disable_hc_int(hc_regs, ahberr);
++	return 1;
++}
++
++/**
++ * Handles a host channel transaction error interrupt. This handler may be
++ * called in either DMA mode or Slave mode.
++ */
++static int32_t handle_hc_xacterr_intr(fh_otg_hcd_t * hcd,
++				      fh_hc_t * hc,
++				      fh_otg_hc_regs_t * hc_regs,
++				      fh_otg_qtd_t * qtd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "Transaction Error--\n", hc->hc_num);
++
++	if (hcd->core_if->dma_desc_enable) {
++		fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
++					       FH_OTG_HC_XFER_XACT_ERR);
++		goto handle_xacterr_done;
++	}
++
++	switch (fh_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
++	case UE_CONTROL:
++	case UE_BULK:
++		qtd->error_count++;
++		if (!hc->qh->ping_state) {
++
++			update_urb_state_xfer_intr(hc, hc_regs,
++						   qtd->urb, qtd,
++						   FH_OTG_HC_XFER_XACT_ERR);
++			fh_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
++			if (!hc->ep_is_in && hc->speed == FH_OTG_EP_SPEED_HIGH) {
++				hc->qh->ping_state = 1;
++			}
++		}
++
++		/*
++		 * Halt the channel so the transfer can be re-started from
++		 * the appropriate point or the PING protocol will start.
++		 */
++		halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_XACT_ERR);
++		break;
++	case UE_INTERRUPT:
++		qtd->error_count++;
++		if (hc->do_split && hc->complete_split) {
++			qtd->complete_split = 0;
++		}
++		halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_XACT_ERR);
++		break;
++	case UE_ISOCHRONOUS:
++		{
++			fh_otg_halt_status_e halt_status;
++			halt_status =
++			    update_isoc_urb_state(hcd, hc, hc_regs, qtd,
++						  FH_OTG_HC_XFER_XACT_ERR);
++
++			halt_channel(hcd, hc, qtd, halt_status);
++		}
++		break;
++	}
++handle_xacterr_done:
++	disable_hc_int(hc_regs, xacterr);
++
++	return 1;
++}
++
++/**
++ * Handles a host channel frame overrun interrupt. This handler may be called
++ * in either DMA mode or Slave mode.
++ */
++static int32_t handle_hc_frmovrun_intr(fh_otg_hcd_t * hcd,
++				       fh_hc_t * hc,
++				       fh_otg_hc_regs_t * hc_regs,
++				       fh_otg_qtd_t * qtd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "Frame Overrun--\n", hc->hc_num);
++
++	switch (fh_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
++	case UE_CONTROL:
++	case UE_BULK:
++		break;
++	case UE_INTERRUPT:
++		halt_channel(hcd, hc, qtd, FH_OTG_HC_XFER_FRAME_OVERRUN);
++		break;
++	case UE_ISOCHRONOUS:
++		{
++			fh_otg_halt_status_e halt_status;
++			halt_status =
++			    update_isoc_urb_state(hcd, hc, hc_regs, qtd,
++						  FH_OTG_HC_XFER_FRAME_OVERRUN);
++
++			halt_channel(hcd, hc, qtd, halt_status);
++		}
++		break;
++	}
++
++	disable_hc_int(hc_regs, frmovrun);
++
++	return 1;
++}
++
++/**
++ * Handles a host channel data toggle error interrupt. This handler may be
++ * called in either DMA mode or Slave mode.
++ */
++static int32_t handle_hc_datatglerr_intr(fh_otg_hcd_t * hcd,
++					 fh_hc_t * hc,
++					 fh_otg_hc_regs_t * hc_regs,
++					 fh_otg_qtd_t * qtd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "Data Toggle Error--\n", hc->hc_num);
++
++	if (hc->ep_is_in) {
++		qtd->error_count = 0;
++	} else {
++		FH_ERROR("Data Toggle Error on OUT transfer, "
++			  "channel %d\n", hc->hc_num);
++	}
++
++	disable_hc_int(hc_regs, datatglerr);
++
++	return 1;
++}
++
++#ifdef DEBUG
++/**
++ * This function is for debug only. It checks that a valid halt status is set
++ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
++ * taken and a warning is issued.
++ * @return 1 if halt status is ok, 0 otherwise.
++ */
++static inline int halt_status_ok(fh_otg_hcd_t * hcd,
++				 fh_hc_t * hc,
++				 fh_otg_hc_regs_t * hc_regs,
++				 fh_otg_qtd_t * qtd)
++{
++	hcchar_data_t hcchar;
++	hctsiz_data_t hctsiz;
++	hcint_data_t hcint;
++	hcintmsk_data_t hcintmsk;
++	hcsplt_data_t hcsplt;
++
++	if (hc->halt_status == FH_OTG_HC_XFER_NO_HALT_STATUS) {
++		/*
++		 * This code is here only as a check. This condition should
++		 * never happen. Ignore the halt if it does occur.
++		 */
++		hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++		hctsiz.d32 = FH_READ_REG32(&hc_regs->hctsiz);
++		hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++		hcintmsk.d32 = FH_READ_REG32(&hc_regs->hcintmsk);
++		hcsplt.d32 = FH_READ_REG32(&hc_regs->hcsplt);
++		FH_WARN
++		    ("%s: hc->halt_status == FH_OTG_HC_XFER_NO_HALT_STATUS, "
++		     "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
++		     "hcint 0x%08x, hcintmsk 0x%08x, "
++		     "hcsplt 0x%08x, qtd->complete_split %d\n", __func__,
++		     hc->hc_num, hcchar.d32, hctsiz.d32, hcint.d32,
++		     hcintmsk.d32, hcsplt.d32, qtd->complete_split);
++
++		FH_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
++			 __func__, hc->hc_num);
++		FH_WARN("\n");
++		clear_hc_int(hc_regs, chhltd);
++		return 0;
++	}
++
++	/*
++	 * This code is here only as a check. hcchar.chdis should
++	 * never be set when the halt interrupt occurs. Halt the
++	 * channel again if it does occur.
++	 */
++	hcchar.d32 = FH_READ_REG32(&hc_regs->hcchar);
++	if (hcchar.b.chdis) {
++		FH_WARN("%s: hcchar.chdis set unexpectedly, "
++			 "hcchar 0x%08x, trying to halt again\n",
++			 __func__, hcchar.d32);
++		clear_hc_int(hc_regs, chhltd);
++		hc->halt_pending = 0;
++		halt_channel(hcd, hc, qtd, hc->halt_status);
++		return 0;
++	}
++
++	return 1;
++}
++#endif
++
++/**
++ * Handles a host Channel Halted interrupt in DMA mode. This handler
++ * determines the reason the channel halted and proceeds accordingly.
++ */
++static void handle_hc_chhltd_intr_dma(fh_otg_hcd_t * hcd,
++				      fh_hc_t * hc,
++				      fh_otg_hc_regs_t * hc_regs,
++				      fh_otg_qtd_t * qtd)
++{
++	hcint_data_t hcint;
++	hcintmsk_data_t hcintmsk;
++	int out_nak_enh = 0;
++
++	/*
++	 * For cores with the OUT NAK enhancement, the flow for high-speed
++	 * CONTROL/BULK OUT is handled a little differently.
++	 */
++	if (hcd->core_if->snpsid >= OTG_CORE_REV_2_71a) {
++		if (hc->speed == FH_OTG_EP_SPEED_HIGH && !hc->ep_is_in &&
++		    (hc->ep_type == FH_OTG_EP_TYPE_CONTROL ||
++		     hc->ep_type == FH_OTG_EP_TYPE_BULK)) {
++			out_nak_enh = 1;
++		}
++	}
++
++	if (hc->halt_status == FH_OTG_HC_XFER_URB_DEQUEUE ||
++	    (hc->halt_status == FH_OTG_HC_XFER_AHB_ERR
++	     && !hcd->core_if->dma_desc_enable)) {
++		/*
++		 * Just release the channel. A dequeue can happen on a
++		 * transfer timeout. In the case of an AHB Error, the channel
++		 * was forced to halt because there's no way to gracefully
++		 * recover.
++		 */
++		if (hcd->core_if->dma_desc_enable)
++			fh_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
++						       hc->halt_status);
++		else
++			release_channel(hcd, hc, qtd, hc->halt_status);
++		return;
++	}
++
++	/* Read the HCINTn register to determine the cause for the halt. */
++	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++	hcintmsk.d32 = FH_READ_REG32(&hc_regs->hcintmsk);
++
++	if (hcint.b.xfercomp) {
++		/** @todo This is here because of a possible hardware bug. The
++		 * spec says that on SPLIT-ISOC OUT transfers in DMA mode a HALT
++		 * interrupt with the ACK bit set should occur, but only the
++		 * XFERCOMP bit is seen, even with ACK masked out. This is a
++		 * workaround for that behavior and should be removed once the
++		 * hardware is fixed.
++		 */
++		if (hc->ep_type == FH_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
++			handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
++		}
++		handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd);
++	} else if (hcint.b.stall) {
++		handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
++	} else if (hcint.b.xacterr && !hcd->core_if->dma_desc_enable) {
++		if (out_nak_enh) {
++			if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) {
++				FH_DEBUG("XactErr with NYET/NAK/ACK\n");
++				qtd->error_count = 0;
++			} else {
++				FH_DEBUG("XactErr without NYET/NAK/ACK\n");
++			}
++		}
++
++		/*
++		 * Must handle xacterr before nak or ack. Could get a xacterr
++		 * at the same time as either of these on a BULK/CONTROL OUT
++		 * that started with a PING. The xacterr takes precedence.
++		 */
++		handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
++	} else if (hcint.b.xcs_xact && hcd->core_if->dma_desc_enable) {
++		handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
++	} else if (hcint.b.ahberr && hcd->core_if->dma_desc_enable) {
++		handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
++	} else if (hcint.b.bblerr) {
++		handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
++	} else if (hcint.b.frmovrun) {
++		handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd);
++	} else if (!out_nak_enh) {
++		if (hcint.b.nyet) {
++			/*
++			 * Must handle nyet before nak or ack. Could get a nyet at the
++			 * same time as either of those on a BULK/CONTROL OUT that
++			 * started with a PING. The nyet takes precedence.
++			 */
++			handle_hc_nyet_intr(hcd, hc, hc_regs, qtd);
++		} else if (hcint.b.nak && !hcintmsk.b.nak) {
++			/*
++			 * If nak is not masked, it's because a non-split IN transfer
++			 * is in an error state. In that case, the nak is handled by
++			 * the nak interrupt handler, not here. Handle nak here for
++			 * BULK/CONTROL OUT transfers, which halt on a NAK to allow
++			 * rewinding the buffer pointer.
++			 */
++			handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
++		} else if (hcint.b.ack && !hcintmsk.b.ack) {
++			/*
++			 * If ack is not masked, it's because a non-split IN transfer
++			 * is in an error state. In that case, the ack is handled by
++			 * the ack interrupt handler, not here. Handle ack here for
++			 * split transfers. Start splits halt on ACK.
++			 */
++			handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
++		} else {
++			if (hc->ep_type == FH_OTG_EP_TYPE_INTR ||
++			    hc->ep_type == FH_OTG_EP_TYPE_ISOC) {
++				/*
++				 * A periodic transfer halted with no other channel
++				 * interrupts set. Assume it was halted by the core
++				 * because it could not be completed in its scheduled
++				 * (micro)frame.
++				 */
++#ifdef DEBUG
++				FH_PRINTF
++				    ("%s: Halt channel %d (assume incomplete periodic transfer)\n",
++				     __func__, hc->hc_num);
++#endif
++				halt_channel(hcd, hc, qtd,
++					     FH_OTG_HC_XFER_PERIODIC_INCOMPLETE);
++			} else {
++				FH_ERROR
++				    ("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
++				     "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n",
++				     __func__, hc->hc_num, hcint.d32,
++				     FH_READ_REG32(&hcd->
++						    core_if->core_global_regs->
++						    gintsts));
++				disable_hc_int(hc_regs, chhltd);
++			}
++
++		}
++	} else {
++		FH_PRINTF("NYET/NAK/ACK/other in non-error case, 0x%08x\n",
++			   hcint.d32);
++		disable_hc_int(hc_regs, chhltd);
++	}
++}
++
++/**
++ * Handles a host Channel Halted interrupt.
++ *
++ * In slave mode, this handler is called only when the driver specifically
++ * requests a halt. This occurs during handling other host channel interrupts
++ * (e.g. nak, xacterr, stall, nyet, etc.).
++ *
++ * In DMA mode, this is the interrupt that occurs when the core has finished
++ * processing a transfer on a channel. Other host channel interrupts (except
++ * ahberr) are disabled in DMA mode.
++ */
++static int32_t handle_hc_chhltd_intr(fh_otg_hcd_t * hcd,
++				     fh_hc_t * hc,
++				     fh_otg_hc_regs_t * hc_regs,
++				     fh_otg_qtd_t * qtd)
++{
++	FH_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
++		    "Channel Halted--\n", hc->hc_num);
++
++	if (hcd->core_if->dma_enable) {
++		handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd);
++	} else {
++#ifdef DEBUG
++		if (!halt_status_ok(hcd, hc, hc_regs, qtd)) {
++			return 1;
++		}
++#endif
++		release_channel(hcd, hc, qtd, hc->halt_status);
++	}
++
++	return 1;
++}
++
++/** Handles interrupt for a specific Host Channel */
++int32_t fh_otg_hcd_handle_hc_n_intr(fh_otg_hcd_t * fh_otg_hcd, uint32_t num)
++{
++	int retval = 0;
++	hcint_data_t hcint;
++	hcintmsk_data_t hcintmsk;
++	fh_hc_t *hc;
++	fh_otg_hc_regs_t *hc_regs;
++	fh_otg_qtd_t *qtd;
++
++	FH_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num);
++
++	hc = fh_otg_hcd->hc_ptr_array[num];
++	hc_regs = fh_otg_hcd->core_if->host_if->hc_regs[num];
++	qtd = FH_CIRCLEQ_FIRST(&hc->qh->qtd_list);
++
++	hcint.d32 = FH_READ_REG32(&hc_regs->hcint);
++	hcintmsk.d32 = FH_READ_REG32(&hc_regs->hcintmsk);
++	FH_DEBUGPL(DBG_HCDV,
++		    "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
++		    hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
++	hcint.d32 = hcint.d32 & hcintmsk.d32;
++
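++	/*
++	 * Note on the Slave-mode check below (derived from the HCINT layout,
++	 * where bit 1 is chhltd): hcint.d32 == 0x2 means Channel Halted is
++	 * the only pending cause. If other bits are also set, the halt is a
++	 * side effect of another condition, so chhltd is cleared and the
++	 * specific handler for that condition runs instead.
++	 */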
++	if (!fh_otg_hcd->core_if->dma_enable) {
++		if (hcint.b.chhltd && hcint.d32 != 0x2) {
++			hcint.b.chhltd = 0;
++		}
++	}
++
++	if (hcint.b.xfercomp) {
++		retval |=
++		    handle_hc_xfercomp_intr(fh_otg_hcd, hc, hc_regs, qtd);
++		/*
++		 * If NYET occurred at same time as Xfer Complete, the NYET is
++		 * handled by the Xfer Complete interrupt handler. Don't want
++		 * to call the NYET interrupt handler in this case.
++		 */
++		hcint.b.nyet = 0;
++	}
++	if (hcint.b.chhltd) {
++		retval |= handle_hc_chhltd_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++	if (hcint.b.ahberr) {
++		retval |= handle_hc_ahberr_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++	if (hcint.b.stall) {
++		retval |= handle_hc_stall_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++	if (hcint.b.nak) {
++		retval |= handle_hc_nak_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++	if (hcint.b.ack) {
++		retval |= handle_hc_ack_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++	if (hcint.b.nyet) {
++		retval |= handle_hc_nyet_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++	if (hcint.b.xacterr) {
++		retval |= handle_hc_xacterr_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++	if (hcint.b.bblerr) {
++		retval |= handle_hc_babble_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++	if (hcint.b.frmovrun) {
++		retval |=
++		    handle_hc_frmovrun_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++	if (hcint.b.datatglerr) {
++		retval |=
++		    handle_hc_datatglerr_intr(fh_otg_hcd, hc, hc_regs, qtd);
++	}
++
++	return retval;
++}
++
++#endif /* FH_DEVICE_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_linux.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_linux.c
+new file mode 100644
+index 00000000..fea476c3
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_linux.c
+@@ -0,0 +1,873 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_linux.c $
++ * $Revision: #25 $
++ * $Date: 2015/09/08 $
++ * $Change: 2943025 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_DEVICE_ONLY
++
++/**
++ * @file
++ *
++ * This file contains the implementation of the HCD. In Linux, the HCD
++ * implements the hc_driver API.
++ */
++#include <linux/platform_device.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/device.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/string.h>
++#include <linux/dma-mapping.h>
++#include <linux/version.h>
++#include <asm/io.h>
++#include <linux/usb.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
++#include <../drivers/usb/core/hcd.h>
++#else
++#include <linux/usb/hcd.h>
++#endif
++
++#include "fh_otg_hcd_if.h"
++#include "fh_otg_dbg.h"
++#include "fh_otg_driver.h"
++#include "fh_otg_hcd.h"
++/**
++ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is
++ * qualified with its direction (allowing 32 distinct endpoints per device).
++ */
++#define fh_ep_addr_to_endpoint(_bEndpointAddress_) ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \
++						     ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4)
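++/*
++ * Example (follows directly from the macro above): bEndpointAddress 0x81
++ * (EP 1, IN) maps to (1 | (1 << 4)) = 0x11, while 0x01 (EP 1, OUT) maps to
++ * 0x01, so IN and OUT endpoints with the same number get distinct indices.
++ */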
++
++static const char fh_otg_hcd_name[] = "fh_otg_hcd";
++
++/** @name Linux HC Driver API Functions */
++/** @{ */
++static int urb_enqueue(struct usb_hcd *hcd,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++		       struct usb_host_endpoint *ep,
++#endif
++		       struct urb *urb, gfp_t mem_flags);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
++#else
++static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
++#endif
++
++static void endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
++static void endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
++#endif
++static irqreturn_t fh_otg_hcd_irq(struct usb_hcd *hcd);
++extern int hcd_start(struct usb_hcd *hcd);
++extern void hcd_stop(struct usb_hcd *hcd);
++static int get_frame_number(struct usb_hcd *hcd);
++extern int hub_status_data(struct usb_hcd *hcd, char *buf);
++extern int hub_control(struct usb_hcd *hcd,
++		       u16 typeReq,
++		       u16 wValue, u16 wIndex, char *buf, u16 wLength);
++
++struct wrapper_priv_data {
++	fh_otg_hcd_t *fh_otg_hcd;
++};
++
++/** @} */
++
++static struct hc_driver fh_otg_hc_driver = {
++
++	.description = fh_otg_hcd_name,
++	.product_desc = "FH OTG Controller",
++	.hcd_priv_size = sizeof(struct wrapper_priv_data),
++
++	.irq = fh_otg_hcd_irq,
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(3,12,0)
++	.flags = HCD_MEMORY | HCD_USB2,
++#else
++	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
++#endif
++
++	.start = hcd_start,
++	.stop = hcd_stop,
++
++	.urb_enqueue = urb_enqueue,
++	.urb_dequeue = urb_dequeue,
++	.endpoint_disable = endpoint_disable,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
++	.endpoint_reset = endpoint_reset,
++#endif
++	.get_frame_number = get_frame_number,
++
++	.hub_status_data = hub_status_data,
++	.hub_control = hub_control,
++	/* .bus_suspend and .bus_resume are not implemented */
++};
++
++/** Gets the fh_otg_hcd from a struct usb_hcd */
++static inline fh_otg_hcd_t *hcd_to_fh_otg_hcd(struct usb_hcd *hcd)
++{
++	struct wrapper_priv_data *p;
++	p = (struct wrapper_priv_data *)(hcd->hcd_priv);
++	return p->fh_otg_hcd;
++}
++
++/** Gets the struct usb_hcd that contains a fh_otg_hcd_t. */
++static inline struct usb_hcd *fh_otg_hcd_to_hcd(fh_otg_hcd_t * fh_otg_hcd)
++{
++	return fh_otg_hcd_get_priv_data(fh_otg_hcd);
++}
++
++/** Gets the usb_host_endpoint associated with an URB. */
++inline struct usb_host_endpoint *fh_urb_to_endpoint(struct urb *urb)
++{
++	struct usb_device *dev = urb->dev;
++	int ep_num = usb_pipeendpoint(urb->pipe);
++
++	if (usb_pipein(urb->pipe))
++		return dev->ep_in[ep_num];
++	else
++		return dev->ep_out[ep_num];
++}
++
++static int _disconnect(fh_otg_hcd_t * hcd)
++{
++	struct usb_hcd *usb_hcd = fh_otg_hcd_to_hcd(hcd);
++
++	usb_hcd->self.is_b_host = 0;
++	return 0;
++}
++
++static int _start(fh_otg_hcd_t * hcd)
++{
++	struct usb_hcd *usb_hcd = fh_otg_hcd_to_hcd(hcd);
++
++	usb_hcd->self.is_b_host = fh_otg_hcd_is_b_host(hcd);
++	hcd_start(usb_hcd);
++
++	return 0;
++}
++
++static int _hub_info(fh_otg_hcd_t * hcd, void *urb_handle, uint32_t * hub_addr,
++		     uint32_t * port_addr)
++{
++	struct urb *urb = (struct urb *)urb_handle;
++	if (urb->dev->tt) {
++		*hub_addr = urb->dev->tt->hub->devnum;
++	} else {
++		*hub_addr = 0;
++	}
++	*port_addr = urb->dev->ttport;
++	return 0;
++}
++
++static int _speed(fh_otg_hcd_t * hcd, void *urb_handle)
++{
++	struct urb *urb = (struct urb *)urb_handle;
++	return urb->dev->speed;
++}
++
++static int _get_b_hnp_enable(fh_otg_hcd_t * hcd)
++{
++	struct usb_hcd *usb_hcd = fh_otg_hcd_to_hcd(hcd);
++	return usb_hcd->self.b_hnp_enable;
++}
++
++static void allocate_bus_bandwidth(struct usb_hcd *hcd, uint32_t bw,
++				   struct urb *urb)
++{
++	hcd_to_bus(hcd)->bandwidth_allocated += bw / urb->interval;
++	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
++		hcd_to_bus(hcd)->bandwidth_isoc_reqs++;
++	} else {
++		hcd_to_bus(hcd)->bandwidth_int_reqs++;
++	}
++}
++
++static void free_bus_bandwidth(struct usb_hcd *hcd, uint32_t bw,
++			       struct urb *urb)
++{
++	hcd_to_bus(hcd)->bandwidth_allocated -= bw / urb->interval;
++	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
++		hcd_to_bus(hcd)->bandwidth_isoc_reqs--;
++	} else {
++		hcd_to_bus(hcd)->bandwidth_int_reqs--;
++	}
++}
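++/*
++ * Example (integer math as in the two helpers above, assuming bw is in the
++ * units the HCD reports): a periodic transfer costing bw = 100 with
++ * urb->interval = 8 adds 100 / 8 = 12 to bandwidth_allocated, and the
++ * matching free_bus_bandwidth() call subtracts the same amount.
++ */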
++
++/**
++ * Sets the final status of an URB and returns it to the device driver. Any
++ * required cleanup of the URB is performed.
++ */
++static int _complete(fh_otg_hcd_t * hcd, void *urb_handle,
++		     fh_otg_hcd_urb_t * fh_otg_urb, int32_t status)
++{
++	struct urb *urb = (struct urb *)urb_handle;
++#ifdef DEBUG
++	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
++		FH_PRINTF("%s: urb %p, device %d, ep %d %s, status=%d\n",
++			   __func__, urb, usb_pipedevice(urb->pipe),
++			   usb_pipeendpoint(urb->pipe),
++			   usb_pipein(urb->pipe) ? "IN" : "OUT", status);
++		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
++			int i;
++			for (i = 0; i < urb->number_of_packets; i++) {
++				FH_PRINTF("  ISO Desc %d status: %d\n",
++					   i, urb->iso_frame_desc[i].status);
++			}
++		}
++	}
++#endif
++
++	urb->actual_length = fh_otg_hcd_urb_get_actual_length(fh_otg_urb);
++	/* Convert status value. */
++	switch (status) {
++	case -FH_E_PROTOCOL:
++		status = -EPROTO;
++		break;
++	case -FH_E_IN_PROGRESS:
++		status = -EINPROGRESS;
++		break;
++	case -FH_E_PIPE:
++		status = -EPIPE;
++		break;
++	case -FH_E_IO:
++		status = -EIO;
++		break;
++	case -FH_E_TIMEOUT:
++		status = -ETIMEDOUT;
++		break;
++	case -FH_E_OVERFLOW:
++		status = -EOVERFLOW;
++		break;
++	default:
++		if (status) {
++			FH_PRINTF("Unknown URB status %d\n", status);
++		}
++	}
++
++	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
++		int i;
++
++		urb->error_count = fh_otg_hcd_urb_get_error_count(fh_otg_urb);
++		for (i = 0; i < urb->number_of_packets; ++i) {
++			urb->iso_frame_desc[i].actual_length =
++			    fh_otg_hcd_urb_get_iso_desc_actual_length
++			    (fh_otg_urb, i);
++			urb->iso_frame_desc[i].status =
++			    fh_otg_hcd_urb_get_iso_desc_status(fh_otg_urb, i);
++		}
++	}
++
++	urb->status = status;
++	if (!status) {
++		if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
++		    (urb->actual_length < urb->transfer_buffer_length)) {
++			urb->status = -EREMOTEIO;
++		}
++	}
++
++	if ((usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) ||
++	    (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
++		struct usb_host_endpoint *ep = fh_urb_to_endpoint(urb);
++		if (ep) {
++			free_bus_bandwidth(fh_otg_hcd_to_hcd(hcd),
++					   fh_otg_hcd_get_ep_bandwidth(hcd,
++									ep->hcpriv),
++					   urb);
++		}
++	}
++#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
++	usb_hcd_unlink_urb_from_ep(fh_otg_hcd_to_hcd(hcd), urb);
++#endif
++	
++	urb->hcpriv = NULL;
++	FH_FREE(fh_otg_urb);
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++	FH_SPINUNLOCK(hcd->lock);
++	usb_hcd_giveback_urb(fh_otg_hcd_to_hcd(hcd), urb);
++	FH_SPINLOCK(hcd->lock);
++#elif LINUX_VERSION_CODE <= KERNEL_VERSION(3,7,7)
++	FH_SPINUNLOCK(hcd->lock);
++	usb_hcd_giveback_urb(fh_otg_hcd_to_hcd(hcd), urb, status);
++	FH_SPINLOCK(hcd->lock);
++#else
++	usb_hcd_giveback_urb(fh_otg_hcd_to_hcd(hcd), urb, status);
++#endif
++
++	return 0;
++}
++
++static struct fh_otg_hcd_function_ops hcd_fops = {
++	.start = _start,
++	.disconnect = _disconnect,
++	.hub_info = _hub_info,
++	.speed = _speed,
++	.complete = _complete,
++	.get_b_hnp_enable = _get_b_hnp_enable,
++};
++
++/**
++ * Initializes the HCD. This function allocates memory for and initializes the
++ * static parts of the usb_hcd and fh_otg_hcd structures. It also registers the
++ * USB bus with the core and calls the hc_driver->start() function. It returns
++ * a negative error on failure.
++ */
++int hcd_init(struct platform_device *dev, int irq)
++{
++	struct usb_hcd *hcd = NULL;
++	fh_otg_hcd_t *fh_otg_hcd = NULL;
++	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
++
++	int retval = 0;
++
++	printk(KERN_ERR "FH OTG HCD INIT (%p)\n", otg_dev);
++
++	/* Set device flags indicating whether the HCD supports DMA */
++	if (otg_dev->core_if->dma_enable > 0) {
++		if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32)) < 0)
++			printk(KERN_ERR "can't set DMA mask\n");
++		if (dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32)) < 0)
++			printk(KERN_ERR "can't set coherent DMA mask\n");
++	}
++
++	/*
++	 * Allocate memory for the base HCD plus the FH OTG HCD.
++	 * Initialize the base HCD.
++	 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
++	hcd = usb_create_hcd(&fh_otg_hc_driver, &dev->dev, dev->dev.bus_id);
++#else
++	hcd = usb_create_hcd(&fh_otg_hc_driver, &dev->dev, dev_name(&dev->dev));
++
++	hcd->has_tt = 1;
++//      hcd->uses_new_polling = 1;
++//      hcd->poll_rh = 0;
++#endif
++	if (!hcd) {
++		retval = -ENOMEM;
++		goto error1;
++	}
++
++	printk(KERN_ERR "hcd regs base (%p)\n", otg_dev->os_dep.base);
++	hcd->regs = otg_dev->os_dep.base;
++
++	/* Initialize the FH OTG HCD. */
++	fh_otg_hcd = fh_otg_hcd_alloc_hcd();
++	if (!fh_otg_hcd) {
++		goto error2;
++	}
++	((struct wrapper_priv_data *)(hcd->hcd_priv))->fh_otg_hcd =
++	    fh_otg_hcd;
++	otg_dev->hcd = fh_otg_hcd;
++
++	if (fh_otg_hcd_init(fh_otg_hcd, otg_dev->core_if)) {
++		goto error2;
++	}
++
++	otg_dev->hcd->otg_dev = otg_dev;
++	hcd->self.otg_port = fh_otg_hcd_otg_port(fh_otg_hcd);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33) /* not supported for LM (2.6.20.1 kernel) */
++    //hcd->self.otg_version = fh_otg_get_otg_version(otg_dev->core_if);
++	/* Don't support SG list at this point */
++	hcd->self.sg_tablesize = 0;
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
++	/* Do not do HNP polling if not capable */
++	if (otg_dev->core_if->otg_ver)
++		hcd->self.is_hnp_cap = fh_otg_get_hnpcapable(otg_dev->core_if);
++#endif
++	/*
++	 * Finish generic HCD initialization and start the HCD. This function
++	 * allocates the DMA buffer pool, registers the USB bus, requests the
++	 * IRQ line, and calls the hcd_start() method.
++	 */
++	retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
++	if (retval < 0) {
++		goto error2;
++	}
++
++	fh_otg_hcd_set_priv_data(fh_otg_hcd, hcd);
++	platform_set_drvdata(dev, otg_dev);
++	return 0;
++
++error2:
++	usb_put_hcd(hcd);
++error1:
++	return retval;
++}
++
++/**
++ * Removes the HCD.
++ * Frees memory and resources associated with the HCD and deregisters the bus.
++ */
++void hcd_remove(struct platform_device *dev)
++{
++	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
++
++
++	fh_otg_hcd_t *fh_otg_hcd;
++	struct usb_hcd *hcd;
++
++	FH_DEBUGPL(DBG_HCD, "FH OTG HCD REMOVE\n");
++
++	if (!otg_dev) {
++		FH_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__);
++		return;
++	}
++
++	fh_otg_hcd = otg_dev->hcd;
++
++	if (!fh_otg_hcd) {
++		FH_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__);
++		return;
++	}
++
++	hcd = fh_otg_hcd_to_hcd(fh_otg_hcd);
++
++	if (!hcd) {
++		FH_DEBUGPL(DBG_ANY,
++			    "%s: fh_otg_hcd_to_hcd(fh_otg_hcd) NULL!\n",
++			    __func__);
++		return;
++	}
++	usb_remove_hcd(hcd);
++	fh_otg_hcd_set_priv_data(fh_otg_hcd, NULL);
++	fh_otg_hcd_remove(fh_otg_hcd);
++	usb_put_hcd(hcd);
++}
++
++/* =========================================================================
++ *  Linux HC Driver Functions
++ * ========================================================================= */
++
++/** Initializes the FH_otg controller and its root hub and prepares it for host
++ * mode operation. Activates the root port. Returns 0 on success and a negative
++ * error code on failure. */
++int hcd_start(struct usb_hcd *hcd)
++{
++	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
++	struct usb_bus *bus;
++
++	FH_DEBUGPL(DBG_HCD, "FH OTG HCD START\n");
++	bus = hcd_to_bus(hcd);
++
++	hcd->state = HC_STATE_RUNNING;
++	if (fh_otg_hcd_start(fh_otg_hcd, &hcd_fops)) {
++		if (fh_otg_hcd->core_if->otg_ver && fh_otg_is_device_mode(fh_otg_hcd->core_if))
++			fh_otg_hcd->core_if->op_state = B_PERIPHERAL;
++		return 0;
++	}
++
++	/* If a root hub is already attached, inform the hub driver to resume. */
++	if (bus->root_hub) {
++		FH_DEBUGPL(DBG_HCD, "FH OTG HCD Has Root Hub\n");
++		usb_hcd_resume_root_hub(hcd);
++	}
++
++	return 0;
++}
++
++/**
++ * Halts the FH_otg host mode operations in a clean manner. USB transfers are
++ * stopped.
++ */
++void hcd_stop(struct usb_hcd *hcd)
++{
++	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
++
++	fh_otg_hcd_stop(fh_otg_hcd);
++}
++
++/** Returns the current frame number. */
++static int get_frame_number(struct usb_hcd *hcd)
++{
++	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
++
++	return fh_otg_hcd_get_frame_number(fh_otg_hcd);
++}
++
++#ifdef DEBUG
++static void dump_urb_info(struct urb *urb, char *fn_name)
++{
++	printk("%s, urb %p\n", fn_name, urb);
++	printk("  Device address: %d\n", usb_pipedevice(urb->pipe));
++	printk("  Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
++		   (usb_pipein(urb->pipe) ? "IN" : "OUT"));
++	printk("  Endpoint type: %s\n", ({
++		char *pipetype;
++		switch (usb_pipetype(urb->pipe)) {
++		case PIPE_CONTROL:
++			pipetype = "CONTROL";
++			break;
++		case PIPE_BULK:
++			pipetype = "BULK";
++			break;
++		case PIPE_INTERRUPT:
++			pipetype = "INTERRUPT";
++			break;
++		case PIPE_ISOCHRONOUS:
++			pipetype = "ISOCHRONOUS";
++			break;
++		default:
++			pipetype = "UNKNOWN";
++			break;
++		}
++		pipetype;
++	}));
++	printk("  Speed: %s\n", ({
++		char *speed;
++		switch (urb->dev->speed) {
++		case USB_SPEED_HIGH:
++			speed = "HIGH";
++			break;
++		case USB_SPEED_FULL:
++			speed = "FULL";
++			break;
++		case USB_SPEED_LOW:
++			speed = "LOW";
++			break;
++		default:
++			speed = "UNKNOWN";
++			break;
++		}
++		speed;
++	}));
++	printk("  Max packet size: %d\n",
++		   usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
++	printk("  Data buffer length: %d\n", urb->transfer_buffer_length);
++	printk("  Transfer buffer: %p, Transfer DMA: %p\n",
++		   urb->transfer_buffer, (void *)urb->transfer_dma);
++	printk("  Setup buffer: %p, Setup DMA: %p\n",
++		   urb->setup_packet, (void *)urb->setup_dma);
++	printk("  Interval: %d\n", urb->interval);
++	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
++		int i;
++		for (i = 0; i < urb->number_of_packets; i++) {
++			printk("  ISO Desc %d:\n", i);
++			printk("    offset: %d, length %d\n",
++				   urb->iso_frame_desc[i].offset,
++				   urb->iso_frame_desc[i].length);
++		}
++	}
++}
++
++#endif
++
++/** Starts processing a USB transfer request specified by a USB Request Block
++ * (URB). mem_flags indicates the type of memory allocation to use while
++ * processing this URB. */
++static int ___urb_enqueue(struct usb_hcd *hcd,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++		       struct usb_host_endpoint *ep,
++#endif
++		       struct urb *urb, gfp_t mem_flags)
++{
++	int retval = 0;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
++	struct usb_host_endpoint *ep = urb->ep;
++#endif
++	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
++	fh_otg_hcd_urb_t *fh_otg_urb;
++	int i;
++	int alloc_bandwidth = 0;
++	uint8_t ep_type = 0;
++	uint32_t flags = 0;
++#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)	
++	fh_irqflags_t irqflags;
++#endif
++	void *buf;
++
++#ifdef DEBUG
++	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
++		dump_urb_info(urb, "urb_enqueue");
++	}
++#endif
++
++	if ((usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
++	    || (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
++		if (!fh_otg_hcd_is_bandwidth_allocated
++		    (fh_otg_hcd, &ep->hcpriv)) {
++			alloc_bandwidth = 1;
++		}
++	}
++
++	switch (usb_pipetype(urb->pipe)) {
++	case PIPE_CONTROL:
++		ep_type = USB_ENDPOINT_XFER_CONTROL;
++		break;
++	case PIPE_ISOCHRONOUS:
++		ep_type = USB_ENDPOINT_XFER_ISOC;
++		break;
++	case PIPE_BULK:
++		ep_type = USB_ENDPOINT_XFER_BULK;
++		break;
++	case PIPE_INTERRUPT:
++		ep_type = USB_ENDPOINT_XFER_INT;
++		break;
++	default:
++		FH_WARN("Wrong ep type\n");
++	}
++
++	fh_otg_urb = fh_otg_hcd_urb_alloc(fh_otg_hcd,
++					    urb->number_of_packets,
++					    mem_flags == GFP_ATOMIC ? 1 : 0);
++
++	fh_otg_hcd_urb_set_pipeinfo(fh_otg_urb, usb_pipedevice(urb->pipe),
++				     usb_pipeendpoint(urb->pipe), ep_type,
++				     usb_pipein(urb->pipe),
++				     usb_maxpacket(urb->dev, urb->pipe,
++						   !(usb_pipein(urb->pipe))));
++
++	buf = urb->transfer_buffer;
++	if (hcd->self.uses_dma) {
++		/*
++		 * Calculate the virtual address from the physical address,
++		 * because some class drivers may not fill in transfer_buffer.
++		 * In Buffer DMA mode the virtual address is used when
++		 * handling non-DWORD-aligned buffers.
++		 */
++		buf = phys_to_virt(urb->transfer_dma);
++	}
++
++	if (!(urb->transfer_flags & URB_NO_INTERRUPT))
++		flags |= URB_GIVEBACK_ASAP;
++	if (urb->transfer_flags & URB_ZERO_PACKET)
++		flags |= URB_SEND_ZERO_PACKET;
++
++	fh_otg_hcd_urb_set_params(fh_otg_urb, urb, buf,
++				   urb->transfer_dma,
++				   urb->transfer_buffer_length,
++				   urb->setup_packet,
++				   urb->setup_dma, flags, urb->interval);
++
++	for (i = 0; i < urb->number_of_packets; ++i) {
++		fh_otg_hcd_urb_set_iso_desc_params(fh_otg_urb, i,
++						    urb->
++						    iso_frame_desc[i].offset,
++						    urb->
++						    iso_frame_desc[i].length);
++	}
++
++	urb->hcpriv = fh_otg_urb;
++#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)	
++	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &irqflags);
++	retval = usb_hcd_link_urb_to_ep(hcd, urb);
++	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, irqflags);
++	if (retval)
++		goto fail1;
++#endif
++	
++	retval = fh_otg_hcd_urb_enqueue(fh_otg_hcd, fh_otg_urb, &ep->hcpriv,
++					 mem_flags == GFP_ATOMIC ? 1 : 0);
++	if (retval){
++		goto fail2;
++	}
++	
++	if (alloc_bandwidth) {
++		allocate_bus_bandwidth(hcd,
++		       fh_otg_hcd_get_ep_bandwidth
++		       (fh_otg_hcd, ep->hcpriv), urb);
++	}
++	
++	return 0;
++	
++fail2:
++#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
++	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &irqflags);
++	fh_otg_urb->priv = NULL;
++	usb_hcd_unlink_urb_from_ep(hcd, urb);
++	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, irqflags);
++fail1:
++#endif
++	urb->hcpriv = NULL;
++	FH_FREE(fh_otg_urb);
++	
++	return retval;
++}
++
++static int urb_enqueue(struct usb_hcd *hcd,
++		       struct urb *urb, gfp_t mem_flags)
++{
++	int ret;
++	unsigned long flags;
++
++	local_irq_save(flags);
++	ret = ___urb_enqueue(hcd, urb, GFP_ATOMIC);
++	local_irq_restore(flags);
++
++	return ret;
++}
++
++/** Aborts/cancels a USB transfer request. Returns 0 on success, or a
++ * negative error code if the URB could not be unlinked. */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
++#else
++static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
++#endif
++{
++	fh_irqflags_t flags;
++	fh_otg_hcd_t *fh_otg_hcd;
++	int rc = 0;
++	FH_DEBUGPL(DBG_HCD, "FH OTG HCD URB Dequeue\n");
++
++	fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
++
++#ifdef DEBUG
++	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
++		dump_urb_info(urb, "urb_dequeue");
++	}
++#endif
++
++	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);
++	
++#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
++	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
++	if (rc)
++		goto out;
++#endif
++
++	if (!urb->hcpriv) {
++		FH_DEBUGPL(DBG_HCD, "urb->hcpriv is NULL\n");
++		goto out;
++	}
++
++	rc = fh_otg_hcd_urb_dequeue(fh_otg_hcd, urb->hcpriv);
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(3,7,7)
++	usb_hcd_unlink_urb_from_ep(hcd, urb);
++#endif
++	
++	FH_FREE(urb->hcpriv);
++	urb->hcpriv = NULL;
++
++	/* Higher layer software sets URB status. */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++	usb_hcd_giveback_urb(hcd, urb);
++#else
++	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, flags);
++	usb_hcd_giveback_urb(hcd, urb, status);
++	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);
++#endif
++	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
++		FH_PRINTF("Called usb_hcd_giveback_urb()\n");
++		FH_PRINTF("  urb->status = %d\n", urb->status);
++	}
++out:
++	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, flags);
++
++	return rc;
++}
++
++/* Frees resources in the FH_otg controller related to a given endpoint. Also
++ * clears state in the HCD related to the endpoint. Any URBs for the endpoint
++ * must already be dequeued. */
++static void endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
++{
++	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
++
++	FH_DEBUGPL(DBG_HCD,
++		    "FH OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, "
++		    "endpoint=%d\n", ep->desc.bEndpointAddress,
++		    fh_ep_addr_to_endpoint(ep->desc.bEndpointAddress));
++	fh_otg_hcd_endpoint_disable(fh_otg_hcd, ep->hcpriv, 250);
++	ep->hcpriv = NULL;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
++/* Resets endpoint-specific parameter values; in the current version this is
++ * used to reset the data toggle (as a workaround). This function can be
++ * called from the usb_clear_halt routine. */
++static void endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
++{
++	fh_irqflags_t flags;
++	struct usb_device *udev = NULL;
++	int epnum = usb_endpoint_num(&ep->desc);
++	int is_out = usb_endpoint_dir_out(&ep->desc);
++	int is_control = usb_endpoint_xfer_control(&ep->desc);
++	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
++	struct platform_device *_dev = fh_otg_hcd->otg_dev->os_dep.pdev;
++
++	if (_dev)
++		udev = to_usb_device(&_dev->dev);
++	else
++		return;
++
++	FH_DEBUGPL(DBG_HCD, "FH OTG HCD EP RESET: Endpoint Num=0x%02d\n", epnum);
++
++	FH_SPINLOCK_IRQSAVE(fh_otg_hcd->lock, &flags);
++	usb_settoggle(udev, epnum, is_out, 0);
++	if (is_control)
++		usb_settoggle(udev, epnum, !is_out, 0);
++
++	if (ep->hcpriv) {
++		fh_otg_hcd_endpoint_reset(fh_otg_hcd, ep->hcpriv);
++	}
++	FH_SPINUNLOCK_IRQRESTORE(fh_otg_hcd->lock, flags);
++}
++#endif
++
++/** Handles host mode interrupts for the FH_otg controller. Returns IRQ_NONE if
++ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
++ * interrupt.
++ *
++ * This function is called by the USB core when an interrupt occurs */
++static irqreturn_t fh_otg_hcd_irq(struct usb_hcd *hcd)
++{
++	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
++	int32_t retval = fh_otg_hcd_handle_intr(fh_otg_hcd);
++	if (retval != 0) {
++		S3C2410X_CLEAR_EINTPEND();
++	}
++	return IRQ_RETVAL(retval);
++}
++
++/** Creates Status Change bitmap for the root hub and root port. The bitmap is
++ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
++ * is the status change indicator for the single root port. Returns 1 if either
++ * change indicator is 1, otherwise returns 0. */
++int hub_status_data(struct usb_hcd *hcd, char *buf)
++{
++	fh_otg_hcd_t *fh_otg_hcd = hcd_to_fh_otg_hcd(hcd);
++
++	buf[0] = 0;
++	buf[0] |= (fh_otg_hcd_is_status_changed(fh_otg_hcd, 1)) << 1;
++
++	return (buf[0] != 0);
++}
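++/*
++ * Example: when the single root port has a pending status change,
++ * fh_otg_hcd_is_status_changed() returns 1 and buf[0] reads 0x02 (bit 1
++ * set); otherwise buf[0] is 0x00 and the function returns 0.
++ */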
++
++/** Handles hub class-specific requests. */
++int hub_control(struct usb_hcd *hcd,
++		u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength)
++{
++	int retval;
++
++	retval = fh_otg_hcd_hub_control(hcd_to_fh_otg_hcd(hcd),
++					 typeReq, wValue, wIndex, buf, wLength);
++
++	switch (retval) {
++	case -FH_E_INVALID:
++		retval = -EINVAL;
++		break;
++	}
++
++	return retval;
++}
++
++#endif /* FH_DEVICE_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_queue.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_queue.c
+new file mode 100644
+index 00000000..37e085db
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_hcd_queue.c
+@@ -0,0 +1,731 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_hcd_queue.c $
++ * $Revision: #45 $
++ * $Date: 2013/01/24 $
++ * $Change: 2150293 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ * 
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ * 
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_DEVICE_ONLY
++
++/**
++ * @file
++ *
++ * This file contains the functions to manage Queue Heads and Queue
++ * Transfer Descriptors.
++ */
++
++#include "fh_otg_hcd.h"
++#include "fh_otg_regs.h"
++
++/** 
++ * Free each QTD in the QH's QTD-list then free the QH.  QH should already be
++ * removed from a list.  QTD list should already be empty if called from URB
++ * Dequeue.
++ *
++ * @param hcd HCD instance.
++ * @param qh The QH to free.
++ */
++void fh_otg_hcd_qh_free(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	fh_otg_qtd_t *qtd, *qtd_tmp;
++	fh_irqflags_t flags;
++
++	/* Free each QTD in the QTD list */
++	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
++	FH_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
++		FH_CIRCLEQ_REMOVE(&qh->qtd_list, qtd, qtd_list_entry);
++		fh_otg_hcd_qtd_free(qtd);
++	}
++
++	if (hcd->core_if->dma_desc_enable) {
++		FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
++		fh_otg_hcd_qh_free_ddma(hcd, qh);
++		FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
++	} else if (qh->dw_align_buf) {
++		uint32_t buf_size;
++		if (qh->ep_type == UE_ISOCHRONOUS) {
++			buf_size = 4096;
++		} else {
++			buf_size = hcd->core_if->core_params->max_transfer_size;
++		}
++		FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
++		FH_DMA_FREE(buf_size, qh->dw_align_buf, qh->dw_align_buf_dma);
++		FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
++	}
++
++	FH_FREE(qh);
++	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
++	return;
++}
++
++#define BitStuffTime(bytecount)  ((8 * 7 * bytecount) / 6)
++#define HS_HOST_DELAY		5	/* nanoseconds */
++#define FS_LS_HOST_DELAY	1000	/* nanoseconds */
++#define HUB_LS_SETUP		333	/* nanoseconds */
++#define NS_TO_US(ns)		((ns + 500) / 1000)
++				/* convert & round nanoseconds to microseconds */
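++/*
++ * Example (worst-case bit-stuffing overhead of 7/6, per the macro above):
++ * BitStuffTime(512) = (8 * 7 * 512) / 6 = 4778 bit times for a 512-byte
++ * payload.
++ */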
++
++static uint32_t calc_bus_time(int speed, int is_in, int is_isoc, int bytecount)
++{
++	unsigned long retval;
++
++	switch (speed) {
++	case USB_SPEED_HIGH:
++		if (is_isoc) {
++			retval =
++			    ((38 * 8 * 2083) +
++			     (2083 * (3 + BitStuffTime(bytecount)))) / 1000 +
++			    HS_HOST_DELAY;
++		} else {
++			retval =
++			    ((55 * 8 * 2083) +
++			     (2083 * (3 + BitStuffTime(bytecount)))) / 1000 +
++			    HS_HOST_DELAY;
++		}
++		break;
++	case USB_SPEED_FULL:
++		if (is_isoc) {
++			retval =
++			    (8354 * (31 + 10 * BitStuffTime(bytecount))) / 1000;
++			if (is_in) {
++				retval = 7268 + FS_LS_HOST_DELAY + retval;
++			} else {
++				retval = 6265 + FS_LS_HOST_DELAY + retval;
++			}
++		} else {
++			retval =
++			    (8354 * (31 + 10 * BitStuffTime(bytecount))) / 1000;
++			retval = 9107 + FS_LS_HOST_DELAY + retval;
++		}
++		break;
++	case USB_SPEED_LOW:
++		if (is_in) {
++			retval =
++			    (67667 * (31 + 10 * BitStuffTime(bytecount))) /
++			    1000;
++			retval =
++			    64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
++			    retval;
++		} else {
++			retval =
++			    (66700 * (31 + 10 * BitStuffTime(bytecount))) /
++			    1000;
++			retval =
++			    64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
++			    retval;
++		}
++		break;
++	default:
++		FH_WARN("Unknown device speed\n");
++		retval = -1;
++	}
++
++	return NS_TO_US(retval);
++}
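++/*
++ * Example (plugging numbers into the high-speed bulk branch above): for a
++ * 512-byte transaction,
++ *   ((55 * 8 * 2083) + (2083 * (3 + BitStuffTime(512)))) / 1000 + 5
++ *   = (916520 + 2083 * 4781) / 1000 + 5 = 10880 ns,
++ * which NS_TO_US() rounds to 11 us of bus time.
++ */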
++
++/** 
++ * Initializes a QH structure.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param qh  The QH to init.
++ * @param urb Holds the information about the device/endpoint that we need
++ * 	      to initialize the QH. 
++ */
++#define SCHEDULE_SLOP 10
++void qh_init(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh, fh_otg_hcd_urb_t * urb)
++{
++	char *speed, *type;
++	int dev_speed;
++	uint32_t hub_addr, hub_port;
++
++	fh_memset(qh, 0, sizeof(fh_otg_qh_t));
++
++	/* Initialize QH */
++	qh->ep_type = fh_otg_hcd_get_pipe_type(&urb->pipe_info);
++	qh->ep_is_in = fh_otg_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;
++
++	qh->data_toggle = FH_OTG_HC_PID_DATA0;
++	qh->maxp = fh_otg_hcd_get_mps(&urb->pipe_info);
++	FH_CIRCLEQ_INIT(&qh->qtd_list);
++	FH_LIST_INIT(&qh->qh_list_entry);
++	qh->channel = NULL;
++
++	/* FS/LS endpoint on a HS hub,
++	 * NOT the virtual root hub */
++	dev_speed = hcd->fops->speed(hcd, urb->priv);
++
++	hcd->fops->hub_info(hcd, urb->priv, &hub_addr, &hub_port);
++	qh->do_split = 0;
++
++	if (((dev_speed == USB_SPEED_LOW) ||
++	     (dev_speed == USB_SPEED_FULL)) &&
++	    (hub_addr != 0 && hub_addr != 1)) {
++		FH_DEBUGPL(DBG_HCD,
++			    "QH init: EP %d: TT found at hub addr %d, for port %d\n",
++			    fh_otg_hcd_get_ep_num(&urb->pipe_info), hub_addr,
++			    hub_port);
++		qh->do_split = 1;
++	}
++
++	if (qh->ep_type == UE_INTERRUPT || qh->ep_type == UE_ISOCHRONOUS) {
++		/* Compute scheduling parameters once and save them. */
++		hprt0_data_t hprt;
++
++		/** @todo Account for split transfers in the bus time. */
++		int bytecount =
++		    fh_hb_mult(qh->maxp) * fh_max_packet(qh->maxp);
++
++		qh->usecs =
++		    calc_bus_time((qh->do_split ? USB_SPEED_HIGH : dev_speed),
++				  qh->ep_is_in, (qh->ep_type == UE_ISOCHRONOUS),
++				  bytecount);
++		/* Start in a (micro)frame slightly in the future. */
++		qh->sched_frame = fh_frame_num_inc(hcd->frame_number,
++						    SCHEDULE_SLOP);
++		qh->interval = urb->interval;
++
++#if 0
++		/* Increase interrupt polling rate for debugging. */
++		if (qh->ep_type == UE_INTERRUPT) {
++			qh->interval = 8;
++		}
++#endif
++		hprt.d32 = FH_READ_REG32(hcd->core_if->host_if->hprt0);
++		if ((hprt.b.prtspd == FH_HPRT0_PRTSPD_HIGH_SPEED) &&
++		    ((dev_speed == USB_SPEED_LOW) ||
++		     (dev_speed == USB_SPEED_FULL))) {
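++			/*
++			 * A FS/LS device behind a high-speed port runs via a
++			 * TT: qh->interval is in frames, but the HS schedule
++			 * counts microframes, so scale by 8; OR-ing 0x7
++			 * aligns the start to the last microframe of a frame.
++			 */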
++			qh->interval *= 8;
++			qh->sched_frame |= 0x7;
++			qh->start_split_frame = qh->sched_frame;
++		}
++
++	}
++
++	FH_DEBUGPL(DBG_HCD, "FH OTG HCD QH Initialized\n");
++	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH  - qh = %p\n", qh);
++	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH  - Device Address = %d\n",
++		    fh_otg_hcd_get_dev_addr(&urb->pipe_info));
++	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH  - Endpoint %d, %s\n",
++		    fh_otg_hcd_get_ep_num(&urb->pipe_info),
++		    fh_otg_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
++	switch (dev_speed) {
++	case USB_SPEED_LOW:
++		qh->dev_speed = FH_OTG_EP_SPEED_LOW;
++		speed = "low";
++		break;
++	case USB_SPEED_FULL:
++		qh->dev_speed = FH_OTG_EP_SPEED_FULL;
++		speed = "full";
++		break;
++	case USB_SPEED_HIGH:
++		qh->dev_speed = FH_OTG_EP_SPEED_HIGH;
++		speed = "high";
++		break;
++	default:
++		speed = "?";
++		break;
++	}
++	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH  - Speed = %s\n", speed);
++
++	switch (qh->ep_type) {
++	case UE_ISOCHRONOUS:
++		type = "isochronous";
++		break;
++	case UE_INTERRUPT:
++		type = "interrupt";
++		break;
++	case UE_CONTROL:
++		type = "control";
++		break;
++	case UE_BULK:
++		type = "bulk";
++		break;
++	default:
++		type = "?";
++		break;
++	}
++
++	FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH  - Type = %s\n", type);
++
++#ifdef DEBUG
++	if (qh->ep_type == UE_INTERRUPT) {
++		FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH - usecs = %d\n",
++			    qh->usecs);
++		FH_DEBUGPL(DBG_HCDV, "FH OTG HCD QH - interval = %d\n",
++			    qh->interval);
++	}
++#endif
++
++}
++
++/**
++ * This function allocates and initializes a QH.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param urb Holds the information about the device/endpoint that we need
++ * 	      to initialize the QH.
++ * @param atomic_alloc Flag to do atomic allocation if needed
++ *
++ * @return Returns pointer to the newly allocated QH, or NULL on error. */
++fh_otg_qh_t *fh_otg_hcd_qh_create(fh_otg_hcd_t * hcd,
++				    fh_otg_hcd_urb_t * urb, int atomic_alloc)
++{
++	fh_otg_qh_t *qh;
++
++	/* Allocate memory */
++	/** @todo add memflags argument */
++	qh = fh_otg_hcd_qh_alloc(atomic_alloc);
++	if (qh == NULL) {
++		FH_ERROR("qh allocation failed");
++		return NULL;
++	}
++
++	qh_init(hcd, qh, urb);
++
++	if (hcd->core_if->dma_desc_enable
++	    && (fh_otg_hcd_qh_init_ddma(hcd, qh) < 0)) {
++		fh_otg_hcd_qh_free(hcd, qh);
++		return NULL;
++	}
++
++	return qh;
++}
++
++/**
++ * Checks that a channel is available for a periodic transfer.
++ *
++ * @return 0 if successful, negative error code otherwise.
++ */
++static int periodic_channel_available(fh_otg_hcd_t * hcd)
++{
++	/*
++	 * Currently assuming that there is a dedicated host channel for each
++	 * periodic transaction plus at least one host channel for
++	 * non-periodic transactions.
++	 */
++	int status;
++	int num_channels;
++
++	num_channels = hcd->core_if->core_params->host_channels;
++	if ((hcd->periodic_channels + hcd->non_periodic_channels < num_channels)
++	    && (hcd->periodic_channels < num_channels - 1)) {
++		status = 0;
++	} else {
++		FH_INFO("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
++			__func__, num_channels, hcd->periodic_channels, hcd->non_periodic_channels);	//NOTICE
++		status = -FH_E_NO_SPACE;
++	}
++
++	return status;
++}
++
++/**
++ * Checks that there is sufficient bandwidth for the specified QH in the
++ * periodic schedule. For simplicity, this calculation assumes that all the
++ * transfers in the periodic schedule may occur in the same (micro)frame.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param qh QH containing periodic bandwidth required.
++ *
++ * @return 0 if successful, negative error code otherwise.
++ */
++static int check_periodic_bandwidth(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	int status;
++	int16_t max_claimed_usecs;
++
++	status = 0;
++
++	if ((qh->dev_speed == FH_OTG_EP_SPEED_HIGH) || qh->do_split) {
++		/*
++		 * High speed mode.
++		 * Max periodic usecs is 80% x 125 usec = 100 usec.
++		 */
++
++		max_claimed_usecs = 100 - qh->usecs;
++	} else {
++		/*
++		 * Full speed mode.
++		 * Max periodic usecs is 90% x 1000 usec = 900 usec.
++		 */
++		max_claimed_usecs = 900 - qh->usecs;
++	}
++
++	if (hcd->periodic_usecs > max_claimed_usecs) {
++		FH_INFO("%s: already claimed usecs %d, required usecs %d\n", __func__, hcd->periodic_usecs, qh->usecs);	//NOTICE
++		status = -FH_E_NO_SPACE;
++	}
++
++	return status;
++}
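++
++/*
++ * Example: with qh->usecs = 2 on a high-speed bus, max_claimed_usecs is
++ * 98, so the QH is rejected once already-scheduled periodic QHs claim
++ * more than 98 of the 100 us available per microframe.
++ */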
++
++/**
++ * Checks that the max transfer size allowed in a host channel is large enough
++ * to handle the maximum data transfer in a single (micro)frame for a periodic
++ * transfer.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param qh QH for a periodic endpoint.
++ *
++ * @return 0 if successful, negative error code otherwise.
++ */
++static int check_max_xfer_size(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	int status;
++	uint32_t max_xfer_size;
++	uint32_t max_channel_xfer_size;
++
++	status = 0;
++
++	max_xfer_size = fh_max_packet(qh->maxp) * fh_hb_mult(qh->maxp);
++	max_channel_xfer_size = hcd->core_if->core_params->max_transfer_size;
++
++	if (max_xfer_size > max_channel_xfer_size) {
++		FH_INFO("%s: Periodic xfer length %d > " "max xfer length for channel %d\n",
++				__func__, max_xfer_size, max_channel_xfer_size);	//NOTICE
++		status = -FH_E_NO_SPACE;
++	}
++
++	return status;
++}
++
++/**
++ * Schedules an interrupt or isochronous transfer in the periodic schedule.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param qh QH for the periodic transfer. The QH should already contain the
++ * scheduling information.
++ *
++ * @return 0 if successful, negative error code otherwise.
++ */
++static int schedule_periodic(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	int status = 0;
++
++	status = periodic_channel_available(hcd);
++	if (status) {
++		FH_INFO("%s: No host channel available for periodic " "transfer.\n", __func__);	//NOTICE
++		return status;
++	}
++
++	status = check_periodic_bandwidth(hcd, qh);
++	if (status) {
++		FH_INFO("%s: Insufficient periodic bandwidth for " "periodic transfer.\n", __func__);	//NOTICE
++		return status;
++	}
++
++	status = check_max_xfer_size(hcd, qh);
++	if (status) {
++		FH_INFO("%s: Channel max transfer size too small " "for periodic transfer.\n", __func__);	//NOTICE
++		return status;
++	}
++
++	if (hcd->core_if->dma_desc_enable) {
++		/* Don't rely on SOF and start in ready schedule */
++		FH_LIST_INSERT_TAIL(&hcd->periodic_sched_ready, &qh->qh_list_entry);
++	} else {
++		/* Always start in the inactive schedule. */
++		FH_LIST_INSERT_TAIL(&hcd->periodic_sched_inactive, &qh->qh_list_entry);
++	}
++
++	/* Reserve the periodic channel. */
++	hcd->periodic_channels++;
++
++	/* Update claimed usecs per (micro)frame. */
++	hcd->periodic_usecs += qh->usecs;
++
++	return status;
++}
++
++/**
++ * This function adds a QH to either the non periodic or periodic schedule if
++ * it is not already in the schedule. If the QH is already in the schedule, no
++ * action is taken.
++ *
++ * @return 0 if successful, negative error code otherwise.
++ */
++int fh_otg_hcd_qh_add(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	int status = 0;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	if (!FH_LIST_EMPTY(&qh->qh_list_entry)) {
++		/* QH already in a schedule. */
++		return status;
++	}
++
++	/* Add the new QH to the appropriate schedule */
++	if (fh_qh_is_non_per(qh)) {
++		/* Always start in the inactive schedule. */
++		FH_LIST_INSERT_TAIL(&hcd->non_periodic_sched_inactive,
++				     &qh->qh_list_entry);
++	} else {
++		status = schedule_periodic(hcd, qh);
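++		/*
++		 * Unmask the SOF interrupt when the first periodic QH is
++		 * queued; the SOF handler drives the periodic schedule.
++		 */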
++		if (!hcd->periodic_qh_count) {
++			intr_mask.b.sofintr = 1;
++			FH_MODIFY_REG32(&hcd->core_if->core_global_regs->gintmsk,
++								intr_mask.d32, intr_mask.d32);
++		}
++		hcd->periodic_qh_count++;
++	}
++
++	return status;
++}
++
++/**
++ * Removes an interrupt or isochronous transfer from the periodic schedule.
++ *
++ * @param hcd The HCD state structure for the FH OTG controller.
++ * @param qh QH for the periodic transfer.
++ */
++static void deschedule_periodic(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	FH_LIST_REMOVE_INIT(&qh->qh_list_entry);
++
++	/* Release the periodic channel reservation. */
++	hcd->periodic_channels--;
++
++	/* Update claimed usecs per (micro)frame. */
++	hcd->periodic_usecs -= qh->usecs;
++}
++
++/** 
++ * Removes a QH from either the non-periodic or periodic schedule.  Memory is
++ * not freed.
++ *
++ * @param hcd The HCD state structure.
++ * @param qh QH to remove from schedule. */
++void fh_otg_hcd_qh_remove(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh)
++{
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	if (FH_LIST_EMPTY(&qh->qh_list_entry)) {
++		/* QH is not in a schedule. */
++		return;
++	}
++
++	if (fh_qh_is_non_per(qh)) {
++		if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry) {
++			hcd->non_periodic_qh_ptr =
++			    hcd->non_periodic_qh_ptr->next;
++		}
++		FH_LIST_REMOVE_INIT(&qh->qh_list_entry);
++	} else {
++		deschedule_periodic(hcd, qh);
++		hcd->periodic_qh_count--;
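++		/*
++		 * Mask the SOF interrupt again when the last periodic QH
++		 * is removed; there is nothing periodic left to schedule.
++		 */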
++		if (!hcd->periodic_qh_count) {
++			intr_mask.b.sofintr = 1;
++			FH_MODIFY_REG32(&hcd->core_if->core_global_regs->gintmsk,
++					intr_mask.d32, 0);
++		}
++	}
++}
++
++/**
++ * Deactivates a QH. For non-periodic QHs, removes the QH from the active
++ * non-periodic schedule. The QH is added to the inactive non-periodic
++ * schedule if any QTDs are still attached to the QH.
++ *
++ * For periodic QHs, the QH is removed from the periodic queued schedule. If
++ * there are any QTDs still attached to the QH, the QH is added to either the
++ * periodic inactive schedule or the periodic ready schedule and its next
++ * scheduled frame is calculated. The QH is placed in the ready schedule if
++ * the scheduled frame has been reached already. Otherwise it's placed in the
++ * inactive schedule. If there are no QTDs attached to the QH, the QH is
++ * completely removed from the periodic schedule.
++ */
++void fh_otg_hcd_qh_deactivate(fh_otg_hcd_t * hcd, fh_otg_qh_t * qh,
++			       int sched_next_periodic_split)
++{
++	if (fh_qh_is_non_per(qh)) {
++		fh_otg_hcd_qh_remove(hcd, qh);
++		if (!FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
++			/* Add back to inactive non-periodic schedule. */
++			fh_otg_hcd_qh_add(hcd, qh);
++		}
++	} else {
++		uint16_t frame_number = fh_otg_hcd_get_frame_number(hcd);
++
++		if (qh->do_split) {
++			/* Schedule the next continuing periodic split transfer */
++			if (sched_next_periodic_split) {
++
++				qh->sched_frame = frame_number;
++				if (fh_frame_num_le(frame_number,
++						     fh_frame_num_inc
++						     (qh->start_split_frame,
++						      1))) {
++					/*
++					 * Allow one frame to elapse after start
++					 * split microframe before scheduling
++					 * complete split, but DONT if we are
++					 * complete split, but DON'T if we are
++					 * same frame for an ISOC out.
++					 */
++					if ((qh->ep_type != UE_ISOCHRONOUS) ||
++					    (qh->ep_is_in != 0)) {
++						qh->sched_frame =
++						    fh_frame_num_inc(qh->sched_frame, 1);
++					}
++				}
++			} else {
++				qh->sched_frame =
++				    fh_frame_num_inc(qh->start_split_frame,
++						      qh->interval);
++				if (fh_frame_num_le
++				    (qh->sched_frame, frame_number)) {
++					qh->sched_frame = frame_number;
++				}
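++				/*
++				 * Eight microframes share one FS frame;
++				 * OR-ing 0x7 parks the QH at the end of the
++				 * frame so the next start split begins on a
++				 * fresh frame boundary.
++				 */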
++				qh->sched_frame |= 0x7;
++				qh->start_split_frame = qh->sched_frame;
++			}
++		} else {
++			qh->sched_frame =
++			    fh_frame_num_inc(qh->sched_frame, qh->interval);
++			if (fh_frame_num_le(qh->sched_frame, frame_number)) {
++				qh->sched_frame = frame_number;
++			}
++		}
++
++		if (FH_CIRCLEQ_EMPTY(&qh->qtd_list)) {
++			fh_otg_hcd_qh_remove(hcd, qh);
++		} else {
++			/*
++			 * Remove from periodic_sched_queued and move to
++			 * appropriate queue.
++			 */
++			if (qh->sched_frame == frame_number) {
++				FH_LIST_MOVE_HEAD(&hcd->periodic_sched_ready,
++						   &qh->qh_list_entry);
++			} else {
++				FH_LIST_MOVE_HEAD
++				    (&hcd->periodic_sched_inactive,
++				     &qh->qh_list_entry);
++			}
++		}
++	}
++}
++
++/** 
++ * This function allocates and initializes a QTD. 
++ *
++ * @param urb The URB to create a QTD from.  The URB and its QTD point to
++ * 	      each other, so each URB maps to exactly one QTD.
++ * @param atomic_alloc Flag to do atomic alloc if needed
++ *
++ * @return Returns pointer to the newly allocated QTD, or NULL on error. */
++fh_otg_qtd_t *fh_otg_hcd_qtd_create(fh_otg_hcd_urb_t * urb, int atomic_alloc)
++{
++	fh_otg_qtd_t *qtd;
++
++	qtd = fh_otg_hcd_qtd_alloc(atomic_alloc);
++	if (qtd == NULL) {
++		return NULL;
++	}
++
++	fh_otg_hcd_qtd_init(qtd, urb);
++	return qtd;
++}
++
++/** 
++ * Initializes a QTD structure.
++ *
++ * @param qtd The QTD to initialize.
++ * @param urb The URB to use for initialization.  */
++void fh_otg_hcd_qtd_init(fh_otg_qtd_t * qtd, fh_otg_hcd_urb_t * urb)
++{
++	fh_memset(qtd, 0, sizeof(fh_otg_qtd_t));
++	qtd->urb = urb;
++	if (fh_otg_hcd_get_pipe_type(&urb->pipe_info) == UE_CONTROL) {
++		/*
++		 * The only time the QTD data toggle is used is on the data
++		 * phase of control transfers. This phase always starts with
++		 * DATA1.
++		 */
++		qtd->data_toggle = FH_OTG_HC_PID_DATA1;
++		qtd->control_phase = FH_OTG_CONTROL_SETUP;
++	}
++
++	/* start split */
++	qtd->complete_split = 0;
++	qtd->isoc_split_pos = FH_HCSPLIT_XACTPOS_ALL;
++	qtd->isoc_split_offset = 0;
++	qtd->in_process = 0;
++
++	/* Store the QTD pointer in the URB so the URB can reference its QTD. */
++	urb->qtd = qtd;
++	return;
++}
++
++/**
++ * This function adds a QTD to the QTD-list of a QH.  It will find the correct
++ * QH to place the QTD into.  If it does not find a QH, then it will create a
++ * new QH. If the QH to which the QTD is added is not currently scheduled, it
++ * is placed into the proper schedule based on its EP type.
++ *
++ * @param[in] qtd The QTD to add
++ * @param[in] hcd The FH HCD structure
++ * @param[out] qh out parameter to return queue head
++ * @param atomic_alloc Flag to do atomic alloc if needed
++ *
++ * @return 0 if successful, negative error code otherwise.
++ */
++int fh_otg_hcd_qtd_add(fh_otg_qtd_t * qtd,
++			fh_otg_hcd_t * hcd, fh_otg_qh_t ** qh, int atomic_alloc)
++{
++	int retval = 0;
++	fh_irqflags_t flags;
++
++	fh_otg_hcd_urb_t *urb = qtd->urb;
++
++	/*
++	 * Get the QH which holds the QTD-list to insert to. Create QH if it
++	 * doesn't exist.
++	 */
++	if (*qh == NULL) {
++		*qh = fh_otg_hcd_qh_create(hcd, urb, atomic_alloc);
++		if (*qh == NULL) {
++			retval = -1;
++			goto done;
++		}
++	}
++	FH_SPINLOCK_IRQSAVE(hcd->lock, &flags);
++	retval = fh_otg_hcd_qh_add(hcd, *qh);
++	if (retval == 0) {
++		FH_CIRCLEQ_INSERT_TAIL(&((*qh)->qtd_list), qtd,
++					qtd_list_entry);
++	}
++	FH_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
++
++done:
++
++	return retval;
++}
++
++#endif /* FH_DEVICE_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_os_dep.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_os_dep.h
+new file mode 100644
+index 00000000..cf5bf274
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_os_dep.h
+@@ -0,0 +1,95 @@
++#ifndef _FH_OS_DEP_H_
++#define _FH_OS_DEP_H_
++
++/**
++ * @file
++ *
++ * This file contains OS dependent structures.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/device.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/dma-mapping.h>
++#include <linux/jiffies.h>
++#include <linux/delay.h>
++#include <linux/timer.h>
++#include <linux/workqueue.h>
++#include <linux/stat.h>
++#include <linux/pci.h>
++
++#include <linux/version.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++# include <linux/irq.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++# include <linux/usb/ch9.h>
++#else
++# include <linux/usb_ch9.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# include <linux/usb/gadget.h>
++#else
++# include <linux/usb_gadget.h>
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++# include <asm/irq.h>
++#endif
++
++#ifdef PCI_INTERFACE
++# include <asm/io.h>
++#endif
++
++#ifdef LM_INTERFACE
++# include <asm/unaligned.h>
++# include <asm/sizes.h>
++# include <asm/param.h>
++# include <asm/io.h>
++# include <asm/arch/lm.h>
++# include <asm/arch/irqs.h>
++# include <asm/arch/regs-irq.h>
++#endif
++
++/** The OS page size */
++#define FH_OS_PAGE_SIZE	PAGE_SIZE
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
++typedef int gfp_t;
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
++# define IRQF_SHARED SA_SHIRQ
++#endif
++
++typedef struct os_dependent {
++	/** Base address returned from ioremap() */
++	void *base;
++
++	/** Register offset for Diagnostic API */
++	uint32_t reg_offset;
++
++	struct platform_device *pdev;
++
++	/** Start address of a PCI region */
++	resource_size_t rsrc_start;
++
++	/** Length of a PCI region */
++	resource_size_t rsrc_len;
++
++} os_dependent_t;
++
++#endif /* _FH_OS_DEP_H_ */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.c
+new file mode 100644
+index 00000000..d92eb4a8
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.c
+@@ -0,0 +1,2917 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd.c $
++ * $Revision: #105 $
++ * $Date: 2013/05/16 $
++ * $Change: 2231774 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_HOST_ONLY
++
++/** @file
++ * This file implements PCD Core. All code in this file is portable and doesn't
++ * use any OS specific functions.
++ * PCD Core provides Interface, defined in <code><fh_otg_pcd_if.h></code>
++ * header file, which can be used to implement OS specific PCD interface.
++ *
++ * An important function of the PCD is managing interrupts generated
++ * by the FH_otg controller. The implementation of the FH_otg device
++ * mode interrupt service routines is in fh_otg_pcd_intr.c.
++ *
++ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
++ * @todo Does it work when the request size is greater than DEPTSIZ
++ * transfer size?
++ *
++ */
++
++#include "fh_otg_pcd.h"
++
++#ifdef FH_UTE_CFI
++#include "fh_otg_cfi.h"
++
++extern int init_cfi(cfiobject_t * cfiobj);
++#endif
++
++/**
++ * Choose an endpoint from the EP arrays using the usb_ep handle.
++ */
++static fh_otg_pcd_ep_t *get_ep_from_handle(fh_otg_pcd_t * pcd, void *handle)
++{
++	int i;
++	if (pcd->ep0.priv == handle) {
++		return &pcd->ep0;
++	}
++	for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
++		if (pcd->in_ep[i].priv == handle)
++			return &pcd->in_ep[i];
++		if (pcd->out_ep[i].priv == handle)
++			return &pcd->out_ep[i];
++	}
++
++	return NULL;
++}
++
++/**
++ * This function completes a request.  It calls the request callback.
++ */
++void fh_otg_request_done(fh_otg_pcd_ep_t * ep, fh_otg_pcd_request_t * req,
++			  int32_t status)
++{
++	unsigned stopped = ep->stopped;
++
++	FH_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
++	FH_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
++
++	/* don't modify queue heads during completion callback */
++	ep->stopped = 1;
++	/* spin_unlock/spin_lock now done in fops->complete() */
++	ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
++				req->actual);
++
++	if (ep->pcd->request_pending > 0) {
++		--ep->pcd->request_pending;
++	}
++
++	ep->stopped = stopped;
++	FH_FREE(req);
++}
++
++/**
++ * This function terminates all the requests in the EP request queue.
++ */
++void fh_otg_request_nuke(fh_otg_pcd_ep_t * ep)
++{
++	fh_otg_pcd_request_t *req;
++
++	ep->stopped = 1;
++
++	/* called with irqs blocked?? */
++	while (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
++		req = FH_CIRCLEQ_FIRST(&ep->queue);
++		fh_otg_request_done(ep, req, -FH_E_SHUTDOWN);
++	}
++}
++
++void fh_otg_pcd_start(fh_otg_pcd_t * pcd,
++		       const struct fh_otg_pcd_function_ops *fops)
++{
++	pcd->fops = fops;
++}
++
++/**
++ * PCD Callback function for initializing the PCD when switching to
++ * device mode.
++ *
++ * @param p void pointer to the <code>fh_otg_pcd_t</code>
++ */
++static int32_t fh_otg_pcd_start_cb(void *p)
++{
++	fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) p;
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++
++	/*
++	 * Initialize the core for Device mode.
++	 */
++	if (fh_otg_is_device_mode(core_if)) {
++		fh_otg_core_dev_init(core_if);
++		/* Set core_if's lock pointer to the pcd->lock */
++		core_if->lock = pcd->lock;
++	}
++	return 1;
++}
++
++/** CFI-specific buffer allocation function for EP */
++#ifdef FH_UTE_CFI
++uint8_t *cfiw_ep_alloc_buffer(fh_otg_pcd_t * pcd, void *pep, fh_dma_t * addr,
++			      size_t buflen, int flags)
++{
++	fh_otg_pcd_ep_t *ep;
++	ep = get_ep_from_handle(pcd, pep);
++	if (!ep) {
++		FH_WARN("bad ep\n");
++		return -FH_E_INVALID;
++	}
++
++	return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
++					  flags);
++}
++#else
++uint8_t *cfiw_ep_alloc_buffer(fh_otg_pcd_t * pcd, void *pep, fh_dma_t * addr,
++			      size_t buflen, int flags);
++#endif
++
++/**
++ * PCD Callback function for notifying the PCD when resuming from
++ * suspend.
++ *
++ * @param p void pointer to the <code>fh_otg_pcd_t</code>
++ */
++static int32_t fh_otg_pcd_resume_cb(void *p)
++{
++	fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) p;
++
++	if (pcd->fops->resume) {
++		pcd->fops->resume(pcd);
++	}
++
++	/* Stop the SRP timeout timer. */
++	if ((GET_CORE_IF(pcd)->core_params->phy_type != FH_PHY_TYPE_PARAM_FS)
++	    || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
++		if (GET_CORE_IF(pcd)->srp_timer_started) {
++			GET_CORE_IF(pcd)->srp_timer_started = 0;
++			FH_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
++		}
++	}
++	return 1;
++}
++
++/**
++ * PCD Callback function for notifying the PCD device is suspended.
++ *
++ * @param p void pointer to the <code>fh_otg_pcd_t</code>
++ */
++static int32_t fh_otg_pcd_suspend_cb(void *p)
++{
++	fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) p;
++
++	if (pcd->fops->suspend) {
++		FH_SPINUNLOCK(pcd->lock);
++		pcd->fops->suspend(pcd);
++		FH_SPINLOCK(pcd->lock);
++	}
++
++	return 1;
++}
++
++/**
++ * PCD Callback function for stopping the PCD when switching to Host
++ * mode.
++ *
++ * @param p void pointer to the <code>fh_otg_pcd_t</code>
++ */
++static int32_t fh_otg_pcd_stop_cb(void *p)
++{
++	fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) p;
++	extern void fh_otg_pcd_stop(fh_otg_pcd_t * _pcd);
++
++	fh_otg_pcd_stop(pcd);
++	return 1;
++}
++
++/**
++ * PCD Callback structure for handling mode switching.
++ */
++static fh_otg_cil_callbacks_t pcd_callbacks = {
++	.start = fh_otg_pcd_start_cb,
++	.stop = fh_otg_pcd_stop_cb,
++	.suspend = fh_otg_pcd_suspend_cb,
++	.resume_wakeup = fh_otg_pcd_resume_cb,
++	.p = 0,			/* Set at registration */
++};
++
++/**
++ * This function allocates a DMA Descriptor chain for the Endpoint
++ * buffer to be used for a transfer to/from the specified endpoint.
++ */
++fh_otg_dev_dma_desc_t *fh_otg_ep_alloc_desc_chain(fh_dma_t * dma_desc_addr,
++						    uint32_t count)
++{
++	return FH_DMA_ALLOC_ATOMIC(count * sizeof(fh_otg_dev_dma_desc_t),
++				    dma_desc_addr);
++}
++
++/**
++ * This function frees a DMA descriptor chain that was allocated by fh_otg_ep_alloc_desc_chain().
++ */
++void fh_otg_ep_free_desc_chain(fh_otg_dev_dma_desc_t * desc_addr,
++				uint32_t dma_desc_addr, uint32_t count)
++{
++	FH_DMA_FREE(count * sizeof(fh_otg_dev_dma_desc_t), desc_addr,
++		     dma_desc_addr);
++}
++
++#ifdef FH_EN_ISOC
++
++/**
++ * This function initializes a descriptor chain for Isochronous transfer
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param fh_ep The EP to start the transfer on.
++ *
++ */
++void fh_otg_iso_ep_start_ddma_transfer(fh_otg_core_if_t * core_if,
++					fh_ep_t * fh_ep)
++{
++
++	dsts_data_t dsts = {.d32 = 0 };
++	depctl_data_t depctl = {.d32 = 0 };
++	volatile uint32_t *addr;
++	int i, j;
++	uint32_t len;
++
++	if (fh_ep->is_in)
++		fh_ep->desc_cnt = fh_ep->buf_proc_intrvl / fh_ep->bInterval;
++	else
++		fh_ep->desc_cnt =
++		    fh_ep->buf_proc_intrvl * fh_ep->pkt_per_frm /
++		    fh_ep->bInterval;
++
++	/** Allocate descriptors for double buffering */
++	fh_ep->iso_desc_addr =
++	    fh_otg_ep_alloc_desc_chain(&fh_ep->iso_dma_desc_addr,
++					fh_ep->desc_cnt * 2);
++	if (!fh_ep->iso_desc_addr) {
++		FH_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
++		return;
++	}
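++	/*
++	 * The chain holds desc_cnt descriptors per buffer, twice over for
++	 * double buffering; each buffer's last descriptor raises IOC, and
++	 * the final one also sets L to terminate the list.
++	 */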
++
++	dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
++
++	/** ISO OUT EP */
++	if (fh_ep->is_in == 0) {
++		dev_dma_desc_sts_t sts = {.d32 = 0 };
++		fh_otg_dev_dma_desc_t *dma_desc = fh_ep->iso_desc_addr;
++		dma_addr_t dma_ad;
++		uint32_t data_per_desc;
++		fh_otg_dev_out_ep_regs_t *out_regs =
++		    core_if->dev_if->out_ep_regs[fh_ep->num];
++		int offset;
++
++		addr = &core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl;
++		dma_ad = (dma_addr_t) FH_READ_REG32(&(out_regs->doepdma));
++
++		/** Buffer 0 descriptors setup */
++		dma_ad = fh_ep->dma_addr0;
++
++		sts.b_iso_out.bs = BS_HOST_READY;
++		sts.b_iso_out.rxsts = 0;
++		sts.b_iso_out.l = 0;
++		sts.b_iso_out.sp = 0;
++		sts.b_iso_out.ioc = 0;
++		sts.b_iso_out.pid = 0;
++		sts.b_iso_out.framenum = 0;
++
++		offset = 0;
++		for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
++		     i += fh_ep->pkt_per_frm) {
++
++			for (j = 0; j < fh_ep->pkt_per_frm; ++j) {
++				uint32_t len = (j + 1) * fh_ep->maxpacket;
++				if (len > fh_ep->data_per_frame)
++					data_per_desc =
++					    fh_ep->data_per_frame -
++					    j * fh_ep->maxpacket;
++				else
++					data_per_desc = fh_ep->maxpacket;
++				len = data_per_desc % 4;
++				if (len)
++					data_per_desc += 4 - len;
++
++				sts.b_iso_out.rxbytes = data_per_desc;
++				dma_desc->buf = dma_ad;
++				dma_desc->status.d32 = sts.d32;
++
++				offset += data_per_desc;
++				dma_desc++;
++				dma_ad += data_per_desc;
++			}
++		}
++
++		for (j = 0; j < fh_ep->pkt_per_frm - 1; ++j) {
++			uint32_t len = (j + 1) * fh_ep->maxpacket;
++			if (len > fh_ep->data_per_frame)
++				data_per_desc =
++				    fh_ep->data_per_frame -
++				    j * fh_ep->maxpacket;
++			else
++				data_per_desc = fh_ep->maxpacket;
++			len = data_per_desc % 4;
++			if (len)
++				data_per_desc += 4 - len;
++			sts.b_iso_out.rxbytes = data_per_desc;
++			dma_desc->buf = dma_ad;
++			dma_desc->status.d32 = sts.d32;
++
++			offset += data_per_desc;
++			dma_desc++;
++			dma_ad += data_per_desc;
++		}
++
++		sts.b_iso_out.ioc = 1;
++		len = (j + 1) * fh_ep->maxpacket;
++		if (len > fh_ep->data_per_frame)
++			data_per_desc =
++			    fh_ep->data_per_frame - j * fh_ep->maxpacket;
++		else
++			data_per_desc = fh_ep->maxpacket;
++		len = data_per_desc % 4;
++		if (len)
++			data_per_desc += 4 - len;
++		sts.b_iso_out.rxbytes = data_per_desc;
++
++		dma_desc->buf = dma_ad;
++		dma_desc->status.d32 = sts.d32;
++		dma_desc++;
++
++		/** Buffer 1 descriptors setup */
++		sts.b_iso_out.ioc = 0;
++		dma_ad = fh_ep->dma_addr1;
++
++		offset = 0;
++		for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
++		     i += fh_ep->pkt_per_frm) {
++			for (j = 0; j < fh_ep->pkt_per_frm; ++j) {
++				uint32_t len = (j + 1) * fh_ep->maxpacket;
++				if (len > fh_ep->data_per_frame)
++					data_per_desc =
++					    fh_ep->data_per_frame -
++					    j * fh_ep->maxpacket;
++				else
++					data_per_desc = fh_ep->maxpacket;
++				len = data_per_desc % 4;
++				if (len)
++					data_per_desc += 4 - len;
++
++				sts.b_iso_out.rxbytes = data_per_desc;
++				dma_desc->buf = dma_ad;
++				dma_desc->status.d32 = sts.d32;
++
++				offset += data_per_desc;
++				dma_desc++;
++				dma_ad += data_per_desc;
++			}
++		}
++		for (j = 0; j < fh_ep->pkt_per_frm - 1; ++j) {
++			data_per_desc =
++			    ((j + 1) * fh_ep->maxpacket >
++			     fh_ep->data_per_frame) ? fh_ep->data_per_frame -
++			    j * fh_ep->maxpacket : fh_ep->maxpacket;
++			data_per_desc +=
++			    (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
++			sts.b_iso_out.rxbytes = data_per_desc;
++			dma_desc->buf = dma_ad;
++			dma_desc->status.d32 = sts.d32;
++
++			offset += data_per_desc;
++			dma_desc++;
++			dma_ad += data_per_desc;
++		}
++
++		sts.b_iso_out.ioc = 1;
++		sts.b_iso_out.l = 1;
++		data_per_desc =
++		    ((j + 1) * fh_ep->maxpacket >
++		     fh_ep->data_per_frame) ? fh_ep->data_per_frame -
++		    j * fh_ep->maxpacket : fh_ep->maxpacket;
++		data_per_desc +=
++		    (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
++		sts.b_iso_out.rxbytes = data_per_desc;
++
++		dma_desc->buf = dma_ad;
++		dma_desc->status.d32 = sts.d32;
++
++		fh_ep->next_frame = 0;
++
++		/** Write dma_ad into DOEPDMA register */
++		FH_WRITE_REG32(&(out_regs->doepdma),
++				(uint32_t) fh_ep->iso_dma_desc_addr);
++
++	}
++	/** ISO IN EP */
++	else {
++		dev_dma_desc_sts_t sts = {.d32 = 0 };
++		fh_otg_dev_dma_desc_t *dma_desc = fh_ep->iso_desc_addr;
++		dma_addr_t dma_ad;
++		fh_otg_dev_in_ep_regs_t *in_regs =
++		    core_if->dev_if->in_ep_regs[fh_ep->num];
++		unsigned int frmnumber;
++		fifosize_data_t txfifosize, rxfifosize;
++
++		txfifosize.d32 =
++		    FH_READ_REG32(&core_if->dev_if->in_ep_regs[fh_ep->num]->
++				   dtxfsts);
++		rxfifosize.d32 =
++		    FH_READ_REG32(&core_if->core_global_regs->grxfsiz);
++
++		addr = &core_if->dev_if->in_ep_regs[fh_ep->num]->diepctl;
++
++		dma_ad = fh_ep->dma_addr0;
++
++		dsts.d32 =
++		    FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
++
++		sts.b_iso_in.bs = BS_HOST_READY;
++		sts.b_iso_in.txsts = 0;
++		sts.b_iso_in.sp =
++		    (fh_ep->data_per_frame % fh_ep->maxpacket) ? 1 : 0;
++		sts.b_iso_in.ioc = 0;
++		sts.b_iso_in.pid = fh_ep->pkt_per_frm;
++
++		frmnumber = fh_ep->next_frame;
++
++		sts.b_iso_in.framenum = frmnumber;
++		sts.b_iso_in.txbytes = fh_ep->data_per_frame;
++		sts.b_iso_in.l = 0;
++
++		/** Buffer 0 descriptors setup */
++		for (i = 0; i < fh_ep->desc_cnt - 1; i++) {
++			dma_desc->buf = dma_ad;
++			dma_desc->status.d32 = sts.d32;
++			dma_desc++;
++
++			dma_ad += fh_ep->data_per_frame;
++			sts.b_iso_in.framenum += fh_ep->bInterval;
++		}
++
++		sts.b_iso_in.ioc = 1;
++		dma_desc->buf = dma_ad;
++		dma_desc->status.d32 = sts.d32;
++		++dma_desc;
++
++		/** Buffer 1 descriptors setup */
++		sts.b_iso_in.ioc = 0;
++		dma_ad = fh_ep->dma_addr1;
++
++		for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
++		     i += fh_ep->pkt_per_frm) {
++			dma_desc->buf = dma_ad;
++			dma_desc->status.d32 = sts.d32;
++			dma_desc++;
++
++			dma_ad += fh_ep->data_per_frame;
++			sts.b_iso_in.framenum += fh_ep->bInterval;
++
++			sts.b_iso_in.ioc = 0;
++		}
++		sts.b_iso_in.ioc = 1;
++		sts.b_iso_in.l = 1;
++
++		dma_desc->buf = dma_ad;
++		dma_desc->status.d32 = sts.d32;
++
++		fh_ep->next_frame = sts.b_iso_in.framenum + fh_ep->bInterval;
++
++		/** Write dma_ad into diepdma register */
++		FH_WRITE_REG32(&(in_regs->diepdma),
++				(uint32_t) fh_ep->iso_dma_desc_addr);
++	}
++	/** Enable endpoint, clear nak  */
++	depctl.d32 = 0;
++	depctl.b.epena = 1;
++	depctl.b.usbactep = 1;
++	depctl.b.cnak = 1;
++
++	FH_MODIFY_REG32(addr, depctl.d32, depctl.d32);
++	depctl.d32 = FH_READ_REG32(addr);
++}
++
++/**
++ * This function starts an isochronous transfer in buffer DMA mode
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ *
++ */
++void fh_otg_iso_ep_start_buf_transfer(fh_otg_core_if_t * core_if,
++				       fh_ep_t * ep)
++{
++	depctl_data_t depctl = {.d32 = 0 };
++	volatile uint32_t *addr;
++
++	if (ep->is_in) {
++		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
++	} else {
++		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
++	}
++
++	if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
++		return;
++	} else {
++		deptsiz_data_t deptsiz = {.d32 = 0 };
++
++		ep->xfer_len =
++		    ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
++		ep->pkt_cnt =
++		    (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
++		ep->xfer_count = 0;
++		ep->xfer_buff =
++		    (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
++		ep->dma_addr =
++		    (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
++
++		if (ep->is_in) {
++			/*
++			 * Program the transfer size and packet count as
++			 * follows: xfersize = N * maxpacket + short_packet;
++			 * pktcnt = N + (short_packet exists ? 1 : 0)
++			 */
++			deptsiz.b.mc = ep->pkt_per_frm;
++			deptsiz.b.xfersize = ep->xfer_len;
++			deptsiz.b.pktcnt =
++			    (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
++			FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
++					dieptsiz, deptsiz.d32);
++
++			/* Write the DMA register */
++			FH_WRITE_REG32(&
++					(core_if->dev_if->in_ep_regs[ep->num]->
++					 diepdma), (uint32_t) ep->dma_addr);
++
++		} else {
++			deptsiz.b.pktcnt =
++			    (ep->xfer_len + (ep->maxpacket - 1)) /
++			    ep->maxpacket;
++			deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
++
++			FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
++					doeptsiz, deptsiz.d32);
++
++			/* Write the DMA register */
++			FH_WRITE_REG32(&
++					(core_if->dev_if->out_ep_regs[ep->num]->
++					 doepdma), (uint32_t) ep->dma_addr);
++
++		}
++		/** Enable endpoint, clear nak  */
++		depctl.d32 = 0;
++		depctl.b.epena = 1;
++		depctl.b.cnak = 1;
++
++		FH_MODIFY_REG32(addr, depctl.d32, depctl.d32);
++	}
++}
++
++/**
++ * This function does the setup for a data transfer for an EP and
++ * starts the transfer. For an IN transfer, the packets will be
++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
++ * the packets are unloaded from the Rx FIFO in the ISR.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ */
++
++static void fh_otg_iso_ep_start_transfer(fh_otg_core_if_t * core_if,
++					  fh_ep_t * ep)
++{
++	if (core_if->dma_enable) {
++		if (core_if->dma_desc_enable) {
++			if (ep->is_in) {
++				ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
++			} else {
++				ep->desc_cnt = ep->pkt_cnt;
++			}
++			fh_otg_iso_ep_start_ddma_transfer(core_if, ep);
++		} else {
++			if (core_if->pti_enh_enable) {
++				fh_otg_iso_ep_start_buf_transfer(core_if, ep);
++			} else {
++				ep->cur_pkt_addr =
++				    (ep->proc_buf_num) ? ep->xfer_buff1 : ep->
++				    xfer_buff0;
++				ep->cur_pkt_dma_addr =
++				    (ep->proc_buf_num) ? ep->dma_addr1 : ep->
++				    dma_addr0;
++				fh_otg_iso_ep_start_frm_transfer(core_if, ep);
++			}
++		}
++	} else {
++		ep->cur_pkt_addr =
++		    (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
++		ep->cur_pkt_dma_addr =
++		    (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
++		fh_otg_iso_ep_start_frm_transfer(core_if, ep);
++	}
++}
++
++/**
++ * This function stops transfer for an EP and
++ * resets the ep's variables. 
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to stop the transfer on.
++ */
++
++void fh_otg_iso_ep_stop_transfer(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	depctl_data_t depctl = {.d32 = 0 };
++	volatile uint32_t *addr;
++
++	if (ep->is_in == 1) {
++		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
++	} else {
++		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
++	}
++
++	/* disable the ep */
++	depctl.d32 = FH_READ_REG32(addr);
++
++	depctl.b.epdis = 1;
++	depctl.b.snak = 1;
++
++	FH_WRITE_REG32(addr, depctl.d32);
++
++	if (core_if->dma_desc_enable &&
++	    ep->iso_desc_addr && ep->iso_dma_desc_addr) {
++		fh_otg_ep_free_desc_chain(ep->iso_desc_addr,
++					   ep->iso_dma_desc_addr,
++					   ep->desc_cnt * 2);
++	}
++
++	/* reset variables */
++	ep->dma_addr0 = 0;
++	ep->dma_addr1 = 0;
++	ep->xfer_buff0 = 0;
++	ep->xfer_buff1 = 0;
++	ep->data_per_frame = 0;
++	ep->data_pattern_frame = 0;
++	ep->sync_frame = 0;
++	ep->buf_proc_intrvl = 0;
++	ep->bInterval = 0;
++	ep->proc_buf_num = 0;
++	ep->pkt_per_frm = 0;
++	ep->desc_cnt = 0;
++	ep->iso_desc_addr = 0;
++	ep->iso_dma_desc_addr = 0;
++}
++
++int fh_otg_pcd_iso_ep_start(fh_otg_pcd_t * pcd, void *ep_handle,
++			     uint8_t * buf0, uint8_t * buf1, fh_dma_t dma0,
++			     fh_dma_t dma1, int sync_frame, int dp_frame,
++			     int data_per_frame, int start_frame,
++			     int buf_proc_intrvl, void *req_handle,
++			     int atomic_alloc)
++{
++	fh_otg_pcd_ep_t *ep;
++	fh_irqflags_t flags = 0;
++	fh_ep_t *fh_ep;
++	int32_t frm_data;
++	dsts_data_t dsts;
++	fh_otg_core_if_t *core_if;
++
++	ep = get_ep_from_handle(pcd, ep_handle);
++
++	if (!ep || !ep->desc || ep->fh_ep.num == 0) {
++		FH_WARN("bad ep\n");
++		return -FH_E_INVALID;
++	}
++
++	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++	core_if = GET_CORE_IF(pcd);
++	fh_ep = &ep->fh_ep;
++
++	if (ep->iso_req_handle) {
++		FH_WARN("ISO request in progress\n");
++	}
++
++	fh_ep->dma_addr0 = dma0;
++	fh_ep->dma_addr1 = dma1;
++
++	fh_ep->xfer_buff0 = buf0;
++	fh_ep->xfer_buff1 = buf1;
++
++	fh_ep->data_per_frame = data_per_frame;
++
++	/** @todo - pattern data support is to be implemented in the future */
++	fh_ep->data_pattern_frame = dp_frame;
++	fh_ep->sync_frame = sync_frame;
++
++	fh_ep->buf_proc_intrvl = buf_proc_intrvl;
++
++	fh_ep->bInterval = 1 << (ep->desc->bInterval - 1);
++
++	fh_ep->proc_buf_num = 0;
++
++	fh_ep->pkt_per_frm = 0;
++	frm_data = ep->fh_ep.data_per_frame;
++	while (frm_data > 0) {
++		fh_ep->pkt_per_frm++;
++		frm_data -= ep->fh_ep.maxpacket;
++	}
++
++	dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
++
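++	/*
++	 * With no explicit start frame, begin at the next SOF and round up
++	 * to a bInterval boundary so the first transfer lands on a
++	 * scheduled (micro)frame.
++	 */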
++	if (start_frame == -1) {
++		fh_ep->next_frame = dsts.b.soffn + 1;
++		if (fh_ep->bInterval != 1) {
++			fh_ep->next_frame =
++			    fh_ep->next_frame + (fh_ep->bInterval - 1 -
++						  fh_ep->next_frame %
++						  fh_ep->bInterval);
++		}
++	} else {
++		fh_ep->next_frame = start_frame;
++	}
++
++	if (!core_if->pti_enh_enable) {
++		fh_ep->pkt_cnt =
++		    fh_ep->buf_proc_intrvl * fh_ep->pkt_per_frm /
++		    fh_ep->bInterval;
++	} else {
++		fh_ep->pkt_cnt =
++		    (fh_ep->data_per_frame *
++		     (fh_ep->buf_proc_intrvl / fh_ep->bInterval)
++		     - 1 + fh_ep->maxpacket) / fh_ep->maxpacket;
++	}
++
++	if (core_if->dma_desc_enable) {
++		fh_ep->desc_cnt =
++		    fh_ep->buf_proc_intrvl * fh_ep->pkt_per_frm /
++		    fh_ep->bInterval;
++	}
++
++	if (atomic_alloc) {
++		fh_ep->pkt_info =
++		    FH_ALLOC_ATOMIC(sizeof(iso_pkt_info_t) * fh_ep->pkt_cnt);
++	} else {
++		fh_ep->pkt_info =
++		    FH_ALLOC(sizeof(iso_pkt_info_t) * fh_ep->pkt_cnt);
++	}
++	if (!fh_ep->pkt_info) {
++		FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++		return -FH_E_NO_MEMORY;
++	}
++	if (core_if->pti_enh_enable) {
++		fh_memset(fh_ep->pkt_info, 0,
++			   sizeof(iso_pkt_info_t) * fh_ep->pkt_cnt);
++	}
++
++	fh_ep->cur_pkt = 0;
++	ep->iso_req_handle = req_handle;
++
++	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++	fh_otg_iso_ep_start_transfer(core_if, fh_ep);
++	return 0;
++}
++
++int fh_otg_pcd_iso_ep_stop(fh_otg_pcd_t * pcd, void *ep_handle,
++			    void *req_handle)
++{
++	fh_irqflags_t flags = 0;
++	fh_otg_pcd_ep_t *ep;
++	fh_ep_t *fh_ep;
++
++	ep = get_ep_from_handle(pcd, ep_handle);
++	if (!ep || !ep->desc || ep->fh_ep.num == 0) {
++		FH_WARN("bad ep\n");
++		return -FH_E_INVALID;
++	}
++	fh_ep = &ep->fh_ep;
++
++	fh_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), fh_ep);
++
++	FH_FREE(fh_ep->pkt_info);
++	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++	if (ep->iso_req_handle != req_handle) {
++		FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++		return -FH_E_INVALID;
++	}
++
++	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++
++	ep->iso_req_handle = 0;
++	return 0;
++}
++
++/**
++ * This function is used for periodic data exchange between the PCD and
++ * gadget drivers for isochronous EPs.
++ *
++ *	- Every time a sync period completes this function is called to
++ *	  perform data exchange between PCD and gadget
++ */
++void fh_otg_iso_buffer_done(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * ep,
++			     void *req_handle)
++{
++	int i;
++	fh_ep_t *fh_ep;
++
++	fh_ep = &ep->fh_ep;
++
++	FH_SPINUNLOCK(ep->pcd->lock);
++	pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
++				 fh_ep->proc_buf_num ^ 0x1);
++	FH_SPINLOCK(ep->pcd->lock);
++
++	for (i = 0; i < fh_ep->pkt_cnt; ++i) {
++		fh_ep->pkt_info[i].status = 0;
++		fh_ep->pkt_info[i].offset = 0;
++		fh_ep->pkt_info[i].length = 0;
++	}
++}
++
++int fh_otg_pcd_get_iso_packet_count(fh_otg_pcd_t * pcd, void *ep_handle,
++				     void *iso_req_handle)
++{
++	fh_otg_pcd_ep_t *ep;
++	fh_ep_t *fh_ep;
++
++	ep = get_ep_from_handle(pcd, ep_handle);
++	if (!ep || !ep->desc || ep->fh_ep.num == 0) {
++		FH_WARN("bad ep\n");
++		return -FH_E_INVALID;
++	}
++	fh_ep = &ep->fh_ep;
++
++	return fh_ep->pkt_cnt;
++}
++
++void fh_otg_pcd_get_iso_packet_params(fh_otg_pcd_t * pcd, void *ep_handle,
++				       void *iso_req_handle, int packet,
++				       int *status, int *actual, int *offset)
++{
++	fh_otg_pcd_ep_t *ep;
++	fh_ep_t *fh_ep;
++
++	ep = get_ep_from_handle(pcd, ep_handle);
++	if (!ep) {
++		FH_WARN("bad ep\n");
++		return;
++	}
++
++	fh_ep = &ep->fh_ep;
++
++	*status = fh_ep->pkt_info[packet].status;
++	*actual = fh_ep->pkt_info[packet].length;
++	*offset = fh_ep->pkt_info[packet].offset;
++}
++
++#endif /* FH_EN_ISOC */
++
++static void fh_otg_pcd_init_ep(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * pcd_ep,
++				uint32_t is_in, uint32_t ep_num)
++{
++	/* Init EP structure */
++	pcd_ep->desc = 0;
++	pcd_ep->pcd = pcd;
++	pcd_ep->stopped = 1;
++	pcd_ep->queue_sof = 0;
++
++	/* Init FH ep structure */
++	pcd_ep->fh_ep.is_in = is_in;
++	pcd_ep->fh_ep.num = ep_num;
++	pcd_ep->fh_ep.active = 0;
++	pcd_ep->fh_ep.tx_fifo_num = 0;
++	/* Control until the EP is activated */
++	pcd_ep->fh_ep.type = FH_OTG_EP_TYPE_CONTROL;
++	pcd_ep->fh_ep.maxpacket = MAX_PACKET_SIZE;
++	pcd_ep->fh_ep.dma_addr = 0;
++	pcd_ep->fh_ep.start_xfer_buff = 0;
++	pcd_ep->fh_ep.xfer_buff = 0;
++	pcd_ep->fh_ep.xfer_len = 0;
++	pcd_ep->fh_ep.xfer_count = 0;
++	pcd_ep->fh_ep.sent_zlp = 0;
++	pcd_ep->fh_ep.total_len = 0;
++	pcd_ep->fh_ep.desc_addr = 0;
++	pcd_ep->fh_ep.dma_desc_addr = 0;
++	FH_CIRCLEQ_INIT(&pcd_ep->queue);
++}
++
++/**
++ * Initialize ep's
++ */
++static void fh_otg_pcd_reinit(fh_otg_pcd_t * pcd)
++{
++	int i;
++	uint32_t hwcfg1;
++	fh_otg_pcd_ep_t *ep;
++	int in_ep_cntr, out_ep_cntr;
++	uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
++	uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
++
++	/**
++	 * Initialize the EP0 structure.
++	 */
++	ep = &pcd->ep0;
++	fh_otg_pcd_init_ep(pcd, ep, 0, 0);
++
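++	/*
++	 * HWCFG1 appears to encode two bits per endpoint (0 = bidirectional,
++	 * 1 = IN only, 2 = OUT only); the shifts below test the bit that
++	 * would rule out the direction being scanned for.
++	 */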
++	in_ep_cntr = 0;
++	hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
++	for (i = 1; in_ep_cntr < num_in_eps; i++) {
++		if ((hwcfg1 & 0x1) == 0) {
++			fh_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
++			in_ep_cntr++;
++			/**
++			 * @todo NGS: Add direction to EP, based on contents
++			 * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
++			 */
++			fh_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
++
++			FH_CIRCLEQ_INIT(&ep->queue);
++		}
++		hwcfg1 >>= 2;
++	}
++
++	out_ep_cntr = 0;
++	hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
++	for (i = 1; out_ep_cntr < num_out_eps; i++) {
++		if ((hwcfg1 & 0x1) == 0) {
++			fh_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
++			out_ep_cntr++;
++			/**
++			 * @todo NGS: Add direction to EP, based on contents
++			 * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
++			 */
++			fh_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
++			FH_CIRCLEQ_INIT(&ep->queue);
++		}
++		hwcfg1 >>= 2;
++	}
++
++	pcd->ep0state = EP0_DISCONNECT;
++	pcd->ep0.fh_ep.maxpacket = MAX_EP0_SIZE;
++	pcd->ep0.fh_ep.type = FH_OTG_EP_TYPE_CONTROL;
++}
++
++/**
++ * This function is called when the SRP timer expires. The SRP should
++ * complete within 6 seconds.
++ */
++static void srp_timeout(void *ptr)
++{
++	gotgctl_data_t gotgctl;
++	fh_otg_core_if_t *core_if = (fh_otg_core_if_t *) ptr;
++	volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
++
++	gotgctl.d32 = FH_READ_REG32(addr);
++
++	core_if->srp_timer_started = 0;
++
++	if (core_if->adp_enable) {
++		if (gotgctl.b.bsesvld == 0) {
++			gpwrdn_data_t gpwrdn = {.d32 = 0 };
++			FH_PRINTF("SRP Timeout BSESSVLD = 0\n");
++			/* Power off the core */
++			if (core_if->power_down == 2) {
++				gpwrdn.b.pwrdnswtch = 1;
++				FH_MODIFY_REG32(&core_if->
++						 core_global_regs->gpwrdn,
++						 gpwrdn.d32, 0);
++			}
++
++			gpwrdn.d32 = 0;
++			gpwrdn.b.pmuintsel = 1;
++			gpwrdn.b.pmuactv = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
++					 gpwrdn.d32);
++			fh_otg_adp_probe_start(core_if);
++		} else {
++			FH_PRINTF("SRP Timeout BSESSVLD = 1\n");
++			core_if->op_state = B_PERIPHERAL;
++			fh_otg_core_init(core_if);
++			fh_otg_enable_global_interrupts(core_if);
++			cil_pcd_start(core_if);
++		}
++	}
++
++	if ((core_if->core_params->phy_type == FH_PHY_TYPE_PARAM_FS) &&
++	    (core_if->core_params->i2c_enable)) {
++		FH_PRINTF("SRP Timeout\n");
++
++		if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
++			if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
++				core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
++			}
++
++			/* Clear Session Request */
++			gotgctl.d32 = 0;
++			gotgctl.b.sesreq = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
++					 gotgctl.d32, 0);
++
++			core_if->srp_success = 0;
++		} else {
++			__FH_ERROR("Device not connected/responding\n");
++			gotgctl.b.sesreq = 0;
++			FH_WRITE_REG32(addr, gotgctl.d32);
++		}
++	} else if (gotgctl.b.sesreq) {
++		FH_PRINTF("SRP Timeout\n");
++
++		__FH_ERROR("Device not connected/responding\n");
++		gotgctl.b.sesreq = 0;
++		FH_WRITE_REG32(addr, gotgctl.d32);
++	} else {
++		FH_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
++	}
++}
++
++/**
++ * Tasklet
++ *
++ */
++extern void start_next_request(fh_otg_pcd_ep_t * ep);
++
++static void start_xfer_tasklet_func(void *data)
++{
++	fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) data;
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++
++	int i;
++	depctl_data_t diepctl;
++
++	FH_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
++
++	diepctl.d32 = FH_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
++
++	if (pcd->ep0.queue_sof) {
++		pcd->ep0.queue_sof = 0;
++		start_next_request(&pcd->ep0);
++		// break;
++	}
++
++	for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
++		depctl_data_t diepctl;
++		diepctl.d32 =
++		    FH_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
++
++		if (pcd->in_ep[i].queue_sof) {
++			pcd->in_ep[i].queue_sof = 0;
++			start_next_request(&pcd->in_ep[i]);
++			// break;
++		}
++	}
++
++	return;
++}
++
++/**
++ * This function initializes the PCD portion of the driver.
++ *
++ */
++fh_otg_pcd_t *fh_otg_pcd_init(fh_otg_core_if_t * core_if)
++{
++	fh_otg_pcd_t *pcd = NULL;
++	fh_otg_dev_if_t *dev_if;
++	int i;
++
++	/*
++	 * Allocate PCD structure
++	 */
++	pcd = FH_ALLOC(sizeof(fh_otg_pcd_t));
++
++	if (pcd == NULL) {
++		return NULL;
++	}
++
++	pcd->lock = FH_SPINLOCK_ALLOC();
++	if (!pcd->lock) {
++		FH_ERROR("Could not allocate lock for pcd");
++		FH_FREE(pcd);
++		return NULL;
++	}
++	/* Set core_if's lock pointer to pcd->lock */
++	core_if->lock = pcd->lock;
++	pcd->core_if = core_if;
++
++	dev_if = core_if->dev_if;
++	dev_if->isoc_ep = NULL;
++
++	if (core_if->hwcfg4.b.ded_fifo_en) {
++		FH_PRINTF("Dedicated Tx FIFOs mode\n");
++	} else {
++		FH_PRINTF("Shared Tx FIFO mode\n");
++	}
++
++	/*
++	 * Initialize the core for Device mode here if there is no ADP support.
++	 * Otherwise it will be done later in fh_otg_adp_start routine.
++	 */
++	if (fh_otg_is_device_mode(core_if) /*&& !core_if->adp_enable */ ) {
++		fh_otg_core_dev_init(core_if);
++	}
++
++	/*
++	 * Register the PCD Callbacks.
++	 */
++	fh_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
++
++	/*
++	 * Initialize the DMA buffer for SETUP packets
++	 */
++	if (GET_CORE_IF(pcd)->dma_enable) {
++		pcd->setup_pkt =
++		    FH_DMA_ALLOC(sizeof(*pcd->setup_pkt) * 5,
++				  &pcd->setup_pkt_dma_handle);
++		if (pcd->setup_pkt == NULL) {
++			FH_FREE(pcd);
++			return NULL;
++		}
++
++		pcd->status_buf =
++		    FH_DMA_ALLOC(sizeof(uint16_t),
++				  &pcd->status_buf_dma_handle);
++		if (pcd->status_buf == NULL) {
++			FH_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
++				     pcd->setup_pkt, pcd->setup_pkt_dma_handle);
++			FH_FREE(pcd);
++			return NULL;
++		}
++
++		if (GET_CORE_IF(pcd)->dma_desc_enable) {
++			dev_if->setup_desc_addr[0] =
++			    fh_otg_ep_alloc_desc_chain
++			    (&dev_if->dma_setup_desc_addr[0], 1);
++			dev_if->setup_desc_addr[1] =
++			    fh_otg_ep_alloc_desc_chain
++			    (&dev_if->dma_setup_desc_addr[1], 1);
++			dev_if->in_desc_addr =
++			    fh_otg_ep_alloc_desc_chain
++			    (&dev_if->dma_in_desc_addr, 1);
++			dev_if->out_desc_addr =
++			    fh_otg_ep_alloc_desc_chain
++			    (&dev_if->dma_out_desc_addr, 1);
++			pcd->data_terminated = 0;
++
++			if (dev_if->setup_desc_addr[0] == 0
++			    || dev_if->setup_desc_addr[1] == 0
++			    || dev_if->in_desc_addr == 0
++			    || dev_if->out_desc_addr == 0) {
++
++				if (dev_if->out_desc_addr)
++					fh_otg_ep_free_desc_chain
++					    (dev_if->out_desc_addr,
++					     dev_if->dma_out_desc_addr, 1);
++				if (dev_if->in_desc_addr)
++					fh_otg_ep_free_desc_chain
++					    (dev_if->in_desc_addr,
++					     dev_if->dma_in_desc_addr, 1);
++				if (dev_if->setup_desc_addr[1])
++					fh_otg_ep_free_desc_chain
++					    (dev_if->setup_desc_addr[1],
++					     dev_if->dma_setup_desc_addr[1], 1);
++				if (dev_if->setup_desc_addr[0])
++					fh_otg_ep_free_desc_chain
++					    (dev_if->setup_desc_addr[0],
++					     dev_if->dma_setup_desc_addr[0], 1);
++
++				FH_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
++					     pcd->setup_pkt,
++					     pcd->setup_pkt_dma_handle);
++				FH_DMA_FREE(sizeof(*pcd->status_buf),
++					     pcd->status_buf,
++					     pcd->status_buf_dma_handle);
++
++				FH_FREE(pcd);
++
++				return NULL;
++			}
++		}
++	} else {
++		pcd->setup_pkt = FH_ALLOC(sizeof(*pcd->setup_pkt) * 5);
++		if (pcd->setup_pkt == NULL) {
++			FH_FREE(pcd);
++			return NULL;
++		}
++
++		pcd->status_buf = FH_ALLOC(sizeof(uint16_t));
++		if (pcd->status_buf == NULL) {
++			FH_FREE(pcd->setup_pkt);
++			FH_FREE(pcd);
++			return NULL;
++		}
++	}
++
++	fh_otg_pcd_reinit(pcd);
++
++	/* Allocate the cfi object for the PCD */
++#ifdef FH_UTE_CFI
++	pcd->cfi = FH_ALLOC(sizeof(cfiobject_t));
++	if (NULL == pcd->cfi)
++		goto fail;
++	if (init_cfi(pcd->cfi)) {
++		CFI_INFO("%s: Failed to init the CFI object\n", __func__);
++		goto fail;
++	}
++#endif
++
++	/* Initialize tasklets */
++	pcd->start_xfer_tasklet = FH_TASK_ALLOC("xfer_tasklet",
++						 start_xfer_tasklet_func, pcd);
++	pcd->test_mode_tasklet = FH_TASK_ALLOC("test_mode_tasklet",
++						do_test_mode, pcd);
++
++	/* Initialize SRP timer */
++	core_if->srp_timer = FH_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
++
++	if (core_if->core_params->dev_out_nak) {
++		/**
++		 * Initialize xfer timeout timer. Implemented for
++		 * 2.93a feature "Device DDMA OUT NAK Enhancement"
++		 */
++		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
++			pcd->core_if->ep_xfer_timer[i] =
++			    FH_TIMER_ALLOC("ep timer", ep_xfer_timeout,
++					    &pcd->core_if->ep_xfer_info[i]);
++		}
++	}
++
++	return pcd;
++#ifdef FH_UTE_CFI
++fail:
++#endif
++	if (pcd->setup_pkt)
++		FH_FREE(pcd->setup_pkt);
++	if (pcd->status_buf)
++		FH_FREE(pcd->status_buf);
++#ifdef FH_UTE_CFI
++	if (pcd->cfi)
++		FH_FREE(pcd->cfi);
++#endif
++	if (pcd)
++		FH_FREE(pcd);
++	return NULL;
++
++}
++
++/**
++ * Remove PCD specific data
++ */
++void fh_otg_pcd_remove(fh_otg_pcd_t * pcd)
++{
++	fh_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
++	int i;
++	if (pcd->core_if->core_params->dev_out_nak) {
++		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
++			FH_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
++			pcd->core_if->ep_xfer_info[i].state = 0;
++		}
++	}
++
++	if (GET_CORE_IF(pcd)->dma_enable) {
++		FH_DMA_FREE(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
++			     pcd->setup_pkt_dma_handle);
++		FH_DMA_FREE(sizeof(uint16_t), pcd->status_buf,
++			     pcd->status_buf_dma_handle);
++		if (GET_CORE_IF(pcd)->dma_desc_enable) {
++			fh_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
++						   dev_if->dma_setup_desc_addr
++						   [0], 1);
++			fh_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
++						   dev_if->dma_setup_desc_addr
++						   [1], 1);
++			fh_otg_ep_free_desc_chain(dev_if->in_desc_addr,
++						   dev_if->dma_in_desc_addr, 1);
++			fh_otg_ep_free_desc_chain(dev_if->out_desc_addr,
++						   dev_if->dma_out_desc_addr,
++						   1);
++		}
++	} else {
++		FH_FREE(pcd->setup_pkt);
++		FH_FREE(pcd->status_buf);
++	}
++	FH_SPINLOCK_FREE(pcd->lock);
++	/* Set core_if's lock pointer to NULL */
++	pcd->core_if->lock = NULL;
++
++	FH_TASK_FREE(pcd->start_xfer_tasklet);
++	FH_TASK_FREE(pcd->test_mode_tasklet);
++	if (pcd->core_if->core_params->dev_out_nak) {
++		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
++			if (pcd->core_if->ep_xfer_timer[i]) {
++				FH_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
++			}
++		}
++	}
++
++/* Release the CFI object's dynamic memory */
++#ifdef FH_UTE_CFI
++	if (pcd->cfi->ops.release) {
++		pcd->cfi->ops.release(pcd->cfi);
++	}
++#endif
++
++	FH_FREE(pcd);
++}
++
++/**
++ * Returns whether registered pcd is dual speed or not
++ */
++uint32_t fh_otg_pcd_is_dualspeed(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++
++	if ((core_if->core_params->speed == FH_SPEED_PARAM_FULL) ||
++	    ((core_if->hwcfg2.b.hs_phy_type == 2) &&
++	     (core_if->hwcfg2.b.fs_phy_type == 1) &&
++	     (core_if->core_params->ulpi_fs_ls))) {
++		return 0;
++	}
++
++	return 1;
++}
++
++/**
++ * Returns whether registered pcd is OTG capable or not
++ */
++uint32_t fh_otg_pcd_is_otg(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	gusbcfg_data_t usbcfg = {.d32 = 0 };
++	uint32_t retval = 0;
++
++	usbcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->gusbcfg);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
++	if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
++		return 0;
++	else
++		return 1;
++#else
++	if (!usbcfg.b.srpcap)
++		return 0;
++	else
++		retval |= 1;
++
++	if (usbcfg.b.hnpcap)
++		retval |= 2;
++
++	if (core_if->adp_enable)
++		retval |= 4;
++#endif
++
++	return retval;
++}
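++
++/*
++ * A minimal decoding sketch (illustrative, not part of the driver): on
++ * kernels >= 3.6 the value returned above is a capability bitmask rather
++ * than a boolean, and a hypothetical caller could unpack it as:
++ *
++ *	uint32_t caps = fh_otg_pcd_is_otg(pcd);
++ *	int srp_capable = !!(caps & 1);		// SRP capable
++ *	int hnp_capable = !!(caps & 2);		// HNP capable
++ *	int adp_enabled = !!(caps & 4);		// ADP enabled
++ */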
++
++/**
++ * This function assigns a Tx FIFO to an EP
++ * in dedicated (multiple) Tx FIFO mode
++ */
++static uint32_t assign_tx_fifo(fh_otg_core_if_t * core_if)
++{
++	uint32_t TxMsk = 1;
++	int i;
++
++	for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
++		if ((TxMsk & core_if->tx_msk) == 0) {
++			core_if->tx_msk |= TxMsk;
++			return i + 1;
++		}
++		TxMsk <<= 1;
++	}
++	return 0;
++}
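++
++/*
++ * How the bitmask search above behaves, on illustrative values: the lowest
++ * clear bit of tx_msk selects the next free Tx FIFO number.
++ *
++ *	tx_msk = 0b0000  ->  assign_tx_fifo() returns 1, tx_msk = 0b0001
++ *	tx_msk = 0b0001  ->  assign_tx_fifo() returns 2, tx_msk = 0b0011
++ *	tx_msk = 0b0101  ->  assign_tx_fifo() returns 2, tx_msk = 0b0111
++ */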
++
++/**
++ * This function assigns a periodic Tx FIFO to a periodic EP
++ * in shared Tx FIFO mode
++ */
++static uint32_t assign_perio_tx_fifo(fh_otg_core_if_t * core_if)
++{
++	uint32_t PerTxMsk = 1;
++	int i;
++	for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
++		if ((PerTxMsk & core_if->p_tx_msk) == 0) {
++			core_if->p_tx_msk |= PerTxMsk;
++			return i + 1;
++		}
++		PerTxMsk <<= 1;
++	}
++	return 0;
++}
++
++/**
++ * This function releases a periodic Tx FIFO
++ * in shared Tx FIFO mode
++ */
++static void release_perio_tx_fifo(fh_otg_core_if_t * core_if,
++				  uint32_t fifo_num)
++{
++	core_if->p_tx_msk =
++	    (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
++}
++
++/**
++ * This function releases a Tx FIFO
++ * in dedicated (multiple) Tx FIFO mode
++ */
++static void release_tx_fifo(fh_otg_core_if_t * core_if, uint32_t fifo_num)
++{
++	core_if->tx_msk =
++	    (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
++}
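++
++/*
++ * The two release functions above rely on the identity (m & b) ^ m == m & ~b,
++ * i.e. they simply clear bit (fifo_num - 1). A worked example, with
++ * illustrative values:
++ *
++ *	m = 0b0110, fifo_num = 2, b = 1 << (2 - 1) = 0b0010
++ *	(m & b) ^ m = 0b0010 ^ 0b0110 = 0b0100	-> FIFO 2 is free again
++ */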
++
++/**
++ * This function is called from the gadget driver
++ * to enable a PCD endpoint.
++ */
++int fh_otg_pcd_ep_enable(fh_otg_pcd_t * pcd,
++			  const uint8_t * ep_desc, void *usb_ep)
++{
++	int num, dir;
++	fh_otg_pcd_ep_t *ep = NULL;
++	const usb_endpoint_descriptor_t *desc;
++	fh_irqflags_t flags;
++	fifosize_data_t dptxfsiz = {.d32 = 0 };
++	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
++	gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
++	int retval = 0;
++	int i, epcount;
++
++	desc = (const usb_endpoint_descriptor_t *)ep_desc;
++
++	if (!desc) {
++		pcd->ep0.priv = usb_ep;
++		ep = &pcd->ep0;
++		retval = -FH_E_INVALID;
++		goto out;
++	}
++
++	num = UE_GET_ADDR(desc->bEndpointAddress);
++	dir = UE_GET_DIR(desc->bEndpointAddress);
++
++	if (!desc->wMaxPacketSize) {
++		FH_WARN("bad maxpacketsize\n");
++		retval = -FH_E_INVALID;
++		goto out;
++	}
++
++	if (dir == UE_DIR_IN) {
++		epcount = pcd->core_if->dev_if->num_in_eps;
++		for (i = 0; i < epcount; i++) {
++			if (num == pcd->in_ep[i].fh_ep.num) {
++				ep = &pcd->in_ep[i];
++				break;
++			}
++		}
++	} else {
++		epcount = pcd->core_if->dev_if->num_out_eps;
++		for (i = 0; i < epcount; i++) {
++			if (num == pcd->out_ep[i].fh_ep.num) {
++				ep = &pcd->out_ep[i];
++				break;
++			}
++		}
++	}
++
++	if (!ep) {
++		FH_WARN("bad address\n");
++		retval = -FH_E_INVALID;
++		goto out;
++	}
++
++	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++
++	ep->desc = desc;
++	ep->priv = usb_ep;
++
++	/*
++	 * Activate the EP
++	 */
++	ep->stopped = 0;
++
++	ep->fh_ep.is_in = (dir == UE_DIR_IN);
++	ep->fh_ep.maxpacket = UGETW(desc->wMaxPacketSize);
++
++	ep->fh_ep.type = desc->bmAttributes & UE_XFERTYPE;
++
++	if (ep->fh_ep.is_in) {
++		if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
++			ep->fh_ep.tx_fifo_num = 0;
++
++			if (ep->fh_ep.type == UE_ISOCHRONOUS) {
++				/*
++				 * if ISOC EP then assign a Periodic Tx FIFO.
++				 */
++				ep->fh_ep.tx_fifo_num =
++				    assign_perio_tx_fifo(GET_CORE_IF(pcd));
++			}
++		} else {
++			/*
++			 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
++			 */
++			ep->fh_ep.tx_fifo_num =
++			    assign_tx_fifo(GET_CORE_IF(pcd));
++		}
++
++		/* Calculating EP info controller base address */
++		if (ep->fh_ep.tx_fifo_num
++		    && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
++			gdfifocfg.d32 =
++			    FH_READ_REG32(&GET_CORE_IF(pcd)->
++					   core_global_regs->gdfifocfg);
++			gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
++			dptxfsiz.d32 =
++			    (FH_READ_REG32
++			     (&GET_CORE_IF(pcd)->core_global_regs->
++			      dtxfsiz[ep->fh_ep.tx_fifo_num - 1]) >> 16);
++			gdfifocfg.b.epinfobase =
++			    gdfifocfgbase.d32 + dptxfsiz.d32;
++			if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
++				FH_WRITE_REG32(&GET_CORE_IF(pcd)->
++						core_global_regs->gdfifocfg,
++						gdfifocfg.d32);
++			}
++		}
++	}
++	/* Set initial data PID. */
++	if (ep->fh_ep.type == UE_BULK) {
++		ep->fh_ep.data_pid_start = 0;
++	}
++
++	/* Alloc DMA Descriptors */
++	if (GET_CORE_IF(pcd)->dma_desc_enable) {
++#ifndef FH_UTE_PER_IO
++		if (ep->fh_ep.type != UE_ISOCHRONOUS) {
++#endif
++			ep->fh_ep.desc_addr =
++			    fh_otg_ep_alloc_desc_chain(&ep->
++							fh_ep.dma_desc_addr,
++							MAX_DMA_DESC_CNT);
++			if (!ep->fh_ep.desc_addr) {
++				FH_WARN("%s, can't allocate DMA descriptor\n",
++					 __func__);
++				retval = -FH_E_SHUTDOWN;
++				FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++				goto out;
++			}
++#ifndef FH_UTE_PER_IO
++		} else {
++			ep->fh_ep.desc_addr =
++				fh_otg_ep_alloc_desc_chain(&ep->
++				fh_ep.dma_desc_addr,
++				MAX_DMA_DESC_CNT/2);
++			ep->fh_ep.desc_addr1 =
++				fh_otg_ep_alloc_desc_chain(&ep->
++				fh_ep.dma_desc_addr1,
++				MAX_DMA_DESC_CNT/2);
++			if (!ep->fh_ep.desc_addr || !ep->fh_ep.desc_addr1) {
++				FH_WARN("%s, can't allocate DMA descriptor\n",
++					__func__);
++				retval = -FH_E_SHUTDOWN;
++				FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++				goto out;
++			}
++			/* Set initial data PID. */
++			if (ep->fh_ep.type == UE_ISOCHRONOUS) {
++				ep->fh_ep.iso_desc_first = 0;
++				ep->fh_ep.iso_desc_second = 0;
++				ep->fh_ep.iso_transfer_started = 0;
++			}
++		}
++#endif
++	}
++
++	FH_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
++		    (ep->fh_ep.is_in ? "IN" : "OUT"),
++		    ep->fh_ep.type, ep->fh_ep.maxpacket, ep->desc);
++#ifdef FH_UTE_PER_IO
++	ep->fh_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
++#endif
++	if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
++		ep->fh_ep.bInterval = 1 << (ep->desc->bInterval - 1);
++		ep->fh_ep.frame_num = 0xFFFFFFFF;
++	}
++
++	fh_otg_ep_activate(GET_CORE_IF(pcd), &ep->fh_ep);
++
++#ifdef FH_UTE_CFI
++	if (pcd->cfi->ops.ep_enable) {
++		pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
++	}
++#endif
++
++	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++
++out:
++	return retval;
++}
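++
++/*
++ * A hedged usage sketch for fh_otg_pcd_ep_enable() (the descriptor bytes
++ * and handle names below are hypothetical): a gadget driver passes the raw
++ * endpoint descriptor, e.g. for a 512-byte bulk IN endpoint at address 1
++ * (bEndpointAddress = 0x81, bmAttributes = UE_BULK, wMaxPacketSize = 512,
++ * little-endian in the descriptor bytes):
++ *
++ *	ret = fh_otg_pcd_ep_enable(pcd, ep_desc_bytes, usb_ep);
++ *	if (ret)
++ *		;	// -FH_E_INVALID on a bad descriptor or address
++ */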
++
++/**
++ * This function is called from the gadget driver
++ * to disable a PCD endpoint.
++ */
++int fh_otg_pcd_ep_disable(fh_otg_pcd_t * pcd, void *ep_handle)
++{
++	fh_otg_pcd_ep_t *ep;
++	fh_irqflags_t flags;
++	fh_otg_dev_dma_desc_t *desc_addr;
++	fh_dma_t dma_desc_addr;
++	gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
++	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
++	fifosize_data_t dptxfsiz = {.d32 = 0 };
++
++	ep = get_ep_from_handle(pcd, ep_handle);
++
++	if (!ep || !ep->desc) {
++		FH_DEBUGPL(DBG_PCD, "bad ep address\n");
++		return -FH_E_INVALID;
++	}
++
++	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++
++	fh_otg_request_nuke(ep);
++
++	fh_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->fh_ep);
++	if (pcd->core_if->core_params->dev_out_nak) {
++		FH_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->fh_ep.num]);
++		pcd->core_if->ep_xfer_info[ep->fh_ep.num].state = 0;
++	}
++	ep->desc = NULL;
++	ep->stopped = 1;
++
++	gdfifocfg.d32 =
++	    FH_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
++	gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
++
++	if (ep->fh_ep.is_in) {
++		if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
++			/* Flush the Tx FIFO */
++			fh_otg_flush_tx_fifo(GET_CORE_IF(pcd),
++					      ep->fh_ep.tx_fifo_num);
++		}
++		release_perio_tx_fifo(GET_CORE_IF(pcd), ep->fh_ep.tx_fifo_num);
++		release_tx_fifo(GET_CORE_IF(pcd), ep->fh_ep.tx_fifo_num);
++		if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
++			/* Decreasing EPinfo Base Addr */
++			dptxfsiz.d32 =
++			    (FH_READ_REG32
++			     (&GET_CORE_IF(pcd)->
++		      		core_global_regs->dtxfsiz[ep->fh_ep.tx_fifo_num-1]) >> 16);
++			gdfifocfg.b.epinfobase = gdfifocfgbase.d32 - dptxfsiz.d32;
++			if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
++				FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg,
++						gdfifocfg.d32);
++			}
++		}
++	}
++
++	/* Free DMA Descriptors */
++	if (GET_CORE_IF(pcd)->dma_desc_enable) {
++		if (ep->fh_ep.type != UE_ISOCHRONOUS) {
++			desc_addr = ep->fh_ep.desc_addr;
++			dma_desc_addr = ep->fh_ep.dma_desc_addr;
++
++			/* Cannot call dma_free_coherent() with IRQs disabled */
++			FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++			fh_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
++						   MAX_DMA_DESC_CNT);
++
++		} else {
++			desc_addr = ep->fh_ep.desc_addr;
++			dma_desc_addr = ep->fh_ep.dma_desc_addr;
++
++			/* Cannot call dma_free_coherent() with IRQs disabled */
++			FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++			fh_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
++				MAX_DMA_DESC_CNT/2);
++			desc_addr = ep->fh_ep.desc_addr1;
++			dma_desc_addr = ep->fh_ep.dma_desc_addr1;
++			fh_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
++				MAX_DMA_DESC_CNT/2);
++		}
++		goto out_unlocked;
++	}
++	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++
++out_unlocked:
++	FH_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->fh_ep.num,
++		    ep->fh_ep.is_in ? "IN" : "OUT");
++	return 0;
++}
++
++/**
++ * This function initializes the DMA descriptor chain for ISOC transfers.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ */
++void fh_otg_pcd_start_iso_ddma(fh_otg_core_if_t * core_if, fh_otg_pcd_ep_t * ep)
++{
++	fh_otg_dev_dma_desc_t *dma_desc;
++	fh_otg_pcd_request_t *req = NULL;
++	fh_ep_t *fhep = NULL;
++	uint32_t frame_num = 0;
++	int i = 0;
++	int j;
++	int sync_request = 4;
++	uint16_t nat;
++	depctl_data_t depctl;
++
++	fhep = &ep->fh_ep;
++	dma_desc = fhep->desc_addr;
++
++	nat = UGETW(ep->desc->wMaxPacketSize);
++	nat = (nat >> 11) & 0x03;
++	FH_DEBUGPL(DBG_PCD, "nat=%u binterval =%02x\n",nat, fhep->bInterval);
++	FH_DEBUGPL(DBG_PCD, "frame_num =  %d\n", fhep->frame_num);
++
++	/* Complete the first sync_request IN EP requests for synchronization */
++	if (fhep->is_in) {
++		if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
++			for (j = 0; j < sync_request; j++) {
++				req = FH_CIRCLEQ_FIRST(&ep->queue);
++				if (!req) {
++					FH_PRINTF("ISOC 0x%p, req = NULL!\n", ep);
++					return;
++				} else {
++					/* Complete first request */
++					req->actual = 0;
++					fh_otg_request_done(ep, req, 0);
++				}
++			}
++		} else {
++			FH_PRINTF("ISOC ep 0x%p, ep->queue empty!\n", ep);
++			return;
++		}
++
++		frame_num = fhep->frame_num + (sync_request - 1) * fhep->bInterval;
++
++		FH_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
++			i = i+1;
++			frame_num = (frame_num + fhep->bInterval) & 0x3FFF;
++			/** DMA Descriptor Setup */
++			dma_desc->status.b_iso_in.bs = BS_HOST_BUSY;
++			dma_desc->buf = req->dma;
++			dma_desc->status.b_iso_in.txbytes = req->length;
++			dma_desc->status.b_iso_in.framenum = frame_num;
++			dma_desc->status.b_iso_in.txsts = 0;
++			dma_desc->status.b_iso_in.sp = (req->length % fhep->maxpacket) ? 1 : 0;
++			dma_desc->status.b_iso_in.ioc = 1;
++			dma_desc->status.b_iso_in.pid = nat + 1;
++			dma_desc->status.b_iso_in.l = 0;
++			
++			if (req == FH_CIRCLEQ_LAST(&ep->queue)) {
++				dma_desc->status.b_iso_in.l = 1;
++			}
++			dma_desc->status.b_iso_in.bs = BS_HOST_READY;
++			FH_DEBUGPL(DBG_PCD, "ISO_DESC #%d %p status = %08x\n", i, dma_desc, dma_desc->status.d32);
++			if (i == MAX_DMA_DESC_CNT/2 - 1) {
++				dma_desc->status.b_iso_in.l = 1;
++				break;
++			}
++			dma_desc++;
++		}
++		FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[fhep->num]->diepdma, fhep->dma_desc_addr);
++ 		FH_DEBUGPL(DBG_PCD, "%d ISOC IN descs were programmed\n", i-1);
++		depctl.d32 = 0;
++		depctl.b.epena = 1;
++		depctl.b.cnak = 1;
++		FH_MODIFY_REG32(&core_if->dev_if->in_ep_regs[fhep->num]->diepctl, 0, depctl.d32);
++	} else {
++		FH_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
++			i = i+1;
++			frame_num = (frame_num + fhep->bInterval) & 0x3FFF;
++			/** DMA Descriptor Setup */
++			dma_desc->status.b_iso_out.bs = BS_HOST_BUSY;
++			dma_desc->buf = req->dma;
++			dma_desc->status.b_iso_out.rxbytes = req->length;
++			dma_desc->status.b_iso_out.rxsts = 0;
++			dma_desc->status.b_iso_out.sp = (req->length % fhep->maxpacket) ? 1 : 0;
++			dma_desc->status.b_iso_out.ioc = 1;
++			dma_desc->status.b_iso_out.pid = nat + 1;
++			dma_desc->status.b_iso_out.l = 0;
++
++			if (req == FH_CIRCLEQ_LAST(&ep->queue)) {
++				dma_desc->status.b_iso_out.l = 1;
++			}
++			dma_desc->status.b_iso_out.bs = BS_HOST_READY;
++			FH_DEBUGPL(DBG_PCD, "ISO_DESC #%d %p status = %08x\n", i, dma_desc, dma_desc->status.d32);
++			if (i == MAX_DMA_DESC_CNT/2 - 1) {
++				dma_desc->status.b_iso_out.l = 1;
++				break;
++			}
++			dma_desc++;
++		}
++		FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[fhep->num]->doepdma, fhep->dma_desc_addr);
++		FH_DEBUGPL(DBG_PCD, "%d ISOC OUT descs were programmed\n", i-1);
++		depctl.d32 = 0;
++		depctl.b.epena = 1;
++		depctl.b.cnak = 1;
++		FH_MODIFY_REG32(&core_if->dev_if->out_ep_regs[fhep->num]->doepctl, 0, depctl.d32);
++	}
++	fhep->iso_desc_first = i;	/* vahrama: note - this was previously i - 1 */
++	fhep->iso_transfer_started = 1;
++	fhep->frame_num = frame_num;
++	fhep->use_add_buf = 1;
++}
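++
++/*
++ * Descriptor chain layout assumed by the code above: ISOC DDMA uses two
++ * half-size chains, desc_addr and desc_addr1 (each MAX_DMA_DESC_CNT/2
++ * entries, allocated in fh_otg_pcd_ep_enable), as a ping-pong pair.
++ * use_add_buf selects the chain software fills next while the hardware
++ * consumes the other one:
++ *
++ *	chain 0: desc_addr [0 .. MAX_DMA_DESC_CNT/2 - 1]   <- HW active
++ *	chain 1: desc_addr1[0 .. MAX_DMA_DESC_CNT/2 - 1]   <- SW fills next
++ */
++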
++/**
++ * Program next ISO request to the DMA chain
++ *
++ */
++static void program_next_iso_request_ddma(fh_otg_pcd_ep_t * ep, fh_otg_pcd_request_t * req)
++{
++	fh_otg_dev_dma_desc_t *dma_desc;
++	fh_dma_t dma_desc_addr;
++	uint32_t frame_num = 0;
++	uint32_t nat;
++	uint32_t index;
++
++	FH_DEBUGPL(DBG_PCD, "%s", __FUNCTION__);
++
++	if (ep->fh_ep.use_add_buf) {
++		index = ep->fh_ep.iso_desc_second + 1;
++	} else {
++		index = ep->fh_ep.iso_desc_first + 1;
++	}
++
++	if (index > MAX_DMA_DESC_CNT / 2) {
++		FH_PRINTF("There are no free descs in the chain!\n");
++		return;
++	}
++
++	if (ep->fh_ep.use_add_buf) {
++		dma_desc = &ep->fh_ep.desc_addr1[ep->fh_ep.iso_desc_second];
++		dma_desc_addr = ep->fh_ep.dma_desc_addr1;
++		ep->fh_ep.iso_desc_second += 1;
++	} else {
++		dma_desc = &ep->fh_ep.desc_addr[ep->fh_ep.iso_desc_first];
++		dma_desc_addr = ep->fh_ep.dma_desc_addr;
++		ep->fh_ep.iso_desc_first += 1;
++	}
++	nat = UGETW(ep->desc->wMaxPacketSize);
++	nat = (nat >> 11) & 0x03;
++	frame_num = (ep->fh_ep.frame_num + ep->fh_ep.bInterval) & 0x3FFF;
++	if (ep->fh_ep.is_in) {
++		/** DMA Descriptor Setup */
++		dma_desc->status.b_iso_in.bs = BS_HOST_BUSY;
++		dma_desc->buf = req->dma;
++		dma_desc->status.b_iso_in.txbytes = req->length;
++		dma_desc->status.b_iso_in.framenum = frame_num;
++		dma_desc->status.b_iso_in.txsts = 0;
++		dma_desc->status.b_iso_in.sp = (req->length % ep->fh_ep.maxpacket) ? 1 : 0;
++		dma_desc->status.b_iso_in.ioc = 1;
++		dma_desc->status.b_iso_in.pid = nat + 1;
++		dma_desc->status.b_iso_in.l = 1;
++
++		dma_desc->status.b_iso_in.bs = BS_HOST_READY;
++
++		/* Clear L bit on the previous desc of the chain */
++		if (index > 1) {
++			dma_desc--;
++			dma_desc->status.b_iso_in.l = 0;
++		}
++	} else {
++		/** DMA Descriptor Setup */
++		dma_desc->status.b_iso_out.bs = BS_HOST_BUSY;
++		dma_desc->buf = req->dma;
++		dma_desc->status.b_iso_out.rxbytes = req->length;
++		dma_desc->status.b_iso_out.rxsts = 0;
++		dma_desc->status.b_iso_out.sp = (req->length % ep->fh_ep.maxpacket) ? 1 : 0;
++		dma_desc->status.b_iso_out.ioc = 1;
++		dma_desc->status.b_iso_out.pid = nat + 1;
++		dma_desc->status.b_iso_out.l = 1;
++
++		dma_desc->status.b_iso_out.bs = BS_HOST_READY;
++
++		/* Clear L bit on the previous desc of the chain */
++		if (index > 1) {
++			dma_desc--;
++			dma_desc->status.b_iso_out.l = 0;
++		}
++	}
++	ep->fh_ep.frame_num = frame_num;
++
++}
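++
++/*
++ * A short sketch of the L (last) bit handling above: each newly programmed
++ * descriptor is marked as the end of the chain, then the L bit of its
++ * predecessor is cleared, so the chain always terminates at the most
++ * recently queued request:
++ *
++ *	before queueing:  [D0 L=0] [D1 L=1]
++ *	after queueing:   [D0 L=0] [D1 L=0] [D2 L=1]
++ */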
++
++/******************************************************************************/
++#ifdef FH_UTE_PER_IO
++
++/**
++ * Free the request and its extended parts
++ *
++ */
++void fh_pcd_xiso_ereq_free(fh_otg_pcd_ep_t * ep, fh_otg_pcd_request_t * req)
++{
++	FH_FREE(req->ext_req.per_io_frame_descs);
++	FH_FREE(req);
++}
++
++/**
++ * Start the next request in the endpoint's queue.
++ *
++ */
++int fh_otg_pcd_xiso_start_next_request(fh_otg_pcd_t * pcd,
++					fh_otg_pcd_ep_t * ep)
++{
++	int i;
++	fh_otg_pcd_request_t *req = NULL;
++	fh_ep_t *fhep = NULL;
++	struct fh_iso_xreq_port *ereq = NULL;
++	struct fh_iso_pkt_desc_port *ddesc_iso;
++	uint16_t nat;
++	depctl_data_t diepctl;
++
++	fhep = &ep->fh_ep;
++
++	if (fhep->xiso_active_xfers > 0) {
++#if 0	/* Disabled to reduce software overhead, which is critical for Isoc transfers */
++		FH_WARN("There are currently active transfers for EP%d "
++			"(active=%d; queued=%d)", fhep->num,
++			fhep->xiso_active_xfers, fhep->xiso_queued_xfers);
++#endif
++		return 0;
++	}
++
++	nat = UGETW(ep->desc->wMaxPacketSize);
++	nat = (nat >> 11) & 0x03;
++
++	if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
++		req = FH_CIRCLEQ_FIRST(&ep->queue);
++		ereq = &req->ext_req;
++		ep->stopped = 0;
++
++		/* Get the frame number */
++		fhep->xiso_frame_num =
++		    fh_otg_get_frame_number(GET_CORE_IF(pcd));
++		FH_DEBUG("FRM_NUM=%d", fhep->xiso_frame_num);
++
++		ddesc_iso = ereq->per_io_frame_descs;
++
++		if (fhep->is_in) {
++			/* Setup DMA Descriptor chain for IN Isoc request */
++			for (i = 0; i < ereq->pio_pkt_count; i++) {
++				//if ((i % (nat + 1)) == 0)
++				if (i > 0)
++					fhep->xiso_frame_num =
++					    (fhep->xiso_bInterval +
++					     fhep->xiso_frame_num) & 0x3FFF;
++				fhep->desc_addr[i].buf =
++				    req->dma + ddesc_iso[i].offset;
++				fhep->desc_addr[i].status.b_iso_in.txbytes =
++				    ddesc_iso[i].length;
++				fhep->desc_addr[i].status.b_iso_in.framenum =
++				    fhep->xiso_frame_num;
++				fhep->desc_addr[i].status.b_iso_in.bs =
++				    BS_HOST_READY;
++				fhep->desc_addr[i].status.b_iso_in.txsts = 0;
++				fhep->desc_addr[i].status.b_iso_in.sp =
++				    (ddesc_iso[i].length %
++				     fhep->maxpacket) ? 1 : 0;
++				fhep->desc_addr[i].status.b_iso_in.ioc = 0;
++				fhep->desc_addr[i].status.b_iso_in.pid = nat + 1;
++				fhep->desc_addr[i].status.b_iso_in.l = 0;
++
++				/* Process the last descriptor */
++				if (i == ereq->pio_pkt_count - 1) {
++					fhep->desc_addr[i].status.b_iso_in.ioc = 1;
++					fhep->desc_addr[i].status.b_iso_in.l = 1;
++				}
++			}
++
++			/* Setup and start the transfer for this endpoint */
++			fhep->xiso_active_xfers++;
++			FH_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
++					in_ep_regs[fhep->num]->diepdma,
++					fhep->dma_desc_addr);
++			diepctl.d32 = 0;
++			diepctl.b.epena = 1;
++			diepctl.b.cnak = 1;
++			FH_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
++					 in_ep_regs[fhep->num]->diepctl, 0,
++					 diepctl.d32);
++		} else {
++			/* Setup DMA Descriptor chain for OUT Isoc request */
++			for (i = 0; i < ereq->pio_pkt_count; i++) {
++				//if ((i % (nat + 1)) == 0)
++				fhep->xiso_frame_num = (fhep->xiso_bInterval + 
++										fhep->xiso_frame_num) & 0x3FFF;
++				fhep->desc_addr[i].buf =
++				    req->dma + ddesc_iso[i].offset;
++				fhep->desc_addr[i].status.b_iso_out.rxbytes =
++				    ddesc_iso[i].length;
++				fhep->desc_addr[i].status.b_iso_out.framenum =
++				    fhep->xiso_frame_num;
++				fhep->desc_addr[i].status.b_iso_out.bs =
++				    BS_HOST_READY;
++				fhep->desc_addr[i].status.b_iso_out.rxsts = 0;
++				fhep->desc_addr[i].status.b_iso_out.sp =
++				    (ddesc_iso[i].length %
++				     fhep->maxpacket) ? 1 : 0;
++				fhep->desc_addr[i].status.b_iso_out.ioc = 0;
++				fhep->desc_addr[i].status.b_iso_out.pid = nat + 1;
++				fhep->desc_addr[i].status.b_iso_out.l = 0;
++
++				/* Process the last descriptor */
++				if (i == ereq->pio_pkt_count - 1) {
++					fhep->desc_addr[i].status.b_iso_out.ioc = 1;
++					fhep->desc_addr[i].status.b_iso_out.l = 1;
++				}
++			}
++
++			/* Setup and start the transfer for this endpoint */
++			fhep->xiso_active_xfers++;
++			FH_WRITE_REG32(&GET_CORE_IF(pcd)->
++					dev_if->out_ep_regs[fhep->num]->
++					doepdma, fhep->dma_desc_addr);
++			diepctl.d32 = 0;
++			diepctl.b.epena = 1;
++			diepctl.b.cnak = 1;
++			FH_MODIFY_REG32(&GET_CORE_IF(pcd)->
++					 dev_if->out_ep_regs[fhep->num]->
++					 doepctl, 0, diepctl.d32);
++		}
++
++	} else {
++		ep->stopped = 1;
++	}
++
++	return 0;
++}
++
++/**
++ * Completes an extended ISOC request:
++ *	- removes the request from the queue
++ *	- copies the transfer results into the packet descriptors
++ *	- calls the non-portable completion callback
++ *	- starts the next queued request
++ */
++void complete_xiso_ep(fh_otg_pcd_ep_t * ep)
++{
++	fh_otg_pcd_request_t *req = NULL;
++	struct fh_iso_xreq_port *ereq = NULL;
++	struct fh_iso_pkt_desc_port *ddesc_iso = NULL;
++	fh_ep_t *fhep = NULL;
++	int i;
++
++	//FH_DEBUG();
++	fhep = &ep->fh_ep;
++
++	/* Get the first pending request from the queue */
++	if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
++		req = FH_CIRCLEQ_FIRST(&ep->queue);
++		if (!req) {
++			FH_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
++			return;
++		}
++		fhep->xiso_active_xfers--;
++		fhep->xiso_queued_xfers--;
++		/* Remove this request from the queue */
++		FH_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
++	} else {
++		FH_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
++		return;
++	}
++
++	ep->stopped = 1;
++	ereq = &req->ext_req;
++	ddesc_iso = ereq->per_io_frame_descs;
++
++	if (fhep->xiso_active_xfers < 0) {
++		FH_WARN("EP#%d (xiso_active_xfers=%d)", fhep->num,
++			 fhep->xiso_active_xfers);
++	}
++
++	/* Fill the Isoc descs of portable extended req from dma descriptors */
++	for (i = 0; i < ereq->pio_pkt_count; i++) {
++		if (fhep->is_in) {	/* IN endpoints */
++			ddesc_iso[i].actual_length = ddesc_iso[i].length -
++			    fhep->desc_addr[i].status.b_iso_in.txbytes;
++			ddesc_iso[i].status =
++			    fhep->desc_addr[i].status.b_iso_in.txsts;
++		} else {	/* OUT endpoints */
++			ddesc_iso[i].actual_length = ddesc_iso[i].length -
++			    fhep->desc_addr[i].status.b_iso_out.rxbytes;
++			ddesc_iso[i].status =
++			    fhep->desc_addr[i].status.b_iso_out.rxsts;
++		}
++	}
++
++	FH_SPINUNLOCK(ep->pcd->lock);
++
++	/* Call the completion function in the non-portable logic */
++	ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
++				      &req->ext_req);
++
++	FH_SPINLOCK(ep->pcd->lock);
++
++	/* Free the request - specific freeing needed for extended request object */
++	fh_pcd_xiso_ereq_free(ep, req);
++
++	/* Start the next request */
++	fh_otg_pcd_xiso_start_next_request(ep->pcd, ep);
++
++	return;
++}
++
++/**
++ * Create and initialize the Isoc pkt descriptors of the extended request.
++ *
++ */
++static int fh_otg_pcd_xiso_create_pkt_descs(fh_otg_pcd_request_t * req,
++					     void *ereq_nonport,
++					     int atomic_alloc)
++{
++	struct fh_iso_xreq_port *ereq = NULL;
++	struct fh_iso_xreq_port *req_mapped = NULL;
++	struct fh_iso_pkt_desc_port *ipds = NULL;	/* To be created in this function */
++	uint32_t pkt_count;
++	int i;
++
++	ereq = &req->ext_req;
++	req_mapped = (struct fh_iso_xreq_port *)ereq_nonport;
++	pkt_count = req_mapped->pio_pkt_count;
++
++	/* Create the isoc descs */
++	if (atomic_alloc) {
++		ipds = FH_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
++	} else {
++		ipds = FH_ALLOC(sizeof(*ipds) * pkt_count);
++	}
++
++	if (!ipds) {
++		FH_ERROR("Failed to allocate isoc descriptors");
++		return -FH_E_NO_MEMORY;
++	}
++
++	/* Initialize the extended request fields */
++	ereq->per_io_frame_descs = ipds;
++	ereq->error_count = 0;
++	ereq->pio_alloc_pkt_count = pkt_count;
++	ereq->pio_pkt_count = pkt_count;
++	ereq->tr_sub_flags = req_mapped->tr_sub_flags;
++
++	/* Init the Isoc descriptors */
++	for (i = 0; i < pkt_count; i++) {
++		ipds[i].length = req_mapped->per_io_frame_descs[i].length;
++		ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
++		ipds[i].status = req_mapped->per_io_frame_descs[i].status;	/* 0 */
++		ipds[i].actual_length =
++		    req_mapped->per_io_frame_descs[i].actual_length;
++	}
++
++	return 0;
++}
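++
++/*
++ * A minimal sketch of the per-IO frame descriptors initialized above,
++ * assuming a request that carries two 188-byte ISOC packets packed back to
++ * back in one DMA buffer (all values illustrative):
++ *
++ *	ipds[0].offset = 0;	ipds[0].length = 188;	ipds[0].status = 0;
++ *	ipds[1].offset = 188;	ipds[1].length = 188;	ipds[1].status = 0;
++ */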
++
++static void prn_ext_request(struct fh_iso_xreq_port *ereq)
++{
++	struct fh_iso_pkt_desc_port *xfd = NULL;
++	int i;
++
++	FH_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
++	FH_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
++	FH_DEBUG("error_count=%d", ereq->error_count);
++	FH_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
++	FH_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
++	FH_DEBUG("res=%d", ereq->res);
++
++	for (i = 0; i < ereq->pio_pkt_count; i++) {
++		xfd = &ereq->per_io_frame_descs[0];
++		FH_DEBUG("FD #%d", i);
++
++		FH_DEBUG("xfd->actual_length=%d", xfd->actual_length);
++		FH_DEBUG("xfd->length=%d", xfd->length);
++		FH_DEBUG("xfd->offset=%d", xfd->offset);
++		FH_DEBUG("xfd->status=%d", xfd->status);
++	}
++}
++
++/**
++ * Queues an extended (per-IO) ISOC request. Supported only in DDMA mode.
++ */
++int fh_otg_pcd_xiso_ep_queue(fh_otg_pcd_t * pcd, void *ep_handle,
++			      uint8_t * buf, fh_dma_t dma_buf, uint32_t buflen,
++			      int zero, void *req_handle, int atomic_alloc,
++			      void *ereq_nonport)
++{
++	fh_otg_pcd_request_t *req = NULL;
++	fh_otg_pcd_ep_t *ep;
++	fh_irqflags_t flags;
++	int res;
++
++	ep = get_ep_from_handle(pcd, ep_handle);
++	if (!ep) {
++		FH_WARN("bad ep\n");
++		return -FH_E_INVALID;
++	}
++
++	/* We support this extension only for DDMA mode */
++	if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC)
++		if (!GET_CORE_IF(pcd)->dma_desc_enable)
++			return -FH_E_INVALID;
++
++	/* Create a fh_otg_pcd_request_t object */
++	if (atomic_alloc) {
++		req = FH_ALLOC_ATOMIC(sizeof(*req));
++	} else {
++		req = FH_ALLOC(sizeof(*req));
++	}
++
++	if (!req) {
++		return -FH_E_NO_MEMORY;
++	}
++
++	/* Create the Isoc descs for this request; they must exactly match
++	 * the structure sent to us from the non-portable logic */
++	res =
++	    fh_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
++	if (res) {
++		FH_WARN("Failed to init the Isoc descriptors");
++		FH_FREE(req);
++		return res;
++	}
++
++	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++
++	FH_CIRCLEQ_INIT_ENTRY(req, queue_entry);
++	req->buf = buf;
++	req->dma = dma_buf;
++	req->length = buflen;
++	req->sent_zlp = zero;
++	req->priv = req_handle;
++
++	//FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++	ep->fh_ep.dma_addr = dma_buf;
++	ep->fh_ep.start_xfer_buff = buf;
++	ep->fh_ep.xfer_buff = buf;
++	ep->fh_ep.xfer_len = 0;
++	ep->fh_ep.xfer_count = 0;
++	ep->fh_ep.sent_zlp = 0;
++	ep->fh_ep.total_len = buflen;
++
++	/* Add this request to the tail */
++	FH_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
++	ep->fh_ep.xiso_queued_xfers++;
++
++//FH_DEBUG("CP_0");
++//FH_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags);
++//prn_ext_request((struct fh_iso_xreq_port *) ereq_nonport);
++//prn_ext_request(&req->ext_req);
++
++	//FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++
++	/* If the request's tr_sub_flags is ASAP, check whether there is any
++	 * active transfer for this endpoint. If there are no active transfers,
++	 * get the first entry from the queue and start that transfer.
++	 */
++	if (req->ext_req.tr_sub_flags == FH_EREQ_TF_ASAP) {
++		res = fh_otg_pcd_xiso_start_next_request(pcd, ep);
++		if (res) {
++			FH_WARN("Failed to start the next Isoc transfer");
++			FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++			FH_FREE(req);
++			return res;
++		}
++	}
++
++	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++	return 0;
++}
++
++#endif
++/* END ifdef FH_UTE_PER_IO ***************************************************/
++int fh_otg_pcd_ep_queue(fh_otg_pcd_t * pcd, void *ep_handle,
++			 uint8_t * buf, fh_dma_t dma_buf, uint32_t buflen,
++			 int zero, void *req_handle, int atomic_alloc)
++{
++	fh_irqflags_t flags;
++	fh_otg_pcd_request_t *req;
++	fh_otg_pcd_ep_t *ep;
++	uint32_t max_transfer;
++
++	ep = get_ep_from_handle(pcd, ep_handle);
++	if (!ep || (!ep->desc && ep->fh_ep.num != 0)) {
++		FH_WARN("bad ep\n");
++		return -FH_E_INVALID;
++	}
++
++	if (atomic_alloc) {
++		req = FH_ALLOC_ATOMIC(sizeof(*req));
++	} else {
++		req = FH_ALLOC(sizeof(*req));
++	}
++
++	if (!req) {
++		return -FH_E_NO_MEMORY;
++	}
++	FH_CIRCLEQ_INIT_ENTRY(req, queue_entry);
++	if (!GET_CORE_IF(pcd)->core_params->opt) {
++		if (ep->fh_ep.num != 0) {
++			FH_ERROR("queue req %p, len %d buf %p\n",
++				  req_handle, buflen, buf);
++		}
++	}
++
++	req->buf = buf;
++	req->dma = dma_buf;
++	req->length = buflen;
++	req->sent_zlp = zero;
++	req->priv = req_handle;
++	req->dw_align_buf = NULL;
++	if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
++	    && !GET_CORE_IF(pcd)->dma_desc_enable)
++		req->dw_align_buf = FH_DMA_ALLOC(buflen,
++						  &req->dw_align_buf_dma);
++	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++
++	/*
++	 * After adding the request to the queue: for IN ISOC, wait for the
++	 * "IN Token Received When TxFIFO Empty" interrupt; for OUT ISOC, wait
++	 * for the "OUT Token Received When EP Disabled" interrupt. This yields
++	 * the starting (odd/even) microframe before the transfer is started.
++	 */
++	if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
++		if (req != 0) {
++			depctl_data_t depctl = {.d32 =
++				    FH_READ_REG32(&pcd->core_if->dev_if->
++						   in_ep_regs[ep->fh_ep.num]->
++						   diepctl) };
++			++pcd->request_pending;
++
++			FH_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
++			if (ep->fh_ep.is_in) {
++				depctl.b.cnak = 1;
++				FH_WRITE_REG32(&pcd->core_if->dev_if->
++						in_ep_regs[ep->fh_ep.num]->
++						diepctl, depctl.d32);
++			}
++			if (GET_CORE_IF(pcd)->dma_desc_enable) {
++				if (ep->fh_ep.iso_transfer_started) {
++					/* 
++					 * Add next request to the descriptor chain 
++					 * currently not in use by HW 
++					 */
++					program_next_iso_request_ddma(ep, req);
++				} else if (!ep->fh_ep.is_in)
++					/* For OUT start first request immediately after queue */
++					fh_otg_pcd_start_iso_ddma(GET_CORE_IF(pcd), ep);
++			}
++			
++			FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++		}
++		return 0;
++	}
++
++	/*
++	 * For EP0 IN without a premature status phase, is a ZLP required?
++	 */
++	if (ep->fh_ep.num == 0 && ep->fh_ep.is_in) {
++		FH_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->fh_ep.num);
++		//_req->zero = 1;
++	}
++
++	/* Start the transfer */
++	if (FH_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
++		/* EP0 Transfer? */
++		if (ep->fh_ep.num == 0) {
++			switch (pcd->ep0state) {
++			case EP0_IN_DATA_PHASE:
++				FH_DEBUGPL(DBG_PCD,
++					    "%s ep0: EP0_IN_DATA_PHASE\n",
++					    __func__);
++				break;
++
++			case EP0_OUT_DATA_PHASE:
++				FH_DEBUGPL(DBG_PCD,
++					    "%s ep0: EP0_OUT_DATA_PHASE\n",
++					    __func__);
++				if (pcd->request_config) {
++					/* Complete STATUS PHASE */
++					ep->fh_ep.is_in = 1;
++					pcd->ep0state = EP0_IN_STATUS_PHASE;
++				}
++				break;
++
++			case EP0_IN_STATUS_PHASE:
++				FH_DEBUGPL(DBG_PCD,
++					    "%s ep0: EP0_IN_STATUS_PHASE\n",
++					    __func__);
++				break;
++
++			default:
++				FH_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
++					    pcd->ep0state);
++				FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++				return -FH_E_SHUTDOWN;
++			}
++
++			ep->fh_ep.dma_addr = dma_buf;
++			ep->fh_ep.start_xfer_buff = buf;
++			ep->fh_ep.xfer_buff = buf;
++			ep->fh_ep.xfer_len = buflen;
++			ep->fh_ep.xfer_count = 0;
++			ep->fh_ep.sent_zlp = 0;
++			ep->fh_ep.total_len = ep->fh_ep.xfer_len;
++
++			if (zero) {
++				if ((ep->fh_ep.xfer_len %
++				     ep->fh_ep.maxpacket == 0)
++				    && (ep->fh_ep.xfer_len != 0)) {
++					ep->fh_ep.sent_zlp = 1;
++				}
++
++			}
++
++			fh_otg_ep0_start_transfer(GET_CORE_IF(pcd),
++						   &ep->fh_ep);
++		}		// non-ep0 endpoints
++		else {
++#ifdef FH_UTE_CFI
++			if (ep->fh_ep.buff_mode != BM_STANDARD) {
++				/* store the request length */
++				ep->fh_ep.cfi_req_len = buflen;
++				pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
++								ep, req);
++			} else {
++#endif
++				max_transfer =
++				    GET_CORE_IF(ep->pcd)->core_params->
++				    max_transfer_size;
++
++				/* Setup and start the Transfer */
++				if (req->dw_align_buf) {
++					if (ep->fh_ep.is_in)
++						fh_memcpy(req->dw_align_buf,
++							   buf, buflen);
++					ep->fh_ep.dma_addr =
++					    req->dw_align_buf_dma;
++					ep->fh_ep.start_xfer_buff =
++					    req->dw_align_buf;
++					ep->fh_ep.xfer_buff =
++					    req->dw_align_buf;
++				} else {
++					ep->fh_ep.dma_addr = dma_buf;
++					ep->fh_ep.start_xfer_buff = buf;
++					ep->fh_ep.xfer_buff = buf;
++				}
++				ep->fh_ep.xfer_len = 0;
++				ep->fh_ep.xfer_count = 0;
++				ep->fh_ep.sent_zlp = 0;
++				ep->fh_ep.total_len = buflen;
++
++				ep->fh_ep.maxxfer = max_transfer;
++				if (GET_CORE_IF(pcd)->dma_desc_enable) {
++					uint32_t out_max_xfer =
++					    DDMA_MAX_TRANSFER_SIZE -
++					    (DDMA_MAX_TRANSFER_SIZE % 4);
++					if (ep->fh_ep.is_in) {
++						if (ep->fh_ep.maxxfer >
++						    DDMA_MAX_TRANSFER_SIZE) {
++							ep->fh_ep.maxxfer =
++							    DDMA_MAX_TRANSFER_SIZE;
++						}
++					} else {
++						if (ep->fh_ep.maxxfer >
++						    out_max_xfer) {
++							ep->fh_ep.maxxfer =
++							    out_max_xfer;
++						}
++					}
++				}
++				if (ep->fh_ep.maxxfer < ep->fh_ep.total_len) {
++					ep->fh_ep.maxxfer -=
++					    (ep->fh_ep.maxxfer %
++					     ep->fh_ep.maxpacket);
++				}
++
++				if (zero) {
++					if ((ep->fh_ep.total_len %
++					     ep->fh_ep.maxpacket == 0)
++					    && (ep->fh_ep.total_len != 0)) {
++						ep->fh_ep.sent_zlp = 1;
++					}
++				}
++#ifdef FH_UTE_CFI
++			}
++#endif
++			fh_otg_ep_start_transfer(GET_CORE_IF(pcd),
++						  &ep->fh_ep);
++		}
++	}
++
++	if (req != 0) {
++		++pcd->request_pending;
++		FH_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
++		if (ep->fh_ep.is_in && ep->stopped
++		    && !(GET_CORE_IF(pcd)->dma_enable)) {
++			/** @todo NGS Create a function for this. */
++			diepmsk_data_t diepmsk = {.d32 = 0 };
++			diepmsk.b.intktxfemp = 1;
++			if (GET_CORE_IF(pcd)->multiproc_int_enable) {
++				FH_MODIFY_REG32(&GET_CORE_IF(pcd)->
++						 dev_if->dev_global_regs->diepeachintmsk
++						 [ep->fh_ep.num], 0,
++						 diepmsk.d32);
++			} else {
++				FH_MODIFY_REG32(&GET_CORE_IF(pcd)->
++						 dev_if->dev_global_regs->
++						 diepmsk, 0, diepmsk.d32);
++			}
++
++		}
++	}
++	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++
++	return 0;
++}
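++
++/*
++ * A hedged usage sketch for fh_otg_pcd_ep_queue() (buffer and handle names
++ * are hypothetical): the caller supplies both CPU and DMA addresses of the
++ * transfer buffer, a zero flag requesting a trailing ZLP, an opaque request
++ * handle returned in the completion callback, and an atomic-allocation flag
++ * for use in interrupt context:
++ *
++ *	ret = fh_otg_pcd_ep_queue(pcd, usb_ep, buf, buf_dma, len,
++ *				   0, req_handle, 1);
++ */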
++
++int fh_otg_pcd_ep_dequeue(fh_otg_pcd_t * pcd, void *ep_handle,
++			   void *req_handle)
++{
++	fh_irqflags_t flags;
++	fh_otg_pcd_request_t *req;
++	fh_otg_pcd_ep_t *ep;
++
++	ep = get_ep_from_handle(pcd, ep_handle);
++	if (!ep || (!ep->desc && ep->fh_ep.num != 0)) {
++		FH_WARN("bad argument\n");
++		return -FH_E_INVALID;
++	}
++
++	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++
++	/* make sure it's actually queued on this endpoint */
++	FH_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
++		if (req->priv == (void *)req_handle) {
++			break;
++		}
++	}
++
++	if (req->priv != (void *)req_handle) {
++		FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++		return -FH_E_INVALID;
++	}
++
++	if (!FH_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
++		fh_otg_request_done(ep, req, -FH_E_RESTART);
++	} else {
++		req = NULL;
++	}
++
++	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++
++	return req ? 0 : -FH_E_SHUTDOWN;
++}
++
++int fh_otg_pcd_ep_halt(fh_otg_pcd_t * pcd, void *ep_handle, int value)
++{
++	fh_otg_pcd_ep_t *ep;
++	fh_irqflags_t flags;
++	int retval = 0;
++
++	ep = get_ep_from_handle(pcd, ep_handle);
++
++	if (!ep || (!ep->desc && ep != &pcd->ep0) ||
++	    (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
++		FH_WARN("%s, bad ep\n", __func__);
++		return -FH_E_INVALID;
++	}
++
++	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++	if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
++		FH_WARN("%d %s XFer In process\n", ep->fh_ep.num,
++			 ep->fh_ep.is_in ? "IN" : "OUT");
++		retval = -FH_E_AGAIN;
++	} else if (value == 0) {
++		ep->fh_ep.stall_clear_flag = 0;
++		fh_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->fh_ep);
++	} else if (value == 1) {
++	stall:
++		if (ep->fh_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
++			dtxfsts_data_t txstatus;
++			fifosize_data_t txfifosize;
++
++			txfifosize.d32 =
++			    FH_READ_REG32(&GET_CORE_IF(pcd)->
++					   core_global_regs->dtxfsiz[ep->fh_ep.
++								     tx_fifo_num]);
++			txstatus.d32 =
++			    FH_READ_REG32(&GET_CORE_IF(pcd)->
++					   dev_if->in_ep_regs[ep->fh_ep.num]->
++					   dtxfsts);
++
++			if (txstatus.b.txfspcavail < txfifosize.b.depth) {
++				FH_WARN("%s() Data In Tx Fifo\n", __func__);
++				retval = -FH_E_AGAIN;
++			} else {
++				if (ep->fh_ep.num == 0) {
++					pcd->ep0state = EP0_STALL;
++				}
++
++				ep->stopped = 1;
++				fh_otg_ep_set_stall(GET_CORE_IF(pcd),
++						     &ep->fh_ep);
++			}
++		} else {
++			if (ep->fh_ep.num == 0) {
++				pcd->ep0state = EP0_STALL;
++			}
++
++			ep->stopped = 1;
++			fh_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->fh_ep);
++		}
++	} else if (value == 2) {
++		ep->fh_ep.stall_clear_flag = 0;
++	} else if (value == 3) {
++		ep->fh_ep.stall_clear_flag = 1;
++		goto stall;
++	}
++
++	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++
++	return retval;
++}
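++
++/*
++ * Halt value semantics, summarizing the implementation above:
++ *
++ *	value == 0: clear the stall (and the stall_clear_flag)
++ *	value == 1: set the stall (EP0 enters EP0_STALL)
++ *	value == 2: clear only the stall_clear_flag
++ *	value == 3: set the stall with stall_clear_flag = 1 (sticky stall)
++ *
++ * e.g. fh_otg_pcd_ep_halt(pcd, ep_handle, 3); requests a functional stall
++ * that is not cleared automatically.
++ */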
++
++/**
++ * This function initiates remote wakeup of the host from suspend state.
++ */
++void fh_otg_pcd_rem_wkup_from_suspend(fh_otg_pcd_t * pcd, int set)
++{
++	dctl_data_t dctl = { 0 };
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	dsts_data_t dsts;
++
++	dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
++	if (!dsts.b.suspsts) {
++		FH_WARN("Remote wakeup while is not in suspend state\n");
++	}
++	/* Check if DEVICE_REMOTE_WAKEUP feature enabled */
++	if (pcd->remote_wakeup_enable) {
++		if (set) {
++
++			if (core_if->adp_enable) {
++				gpwrdn_data_t gpwrdn;
++
++				fh_otg_adp_probe_stop(core_if);
++
++				/* Mask SRP detected interrupt from Power Down Logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.srp_det_msk = 1;
++				FH_MODIFY_REG32(&core_if->
++						 core_global_regs->gpwrdn,
++						 gpwrdn.d32, 0);
++
++				/* Disable Power Down Logic */
++				gpwrdn.d32 = 0;
++				gpwrdn.b.pmuactv = 1;
++				FH_MODIFY_REG32(&core_if->
++						 core_global_regs->gpwrdn,
++						 gpwrdn.d32, 0);
++
++				/*
++				 * Initialize the Core for Device mode.
++				 */
++				core_if->op_state = B_PERIPHERAL;
++				fh_otg_core_init(core_if);
++				fh_otg_enable_global_interrupts(core_if);
++				cil_pcd_start(core_if);
++
++				fh_otg_initiate_srp(core_if);
++			}
++
++			dctl.b.rmtwkupsig = 1;
++			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
++					 dctl, 0, dctl.d32);
++			FH_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
++
++			fh_mdelay(2);
++			FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
++					 dctl, dctl.d32, 0);
++			FH_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
++		}
++	} else {
++		FH_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
++	}
++}
++
++#ifdef CONFIG_USB_FH_OTG_LPM
++/**
++ * This function initiates remote wakeup of the host from L1 sleep state.
++ */
++void fh_otg_pcd_rem_wkup_from_sleep(fh_otg_pcd_t * pcd, int set)
++{
++	glpmcfg_data_t lpmcfg;
++	pcgcctl_data_t pcgcctl = {.d32 = 0 };
++
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++
++	/* Check if we are in L1 state */
++	if (!lpmcfg.b.prt_sleep_sts) {
++		FH_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
++		return;
++	}
++
++	/* Check if host allows remote wakeup */
++	if (!lpmcfg.b.rem_wkup_en) {
++		FH_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
++		return;
++	}
++
++	/* Check if Resume OK */
++	if (!lpmcfg.b.sleep_state_resumeok) {
++		FH_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
++		return;
++	}
++
++	lpmcfg.d32 = FH_READ_REG32(&core_if->core_global_regs->glpmcfg);
++	lpmcfg.b.en_utmi_sleep = 0;
++	lpmcfg.b.hird_thres &= (~(1 << 4));
++
++	/* Clear Enbl_L1Gating bit. */
++	pcgcctl.b.enbl_sleep_gating = 1;
++	FH_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
++
++	FH_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
++
++	if (set) {
++		dctl_data_t dctl = {.d32 = 0 };
++		dctl.b.rmtwkupsig = 1;
++		/* Set RmtWkUpSig bit to start remote wakeup signaling.
++		 * Hardware will automatically clear this bit.
++		 */
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
++				 0, dctl.d32);
++		FH_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
++	}
++}
++#endif
++
++/**
++ * Performs remote wakeup.
++ */
++void fh_otg_pcd_remote_wakeup(fh_otg_pcd_t * pcd, int set)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_irqflags_t flags;
++	if (fh_otg_is_device_mode(core_if)) {
++		FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++#ifdef CONFIG_USB_FH_OTG_LPM
++		if (core_if->lx_state == FH_OTG_L1) {
++			fh_otg_pcd_rem_wkup_from_sleep(pcd, set);
++		} else {
++#endif
++			fh_otg_pcd_rem_wkup_from_suspend(pcd, set);
++#ifdef CONFIG_USB_FH_OTG_LPM
++		}
++#endif
++		FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++	}
++	return;
++}
++
++void fh_otg_pcd_disconnect_us(fh_otg_pcd_t * pcd, int no_of_usecs)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	dctl_data_t dctl = { 0 };
++
++	if (fh_otg_is_device_mode(core_if)) {
++		dctl.b.sftdiscon = 1;
++		FH_PRINTF("Soft disconnect for %d microseconds\n", no_of_usecs);
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
++		fh_udelay(no_of_usecs);
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
++	} else {
++		FH_PRINTF("NOT SUPPORTED IN HOST MODE\n");
++	}
++	return;
++}
++
++int fh_otg_pcd_wakeup(fh_otg_pcd_t * pcd)
++{
++	dsts_data_t dsts;
++	gotgctl_data_t gotgctl;
++
++	/*
++	 * This function starts the Protocol if no session is in progress. If
++	 * a session is already in progress, but the device is suspended,
++	 * remote wakeup signaling is started.
++	 */
++
++	/* Check if valid session */
++	gotgctl.d32 =
++	    FH_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
++	if (gotgctl.b.bsesvld) {
++		/* Check if suspend state */
++		dsts.d32 =
++		    FH_READ_REG32(&
++				   (GET_CORE_IF(pcd)->dev_if->
++				    dev_global_regs->dsts));
++		if (dsts.b.suspsts) {
++			fh_otg_pcd_remote_wakeup(pcd, 1);
++		}
++	} else {
++		fh_otg_pcd_initiate_srp(pcd);
++	}
++
++	return 0;
++}
++
++/**
++ * Start the SRP timer to detect when the SRP does not complete within
++ * 6 seconds.
++ *
++ * @param pcd the pcd structure.
++ */
++void fh_otg_pcd_initiate_srp(fh_otg_pcd_t * pcd)
++{
++	fh_irqflags_t flags;
++	FH_SPINLOCK_IRQSAVE(pcd->lock, &flags);
++	fh_otg_initiate_srp(GET_CORE_IF(pcd));
++	FH_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
++}
++
++int fh_otg_pcd_get_frame_number(fh_otg_pcd_t * pcd)
++{
++	return fh_otg_get_frame_number(GET_CORE_IF(pcd));
++}
++
++int fh_otg_pcd_is_lpm_enabled(fh_otg_pcd_t * pcd)
++{
++	return GET_CORE_IF(pcd)->core_params->lpm_enable;
++}
++
++int fh_otg_pcd_is_besl_enabled(fh_otg_pcd_t * pcd)
++{
++	return GET_CORE_IF(pcd)->core_params->besl_enable;
++}
++
++int fh_otg_pcd_get_param_baseline_besl(fh_otg_pcd_t * pcd)
++{
++	return GET_CORE_IF(pcd)->core_params->baseline_besl;
++}
++
++int fh_otg_pcd_get_param_deep_besl(fh_otg_pcd_t * pcd)
++{
++	return GET_CORE_IF(pcd)->core_params->deep_besl;
++}
++
++uint32_t get_b_hnp_enable(fh_otg_pcd_t * pcd)
++{
++	return pcd->b_hnp_enable;
++}
++
++uint32_t get_a_hnp_support(fh_otg_pcd_t * pcd)
++{
++	return pcd->a_hnp_support;
++}
++
++uint32_t get_a_alt_hnp_support(fh_otg_pcd_t * pcd)
++{
++	return pcd->a_alt_hnp_support;
++}
++
++int fh_otg_pcd_get_rmwkup_enable(fh_otg_pcd_t * pcd)
++{
++	return pcd->remote_wakeup_enable;
++}
++
++#endif /* FH_HOST_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.h
+new file mode 100644
+index 00000000..1d2a66bd
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd.h
+@@ -0,0 +1,268 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd.h $
++ * $Revision: #49 $
++ * $Date: 2013/05/16 $
++ * $Change: 2231774 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_HOST_ONLY
++#if !defined(__FH_PCD_H__)
++#define __FH_PCD_H__
++
++#include "fh_otg_os_dep.h"
++#include "../fh_common_port/usb.h"
++#include "fh_otg_cil.h"
++#include "fh_otg_pcd_if.h"
++struct cfiobject;
++
++/**
++ * @file
++ *
++ * This file contains the structures, constants, and interfaces for
++ * the Peripheral Controller Driver (PCD).
++ *
++ * The Peripheral Controller Driver (PCD) for Linux will implement the
++ * Gadget API, so that the existing Gadget drivers can be used. For
++ * the Mass Storage Function driver the File-backed USB Storage Gadget
++ * (FBS) driver will be used.  The FBS driver supports the
++ * Control-Bulk (CB), Control-Bulk-Interrupt (CBI), and Bulk-Only
++ * transports.
++ *
++ */
++
++/** Invalid DMA Address */
++#define FH_DMA_ADDR_INVALID	(~(fh_dma_t)0)
++
++/** Max Transfer size for any EP */
++#define DDMA_MAX_TRANSFER_SIZE 65535
++
++/**
++ * Get the pointer to the core_if from the pcd pointer.
++ */
++#define GET_CORE_IF( _pcd ) (_pcd->core_if)
++
++/**
++ * States of EP0.
++ */
++typedef enum ep0_state {
++	EP0_DISCONNECT,		/* no host */
++	EP0_IDLE,
++	EP0_IN_DATA_PHASE,
++	EP0_OUT_DATA_PHASE,
++	EP0_IN_STATUS_PHASE,
++	EP0_OUT_STATUS_PHASE,
++	EP0_STALL,
++} ep0state_e;
++
++/** Forward declaration. */
++struct fh_otg_pcd;
++
++/** FH_otg iso request structure.
++ *
++ */
++typedef struct usb_iso_request fh_otg_pcd_iso_request_t;
++
++#ifdef FH_UTE_PER_IO
++
++/**
++ * This shall be the exact analogy of the same type structure defined in the
++ * usb_gadget.h. Each descriptor contains
++ */
++struct fh_iso_pkt_desc_port {
++	uint32_t offset;
++	uint32_t length;	/* expected length */
++	uint32_t actual_length;
++	uint32_t status;
++};
++
++struct fh_iso_xreq_port {
++	/** transfer/submission flag */
++	uint32_t tr_sub_flags;
++	/** Start the request ASAP */
++#define FH_EREQ_TF_ASAP		0x00000002
++	/** Just enqueue the request w/o initiating a transfer */
++#define FH_EREQ_TF_ENQUEUE		0x00000004
++
++	/**
++	* count of ISO packets attached to this request - shall
++	* not exceed the pio_alloc_pkt_count
++	*/
++	uint32_t pio_pkt_count;
++	/** count of ISO packets allocated for this request */
++	uint32_t pio_alloc_pkt_count;
++	/** number of ISO packet errors */
++	uint32_t error_count;
++	/** reserved for future extension */
++	uint32_t res;
++	/** Will be allocated and freed in the UTE gadget and based on the CFC value */
++	struct fh_iso_pkt_desc_port *per_io_frame_descs;
++};
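++
++/*
++ * A minimal initialization sketch for an extended ISOC request, assuming
++ * the caller has allocated pkt_cnt packet descriptors (names hypothetical):
++ *
++ *	struct fh_iso_xreq_port ereq = {
++ *		.tr_sub_flags = FH_EREQ_TF_ASAP,
++ *		.pio_pkt_count = pkt_cnt,
++ *		.pio_alloc_pkt_count = pkt_cnt,
++ *		.per_io_frame_descs = descs,
++ *	};
++ */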
++#endif
++/** FH_otg request structure.
++ * This structure is a list of requests.
++ */
++typedef struct fh_otg_pcd_request {
++	void *priv;
++	void *buf;
++	fh_dma_t dma;
++	uint32_t length;
++	uint32_t actual;
++	unsigned sent_zlp:1;
++    /**
++     * Used instead of original buffer if
++     * it(physical address) is not dword-aligned.
++     **/
++	uint8_t *dw_align_buf;
++	fh_dma_t dw_align_buf_dma;
++
++	 FH_CIRCLEQ_ENTRY(fh_otg_pcd_request) queue_entry;
++#ifdef FH_UTE_PER_IO
++	struct fh_iso_xreq_port ext_req;
++	//void *priv_ereq_nport; /*  */
++#endif
++} fh_otg_pcd_request_t;
++
++FH_CIRCLEQ_HEAD(req_list, fh_otg_pcd_request);
++
++/** PCD EP structure.
++ * This structure describes an EP, there is an array of EPs in the PCD
++ * structure.
++ */
++typedef struct fh_otg_pcd_ep {
++	/** USB EP Descriptor */
++	const usb_endpoint_descriptor_t *desc;
++
++	/** queue of fh_otg_pcd_requests. */
++	struct req_list queue;
++	unsigned stopped:1;
++	unsigned disabling:1;
++	unsigned dma:1;
++	unsigned queue_sof:1;
++
++#ifdef FH_EN_ISOC
++	/** ISOC req handle passed */
++	void *iso_req_handle;
++#endif				//_EN_ISOC_
++
++	/** FH_otg ep data. */
++	fh_ep_t fh_ep;
++
++	/** Pointer to PCD */
++	struct fh_otg_pcd *pcd;
++
++	void *priv;
++} fh_otg_pcd_ep_t;
++
++/** FH_otg PCD Structure.
++ * This structure encapsulates the data for the fh_otg PCD.
++ */
++struct fh_otg_pcd {
++	const struct fh_otg_pcd_function_ops *fops;
++	/** The FH otg device pointer */
++	struct fh_otg_device *otg_dev;
++	/** Core Interface */
++	fh_otg_core_if_t *core_if;
++	/** State of EP0 */
++	ep0state_e ep0state;
++	/** EP0 Request is pending */
++	unsigned ep0_pending:1;
++	/** Indicates when SET CONFIGURATION Request is in process */
++	unsigned request_config:1;
++	/** The state of the Remote Wakeup Enable. */
++	unsigned remote_wakeup_enable:1;
++	/** The state of the B-Device HNP Enable. */
++	unsigned b_hnp_enable:1;
++	/** The state of A-Device HNP Support. */
++	unsigned a_hnp_support:1;
++	/** The state of the A-Device Alt HNP support. */
++	unsigned a_alt_hnp_support:1;
++	/** Count of pending Requests */
++	unsigned request_pending;
++
++	/** SETUP packet for EP0
++	 * This structure is allocated as a DMA buffer on PCD initialization
++	 * with enough space for up to 3 setup packets.
++	 */
++	union {
++		usb_device_request_t req;
++		uint32_t d32[2];
++	} *setup_pkt;
++
++	fh_dma_t setup_pkt_dma_handle;
++
++	/* Additional buffer and flag for CTRL_WR premature case */
++	uint8_t *backup_buf;
++	unsigned data_terminated;
++
++	/** 2-byte dma buffer used to return status from GET_STATUS */
++	uint16_t *status_buf;
++	fh_dma_t status_buf_dma_handle;
++
++	/** EP0 */
++	fh_otg_pcd_ep_t ep0;
++
++	/** Array of IN EPs. */
++	fh_otg_pcd_ep_t in_ep[MAX_EPS_CHANNELS - 1];
++	/** Array of OUT EPs. */
++	fh_otg_pcd_ep_t out_ep[MAX_EPS_CHANNELS - 1];
++	/** number of valid EPs in the above array. */
++//        unsigned      num_eps : 4;
++	fh_spinlock_t *lock;
++
++	/** Tasklet to defer starting of TEST mode transmissions until
++	 *	Status Phase has been completed.
++	 */
++	fh_tasklet_t *test_mode_tasklet;
++
++	/** Tasklet to delay starting of xfer in DMA mode */
++	fh_tasklet_t *start_xfer_tasklet;
++
++	/** The test mode to enter when the tasklet is executed. */
++	unsigned test_mode;
++	/** The cfi_api structure that implements most of the CFI API
++	 * and OTG specific core configuration functionality
++	 */
++#ifdef FH_UTE_CFI
++	struct cfiobject *cfi;
++#endif
++
++};
++
++//FIXME: these functions should be static, and these prototypes should be removed
++extern void fh_otg_request_nuke(fh_otg_pcd_ep_t * ep);
++extern void fh_otg_request_done(fh_otg_pcd_ep_t * ep,
++				fh_otg_pcd_request_t * req, int32_t status);
++
++void fh_otg_iso_buffer_done(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * ep,
++			    void *req_handle);
++extern void fh_otg_pcd_start_iso_ddma(fh_otg_core_if_t * core_if, 
++				fh_otg_pcd_ep_t * ep);
++
++extern void do_test_mode(void *data);
++#endif
++#endif /* FH_HOST_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_if.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_if.h
+new file mode 100644
+index 00000000..d82458ce
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_if.h
+@@ -0,0 +1,366 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd_if.h $
++ * $Revision: #13 $
++ * $Date: 2012/12/12 $
++ * $Change: 2125019 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_HOST_ONLY
++
++#if !defined(__FH_PCD_IF_H__)
++#define __FH_PCD_IF_H__
++
++//#include "fh_os.h"
++#include "fh_otg_core_if.h"
++
++/** @file
++ * This file defines FH_OTG PCD Core API.
++ */
++
++struct fh_otg_pcd;
++typedef struct fh_otg_pcd fh_otg_pcd_t;
++
++/** Maxpacket size for EP0 */
++#define MAX_EP0_SIZE	64
++/** Maxpacket size for any EP */
++#define MAX_PACKET_SIZE 1024
++
++/** @name Function Driver Callbacks */
++/** @{ */
++
++/** This function will be called whenever a previously queued request has
++ * completed.  The status value will be set to -FH_E_SHUTDOWN to indicate a
++ * failed or aborted transfer, or -FH_E_RESTART to indicate the device was reset,
++ * or -FH_E_TIMEOUT to indicate it timed out, or -FH_E_INVALID to indicate invalid
++ * parameters. */
++typedef int (*fh_completion_cb_t) (fh_otg_pcd_t * pcd, void *ep_handle,
++				    void *req_handle, int32_t status,
++				    uint32_t actual);
++/**
++ * This function will be called whenever a previously queued ISOC request has
++ * completed. Count of ISOC packets could be read using fh_otg_pcd_get_iso_packet_count
++ * function.
++ * The status of each ISOC packet could be read using fh_otg_pcd_get_iso_packet_*
++ * functions.
++ */
++typedef int (*fh_isoc_completion_cb_t) (fh_otg_pcd_t * pcd, void *ep_handle,
++					 void *req_handle, int proc_buf_num);
++/** This function should handle any SETUP request that cannot be handled by the
++ * PCD Core.  This includes most GET_DESCRIPTORs, SET_CONFIGs, any
++ * class-specific requests, etc.  The function must be non-blocking.
++ *
++ * Returns 0 on success.
++ * Returns -FH_E_NOT_SUPPORTED if the request is not supported.
++ * Returns -FH_E_INVALID if the setup request had invalid parameters or bytes.
++ * Returns -FH_E_SHUTDOWN on any other error. */
++typedef int (*fh_setup_cb_t) (fh_otg_pcd_t * pcd, uint8_t * bytes);
++/** This is called whenever the device has been disconnected.  The function
++ * driver should take appropriate action to clean up all pending requests in the
++ * PCD Core, remove all endpoints (except ep0), and initialize back to reset
++ * state. */
++typedef int (*fh_disconnect_cb_t) (fh_otg_pcd_t * pcd);
++/** This function is called when the device has been connected. */
++typedef int (*fh_connect_cb_t) (fh_otg_pcd_t * pcd, int speed);
++/** This function is called when the device has been suspended. */
++typedef int (*fh_suspend_cb_t) (fh_otg_pcd_t * pcd);
++/** This function is called when the device has received an LPM token, i.e.
++ * the device has been put into the sleep state. */
++typedef int (*fh_sleep_cb_t) (fh_otg_pcd_t * pcd);
++/** This function is called when the device has been resumed
++ * from the suspend (L2) or L1 sleep state. */
++typedef int (*fh_resume_cb_t) (fh_otg_pcd_t * pcd);
++/** This function is called whenever the HNP parameters have been changed.
++ * The user can call the get_b_hnp_enable, get_a_hnp_support and
++ * get_a_alt_hnp_support functions to read them. */
++typedef int (*fh_hnp_params_changed_cb_t) (fh_otg_pcd_t * pcd);
++/** This function is called whenever a USB RESET is detected. */
++typedef int (*fh_reset_cb_t) (fh_otg_pcd_t * pcd);
++
++typedef int (*cfi_setup_cb_t) (fh_otg_pcd_t * pcd, void *ctrl_req_bytes);
++
++/**
++ *
++ * @param ep_handle	Void pointer to the usb_ep structure
++ * @param ereq_port Pointer to the extended request structure created in the
++ *					portable part.
++ */
++typedef int (*xiso_completion_cb_t) (fh_otg_pcd_t * pcd, void *ep_handle,
++				     void *req_handle, int32_t status,
++				     void *ereq_port);
++/** Function Driver Ops Data Structure */
++struct fh_otg_pcd_function_ops {
++	fh_connect_cb_t connect;
++	fh_disconnect_cb_t disconnect;
++	fh_setup_cb_t setup;
++	fh_completion_cb_t complete;
++	fh_isoc_completion_cb_t isoc_complete;
++	fh_suspend_cb_t suspend;
++	fh_sleep_cb_t sleep;
++	fh_resume_cb_t resume;
++	fh_reset_cb_t reset;
++	fh_hnp_params_changed_cb_t hnp_changed;
++	cfi_setup_cb_t cfi_setup;
++#ifdef FH_UTE_PER_IO
++	xiso_completion_cb_t xisoc_complete;
++#endif
++};
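++
++/* Illustrative sketch only (not part of the Synopsys sources): a gadget
++ * glue layer would typically provide a statically initialized ops table.
++ * All my_* handlers below are hypothetical.
++ */
++#if 0
++static const struct fh_otg_pcd_function_ops my_fops = {
++	.connect = my_connect,		/* receives the enumerated speed */
++	.disconnect = my_disconnect,	/* quiesce and reset gadget state */
++	.setup = my_setup,		/* decode the 8 SETUP bytes */
++	.complete = my_complete,	/* per-request completion */
++};
++#endif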
++/** @} */
++
++/** @name Function Driver Functions */
++/** @{ */
++
++/** Call this function to get a pointer to a fh_otg_pcd_t;
++ * this pointer is then used with all other PCD API functions.
++ *
++ * @param core_if The FH_OTG Core
++ */
++extern fh_otg_pcd_t *fh_otg_pcd_init(fh_otg_core_if_t * core_if);
++
++/** Frees the PCD allocated by fh_otg_pcd_init.
++ *
++ * @param pcd The PCD
++ */
++extern void fh_otg_pcd_remove(fh_otg_pcd_t * pcd);
++
++/** Call this to bind the function driver to the PCD Core.
++ *
++ * @param pcd Pointer to the fh_otg_pcd_t returned by the fh_otg_pcd_init function.
++ * @param fops The Function Driver Ops data structure containing pointers to all callbacks.
++ */
++extern void fh_otg_pcd_start(fh_otg_pcd_t * pcd,
++			      const struct fh_otg_pcd_function_ops *fops);
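++
++/* Illustrative sketch only: the expected bring-up order, assuming a
++ * core_if obtained from the CIL layer and the hypothetical my_fops
++ * table shown above.
++ */
++#if 0
++	fh_otg_pcd_t *pcd = fh_otg_pcd_init(core_if);
++	if (!pcd)
++		return -FH_E_NO_MEMORY;
++	/* Callbacks in my_fops may be invoked from this point on. */
++	fh_otg_pcd_start(pcd, &my_fops);
++	/* ... gadget runs ... */
++	fh_otg_pcd_remove(pcd);
++#endif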
++
++/** Enables an endpoint for use.  This function enables an endpoint in
++ * the PCD.  The endpoint is described by the ep_desc which has the
++ * same format as a USB ep descriptor.  The ep_handle parameter is used to refer
++ * to the endpoint from other API functions and in callbacks.  Normally this
++ * should be called after a SET_CONFIGURATION/SET_INTERFACE to configure the
++ * core for that interface.
++ *
++ * Returns -FH_E_INVALID if invalid parameters were passed.
++ * Returns -FH_E_SHUTDOWN if any other error occurred.
++ * Returns 0 on success.
++ *
++ * @param pcd The PCD
++ * @param ep_desc Endpoint descriptor
++ * @param ep_handle Handle for the endpoint, used to identify it in later calls.
++ */
++extern int fh_otg_pcd_ep_enable(fh_otg_pcd_t * pcd,
++				 const uint8_t * ep_desc, void *ep_handle);
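++
++/* Illustrative sketch only: ep_desc uses the standard USB endpoint
++ * descriptor layout, so a bulk IN endpoint 1 with a 512-byte max packet
++ * could be enabled as below (values are hypothetical).
++ */
++#if 0
++	static const uint8_t bulk_in_desc[7] = {
++		0x07,		/* bLength */
++		0x05,		/* bDescriptorType = ENDPOINT */
++		0x81,		/* bEndpointAddress = EP1 IN */
++		0x02,		/* bmAttributes = bulk */
++		0x00, 0x02,	/* wMaxPacketSize = 512, little-endian */
++		0x00		/* bInterval */
++	};
++	fh_otg_pcd_ep_enable(pcd, bulk_in_desc, my_ep_handle);
++#endif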
++
++/** Disable the endpoint referenced by ep_handle.
++ *
++ * Returns -FH_E_INVALID if invalid parameters were passed.
++ * Returns -FH_E_SHUTDOWN if any other error occurred.
++ * Returns 0 on success. */
++extern int fh_otg_pcd_ep_disable(fh_otg_pcd_t * pcd, void *ep_handle);
++
++/** Queue a data transfer request on the endpoint referenced by ep_handle.
++ * After the transfer completes, the complete callback will be called with
++ * the request status.
++ *
++ * @param pcd The PCD
++ * @param ep_handle The handle of the endpoint
++ * @param buf The buffer for the data
++ * @param dma_buf The DMA buffer for the data
++ * @param buflen The length of the data transfer
++ * @param zero Specifies whether to terminate the transfer with a zero-length packet.
++ * @param req_handle Set this handle to any value to use to reference this
++ * request in the ep_dequeue function or from the complete callback
++ * @param atomic_alloc Specifies whether the driver should use atomic
++ * allocations for its internal data structures.
++ *
++ * Returns -FH_E_INVALID if invalid parameters were passed.
++ * Returns -FH_E_SHUTDOWN if any other error occurred.
++ * Returns 0 on success. */
++extern int fh_otg_pcd_ep_queue(fh_otg_pcd_t * pcd, void *ep_handle,
++				uint8_t * buf, fh_dma_t dma_buf,
++				uint32_t buflen, int zero, void *req_handle,
++				int atomic_alloc);
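++
++/* Illustrative sketch only: queue one transfer on the endpoint enabled
++ * above; req_handle is an opaque cookie echoed back in the complete
++ * callback (buf/dma_buf are assumed to be a coherent mapping pair).
++ */
++#if 0
++	fh_otg_pcd_ep_queue(pcd, my_ep_handle, buf, dma_buf, buflen,
++			     0 /* zero */, my_req /* req_handle */,
++			     0 /* atomic_alloc */);
++#endif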
++#ifdef FH_UTE_PER_IO
++/**
++ *
++ * @param ereq_nonport	Pointer to the extended request part of the
++ *						usb_request structure defined in usb_gadget.h file.
++ */
++extern int fh_otg_pcd_xiso_ep_queue(fh_otg_pcd_t * pcd, void *ep_handle,
++				     uint8_t * buf, fh_dma_t dma_buf,
++				     uint32_t buflen, int zero,
++				     void *req_handle, int atomic_alloc,
++				     void *ereq_nonport);
++
++#endif
++
++/** De-queue the specified data transfer that has not yet completed.
++ *
++ * Returns -FH_E_INVALID if invalid parameters were passed.
++ * Returns -FH_E_SHUTDOWN if any other error occurred.
++ * Returns 0 on success. */
++extern int fh_otg_pcd_ep_dequeue(fh_otg_pcd_t * pcd, void *ep_handle,
++				  void *req_handle);
++
++/** Halt (STALL) an endpoint or clear it.
++ *
++ * Returns -FH_E_INVALID if invalid parameters were passed.
++ * Returns -FH_E_SHUTDOWN if any other error occurred.
++ * Returns -FH_E_AGAIN if the STALL cannot be sent and must be tried again later.
++ * Returns 0 on success. */
++extern int fh_otg_pcd_ep_halt(fh_otg_pcd_t * pcd, void *ep_handle, int value);
++
++/** This function should be called on every hardware interrupt */
++extern int32_t fh_otg_pcd_handle_intr(fh_otg_pcd_t * pcd);
++
++/** This function returns the current frame number. */
++extern int fh_otg_pcd_get_frame_number(fh_otg_pcd_t * pcd);
++
++/**
++ * Start isochronous transfers on the endpoint referenced by ep_handle.
++ * For isochronous transfers double buffering is used.
++ * After each buffer is processed, the complete callback is called with
++ * the status of each transaction.
++ *
++ * @param pcd The PCD
++ * @param ep_handle The handle of the endpoint
++ * @param buf0 The virtual address of first data buffer
++ * @param buf1 The virtual address of second data buffer
++ * @param dma0 The DMA address of first data buffer
++ * @param dma1 The DMA address of second data buffer
++ * @param sync_frame Data pattern frame number
++ * @param dp_frame Data size for pattern frame
++ * @param data_per_frame Data size for regular frame
++ * @param start_frame Frame number to start transfers, if -1 then start transfers ASAP.
++ * @param buf_proc_intrvl Interval of ISOC Buffer processing
++ * @param req_handle Handle of ISOC request
++ * @param atomic_alloc Specifies whether to perform atomic allocation for
++ * 			internal data structures.
++ *
++ * Returns -FH_E_NO_MEMORY if there is not enough memory.
++ * Returns -FH_E_INVALID if incorrect arguments are passed to the function.
++ * Returns -FH_E_SHUTDOWN for any other error.
++ * Returns 0 on success
++ */
++//extern int fh_otg_pcd_iso_ep_start(fh_otg_pcd_t * pcd, void *ep_handle,
++//				    uint8_t * buf0, uint8_t * buf1,
++//				    fh_dma_t dma0, fh_dma_t dma1,
++//				    int sync_frame, int dp_frame,
++//				    int data_per_frame, int start_frame,
++//				    int buf_proc_intrvl, void *req_handle,
++//				    int atomic_alloc);
++
++/** Stop ISOC transfers on endpoint referenced by ep_handle.
++ *
++ * @param pcd The PCD
++ * @param ep_handle The handle of the endpoint
++ * @param req_handle Handle of ISOC request
++ *
++ * Returns -FH_E_INVALID if incorrect arguments are passed to the function
++ * Returns 0 on success
++ */
++int fh_otg_pcd_iso_ep_stop(fh_otg_pcd_t * pcd, void *ep_handle,
++			    void *req_handle);
++
++/** Get ISOC packet status.
++ *
++ * @param pcd The PCD
++ * @param ep_handle The handle of the endpoint
++ * @param iso_req_handle Isochronous request handle
++ * @param packet Number of packet
++ * @param status Out parameter for returning status
++ * @param actual Out parameter for returning actual length
++ * @param offset Out parameter for returning offset
++ *
++ */
++extern void fh_otg_pcd_get_iso_packet_params(fh_otg_pcd_t * pcd,
++					      void *ep_handle,
++					      void *iso_req_handle, int packet,
++					      int *status, int *actual,
++					      int *offset);
++
++/** Get ISOC packet count.
++ *
++ * @param pcd The PCD
++ * @param ep_handle The handle of the endpoint
++ * @param iso_req_handle Isochronous request handle
++ */
++extern int fh_otg_pcd_get_iso_packet_count(fh_otg_pcd_t * pcd,
++					    void *ep_handle,
++					    void *iso_req_handle);
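++
++/* Illustrative sketch only: inside a (hypothetical) isoc_complete
++ * callback the per-packet results of the just-processed buffer can be
++ * walked as below.
++ */
++#if 0
++	int i, status, actual, offset;
++	int n = fh_otg_pcd_get_iso_packet_count(pcd, ep_handle, iso_req);
++	for (i = 0; i < n; i++)
++		fh_otg_pcd_get_iso_packet_params(pcd, ep_handle, iso_req, i,
++						  &status, &actual, &offset);
++#endif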
++
++/** This function starts the SRP Protocol if no session is in progress. If
++ * a session is already in progress, but the device is suspended,
++ * remote wakeup signaling is started.
++ */
++extern int fh_otg_pcd_wakeup(fh_otg_pcd_t * pcd);
++
++/** This function returns 1 if LPM support is enabled, and 0 otherwise. */
++extern int fh_otg_pcd_is_lpm_enabled(fh_otg_pcd_t * pcd);
++
++/** This function returns 1 if LPM Errata support is enabled, and 0 otherwise. */
++extern int fh_otg_pcd_is_besl_enabled(fh_otg_pcd_t * pcd);
++
++/** This function returns the baseline_besl module parameter. */
++extern int fh_otg_pcd_get_param_baseline_besl(fh_otg_pcd_t * pcd);
++
++/** This function returns the deep_besl module parameter. */
++extern int fh_otg_pcd_get_param_deep_besl(fh_otg_pcd_t * pcd);
++
++/** This function returns 1 if remote wakeup is allowed and 0 otherwise. */
++extern int fh_otg_pcd_get_rmwkup_enable(fh_otg_pcd_t * pcd);
++
++/** Initiate SRP */
++extern void fh_otg_pcd_initiate_srp(fh_otg_pcd_t * pcd);
++
++/** Starts remote wakeup signaling. */
++extern void fh_otg_pcd_remote_wakeup(fh_otg_pcd_t * pcd, int set);
++
++/** Starts a soft disconnect lasting the given number of microseconds. */
++extern void fh_otg_pcd_disconnect_us(fh_otg_pcd_t * pcd, int no_of_usecs);
++/** This function returns whether the device is dual-speed. */
++extern uint32_t fh_otg_pcd_is_dualspeed(fh_otg_pcd_t * pcd);
++
++/** This function returns whether the device is OTG-capable. */
++extern uint32_t fh_otg_pcd_is_otg(fh_otg_pcd_t * pcd);
++
++/** These functions return the HNP parameters. */
++extern uint32_t get_b_hnp_enable(fh_otg_pcd_t * pcd);
++extern uint32_t get_a_hnp_support(fh_otg_pcd_t * pcd);
++extern uint32_t get_a_alt_hnp_support(fh_otg_pcd_t * pcd);
++
++/** CFI specific Interface functions */
++/** Allocate a cfi buffer */
++//extern uint8_t *cfiw_ep_alloc_buffer(fh_otg_pcd_t * pcd, void *pep,
++//				     fh_dma_t * addr, size_t buflen,
++//				     int flags);
++
++/******************************************************************************/
++
++/** @} */
++
++#endif				/* __FH_PCD_IF_H__ */
++
++#endif				/* FH_HOST_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_intr.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_intr.c
+new file mode 100644
+index 00000000..066c9fcb
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_intr.c
+@@ -0,0 +1,5407 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd_intr.c $
++ * $Revision: #126 $
++ * $Date: 2014/08/25 $
++ * $Change: 2595073 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++#ifndef FH_HOST_ONLY
++
++#include "fh_otg_pcd.h"
++
++#ifdef FH_UTE_CFI
++#include "fh_otg_cfi.h"
++#endif
++
++#ifdef FH_UTE_PER_IO
++extern void complete_xiso_ep(fh_otg_pcd_ep_t * ep);
++#endif
++//#define PRINT_CFI_DMA_DESCS
++
++#define DEBUG_EP0
++
++/**
++ * This function updates the OTG state and notifies the function driver
++ * that the HNP parameters have changed.
++ */
++static void fh_otg_pcd_update_otg(fh_otg_pcd_t * pcd, const unsigned reset)
++{
++
++	if (reset) {
++		pcd->b_hnp_enable = 0;
++		pcd->a_hnp_support = 0;
++		pcd->a_alt_hnp_support = 0;
++	}
++
++	if (pcd->fops->hnp_changed) {
++		pcd->fops->hnp_changed(pcd);
++	}
++}
++
++/** @file
++ * This file contains the implementation of the PCD Interrupt handlers.
++ *
++ * The PCD handles the device interrupts.  Many conditions can cause a
++ * device interrupt. When an interrupt occurs, the device interrupt
++ * service routine determines the cause of the interrupt and
++ * dispatches handling to the appropriate function. These interrupt
++ * handling functions are described below.
++ * All interrupt registers are processed from LSB to MSB.
++ */
++
++/**
++ * This function prints the ep0 state for debug purposes.
++ */
++static inline void print_ep0_state(fh_otg_pcd_t * pcd)
++{
++#ifdef DEBUG
++	char str[40];
++
++	switch (pcd->ep0state) {
++	case EP0_DISCONNECT:
++		fh_strcpy(str, "EP0_DISCONNECT");
++		break;
++	case EP0_IDLE:
++		fh_strcpy(str, "EP0_IDLE");
++		break;
++	case EP0_IN_DATA_PHASE:
++		fh_strcpy(str, "EP0_IN_DATA_PHASE");
++		break;
++	case EP0_OUT_DATA_PHASE:
++		fh_strcpy(str, "EP0_OUT_DATA_PHASE");
++		break;
++	case EP0_IN_STATUS_PHASE:
++		fh_strcpy(str, "EP0_IN_STATUS_PHASE");
++		break;
++	case EP0_OUT_STATUS_PHASE:
++		fh_strcpy(str, "EP0_OUT_STATUS_PHASE");
++		break;
++	case EP0_STALL:
++		fh_strcpy(str, "EP0_STALL");
++		break;
++	default:
++		fh_strcpy(str, "EP0_INVALID");
++	}
++
++	FH_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state);
++#endif
++}
++
++/**
++ * This function calculates the size of the payload in memory
++ * for OUT endpoints and prints it for debug purposes (used in
++ * the 2.93a DevOutNak feature).
++ */
++static inline void print_memory_payload(fh_otg_pcd_t * pcd,  fh_ep_t * ep)
++{
++#ifdef DEBUG
++	deptsiz_data_t deptsiz_init = {.d32 = 0 };
++	deptsiz_data_t deptsiz_updt = {.d32 = 0 };
++	int pack_num;
++	unsigned payload;
++	
++	deptsiz_init.d32 = pcd->core_if->start_doeptsiz_val[ep->num];
++	deptsiz_updt.d32 =
++		FH_READ_REG32(&pcd->core_if->dev_if->
++						out_ep_regs[ep->num]->doeptsiz);
++	/* Payload = initially programmed transfer size minus the remaining size */
++	payload = deptsiz_init.b.xfersize - deptsiz_updt.b.xfersize;
++	/* Packet count is decremented every time a packet
++	 * is written to the RxFIFO, not to the external memory,
++	 * so if payload == 0 no packet was sent to external memory. */
++	pack_num = (!payload) ? 0 : (deptsiz_init.b.pktcnt - deptsiz_updt.b.pktcnt);
++	FH_DEBUGPL(DBG_PCDV,
++		"Payload for EP%d-%s\n",
++		ep->num, (ep->is_in ? "IN" : "OUT"));
++	FH_DEBUGPL(DBG_PCDV,
++		"Number of transfered bytes = 0x%08x\n", payload);
++	FH_DEBUGPL(DBG_PCDV,
++		"Number of transfered packets = %d\n", pack_num);	
++#endif	
++}
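++
++/* Worked example (illustrative): if DOEPTSIZ was programmed with
++ * xfersize = 512 / pktcnt = 8 and later reads back xfersize = 128 /
++ * pktcnt = 2, the payload is 512 - 128 = 384 bytes carried in
++ * 8 - 2 = 6 packets written to external memory. */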
++
++
++#ifdef FH_UTE_CFI
++static inline void print_desc(struct fh_otg_dma_desc *ddesc,
++			      const uint8_t * epname, int descnum)
++{
++	CFI_INFO
++	    ("%s DMA_DESC(%d) buf=0x%08x bytes=0x%04x; sp=0x%x; l=0x%x; sts=0x%02x; bs=0x%02x\n",
++	     epname, descnum, ddesc->buf, ddesc->status.b.bytes,
++	     ddesc->status.b.sp, ddesc->status.b.l, ddesc->status.b.sts,
++	     ddesc->status.b.bs);
++}
++#endif
++
++/**
++ * This function returns a pointer to the IN EP struct with number ep_num.
++ */
++static inline fh_otg_pcd_ep_t *get_in_ep(fh_otg_pcd_t * pcd, uint32_t ep_num)
++{
++	int i;
++	int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
++	if (ep_num == 0) {
++		return &pcd->ep0;
++	} else {
++		for (i = 0; i < num_in_eps; ++i) {
++			if (pcd->in_ep[i].fh_ep.num == ep_num)
++				return &pcd->in_ep[i];
++		}
++		return 0;
++	}
++}
++
++/**
++ * This function returns a pointer to the OUT EP struct with number ep_num.
++ */
++static inline fh_otg_pcd_ep_t *get_out_ep(fh_otg_pcd_t * pcd, uint32_t ep_num)
++{
++	int i;
++	int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
++	if (ep_num == 0) {
++		return &pcd->ep0;
++	} else {
++		for (i = 0; i < num_out_eps; ++i) {
++			if (pcd->out_ep[i].fh_ep.num == ep_num)
++				return &pcd->out_ep[i];
++		}
++		return 0;
++	}
++}
++
++/**
++ * This function gets a pointer to an EP from the wIndex address
++ * value of the control request.
++ */
++fh_otg_pcd_ep_t *get_ep_by_addr(fh_otg_pcd_t * pcd, u16 wIndex)
++{
++	fh_otg_pcd_ep_t *ep;
++	uint32_t ep_num = UE_GET_ADDR(wIndex);
++
++	if (ep_num == 0) {
++		ep = &pcd->ep0;
++	} else if (UE_GET_DIR(wIndex) == UE_DIR_IN) {	/* in ep */
++		ep = &pcd->in_ep[ep_num - 1];
++	} else {
++		ep = &pcd->out_ep[ep_num - 1];
++	}
++
++	return ep;
++}
++
++/**
++ * This function checks the EP request queue; if the queue is not
++ * empty, the next request is started.
++ */
++void start_next_request(fh_otg_pcd_ep_t * ep)
++{
++	fh_otg_pcd_request_t *req = 0;
++	uint32_t max_transfer =
++	    GET_CORE_IF(ep->pcd)->core_params->max_transfer_size;
++
++#ifdef FH_UTE_CFI
++	struct fh_otg_pcd *pcd;
++	pcd = ep->pcd;
++#endif
++
++	if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
++		req = FH_CIRCLEQ_FIRST(&ep->queue);
++
++#ifdef FH_UTE_CFI
++		if (ep->fh_ep.buff_mode != BM_STANDARD) {
++			ep->fh_ep.cfi_req_len = req->length;
++			pcd->cfi->ops.build_descriptors(pcd->cfi, pcd, ep, req);
++		} else {
++#endif
++			/* Setup and start the Transfer */
++			if (req->dw_align_buf) {
++				ep->fh_ep.dma_addr = req->dw_align_buf_dma;
++				ep->fh_ep.start_xfer_buff = req->dw_align_buf;
++				ep->fh_ep.xfer_buff = req->dw_align_buf;
++			} else {
++				ep->fh_ep.dma_addr = req->dma;
++				ep->fh_ep.start_xfer_buff = req->buf;
++				ep->fh_ep.xfer_buff = req->buf;
++			}
++			ep->fh_ep.sent_zlp = 0;
++			ep->fh_ep.total_len = req->length;
++			ep->fh_ep.xfer_len = 0;
++			ep->fh_ep.xfer_count = 0;
++
++			ep->fh_ep.maxxfer = max_transfer;
++			if (GET_CORE_IF(ep->pcd)->dma_desc_enable) {
++				uint32_t out_max_xfer = DDMA_MAX_TRANSFER_SIZE
++				    - (DDMA_MAX_TRANSFER_SIZE % 4);
++				if (ep->fh_ep.is_in) {
++					if (ep->fh_ep.maxxfer >
++					    DDMA_MAX_TRANSFER_SIZE) {
++						ep->fh_ep.maxxfer =
++						    DDMA_MAX_TRANSFER_SIZE;
++					}
++				} else {
++					if (ep->fh_ep.maxxfer > out_max_xfer) {
++						ep->fh_ep.maxxfer =
++						    out_max_xfer;
++					}
++				}
++			}
++			if (ep->fh_ep.maxxfer < ep->fh_ep.total_len) {
++				ep->fh_ep.maxxfer -=
++				    (ep->fh_ep.maxxfer % ep->fh_ep.maxpacket);
++			}
++			if (req->sent_zlp) {
++				if ((ep->fh_ep.total_len %
++				     ep->fh_ep.maxpacket == 0)
++				    && (ep->fh_ep.total_len != 0)) {
++					ep->fh_ep.sent_zlp = 1;
++				}
++
++			}
++#ifdef FH_UTE_CFI
++		}
++#endif
++		fh_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->fh_ep);
++	} else if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
++		diepmsk_data_t intr_mask = {.d32 = 0 };
++
++		intr_mask.b.nak = 1;
++
++		if (GET_CORE_IF(ep->pcd)->multiproc_int_enable) {
++			FH_MODIFY_REG32(&GET_CORE_IF(ep->pcd)->dev_if->dev_global_regs->
++				diepeachintmsk[ep->fh_ep.num], intr_mask.d32, 0);
++		} else {
++			FH_MODIFY_REG32(&GET_CORE_IF(ep->pcd)->dev_if->dev_global_regs->diepmsk,
++				intr_mask.d32, 0);
++		}
++		FH_PRINTF("There are no more ISOC requests \n");
++		ep->fh_ep.frame_num = 0xFFFFFFFF;
++	}
++}
++
++/**
++ * This function handles the SOF Interrupts. At this time the SOF
++ * Interrupt is disabled.
++ */
++int32_t fh_otg_pcd_handle_sof_intr(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++
++	gintsts_data_t gintsts;
++
++	FH_DEBUGPL(DBG_PCD, "SOF\n");
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.sofintr = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * This function handles the Rx Status Queue Level Interrupt, which
++ * indicates that there is at least one packet in the Rx FIFO.  The
++ * packets are moved from the FIFO to memory, where they will be
++ * processed when the Endpoint Interrupt Register indicates Transfer
++ * Complete or SETUP Phase Done.
++ *
++ * Repeat the following until the Rx Status Queue is empty:
++ *	 -# Read the Receive Status Pop Register (GRXSTSP) to get Packet
++ *		info
++ *	 -# If the Receive FIFO is empty then skip to the "Clear the
++ *		interrupt" step and exit
++ *	 -# If SETUP Packet call fh_otg_read_setup_packet to copy the
++ *		SETUP data to the buffer
++ *	 -# If OUT Data Packet call fh_otg_read_packet to copy the data
++ *		to the destination buffer
++ */
++int32_t fh_otg_pcd_handle_rx_status_q_level_intr(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	gintmsk_data_t gintmask = {.d32 = 0 };
++	device_grxsts_data_t status;
++	fh_otg_pcd_ep_t *ep;
++	gintsts_data_t gintsts;
++#ifdef DEBUG
++	static char *dpid_str[] = { "D0", "D2", "D1", "MDATA" };
++#endif
++
++	//FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd);
++	/* Disable the Rx Status Queue Level interrupt */
++	gintmask.b.rxstsqlvl = 1;
++	FH_MODIFY_REG32(&global_regs->gintmsk, gintmask.d32, 0);
++
++	/* Get the Status from the top of the FIFO */
++	status.d32 = FH_READ_REG32(&global_regs->grxstsp);
++
++	FH_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s "
++		    "pktsts:%x Frame:%d(0x%0x)\n",
++		    status.b.epnum, status.b.bcnt,
++		    dpid_str[status.b.dpid],
++		    status.b.pktsts, status.b.fn, status.b.fn);
++	/* Get pointer to EP structure */
++	ep = get_out_ep(pcd, status.b.epnum);
++
++	switch (status.b.pktsts) {
++	case FH_DSTS_GOUT_NAK:
++		FH_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
++		break;
++	case FH_STS_DATA_UPDT:
++		FH_DEBUGPL(DBG_PCDV, "OUT Data Packet\n");
++		if (status.b.bcnt && ep->fh_ep.xfer_buff) {
++			/** @todo NGS Check for buffer overflow? */
++			fh_otg_read_packet(core_if,
++					    ep->fh_ep.xfer_buff,
++					    status.b.bcnt);
++			ep->fh_ep.xfer_count += status.b.bcnt;
++			ep->fh_ep.xfer_buff += status.b.bcnt;
++		}
++		break;
++	case FH_STS_XFER_COMP:
++		FH_DEBUGPL(DBG_PCDV, "OUT Complete\n");
++		break;
++	case FH_DSTS_SETUP_COMP:
++#ifdef DEBUG_EP0
++		FH_DEBUGPL(DBG_PCDV, "Setup Complete\n");
++#endif
++		break;
++	case FH_DSTS_SETUP_UPDT:
++		fh_otg_read_setup_packet(core_if, pcd->setup_pkt->d32);
++#ifdef DEBUG_EP0
++		FH_DEBUGPL(DBG_PCD,
++			    "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n",
++			    pcd->setup_pkt->req.bmRequestType,
++			    pcd->setup_pkt->req.bRequest,
++			    UGETW(pcd->setup_pkt->req.wValue),
++			    UGETW(pcd->setup_pkt->req.wIndex),
++			    UGETW(pcd->setup_pkt->req.wLength));
++#endif
++		ep->fh_ep.xfer_count += status.b.bcnt;
++		break;
++	default:
++		FH_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n",
++			    status.b.pktsts);
++		break;
++	}
++
++	/* Enable the Rx Status Queue Level interrupt */
++	FH_MODIFY_REG32(&global_regs->gintmsk, 0, gintmask.d32);
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.rxstsqlvl = 1;
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	//FH_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__);
++	return 1;
++}
++
++/**
++ * This function examines the Device IN Token Learning Queue to
++ * determine the EP number of the last IN token received.  This
++ * implementation is for the Mass Storage device where there are only
++ * 2 IN EPs (Control-IN and BULK-IN).
++ *
++ * The EP numbers for the first six IN Tokens are in DTKNQR1 and there
++ * are 8 EP Numbers in each of the other possible DTKNQ Registers.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ *
++ */
++static inline int get_ep_of_last_in_token(fh_otg_core_if_t * core_if)
++{
++	fh_otg_device_global_regs_t *dev_global_regs =
++	    core_if->dev_if->dev_global_regs;
++	const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
++	/* Number of Token Queue Registers */
++	const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
++	dtknq1_data_t dtknqr1;
++	uint32_t in_tkn_epnums[4];
++	int ndx = 0;
++	int i = 0;
++	volatile uint32_t *addr = &dev_global_regs->dtknqr1;
++	int epnum = 0;
++
++	//FH_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH);
++
++	/* Read the DTKNQ Registers */
++	for (i = 0; i < DTKNQ_REG_CNT; i++) {
++		in_tkn_epnums[i] = FH_READ_REG32(addr);
++		FH_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
++			    in_tkn_epnums[i]);
++		if (addr == &dev_global_regs->dvbusdis) {
++			addr = &dev_global_regs->dtknqr3_dthrctl;
++		} else {
++			++addr;
++		}
++
++	}
++
++	/* Copy the DTKNQR1 data to the bit field. */
++	dtknqr1.d32 = in_tkn_epnums[0];
++	/* Get the EP numbers */
++	in_tkn_epnums[0] = dtknqr1.b.epnums0_5;
++	ndx = dtknqr1.b.intknwptr - 1;
++
++	//FH_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx);
++	if (ndx == -1) {
++		/** @todo Find a simpler way to calculate the max
++		 * queue position.*/
++		int cnt = TOKEN_Q_DEPTH;
++		if (TOKEN_Q_DEPTH <= 6) {
++			cnt = TOKEN_Q_DEPTH - 1;
++		} else if (TOKEN_Q_DEPTH <= 14) {
++			cnt = TOKEN_Q_DEPTH - 7;
++		} else if (TOKEN_Q_DEPTH <= 22) {
++			cnt = TOKEN_Q_DEPTH - 15;
++		} else {
++			cnt = TOKEN_Q_DEPTH - 23;
++		}
++		epnum = (in_tkn_epnums[DTKNQ_REG_CNT - 1] >> (cnt * 4)) & 0xF;
++	} else {
++		if (ndx <= 5) {
++			epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF;
++		} else if (ndx <= 13) {
++			ndx -= 6;
++			epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF;
++		} else if (ndx <= 21) {
++			ndx -= 14;
++			epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF;
++		} else if (ndx <= 29) {
++			ndx -= 22;
++			epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF;
++		}
++	}
++	//FH_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum);
++	return epnum;
++}
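++
++/* Worked example (illustrative): with dtknqr1.b.intknwptr == 3 the most
++ * recent token sits in slot ndx = 2, so epnum is taken from bits [11:8]
++ * of dtknqr1.b.epnums0_5; slots beyond 5 are read the same way from the
++ * following DTKNQ registers, 8 EP numbers per register. */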
++
++/**
++ * This interrupt occurs when the non-periodic Tx FIFO is half-empty.
++ * The active request is checked for the next packet to be loaded into
++ * the non-periodic Tx FIFO.
++ */
++int32_t fh_otg_pcd_handle_np_tx_fifo_empty_intr(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	fh_otg_dev_in_ep_regs_t *ep_regs;
++	gnptxsts_data_t txstatus = {.d32 = 0 };
++	gintsts_data_t gintsts;
++
++	int epnum = 0;
++	fh_otg_pcd_ep_t *ep = 0;
++	uint32_t len = 0;
++	int dwords;
++
++	/* Get the epnum from the IN Token Learning Queue. */
++	epnum = get_ep_of_last_in_token(core_if);
++	ep = get_in_ep(pcd, epnum);
++
++	FH_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %d \n", epnum);
++
++	ep_regs = core_if->dev_if->in_ep_regs[epnum];
++
++	len = ep->fh_ep.xfer_len - ep->fh_ep.xfer_count;
++	if (len > ep->fh_ep.maxpacket) {
++		len = ep->fh_ep.maxpacket;
++	}
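++	/* Round the packet length up to whole 32-bit FIFO words; e.g.
++	 * (illustrative) len = 13 gives dwords = (13 + 3) / 4 = 4. */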
++	dwords = (len + 3) / 4;
++
++	/* While there is space in the queue and space in the FIFO and
++	 * more data to transfer, write packets to the Tx FIFO */
++	txstatus.d32 = FH_READ_REG32(&global_regs->gnptxsts);
++	FH_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n", txstatus.d32);
++
++	while (txstatus.b.nptxqspcavail > 0 &&
++	       txstatus.b.nptxfspcavail > dwords &&
++	       ep->fh_ep.xfer_count < ep->fh_ep.xfer_len) {
++		/* Write the FIFO */
++		fh_otg_ep_write_packet(core_if, &ep->fh_ep, 0);
++		len = ep->fh_ep.xfer_len - ep->fh_ep.xfer_count;
++
++		if (len > ep->fh_ep.maxpacket) {
++			len = ep->fh_ep.maxpacket;
++		}
++
++		dwords = (len + 3) / 4;
++		txstatus.d32 = FH_READ_REG32(&global_regs->gnptxsts);
++		FH_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", txstatus.d32);
++	}
++
++	FH_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n",
++		    FH_READ_REG32(&global_regs->gnptxsts));
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.nptxfempty = 1;
++	FH_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * This function is called when the dedicated Tx FIFO Empty interrupt occurs.
++ * The active request is checked for the next packet to be loaded into
++ * the appropriate Tx FIFO.
++ */
++static int32_t write_empty_tx_fifo(fh_otg_pcd_t * pcd, uint32_t epnum)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	fh_otg_dev_in_ep_regs_t *ep_regs;
++	dtxfsts_data_t txstatus = {.d32 = 0 };
++	fh_otg_pcd_ep_t *ep = 0;
++	uint32_t len = 0;
++	int dwords;
++
++	ep = get_in_ep(pcd, epnum);
++
++	FH_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %d \n", epnum);
++
++	ep_regs = core_if->dev_if->in_ep_regs[epnum];
++
++	len = ep->fh_ep.xfer_len - ep->fh_ep.xfer_count;
++
++	if (len > ep->fh_ep.maxpacket) {
++		len = ep->fh_ep.maxpacket;
++	}
++
++	dwords = (len + 3) / 4;
++
++	/* While there is space in the queue and space in the FIFO and
++	 * more data to transfer, write packets to the Tx FIFO */
++	txstatus.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
++	FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
++
++	while (txstatus.b.txfspcavail >= dwords &&
++	       ep->fh_ep.xfer_count < ep->fh_ep.xfer_len &&
++	       ep->fh_ep.xfer_len != 0) {
++		/* Write the FIFO */
++		fh_otg_ep_write_packet(core_if, &ep->fh_ep, 0);
++
++		len = ep->fh_ep.xfer_len - ep->fh_ep.xfer_count;
++		if (len > ep->fh_ep.maxpacket) {
++			len = ep->fh_ep.maxpacket;
++		}
++
++		dwords = (len + 3) / 4;
++		txstatus.d32 =
++		    FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts);
++		FH_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
++			    txstatus.d32);
++	}
++
++	FH_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum,
++		    FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dtxfsts));
++
++	return 1;
++}
++
++/**
++ * This function is called when the Device is disconnected. It stops
++ * any active requests and informs the Gadget driver of the
++ * disconnect.
++ */
++void fh_otg_pcd_stop(fh_otg_pcd_t * pcd)
++{
++	int i, num_in_eps, num_out_eps;
++	fh_otg_pcd_ep_t *ep;
++
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_SPINLOCK(pcd->lock);
++
++	num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
++	num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
++
++	FH_DEBUGPL(DBG_PCDV, "%s() \n", __func__);
++	/* don't disconnect drivers more than once */
++	if (pcd->ep0state == EP0_DISCONNECT) {
++		FH_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__);
++		FH_SPINUNLOCK(pcd->lock);
++		return;
++	}
++	pcd->ep0state = EP0_DISCONNECT;
++
++	/* Reset the OTG state. */
++	fh_otg_pcd_update_otg(pcd, 1);
++
++	/* Disable the NP Tx Fifo Empty Interrupt. */
++	intr_mask.b.nptxfempty = 1;
++	FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
++			 intr_mask.d32, 0);
++
++	/* Flush the FIFOs */
++	/**@todo NGS Flush Periodic FIFOs */
++	fh_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10);
++	fh_otg_flush_rx_fifo(GET_CORE_IF(pcd));
++
++	/* prevent new request submissions, kill any outstanding requests  */
++	ep = &pcd->ep0;
++	fh_otg_request_nuke(ep);
++	/* prevent new request submissions, kill any outstanding requests  */
++	for (i = 0; i < num_in_eps; i++) {
++		fh_otg_pcd_ep_t *ep = &pcd->in_ep[i];
++		fh_otg_request_nuke(ep);
++	}
++	/* prevent new request submissions, kill any outstanding requests  */
++	for (i = 0; i < num_out_eps; i++) {
++		fh_otg_pcd_ep_t *ep = &pcd->out_ep[i];
++		fh_otg_request_nuke(ep);
++	}
++
++	/* report disconnect; the driver is already quiesced */
++	if (pcd->fops->disconnect) {
++		FH_SPINUNLOCK(pcd->lock);
++		pcd->fops->disconnect(pcd);
++		FH_SPINLOCK(pcd->lock);
++	}
++	FH_SPINUNLOCK(pcd->lock);
++}
++
++/**
++ * This interrupt indicates an I2C event.  The handler is not implemented;
++ * it only masks and clears the interrupt.
++ */
++int32_t fh_otg_pcd_handle_i2c_intr(fh_otg_pcd_t * pcd)
++{
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++	gintsts_data_t gintsts;
++
++	FH_PRINTF("INTERRUPT Handler not implemented for %s\n", "i2cintr");
++	intr_mask.b.i2cintr = 1;
++	FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
++			 intr_mask.d32, 0);
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.i2cintr = 1;
++	FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++			gintsts.d32);
++	return 1;
++}
++
++/**
++ * This interrupt indicates that an Early Suspend was detected; the
++ * handler only clears the interrupt.
++ */
++int32_t fh_otg_pcd_handle_early_suspend_intr(fh_otg_pcd_t * pcd)
++{
++	gintsts_data_t gintsts;
++#if defined(VERBOSE)
++	FH_PRINTF("Early Suspend Detected\n");
++#endif
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.erlysuspend = 1;
++	FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++			gintsts.d32);
++	return 1;
++}
++
++/**
++ * This function configures EP0 to receive SETUP packets.
++ *
++ * @todo NGS: Update the comments from the HW FS.
++ *
++ *	-# Program the following fields in the endpoint specific registers
++ *	for Control OUT EP 0, in order to receive a setup packet
++ *	- DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
++ *	  setup packets)
++ *	- DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
++ *	  to back setup packets)
++ *		- In DMA mode, DOEPDMA0 Register with a memory address to
++ *		  store any setup packets received
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param pcd	  Programming view of the PCD.
++ */
++static inline void ep0_out_start(fh_otg_core_if_t * core_if,
++				 fh_otg_pcd_t * pcd)
++{
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	deptsiz0_data_t doeptsize0 = {.d32 = 0 };
++	fh_otg_dev_dma_desc_t *dma_desc;
++	depctl_data_t doepctl = {.d32 = 0 };
++
++#ifdef VERBOSE
++	FH_DEBUGPL(DBG_PCDV, "%s() doepctl0=%0x\n", __func__,
++		    FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
++#endif
++	if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
++		doepctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl);
++		if (doepctl.b.epena) {
++			return;
++		}
++	}
++
++	doeptsize0.b.supcnt = 3;
++	doeptsize0.b.pktcnt = 1;
++	doeptsize0.b.xfersize = 8 * 3;
++
++	if (core_if->dma_enable) {
++		if (!core_if->dma_desc_enable) {
++			/** placed here because in Hermes mode the deptsiz register must not be written */
++			FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doeptsiz,
++					doeptsize0.d32);
++
++			/** @todo dma needs to handle multiple setup packets (up to 3) */
++			FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doepdma,
++					pcd->setup_pkt_dma_handle);
++		} else {
++			dev_if->setup_desc_index =
++			    (dev_if->setup_desc_index + 1) & 1;
++			dma_desc =
++			    dev_if->setup_desc_addr[dev_if->setup_desc_index];
++
++			/** DMA Descriptor Setup */
++			dma_desc->status.b.bs = BS_HOST_BUSY;
++			if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
++				dma_desc->status.b.sr = 0;
++				dma_desc->status.b.mtrf = 0;
++			}
++			dma_desc->status.b.l = 1;
++			dma_desc->status.b.ioc = 1;
++			dma_desc->status.b.bytes = pcd->ep0.fh_ep.maxpacket;
++			dma_desc->buf = pcd->setup_pkt_dma_handle;
++			dma_desc->status.b.sts = 0;
++			dma_desc->status.b.bs = BS_HOST_READY;
++
++			/** DOEPDMA0 Register write */
++			FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doepdma,
++					dev_if->dma_setup_desc_addr
++					[dev_if->setup_desc_index]);
++		}
++
++	} else {
++		/** placed here because in Hermes mode the deptsiz register must not be written */
++		FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doeptsiz,
++				doeptsize0.d32);
++	}
++
++	/** DOEPCTL0 Register write; cnak will be set after the setup interrupt */
++	doepctl.d32 = 0;
++	doepctl.b.epena = 1;
++	if (core_if->snpsid <= OTG_CORE_REV_2_94a) {
++		doepctl.b.cnak = 1;
++		FH_WRITE_REG32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
++	} else {
++		FH_MODIFY_REG32(&dev_if->out_ep_regs[0]->doepctl, 0, doepctl.d32);
++	}
++
++#ifdef VERBOSE
++	FH_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
++		    FH_READ_REG32(&dev_if->out_ep_regs[0]->doepctl));
++	FH_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
++		    FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl));
++#endif
++}
++
++/**
++ * This interrupt occurs when a USB Reset is detected. When the USB
++ * Reset Interrupt occurs the device state is set to DEFAULT and the
++ * EP0 state is set to IDLE.
++ *	-#	Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1)
++ *	-#	Unmask the following interrupt bits
++ *		- DAINTMSK.INEP0 = 1 (Control 0 IN endpoint)
++ *		- DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint)
++ *		- DOEPMSK.SETUP = 1
++ *		- DOEPMSK.XferCompl = 1
++ *		- DIEPMSK.XferCompl = 1
++ *		- DIEPMSK.TimeOut = 1
++ *	-# Program the following fields in the endpoint specific registers
++ *	for Control OUT EP 0, in order to receive a setup packet
++ *	- DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
++ *	  setup packets)
++ *	- DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
++ *	  to back setup packets)
++ *		- In DMA mode, DOEPDMA0 Register with a memory address to
++ *		  store any setup packets received
++ * At this point all the required initialization for receiving SETUP
++ * packets is done, except for enabling the control 0 OUT endpoint.
++ */
++int32_t fh_otg_pcd_handle_usb_reset_intr(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	depctl_data_t doepctl = {.d32 = 0 };
++	depctl_data_t diepctl = {.d32 = 0 };
++	daint_data_t daintmsk = {.d32 = 0 };
++	doepmsk_data_t doepmsk = {.d32 = 0 };
++	diepmsk_data_t diepmsk = {.d32 = 0 };
++	dcfg_data_t dcfg = {.d32 = 0 };
++	grstctl_t resetctl = {.d32 = 0 };
++	dctl_data_t dctl = {.d32 = 0 };
++	int i = 0;
++	gintsts_data_t gintsts;
++	pcgcctl_data_t power = {.d32 = 0 };
++
++	power.d32 = FH_READ_REG32(core_if->pcgcctl);
++	if (power.b.stoppclk) {
++		power.d32 = 0;
++		power.b.stoppclk = 1;
++		FH_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
++
++		power.b.pwrclmp = 1;
++		FH_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
++
++		power.b.rstpdwnmodule = 1;
++		FH_MODIFY_REG32(core_if->pcgcctl, power.d32, 0);
++	}
++
++	core_if->lx_state = FH_OTG_L0;
++	core_if->otg_sts = 0;
++	
++	FH_PRINTF("USB RESET\n");
++#ifdef FH_EN_ISOC
++	for (i = 1; i < 16; ++i) {
++		fh_otg_pcd_ep_t *ep;
++		fh_ep_t *fh_ep;
++		ep = get_in_ep(pcd, i);
++		if (ep != 0) {
++			fh_ep = &ep->fh_ep;
++			fh_ep->next_frame = 0xffffffff;
++		}
++	}
++#endif /* FH_EN_ISOC */
++
++	/* reset the HNP settings */
++	fh_otg_pcd_update_otg(pcd, 1);
++
++	/* Clear the Remote Wakeup Signalling */
++	dctl.b.rmtwkupsig = 1;
++	FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
++
++	/* Set NAK for all OUT EPs */
++	doepctl.b.snak = 1;
++	for (i = 0; i <= dev_if->num_out_eps; i++) {
++		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
++	}
++
++	/* Flush the NP Tx FIFO */
++	fh_otg_flush_tx_fifo(core_if, 0x10);
++	/* Flush the Learning Queue */
++	resetctl.b.intknqflsh = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
++
++	if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable) {
++		core_if->start_predict = 0;
++		for (i = 0; i <= core_if->dev_if->num_in_eps; ++i) {
++			core_if->nextep_seq[i] = 0xff;	// 0xff - EP not active
++		}
++		core_if->nextep_seq[0] = 0;
++		core_if->first_in_nextep_seq = 0;
++		diepctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[0]->diepctl);
++		diepctl.b.nextep = 0;
++		FH_WRITE_REG32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
++
++		/* Update IN Endpoint Mismatch Count by active IN NP EP count + 1 */
++		dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
++		dcfg.b.epmscnt = 2;
++		FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
++
++		FH_DEBUGPL(DBG_PCDV,
++			    "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
++			    __func__, core_if->first_in_nextep_seq);
++		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++			FH_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
++		}
++	}
++
++	if (core_if->multiproc_int_enable) {
++		daintmsk.b.inep0 = 1;
++		daintmsk.b.outep0 = 1;
++		FH_WRITE_REG32(&dev_if->dev_global_regs->deachintmsk,
++				daintmsk.d32);
++
++		doepmsk.b.setup = 1;
++		doepmsk.b.xfercompl = 1;
++		doepmsk.b.ahberr = 1;
++		doepmsk.b.epdisabled = 1;
++
++		if ((core_if->dma_desc_enable) ||
++		    (core_if->dma_enable
++		     && core_if->snpsid >= OTG_CORE_REV_3_00a)) {
++			doepmsk.b.stsphsercvd = 1;
++		}
++		if (core_if->dma_desc_enable)
++			doepmsk.b.bna = 1;
++/*		
++		doepmsk.b.babble = 1;
++		doepmsk.b.nyet = 1;
++		
++		if (core_if->dma_enable) {
++			doepmsk.b.nak = 1;
++		}
++*/
++		FH_WRITE_REG32(&dev_if->dev_global_regs->doepeachintmsk[0],
++				doepmsk.d32);
++
++		diepmsk.b.xfercompl = 1;
++		diepmsk.b.timeout = 1;
++		diepmsk.b.epdisabled = 1;
++		diepmsk.b.ahberr = 1;
++		diepmsk.b.intknepmis = 1;
++		if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
++			diepmsk.b.intknepmis = 0;
++
++/*		if (core_if->dma_desc_enable) {
++			diepmsk.b.bna = 1;
++		}
++*/
++/*		
++		if (core_if->dma_enable) {
++			diepmsk.b.nak = 1;
++		}
++*/
++		FH_WRITE_REG32(&dev_if->dev_global_regs->diepeachintmsk[0],
++				diepmsk.d32);
++	} else {
++		daintmsk.b.inep0 = 1;
++		daintmsk.b.outep0 = 1;
++		FH_WRITE_REG32(&dev_if->dev_global_regs->daintmsk,
++				daintmsk.d32);
++
++		doepmsk.b.setup = 1;
++		doepmsk.b.xfercompl = 1;
++		doepmsk.b.ahberr = 1;
++		doepmsk.b.epdisabled = 1;
++
++		if ((core_if->dma_desc_enable) ||
++		    (core_if->dma_enable
++		     && core_if->snpsid >= OTG_CORE_REV_3_00a)) {
++			doepmsk.b.stsphsercvd = 1;
++		}
++		if (core_if->dma_desc_enable)
++			doepmsk.b.bna = 1;
++		FH_WRITE_REG32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32);
++
++		diepmsk.b.xfercompl = 1;
++		diepmsk.b.timeout = 1;
++		diepmsk.b.epdisabled = 1;
++		diepmsk.b.ahberr = 1;
++		if (!core_if->en_multiple_tx_fifo && core_if->dma_enable)
++			diepmsk.b.intknepmis = 0;
++/*
++		if (core_if->dma_desc_enable) {
++			diepmsk.b.bna = 1;
++		}
++*/
++
++		FH_WRITE_REG32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32);
++	}
++
++	/* Reset Device Address */
++	dcfg.d32 = FH_READ_REG32(&dev_if->dev_global_regs->dcfg);
++	dcfg.b.devaddr = 0;
++	FH_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
++
++	/* setup EP0 to receive SETUP packets */
++	if (core_if->snpsid <= OTG_CORE_REV_2_94a)
++		ep0_out_start(core_if, pcd);
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.usbreset = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * Get the device speed from the device status register and convert it
++ * to a USB speed constant.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ */
++static int get_device_speed(fh_otg_core_if_t * core_if)
++{
++	dsts_data_t dsts;
++	int speed = 0;
++	dsts.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
++
++	switch (dsts.b.enumspd) {
++	case FH_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
++		speed = USB_SPEED_HIGH;
++		break;
++	case FH_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
++	case FH_DSTS_ENUMSPD_FS_PHY_48MHZ:
++		speed = USB_SPEED_FULL;
++		break;
++
++	case FH_DSTS_ENUMSPD_LS_PHY_6MHZ:
++		speed = USB_SPEED_LOW;
++		break;
++	}
++
++	return speed;
++}
++
++/**
++ * Read the device status register and set the device speed in the
++ * data structure.
++ * Set up EP0 to receive SETUP packets by calling fh_otg_ep0_activate.
++ */
++int32_t fh_otg_pcd_handle_enum_done_intr(fh_otg_pcd_t * pcd)
++{
++	fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
++	gintsts_data_t gintsts;
++	gusbcfg_data_t gusbcfg;
++	fh_otg_core_global_regs_t *global_regs =
++	    GET_CORE_IF(pcd)->core_global_regs;
++	uint8_t utmi16b, utmi8b;
++	int speed;
++	dcfg_data_t dcfg;
++
++	FH_DEBUGPL(DBG_PCD, "SPEED ENUM\n");
++
++	/* Workaround for the case when SW gets SPEED ENUM without a preceding
++	* USB RESET, because the host issued the USB RESET earlier.  The USB
++	* Reset routine still needs to be called, at least to program EP 0 OUT.
++	* - vahrama
++	*/
++	dcfg.d32 = FH_READ_REG32(&pcd->core_if->dev_if->dev_global_regs->dcfg);
++	if (pcd->core_if->otg_ver && dcfg.b.devaddr)
++		fh_otg_pcd_handle_usb_reset_intr(pcd);
++
++
++	if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_2_60a) {
++		utmi16b = 6;	//vahrama old value was 6;
++		utmi8b = 9;
++	} else {
++		utmi16b = 4;
++		utmi8b = 8;
++	}
++	fh_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->fh_ep);
++	if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_3_00a) {
++		ep0_out_start(GET_CORE_IF(pcd), pcd);
++	}
++
++#ifdef DEBUG_EP0
++	print_ep0_state(pcd);
++#endif
++
++	if (pcd->ep0state == EP0_DISCONNECT) {
++		pcd->ep0state = EP0_IDLE;
++	} else if (pcd->ep0state == EP0_STALL) {
++		pcd->ep0state = EP0_IDLE;
++	}
++
++	pcd->ep0state = EP0_IDLE;
++
++	ep0->stopped = 0;
++
++	speed = get_device_speed(GET_CORE_IF(pcd));
++	pcd->fops->connect(pcd, speed);
++
++	/* Set USB turnaround time based on device speed and PHY interface. */
++	gusbcfg.d32 = FH_READ_REG32(&global_regs->gusbcfg);
++	if (speed == USB_SPEED_HIGH) {
++		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
++		    FH_HWCFG2_HS_PHY_TYPE_ULPI) {
++			/* ULPI interface */
++			gusbcfg.b.usbtrdtim = 9;
++		}
++		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
++		    FH_HWCFG2_HS_PHY_TYPE_UTMI) {
++			/* UTMI+ interface */
++			if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) {
++				gusbcfg.b.usbtrdtim = utmi8b;
++			} else if (GET_CORE_IF(pcd)->hwcfg4.
++				   b.utmi_phy_data_width == 1) {
++				gusbcfg.b.usbtrdtim = utmi16b;
++			} else if (GET_CORE_IF(pcd)->
++				   core_params->phy_utmi_width == 8) {
++				gusbcfg.b.usbtrdtim = utmi8b;
++			} else {
++				gusbcfg.b.usbtrdtim = utmi16b;
++			}
++		}
++		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
++		    FH_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) {
++			/* UTMI+  OR  ULPI interface */
++			if (gusbcfg.b.ulpi_utmi_sel == 1) {
++				/* ULPI interface */
++				gusbcfg.b.usbtrdtim = 9;
++			} else {
++				/* UTMI+ interface */
++				if (GET_CORE_IF(pcd)->
++				    core_params->phy_utmi_width == 16) {
++					gusbcfg.b.usbtrdtim = utmi16b;
++				} else {
++					gusbcfg.b.usbtrdtim = utmi8b;
++				}
++			}
++		}
++	} else {
++		/* Full or low speed */
++		gusbcfg.b.usbtrdtim = 9;
++	}
++	FH_WRITE_REG32(&global_regs->gusbcfg, gusbcfg.d32);
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.enumdone = 1;
++	FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++			gintsts.d32);
++	return 1;
++}
++
++/**
++ * This interrupt indicates that the ISO OUT Packet was dropped due to
++ * Rx FIFO full or Rx Status Queue Full.  If this interrupt occurs
++ * read all the data from the Rx FIFO.
++ */
++int32_t fh_otg_pcd_handle_isoc_out_packet_dropped_intr(fh_otg_pcd_t * pcd)
++{
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++	gintsts_data_t gintsts;
++
++	FH_WARN("INTERRUPT Handler not implemented for %s\n",
++		 "ISOC Out Dropped");
++
++	intr_mask.b.isooutdrop = 1;
++	FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
++			 intr_mask.d32, 0);
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.isooutdrop = 1;
++	FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++			gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * This interrupt indicates the end of the portion of the micro-frame
++ * for periodic transactions.  If there is a periodic transaction for
++ * the next frame, load the packets into the EP periodic Tx FIFO.
++ */
++int32_t fh_otg_pcd_handle_end_periodic_frame_intr(fh_otg_pcd_t * pcd)
++{
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++	gintsts_data_t gintsts;
++	FH_PRINTF("INTERRUPT Handler not implemented for %s\n", "EOP");
++
++	intr_mask.b.eopframe = 1;
++	FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
++			 intr_mask.d32, 0);
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.eopframe = 1;
++	FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++			gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * This interrupt indicates that the EP of the packet at the top of the
++ * non-periodic Tx FIFO does not match the EP of the IN Token received.
++ *
++ * The "Device IN Token Queue" Registers are read to determine the
++ * order the IN Tokens have been received. The non-periodic Tx FIFO
++ * is flushed, so it can be reloaded in the order seen in the IN Token
++ * Queue.
++ */
++int32_t fh_otg_pcd_handle_ep_mismatch_intr(fh_otg_pcd_t * pcd)
++{
++	gintsts_data_t gintsts;
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	dctl_data_t dctl;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	if (!core_if->en_multiple_tx_fifo && core_if->dma_enable) {
++		core_if->start_predict = 1;
++
++		FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
++	
++		gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++		if (!gintsts.b.ginnakeff) {
++			/* Disable EP Mismatch interrupt */
++			intr_mask.d32 = 0;
++			intr_mask.b.epmismatch = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0);
++			/* Enable the Global IN NAK Effective Interrupt */
++			intr_mask.d32 = 0;
++			intr_mask.b.ginnakeff = 1;
++			FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, intr_mask.d32);
++			/* Set the global non-periodic IN NAK handshake */
++			dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
++			dctl.b.sgnpinnak = 1;
++			FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
++		} else {
++			FH_PRINTF("gintsts.b.ginnakeff = 1! dctl.b.sgnpinnak not set\n");
++		}
++		/* All EP's will be disabled in the fh_otg_pcd_handle_in_nak_effective()
++		 * handler once the Global IN NAK Effective interrupt is asserted */
++	}
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.epmismatch = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * This interrupt is valid only in DMA mode. This interrupt indicates that the
++ * core has stopped fetching data for IN endpoints due to the unavailability of
++ * TxFIFO space or Request Queue space. This interrupt is used by the
++ * application for an endpoint mismatch algorithm.
++ * 
++ * @param pcd The PCD 
++ */
++int32_t fh_otg_pcd_handle_ep_fetsusp_intr(fh_otg_pcd_t * pcd)
++{
++	gintsts_data_t gintsts;
++	gintmsk_data_t gintmsk_data;
++	dctl_data_t dctl;
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
++
++	/* Clear the global non-periodic IN NAK handshake */
++	dctl.d32 = 0;
++	dctl.b.cgnpinnak = 1;
++	FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); 
++	
++	/* Mask GINTSTS.FETSUSP interrupt */
++	gintmsk_data.d32 = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
++	gintmsk_data.b.fetsusp = 0;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk_data.d32);
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.fetsusp = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * This function stalls EP0.
++ */
++static inline void ep0_do_stall(fh_otg_pcd_t * pcd, const int err_val)
++{
++	fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
++	usb_device_request_t *ctrl = &pcd->setup_pkt->req;
++	FH_WARN("req %02x.%02x protocol STALL; err %d\n",
++		 ctrl->bmRequestType, ctrl->bRequest, err_val);
++
++	ep0->fh_ep.is_in = 1;
++	fh_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->fh_ep);
++	ep0->fh_ep.is_in = 0;
++	fh_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->fh_ep);
++	pcd->ep0.stopped = 1;
++	pcd->ep0state = EP0_IDLE;
++	ep0_out_start(GET_CORE_IF(pcd), pcd);
++}
++
++/**
++ * This function delegates the setup command to the gadget driver.
++ */
++static inline void do_gadget_setup(fh_otg_pcd_t * pcd,
++				   usb_device_request_t * ctrl)
++{
++	int ret = 0;
++	FH_SPINUNLOCK(pcd->lock);
++	ret = pcd->fops->setup(pcd, (uint8_t *) ctrl);
++	FH_SPINLOCK(pcd->lock);
++	if (ret < 0) {
++		ep0_do_stall(pcd, ret);
++	}
++
++	/** @todo This is a g_file_storage gadget driver specific
++	 * workaround: a DELAYED_STATUS result from the fsg_setup
++	 * routine will result in the gadget queueing an EP0 IN status
++	 * phase for a two-stage control transfer.  Exactly the same as
++	 * a SET_CONFIGURATION/SET_INTERFACE except that this is a
++	 * class-specific request.  Need a generic way to know when the gadget
++	 * driver will queue the status phase. Can we assume when we
++	 * call the gadget driver setup() function that it will always
++	 * queue and require the following flag? Need to look into
++	 * this.
++	 */
++
++	if (ret == 256 + 999) {	/* DELAYED_STATUS from g_file_storage */
++		pcd->request_config = 1;
++	}
++}
++
++#ifdef FH_UTE_CFI
++/**
++ * This function delegates the CFI setup commands to the gadget driver.
++ * It returns a negative value to indicate a failure.
++ */
++static inline int cfi_gadget_setup(fh_otg_pcd_t * pcd,
++				   struct cfi_usb_ctrlrequest *ctrl_req)
++{
++	int ret = 0;
++
++	if (pcd->fops && pcd->fops->cfi_setup) {
++		FH_SPINUNLOCK(pcd->lock);
++		ret = pcd->fops->cfi_setup(pcd, ctrl_req);
++		FH_SPINLOCK(pcd->lock);
++		if (ret < 0) {
++			ep0_do_stall(pcd, ret);
++			return ret;
++		}
++	}
++
++	return ret;
++}
++#endif
++
++/**
++ * This function starts the Zero-Length Packet for the IN status phase
++ * of a 2 stage control transfer.
++ */
++static inline void do_setup_in_status_phase(fh_otg_pcd_t * pcd)
++{
++	fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
++	if (pcd->ep0state == EP0_STALL) {
++		return;
++	}
++
++	pcd->ep0state = EP0_IN_STATUS_PHASE;
++
++	/* Prepare for more SETUP Packets */
++	FH_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n");
++	if ((GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_3_00a)
++	    && (pcd->core_if->dma_desc_enable)
++	    && (ep0->fh_ep.xfer_count < ep0->fh_ep.total_len)) {
++		FH_DEBUGPL(DBG_PCDV,
++			    "Data terminated wait next packet in out_desc_addr\n");
++		pcd->backup_buf = phys_to_virt(ep0->fh_ep.dma_addr);
++		pcd->data_terminated = 1;
++	}
++	ep0->fh_ep.xfer_len = 0;
++	ep0->fh_ep.xfer_count = 0;
++	ep0->fh_ep.is_in = 1;
++	ep0->fh_ep.dma_addr = pcd->setup_pkt_dma_handle;
++	fh_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->fh_ep);
++
++	/* Prepare for more SETUP Packets */
++	//ep0_out_start(GET_CORE_IF(pcd), pcd);
++}
++
++/**
++ * This function starts the Zero-Length Packet for the OUT status phase
++ * of a 2 stage control transfer.
++ */
++static inline void do_setup_out_status_phase(fh_otg_pcd_t * pcd)
++{
++	fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
++	doepint_data_t doepint;
++	doepint.d32 = FH_READ_REG32(&pcd->core_if->dev_if->out_ep_regs[0]->doepint);
++	if (pcd->ep0state == EP0_STALL) {
++		FH_DEBUGPL(DBG_PCD, "EP0 STALLED\n");
++		return;
++	}
++	pcd->ep0state = EP0_OUT_STATUS_PHASE;
++
++	FH_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n");
++	ep0->fh_ep.xfer_len = 0;
++	ep0->fh_ep.xfer_count = 0;
++	ep0->fh_ep.is_in = 0;
++	ep0->fh_ep.dma_addr = pcd->setup_pkt_dma_handle;
++	/* If there is an xfercompl on EP0 OUT, do not start the OUT Status
++	 * stage: xfercompl means the ZLP was already received, since EP0 OUT
++	 * is enabled during the IN Data stage.
++	 */
++	if ((doepint.b.xfercompl == 1) && (pcd->core_if->snpsid >= OTG_CORE_REV_3_00a)
++	    && (pcd->core_if->dma_enable == 1) && (pcd->core_if->dma_desc_enable == 0)) {
++		FH_DEBUGPL(DBG_PCD, "Status stage already completed\n");
++		return;
++	}
++
++	fh_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->fh_ep);
++
++	/* Prepare for more SETUP Packets */
++	if (GET_CORE_IF(pcd)->dma_enable == 0) {
++		ep0_out_start(GET_CORE_IF(pcd), pcd);
++	}
++}
++
++/**
++ * Clear the EP halt (STALL) and, if there are pending requests,
++ * start the transfer.
++ */
++static inline void pcd_clear_halt(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * ep)
++{
++	if (ep->fh_ep.stall_clear_flag) {
++		/* Start Control Status Phase */
++		do_setup_in_status_phase(pcd);
++		return;
++	}
++
++	fh_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->fh_ep);
++
++	/* Reactivate the EP */
++	fh_otg_ep_activate(GET_CORE_IF(pcd), &ep->fh_ep);
++	if (ep->stopped) {
++		ep->stopped = 0;
++		/* If there is a request in the EP queue start it */
++
++		/** @todo FIXME: this causes an EP mismatch in DMA mode.
++		 * epmismatch not yet implemented. */
++
++		/*
++		 * The fixme above is addressed by implementing a tasklet that
++		 * calls start_next_request() outside of interrupt context,
++		 * some time after a clear-halt setup packet. EP mismatch
++		 * handling still needs to be implemented if a gadget ever
++		 * uses more than one endpoint at once.
++		 */
++		ep->queue_sof = 1;
++		FH_TASK_SCHEDULE(pcd->start_xfer_tasklet);
++	}
++	/* Start Control Status Phase */
++	do_setup_in_status_phase(pcd);
++}
++
++/**
++ * This function is called when the SET_FEATURE TEST_MODE Setup packet
++ * is sent from the host.  The Device Control register is written with
++ * the Test Mode bits set to the specified Test Mode.  This is done as
++ * a tasklet so that the "Status" phase of the control transfer
++ * completes before transmitting the TEST packets.
++ *
++ * @todo This has not been tested since the tasklet struct was put
++ * into the PCD struct!
++ *
++ */
++void do_test_mode(void *data)
++{
++	dctl_data_t dctl;
++	fh_otg_pcd_t *pcd = (fh_otg_pcd_t *) data;
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	int test_mode = pcd->test_mode;
++
++//        FH_WARN("%s() has not been tested since being rewritten!\n", __func__);
++
++	dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
++	switch (test_mode) {
++	case 1:		// TEST_J
++		dctl.b.tstctl = 1;
++		break;
++
++	case 2:		// TEST_K
++		dctl.b.tstctl = 2;
++		break;
++
++	case 3:		// TEST_SE0_NAK
++		dctl.b.tstctl = 3;
++		break;
++
++	case 4:		// TEST_PACKET
++		dctl.b.tstctl = 4;
++		break;
++
++	case 5:		// TEST_FORCE_ENABLE
++		dctl.b.tstctl = 5;
++		break;
++	case 7:
++		fh_otg_set_hnpreq(core_if, 1);
++		break;
++	}
++	FH_PRINTF("test mode = %d\n", test_mode);
++	core_if->test_mode = test_mode;
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
++}
++
++/**
++ * This function processes the GET_STATUS Setup Commands.
++ */
++static inline void do_get_status(fh_otg_pcd_t * pcd)
++{
++	usb_device_request_t ctrl = pcd->setup_pkt->req;
++	fh_otg_pcd_ep_t *ep;
++	fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
++	uint16_t *status = pcd->status_buf;
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++
++#ifdef DEBUG_EP0
++	FH_DEBUGPL(DBG_PCD,
++		    "GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
++		    ctrl.bmRequestType, ctrl.bRequest,
++		    UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
++		    UGETW(ctrl.wLength));
++#endif
++
++	switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
++	case UT_DEVICE:
++		if (UGETW(ctrl.wIndex) == 0xF000) {	/* OTG Status selector */
++			FH_PRINTF("wIndex - %d\n", UGETW(ctrl.wIndex));
++			FH_PRINTF("OTG VERSION - %d\n", core_if->otg_ver);
++			FH_PRINTF("OTG CAP - %d, %d\n",
++				   core_if->core_params->otg_cap,
++				   FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
++			if (core_if->otg_ver == 1
++			    && core_if->core_params->otg_cap ==
++			    FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
++				uint8_t *otgsts = (uint8_t *) pcd->status_buf;
++				*otgsts = (core_if->otg_sts & 0x1);
++				pcd->ep0_pending = 1;
++				ep0->fh_ep.start_xfer_buff =
++				    (uint8_t *) otgsts;
++				ep0->fh_ep.xfer_buff = (uint8_t *) otgsts;
++				ep0->fh_ep.dma_addr =
++				    pcd->status_buf_dma_handle;
++				ep0->fh_ep.xfer_len = 1;
++				ep0->fh_ep.xfer_count = 0;
++				ep0->fh_ep.total_len = ep0->fh_ep.xfer_len;
++				fh_otg_ep0_start_transfer(GET_CORE_IF(pcd),
++							   &ep0->fh_ep);
++				return;
++			} else {
++				ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
++				return;
++			}
++		} else {
++			*status = 0x1;	/* Self powered */
++			*status |= pcd->remote_wakeup_enable << 1;
++			break;
++		}
++	case UT_INTERFACE:
++		*status = 0;
++		break;
++
++	case UT_ENDPOINT:
++		ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
++		if (ep == 0 || UGETW(ctrl.wLength) > 2) {
++			ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
++			return;
++		}
++		/** @todo check for EP stall */
++		*status = ep->stopped;
++		break;
++	}
++	pcd->ep0_pending = 1;
++	ep0->fh_ep.start_xfer_buff = (uint8_t *) status;
++	ep0->fh_ep.xfer_buff = (uint8_t *) status;
++	ep0->fh_ep.dma_addr = pcd->status_buf_dma_handle;
++	ep0->fh_ep.xfer_len = 2;
++	ep0->fh_ep.xfer_count = 0;
++	ep0->fh_ep.total_len = ep0->fh_ep.xfer_len;
++	fh_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->fh_ep);
++}
++
++/**
++ * This function processes the SET_FEATURE Setup Commands.
++ */
++static inline void do_set_feature(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++	usb_device_request_t ctrl = pcd->setup_pkt->req;
++	fh_otg_pcd_ep_t *ep = 0;
++	int32_t otg_cap_param = core_if->core_params->otg_cap;
++	gotgctl_data_t gotgctl = {.d32 = 0 };
++	gintmsk_data_t gintmsk = {.d32 = 0 };
++
++	FH_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
++		    ctrl.bmRequestType, ctrl.bRequest,
++		    UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
++		    UGETW(ctrl.wLength));
++	FH_DEBUGPL(DBG_PCD, "otg_cap=%d\n", otg_cap_param);
++
++	switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
++	case UT_DEVICE:
++		switch (UGETW(ctrl.wValue)) {
++		case UF_DEVICE_REMOTE_WAKEUP:
++			pcd->remote_wakeup_enable = 1;
++			break;
++
++		case UF_TEST_MODE:
++			/* Setup the Test Mode tasklet to do the Test
++			 * Packet generation after the SETUP Status
++			 * phase has completed. */
++
++			/** @todo This has not been tested since the
++			 * tasklet struct was put into the PCD
++			 * struct! */
++			pcd->test_mode = UGETW(ctrl.wIndex) >> 8;
++			FH_TASK_SCHEDULE(pcd->test_mode_tasklet);
++			break;
++
++		case UF_DEVICE_B_HNP_ENABLE:
++			FH_DEBUGPL(DBG_PCDV,
++				    "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
++
++			/* dev may initiate HNP */
++			if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
++				gotgctl.b.devhnpen = 1;
++				if (core_if->otg_ver) {
++					FH_MODIFY_REG32(&global_regs->gotgctl, 0, gotgctl.d32);
++					/* Ensure that USB Suspend interrupt is unmasked */
++					gintmsk.b.usbsuspend = 1;
++					FH_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
++				}
++				else {
++					pcd->b_hnp_enable = 1;
++					fh_otg_pcd_update_otg(pcd, 0);
++					FH_DEBUGPL(DBG_PCD, "Request B HNP\n");
++					/**@todo Is the gotgctl.devhnpen cleared
++					 * by a USB Reset? */
++					gotgctl.b.hnpreq = 1;
++					FH_WRITE_REG32(&global_regs->gotgctl, gotgctl.d32);
++				}
++			} else {
++				ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
++				return;
++			}
++			break;
++
++		case UF_DEVICE_A_HNP_SUPPORT:
++			/* RH port supports HNP */
++			FH_DEBUGPL(DBG_PCDV,
++				    "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
++			if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
++				pcd->a_hnp_support = 1;
++				fh_otg_pcd_update_otg(pcd, 0);
++			} else {
++				ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
++				return;
++			}
++			break;
++
++		case UF_DEVICE_A_ALT_HNP_SUPPORT:
++			/* other RH port does */
++			FH_DEBUGPL(DBG_PCDV,
++				    "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
++			if (otg_cap_param == FH_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
++				pcd->a_alt_hnp_support = 1;
++				fh_otg_pcd_update_otg(pcd, 0);
++			} else {
++				ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
++				return;
++			}
++			break;
++
++		default:
++			ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
++			return;
++
++		}
++		do_setup_in_status_phase(pcd);
++		break;
++
++	case UT_INTERFACE:
++		do_gadget_setup(pcd, &ctrl);
++		break;
++
++	case UT_ENDPOINT:
++		if (UGETW(ctrl.wValue) == UF_ENDPOINT_HALT) {
++			ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
++			if (ep == 0) {
++				ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
++				return;
++			}
++			ep->stopped = 1;
++			fh_otg_ep_set_stall(core_if, &ep->fh_ep);
++		}
++		do_setup_in_status_phase(pcd);
++		break;
++	}
++}
++
++/**
++ * This function processes the CLEAR_FEATURE Setup Commands.
++ */
++static inline void do_clear_feature(fh_otg_pcd_t * pcd)
++{
++	usb_device_request_t ctrl = pcd->setup_pkt->req;
++	fh_otg_pcd_ep_t *ep = 0;
++
++	FH_DEBUGPL(DBG_PCD,
++		    "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
++		    ctrl.bmRequestType, ctrl.bRequest,
++		    UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
++		    UGETW(ctrl.wLength));
++
++	switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
++	case UT_DEVICE:
++		switch (UGETW(ctrl.wValue)) {
++		case UF_DEVICE_REMOTE_WAKEUP:
++			pcd->remote_wakeup_enable = 0;
++			break;
++
++		case UF_TEST_MODE:
++			/** @todo Add CLEAR_FEATURE for TEST modes. */
++			break;
++
++		default:
++			ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
++			return;
++		}
++		do_setup_in_status_phase(pcd);
++		break;
++
++	case UT_ENDPOINT:
++		ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
++		if (ep == 0) {
++			ep0_do_stall(pcd, -FH_E_NOT_SUPPORTED);
++			return;
++		}
++
++		pcd_clear_halt(pcd, ep);
++
++		break;
++	}
++}
++
++/**
++ * This function processes the SET_ADDRESS Setup Commands.
++ */
++static inline void do_set_address(fh_otg_pcd_t * pcd)
++{
++	fh_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
++	usb_device_request_t ctrl = pcd->setup_pkt->req;
++
++	if (ctrl.bmRequestType == UT_DEVICE) {
++		dcfg_data_t dcfg = {.d32 = 0 };
++
++#ifdef DEBUG_EP0
++//                      FH_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue);
++#endif
++		dcfg.b.devaddr = UGETW(ctrl.wValue);
++		FH_MODIFY_REG32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32);
++		do_setup_in_status_phase(pcd);
++	}
++}
++
++/**
++ *	This function processes SETUP commands. In Linux, the USB Command
++ *	processing is done in two places - the first being the PCD and the
++ *	second in the Gadget Driver (for example, the File-Backed Storage
++ *	Gadget Driver).
++ *
++ * <table>
++ * <tr><td>Command	</td><td>Driver </td><td>Description</td></tr>
++ *
++ * <tr><td>GET_STATUS </td><td>PCD </td><td>Command is processed as
++ * defined in chapter 9 of the USB 2.0 Specification
++ * </td></tr>
++ *
++ * <tr><td>CLEAR_FEATURE </td><td>PCD </td><td>For Device and Endpoint
++ * requests the ENDPOINT_HALT feature is processed; all other requests,
++ * including Interface requests, are ignored.</td></tr>
++ *
++ * <tr><td>SET_FEATURE </td><td>PCD </td><td>The Device and Endpoint
++ * requests are processed by the PCD.  Interface requests are passed
++ * to the Gadget Driver.</td></tr>
++ *
++ * <tr><td>SET_ADDRESS </td><td>PCD </td><td>Program the DCFG register
++ * with the received device address.</td></tr>
++ *
++ * <tr><td>GET_DESCRIPTOR </td><td>Gadget Driver </td><td>Return the
++ * requested descriptor</td></tr>
++ *
++ * <tr><td>SET_DESCRIPTOR </td><td>Gadget Driver </td><td>Optional -
++ * not implemented by any of the existing Gadget Drivers.</td></tr>
++ *
++ * <tr><td>SET_CONFIGURATION </td><td>Gadget Driver </td><td>Disable
++ * all EPs and enable EPs for new configuration.</td></tr>
++ *
++ * <tr><td>GET_CONFIGURATION </td><td>Gadget Driver </td><td>Return
++ * the current configuration</td></tr>
++ *
++ * <tr><td>SET_INTERFACE </td><td>Gadget Driver </td><td>Disable all
++ * EPs and enable EPs for new configuration.</td></tr>
++ *
++ * <tr><td>GET_INTERFACE </td><td>Gadget Driver </td><td>Return the
++ * current interface.</td></tr>
++ *
++ * <tr><td>SYNC_FRAME </td><td>PCD </td><td>Display debug
++ * message.</td></tr>
++ * </table>
++ *
++ * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are
++ * processed by pcd_setup. Calling the Function Driver's setup function from
++ * pcd_setup processes the gadget SETUP commands.
++ */
++static inline void pcd_setup(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	usb_device_request_t ctrl = pcd->setup_pkt->req;
++	fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
++
++	deptsiz0_data_t doeptsize0 = {.d32 = 0 };
++
++#ifdef FH_UTE_CFI
++	int retval = 0;
++	struct cfi_usb_ctrlrequest cfi_req;
++#endif
++
++	doeptsize0.d32 = FH_READ_REG32(&dev_if->out_ep_regs[0]->doeptsiz);
++
++	/** In Buffer DMA mode more than one setup packet is not supported until core revision 3.00a */
++	if (core_if->dma_enable && core_if->dma_desc_enable == 0
++	    && (doeptsize0.b.supcnt < 2)
++	    && (core_if->snpsid < OTG_CORE_REV_2_94a)) {
++		FH_ERROR
++		    ("\n\n-----------	 CANNOT handle > 1 setup packet in DMA mode\n\n");
++	}
++	if ((core_if->snpsid >= OTG_CORE_REV_3_00a)
++	    && (core_if->dma_enable == 1) && (core_if->dma_desc_enable == 0)) {
++		if (doeptsize0.b.supcnt == 3 && ep0->fh_ep.stp_rollover == 0) {
++			FH_ERROR(" !!! Setup packet count was not updated by the core\n");
++			return;
++		}
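++		/* SUPCnt is programmed to 3 and decrements for each SETUP
++		 * packet the core receives, so the most recent packet lives
++		 * at index (3 - supcnt - 1), adjusted for setup buffer
++		 * rollover. */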
++		ctrl =
++		    (pcd->setup_pkt +
++		     (3 - doeptsize0.b.supcnt - 1 +
++		      ep0->fh_ep.stp_rollover))->req;
++	}
++#ifdef DEBUG_EP0
++	FH_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
++		    ctrl.bmRequestType, ctrl.bRequest,
++		    UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
++		    UGETW(ctrl.wLength));
++#endif
++
++	/* Clean up the request queue */
++	fh_otg_request_nuke(ep0);
++	ep0->stopped = 0;
++
++	if (ctrl.bmRequestType & UE_DIR_IN) {
++		ep0->fh_ep.is_in = 1;
++		pcd->ep0state = EP0_IN_DATA_PHASE;
++	} else {
++		ep0->fh_ep.is_in = 0;
++		pcd->ep0state = EP0_OUT_DATA_PHASE;
++	}
++
++	if (UGETW(ctrl.wLength) == 0) {
++		ep0->fh_ep.is_in = 1;
++		pcd->ep0state = EP0_IN_STATUS_PHASE;
++	}
++
++	if (UT_GET_TYPE(ctrl.bmRequestType) != UT_STANDARD) {
++
++#ifdef FH_UTE_CFI
++		FH_MEMCPY(&cfi_req, &ctrl, sizeof(usb_device_request_t));
++
++		//printk(KERN_ALERT "CFI: req_type=0x%02x; req=0x%02x\n",
++		//	ctrl.bmRequestType, ctrl.bRequest);
++		if (UT_GET_TYPE(cfi_req.bRequestType) == UT_VENDOR) {
++			if (cfi_req.bRequest > 0xB0 && cfi_req.bRequest < 0xBF) {
++				retval = cfi_setup(pcd, &cfi_req);
++				if (retval < 0) {
++					ep0_do_stall(pcd, retval);
++					pcd->ep0_pending = 0;
++					return;
++				}
++
++				/* If gadget setup is needed, call it and check the return value */
++				if (pcd->cfi->need_gadget_att) {
++					retval =
++					    cfi_gadget_setup(pcd,
++							     &pcd->
++							     cfi->ctrl_req);
++					if (retval < 0) {
++						pcd->ep0_pending = 0;
++						return;
++					}
++				}
++
++				if (pcd->cfi->need_status_in_complete) {
++					do_setup_in_status_phase(pcd);
++				}
++				return;
++			}
++		}
++#endif
++
++		/* handle non-standard (class/vendor) requests in the gadget driver */
++		do_gadget_setup(pcd, &ctrl);
++		return;
++	}
++
++	/** @todo NGS: Handle bad setup packet? */
++
++///////////////////////////////////////////
++//// --- Standard Request handling --- ////
++
++	switch (ctrl.bRequest) {
++	case UR_GET_STATUS:
++		do_get_status(pcd);
++		break;
++
++	case UR_CLEAR_FEATURE:
++		do_clear_feature(pcd);
++		break;
++
++	case UR_SET_FEATURE:
++		do_set_feature(pcd);
++		break;
++
++	case UR_SET_ADDRESS:
++		do_set_address(pcd);
++		break;
++
++	case UR_SET_INTERFACE:
++	case UR_SET_CONFIG:
++//              _pcd->request_config = 1;       /* Configuration changed */
++		do_gadget_setup(pcd, &ctrl);
++		break;
++
++	case UR_SYNCH_FRAME:
++		do_gadget_setup(pcd, &ctrl);
++		break;
++
++	default:
++		/* Call the Gadget Driver's setup functions */
++		do_gadget_setup(pcd, &ctrl);
++		break;
++	}
++}
++
++/**
++ * This function completes the ep0 control transfer.
++ */
++static int32_t ep0_complete_request(fh_otg_pcd_ep_t * ep)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	fh_otg_dev_in_ep_regs_t *in_ep_regs =
++	    dev_if->in_ep_regs[ep->fh_ep.num];
++#ifdef DEBUG_EP0
++	fh_otg_dev_out_ep_regs_t *out_ep_regs =
++	    dev_if->out_ep_regs[ep->fh_ep.num];
++#endif
++	deptsiz0_data_t deptsiz;
++	dev_dma_desc_sts_t desc_sts = {.d32 = 0 };
++	fh_otg_pcd_request_t *req;
++	int is_last = 0;
++	fh_otg_pcd_t *pcd = ep->pcd;
++
++#ifdef FH_UTE_CFI
++	struct cfi_usb_ctrlrequest *ctrlreq;
++	int retval = -FH_E_NOT_SUPPORTED;
++#endif
++
++	if (pcd->ep0_pending && FH_CIRCLEQ_EMPTY(&ep->queue)) {
++		if (ep->fh_ep.is_in) {
++#ifdef DEBUG_EP0
++			FH_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n");
++#endif
++			do_setup_out_status_phase(pcd);
++		} else {
++#ifdef DEBUG_EP0
++			FH_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n");
++#endif
++
++#ifdef FH_UTE_CFI
++			ctrlreq = &pcd->cfi->ctrl_req;
++
++			if (UT_GET_TYPE(ctrlreq->bRequestType) == UT_VENDOR) {
++				if (ctrlreq->bRequest > 0xB0
++				    && ctrlreq->bRequest < 0xBF) {
++
++					/* Return if the PCD failed to handle the request */
++					if ((retval =
++					     pcd->cfi->ops.
++					     ctrl_write_complete(pcd->cfi,
++								 pcd)) < 0) {
++						CFI_INFO
++						    ("ERROR setting a new value in the PCD(%d)\n",
++						     retval);
++						ep0_do_stall(pcd, retval);
++						pcd->ep0_pending = 0;
++						return 0;
++					}
++
++					/* If the gadget needs to be notified on the request */
++					if (pcd->cfi->need_gadget_att == 1) {
++						//retval = do_gadget_setup(pcd, &pcd->cfi->ctrl_req);
++						retval =
++						    cfi_gadget_setup(pcd,
++								     &pcd->cfi->
++								     ctrl_req);
++
++						/* Return from the function if the gadget failed to process
++						 * the request properly - this should never happen !!!
++						 */
++						if (retval < 0) {
++							CFI_INFO
++							    ("ERROR setting a new value in the gadget(%d)\n",
++							     retval);
++							pcd->ep0_pending = 0;
++							return 0;
++						}
++					}
++
++					CFI_INFO("%s: RETVAL=%d\n", __func__,
++						 retval);
++					/* If we hit here then the PCD and the gadget has properly
++					 * handled the request - so send the ZLP IN to the host.
++					 */
++					/* @todo: MAS - decide whether we need to start the setup
++					 * stage based on the need_setup value of the cfi object
++					 */
++					do_setup_in_status_phase(pcd);
++					pcd->ep0_pending = 0;
++					return 1;
++				}
++			}
++#endif
++
++			do_setup_in_status_phase(pcd);
++		}
++		pcd->ep0_pending = 0;
++		return 1;
++	}
++
++	if (FH_CIRCLEQ_EMPTY(&ep->queue)) {
++		return 0;
++	}
++	req = FH_CIRCLEQ_FIRST(&ep->queue);
++
++	if (pcd->ep0state == EP0_OUT_STATUS_PHASE
++	    || pcd->ep0state == EP0_IN_STATUS_PHASE) {
++		is_last = 1;
++	} else if (ep->fh_ep.is_in) {
++		deptsiz.d32 = FH_READ_REG32(&in_ep_regs->dieptsiz);
++		if (core_if->dma_desc_enable != 0)
++			desc_sts = dev_if->in_desc_addr->status;
++#ifdef DEBUG_EP0
++		FH_DEBUGPL(DBG_PCDV, "%d len=%d  xfersize=%d pktcnt=%d\n",
++			    ep->fh_ep.num, ep->fh_ep.xfer_len,
++			    deptsiz.b.xfersize, deptsiz.b.pktcnt);
++#endif
++
++		if (((core_if->dma_desc_enable == 0)
++		     && (deptsiz.b.xfersize == 0))
++		    || ((core_if->dma_desc_enable != 0)
++			&& (desc_sts.b.bytes == 0))) {
++			req->actual = ep->fh_ep.xfer_count;
++			/* Is a Zero Len Packet needed? */
++			if (req->sent_zlp) {
++#ifdef DEBUG_EP0
++				FH_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n");
++#endif
++				req->sent_zlp = 0;
++			}
++			do_setup_out_status_phase(pcd);
++		}
++	} else {
++		/* ep0-OUT */
++#ifdef DEBUG_EP0
++		deptsiz.d32 = FH_READ_REG32(&out_ep_regs->doeptsiz);
++		FH_DEBUGPL(DBG_PCDV, "%d len=%d xsize=%d pktcnt=%d\n",
++			    ep->fh_ep.num, ep->fh_ep.xfer_len,
++			    deptsiz.b.xfersize, deptsiz.b.pktcnt);
++#endif
++		req->actual = ep->fh_ep.xfer_count;
++
++		/* Is a Zero Len Packet needed? */
++		if (req->sent_zlp) {
++#ifdef DEBUG_EP0
++			FH_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n");
++#endif
++			req->sent_zlp = 0;
++		}
++		/* For cores older than 3.00a, do the setup IN status phase here
++		 * in Slave and Buffer DMA modes; starting from 3.00a do it only
++		 * in Slave mode, and for DMA modes just re-enable EP0 OUT here. */
++		if (core_if->dma_enable == 0
++		    || (core_if->dma_desc_enable == 0
++			&& core_if->snpsid <= OTG_CORE_REV_2_94a)) {
++			do_setup_in_status_phase(pcd);
++		} else if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
++			FH_DEBUGPL(DBG_PCDV,
++				    "Enable out ep before in status phase\n");
++			ep0_out_start(core_if, pcd);
++		}
++	}
++
++	/* Complete the request */
++	if (is_last) {
++		fh_otg_request_done(ep, req, 0);
++		ep->fh_ep.start_xfer_buff = 0;
++		ep->fh_ep.xfer_buff = 0;
++		ep->fh_ep.xfer_len = 0;
++		return 1;
++	}
++	return 0;
++}
++
++#ifdef FH_UTE_CFI
++/**
++ * This function traverses all the CFI DMA descriptors and
++ * accumulates the bytes that are left to be transferred.
++ *
++ * @return The total bytes left to be transferred, or a negative value on failure
++ */
++static inline int cfi_calc_desc_residue(fh_otg_pcd_ep_t * ep)
++{
++	int32_t ret = 0;
++	int i;
++	struct fh_otg_dma_desc *ddesc = NULL;
++	struct cfi_ep *cfiep;
++
++	/* See if the pcd_ep has its respective cfi_ep mapped */
++	cfiep = get_cfi_ep_by_pcd_ep(ep->pcd->cfi, ep);
++	if (!cfiep) {
++		CFI_INFO("%s: Failed to find ep\n", __func__);
++		return -1;
++	}
++
++	ddesc = ep->fh_ep.descs;
++
++	for (i = 0; (i < cfiep->desc_count) && (i < MAX_DMA_DESCS_PER_EP); i++) {
++
++#if defined(PRINT_CFI_DMA_DESCS)
++		print_desc(ddesc, ep->ep.name, i);
++#endif
++		ret += ddesc->status.b.bytes;
++		ddesc++;
++	}
++
++	if (ret)
++		CFI_INFO("!!!!!!!!!! WARNING (%s) - residue=%d\n", __func__,
++			 ret);
++
++	return ret;
++}
++#endif
++
++/**
++ * This function completes the request for the EP. If there are
++ * additional requests for the EP in the queue they will be started.
++ */
++static void complete_ep(fh_otg_pcd_ep_t * ep)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	fh_otg_dev_in_ep_regs_t *in_ep_regs =
++	    dev_if->in_ep_regs[ep->fh_ep.num];
++	deptsiz_data_t deptsiz;
++	dev_dma_desc_sts_t desc_sts;
++	fh_otg_pcd_request_t *req = 0;
++	fh_otg_dev_dma_desc_t *dma_desc;
++	uint32_t byte_count = 0;
++	int is_last = 0;
++	int i;
++
++	FH_DEBUGPL(DBG_PCDV, "%s() %d-%s\n", __func__, ep->fh_ep.num,
++		    (ep->fh_ep.is_in ? "IN" : "OUT"));
++
++	/* Get any pending requests */
++	if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
++		req = FH_CIRCLEQ_FIRST(&ep->queue);
++		if (!req) {
++			FH_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
++			return;
++		}
++	} else {
++		FH_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
++		return;
++	}
++
++	FH_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending);
++
++	if (ep->fh_ep.is_in) {
++		deptsiz.d32 = FH_READ_REG32(&in_ep_regs->dieptsiz);
++
++		if (core_if->dma_enable) {
++			if (core_if->dma_desc_enable == 0) {
++				if (deptsiz.b.xfersize == 0
++				    && deptsiz.b.pktcnt == 0) {
++					byte_count =
++					    ep->fh_ep.xfer_len -
++					    ep->fh_ep.xfer_count;
++
++					ep->fh_ep.xfer_buff += byte_count;
++					ep->fh_ep.dma_addr += byte_count;
++					ep->fh_ep.xfer_count += byte_count;
++
++					FH_DEBUGPL(DBG_PCDV,
++						    "%d-%s len=%d  xfersize=%d pktcnt=%d\n",
++						    ep->fh_ep.num,
++						    (ep->fh_ep.
++						     is_in ? "IN" : "OUT"),
++						    ep->fh_ep.xfer_len,
++						    deptsiz.b.xfersize,
++						    deptsiz.b.pktcnt);
++
++					if (ep->fh_ep.xfer_len <
++					    ep->fh_ep.total_len) {
++						fh_otg_ep_start_transfer
++						    (core_if, &ep->fh_ep);
++					} else if (ep->fh_ep.sent_zlp) {
++						/*
++						 * Start a zero-length transfer
++						 * when the queued request's size
++						 * is a multiple of the EP's max
++						 * packet size and the usb_request
++						 * zero flag is set, meaning a ZLP
++						 * must follow the data. In Slave
++						 * and Buffer DMA modes SW has to
++						 * initiate two transfers: one for
++						 * the data and one of zero size.
++						 * In Descriptor DMA mode SW can
++						 * initiate a single transfer that
++						 * handles all the packets,
++						 * including the final ZLP.
++						 */
++						ep->fh_ep.sent_zlp = 0;
++						fh_otg_ep_start_zl_transfer
++						    (core_if, &ep->fh_ep);
++					} else {
++						is_last = 1;
++					}
++				} else {
++					if (ep->fh_ep.type ==
++					    FH_OTG_EP_TYPE_ISOC) {
++						req->actual = 0;
++						fh_otg_request_done(ep, req, 0);
++
++						ep->fh_ep.start_xfer_buff = 0;
++						ep->fh_ep.xfer_buff = 0;
++						ep->fh_ep.xfer_len = 0;
++
++						/* If there is a request in the queue start it. */
++						start_next_request(ep);
++					} else
++						FH_WARN
++						("Incomplete transfer (%d - %s [siz=%d pkt=%d])\n",
++						ep->fh_ep.num,
++						(ep->fh_ep.is_in ? "IN" : "OUT"),
++						deptsiz.b.xfersize,
++						deptsiz.b.pktcnt);
++				}
++			} else {
++				dma_desc = ep->fh_ep.desc_addr;
++				byte_count = 0;
++				ep->fh_ep.sent_zlp = 0;
++
++#ifdef FH_UTE_CFI
++				CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
++					 ep->fh_ep.buff_mode);
++				if (ep->fh_ep.buff_mode != BM_STANDARD) {
++					int residue;
++
++					residue = cfi_calc_desc_residue(ep);
++					if (residue < 0)
++						return;
++
++					byte_count = residue;
++				} else {
++#endif
++					for (i = 0; i < ep->fh_ep.desc_cnt;
++					     ++i) {
++						desc_sts = dma_desc->status;
++						byte_count += desc_sts.b.bytes;
++						dma_desc++;
++					}
++#ifdef FH_UTE_CFI
++				}
++#endif
++				if (byte_count == 0) {
++					ep->fh_ep.xfer_count =
++					    ep->fh_ep.total_len;
++					is_last = 1;
++				} else {
++					FH_WARN("Incomplete transfer\n");
++				}
++			}
++		} else {
++			if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) {
++				FH_DEBUGPL(DBG_PCDV,
++					    "%d-%s len=%d  xfersize=%d pktcnt=%d\n",
++					    ep->fh_ep.num,
++					    ep->fh_ep.is_in ? "IN" : "OUT",
++					    ep->fh_ep.xfer_len,
++					    deptsiz.b.xfersize,
++					    deptsiz.b.pktcnt);
++
++				/* Check if the whole transfer was completed;
++				 * if not, set up a transfer for the next portion of data.
++				 */
++				if (ep->fh_ep.xfer_len < ep->fh_ep.total_len) {
++					fh_otg_ep_start_transfer(core_if,
++								  &ep->fh_ep);
++				} else if (ep->fh_ep.sent_zlp) {
++					/*
++					 * Start a zero-length transfer when the
++					 * queued request's size is a multiple of
++					 * the EP's max packet size and the
++					 * usb_request zero flag is set, meaning a
++					 * ZLP must follow the data. In Slave and
++					 * Buffer DMA modes SW has to initiate two
++					 * transfers: one for the data and one of
++					 * zero size. In Descriptor DMA mode SW can
++					 * initiate a single transfer that handles
++					 * all the packets, including the final ZLP.
++					 */
++					ep->fh_ep.sent_zlp = 0;
++					fh_otg_ep_start_zl_transfer(core_if,
++								     &ep->fh_ep);
++				} else {
++					is_last = 1;
++				}
++			} else {
++				FH_WARN
++				    ("Incomplete transfer (%d-%s [siz=%d pkt=%d])\n",
++				     ep->fh_ep.num,
++				     (ep->fh_ep.is_in ? "IN" : "OUT"),
++				     deptsiz.b.xfersize, deptsiz.b.pktcnt);
++			}
++		}
++	} else {
++		fh_otg_dev_out_ep_regs_t *out_ep_regs =
++		    dev_if->out_ep_regs[ep->fh_ep.num];
++		desc_sts.d32 = 0;
++		if (core_if->dma_enable) {
++			if (core_if->dma_desc_enable) {
++				dma_desc = ep->fh_ep.desc_addr;
++				byte_count = 0;
++				ep->fh_ep.sent_zlp = 0;
++
++#ifdef FH_UTE_CFI
++				CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
++					 ep->fh_ep.buff_mode);
++				if (ep->fh_ep.buff_mode != BM_STANDARD) {
++					int residue;
++					residue = cfi_calc_desc_residue(ep);
++					if (residue < 0)
++						return;
++					byte_count = residue;
++				} else {
++#endif
++
++					for (i = 0; i < ep->fh_ep.desc_cnt;
++					     ++i) {
++						desc_sts = dma_desc->status;
++						byte_count += desc_sts.b.bytes;
++						dma_desc++;
++					}
++
++#ifdef FH_UTE_CFI
++				}
++#endif
++				/* Check for Interrupt OUT transfers whose max
++				 * packet size is not dword-aligned.
++				 */
++				if (ep->fh_ep.type == FH_OTG_EP_TYPE_INTR &&
++				    (ep->fh_ep.maxpacket % 4)) {
++					ep->fh_ep.xfer_count =
++					    ep->fh_ep.total_len - byte_count;
++					if ((ep->fh_ep.xfer_len %
++					     ep->fh_ep.maxpacket)
++					    && (ep->fh_ep.xfer_len /
++						ep->fh_ep.maxpacket <
++						MAX_DMA_DESC_CNT))
++						ep->fh_ep.xfer_len -=
++						    (ep->fh_ep.desc_cnt -
++						     1) * ep->fh_ep.maxpacket +
++						    ep->fh_ep.xfer_len %
++						    ep->fh_ep.maxpacket;
++					else
++						ep->fh_ep.xfer_len -=
++						    ep->fh_ep.desc_cnt *
++						    ep->fh_ep.maxpacket;
++					if (ep->fh_ep.xfer_len > 0) {
++						fh_otg_ep_start_transfer
++						    (core_if, &ep->fh_ep);
++					} else {
++						is_last = 1;
++					}
++				} else {
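++					/* ((4 - (total_len & 0x3)) & 0x3) is the
++					 * padding that rounds total_len up to a
++					 * dword boundary; the transfer was
++					 * programmed with the padded length, so
++					 * count the completed bytes against it. */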
++					ep->fh_ep.xfer_count =
++					    ep->fh_ep.total_len - byte_count +
++					    ((4 -
++					      (ep->fh_ep.
++					       total_len & 0x3)) & 0x3);
++					is_last = 1;
++				}
++			} else {
++				deptsiz.d32 = 0;
++				deptsiz.d32 =
++				    FH_READ_REG32(&out_ep_regs->doeptsiz);
++
++				byte_count = (ep->fh_ep.xfer_len -
++					      ep->fh_ep.xfer_count -
++					      deptsiz.b.xfersize);
++				ep->fh_ep.xfer_buff += byte_count;
++				ep->fh_ep.dma_addr += byte_count;
++				ep->fh_ep.xfer_count += byte_count;
++
++				/* Check if the whole transfer was completed;
++				 * if not, set up a transfer for the next portion of data.
++				 */
++				if (ep->fh_ep.xfer_len < ep->fh_ep.total_len) {
++					fh_otg_ep_start_transfer(core_if,
++								  &ep->fh_ep);
++				} else if (ep->fh_ep.sent_zlp) {
++					/*
++					 * Start a zero-length transfer when the
++					 * queued request's size is a multiple of
++					 * the EP's max packet size and the
++					 * usb_request zero flag is set, meaning a
++					 * ZLP must follow the data. In Slave and
++					 * Buffer DMA modes SW has to initiate two
++					 * transfers: one for the data and one of
++					 * zero size. In Descriptor DMA mode SW can
++					 * initiate a single transfer that handles
++					 * all the packets, including the final ZLP.
++					 */
++					ep->fh_ep.sent_zlp = 0;
++					fh_otg_ep_start_zl_transfer(core_if,
++								     &ep->fh_ep);
++				} else {
++					is_last = 1;
++				}
++			}
++		} else {
++			/* Check if the whole transfer was completed;
++			 * if not, set up a transfer for the next portion of data.
++			 */
++			if (ep->fh_ep.xfer_len < ep->fh_ep.total_len) {
++				fh_otg_ep_start_transfer(core_if, &ep->fh_ep);
++			} else if (ep->fh_ep.sent_zlp) {
++				/*
++				 * Start a zero-length transfer when the queued
++				 * request's size is a multiple of the EP's max
++				 * packet size and the usb_request zero flag is
++				 * set, meaning a ZLP must follow the data. In
++				 * Slave and Buffer DMA modes SW has to initiate
++				 * two transfers: one for the data and one of
++				 * zero size. In Descriptor DMA mode SW can
++				 * initiate a single transfer that handles all
++				 * the packets, including the final ZLP.
++				 */
++				ep->fh_ep.sent_zlp = 0;
++				fh_otg_ep_start_zl_transfer(core_if,
++							     &ep->fh_ep);
++			} else {
++				is_last = 1;
++			}
++		}
++
++		FH_DEBUGPL(DBG_PCDV,
++			    "addr %p,	 %d-%s len=%d cnt=%d xsize=%d pktcnt=%d\n",
++			    &out_ep_regs->doeptsiz, ep->fh_ep.num,
++			    ep->fh_ep.is_in ? "IN" : "OUT",
++			    ep->fh_ep.xfer_len, ep->fh_ep.xfer_count,
++			    deptsiz.b.xfersize, deptsiz.b.pktcnt);
++	}
++
++	/* Complete the request */
++	if (is_last) {
++#ifdef FH_UTE_CFI
++		if (ep->fh_ep.buff_mode != BM_STANDARD) {
++			req->actual = ep->fh_ep.cfi_req_len - byte_count;
++		} else {
++#endif
++			req->actual = ep->fh_ep.xfer_count;
++#ifdef FH_UTE_CFI
++		}
++#endif
++		if (req->dw_align_buf) {
++			if (!ep->fh_ep.is_in) {
++				fh_memcpy(req->buf, req->dw_align_buf, req->length); 
++			}
++			FH_DMA_FREE(req->length, req->dw_align_buf,
++				     req->dw_align_buf_dma);
++		}
++
++		fh_otg_request_done(ep, req, 0);
++
++		ep->fh_ep.start_xfer_buff = 0;
++		ep->fh_ep.xfer_buff = 0;
++		ep->fh_ep.xfer_len = 0;
++
++		/* If there is a request in the queue start it. */
++		start_next_request(ep);
++	}
++}
++/**
++ * This function completes the request for the ISO EP in DDMA mode. If it is
++ * the last descriptor and the EP was disabled, program the descriptor chain
++ * already prepared (during ep_queue) if there are more requests to process.
++ */
++static void complete_ddma_iso_ep(fh_otg_pcd_ep_t * ep)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
++	dev_dma_desc_sts_t desc_sts;
++	fh_otg_pcd_request_t *req = 0;
++	fh_otg_dev_dma_desc_t *dma_desc;
++	fh_dma_t dma_desc_addr;
++	fh_ep_t *fh_ep;
++	uint32_t depdma;
++	uint32_t index;
++
++	FH_DEBUGPL(DBG_PCDV, "%s() %d-%s\n", __func__, ep->fh_ep.num,
++		    (ep->fh_ep.is_in ? "IN" : "OUT"));
++	fh_ep = &ep->fh_ep;
++	if (fh_ep->use_add_buf) {
++		dma_desc_addr = fh_ep->dma_desc_addr;
++		dma_desc = fh_ep->desc_addr;
++	} else {
++		dma_desc_addr = fh_ep->dma_desc_addr1;
++		dma_desc = fh_ep->desc_addr1;
++	}
++	/* Get any pending requests */
++	if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
++		req = FH_CIRCLEQ_FIRST(&ep->queue);
++		if (!req) {
++			FH_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
++			return;
++		}
++	} else {
++		FH_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
++		return;
++	}
++
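++	/* DxEPDMA points one descriptor past the last completed one, so
++	 * (DxEPDMA - chain base) / sizeof(descriptor) - 1 is the index of
++	 * the descriptor that just finished. */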
++	if (fh_ep->is_in) {
++		depdma = FH_READ_REG32(&core_if->dev_if->in_ep_regs[fh_ep->num]->diepdma);
++		index = (depdma - dma_desc_addr)/sizeof(fh_otg_dev_dma_desc_t) - 1;
++		desc_sts = dma_desc[index].status;
++		req->actual = req->length - desc_sts.b_iso_in.txbytes;
++	} else {
++		depdma = FH_READ_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doepdma);
++		index = (depdma - dma_desc_addr)/sizeof(fh_otg_dev_dma_desc_t) - 1;
++		desc_sts = dma_desc[index].status;
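++		/* OUT transfer sizes are programmed dword-aligned, so when the
++		 * request length is not a multiple of 4, compensate for the
++		 * pad bytes included in the reported residue. */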
++		if (req->length%4)
++			req->actual = req->length - desc_sts.b_iso_out.rxbytes + (4 - req->length%4);
++		else
++			req->actual = req->length - desc_sts.b_iso_out.rxbytes;
++	}
++
++	/* Complete the request */
++	fh_otg_request_done(ep, req, 0);
++}
++
++#ifdef FH_EN_ISOC
++
++/**
++ * This function handles the BNA interrupt for Isochronous EPs.
++ *
++ */
++static void fh_otg_pcd_handle_iso_bna(fh_otg_pcd_ep_t * ep)
++{
++	fh_ep_t *fh_ep = &ep->fh_ep;
++	volatile uint32_t *addr;
++	depctl_data_t depctl = {.d32 = 0 };
++	fh_otg_pcd_t *pcd = ep->pcd;
++	fh_otg_dev_dma_desc_t *dma_desc;
++	int i;
++
++	dma_desc =
++	    fh_ep->iso_desc_addr + fh_ep->desc_cnt * (fh_ep->proc_buf_num);
++
++	if (fh_ep->is_in) {
++		dev_dma_desc_sts_t sts = {.d32 = 0 };
++		for (i = 0; i < fh_ep->desc_cnt; ++i, ++dma_desc) {
++			sts.d32 = dma_desc->status.d32;
++			sts.b_iso_in.bs = BS_HOST_READY;
++			dma_desc->status.d32 = sts.d32;
++		}
++	} else {
++		dev_dma_desc_sts_t sts = {.d32 = 0 };
++		for (i = 0; i < fh_ep->desc_cnt; ++i, ++dma_desc) {
++			sts.d32 = dma_desc->status.d32;
++			sts.b_iso_out.bs = BS_HOST_READY;
++			dma_desc->status.d32 = sts.d32;
++		}
++	}
++
++	if (fh_ep->is_in == 0) {
++		addr =
++		    &GET_CORE_IF(pcd)->dev_if->out_ep_regs[fh_ep->
++							   num]->doepctl;
++	} else {
++		addr =
++		    &GET_CORE_IF(pcd)->dev_if->in_ep_regs[fh_ep->num]->diepctl;
++	}
++	depctl.b.epena = 1;
++	FH_MODIFY_REG32(addr, depctl.d32, depctl.d32);
++}
++
++/**
++ * This function sets the latest ISO packet information (non-PTI mode)
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ *
++ */
++void set_current_pkt_info(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	deptsiz_data_t deptsiz = {.d32 = 0 };
++	dma_addr_t dma_addr;
++	uint32_t offset;
++
++	if (ep->proc_buf_num)
++		dma_addr = ep->dma_addr1;
++	else
++		dma_addr = ep->dma_addr0;
++
++	if (ep->is_in) {
++		deptsiz.d32 =
++		    FH_READ_REG32(&core_if->dev_if->
++				   in_ep_regs[ep->num]->dieptsiz);
++		offset = ep->data_per_frame;
++	} else {
++		deptsiz.d32 =
++		    FH_READ_REG32(&core_if->dev_if->
++				   out_ep_regs[ep->num]->doeptsiz);
++		offset =
++		    ep->data_per_frame +
++		    (0x4 & (0x4 - (ep->data_per_frame & 0x3)));
++	}
++
++	if (!deptsiz.b.xfersize) {
++		ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
++		ep->pkt_info[ep->cur_pkt].offset =
++		    ep->cur_pkt_dma_addr - dma_addr;
++		ep->pkt_info[ep->cur_pkt].status = 0;
++	} else {
++		ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
++		ep->pkt_info[ep->cur_pkt].offset =
++		    ep->cur_pkt_dma_addr - dma_addr;
++		ep->pkt_info[ep->cur_pkt].status = -FH_E_NO_DATA;
++	}
++	ep->cur_pkt_addr += offset;
++	ep->cur_pkt_dma_addr += offset;
++	ep->cur_pkt++;
++}
++
++/**
++ * This function sets the latest ISO packet information (DDMA mode)
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param fh_ep The EP to start the transfer on.
++ *
++ */
++static void set_ddma_iso_pkts_info(fh_otg_core_if_t * core_if,
++				   fh_ep_t * fh_ep)
++{
++	fh_otg_dev_dma_desc_t *dma_desc;
++	dev_dma_desc_sts_t sts = {.d32 = 0 };
++	iso_pkt_info_t *iso_packet;
++	uint32_t data_per_desc;
++	uint32_t offset;
++	int i, j;
++
++	iso_packet = fh_ep->pkt_info;
++
++	/** Reinit closed DMA Descriptors */
++	/** ISO OUT EP */
++	if (fh_ep->is_in == 0) {
++		dma_desc =
++		    fh_ep->iso_desc_addr +
++		    fh_ep->desc_cnt * fh_ep->proc_buf_num;
++		offset = 0;
++
++		for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
++		     i += fh_ep->pkt_per_frm) {
++			for (j = 0; j < fh_ep->pkt_per_frm; ++j) {
++				data_per_desc =
++				    ((j + 1) * fh_ep->maxpacket >
++				     fh_ep->
++				     data_per_frame) ? fh_ep->data_per_frame -
++				    j * fh_ep->maxpacket : fh_ep->maxpacket;
++				data_per_desc +=
++				    (data_per_desc % 4) ? (4 -
++							   data_per_desc %
++							   4) : 0;
++
++				sts.d32 = dma_desc->status.d32;
++
++				/* Write status in iso packet descriptor */
++				iso_packet->status =
++				    sts.b_iso_out.rxsts +
++				    (sts.b_iso_out.bs ^ BS_DMA_DONE);
++				if (iso_packet->status) {
++					iso_packet->status = -FH_E_NO_DATA;
++				}
++
++				/* Received data length */
++				if (!sts.b_iso_out.rxbytes) {
++					iso_packet->length =
++					    data_per_desc -
++					    sts.b_iso_out.rxbytes;
++				} else {
++					iso_packet->length =
++					    data_per_desc -
++					    sts.b_iso_out.rxbytes + (4 -
++								     fh_ep->data_per_frame
++								     % 4);
++				}
++
++				iso_packet->offset = offset;
++
++				offset += data_per_desc;
++				dma_desc++;
++				iso_packet++;
++			}
++		}
++
++		for (j = 0; j < fh_ep->pkt_per_frm - 1; ++j) {
++			data_per_desc =
++			    ((j + 1) * fh_ep->maxpacket >
++			     fh_ep->data_per_frame) ? fh_ep->data_per_frame -
++			    j * fh_ep->maxpacket : fh_ep->maxpacket;
++			data_per_desc +=
++			    (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
++
++			sts.d32 = dma_desc->status.d32;
++
++			/* Write status in iso packet descriptor */
++			iso_packet->status =
++			    sts.b_iso_out.rxsts +
++			    (sts.b_iso_out.bs ^ BS_DMA_DONE);
++			if (iso_packet->status) {
++				iso_packet->status = -FH_E_NO_DATA;
++			}
++
++			/* Received data length */
++			iso_packet->length =
++			    fh_ep->data_per_frame - sts.b_iso_out.rxbytes;
++
++			iso_packet->offset = offset;
++
++			offset += data_per_desc;
++			iso_packet++;
++			dma_desc++;
++		}
++
++		sts.d32 = dma_desc->status.d32;
++
++		/* Write status in iso packet descriptor */
++		iso_packet->status =
++		    sts.b_iso_out.rxsts + (sts.b_iso_out.bs ^ BS_DMA_DONE);
++		if (iso_packet->status) {
++			iso_packet->status = -FH_E_NO_DATA;
++		}
++		/* Received data length */
++		if (!sts.b_iso_out.rxbytes) {
++			iso_packet->length =
++			    fh_ep->data_per_frame - sts.b_iso_out.rxbytes;
++		} else {
++			iso_packet->length =
++			    fh_ep->data_per_frame - sts.b_iso_out.rxbytes +
++			    (4 - fh_ep->data_per_frame % 4);
++		}
++
++		iso_packet->offset = offset;
++	} else {
++/** ISO IN EP */
++
++		dma_desc =
++		    fh_ep->iso_desc_addr +
++		    fh_ep->desc_cnt * fh_ep->proc_buf_num;
++
++		for (i = 0; i < fh_ep->desc_cnt - 1; i++) {
++			sts.d32 = dma_desc->status.d32;
++
++			/* Write status in iso packet descriptor */
++			iso_packet->status =
++			    sts.b_iso_in.txsts +
++			    (sts.b_iso_in.bs ^ BS_DMA_DONE);
++			if (iso_packet->status != 0) {
++				iso_packet->status = -FH_E_NO_DATA;
++
++			}
++			/* Bytes that have been transferred */
++			iso_packet->length =
++			    fh_ep->data_per_frame - sts.b_iso_in.txbytes;
++
++			dma_desc++;
++			iso_packet++;
++		}
++
++		sts.d32 = dma_desc->status.d32;
++		while (sts.b_iso_in.bs == BS_DMA_BUSY) {
++			sts.d32 = dma_desc->status.d32;
++		}
++
++		/* Write status in iso packet descriptor ??? to be done with ERROR codes */
++		iso_packet->status =
++		    sts.b_iso_in.txsts + (sts.b_iso_in.bs ^ BS_DMA_DONE);
++		if (iso_packet->status != 0) {
++			iso_packet->status = -FH_E_NO_DATA;
++		}
++
++		/* Bytes that have been transferred */
++		iso_packet->length =
++		    fh_ep->data_per_frame - sts.b_iso_in.txbytes;
++	}
++}
++
++/**
++ * This function reinitialize DMA Descriptors for Isochronous transfer
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param fh_ep The EP to start the transfer on.
++ *
++ */
++static void reinit_ddma_iso_xfer(fh_otg_core_if_t * core_if, fh_ep_t * fh_ep)
++{
++	int i, j;
++	fh_otg_dev_dma_desc_t *dma_desc;
++	dma_addr_t dma_ad;
++	volatile uint32_t *addr;
++	dev_dma_desc_sts_t sts = {.d32 = 0 };
++	uint32_t data_per_desc;
++
++	if (fh_ep->is_in == 0) {
++		addr = &core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl;
++	} else {
++		addr = &core_if->dev_if->in_ep_regs[fh_ep->num]->diepctl;
++	}
++
++	if (fh_ep->proc_buf_num == 0) {
++		/** Buffer 0 descriptors setup */
++		dma_ad = fh_ep->dma_addr0;
++	} else {
++		/** Buffer 1 descriptors setup */
++		dma_ad = fh_ep->dma_addr1;
++	}
++
++	/** Reinit closed DMA Descriptors */
++	/** ISO OUT EP */
++	if (fh_ep->is_in == 0) {
++		dma_desc =
++		    fh_ep->iso_desc_addr +
++		    fh_ep->desc_cnt * fh_ep->proc_buf_num;
++
++		sts.b_iso_out.bs = BS_HOST_READY;
++		sts.b_iso_out.rxsts = 0;
++		sts.b_iso_out.l = 0;
++		sts.b_iso_out.sp = 0;
++		sts.b_iso_out.ioc = 0;
++		sts.b_iso_out.pid = 0;
++		sts.b_iso_out.framenum = 0;
++
++		for (i = 0; i < fh_ep->desc_cnt - fh_ep->pkt_per_frm;
++		     i += fh_ep->pkt_per_frm) {
++			for (j = 0; j < fh_ep->pkt_per_frm; ++j) {
++				data_per_desc =
++				    ((j + 1) * fh_ep->maxpacket >
++				     fh_ep->
++				     data_per_frame) ? fh_ep->data_per_frame -
++				    j * fh_ep->maxpacket : fh_ep->maxpacket;
++				data_per_desc +=
++				    (data_per_desc % 4) ? (4 -
++							   data_per_desc %
++							   4) : 0;
++				sts.b_iso_out.rxbytes = data_per_desc;
++				dma_desc->buf = dma_ad;
++				dma_desc->status.d32 = sts.d32;
++
++				dma_ad += data_per_desc;
++				dma_desc++;
++			}
++		}
++
++		for (j = 0; j < fh_ep->pkt_per_frm - 1; ++j) {
++
++			data_per_desc =
++			    ((j + 1) * fh_ep->maxpacket >
++			     fh_ep->data_per_frame) ? fh_ep->data_per_frame -
++			    j * fh_ep->maxpacket : fh_ep->maxpacket;
++			data_per_desc +=
++			    (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
++			sts.b_iso_out.rxbytes = data_per_desc;
++
++			dma_desc->buf = dma_ad;
++			dma_desc->status.d32 = sts.d32;
++
++			dma_desc++;
++			dma_ad += data_per_desc;
++		}
++
++		sts.b_iso_out.ioc = 1;
++		sts.b_iso_out.l = fh_ep->proc_buf_num;
++
++		data_per_desc =
++		    ((j + 1) * fh_ep->maxpacket >
++		     fh_ep->data_per_frame) ? fh_ep->data_per_frame -
++		    j * fh_ep->maxpacket : fh_ep->maxpacket;
++		data_per_desc +=
++		    (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
++		sts.b_iso_out.rxbytes = data_per_desc;
++
++		dma_desc->buf = dma_ad;
++		dma_desc->status.d32 = sts.d32;
++	} else {
++/** ISO IN EP */
++
++		dma_desc =
++		    fh_ep->iso_desc_addr +
++		    fh_ep->desc_cnt * fh_ep->proc_buf_num;
++
++		sts.b_iso_in.bs = BS_HOST_READY;
++		sts.b_iso_in.txsts = 0;
++		sts.b_iso_in.sp = 0;
++		sts.b_iso_in.ioc = 0;
++		sts.b_iso_in.pid = fh_ep->pkt_per_frm;
++		sts.b_iso_in.framenum = fh_ep->next_frame;
++		sts.b_iso_in.txbytes = fh_ep->data_per_frame;
++		sts.b_iso_in.l = 0;
++
++		for (i = 0; i < fh_ep->desc_cnt - 1; i++) {
++			dma_desc->buf = dma_ad;
++			dma_desc->status.d32 = sts.d32;
++
++			sts.b_iso_in.framenum += fh_ep->bInterval;
++			dma_ad += fh_ep->data_per_frame;
++			dma_desc++;
++		}
++
++		sts.b_iso_in.ioc = 1;
++		sts.b_iso_in.l = fh_ep->proc_buf_num;
++
++		dma_desc->buf = dma_ad;
++		dma_desc->status.d32 = sts.d32;
++
++		fh_ep->next_frame =
++		    sts.b_iso_in.framenum + fh_ep->bInterval * 1;
++	}
++	fh_ep->proc_buf_num = (fh_ep->proc_buf_num ^ 1) & 0x1;
++}
++
++/**
++ * This function handles the ISO EP transfer complete interrupt
++ * in case an ISO OUT packet was dropped.
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param fh_ep The EP for which transfer complete was asserted
++ *
++ */
++static uint32_t handle_iso_out_pkt_dropped(fh_otg_core_if_t * core_if,
++					   fh_ep_t * fh_ep)
++{
++	uint32_t dma_addr;
++	uint32_t drp_pkt;
++	uint32_t drp_pkt_cnt;
++	deptsiz_data_t deptsiz = {.d32 = 0 };
++	depctl_data_t depctl = {.d32 = 0 };
++	int i;
++
++	deptsiz.d32 =
++	    FH_READ_REG32(&core_if->dev_if->
++			   out_ep_regs[fh_ep->num]->doeptsiz);
++
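++	/* The packet count decrements as packets arrive, so
++	 * (pkt_cnt - pktcnt) is the index of the first dropped packet;
++	 * the rest of that frame's packets are marked dropped below. */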
++	drp_pkt = fh_ep->pkt_cnt - deptsiz.b.pktcnt;
++	drp_pkt_cnt = fh_ep->pkt_per_frm - (drp_pkt % fh_ep->pkt_per_frm);
++
++	/* Setting dropped packets status */
++	for (i = 0; i < drp_pkt_cnt; ++i) {
++		fh_ep->pkt_info[drp_pkt].status = -FH_E_NO_DATA;
++		drp_pkt++;
++		deptsiz.b.pktcnt--;
++	}
++
++	if (deptsiz.b.pktcnt > 0) {
++		deptsiz.b.xfersize =
++		    fh_ep->xfer_len - (fh_ep->pkt_cnt -
++					deptsiz.b.pktcnt) * fh_ep->maxpacket;
++	} else {
++		deptsiz.b.xfersize = 0;
++		deptsiz.b.pktcnt = 0;
++	}
++
++	FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doeptsiz,
++			deptsiz.d32);
++
++	if (deptsiz.b.pktcnt > 0) {
++		if (fh_ep->proc_buf_num) {
++			dma_addr =
++			    fh_ep->dma_addr1 + fh_ep->xfer_len -
++			    deptsiz.b.xfersize;
++		} else {
++			dma_addr =
++			    fh_ep->dma_addr0 + fh_ep->xfer_len -
++			    deptsiz.b.xfersize;
++		}
++
++		FH_WRITE_REG32(&core_if->dev_if->
++				out_ep_regs[fh_ep->num]->doepdma, dma_addr);
++
++		/** Re-enable endpoint, clear nak  */
++		depctl.d32 = 0;
++		depctl.b.epena = 1;
++		depctl.b.cnak = 1;
++
++		FH_MODIFY_REG32(&core_if->dev_if->
++				 out_ep_regs[fh_ep->num]->doepctl, depctl.d32,
++				 depctl.d32);
++		return 0;
++	} else {
++		return 1;
++	}
++}
++
++/**
++ * This function sets ISO packet information (PTI mode)
++ *
++ * @param core_if Programming view of FH_otg controller.
++ * @param ep The EP to start the transfer on.
++ *
++ */
++static uint32_t set_iso_pkts_info(fh_otg_core_if_t * core_if, fh_ep_t * ep)
++{
++	int i, j;
++	dma_addr_t dma_ad;
++	iso_pkt_info_t *packet_info = ep->pkt_info;
++	uint32_t offset;
++	uint32_t frame_data;
++	deptsiz_data_t deptsiz;
++
++	if (ep->proc_buf_num == 0) {
++		/** Buffer 0 descriptors setup */
++		dma_ad = ep->dma_addr0;
++	} else {
++		/** Buffer 1 descriptors setup */
++		dma_ad = ep->dma_addr1;
++	}
++
++	if (ep->is_in) {
++		deptsiz.d32 =
++		    FH_READ_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
++				   dieptsiz);
++	} else {
++		deptsiz.d32 =
++		    FH_READ_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
++				   doeptsiz);
++	}
++
++	if (!deptsiz.b.xfersize) {
++		offset = 0;
++		for (i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) {
++			frame_data = ep->data_per_frame;
++			for (j = 0; j < ep->pkt_per_frm; ++j) {
++
++				/* Packet status is not set here: it is
++				 * initialized to 0, and if the packet was sent
++				 * successfully the status field remains 0. */
++
++				/* Bytes that have been transferred */
++				packet_info->length =
++				    (ep->maxpacket <
++				     frame_data) ? ep->maxpacket : frame_data;
++
++				/* Received packet offset */
++				packet_info->offset = offset;
++				offset += packet_info->length;
++				frame_data -= packet_info->length;
++
++				packet_info++;
++			}
++		}
++		return 1;
++	} else {
++		/* This is a workaround for the case of Transfer Complete and
++		 * PktDrpSts interrupts merging: the Transfer Complete interrupt
++		 * for an Isoc OUT endpoint is asserted without PktDrpSts set
++		 * and with a non-zero DOEPTSIZ register. Investigation showed
++		 * that this happens when an OUT packet is dropped, but because
++		 * of interrupt merging, PktDrpSts is cleared during the first
++		 * interrupt handling and is not set again for the following
++		 * merged interrupts. In this case SW handles the interrupt as
++		 * if the PktDrpSts bit were set.
++		 */
++		if (ep->is_in) {
++			return 1;
++		} else {
++			return handle_iso_out_pkt_dropped(core_if, ep);
++		}
++	}
++}
++
++/**
++ * This function handles the ISO EP transfer complete interrupt.
++ *
++ * @param pcd The PCD
++ * @param ep The EP for which transfer complete was asserted
++ *
++ */
++static void complete_iso_ep(fh_otg_pcd_t * pcd, fh_otg_pcd_ep_t * ep)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
++	fh_ep_t *fh_ep = &ep->fh_ep;
++	uint8_t is_last = 0;
++
++	if (ep->fh_ep.next_frame == 0xffffffff) {
++		FH_WARN("Next frame is not set!\n");
++		return;
++	}
++
++	if (core_if->dma_enable) {
++		if (core_if->dma_desc_enable) {
++			set_ddma_iso_pkts_info(core_if, fh_ep);
++			reinit_ddma_iso_xfer(core_if, fh_ep);
++			is_last = 1;
++		} else {
++			if (core_if->pti_enh_enable) {
++				if (set_iso_pkts_info(core_if, fh_ep)) {
++					fh_ep->proc_buf_num =
++					    (fh_ep->proc_buf_num ^ 1) & 0x1;
++					fh_otg_iso_ep_start_buf_transfer
++					    (core_if, fh_ep);
++					is_last = 1;
++				}
++			} else {
++				set_current_pkt_info(core_if, fh_ep);
++				if (fh_ep->cur_pkt >= fh_ep->pkt_cnt) {
++					is_last = 1;
++					fh_ep->cur_pkt = 0;
++					fh_ep->proc_buf_num =
++					    (fh_ep->proc_buf_num ^ 1) & 0x1;
++					if (fh_ep->proc_buf_num) {
++						fh_ep->cur_pkt_addr =
++						    fh_ep->xfer_buff1;
++						fh_ep->cur_pkt_dma_addr =
++						    fh_ep->dma_addr1;
++					} else {
++						fh_ep->cur_pkt_addr =
++						    fh_ep->xfer_buff0;
++						fh_ep->cur_pkt_dma_addr =
++						    fh_ep->dma_addr0;
++					}
++
++				}
++				fh_otg_iso_ep_start_frm_transfer(core_if,
++								  fh_ep);
++			}
++		}
++	} else {
++		set_current_pkt_info(core_if, fh_ep);
++		if (fh_ep->cur_pkt >= fh_ep->pkt_cnt) {
++			is_last = 1;
++			fh_ep->cur_pkt = 0;
++			fh_ep->proc_buf_num = (fh_ep->proc_buf_num ^ 1) & 0x1;
++			if (fh_ep->proc_buf_num) {
++				fh_ep->cur_pkt_addr = fh_ep->xfer_buff1;
++				fh_ep->cur_pkt_dma_addr = fh_ep->dma_addr1;
++			} else {
++				fh_ep->cur_pkt_addr = fh_ep->xfer_buff0;
++				fh_ep->cur_pkt_dma_addr = fh_ep->dma_addr0;
++			}
++
++		}
++		fh_otg_iso_ep_start_frm_transfer(core_if, fh_ep);
++	}
++	if (is_last)
++		fh_otg_iso_buffer_done(pcd, ep, ep->iso_req_handle);
++}
++#endif /* FH_EN_ISOC */
++
++/**
++ * This function handles the BNA interrupt for non-Isochronous EPs.
++ *
++ */
++static void fh_otg_pcd_handle_noniso_bna(fh_otg_pcd_ep_t * ep)
++{
++	fh_ep_t *fh_ep = &ep->fh_ep;
++	volatile uint32_t *addr;
++	depctl_data_t depctl = {.d32 = 0 };
++	fh_otg_pcd_t *pcd = ep->pcd;
++	fh_otg_dev_dma_desc_t *dma_desc;
++	dev_dma_desc_sts_t sts = {.d32 = 0 };
++	fh_otg_core_if_t *core_if = ep->pcd->core_if;
++	int i, start;
++
++	if (!fh_ep->desc_cnt)
++		FH_WARN("Ep%d %s Descriptor count = %d \n", fh_ep->num,
++			 (fh_ep->is_in ? "IN" : "OUT"), fh_ep->desc_cnt);
++
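++	/* With cont_on_bna enabled for a non-control OUT EP, resume the
++	 * descriptor chain from the descriptor DOEPDMA currently points
++	 * at instead of re-arming it from the start. */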
++	if (core_if->core_params->cont_on_bna && !fh_ep->is_in
++							&& fh_ep->type != FH_OTG_EP_TYPE_CONTROL) {
++		uint32_t doepdma;
++		fh_otg_dev_out_ep_regs_t *out_regs =
++			core_if->dev_if->out_ep_regs[fh_ep->num];
++		doepdma = FH_READ_REG32(&(out_regs->doepdma));
++		start = (doepdma - fh_ep->dma_desc_addr)/sizeof(fh_otg_dev_dma_desc_t);
++		dma_desc = &(fh_ep->desc_addr[start]);
++	} else {
++		start = 0;
++		dma_desc = fh_ep->desc_addr;
++	}
++
++	for (i = start; i < fh_ep->desc_cnt; ++i, ++dma_desc) {
++		sts.d32 = dma_desc->status.d32;
++		sts.b.bs = BS_HOST_READY;
++		dma_desc->status.d32 = sts.d32;
++	}
++
++	if (fh_ep->is_in == 0) {
++		addr =
++		    &GET_CORE_IF(pcd)->dev_if->out_ep_regs[fh_ep->num]->
++		    doepctl;
++	} else {
++		addr =
++		    &GET_CORE_IF(pcd)->dev_if->in_ep_regs[fh_ep->num]->diepctl;
++	}
++	depctl.b.epena = 1;
++	depctl.b.cnak = 1;
++	FH_MODIFY_REG32(addr, 0, depctl.d32);
++}
++
++/**
++ * This function handles EP0 Control transfers.
++ *
++ * The state of the control transfer is tracked in
++ * <code>ep0state</code>.
++ */
++static void handle_ep0(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_pcd_ep_t *ep0 = &pcd->ep0;
++	dev_dma_desc_sts_t desc_sts;
++	deptsiz0_data_t deptsiz;
++	uint32_t byte_count;
++
++#ifdef DEBUG_EP0
++	FH_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
++	print_ep0_state(pcd);
++#endif
++
++	switch (pcd->ep0state) {
++	case EP0_DISCONNECT:
++		break;
++
++	case EP0_IDLE:
++		pcd->request_config = 0;
++
++		pcd_setup(pcd);
++		break;
++
++	case EP0_IN_DATA_PHASE:
++#ifdef DEBUG_EP0
++		FH_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
++			    ep0->fh_ep.num, (ep0->fh_ep.is_in ? "IN" : "OUT"),
++			    ep0->fh_ep.type, ep0->fh_ep.maxpacket);
++#endif
++
++		if (core_if->dma_enable != 0) {
++			/*
++			 * For EP0 we can only program 1 packet at a time, so we
++			 * need to redo the calculations after each completion.
++			 * Call write_packet to make the calculations, as in
++			 * slave mode, and use those values to determine if we
++			 * can complete.
++			 */
++			if (core_if->dma_desc_enable == 0) {
++				deptsiz.d32 =
++				    FH_READ_REG32(&core_if->
++						   dev_if->in_ep_regs[0]->
++						   dieptsiz);
++				byte_count =
++				    ep0->fh_ep.xfer_len - deptsiz.b.xfersize;
++			} else {
++				desc_sts =
++				    core_if->dev_if->in_desc_addr->status;
++				byte_count =
++				    ep0->fh_ep.xfer_len - desc_sts.b.bytes;
++			}
++			ep0->fh_ep.xfer_count += byte_count;
++			ep0->fh_ep.xfer_buff += byte_count;
++			ep0->fh_ep.dma_addr += byte_count;
++		}
++		if (ep0->fh_ep.xfer_count < ep0->fh_ep.total_len) {
++			fh_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
++						      &ep0->fh_ep);
++			FH_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
++		} else if (ep0->fh_ep.sent_zlp) {
++			fh_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
++						      &ep0->fh_ep);
++			ep0->fh_ep.sent_zlp = 0;
++			FH_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER sent zlp\n");
++		} else {
++			ep0_complete_request(ep0);
++			FH_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
++		}
++		break;
++	case EP0_OUT_DATA_PHASE:
++#ifdef DEBUG_EP0
++		FH_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n",
++			    ep0->fh_ep.num, (ep0->fh_ep.is_in ? "IN" : "OUT"),
++			    ep0->fh_ep.type, ep0->fh_ep.maxpacket);
++#endif
++		if (core_if->dma_enable != 0) {
++			if (core_if->dma_desc_enable == 0) {
++				deptsiz.d32 =
++				    FH_READ_REG32(&core_if->
++						   dev_if->out_ep_regs[0]->
++						   doeptsiz);
++				byte_count =
++				    ep0->fh_ep.maxpacket - deptsiz.b.xfersize;
++			} else {
++				desc_sts =
++				    core_if->dev_if->out_desc_addr->status;
++				byte_count =
++				    ep0->fh_ep.maxpacket - desc_sts.b.bytes;
++			}
++			ep0->fh_ep.xfer_count += byte_count;
++			ep0->fh_ep.xfer_buff += byte_count;
++			ep0->fh_ep.dma_addr += byte_count;
++		}
++		if (ep0->fh_ep.xfer_count < ep0->fh_ep.total_len) {
++			fh_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
++						      &ep0->fh_ep);
++			FH_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
++		} else if (ep0->fh_ep.sent_zlp) {
++			fh_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
++						      &ep0->fh_ep);
++			ep0->fh_ep.sent_zlp = 0;
++			FH_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER sent zlp\n");
++		} else {
++			ep0_complete_request(ep0);
++			FH_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
++		}
++		break;
++
++	case EP0_IN_STATUS_PHASE:
++	case EP0_OUT_STATUS_PHASE:
++		FH_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n");
++		ep0_complete_request(ep0);
++		pcd->ep0state = EP0_IDLE;
++		ep0->stopped = 1;
++		ep0->fh_ep.is_in = 0;	/* OUT for next SETUP */
++
++		/* Prepare for more SETUP Packets */
++		if (core_if->dma_enable) {
++			ep0_out_start(core_if, pcd);
++		}
++		break;
++
++	case EP0_STALL:
++		FH_ERROR("EP0 STALLed, should not get here pcd_setup()\n");
++		break;
++	}
++#ifdef DEBUG_EP0
++	print_ep0_state(pcd);
++#endif
++}
++
++/**
++ * Restart transfer
++ */
++static void restart_transfer(fh_otg_pcd_t * pcd, const uint32_t epnum)
++{
++	fh_otg_core_if_t *core_if;
++	fh_otg_dev_if_t *dev_if;
++	deptsiz_data_t dieptsiz = {.d32 = 0 };
++	fh_otg_pcd_ep_t *ep;
++
++	ep = get_in_ep(pcd, epnum);
++
++#ifdef FH_EN_ISOC
++	if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
++		return;
++	}
++#endif /* FH_EN_ISOC  */
++
++	core_if = GET_CORE_IF(pcd);
++	dev_if = core_if->dev_if;
++
++	dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dieptsiz);
++
++	FH_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x xfer_len=%0x"
++		    " stopped=%d\n", ep->fh_ep.xfer_buff,
++		    ep->fh_ep.xfer_count, ep->fh_ep.xfer_len, ep->stopped);
++	/*
++	 * If xfersize is 0 and pktcnt is not 0, resend the last packet.
++	 */
++	if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 &&
++	    ep->fh_ep.start_xfer_buff != 0) {
++		if (ep->fh_ep.total_len <= ep->fh_ep.maxpacket) {
++			ep->fh_ep.xfer_count = 0;
++			ep->fh_ep.xfer_buff = ep->fh_ep.start_xfer_buff;
++			ep->fh_ep.xfer_len = ep->fh_ep.xfer_count;
++		} else {
++			ep->fh_ep.xfer_count -= ep->fh_ep.maxpacket;
++			/* Rewind the buffer pointer by one max packet. */
++			ep->fh_ep.xfer_buff -= ep->fh_ep.maxpacket;
++			ep->fh_ep.xfer_len = ep->fh_ep.xfer_count;
++		}
++		ep->stopped = 0;
++		FH_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x "
++			    "xfer_len=%0x stopped=%d\n",
++			    ep->fh_ep.xfer_buff,
++			    ep->fh_ep.xfer_count, ep->fh_ep.xfer_len,
++			    ep->stopped);
++		if (epnum == 0) {
++			fh_otg_ep0_start_transfer(core_if, &ep->fh_ep);
++		} else {
++			fh_otg_ep_start_transfer(core_if, &ep->fh_ep);
++		}
++	}
++}
++
++/*
++ * This function creates a new nextep sequence based on the Learn Queue.
++ *
++ * @param core_if Programming view of FH_otg controller
++ */
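++/*
++ * Informal example (hypothetical token stream): if the Learn Queue holds IN
++ * tokens for EP1, EP3, then EP2, the sort below orders the active EPs by the
++ * position of their most recent token, picks EP1 as first_in_nextep_seq, and
++ * rebuilds nextep_seq[] as the ring EP1 -> EP3 -> EP2 -> EP1, i.e. the order
++ * in which the host last polled the endpoints.
++ */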
++void predict_nextep_seq( fh_otg_core_if_t * core_if)
++{
++	fh_otg_device_global_regs_t *dev_global_regs =
++	    core_if->dev_if->dev_global_regs;
++	const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
++	/* Number of Token Queue Registers */
++	const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
++	dtknq1_data_t dtknqr1;
++	uint32_t in_tkn_epnums[4];
++	uint8_t seqnum[MAX_EPS_CHANNELS];
++	uint8_t intkn_seq[TOKEN_Q_DEPTH];
++	grstctl_t resetctl = {.d32 = 0 };
++	uint8_t temp;
++	int ndx = 0;
++	int start = 0;
++	int end = 0;
++	int sort_done = 0;
++	int i = 0;
++	volatile uint32_t *addr = &dev_global_regs->dtknqr1;
++
++	FH_DEBUGPL(DBG_PCD, "dev_token_q_depth=%d\n", TOKEN_Q_DEPTH);
++
++	/* Read the DTKNQ Registers */
++	for (i = 0; i < DTKNQ_REG_CNT; i++) {
++		in_tkn_epnums[i] = FH_READ_REG32(addr);
++		FH_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
++			    in_tkn_epnums[i]);
++		if (addr == &dev_global_regs->dvbusdis) {
++			addr = &dev_global_regs->dtknqr3_dthrctl;
++		} else {
++			++addr;
++		}
++
++	}
++
++	/* Copy the DTKNQR1 data to the bit field. */
++	dtknqr1.d32 = in_tkn_epnums[0];
++	if (dtknqr1.b.wrap_bit) {
++		ndx = dtknqr1.b.intknwptr;
++		end = ndx - 1;
++		if (end < 0)
++			end = TOKEN_Q_DEPTH - 1;
++	} else {
++		ndx = 0;
++		end = dtknqr1.b.intknwptr - 1;
++		if (end < 0)
++			end = 0;
++	}
++	start = ndx;
++
++	/* Fill seqnum[] by initial values: EP number + 31 */
++	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++		seqnum[i] = i + 31;
++	}
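++	/*
++	 * Note: the +31 bias makes untouched entries sort after any real
++	 * queue position (0..TOKEN_Q_DEPTH-1), so EPs that never appear in
++	 * the Learn Queue land at the tail of the sorted sequence; the
++	 * "seqnum[i] < 31" test below relies on this.
++	 */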
++
++	/* Fill intkn_seq[] from in_tkn_epnums[0] */
++	for (i = 0; (i < 6) && (i < TOKEN_Q_DEPTH); i++)
++		intkn_seq[i] = (in_tkn_epnums[0] >> ((7 - i) * 4)) & 0xf;
++
++	if (TOKEN_Q_DEPTH > 6) {
++		/* Fill intkn_seq[] from in_tkn_epnums[1] */
++		for (i = 6; (i < 14) && (i < TOKEN_Q_DEPTH); i++)
++			intkn_seq[i] =
++			    (in_tkn_epnums[1] >> ((7 - (i - 6)) * 4)) & 0xf;
++	}
++
++	if (TOKEN_Q_DEPTH > 14) {
++		/* Fill intkn_seq[] from in_tkn_epnums[2] */
++		for (i = 14; (i < 22) && (i < TOKEN_Q_DEPTH); i++)
++			intkn_seq[i] =
++			    (in_tkn_epnums[2] >> ((7 - (i - 14)) * 4)) & 0xf;
++	}
++
++	if (TOKEN_Q_DEPTH > 22) {
++		/* Fill intkn_seq[] from in_tkn_epnums[3] */
++		for (i = 22; (i < 30) && (i < TOKEN_Q_DEPTH); i++)
++			intkn_seq[i] =
++			    (in_tkn_epnums[3] >> ((7 - (i - 22)) * 4)) & 0xf;
++	}
++
++	FH_DEBUGPL(DBG_PCDV, "%s start=%d end=%d intkn_seq[]:\n", __func__,
++		    start, end);
++	for (i = 0; i < TOKEN_Q_DEPTH; i++)
++		FH_DEBUGPL(DBG_PCDV, "%d\n", intkn_seq[i]);
++
++	/* Update seqnum based on intkn_seq[] */
++	i = 0;
++	do {
++		seqnum[intkn_seq[ndx]] = i;
++		ndx++;
++		i++;
++		if (ndx == TOKEN_Q_DEPTH)
++			ndx = 0;
++	} while (i < TOKEN_Q_DEPTH);
++
++	/* Mark non active EP's in seqnum[] by 0xff */
++	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++		if (core_if->nextep_seq[i] == 0xff)
++			seqnum[i] = 0xff;
++	}
++
++	/* Sort seqnum[] */
++	sort_done = 0;
++	while (!sort_done) {
++		sort_done = 1;
++		for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
++			if (seqnum[i] > seqnum[i + 1]) {
++				temp = seqnum[i];
++				seqnum[i] = seqnum[i + 1];
++				seqnum[i + 1] = temp;
++				sort_done = 0;
++			}
++		}
++	}
++
++	ndx = start + seqnum[0];
++	if (ndx >= TOKEN_Q_DEPTH)
++		ndx = ndx % TOKEN_Q_DEPTH;
++	core_if->first_in_nextep_seq = intkn_seq[ndx];
++
++	/* Update seqnum[] by EP numbers  */
++	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++		ndx = start + i;
++		if (seqnum[i] < 31) {
++			ndx = start + seqnum[i];
++			if (ndx >= TOKEN_Q_DEPTH)
++				ndx = ndx % TOKEN_Q_DEPTH;
++			seqnum[i] = intkn_seq[ndx];
++		} else {
++			if (seqnum[i] < 0xff) {
++				seqnum[i] = seqnum[i] - 31;
++			} else {
++				break;
++			}
++		}
++	}
++
++	/* Update nextep_seq[] based on seqnum[] */
++	for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
++		if (seqnum[i] != 0xff) {
++			if (seqnum[i + 1] != 0xff) {
++				core_if->nextep_seq[seqnum[i]] = seqnum[i + 1];
++			} else {
++				core_if->nextep_seq[seqnum[i]] = core_if->first_in_nextep_seq;
++				break;
++			}
++		} else {
++			break;
++		}
++	}
++
++	FH_DEBUGPL(DBG_PCDV, "%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
++		    __func__, core_if->first_in_nextep_seq);
++	for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++		FH_DEBUGPL(DBG_PCDV, "%2d\n", core_if->nextep_seq[i]);
++	}
++
++	/* Flush the Learning Queue */
++	resetctl.d32 = FH_READ_REG32(&core_if->core_global_regs->grstctl);
++	resetctl.b.intknqflsh = 1;
++	FH_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
++}
++
++/**
++ * handle the IN EP disable interrupt.
++ */
++static inline void handle_in_ep_disable_intr(fh_otg_pcd_t * pcd,
++					     const uint32_t epnum)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	deptsiz_data_t dieptsiz = {.d32 = 0 };
++	dctl_data_t dctl = {.d32 = 0 };
++	fh_otg_pcd_ep_t *ep;
++	fh_ep_t *fh_ep;
++	gintmsk_data_t gintmsk_data;
++	depctl_data_t depctl;
++	uint32_t diepdma;
++	uint32_t remain_to_transfer = 0;
++	uint8_t i;
++	uint32_t xfer_size;
++
++	ep = get_in_ep(pcd, epnum);
++	fh_ep = &ep->fh_ep;
++
++	if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
++		fh_otg_flush_tx_fifo(core_if, fh_ep->tx_fifo_num);
++		complete_ep(ep);
++		return;
++	}
++
++	FH_DEBUGPL(DBG_PCD, "diepctl%d=%0x\n", epnum,
++		    FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl));
++	dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dieptsiz);
++	depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
++
++	FH_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
++		    dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
++
++	if ((core_if->start_predict == 0) || (depctl.b.eptype & 1)) {
++		if (ep->stopped) {
++			if (core_if->en_multiple_tx_fifo)
++				/* Flush the Tx FIFO */
++				fh_otg_flush_tx_fifo(core_if, fh_ep->tx_fifo_num);
++			/* Clear the Global IN NP NAK */
++			dctl.d32 = 0;
++			dctl.b.cgnpinnak = 1;
++			FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); 
++			/* Restart the transaction */
++			if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
++				restart_transfer(pcd, epnum);
++			}
++		} else {
++			/* Restart the transaction */
++			if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
++				restart_transfer(pcd, epnum);
++			}
++			FH_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
++		}
++		return;
++	}
++
++	if (core_if->start_predict > 2) {	// NP IN EP
++		core_if->start_predict--;
++		return;
++	}
++
++	core_if->start_predict--;
++
++	if (core_if->start_predict == 1) {	// All NP IN EPs are disabled now
++
++		predict_nextep_seq(core_if);
++
++		/* Update all active IN EPs' NextEP fields based on nextep_seq[] */
++		for (i = 0; i <= core_if->dev_if->num_in_eps; i++) {
++			depctl.d32 =
++			    FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
++			if (core_if->nextep_seq[i] != 0xff) {	// Active NP IN EP
++				depctl.b.nextep = core_if->nextep_seq[i];
++				FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
++			}
++		}
++		/* Flush Shared NP TxFIFO */
++		fh_otg_flush_tx_fifo(core_if, 0);
++		/* Rewind buffers */
++		if (!core_if->dma_desc_enable) {		
++			i = core_if->first_in_nextep_seq;
++			do {
++				ep = get_in_ep(pcd, i);
++				dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
++				xfer_size = ep->fh_ep.total_len - ep->fh_ep.xfer_count;
++				if (xfer_size > ep->fh_ep.maxxfer) 
++					xfer_size = ep->fh_ep.maxxfer;
++				depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
++				if (dieptsiz.b.pktcnt != 0) {
++					if (xfer_size == 0) {
++						remain_to_transfer = 0;
++					} else {
++						if ((xfer_size % ep->fh_ep.maxpacket) == 0) {
++							remain_to_transfer = 
++								dieptsiz.b.pktcnt * ep->fh_ep.maxpacket;
++						} else {
++							remain_to_transfer = ((dieptsiz.b.pktcnt -1) * ep->fh_ep.maxpacket) 
++								+ (xfer_size % ep->fh_ep.maxpacket);
++						}
++					}
++					diepdma = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepdma);
++					dieptsiz.b.xfersize = remain_to_transfer;
++					FH_WRITE_REG32(&dev_if->in_ep_regs[i]->dieptsiz, dieptsiz.d32);
++					diepdma = ep->fh_ep.dma_addr + (xfer_size - remain_to_transfer);
++					FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepdma, diepdma);
++				}
++				i = core_if->nextep_seq[i];
++			} while (i != core_if->first_in_nextep_seq);
++		} else { // dma_desc_enable
++			FH_PRINTF("%s Learning Queue not supported in DDMA\n", __func__);
++		}
++
++		/* Restart transfers in predicted sequences */
++		i = core_if->first_in_nextep_seq;
++		do {
++			dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
++			depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
++			if (dieptsiz.b.pktcnt != 0) {
++				depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
++				depctl.b.epena = 1;
++				depctl.b.cnak = 1;
++				FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
++			}
++			i = core_if->nextep_seq[i];
++		} while (i != core_if->first_in_nextep_seq);
++
++		/* Clear the global non-periodic IN NAK handshake */
++		dctl.d32 = 0;
++		dctl.b.cgnpinnak = 1;
++		FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); 
++			
++		/* Unmask EP Mismatch interrupt */
++		gintmsk_data.d32 = 0;
++		gintmsk_data.b.epmismatch = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, gintmsk_data.d32);
++		
++		core_if->start_predict = 0;
++
++	} 
++}
++
++/**
++ * Handler for the IN EP timeout handshake interrupt.
++ */
++static inline void handle_in_ep_timeout_intr(fh_otg_pcd_t * pcd,
++					     const uint32_t epnum)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++
++#ifdef DEBUG
++	deptsiz_data_t dieptsiz = {.d32 = 0 };
++#endif
++	dctl_data_t dctl = {.d32 = 0 };
++	fh_otg_pcd_ep_t *ep;
++
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	ep = get_in_ep(pcd, epnum);
++
++	/* Disable the NP Tx FIFO Empty Interrupt */
++	if (!core_if->dma_enable) {
++		intr_mask.b.nptxfempty = 1;
++		FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
++				 intr_mask.d32, 0);
++	}
++	/** @todo NGS Check EP type.
++	 * Implement for Periodic EPs */
++	/*
++	 * Non-periodic EP
++	 */
++	/* Enable the Global IN NAK Effective Interrupt */
++	intr_mask.b.ginnakeff = 1;
++	FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, intr_mask.d32);
++
++	/* Set Global IN NAK */
++	dctl.b.sgnpinnak = 1;
++	FH_MODIFY_REG32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
++
++	ep->stopped = 1;
++
++#ifdef DEBUG
++	dieptsiz.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->dieptsiz);
++	FH_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
++		    dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
++#endif
++
++#ifdef DISABLE_PERIODIC_EP
++	/*
++	 * Set the NAK bit for this EP to
++	 * start the disable process.
++	 */
++	{
++		depctl_data_t diepctl = {.d32 = 0 };
++		diepctl.b.snak = 1;
++		FH_MODIFY_REG32(&dev_if->in_ep_regs[epnum]->diepctl,
++				 diepctl.d32, diepctl.d32);
++	}
++	ep->disabling = 1;
++	ep->stopped = 1;
++#endif
++}
++
++/**
++ * Handler for the IN EP NAK interrupt.
++ */
++static inline int32_t handle_in_ep_nak_intr(fh_otg_pcd_t * pcd,
++					    const uint32_t epnum)
++{
++	/** @todo implement ISR */
++	fh_otg_core_if_t *core_if;
++	diepmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_PRINTF("INTERRUPT Handler not implemented for %s\n", "IN EP NAK");
++	core_if = GET_CORE_IF(pcd);
++	intr_mask.b.nak = 1;
++
++	if (core_if->multiproc_int_enable) {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
++				 diepeachintmsk[epnum], intr_mask.d32, 0);
++	} else {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->diepmsk,
++				 intr_mask.d32, 0);
++	}
++
++	return 1;
++}
++
++/**
++ * Handler for the OUT EP Babble interrupt.
++ */
++static inline int32_t handle_out_ep_babble_intr(fh_otg_pcd_t * pcd,
++						const uint32_t epnum)
++{
++	/** @todo implement ISR */
++	fh_otg_core_if_t *core_if;
++	doepmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_PRINTF("INTERRUPT Handler not implemented for %s\n",
++		   "OUT EP Babble");
++	core_if = GET_CORE_IF(pcd);
++	intr_mask.b.babble = 1;
++
++	if (core_if->multiproc_int_enable) {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
++				 doepeachintmsk[epnum], intr_mask.d32, 0);
++	} else {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
++				 intr_mask.d32, 0);
++	}
++
++	return 1;
++}
++
++/**
++ * Handler for the OUT EP NAK interrupt.
++ */
++static inline int32_t handle_out_ep_nak_intr(fh_otg_pcd_t * pcd,
++					     const uint32_t epnum)
++{
++	/** @todo implement ISR */
++	fh_otg_core_if_t *core_if;
++	doepmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_DEBUGPL(DBG_ANY, "INTERRUPT Handler not implemented for %s\n", "OUT EP NAK");
++	core_if = GET_CORE_IF(pcd);
++	intr_mask.b.nak = 1;
++
++	if (core_if->multiproc_int_enable) {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
++				 doepeachintmsk[epnum], intr_mask.d32, 0);
++	} else {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
++				 intr_mask.d32, 0);
++	}
++
++	return 1;
++}
++
++/**
++ * Handler for the OUT EP NYET interrupt.
++ */
++static inline int32_t handle_out_ep_nyet_intr(fh_otg_pcd_t * pcd,
++					      const uint32_t epnum)
++{
++	/** @todo implement ISR */
++	fh_otg_core_if_t *core_if;
++	doepmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_PRINTF("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET");
++	core_if = GET_CORE_IF(pcd);
++	intr_mask.b.nyet = 1;
++
++	if (core_if->multiproc_int_enable) {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
++				 doepeachintmsk[epnum], intr_mask.d32, 0);
++	} else {
++		FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk,
++				 intr_mask.d32, 0);
++	}
++
++	return 1;
++}
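++
++/**
++ * Handles transfer-complete for an ISOC EP in descriptor-DMA mode: the two
++ * descriptor chains (dma_desc_addr / dma_desc_addr1) are used in a ping-pong
++ * fashion, so once the endpoint has drained one chain it is re-armed on the
++ * other while the first is refilled.
++ */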
++static void handle_xfercompl_iso_ddma (fh_otg_dev_if_t *dev_if, fh_otg_pcd_ep_t *ep)
++{
++	 depctl_data_t depctl;
++	 fh_ep_t *fh_ep;
++	 uint32_t doepdma;
++	 fh_dma_t dma_desc_addr;
++	 fh_otg_dev_dma_desc_t *dma_desc;
++	 int index = 0;
++	 uint8_t epnum;
++
++	 fh_ep = &ep->fh_ep;
++	 epnum = fh_ep->num;
++		
++	 complete_ddma_iso_ep(ep);
++
++	 if (fh_ep->is_in) {
++		 depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
++		 if (!depctl.b.epena) {
++			 if (fh_ep->use_add_buf) {
++				 FH_DEBUGPL(DBG_PCD, "go to second buffer \n");
++				 fh_ep->use_add_buf = 0;
++				 fh_ep->iso_desc_first = 0;
++				 if (fh_ep->iso_desc_second) {
++					 depctl_data_t diepctl;
++					 FH_WRITE_REG32(&dev_if->in_ep_regs[epnum]->diepdma,
++						 fh_ep->dma_desc_addr1);
++					 diepctl.d32 = 0;
++					 diepctl.b.epena = 1;
++					 diepctl.b.cnak = 1;
++					 FH_MODIFY_REG32(&dev_if->in_ep_regs[epnum]->diepctl,
++						 0, diepctl.d32);
++				 } else {
++					 FH_DEBUGPL(DBG_PCD, "DDMA: No more ISOC requests 1\n");
++				 }
++			 } else {
++				 FH_DEBUGPL(DBG_PCD, "go to first buffer \n");
++				 fh_ep->use_add_buf = 1;
++				 fh_ep->iso_desc_second = 0;
++				 if (fh_ep->iso_desc_first) {
++					 depctl_data_t diepctl;
++					 FH_WRITE_REG32(&dev_if->in_ep_regs[epnum]->diepdma,
++						 fh_ep->dma_desc_addr);
++					 diepctl.d32 = 0;
++					 diepctl.b.epena = 1;
++					 diepctl.b.cnak = 1;
++					 FH_MODIFY_REG32(&dev_if->in_ep_regs[epnum]->diepctl,
++						 0, diepctl.d32);
++				 } else {
++					 FH_DEBUGPL(DBG_PCD, "DDMA: No more ISOC requests 2\n");
++				 }
++			 }
++		 }
++	 } else {
++		 depctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[epnum]->doepctl);
++		 doepdma = FH_READ_REG32(&dev_if->out_ep_regs[epnum]->doepdma);
++
++		 if (fh_ep->use_add_buf) {
++			 index = fh_ep->iso_desc_first;
++			 dma_desc_addr = fh_ep->dma_desc_addr;
++		 } else {
++			 index = fh_ep->iso_desc_second;
++			 dma_desc_addr = fh_ep->dma_desc_addr1;
++		 }
++
++		 if (index == (doepdma - dma_desc_addr)/sizeof(fh_otg_dev_dma_desc_t)) {
++			 depctl.d32 = 0;
++			 depctl.b.epdis = 1;
++			 FH_MODIFY_REG32(&dev_if->out_ep_regs[epnum]->doepctl, 0, depctl.d32);
++		 }
++		 dma_desc = fh_ep->desc_addr + fh_ep->iso_desc_first;
++		 if (!depctl.b.epena) {
++			 if (fh_ep->use_add_buf) {
++				 FH_DEBUGPL(DBG_PCD, "go to second buffer \n");
++				 fh_ep->use_add_buf = 0;
++				 fh_ep->iso_desc_first = 0;
++				 if (fh_ep->iso_desc_second) {
++					 FH_WRITE_REG32(&dev_if->out_ep_regs[epnum]->doepdma, fh_ep->dma_desc_addr1);
++					 depctl.d32 = 0;
++					 depctl.b.epena = 1;
++					 depctl.b.cnak = 1;
++					 FH_MODIFY_REG32(&dev_if->out_ep_regs[epnum]->doepctl, 0, depctl.d32);
++				 } else {
++					 FH_DEBUGPL(DBG_PCD, "DDMA: There are no more ISOC requests 1!!! \n");
++				 }
++			 } else {
++				 fh_ep->use_add_buf = 1;
++				 fh_ep->iso_desc_second = 0;
++				 if (fh_ep->iso_desc_first) {
++					 FH_DEBUGPL(DBG_PCD, "go to first buffer");
++					 FH_WRITE_REG32(&dev_if->out_ep_regs[epnum]->doepdma, fh_ep->dma_desc_addr);
++					 depctl.d32 = 0;
++					 depctl.b.epena = 1;
++					 depctl.b.cnak = 1;
++					 FH_MODIFY_REG32(&dev_if->out_ep_regs[epnum]->doepctl, 0, depctl.d32);
++				 } else {
++					 FH_DEBUGPL(DBG_PCD, "DDMA: There are no more ISOC requests 2!!! \n");
++				 }
++			 }
++		 }
++	 }
++}
++/**
++ * This interrupt indicates that an IN EP has a pending Interrupt.
++ * The sequence for handling the IN EP interrupt is shown below:
++ * -#	Read the Device All Endpoint Interrupt register
++ * -#	Repeat the following for each IN EP interrupt bit set (from
++ *		LSB to MSB).
++ * -#	Read the Device Endpoint Interrupt (DIEPINTn) register
++ * -#	If "Transfer Complete" call the request complete function
++ * -#	If "Endpoint Disabled" complete the EP disable procedure.
++ * -#	If "AHB Error Interrupt" log error
++ * -#	If "Time-out Handshake" log error
++ * -#	If "IN Token Received when TxFIFO Empty" write packet to Tx
++ *		FIFO.
++ * -#	If "IN Token EP Mismatch" (disable, this is handled by EP
++ *		Mismatch Interrupt)
++ */
++static int32_t fh_otg_pcd_handle_in_ep_intr(fh_otg_pcd_t * pcd)
++{
++#define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \
++do { \
++		diepint_data_t diepint = {.d32=0}; \
++		diepint.b.__intr = 1; \
++		FH_WRITE_REG32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
++		diepint.d32); \
++} while (0)
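++/* DIEPINTn status bits are write-1-to-clear: writing back only the bit being
++ * serviced acknowledges that source without disturbing the others. */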
++
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	fh_otg_dev_if_t *dev_if = core_if->dev_if;
++	diepint_data_t diepint = {.d32 = 0 };
++	depctl_data_t depctl = {.d32 = 0 };
++	uint32_t ep_intr;
++	uint32_t epnum = 0;
++	fh_otg_pcd_ep_t *ep;
++	fh_ep_t *fh_ep;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++
++	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
++
++	/* Read in the device interrupt bits */
++	ep_intr = fh_otg_read_dev_all_in_ep_intr(core_if);
++
++	/* Service the Device IN interrupts for each endpoint */
++	while (ep_intr) {
++		if (ep_intr & 0x1) {
++			uint32_t empty_msk;
++			/* Get EP pointer */
++			ep = get_in_ep(pcd, epnum);
++			fh_ep = &ep->fh_ep;
++
++			depctl.d32 =
++			    FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
++			empty_msk =
++			    FH_READ_REG32(&dev_if->
++					   dev_global_regs->dtknqr4_fifoemptymsk);
++
++			FH_DEBUGPL(DBG_PCDV,
++				    "IN EP INTERRUPT - %d\nepmty_msk - %8x  diepctl - %8x\n",
++				    epnum, empty_msk, depctl.d32);
++
++			FH_DEBUGPL(DBG_PCD,
++				    "EP%d-%s: type=%d, mps=%d\n",
++				    fh_ep->num, (fh_ep->is_in ? "IN" : "OUT"),
++				    fh_ep->type, fh_ep->maxpacket);
++
++			diepint.d32 =
++			    fh_otg_read_dev_in_ep_intr(core_if, fh_ep);
++
++			FH_DEBUGPL(DBG_PCDV,
++				    "EP %d Interrupt Register - 0x%x\n", epnum,
++				    diepint.d32);
++			/* Transfer complete */
++			if (diepint.b.xfercompl) {
++				/* Disable the NP Tx FIFO Empty
++				 * Interrupt */
++				if (core_if->en_multiple_tx_fifo == 0) {
++					intr_mask.b.nptxfempty = 1;
++					FH_MODIFY_REG32
++					    (&core_if->core_global_regs->gintmsk,
++					     intr_mask.d32, 0);
++				} else {
++					/* Disable the Tx FIFO Empty Interrupt for this EP */
++					uint32_t fifoemptymsk =
++					    0x1 << fh_ep->num;
++					FH_MODIFY_REG32(&core_if->
++							 dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
++							 fifoemptymsk, 0);
++				}
++				/* Clear the bit in DIEPINTn for this interrupt */
++				CLEAR_IN_EP_INTR(core_if, epnum, xfercompl);
++
++				/* Complete the transfer */
++				if (epnum == 0) {
++					handle_ep0(pcd);
++				}
++#ifdef FH_EN_ISOC
++				else if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
++					if (!ep->stopped)
++						complete_iso_ep(pcd, ep);
++				}
++#endif /* FH_EN_ISOC */
++#ifdef FH_UTE_PER_IO
++				else if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
++					if (!ep->stopped)
++						complete_xiso_ep(ep);
++				}
++#endif /* FH_UTE_PER_IO */
++				else {
++					if (core_if->dma_desc_enable && fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
++						handle_xfercompl_iso_ddma(dev_if, ep);
++					} else {
++						if (fh_ep->type == FH_OTG_EP_TYPE_ISOC && 
++								fh_ep->bInterval > 1) {
++							fh_ep->frame_num += fh_ep->bInterval;
++							if (fh_ep->frame_num > 0x3FFF) {
++								fh_ep->frm_overrun = 1;
++								fh_ep->frame_num &= 0x3FFF;
++							} else {
++								fh_ep->frm_overrun = 0;
++							}
++						}
++						complete_ep(ep);
++						if(diepint.b.nak)
++							CLEAR_IN_EP_INTR(core_if, epnum, nak);
++					}
++				}
++			}
++			/* Endpoint disable      */
++			if (diepint.b.epdisabled) {
++				FH_DEBUGPL(DBG_ANY, "EP%d IN disabled\n",
++					    epnum);
++				handle_in_ep_disable_intr(pcd, epnum);
++
++				/* Clear the bit in DIEPINTn for this interrupt */
++				CLEAR_IN_EP_INTR(core_if, epnum, epdisabled);
++			}
++			/* AHB Error */
++			if (diepint.b.ahberr) {
++				FH_ERROR("EP%d IN AHB Error\n", epnum);
++				/* Clear the bit in DIEPINTn for this interrupt */
++				CLEAR_IN_EP_INTR(core_if, epnum, ahberr);
++			}
++			/* TimeOUT Handshake (non-ISOC IN EPs) */
++			if (diepint.b.timeout) {
++				FH_ERROR("EP%d IN Time-out\n", epnum);
++				handle_in_ep_timeout_intr(pcd, epnum);
++
++				CLEAR_IN_EP_INTR(core_if, epnum, timeout);
++			}
++			/** IN Token received with TxF Empty */
++			if (diepint.b.intktxfemp) {
++				FH_DEBUGPL(DBG_ANY,
++					    "EP%d IN TKN TxFifo Empty\n",
++					    epnum);
++				if (!ep->stopped && epnum != 0) {
++
++					diepmsk_data_t diepmsk = {.d32 = 0 };
++					diepmsk.b.intktxfemp = 1;
++
++					if (core_if->multiproc_int_enable) {
++						FH_MODIFY_REG32
++						    (&dev_if->dev_global_regs->diepeachintmsk
++						     [epnum], diepmsk.d32, 0);
++					} else {
++						FH_MODIFY_REG32
++						    (&dev_if->dev_global_regs->diepmsk,
++						     diepmsk.d32, 0);
++					}
++				} else if (core_if->dma_desc_enable
++					   && epnum == 0
++					   && pcd->ep0state ==
++					   EP0_OUT_STATUS_PHASE) {
++					// EP0 IN set STALL
++					depctl.d32 =
++					    FH_READ_REG32(&dev_if->in_ep_regs
++							   [epnum]->diepctl);
++
++					/* set the disable and stall bits */
++					if (depctl.b.epena) {
++						depctl.b.epdis = 1;
++					}
++					depctl.b.stall = 1;
++					FH_WRITE_REG32(&dev_if->in_ep_regs
++							[epnum]->diepctl,
++							depctl.d32);
++				}
++				CLEAR_IN_EP_INTR(core_if, epnum, intktxfemp);
++			}
++			/** IN Token Received with EP mismatch */
++			if (diepint.b.intknepmis) {
++				FH_DEBUGPL(DBG_ANY,
++					    "EP%d IN TKN EP Mismatch\n", epnum);
++				CLEAR_IN_EP_INTR(core_if, epnum, intknepmis);
++			}
++			/** IN Endpoint NAK Effective */
++			if (diepint.b.inepnakeff) {
++				FH_DEBUGPL(DBG_ANY,
++					    "EP%d IN EP NAK Effective\n",
++					    epnum);
++				/* Periodic EP */
++				if (ep->disabling) {
++					depctl.d32 = 0;
++					depctl.b.snak = 1;
++					depctl.b.epdis = 1;
++					FH_MODIFY_REG32(&dev_if->in_ep_regs
++							 [epnum]->diepctl,
++							 depctl.d32,
++							 depctl.d32);
++				}
++				CLEAR_IN_EP_INTR(core_if, epnum, inepnakeff);
++
++			}
++
++			/** IN EP Tx FIFO Empty Intr */
++			if (diepint.b.emptyintr) {
++				FH_DEBUGPL(DBG_ANY,
++					    "EP%d Tx FIFO Empty Intr \n",
++					    epnum);
++				write_empty_tx_fifo(pcd, epnum);
++
++				CLEAR_IN_EP_INTR(core_if, epnum, emptyintr);
++
++			}
++
++			/** IN EP BNA Intr */
++			if (diepint.b.bna) {
++				CLEAR_IN_EP_INTR(core_if, epnum, bna);
++				if (core_if->dma_desc_enable) {
++#ifdef FH_EN_ISOC
++					if (fh_ep->type ==
++					    FH_OTG_EP_TYPE_ISOC) {
++						/*
++						 * This check prevents a first "false" BNA
++						 * from being handled right after reconnect.
++						 */
++						if (fh_ep->next_frame !=
++						    0xffffffff)
++							fh_otg_pcd_handle_iso_bna(ep);
++					} else
++#endif				/* FH_EN_ISOC */
++					{
++						fh_otg_pcd_handle_noniso_bna(ep);
++					}
++				}
++			}
++			/* NAK Interrupt */
++			if (diepint.b.nak) {
++				FH_DEBUGPL(DBG_ANY, "EP%d IN NAK Interrupt\n",
++					    epnum);
++				if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
++					if (core_if->dma_desc_enable) {
++						if (ep->fh_ep.frame_num == 0xFFFFFFFF) {
++							ep->fh_ep.frame_num = core_if->frame_num;
++							fh_otg_pcd_start_iso_ddma(core_if, ep);
++						} else { 
++							CLEAR_IN_EP_INTR(core_if, epnum, nak); 
++						}
++					} else {
++						depctl_data_t depctl;
++						if (ep->fh_ep.frame_num == 0xFFFFFFFF) {
++							ep->fh_ep.frame_num = core_if->frame_num;
++							if (ep->fh_ep.bInterval > 1) {
++								depctl.d32 = 0;
++								depctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[epnum]->diepctl);
++								if (ep->fh_ep.frame_num & 0x1) {
++									depctl.b.setd1pid = 1;
++									depctl.b.setd0pid = 0;
++								} else {
++									depctl.b.setd0pid = 1;
++									depctl.b.setd1pid = 0;
++								}
++								FH_WRITE_REG32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32);
++							}
++							start_next_request(ep);
++						}
++						ep->fh_ep.frame_num += ep->fh_ep.bInterval;
++						if (fh_ep->frame_num > 0x3FFF)	{
++							fh_ep->frm_overrun = 1;
++							fh_ep->frame_num &= 0x3FFF;
++						} else {
++							fh_ep->frm_overrun = 0;
++						}
++					}
++				}
++
++				CLEAR_IN_EP_INTR(core_if, epnum, nak);
++			}
++		}
++		epnum++;
++		ep_intr >>= 1;
++	}
++
++	return 1;
++#undef CLEAR_IN_EP_INTR
++}
++
++/**
++ * This interrupt indicates that an OUT EP has a pending Interrupt.
++ * The sequence for handling the OUT EP interrupt is shown below:
++ * -#	Read the Device All Endpoint Interrupt register
++ * -#	Repeat the following for each OUT EP interrupt bit set (from
++ *		LSB to MSB).
++ * -#	Read the Device Endpoint Interrupt (DOEPINTn) register
++ * -#	If "Transfer Complete" call the request complete function
++ * -#	If "Endpoint Disabled" complete the EP disable procedure.
++ * -#	If "AHB Error Interrupt" log error
++ * -#	If "Setup Phase Done" process Setup Packet (See Standard USB
++ *		Command Processing)
++ */
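++/*
++ * Note: the EP0 transfer-complete branch below carries several workarounds
++ * keyed on the core revision (snpsid >= OTG_CORE_REV_3_00a) for setup/status
++ * phase races; the "goto exit_xfercompl" paths skip the generic EP0 handling
++ * once a workaround has already consumed the event.
++ */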
++static int32_t fh_otg_pcd_handle_out_ep_intr(fh_otg_pcd_t * pcd)
++{
++#define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \
++do { \
++		doepint_data_t doepint = {.d32=0}; \
++		doepint.b.__intr = 1; \
++		FH_WRITE_REG32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
++		doepint.d32); \
++} while (0)
++
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	uint32_t ep_intr;
++	doepint_data_t doepint = {.d32 = 0 };
++	uint32_t epnum = 0;
++	fh_otg_pcd_ep_t *ep;
++	fh_ep_t *fh_ep;
++	dctl_data_t dctl = {.d32 = 0 };
++	gintmsk_data_t gintmsk = {.d32 = 0 };
++
++
++	FH_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
++
++	/* Read in the device interrupt bits */
++	ep_intr = fh_otg_read_dev_all_out_ep_intr(core_if);
++
++	while (ep_intr) {
++		if (ep_intr & 0x1) {
++			/* Get EP pointer */
++			ep = get_out_ep(pcd, epnum);
++			fh_ep = &ep->fh_ep;
++
++#ifdef VERBOSE
++			FH_DEBUGPL(DBG_PCDV,
++				    "EP%d-%s: type=%d, mps=%d\n",
++				    fh_ep->num, (fh_ep->is_in ? "IN" : "OUT"),
++				    fh_ep->type, fh_ep->maxpacket);
++#endif
++			doepint.d32 =
++			    fh_otg_read_dev_out_ep_intr(core_if, fh_ep);
++
++			/* Transfer complete */
++			if (doepint.b.xfercompl) {
++
++				if (epnum == 0) {
++					/* Clear the bit in DOEPINTn for this interrupt */
++					CLEAR_OUT_EP_INTR(core_if, epnum, xfercompl); 
++					if (core_if->snpsid >= OTG_CORE_REV_3_00a) {
++						FH_DEBUGPL(DBG_PCDV, "in xfer complete DOEPINT=%x doepint=%x\n",
++							FH_READ_REG32(&core_if->dev_if->out_ep_regs[0]->doepint),
++							doepint.d32);
++						FH_DEBUGPL(DBG_PCDV, "DOEPCTL=%x \n", 
++							FH_READ_REG32(&core_if->dev_if->out_ep_regs[0]->doepctl));
++
++						if (core_if->snpsid >= OTG_CORE_REV_3_00a
++							&& core_if->dma_enable == 0) {
++							doepint_data_t doepint;
++							doepint.d32 = FH_READ_REG32(&core_if->dev_if->
++														out_ep_regs[0]->doepint);
++							if (pcd->ep0state == EP0_IDLE && doepint.b.sr) {
++								CLEAR_OUT_EP_INTR(core_if, epnum, sr);
++								if (doepint.b.stsphsercvd)
++									CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
++								goto exit_xfercompl;
++							}
++						}
++						/* In case of DDMA, look at the SR bit to go to the Data Stage */
++						if (core_if->dma_desc_enable) {
++							dev_dma_desc_sts_t status = {.d32 = 0};
++							if (pcd->ep0state == EP0_IDLE) {
++								status.d32 = core_if->dev_if->setup_desc_addr[core_if->
++											dev_if->setup_desc_index]->status.d32;
++								if(pcd->data_terminated) {
++									 pcd->data_terminated = 0;
++									 status.d32 = core_if->dev_if->out_desc_addr->status.d32;
++									 fh_memcpy(&pcd->setup_pkt->req, pcd->backup_buf, 8);
++								}
++								if (status.b.sr) {
++									if (doepint.b.setup) {
++										FH_DEBUGPL(DBG_PCDV, "DMA DESC EP0_IDLE SR=1 setup=1\n");
++										/* Already started data stage, clear setup */
++										CLEAR_OUT_EP_INTR(core_if, epnum, setup);
++										doepint.b.setup = 0;
++										handle_ep0(pcd);
++										/* Prepare for more setup packets */
++										if (pcd->ep0state == EP0_IN_STATUS_PHASE || 
++											pcd->ep0state == EP0_IN_DATA_PHASE) {
++											ep0_out_start(core_if, pcd);
++										}
++										
++										goto exit_xfercompl;
++									} else {
++										/* Prepare for more setup packets */
++										FH_DEBUGPL(DBG_PCDV, 
++											"EP0_IDLE SR=1 setup=0 new setup comes\n");
++										ep0_out_start(core_if, pcd);
++									}
++								}
++							} else {
++								fh_otg_pcd_request_t *req;
++								dev_dma_desc_sts_t status = {.d32 = 0};
++								diepint_data_t diepint0;
++								diepint0.d32 = FH_READ_REG32(&core_if->dev_if->
++															in_ep_regs[0]->diepint);
++
++								if (pcd->ep0state == EP0_STALL || pcd->ep0state == EP0_DISCONNECT) {
++									FH_ERROR("EP0 is stalled/disconnected\n");
++								}
++
++								/* Clear IN xfercompl if set */
++								if (diepint0.b.xfercompl && (pcd->ep0state == EP0_IN_STATUS_PHASE
++									|| pcd->ep0state == EP0_IN_DATA_PHASE)) {
++									FH_WRITE_REG32(&core_if->dev_if->
++										in_ep_regs[0]->diepint, diepint0.d32);
++								}
++
++								status.d32 = core_if->dev_if->setup_desc_addr[core_if->
++									dev_if->setup_desc_index]->status.d32;
++
++								if ((pcd->ep0state == EP0_OUT_STATUS_PHASE) || 
++									(ep->fh_ep.xfer_count != ep->fh_ep.total_len
++									&& pcd->ep0state == EP0_OUT_DATA_PHASE))
++									status.d32 = core_if->dev_if->out_desc_addr->status.d32;
++								if (status.b.sr) {
++									if (FH_CIRCLEQ_EMPTY(&ep->queue)) {
++										FH_DEBUGPL(DBG_PCDV, "Request queue empty!!\n");
++									} else {
++										FH_DEBUGPL(DBG_PCDV, "complete req!!\n");
++										req = FH_CIRCLEQ_FIRST(&ep->queue);
++										if (ep->fh_ep.xfer_count != ep->fh_ep.total_len &&
++											pcd->ep0state == EP0_OUT_DATA_PHASE) {
++												/* Read arrived setup packet from req->buf */
++												fh_memcpy(&pcd->setup_pkt->req, 
++													req->buf + ep->fh_ep.xfer_count, 8);
++										}
++										req->actual = ep->fh_ep.xfer_count;
++										fh_otg_request_done(ep, req, -ECONNRESET);
++										ep->fh_ep.start_xfer_buff = 0;
++										ep->fh_ep.xfer_buff = 0;
++										ep->fh_ep.xfer_len = 0;
++									}
++									pcd->ep0state = EP0_IDLE;
++									if (doepint.b.setup) {
++										FH_DEBUGPL(DBG_PCDV, "EP0_IDLE SR=1 setup=1\n");
++										/* Data stage started, clear setup */
++										CLEAR_OUT_EP_INTR(core_if, epnum, setup);
++										doepint.b.setup = 0;
++										handle_ep0(pcd);
++										/* Prepare for setup packets if ep0in was enabled*/
++										if (pcd->ep0state == EP0_IN_STATUS_PHASE) {
++											ep0_out_start(core_if, pcd);
++										}
++
++										goto exit_xfercompl;
++									} else {
++										/* Prepare for more setup packets */
++										FH_DEBUGPL(DBG_PCDV, 
++											"EP0_IDLE SR=1 setup=0 new setup comes 2\n");
++										ep0_out_start(core_if, pcd);
++									}
++								}
++							}
++						}
++						if (core_if->snpsid >= OTG_CORE_REV_3_00a && core_if->dma_enable
++							&& core_if->dma_desc_enable == 0) {
++							doepint_data_t doepint_temp = {.d32 = 0};
++							deptsiz0_data_t doeptsize0 = {.d32 = 0 };
++							doepint_temp.d32 = FH_READ_REG32(&core_if->dev_if->
++															out_ep_regs[ep->fh_ep.num]->doepint);
++							doeptsize0.d32 = FH_READ_REG32(&core_if->dev_if->
++															out_ep_regs[ep->fh_ep.num]->doeptsiz);
++							if (((ep->fh_ep.xfer_count == ep->fh_ep.total_len || doeptsize0.b.xfersize == 64) &&
++								pcd->ep0state == EP0_OUT_DATA_PHASE && doepint.b.stsphsercvd) ||
++								(doeptsize0.b.xfersize == 24 && pcd->ep0state == EP0_IN_STATUS_PHASE)) {
++									CLEAR_OUT_EP_INTR(core_if, epnum, xfercompl);
++									FH_DEBUGPL(DBG_PCDV, "WA for xfercompl along with stsphs \n");
++									doepint.b.xfercompl = 0;
++									ep0_out_start(core_if, pcd);
++									goto exit_xfercompl;
++							}
++
++							if (pcd->ep0state == EP0_IDLE) {
++								if (doepint_temp.b.sr) {
++									CLEAR_OUT_EP_INTR(core_if, epnum, sr);	
++								}
++									/* A delay is needed for the core to update the
++									 * setup packet count from 3 to 2 after
++									 * receiving the setup packet. */
++									fh_udelay(100);
++									doepint.d32 = FH_READ_REG32(&core_if->dev_if->
++																	out_ep_regs[0]->doepint);
++									if (doeptsize0.b.supcnt == 3) {
++										FH_DEBUGPL(DBG_ANY, "Rolling over!!!!!!!\n");
++										ep->fh_ep.stp_rollover = 1;
++									}
++									if (doepint.b.setup) {
++retry:
++										/* Already started data stage, clear setup */
++										CLEAR_OUT_EP_INTR(core_if, epnum, setup);
++										doepint.b.setup = 0;
++										handle_ep0(pcd);
++										ep->fh_ep.stp_rollover = 0;
++										/* Prepare for more setup packets */
++										if (pcd->ep0state == EP0_IN_STATUS_PHASE || 
++											pcd->ep0state == EP0_IN_DATA_PHASE) {
++											depctl_data_t depctl = {.d32 = 0};
++											depctl.b.cnak = 1;
++											ep0_out_start(core_if, pcd);
++											/* Core not updating setup packet count 
++											 * in case of PET testing - @TODO vahrama
++											 * to check with HW team further */
++											if (!core_if->otg_ver) {
++												FH_MODIFY_REG32(&core_if->dev_if->
++													out_ep_regs[0]->doepctl, 0, depctl.d32);
++											}
++										}
++										goto exit_xfercompl;
++									} else {
++										/* Prepare for more setup packets */
++										FH_DEBUGPL(DBG_ANY, 
++											"EP0_IDLE SR=1 setup=0 new setup comes\n");
++										doepint.d32 = FH_READ_REG32(&core_if->dev_if->
++																	out_ep_regs[0]->doepint);
++										if(doepint.b.setup)
++											goto retry;
++										ep0_out_start(core_if, pcd);
++									}
++							} else {
++								fh_otg_pcd_request_t *req;
++								diepint_data_t diepint0 = {.d32 = 0};
++								doepint_data_t doepint_temp = {.d32 = 0};
++								depctl_data_t diepctl0;
++								diepint0.d32 = FH_READ_REG32(&core_if->dev_if->
++																in_ep_regs[0]->diepint);
++								diepctl0.d32 = FH_READ_REG32(&core_if->dev_if->
++																in_ep_regs[0]->diepctl);
++								
++								if (pcd->ep0state == EP0_IN_DATA_PHASE
++									|| pcd->ep0state == EP0_IN_STATUS_PHASE) {
++									if (diepint0.b.xfercompl) {
++										FH_WRITE_REG32(&core_if->dev_if->
++											in_ep_regs[0]->diepint, diepint0.d32);
++									}
++									if (diepctl0.b.epena) {
++										diepint_data_t diepint = {.d32 = 0};
++										diepctl0.b.snak = 1;
++										FH_WRITE_REG32(&core_if->dev_if->
++														in_ep_regs[0]->diepctl, diepctl0.d32);
++										do {
++											fh_udelay(10);
++											diepint.d32 = FH_READ_REG32(&core_if->dev_if->
++												in_ep_regs[0]->diepint);
++										} while (!diepint.b.inepnakeff); 
++										diepint.b.inepnakeff = 1;
++										FH_WRITE_REG32(&core_if->dev_if->
++											in_ep_regs[0]->diepint, diepint.d32);
++										diepctl0.d32 = 0;
++										diepctl0.b.epdis = 1;
++										FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl,
++														diepctl0.d32);
++										do {
++											fh_udelay(10);
++											diepint.d32 = FH_READ_REG32(&core_if->dev_if->
++												in_ep_regs[0]->diepint);
++										} while (!diepint.b.epdisabled); 
++										diepint.b.epdisabled = 1;
++										FH_WRITE_REG32(&core_if->dev_if->in_ep_regs[0]->diepint,
++															diepint.d32);
++									}
++								}
++								doepint_temp.d32 = FH_READ_REG32(&core_if->dev_if->
++																out_ep_regs[ep->fh_ep.num]->doepint);
++								if (doepint_temp.b.sr) {
++									CLEAR_OUT_EP_INTR(core_if, epnum, sr);
++									if (FH_CIRCLEQ_EMPTY(&ep->queue)) {
++										FH_DEBUGPL(DBG_PCDV, "Request queue empty!!\n");
++									} else {
++										FH_DEBUGPL(DBG_PCDV, "complete req!!\n");
++										req = FH_CIRCLEQ_FIRST(&ep->queue);
++										if (ep->fh_ep.xfer_count != ep->fh_ep.total_len &&
++											pcd->ep0state == EP0_OUT_DATA_PHASE) {
++												/* Read arrived setup packet from req->buf */
++												fh_memcpy(&pcd->setup_pkt->req, 
++													req->buf + ep->fh_ep.xfer_count, 8);
++										}
++										req->actual = ep->fh_ep.xfer_count;
++										fh_otg_request_done(ep, req, -ECONNRESET);
++										ep->fh_ep.start_xfer_buff = 0;
++										ep->fh_ep.xfer_buff = 0;
++										ep->fh_ep.xfer_len = 0;
++									}
++									pcd->ep0state = EP0_IDLE;
++									if (doepint.b.setup) {
++										FH_DEBUGPL(DBG_PCDV, "EP0_IDLE SR=1 setup=1\n");
++										/* Data stage started, clear setup */
++										CLEAR_OUT_EP_INTR(core_if, epnum, setup);
++										doepint.b.setup = 0;
++										handle_ep0(pcd);
++										/* Prepare for setup packets if ep0in was enabled*/
++										if (pcd->ep0state == EP0_IN_STATUS_PHASE) {
++											depctl_data_t depctl = {.d32 = 0};
++											depctl.b.cnak = 1;
++											ep0_out_start(core_if, pcd);
++											/* Core not updating setup packet count 
++											* in case of PET testing - @TODO vahrama
++											* to check with HW team further */
++											if (!core_if->otg_ver) {
++												FH_MODIFY_REG32(&core_if->dev_if->
++														out_ep_regs[0]->doepctl, 0, depctl.d32);
++											}
++										}
++										goto exit_xfercompl;
++									} else {
++										/* Prepare for more setup packets */
++										FH_DEBUGPL(DBG_PCDV, 
++											"EP0_IDLE SR=1 setup=0 new setup comes 2\n");
++										ep0_out_start(core_if, pcd);
++									}
++								}
++							}
++						} 
++						if (core_if->dma_enable == 0 || pcd->ep0state != EP0_IDLE)
++							handle_ep0(pcd);
++exit_xfercompl:
++						FH_DEBUGPL(DBG_PCDV, "after DOEPINT=%x doepint=%x\n", 
++							fh_otg_read_dev_out_ep_intr(core_if, fh_ep), doepint.d32);
++					} else {
++						if (core_if->dma_desc_enable == 0
++							|| pcd->ep0state != EP0_IDLE)
++							handle_ep0(pcd);
++					}
++#ifdef FH_EN_ISOC
++				} else if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
++					if (doepint.b.pktdrpsts == 0) {
++						/* Clear the bit in DOEPINTn for this interrupt */
++						CLEAR_OUT_EP_INTR(core_if,
++								  epnum,
++								  xfercompl);
++						complete_iso_ep(pcd, ep);
++					} else {
++
++						doepint_data_t doepint = {.d32 = 0 };
++						doepint.b.xfercompl = 1;
++						doepint.b.pktdrpsts = 1;
++						FH_WRITE_REG32
++						    (&core_if->dev_if->out_ep_regs
++						     [epnum]->doepint,
++						     doepint.d32);
++						if (handle_iso_out_pkt_dropped
++						    (core_if, fh_ep)) {
++							complete_iso_ep(pcd,
++									ep);
++						}
++					}
++#endif /* FH_EN_ISOC */
++#ifdef FH_UTE_PER_IO
++				} else if (fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
++					CLEAR_OUT_EP_INTR(core_if, epnum, xfercompl);
++					if (!ep->stopped)
++						complete_xiso_ep(ep);
++#endif /* FH_UTE_PER_IO */
++				} else {
++					/* Clear the bit in DOEPINTn for this interrupt */
++					CLEAR_OUT_EP_INTR(core_if, epnum,
++							  xfercompl);
++
++					if (core_if->core_params->dev_out_nak) {
++						FH_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[epnum]);
++						pcd->core_if->ep_xfer_info[epnum].state = 0;
++#ifdef DEBUG
++						print_memory_payload(pcd, fh_ep);
++#endif
++					}
++					if (core_if->dma_desc_enable && fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
++						handle_xfercompl_iso_ddma(core_if->dev_if, ep);
++					} else {
++						complete_ep(ep);
++					}
++				}
++
++			}
++			if (doepint.b.stsphsercvd) {
++				deptsiz0_data_t deptsiz;
++				CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
++				deptsiz.d32 =
++					FH_READ_REG32(&core_if->dev_if->
++					out_ep_regs[0]->doeptsiz);
++				if ((core_if->dma_desc_enable) || (core_if->dma_enable &&
++					core_if->snpsid >= OTG_CORE_REV_3_00a)) {
++						do_setup_in_status_phase(pcd);
++				}
++			}
++
++			/* Endpoint disable      */
++			if (doepint.b.epdisabled) {
++
++				/* Clear the bit in DOEPINTn for this interrupt */
++				CLEAR_OUT_EP_INTR(core_if, epnum, epdisabled);
++				if (core_if->core_params->dev_out_nak) {
++#ifdef DEBUG
++					print_memory_payload(pcd, fh_ep);
++#endif
++					/* In case of timeout condition */
++					if (core_if->ep_xfer_info[epnum].state == 2) {
++						dctl.d32 = FH_READ_REG32(&core_if->dev_if->
++										dev_global_regs->dctl);
++						dctl.b.cgoutnak = 1;
++						FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
++																dctl.d32);
++						/* Unmask goutnakeff interrupt which was masked
++						 * during handle nak out interrupt */
++						gintmsk.b.goutnakeff = 1;
++						FH_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
++																0, gintmsk.d32);
++					
++						complete_ep(ep);
++					}
++				}
++				if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
++					dctl_data_t dctl;
++					gintmsk_data_t intr_mask = {.d32 = 0};
++					fh_otg_pcd_request_t *req = 0;
++
++					dctl.d32 = FH_READ_REG32(&core_if->dev_if->
++						dev_global_regs->dctl);
++					dctl.b.cgoutnak = 1;
++					FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl,
++						dctl.d32);
++
++					intr_mask.d32 = 0;
++					intr_mask.b.incomplisoout = 1;
++
++					/* Get any pending requests */
++					if (!FH_CIRCLEQ_EMPTY(&ep->queue)) {
++						req = FH_CIRCLEQ_FIRST(&ep->queue);
++						if (!req) {
++							FH_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
++						} else {
++							fh_otg_request_done(ep, req, 0);
++							start_next_request(ep);
++						}
++					} else {
++						FH_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
++					}
++				}
++			}
++			/* AHB Error */
++			if (doepint.b.ahberr) {
++				FH_ERROR("EP%d OUT AHB Error\n", epnum);
++				FH_ERROR("EP%d DEPDMA=0x%08x \n",
++					  epnum, core_if->dev_if->out_ep_regs[epnum]->doepdma);
++				CLEAR_OUT_EP_INTR(core_if, epnum, ahberr);
++			}
++			/* Setup Phase Done (control EPs) */
++			if (doepint.b.setup) {
++#ifdef DEBUG_EP0
++				FH_DEBUGPL(DBG_PCD, "EP%d SETUP Done\n", epnum);
++#endif
++				CLEAR_OUT_EP_INTR(core_if, epnum, setup);
++
++				handle_ep0(pcd);
++			}
++
++			/** OUT EP BNA Intr */
++			if (doepint.b.bna) {
++				CLEAR_OUT_EP_INTR(core_if, epnum, bna);
++				if (core_if->dma_desc_enable) {
++#ifdef FH_EN_ISOC
++					if (fh_ep->type ==
++					    FH_OTG_EP_TYPE_ISOC) {
++						/*
++						 * This check prevents a first "false" BNA
++						 * from being handled right after reconnect.
++						 */
++						if (fh_ep->next_frame !=
++						    0xffffffff)
++							fh_otg_pcd_handle_iso_bna(ep);
++					} else
++#endif				/* FH_EN_ISOC */
++					if (ep->fh_ep.type != FH_OTG_EP_TYPE_ISOC) {
++						fh_otg_pcd_handle_noniso_bna(ep);
++					}
++				}
++			}
++			/* Babble Interrupt */
++			if (doepint.b.babble) {
++				FH_DEBUGPL(DBG_ANY, "EP%d OUT Babble\n",
++					    epnum);
++				handle_out_ep_babble_intr(pcd, epnum);
++
++				CLEAR_OUT_EP_INTR(core_if, epnum, babble);
++			}
++			if (doepint.b.outtknepdis) {
++				FH_DEBUGPL(DBG_ANY, "EP%d OUT Token received when EP is disabled\n",
++					    epnum);
++				if (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
++					if (core_if->dma_desc_enable) {
++						if (!ep->fh_ep.iso_transfer_started) {
++							ep->fh_ep.frame_num = core_if->frame_num;
++							fh_otg_pcd_start_iso_ddma(core_if, ep);
++						}
++					} else {
++						doepmsk_data_t doepmsk = {.d32 = 0};
++						ep->fh_ep.frame_num = core_if->frame_num;
++						if (ep->fh_ep.bInterval > 1) {
++							depctl_data_t depctl;
++							depctl.d32 = FH_READ_REG32(&core_if->dev_if->
++														out_ep_regs[epnum]->doepctl);
++							if (ep->fh_ep.frame_num & 0x1) {
++								depctl.b.setd1pid = 1;
++								depctl.b.setd0pid = 0;
++							} else {
++								depctl.b.setd0pid = 1;
++								depctl.b.setd1pid = 0;
++							}
++							FH_WRITE_REG32(&core_if->dev_if->
++											out_ep_regs[epnum]->doepctl, depctl.d32);
++						}
++					
++						start_next_request(ep);
++						doepmsk.b.outtknepdis = 1;
++						FH_MODIFY_REG32(&core_if->dev_if->dev_global_regs->doepmsk, 
++								 doepmsk.d32, 0);
++					}
++				}
++				CLEAR_OUT_EP_INTR(core_if, epnum, outtknepdis);
++			}
++
++			/* NAK Interrupt */
++			if (doepint.b.nak) {
++				FH_DEBUGPL(DBG_ANY, "EP%d OUT NAK\n", epnum);
++				handle_out_ep_nak_intr(pcd, epnum);
++
++				CLEAR_OUT_EP_INTR(core_if, epnum, nak);
++			}
++			/* NYET Interrupt */
++			if (doepint.b.nyet) {
++				FH_DEBUGPL(DBG_ANY, "EP%d OUT NYET\n", epnum);
++				handle_out_ep_nyet_intr(pcd, epnum);
++
++				CLEAR_OUT_EP_INTR(core_if, epnum, nyet);
++			}
++		}
++
++		epnum++;
++		ep_intr >>= 1;
++	}
++
++	return 1;
++
++#undef CLEAR_OUT_EP_INTR
++}
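++
++/**
++ * Decides whether an ISOC IN transfer aimed at frame trgt_fr should be
++ * dropped because the current frame curr_fr has already passed it. Frame
++ * numbers are 14 bits wide (wrap at 0x3FFF); when frm_overrun is set the
++ * target has wrapped, so the comparison is only trusted within half the
++ * 14-bit range to distinguish "just missed" from "not yet wrapped".
++ */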
++static int drop_transfer(uint32_t trgt_fr, uint32_t curr_fr, uint8_t frm_overrun)
++{
++	int retval = 0;
++	if(!frm_overrun && curr_fr >= trgt_fr) 
++		retval = 1;
++	else if (frm_overrun
++		 && (curr_fr >= trgt_fr && ((curr_fr - trgt_fr) < 0x3FFF / 2)))
++		retval = 1;
++	return retval;
++}
++
++/**
++ * Incomplete ISO IN Transfer Interrupt.
++ * This interrupt indicates one of the following conditions occurred
++ * while transmitting an ISOC transaction.
++ * - Corrupted IN Token for ISOC EP.
++ * - Packet not complete in FIFO.
++ * The following actions will be taken:
++ *	-#	Determine the EP
++ *	-#	Set the incomplete flag in the fh_ep structure
++ *	-#	Disable the EP; when the "Endpoint Disabled" interrupt is
++ *		received, flush the FIFO
++ */
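++/*
++ * In the non-FH_EN_ISOC path below, drop_transfer() decides whether the
++ * target frame was missed; if so the EP is disabled (snak + epdis) and the
++ * transfer is finished from the "Endpoint Disabled" handler, which also
++ * flushes the EP's Tx FIFO.
++ */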
++int32_t fh_otg_pcd_handle_incomplete_isoc_in_intr(fh_otg_pcd_t * pcd)
++{
++	gintsts_data_t gintsts;
++
++#ifdef FH_EN_ISOC
++	fh_otg_dev_if_t *dev_if;
++	deptsiz_data_t deptsiz = {.d32 = 0 };
++	depctl_data_t depctl = {.d32 = 0 };
++	dsts_data_t dsts = {.d32 = 0 };
++	fh_ep_t *fh_ep;
++	int i;
++
++	dev_if = GET_CORE_IF(pcd)->dev_if;
++
++	for (i = 1; i <= dev_if->num_in_eps; ++i) {
++		fh_ep = &pcd->in_ep[i].fh_ep;
++		if (fh_ep->active && fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
++			deptsiz.d32 =
++			    FH_READ_REG32(&dev_if->in_ep_regs[i]->dieptsiz);
++			depctl.d32 =
++			    FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
++
++			if (depctl.b.epdis && deptsiz.d32) {
++				set_current_pkt_info(GET_CORE_IF(pcd), fh_ep);
++				if (fh_ep->cur_pkt >= fh_ep->pkt_cnt) {
++					fh_ep->cur_pkt = 0;
++					fh_ep->proc_buf_num =
++					    (fh_ep->proc_buf_num ^ 1) & 0x1;
++
++					if (fh_ep->proc_buf_num) {
++						fh_ep->cur_pkt_addr =
++						    fh_ep->xfer_buff1;
++						fh_ep->cur_pkt_dma_addr =
++						    fh_ep->dma_addr1;
++					} else {
++						fh_ep->cur_pkt_addr =
++						    fh_ep->xfer_buff0;
++						fh_ep->cur_pkt_dma_addr =
++						    fh_ep->dma_addr0;
++					}
++
++				}
++
++				dsts.d32 =
++				    FH_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
++						   dev_global_regs->dsts);
++				fh_ep->next_frame = dsts.b.soffn;
++
++				fh_otg_iso_ep_start_frm_transfer(GET_CORE_IF
++								  (pcd),
++								  fh_ep);
++			}
++		}
++	}
++
++#else
++	depctl_data_t depctl = {.d32 = 0 };
++	fh_ep_t *fh_ep;
++	fh_otg_dev_if_t *dev_if;
++	int i;
++	dev_if = GET_CORE_IF(pcd)->dev_if;
++
++	FH_DEBUGPL(DBG_PCD,"Incomplete ISO IN \n");
++	
++	for (i = 1; i <= dev_if->num_in_eps; ++i) {
++		fh_ep = &pcd->in_ep[i-1].fh_ep;
++		depctl.d32 =
++			FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
++		if (depctl.b.epena && fh_ep->type == FH_OTG_EP_TYPE_ISOC) {
++			if (drop_transfer(fh_ep->frame_num, GET_CORE_IF(pcd)->frame_num, 
++							fh_ep->frm_overrun))
++			{
++				depctl.d32 =
++					FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
++				depctl.b.snak = 1;
++				depctl.b.epdis = 1;
++				FH_MODIFY_REG32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32, depctl.d32);
++			}
++		}
++	}
++
++	/*intr_mask.b.incomplisoin = 1;
++	   FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
++	   intr_mask.d32, 0);    */
++#endif //FH_EN_ISOC
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.incomplisoin = 1;
++	FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++			gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * Incomplete ISO OUT Transfer Interrupt.
++ *
++ * This interrupt indicates that the core has dropped an ISO OUT
++ * packet. The following conditions can be the cause:
++ * - FIFO Full, the entire packet would not fit in the FIFO.
++ * - CRC Error
++ * - Corrupted Token
++ * The following actions will be taken:
++ *	-#	Determine the EP
++ *	-#	Set the incomplete flag in the fh_ep structure
++ *	-#	Read any data from the FIFO
++ *	-#	Disable the EP; when the "Endpoint Disabled" interrupt is
++ *		received, re-enable the EP.
++ */
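++/*
++ * Note: in the non-FH_EN_ISOC path below, the handler requests a Global OUT
++ * NAK (sgoutnak) and records the affected EP in dev_if->isoc_ep so that
++ * fh_otg_pcd_handle_out_nak_effective() can finish the disable sequence once
++ * the NAK takes effect.
++ */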
++int32_t fh_otg_pcd_handle_incomplete_isoc_out_intr(fh_otg_pcd_t * pcd)
++{
++
++	gintsts_data_t gintsts;
++
++#ifdef FH_EN_ISOC
++	fh_otg_dev_if_t *dev_if;
++	deptsiz_data_t deptsiz = {.d32 = 0 };
++	depctl_data_t depctl = {.d32 = 0 };
++	dsts_data_t dsts = {.d32 = 0 };
++	fh_ep_t *fh_ep;
++	int i;
++
++	dev_if = GET_CORE_IF(pcd)->dev_if;
++
++	for (i = 1; i <= dev_if->num_out_eps; ++i) {
++		fh_ep = &pcd->out_ep[i].fh_ep;
++		if (pcd->out_ep[i].fh_ep.active &&
++		    pcd->out_ep[i].fh_ep.type == FH_OTG_EP_TYPE_ISOC) {
++			deptsiz.d32 =
++			    FH_READ_REG32(&dev_if->out_ep_regs[i]->doeptsiz);
++			depctl.d32 =
++			    FH_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
++
++			if (depctl.b.epdis && deptsiz.d32) {
++				set_current_pkt_info(GET_CORE_IF(pcd),
++						     &pcd->out_ep[i].fh_ep);
++				if (fh_ep->cur_pkt >= fh_ep->pkt_cnt) {
++					fh_ep->cur_pkt = 0;
++					fh_ep->proc_buf_num =
++					    (fh_ep->proc_buf_num ^ 1) & 0x1;
++
++					if (fh_ep->proc_buf_num) {
++						fh_ep->cur_pkt_addr =
++						    fh_ep->xfer_buff1;
++						fh_ep->cur_pkt_dma_addr =
++						    fh_ep->dma_addr1;
++					} else {
++						fh_ep->cur_pkt_addr =
++						    fh_ep->xfer_buff0;
++						fh_ep->cur_pkt_dma_addr =
++						    fh_ep->dma_addr0;
++					}
++
++				}
++
++				dsts.d32 =
++				    FH_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
++						   dev_global_regs->dsts);
++				fh_ep->next_frame = dsts.b.soffn;
++
++				fh_otg_iso_ep_start_frm_transfer(GET_CORE_IF
++								  (pcd),
++								  fh_ep);
++			}
++		}
++	}
++#else
++	/** @todo implement ISR */
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++	fh_otg_core_if_t *core_if;
++	deptsiz_data_t deptsiz = {.d32 = 0 };
++	depctl_data_t depctl = {.d32 = 0 };
++	dctl_data_t dctl = {.d32 = 0 };
++	fh_ep_t *fh_ep = NULL;
++	int i;
++	core_if = GET_CORE_IF(pcd);
++
++	for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
++		fh_ep = &pcd->out_ep[i].fh_ep;
++		depctl.d32 =
++			FH_READ_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl);
++		if (depctl.b.epena && depctl.b.dpid == (core_if->frame_num & 0x1)) {
++			core_if->dev_if->isoc_ep = fh_ep;	
++			deptsiz.d32 =
++			    FH_READ_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doeptsiz);
++			break;
++		}
++	}
++	dctl.d32 = FH_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
++	gintsts.d32 = FH_READ_REG32(&core_if->core_global_regs->gintsts);
++	intr_mask.d32 = FH_READ_REG32(&core_if->core_global_regs->gintmsk);
++
++	if (!intr_mask.b.goutnakeff) {
++		/* Unmask it */
++		intr_mask.b.goutnakeff = 1;
++		FH_WRITE_REG32(&core_if->core_global_regs->gintmsk, intr_mask.d32);
++	}
++	if (!gintsts.b.goutnakeff) {
++		dctl.b.sgoutnak = 1;
++	}
++	FH_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
++
++	depctl.d32 = FH_READ_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl);
++	if (depctl.b.epena) {
++		depctl.b.epdis = 1;
++		depctl.b.snak = 1;
++	}
++	FH_WRITE_REG32(&core_if->dev_if->out_ep_regs[fh_ep->num]->doepctl, depctl.d32);
++
++	intr_mask.d32 = 0;
++	intr_mask.b.incomplisoout = 1;
++		
++#endif /* FH_EN_ISOC */
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.incomplisoout = 1;
++	FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++			gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * This function handles the Global IN NAK Effective interrupt.
++ *
++ */
++int32_t fh_otg_pcd_handle_in_nak_effective(fh_otg_pcd_t * pcd)
++{
++	fh_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
++	depctl_data_t diepctl = {.d32 = 0 };
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++	gintsts_data_t gintsts;
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	int i;
++
++	FH_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
++
++	/* Disable all active IN EPs */
++	for (i = 0; i <= dev_if->num_in_eps; i++) {
++		diepctl.d32 = FH_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
++		if (!(diepctl.b.eptype & 1) && diepctl.b.epena) {
++			if (core_if->start_predict > 0)
++				core_if->start_predict++;
++			diepctl.b.epdis = 1;
++			diepctl.b.snak = 1;
++			FH_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl, diepctl.d32);
++		}						
++	}
++	
++
++	/* Disable the Global IN NAK Effective Interrupt */
++	intr_mask.b.ginnakeff = 1;
++	FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
++			 intr_mask.d32, 0);
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.ginnakeff = 1;
++	FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++			gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * OUT NAK Effective.
++ *
++ */
++int32_t fh_otg_pcd_handle_out_nak_effective(fh_otg_pcd_t * pcd)
++{
++	fh_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
++	gintmsk_data_t intr_mask = {.d32 = 0 };
++	gintsts_data_t gintsts;
++	depctl_data_t doepctl;
++	int i;
++
++	/* Disable the Global OUT NAK Effective Interrupt */
++	intr_mask.b.goutnakeff = 1;
++	FH_MODIFY_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
++			 intr_mask.d32, 0);
++
++	/* If DEV OUT NAK enabled */
++	if (pcd->core_if->core_params->dev_out_nak) {
++		/* Run over all out endpoints to determine the ep number on
++		 * which the timeout has happened 
++		 */
++		for (i = 0; i <= dev_if->num_out_eps; i++) {
++			if (pcd->core_if->ep_xfer_info[i].state == 2)
++				break;
++		}
++		if (i > dev_if->num_out_eps) {
++			dctl_data_t dctl;
++			dctl.d32 =
++			    FH_READ_REG32(&dev_if->dev_global_regs->dctl);
++			dctl.b.cgoutnak = 1;
++			FH_WRITE_REG32(&dev_if->dev_global_regs->dctl,
++					dctl.d32);
++			goto out;
++		}
++
++		/* Disable the endpoint */
++		doepctl.d32 = FH_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
++		if (doepctl.b.epena) {
++			doepctl.b.epdis = 1;
++			doepctl.b.snak = 1;
++		}
++		FH_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
++		return 1;
++	}
++	/* We come here from the Incomplete ISO OUT handler */
++	if (dev_if->isoc_ep) {
++		fh_ep_t *fh_ep = (fh_ep_t *) dev_if->isoc_ep;
++		uint32_t epnum = fh_ep->num;
++		doepint_data_t doepint;
++		doepint.d32 =
++		    FH_READ_REG32(&dev_if->out_ep_regs[fh_ep->num]->doepint);
++		dev_if->isoc_ep = NULL;
++		doepctl.d32 =
++		    FH_READ_REG32(&dev_if->out_ep_regs[epnum]->doepctl);
++		FH_PRINTF("Before disable DOEPCTL = %08x\n", doepctl.d32);
++		if (doepctl.b.epena) {
++			doepctl.b.epdis = 1;
++			doepctl.b.snak = 1;
++		}
++		FH_WRITE_REG32(&dev_if->out_ep_regs[epnum]->doepctl,
++				doepctl.d32);
++		return 1;
++	} else
++		FH_PRINTF("INTERRUPT Handler not implemented for %s\n",
++			   "Global OUT NAK Effective");
++
++out:
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.goutnakeff = 1;
++	FH_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++			gintsts.d32);
++
++	return 1;
++}
++
++/**
++ * PCD interrupt handler.
++ *
++ * The PCD handles the device interrupts.  Many conditions can cause a
++ * device interrupt. When an interrupt occurs, the device interrupt
++ * service routine determines the cause of the interrupt and
++ * dispatches handling to the appropriate function. These interrupt
++ * handling functions are described below.
++ *
++ * All interrupt registers are processed from LSB to MSB.
++ *
++ */
++int32_t fh_otg_pcd_handle_intr(fh_otg_pcd_t * pcd)
++{
++	fh_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++#ifdef VERBOSE
++	fh_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
++#endif
++	gintsts_data_t gintr_status;
++	int32_t retval = 0;
++
++	if (fh_otg_check_haps_status(core_if) == -1) {
++		FH_WARN("HAPS is disconnected");
++		return retval;
++	}
++
++	/* Exit from ISR if core is hibernated */
++	if (core_if->hibernation_suspend == 1) {
++		return retval;
++	}
++#ifdef VERBOSE
++	FH_DEBUGPL(DBG_ANY, "%s() gintsts=%08x	 gintmsk=%08x\n",
++		    __func__,
++		    FH_READ_REG32(&global_regs->gintsts),
++		    FH_READ_REG32(&global_regs->gintmsk));
++#endif
++
++	if (fh_otg_is_device_mode(core_if)) {
++		FH_SPINLOCK(pcd->lock);
++#ifdef VERBOSE
++		FH_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x  gintmsk=%08x\n",
++			    __func__,
++			    FH_READ_REG32(&global_regs->gintsts),
++			    FH_READ_REG32(&global_regs->gintmsk));
++#endif
++
++		gintr_status.d32 = fh_otg_read_core_intr(core_if);
++
++		FH_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n",
++			    __func__, gintr_status.d32);
++
++		if (gintr_status.b.sofintr) {
++			retval |= fh_otg_pcd_handle_sof_intr(pcd);
++		}
++		if (gintr_status.b.rxstsqlvl) {
++			retval |=
++			    fh_otg_pcd_handle_rx_status_q_level_intr(pcd);
++		}
++		if (gintr_status.b.nptxfempty) {
++			retval |= fh_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
++		}
++		if (gintr_status.b.goutnakeff) {
++			retval |= fh_otg_pcd_handle_out_nak_effective(pcd);
++		}
++		if (gintr_status.b.i2cintr) {
++			retval |= fh_otg_pcd_handle_i2c_intr(pcd);
++		}
++		if (gintr_status.b.erlysuspend) {
++			retval |= fh_otg_pcd_handle_early_suspend_intr(pcd);
++		}
++		if (gintr_status.b.usbreset) {
++			retval |= fh_otg_pcd_handle_usb_reset_intr(pcd);
++		}
++		if (gintr_status.b.enumdone) {
++			retval |= fh_otg_pcd_handle_enum_done_intr(pcd);
++		}
++		if (gintr_status.b.isooutdrop) {
++			retval |=
++			    fh_otg_pcd_handle_isoc_out_packet_dropped_intr
++			    (pcd);
++		}
++		if (gintr_status.b.eopframe) {
++			retval |=
++			    fh_otg_pcd_handle_end_periodic_frame_intr(pcd);
++		}
++		if (gintr_status.b.inepint) {
++			if (!core_if->multiproc_int_enable) {
++				retval |= fh_otg_pcd_handle_in_ep_intr(pcd);
++			}
++		}
++		if (gintr_status.b.outepintr) {
++			if (!core_if->multiproc_int_enable) {
++				retval |= fh_otg_pcd_handle_out_ep_intr(pcd);
++			}
++		}
++		if (gintr_status.b.epmismatch) {
++			retval |= fh_otg_pcd_handle_ep_mismatch_intr(pcd);
++		}
++		if (gintr_status.b.fetsusp) {
++			retval |= fh_otg_pcd_handle_ep_fetsusp_intr(pcd);
++		}
++		if (gintr_status.b.ginnakeff) {
++			retval |= fh_otg_pcd_handle_in_nak_effective(pcd);
++		}
++		if (gintr_status.b.incomplisoin) {
++			retval |=
++			    fh_otg_pcd_handle_incomplete_isoc_in_intr(pcd);
++		}
++		if (gintr_status.b.incomplisoout) {
++			retval |=
++			    fh_otg_pcd_handle_incomplete_isoc_out_intr(pcd);
++		}
++
++		/* In MPI mode Device Endpoints interrupts are asserted
++		 * without setting outepintr and inepint bits set, so these
++		 * Interrupt handlers are called without checking these bit-fields
++		 */
++		if (core_if->multiproc_int_enable) {
++			retval |= fh_otg_pcd_handle_in_ep_intr(pcd);
++			retval |= fh_otg_pcd_handle_out_ep_intr(pcd);
++		}
++#ifdef VERBOSE
++		FH_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__,
++			    FH_READ_REG32(&global_regs->gintsts));
++#endif
++		FH_SPINUNLOCK(pcd->lock);
++	}
++	return retval;
++}
++
++#endif /* FH_HOST_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_linux.c b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_linux.c
+new file mode 100644
+index 00000000..f2816628
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_pcd_linux.c
+@@ -0,0 +1,1421 @@
++ /* ==========================================================================
++  * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_pcd_linux.c $
++  * $Revision: #30 $
++  * $Date: 2015/08/06 $
++  * $Change: 2913039 $
++  *
++  * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++  * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++  * otherwise expressly agreed to in writing between Synopsys and you.
++  *
++  * The Software IS NOT an item of Licensed Software or Licensed Product under
++  * any End User Software License Agreement or Agreement for Licensed Product
++  * with Synopsys or any supplement thereto. You are permitted to use and
++  * redistribute this Software in source and binary forms, with or without
++  * modification, provided that redistributions of source code must retain this
++  * notice. You may not view, use, disclose, copy or distribute this file or
++  * any information contained herein except pursuant to this license grant from
++  * Synopsys. If you do not agree with this notice, including the disclaimer
++  * below, then you are not authorized to use the Software.
++  *
++  * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++  * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++  * DAMAGE.
++  * ========================================================================== */
++#ifndef FH_HOST_ONLY
++
++/** @file
++ * This file implements the Peripheral Controller Driver.
++ *
++ * The Peripheral Controller Driver (PCD) is responsible for
++ * translating requests from the Function Driver into the appropriate
++ * actions on the FH_otg controller. It isolates the Function Driver
++ * from the specifics of the controller by providing an API to the
++ * Function Driver.
++ *
++ * The Peripheral Controller Driver for Linux will implement the
++ * Gadget API, so that the existing Gadget drivers can be used.
++ * (Gadget Driver is the Linux terminology for a Function Driver.)
++ *
++ * The Linux Gadget API is defined in the header file
++ * <code><linux/usb_gadget.h></code>.  The USB EP operations API is
++ * defined in the structure <code>usb_ep_ops</code> and the USB
++ * Controller API is defined in the structure
++ * <code>usb_gadget_ops</code>.
++ *
++ */
++#include <linux/platform_device.h>
++
++#include "fh_otg_os_dep.h"
++#include "fh_otg_pcd_if.h"
++#include "fh_otg_pcd.h"
++#include "fh_otg_driver.h"
++#include "fh_otg_dbg.h"
++
++static struct gadget_wrapper {
++	fh_otg_pcd_t *pcd;
++
++	struct usb_gadget gadget;
++	struct usb_gadget_driver *driver;
++
++	struct usb_ep ep0;
++	struct usb_ep in_ep[16];
++	struct usb_ep out_ep[16];
++
++} *gadget_wrapper;
++
++/* Display the contents of the buffer */
++extern void dump_msg(const u8 * buf, unsigned int length);
++/**
++ * Get the fh_otg_pcd_ep_t* from a usb_ep* pointer - returns NULL
++ * if the endpoint is not found
++ */
++static struct fh_otg_pcd_ep *ep_from_handle(fh_otg_pcd_t * pcd, void *handle)
++{
++	int i;
++	if (pcd->ep0.priv == handle) {
++		return &pcd->ep0;
++	}
++
++	for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
++		if (pcd->in_ep[i].priv == handle)
++			return &pcd->in_ep[i];
++		if (pcd->out_ep[i].priv == handle)
++			return &pcd->out_ep[i];
++	}
++
++	return NULL;
++}
++
++/* USB Endpoint Operations */
++/*
++ * The following sections briefly describe the behavior of the Gadget
++ * API endpoint operations implemented in the FH_otg driver
++ * software. Detailed descriptions of the generic behavior of each of
++ * these functions can be found in the Linux header file
++ * include/linux/usb_gadget.h.
++ *
++ * The Gadget API provides wrapper functions for each of the function
++ * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper
++ * function, which then calls the underlying PCD function. The
++ * following sections are named according to the wrapper
++ * functions. Within each section, the corresponding FH_otg PCD
++ * function name is specified.
++ *
++ */
++
++/**
++ * This function is called by the Gadget Driver for each EP to be
++ * configured for the current configuration (SET_CONFIGURATION).
++ *
++ * This function initializes the fh_otg_ep_t data structure, and then
++ * calls fh_otg_ep_activate.
++ */
++static int ep_enable(struct usb_ep *usb_ep,
++		     const struct usb_endpoint_descriptor *ep_desc)
++{
++	int retval;
++
++	FH_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, usb_ep, ep_desc);
++
++	if (!usb_ep || !ep_desc || ep_desc->bDescriptorType != USB_DT_ENDPOINT) {
++		FH_WARN("%s, bad ep or descriptor\n", __func__);
++		return -EINVAL;
++	}
++	if (usb_ep == &gadget_wrapper->ep0) {
++		FH_WARN("%s, bad ep(0)\n", __func__);
++		return -EINVAL;
++	}
++
++	/* Check FIFO size? */
++	if (!ep_desc->wMaxPacketSize) {
++		FH_WARN("%s, bad %s maxpacket\n", __func__, usb_ep->name);
++		return -ERANGE;
++	}
++
++	if (!gadget_wrapper->driver ||
++	    gadget_wrapper->gadget.speed == USB_SPEED_UNKNOWN) {
++		FH_WARN("%s, bogus device state\n", __func__);
++		return -ESHUTDOWN;
++	}
++
++	retval = fh_otg_pcd_ep_enable(gadget_wrapper->pcd,
++				       (const uint8_t *)ep_desc,
++				       (void *)usb_ep);
++	if (retval) {
++		FH_WARN("fh_otg_pcd_ep_enable failed\n");
++		return -EINVAL;
++	}
++
++	usb_ep->maxpacket = le16_to_cpu(ep_desc->wMaxPacketSize);
++
++	return 0;
++}
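++
++#if 0
++/*
++ * Usage sketch (not part of the original driver): a gadget driver
++ * enables an endpoint by handing a descriptor to usb_ep_enable(),
++ * which lands in ep_enable() above. The two-argument form matches
++ * the older kernels this driver targets; the bulk IN descriptor and
++ * all sketch_* names below are hypothetical.
++ */
++static struct usb_endpoint_descriptor sketch_bulk_in_desc = {
++	.bLength = USB_DT_ENDPOINT_SIZE,
++	.bDescriptorType = USB_DT_ENDPOINT,
++	.bEndpointAddress = USB_DIR_IN | 1,
++	.bmAttributes = USB_ENDPOINT_XFER_BULK,
++	.wMaxPacketSize = cpu_to_le16(512),
++};
++
++static int sketch_enable(struct usb_ep *ep)
++{
++	/* On success ep_enable() also updates ep->maxpacket. */
++	return usb_ep_enable(ep, &sketch_bulk_in_desc);
++}
++#endif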
++
++/**
++ * This function is called when an EP is disabled due to disconnect or
++ * change in configuration. Any pending requests will terminate with a
++ * status of -ESHUTDOWN.
++ *
++ * This function modifies the fh_otg_ep_t data structure for this EP,
++ * and then calls fh_otg_ep_deactivate.
++ */
++static int ep_disable(struct usb_ep *usb_ep)
++{
++	int retval;
++
++	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, usb_ep);
++	if (!usb_ep) {
++		FH_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__,
++			    usb_ep ? usb_ep->name : "(null)");
++		return -EINVAL;
++	}
++
++	retval = fh_otg_pcd_ep_disable(gadget_wrapper->pcd, usb_ep);
++	if (retval) {
++		retval = -EINVAL;
++	}
++
++	return retval;
++}
++
++/**
++ * This function allocates a request object to use with the specified
++ * endpoint.
++ *
++ * @param ep The endpoint to be used with the request
++ * @param gfp_flags the GFP_* flags to use.
++ */
++static struct usb_request *fh_otg_pcd_alloc_request(struct usb_ep *ep,
++						     gfp_t gfp_flags)
++{
++	struct usb_request *usb_req;
++
++	FH_DEBUGPL(DBG_PCDV, "%s(%p,%d)\n", __func__, ep, gfp_flags);
++	if (0 == ep) {
++		FH_WARN("%s() %s\n", __func__, "Invalid EP!\n");
++		return 0;
++	}
++	usb_req = kmalloc(sizeof(*usb_req), gfp_flags);
++	if (0 == usb_req) {
++		FH_WARN("%s() %s\n", __func__, "request allocation failed!\n");
++		return 0;
++	}
++	memset(usb_req, 0, sizeof(*usb_req));
++	usb_req->dma = FH_DMA_ADDR_INVALID;
++
++	return usb_req;
++}
++
++/**
++ * This function frees a request object.
++ *
++ * @param ep The endpoint associated with the request
++ * @param req The request being freed
++ */
++static void fh_otg_pcd_free_request(struct usb_ep *ep, struct usb_request *req)
++{
++	FH_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, ep, req);
++
++	if (0 == ep || 0 == req) {
++		FH_WARN("%s() %s\n", __func__,
++			 "Invalid ep or req argument!\n");
++		return;
++	}
++
++	kfree(req);
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++/**
++ * This function allocates an I/O buffer to be used for a transfer
++ * to/from the specified endpoint.
++ *
++ * @param usb_ep The endpoint to be used with the request
++ * @param bytes The desired number of bytes for the buffer
++ * @param dma Pointer to the buffer's DMA address; must be valid
++ * @param gfp_flags the GFP_* flags to use.
++ * @return address of a new buffer or NULL if the buffer could not be allocated.
++ */
++static void *fh_otg_pcd_alloc_buffer(struct usb_ep *usb_ep, unsigned bytes,
++				      dma_addr_t * dma, gfp_t gfp_flags)
++{
++	void *buf;
++	fh_otg_pcd_t *pcd = 0;
++
++	pcd = gadget_wrapper->pcd;
++
++	FH_DEBUGPL(DBG_PCDV, "%s(%p,%d,%p,%0x)\n", __func__, usb_ep, bytes,
++		    dma, gfp_flags);
++
++	/* Check dword alignment */
++	if ((bytes & 0x3UL) != 0) {
++		FH_WARN("%s() Buffer size is not a multiple of "
++			 "DWORD size (%d)", __func__, bytes);
++	}
++
++	buf = dma_alloc_coherent(NULL, bytes, dma, gfp_flags);
++
++	/* Check dword alignment */
++	if (((unsigned long)buf & 0x3UL) != 0) {
++		FH_WARN("%s() Buffer is not DWORD aligned (%p)",
++			 __func__, buf);
++	}
++
++	return buf;
++}
++
++/**
++ * This function frees an I/O buffer that was allocated by alloc_buffer.
++ *
++ * @param usb_ep the endpoint associated with the buffer
++ * @param buf address of the buffer
++ * @param dma The buffer's DMA address
++ * @param bytes The number of bytes of the buffer
++ */
++static void fh_otg_pcd_free_buffer(struct usb_ep *usb_ep, void *buf,
++				    dma_addr_t dma, unsigned bytes)
++{
++	fh_otg_pcd_t *pcd = 0;
++
++	pcd = gadget_wrapper->pcd;
++
++	FH_DEBUGPL(DBG_PCDV, "%s(%p,%0x,%d)\n", __func__, buf, dma, bytes);
++
++	dma_free_coherent(NULL, bytes, buf, dma);
++}
++#endif
++
++/**
++ * This function is used to submit an I/O Request to an EP.
++ *
++ *	- When the request completes the request's completion callback
++ *	  is called to return the request to the driver.
++ *	- An EP, except control EPs, may have multiple requests
++ *	  pending.
++ *	- Once submitted the request cannot be examined or modified.
++ *	- Each request is turned into one or more packets.
++ *	- A BULK EP can queue any amount of data; the transfer is
++ *	  packetized.
++ *	- Zero length Packets are specified with the request 'zero'
++ *	  flag.
++ */
++static int ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
++		    gfp_t gfp_flags)
++{
++	fh_otg_pcd_t *pcd;
++	struct fh_otg_pcd_ep *ep;
++	int retval, is_isoc_ep, is_in_ep;
++	dma_addr_t dma_addr;
++
++	FH_DEBUGPL(DBG_PCDV, "%s(%p,%p,%d)\n",
++		    __func__, usb_ep, usb_req, gfp_flags);
++
++	if (!usb_req || !usb_req->complete || !usb_req->buf) {
++		FH_WARN("bad params\n");
++		return -EINVAL;
++	}
++
++	if (!usb_ep) {
++		FH_WARN("bad ep\n");
++		return -EINVAL;
++	}
++
++	pcd = gadget_wrapper->pcd;
++	if (!gadget_wrapper->driver ||
++	    gadget_wrapper->gadget.speed == USB_SPEED_UNKNOWN) {
++		FH_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n",
++			    gadget_wrapper->gadget.speed);
++		FH_WARN("bogus device state\n");
++		return -ESHUTDOWN;
++	}
++
++	FH_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n",
++		    usb_ep->name, usb_req, usb_req->length, usb_req->buf);
++
++	usb_req->status = -EINPROGRESS;
++	usb_req->actual = 0;
++
++	ep = ep_from_handle(pcd, usb_ep);
++	if (ep == NULL) {
++		is_isoc_ep = 0;
++		is_in_ep = 0;
++	} else {
++		is_isoc_ep = (ep->fh_ep.type == FH_OTG_EP_TYPE_ISOC) ? 1 : 0;
++		is_in_ep = ep->fh_ep.is_in;
++	}
++
++	dma_addr = usb_req->dma;
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)
++	if (GET_CORE_IF(pcd)->dma_enable) {
++		struct platform_device *dev =
++		    gadget_wrapper->pcd->otg_dev->os_dep.pdev;
++		if (dma_addr == FH_DMA_ADDR_INVALID) {
++			if (usb_req->length != 0) {
++				dma_addr = dma_map_single(dev, usb_req->buf,
++					usb_req->length, is_in_ep ?
++					DMA_TO_DEVICE : DMA_FROM_DEVICE);
++				usb_req->dma = dma_addr;
++			} else {
++				dma_addr = 0;
++			}
++		}
++	}
++#endif
++
++#ifdef FH_UTE_PER_IO
++	if (is_isoc_ep == 1) {
++		retval =
++		    fh_otg_pcd_xiso_ep_queue(pcd, usb_ep, usb_req->buf,
++					      dma_addr, usb_req->length,
++					      usb_req->zero, usb_req,
++					      gfp_flags == GFP_ATOMIC ? 1 : 0,
++					      &usb_req->ext_req);
++		if (retval)
++			return -EINVAL;
++
++		return 0;
++	}
++#endif
++	retval = fh_otg_pcd_ep_queue(pcd, usb_ep, usb_req->buf, dma_addr,
++				      usb_req->length, usb_req->zero, usb_req,
++				      gfp_flags == GFP_ATOMIC ? 1 : 0);
++	if (retval) {
++		return -EINVAL;
++	}
++
++	return 0;
++}
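++
++#if 0
++/*
++ * Usage sketch (not part of the original driver): the request
++ * lifecycle as seen from a gadget driver. All sketch_* names are
++ * hypothetical.
++ */
++static void sketch_complete(struct usb_ep *ep, struct usb_request *req)
++{
++	/* req->status and req->actual are valid here. */
++}
++
++static int sketch_submit(struct usb_ep *ep, void *buf, unsigned len)
++{
++	/* Lands in fh_otg_pcd_alloc_request() above, which marks
++	 * req->dma invalid so ep_queue() maps the buffer itself. */
++	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
++
++	if (!req)
++		return -ENOMEM;
++	req->buf = buf;
++	req->length = len;
++	req->complete = sketch_complete;
++	/* Lands in ep_queue() above. */
++	return usb_ep_queue(ep, req, GFP_ATOMIC);
++}
++#endif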
++
++/**
++ * This function cancels an I/O request from an EP.
++ */
++static int ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
++{
++	FH_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, usb_ep, usb_req);
++
++	if (!usb_ep || !usb_req) {
++		FH_WARN("bad argument\n");
++		return -EINVAL;
++	}
++	if (!gadget_wrapper->driver ||
++	    gadget_wrapper->gadget.speed == USB_SPEED_UNKNOWN) {
++		FH_WARN("bogus device state\n");
++		return -ESHUTDOWN;
++	}
++	if (fh_otg_pcd_ep_dequeue(gadget_wrapper->pcd, usb_ep, usb_req)) {
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++/**
++ * usb_ep_set_halt stalls an endpoint.
++ *
++ * usb_ep_clear_halt clears an endpoint halt and resets its data
++ * toggle.
++ *
++ * Both of these functions are implemented with the same underlying
++ * function. The behavior depends on the value argument.
++ *
++ * @param[in] usb_ep the Endpoint to halt or clear halt.
++ * @param[in] value
++ *	- 0 means clear_halt.
++ *	- 1 means set_halt,
++ *	- 2 means clear stall lock flag.
++ *	- 3 means set  stall lock flag.
++ */
++static int ep_halt(struct usb_ep *usb_ep, int value)
++{
++	int retval = 0;
++
++	FH_DEBUGPL(DBG_PCD, "HALT %s %d\n", usb_ep->name, value);
++
++	if (!usb_ep) {
++		FH_WARN("bad ep\n");
++		return -EINVAL;
++	}
++
++	retval = fh_otg_pcd_ep_halt(gadget_wrapper->pcd, usb_ep, value);
++	if (retval == -FH_E_AGAIN) {
++		return -EAGAIN;
++	} else if (retval) {
++		retval = -EINVAL;
++	}
++
++	return retval;
++}
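++
++#if 0
++/*
++ * Usage sketch (not part of the original driver): the standard
++ * wrappers map onto the value argument of ep_halt() above -
++ * usb_ep_set_halt() passes 1, usb_ep_clear_halt() passes 0, and
++ * ep_wedge() below passes 3 to also set the stall lock flag.
++ */
++static void sketch_stall_then_recover(struct usb_ep *ep)
++{
++	if (usb_ep_set_halt(ep) == -EAGAIN) {
++		/* A transfer is still in progress; retry later. */
++	}
++	usb_ep_clear_halt(ep);	/* also resets the data toggle */
++}
++#endif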
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
++static int ep_wedge(struct usb_ep *usb_ep)
++{
++	FH_DEBUGPL(DBG_PCD, "WEDGE %s\n", usb_ep->name);
++
++	return ep_halt(usb_ep, 3);
++}
++#endif
++
++#ifdef FH_EN_ISOC
++/**
++ * This function is used to submit an ISOC Transfer Request to an EP.
++ *
++ *	- Every time a sync period completes the request's completion callback
++ *	  is called to provide data to the gadget driver.
++ *	- Once submitted the request cannot be modified.
++ *	- Each request is turned into periodic data packets until the
++ *	  ISO transfer is stopped.
++ */
++static int iso_ep_start(struct usb_ep *usb_ep, struct usb_iso_request *req,
++			gfp_t gfp_flags)
++{
++	int retval = 0;
++
++	if (!req || !req->process_buffer || !req->buf0 || !req->buf1) {
++		FH_WARN("bad params\n");
++		return -EINVAL;
++	}
++
++	if (!usb_ep) {
++		FH_WARN("bad ep\n");
++		return -EINVAL;
++	}
++
++	req->status = -EINPROGRESS;
++
++	retval =
++	    fh_otg_pcd_iso_ep_start(gadget_wrapper->pcd, usb_ep, req->buf0,
++				     req->buf1, req->dma0, req->dma1,
++				     req->sync_frame, req->data_pattern_frame,
++				     req->data_per_frame,
++				     req->flags & USB_REQ_ISO_ASAP ? -1 :
++				     req->start_frame, req->buf_proc_intrvl,
++				     req, gfp_flags == GFP_ATOMIC ? 1 : 0);
++
++	if (retval) {
++		return -EINVAL;
++	}
++
++	return retval;
++}
++
++/**
++ * This function stops ISO EP Periodic Data Transfer.
++ */
++static int iso_ep_stop(struct usb_ep *usb_ep, struct usb_iso_request *req)
++{
++	int retval = 0;
++	if (!usb_ep) {
++		FH_WARN("bad ep\n");
++		return -EINVAL;
++	}
++
++	if (!gadget_wrapper->driver ||
++	    gadget_wrapper->gadget.speed == USB_SPEED_UNKNOWN) {
++		FH_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n",
++			    gadget_wrapper->gadget.speed);
++		FH_WARN("bogus device state\n");
++	}
++
++	retval = fh_otg_pcd_iso_ep_stop(gadget_wrapper->pcd, usb_ep, req);
++	if (retval) {
++		retval = -EINVAL;
++	}
++
++	return retval;
++}
++
++static struct usb_iso_request *alloc_iso_request(struct usb_ep *ep,
++						 int packets, gfp_t gfp_flags)
++{
++	struct usb_iso_request *pReq = NULL;
++	uint32_t req_size;
++
++	req_size = sizeof(struct usb_iso_request);
++	req_size +=
++	    (2 * packets * (sizeof(struct usb_gadget_iso_packet_descriptor)));
++
++	pReq = kmalloc(req_size, gfp_flags);
++	if (!pReq) {
++		FH_WARN("Can't allocate Iso Request\n");
++		return 0;
++	}
++	pReq->iso_packet_desc0 = (void *)(pReq + 1);
++
++	pReq->iso_packet_desc1 = pReq->iso_packet_desc0 + packets;
++
++	return pReq;
++}
++
++static void free_iso_request(struct usb_ep *ep, struct usb_iso_request *req)
++{
++	kfree(req);
++}
++
++static struct usb_isoc_ep_ops fh_otg_pcd_ep_ops = {
++	.ep_ops = {
++		.enable = ep_enable,
++		.disable = ep_disable,
++
++		.alloc_request = fh_otg_pcd_alloc_request,
++		.free_request = fh_otg_pcd_free_request,
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++		.alloc_buffer = fh_otg_pcd_alloc_buffer,
++		.free_buffer = fh_otg_pcd_free_buffer,
++#endif
++
++		.queue = ep_queue,
++		.dequeue = ep_dequeue,
++
++		.set_halt = ep_halt,
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
++		.set_wedge = ep_wedge,
++#endif
++		.fifo_status = 0,
++		.fifo_flush = 0,
++	},
++
++	.iso_ep_start = iso_ep_start,
++	.iso_ep_stop = iso_ep_stop,
++	.alloc_iso_request = alloc_iso_request,
++	.free_iso_request = free_iso_request,
++};
++
++#else
++
++static struct usb_ep_ops fh_otg_pcd_ep_ops = {
++	.enable = ep_enable,
++	.disable = ep_disable,
++
++	.alloc_request = fh_otg_pcd_alloc_request,
++	.free_request = fh_otg_pcd_free_request,
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++	.alloc_buffer = fh_otg_pcd_alloc_buffer,
++	.free_buffer = fh_otg_pcd_free_buffer,
++#endif
++
++	.queue = ep_queue,
++	.dequeue = ep_dequeue,
++
++	.set_halt = ep_halt,
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
++	.set_wedge = ep_wedge,
++#endif
++
++	.fifo_status = 0,
++	.fifo_flush = 0,
++
++};
++
++#endif /* FH_EN_ISOC */
++/* Gadget Operations */
++/**
++ * The following gadget operations will be implemented in the FH_otg
++ * PCD. Functions in the API that are not described below are not
++ * implemented.
++ *
++ * The Gadget API provides wrapper functions for each of the function
++ * pointers defined in usb_gadget_ops. The Gadget Driver calls the
++ * wrapper function, which then calls the underlying PCD function. The
++ * following sections are named according to the wrapper functions
++ * (except for ioctl, which doesn't have a wrapper function). Within
++ * each section, the corresponding FH_otg PCD function name is
++ * specified.
++ *
++ */
++
++/**
++ * Gets the USB frame number of the last SOF.
++ */
++static int get_frame_number(struct usb_gadget *gadget)
++{
++	struct gadget_wrapper *d;
++
++	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, gadget);
++
++	if (gadget == 0) {
++		return -ENODEV;
++	}
++
++	d = container_of(gadget, struct gadget_wrapper, gadget);
++	return fh_otg_pcd_get_frame_number(d->pcd);
++}
++
++#ifdef CONFIG_USB_FH_OTG_LPM
++static int test_lpm_enabled(struct usb_gadget *gadget)
++{
++	struct gadget_wrapper *d;
++
++	d = container_of(gadget, struct gadget_wrapper, gadget);
++
++	return fh_otg_pcd_is_lpm_enabled(d->pcd);
++}
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
++static int test_besl_enabled(struct usb_gadget *gadget)
++{
++	struct gadget_wrapper *d;
++
++	d = container_of(gadget, struct gadget_wrapper, gadget);
++
++	return fh_otg_pcd_is_besl_enabled(d->pcd);
++}
++static int get_param_baseline_besl(struct usb_gadget *gadget)
++{
++	struct gadget_wrapper *d;
++
++	d = container_of(gadget, struct gadget_wrapper, gadget);
++
++	return fh_otg_pcd_get_param_baseline_besl(d->pcd);
++}
++static int get_param_deep_besl(struct usb_gadget *gadget)
++{
++	struct gadget_wrapper *d;
++
++	d = container_of(gadget, struct gadget_wrapper, gadget);
++
++	return fh_otg_pcd_get_param_deep_besl(d->pcd);
++}
++#endif
++#endif
++
++/**
++ * Initiates the Session Request Protocol (SRP) to wake up the host if no
++ * session is in progress. If a session is already in progress, but
++ * the device is suspended, remote wakeup signaling is started.
++ *
++ */
++static int wakeup(struct usb_gadget *gadget)
++{
++	struct gadget_wrapper *d;
++
++	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, gadget);
++
++	if (gadget == 0) {
++		return -ENODEV;
++	} else {
++		d = container_of(gadget, struct gadget_wrapper, gadget);
++	}
++	fh_otg_pcd_wakeup(d->pcd);
++	return 0;
++}
++
++static const struct usb_gadget_ops fh_otg_pcd_ops = {
++	.get_frame = get_frame_number,
++	.wakeup = wakeup,
++#ifdef CONFIG_USB_FH_OTG_LPM
++	.lpm_support = test_lpm_enabled,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
++	.besl_support = test_besl_enabled,
++	.get_baseline_besl = get_param_baseline_besl,
++	.get_deep_besl = get_param_deep_besl,
++#endif
++#endif
++	/* current versions must always be self-powered */
++};
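++
++#if 0
++/*
++ * Usage sketch (not part of the original driver): gadget drivers
++ * reach these ops through the generic wrappers from
++ * <linux/usb/gadget.h>.
++ */
++static void sketch_gadget_ops(struct usb_gadget *gadget)
++{
++	int frame = usb_gadget_frame_number(gadget);	/* -> get_frame_number() */
++
++	usb_gadget_wakeup(gadget);			/* -> wakeup() */
++	(void)frame;
++}
++#endif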
++
++static int _setup(fh_otg_pcd_t * pcd, uint8_t * bytes)
++{
++	int retval = -FH_E_NOT_SUPPORTED;
++	if (gadget_wrapper->driver && gadget_wrapper->driver->setup) {
++		retval = gadget_wrapper->driver->setup(&gadget_wrapper->gadget,
++						       (struct usb_ctrlrequest
++							*)bytes);
++	}
++
++	if (retval == -ENOTSUPP) {
++		retval = -FH_E_NOT_SUPPORTED;
++	} else if (retval < 0) {
++		retval = -FH_E_INVALID;
++	}
++
++	return retval;
++}
++
++#ifdef FH_EN_ISOC
++static int _isoc_complete(fh_otg_pcd_t * pcd, void *ep_handle,
++			  void *req_handle, int proc_buf_num)
++{
++	int i, packet_count;
++	struct usb_gadget_iso_packet_descriptor *iso_packet = 0;
++	struct usb_iso_request *iso_req = req_handle;
++
++	if (proc_buf_num) {
++		iso_packet = iso_req->iso_packet_desc1;
++	} else {
++		iso_packet = iso_req->iso_packet_desc0;
++	}
++	packet_count =
++	    fh_otg_pcd_get_iso_packet_count(pcd, ep_handle, req_handle);
++	for (i = 0; i < packet_count; ++i) {
++		int status;
++		int actual;
++		int offset;
++		fh_otg_pcd_get_iso_packet_params(pcd, ep_handle, req_handle,
++						  i, &status, &actual, &offset);
++		switch (status) {
++		case -FH_E_NO_DATA:
++			status = -ENODATA;
++			break;
++		default:
++			if (status) {
++				FH_PRINTF("unknown status in isoc packet\n");
++			}
++
++		}
++		iso_packet[i].status = status;
++		iso_packet[i].offset = offset;
++		iso_packet[i].actual_length = actual;
++	}
++
++	iso_req->status = 0;
++	iso_req->process_buffer(ep_handle, iso_req);
++
++	return 0;
++}
++#endif /* FH_EN_ISOC */
++
++#ifdef FH_UTE_PER_IO
++/**
++ * Copy the contents of the extended request to the Linux usb_request's
++ * extended part and call the gadget's completion.
++ *
++ * @param pcd			Pointer to the pcd structure
++ * @param ep_handle		Void pointer to the usb_ep structure
++ * @param req_handle	Void pointer to the usb_request structure
++ * @param status		Request status returned from the portable logic
++ * @param ereq_port		Void pointer to the extended request structure
++ *						created in the portable part that contains the
++ *						results of the processed iso packets.
++ */
++static int _xisoc_complete(fh_otg_pcd_t * pcd, void *ep_handle,
++			   void *req_handle, int32_t status, void *ereq_port)
++{
++	struct fh_ute_iso_req_ext *ereqorg = NULL;
++	struct fh_iso_xreq_port *ereqport = NULL;
++	struct fh_ute_iso_packet_descriptor *desc_org = NULL;
++	int i;
++	struct usb_request *req;
++
++	req = (struct usb_request *)req_handle;
++	ereqorg = &req->ext_req;
++	ereqport = (struct fh_iso_xreq_port *)ereq_port;
++	desc_org = ereqorg->per_io_frame_descs;
++
++	if (req && req->complete) {
++		/* Copy the request data from the portable logic to our request */
++		for (i = 0; i < ereqport->pio_pkt_count; i++) {
++			desc_org[i].actual_length =
++			    ereqport->per_io_frame_descs[i].actual_length;
++			desc_org[i].status =
++			    ereqport->per_io_frame_descs[i].status;
++		}
++
++		switch (status) {
++		case -FH_E_SHUTDOWN:
++			req->status = -ESHUTDOWN;
++			break;
++		case -FH_E_RESTART:
++			req->status = -ECONNRESET;
++			break;
++		case -FH_E_INVALID:
++			req->status = -EINVAL;
++			break;
++		case -FH_E_TIMEOUT:
++			req->status = -ETIMEDOUT;
++			break;
++		default:
++			req->status = status;
++		}
++
++		/* And call the gadget's completion */
++		req->complete(ep_handle, req);
++	}
++
++	return 0;
++}
++#endif /* FH_UTE_PER_IO */
++
++static int _complete(fh_otg_pcd_t *pcd, void *ep_handle,
++		     void *req_handle, int32_t status, uint32_t actual)
++{
++	struct usb_request *req = (struct usb_request *)req_handle;
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)
++	if (GET_CORE_IF(pcd)->dma_enable && req->length != 0) {
++		struct platform_device *dev =
++			gadget_wrapper->pcd->otg_dev->os_dep.pdev;
++		struct fh_otg_pcd_ep *ep = ep_from_handle(pcd, ep_handle);
++		int is_in_ep = 0;
++
++		if (ep)
++			is_in_ep = ep->fh_ep.is_in;
++
++		dma_unmap_single(dev, req->dma, req->length, is_in_ep ?
++				 DMA_TO_DEVICE : DMA_FROM_DEVICE);
++		req->dma = FH_DMA_ADDR_INVALID;
++	}
++#endif
++
++	if (req && req->complete) {
++		switch (status) {
++		case -FH_E_SHUTDOWN:
++			req->status = -ESHUTDOWN;
++			break;
++		case -FH_E_RESTART:
++			req->status = -ECONNRESET;
++			break;
++		case -FH_E_INVALID:
++			req->status = -EINVAL;
++			break;
++		case -FH_E_TIMEOUT:
++			req->status = -ETIMEDOUT;
++			break;
++		default:
++			req->status = status;
++
++		}
++
++		req->actual = actual;
++		FH_SPINUNLOCK(pcd->lock);
++		req->complete(ep_handle, req);
++		FH_SPINLOCK(pcd->lock);
++	}
++
++	return 0;
++}
++
++static int _connect(fh_otg_pcd_t * pcd, int speed)
++{
++	gadget_wrapper->gadget.speed = speed;
++	return 0;
++}
++
++static int _disconnect(fh_otg_pcd_t * pcd)
++{
++	if (gadget_wrapper->driver && gadget_wrapper->driver->disconnect) {
++		gadget_wrapper->driver->disconnect(&gadget_wrapper->gadget);
++	}
++	return 0;
++}
++
++static int _resume(fh_otg_pcd_t * pcd)
++{
++	if (gadget_wrapper->driver && gadget_wrapper->driver->resume) {
++		gadget_wrapper->driver->resume(&gadget_wrapper->gadget);
++	}
++
++	return 0;
++}
++
++static int _suspend(fh_otg_pcd_t * pcd)
++{
++	if (gadget_wrapper->driver && gadget_wrapper->driver->suspend) {
++		gadget_wrapper->driver->suspend(&gadget_wrapper->gadget);
++	}
++	return 0;
++}
++
++/**
++ * This function updates the otg values in the gadget structure.
++ */
++static int _hnp_changed(fh_otg_pcd_t * pcd)
++{
++
++	if (!gadget_wrapper->gadget.is_otg)
++		return 0;
++
++	gadget_wrapper->gadget.b_hnp_enable = get_b_hnp_enable(pcd);
++	gadget_wrapper->gadget.a_hnp_support = get_a_hnp_support(pcd);
++	gadget_wrapper->gadget.a_alt_hnp_support = get_a_alt_hnp_support(pcd);
++	return 0;
++}
++
++static int _reset(fh_otg_pcd_t * pcd)
++{
++	return 0;
++}
++
++#ifdef FH_UTE_CFI
++static int _cfi_setup(fh_otg_pcd_t * pcd, void *cfi_req)
++{
++	int retval = -FH_E_INVALID;
++	if (gadget_wrapper->driver->cfi_feature_setup) {
++		retval =
++		    gadget_wrapper->driver->
++		    cfi_feature_setup(&gadget_wrapper->gadget,
++				      (struct cfi_usb_ctrlrequest *)cfi_req);
++	}
++
++	return retval;
++}
++#endif
++
++static const struct fh_otg_pcd_function_ops fops = {
++	.complete = _complete,
++#ifdef FH_EN_ISOC
++	.isoc_complete = _isoc_complete,
++#endif
++	.setup = _setup,
++	.disconnect = _disconnect,
++	.connect = _connect,
++	.resume = _resume,
++	.suspend = _suspend,
++	.hnp_changed = _hnp_changed,
++	.reset = _reset,
++#ifdef FH_UTE_CFI
++	.cfi_setup = _cfi_setup,
++#endif
++#ifdef FH_UTE_PER_IO
++	.xisoc_complete = _xisoc_complete,
++#endif
++};
++
++/**
++ * This function is the top level PCD interrupt handler.
++ */
++static irqreturn_t fh_otg_pcd_irq(int irq, void *dev)
++{
++	fh_otg_pcd_t *pcd = dev;
++	int32_t retval = IRQ_NONE;
++
++	retval = fh_otg_pcd_handle_intr(pcd);
++	if (retval != 0) {
++		S3C2410X_CLEAR_EINTPEND();
++	}
++	return IRQ_RETVAL(retval);
++}
++
++/**
++ * This function initializes the usb_ep structures to their default
++ * state.
++ *
++ * @param d Pointer to the gadget_wrapper.
++ */
++void gadget_add_eps(struct gadget_wrapper *d)
++{
++	static const char *names[] = {
++
++		"ep0",
++		"ep1in",
++		"ep2out",
++		"ep3in",
++		"ep4out",
++		"ep5in",
++		"ep6out",
++		"ep7in",
++		"ep8out",
++		"ep9in",
++		"ep10out",
++		"ep11in",
++		"ep12out",
++	};
++
++	int i;
++	struct usb_ep *ep;
++	int8_t dev_endpoints;
++
++	FH_DEBUGPL(DBG_PCDV, "%s\n", __func__);
++
++	INIT_LIST_HEAD(&d->gadget.ep_list);
++	d->gadget.ep0 = &d->ep0;
++	d->gadget.speed = USB_SPEED_UNKNOWN;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
++	d->gadget.max_speed = USB_SPEED_HIGH;
++#endif
++
++	INIT_LIST_HEAD(&d->gadget.ep0->ep_list);
++
++	/**
++	 * Initialize the EP0 structure.
++	 */
++	ep = &d->ep0;
++
++	/* Init the usb_ep structure. */
++	ep->name = names[0];
++	ep->ops = (struct usb_ep_ops *)&fh_otg_pcd_ep_ops;
++
++	/**
++	 * @todo NGS: What should the max packet size be set to
++	 * here?  Before EP type is set?
++	 */
++	ep->maxpacket = MAX_PACKET_SIZE;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
++	ep->maxpacket_limit = MAX_PACKET_SIZE;
++#endif
++
++	fh_otg_pcd_ep_enable(d->pcd, NULL, ep);
++
++	list_add_tail(&ep->ep_list, &d->gadget.ep_list);
++
++	/**
++	 * Initialize the EP structures.
++	 */
++	dev_endpoints = d->pcd->core_if->dev_if->num_in_eps;
++
++	for (i = 0; i < dev_endpoints; i++) {
++		ep = &d->in_ep[i];
++
++		/* Init the usb_ep structure. */
++		ep->name = names[d->pcd->in_ep[i].fh_ep.num];
++		ep->ops = (struct usb_ep_ops *)&fh_otg_pcd_ep_ops;
++
++		/**
++		 * @todo NGS: What should the max packet size be set to
++		 * here?  Before EP type is set?
++		 */
++		ep->maxpacket = MAX_PACKET_SIZE;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
++		ep->maxpacket_limit = MAX_PACKET_SIZE;
++#endif
++
++		list_add_tail(&ep->ep_list, &d->gadget.ep_list);
++	}
++
++	dev_endpoints = d->pcd->core_if->dev_if->num_out_eps;
++
++	for (i = 0; i < dev_endpoints; i++) {
++		ep = &d->out_ep[i];
++
++		/* Init the usb_ep structure. */
++		ep->name = names[d->pcd->out_ep[i].fh_ep.num];
++		ep->ops = (struct usb_ep_ops *)&fh_otg_pcd_ep_ops;
++
++		/**
++		 * @todo NGS: What should the max packet size be set to
++		 * here?  Before EP type is set?
++		 */
++		ep->maxpacket = MAX_PACKET_SIZE;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
++		ep->maxpacket_limit = MAX_PACKET_SIZE;
++#endif
++
++		list_add_tail(&ep->ep_list, &d->gadget.ep_list);
++	}
++
++	/* Remove ep0 from the list; it remains reachable via the
++	 * dedicated ep0 pointer. */
++	list_del_init(&d->ep0.ep_list);
++
++	d->ep0.maxpacket = MAX_EP0_SIZE;
++}
++
++/**
++ * This function releases the Gadget device, as required by
++ * device_unregister().
++ *
++ * @todo Should this do something?	Should it free the PCD?
++ */
++static void fh_otg_pcd_gadget_release(struct device *dev)
++{
++	FH_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, dev);
++}
++
++static struct gadget_wrapper *alloc_wrapper(struct platform_device *_dev)
++{
++	static char pcd_name[] = "fh_otg";
++	fh_otg_device_t *otg_dev = platform_get_drvdata(_dev);
++
++	struct gadget_wrapper *d;
++	int retval;
++
++	d = FH_ALLOC(sizeof(*d));
++	if (d == NULL) {
++		return NULL;
++	}
++
++	memset(d, 0, sizeof(*d));
++
++	d->gadget.name = pcd_name;
++	d->pcd = otg_dev->pcd;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
++	strcpy(d->gadget.dev.bus_id, "gadget");
++#else
++	dev_set_name(&d->gadget.dev, "%s", "gadget");
++#endif
++
++	d->gadget.dev.parent = &_dev->dev;
++	d->gadget.dev.release = fh_otg_pcd_gadget_release;
++	d->gadget.ops = &fh_otg_pcd_ops;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
++	d->gadget.is_dualspeed = fh_otg_pcd_is_dualspeed(otg_dev->pcd);
++#endif
++	d->gadget.is_otg = fh_otg_pcd_is_otg(otg_dev->pcd);
++
++	d->driver = 0;
++	/* Register the gadget device */
++	retval = device_register(&d->gadget.dev);
++	if (retval != 0) {
++		FH_ERROR("device_register failed\n");
++		FH_FREE(d);
++		return NULL;
++	}
++
++	return d;
++}
++
++static void free_wrapper(struct gadget_wrapper *d)
++{
++	if (d->driver) {
++		/* should have been done already by driver model core */
++		FH_WARN("driver '%s' is still registered\n",
++			 d->driver->driver.name);
++		usb_gadget_unregister_driver(d->driver);
++	}
++
++	device_unregister(&d->gadget.dev);
++	FH_FREE(d);
++}
++
++/**
++ * This function initializes the PCD portion of the driver.
++ *
++ */
++int pcd_init(struct platform_device *dev, int irq)
++{
++	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
++	int retval = 0;
++
++	printk(KERN_ERR "%s(%p)\n", __func__, dev);
++
++	otg_dev->pcd = fh_otg_pcd_init(otg_dev->core_if);
++
++	if (!otg_dev->pcd) {
++		FH_ERROR("fh_otg_pcd_init failed\n");
++		return -ENOMEM;
++	}
++
++	otg_dev->pcd->otg_dev = otg_dev;
++	gadget_wrapper = alloc_wrapper(dev);
++
++	/*
++	 * Initialize EP structures
++	 */
++	gadget_add_eps(gadget_wrapper);
++	/*
++	 * Set up the interrupt handler
++	 */
++
++	retval = request_irq(irq, fh_otg_pcd_irq,
++			     IRQF_SHARED | IRQF_DISABLED,
++			     gadget_wrapper->gadget.name, otg_dev->pcd);
++	if (retval != 0) {
++		FH_ERROR("request of irq%d failed\n", irq);
++		free_wrapper(gadget_wrapper);
++		return -EBUSY;
++	}
++
++	fh_otg_pcd_start(gadget_wrapper->pcd, &fops);
++	platform_set_drvdata(dev, otg_dev);
++
++	return retval;
++}
++
++/**
++ * Cleanup the PCD.
++ */
++void pcd_remove(struct platform_device *dev, int irq)
++{
++	fh_otg_device_t *otg_dev = platform_get_drvdata(dev);
++	fh_otg_pcd_t *pcd = otg_dev->pcd;
++
++	printk(KERN_ERR "%s(%p)(%p)\n", __func__, dev, otg_dev);
++
++	/*
++	 * Free the IRQ
++	 */
++	printk(KERN_ERR "pcd free irq :%d\n", irq);
++	free_irq(irq, pcd);
++	free_wrapper(gadget_wrapper);
++	fh_otg_pcd_remove(otg_dev->pcd);
++	otg_dev->pcd = 0;
++}
++
++/**
++ * This function registers a gadget driver with the PCD.
++ *
++ * When a driver is successfully registered, it will receive control
++ * requests including set_configuration(), which enables non-control
++ * requests. USB traffic then follows until a disconnect is reported.
++ * The host may then connect again, or the driver might get unbound.
++ *
++ * @param driver The driver being registered
++ * @param bind The bind function of gadget driver
++ */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
++int usb_gadget_register_driver(struct usb_gadget_driver *driver)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
++int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
++#else
++int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
++		int (*bind)(struct usb_gadget *))
++#endif
++{
++	int retval;
++
++	if (!driver ||
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
++		driver->speed == USB_SPEED_UNKNOWN ||
++#else
++		driver->max_speed == USB_SPEED_UNKNOWN ||
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
++	    !driver->bind ||
++#else
++		!bind ||
++#endif
++	    !driver->unbind || !driver->disconnect || !driver->setup) {
++		FH_DEBUGPL(DBG_PCDV, "EINVAL\n");
++		return -EINVAL;
++	}
++	if (gadget_wrapper == 0) {
++		FH_DEBUGPL(DBG_PCDV, "ENODEV\n");
++		return -ENODEV;
++	}
++	if (gadget_wrapper->driver != 0) {
++		FH_DEBUGPL(DBG_PCDV, "EBUSY (%p)\n", gadget_wrapper->driver);
++		return -EBUSY;
++	}
++
++	FH_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n",
++		    driver->driver.name);
++
++	/* hook up the driver */
++	gadget_wrapper->driver = driver;
++	gadget_wrapper->gadget.dev.driver = &driver->driver;
++
++	FH_DEBUGPL(DBG_PCD, "bind to driver %s\n", driver->driver.name);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
++	retval = driver->bind(&gadget_wrapper->gadget);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
++	retval = driver->bind(&gadget_wrapper->gadget, gadget_wrapper->driver);
++#else
++	retval = bind(&gadget_wrapper->gadget);
++#endif
++	if (retval) {
++		FH_ERROR("bind to driver %s --> error %d\n",
++			  driver->driver.name, retval);
++		gadget_wrapper->driver = 0;
++		gadget_wrapper->gadget.dev.driver = 0;
++		return retval;
++	}
++	FH_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n",
++		    driver->driver.name);
++	return 0;
++}
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
++EXPORT_SYMBOL(usb_gadget_register_driver);
++#else
++EXPORT_SYMBOL(usb_gadget_probe_driver);
++#endif
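++
++#if 0
++/*
++ * Usage sketch (not part of the original driver): the minimal set of
++ * callbacks the checks above require before a driver is accepted,
++ * shown for the pre-2.6.37 registration API. All sketch_* names are
++ * hypothetical.
++ */
++static struct usb_gadget_driver sketch_driver = {
++	.function	= "sketch",
++	.speed		= USB_SPEED_HIGH,	/* .max_speed on >= 3.3 */
++	.bind		= sketch_bind,
++	.unbind		= sketch_unbind,
++	.setup		= sketch_setup,
++	.disconnect	= sketch_disconnect,
++	.driver		= { .name = "sketch_gadget" },
++};
++
++/* Registered with: usb_gadget_register_driver(&sketch_driver); */
++#endif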
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,1)
++
++int usb_udc_attach_driver(const char *name, struct usb_gadget_driver *driver)
++{
++	int retval;
++
++	if (strcmp(name, "fh_otg")) {
++		FH_ERROR("no FH device found\n");
++		return -ENODEV;
++	}
++
++	FH_DEBUGPL(DBG_PCD, "Registering gadget driver '%s'\n",
++		    driver->driver.name);
++
++	if (!driver || driver->max_speed == USB_SPEED_UNKNOWN || !driver->bind ||
++	    !driver->unbind || !driver->disconnect || !driver->setup) {
++		FH_DEBUGPL(DBG_PCDV, "EINVAL\n");
++		return -EINVAL;
++	}
++	if (gadget_wrapper == 0) {
++		FH_DEBUGPL(DBG_PCDV, "ENODEV\n");
++		return -ENODEV;
++	}
++	if (gadget_wrapper->driver != 0) {
++		FH_DEBUGPL(DBG_PCDV, "EBUSY (%p)\n", gadget_wrapper->driver);
++		return -EBUSY;
++	}
++
++	/* hook up the driver */
++	gadget_wrapper->driver = driver;
++	gadget_wrapper->gadget.dev.driver = &driver->driver;
++
++	FH_DEBUGPL(DBG_PCD, "bind to driver %s\n", driver->driver.name);
++	retval = driver->bind(&gadget_wrapper->gadget, gadget_wrapper->driver);
++	if (retval) {
++		FH_ERROR("bind to driver %s --> error %d\n",
++			  driver->driver.name, retval);
++		gadget_wrapper->driver = 0;
++		gadget_wrapper->gadget.dev.driver = 0;
++		return retval;
++	}
++	FH_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n",
++		    driver->driver.name);
++	return 0;
++}
++EXPORT_SYMBOL(usb_udc_attach_driver);
++
++void usb_gadget_set_state(struct usb_gadget *gadget,
++			  enum usb_device_state state)
++{
++	gadget->state = state;
++	FH_SCHEDULE_SYSTEM_WORK(&gadget->work);
++}
++EXPORT_SYMBOL_GPL(usb_gadget_set_state);
++
++#endif
++
++/**
++ * This function unregisters a gadget driver
++ *
++ * @param driver The driver being unregistered
++ */
++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
++{
++
++	if (gadget_wrapper == 0) {
++		FH_DEBUGPL(DBG_ANY, "%s Return(%d): s_pcd==0\n", __func__,
++			    -ENODEV);
++		return -ENODEV;
++	}
++	if (driver == 0 || driver != gadget_wrapper->driver) {
++		FH_DEBUGPL(DBG_ANY, "%s Return(%d): driver?\n", __func__,
++			    -EINVAL);
++		return -EINVAL;
++	}
++
++	driver->unbind(&gadget_wrapper->gadget);
++	gadget_wrapper->driver = 0;
++
++	FH_DEBUGPL(DBG_ANY, "unregistered driver '%s'\n", driver->driver.name);
++	return 0;
++}
++
++EXPORT_SYMBOL(usb_gadget_unregister_driver);
++
++#endif /* FH_HOST_ONLY */
+diff --git a/drivers/usb/host/fh_otg/fh_otg/fh_otg_regs.h b/drivers/usb/host/fh_otg/fh_otg/fh_otg_regs.h
+new file mode 100644
+index 00000000..e1070282
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/fh_otg_regs.h
+@@ -0,0 +1,2558 @@
++/* ==========================================================================
++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/fh_otg_regs.h $
++ * $Revision: #105 $
++ * $Date: 2015/10/12 $
++ * $Change: 2972621 $
++ *
++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Product
++ * with Synopsys or any supplement thereto. You are permitted to use and
++ * redistribute this Software in source and binary forms, with or without
++ * modification, provided that redistributions of source code must retain this
++ * notice. You may not view, use, disclose, copy or distribute this file or
++ * any information contained herein except pursuant to this license grant from
++ * Synopsys. If you do not agree with this notice, including the disclaimer
++ * below, then you are not authorized to use the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++#ifndef __FH_OTG_REGS_H__
++#define __FH_OTG_REGS_H__
++
++#include "fh_otg_core_if.h"
++
++/**
++ * @file
++ *
++ * This file contains the data structures for accessing the FH_otg core registers.
++ *
++ * The application interfaces with the HS OTG core by reading from and
++ * writing to the Control and Status Register (CSR) space through the
++ * AHB Slave interface. These registers are 32 bits wide, and the
++ * addresses are 32-bit-block aligned.
++ * CSRs are classified as follows:
++ * - Core Global Registers
++ * - Device Mode Registers
++ * - Device Global Registers
++ * - Device Endpoint Specific Registers
++ * - Host Mode Registers
++ * - Host Global Registers
++ * - Host Port CSRs
++ * - Host Channel Specific Registers
++ *
++ * Only the Core Global registers can be accessed in both Device and
++ * Host modes. When the HS OTG core is operating in one mode, either
++ * Device or Host, the application must not access registers from the
++ * other mode. When the core switches from one mode to another, the
++ * registers in the new mode of operation must be reprogrammed as they
++ * would be after a power-on reset.
++ */
++
++/****************************************************************************/
++/** FH_otg Core registers.
++ * The fh_otg_core_global_regs structure defines the size
++ * and relative field offsets for the Core Global registers.
++ */
++typedef struct fh_otg_core_global_regs {
++	/** OTG Control and Status Register.  <i>Offset: 000h</i> */
++	volatile uint32_t gotgctl;
++	/** OTG Interrupt Register.	 <i>Offset: 004h</i> */
++	volatile uint32_t gotgint;
++	/**Core AHB Configuration Register.	 <i>Offset: 008h</i> */
++	volatile uint32_t gahbcfg;
++
++#define FH_GLBINTRMASK		0x0001
++#define FH_DMAENABLE		0x0020
++#define FH_NPTXEMPTYLVL_EMPTY	0x0080
++#define FH_NPTXEMPTYLVL_HALFEMPTY	0x0000
++#define FH_PTXEMPTYLVL_EMPTY	0x0100
++#define FH_PTXEMPTYLVL_HALFEMPTY	0x0000
++
++	/**Core USB Configuration Register.	 <i>Offset: 00Ch</i> */
++	volatile uint32_t gusbcfg;
++	/**Core Reset Register.	 <i>Offset: 010h</i> */
++	volatile uint32_t grstctl;
++	/**Core Interrupt Register.	 <i>Offset: 014h</i> */
++	volatile uint32_t gintsts;
++	/**Core Interrupt Mask Register.  <i>Offset: 018h</i> */
++	volatile uint32_t gintmsk;
++	/**Receive Status Queue Read Register (Read Only).	<i>Offset: 01Ch</i> */
++	volatile uint32_t grxstsr;
++	/**Receive Status Queue Read & POP Register (Read Only).  <i>Offset: 020h</i>*/
++	volatile uint32_t grxstsp;
++	/**Receive FIFO Size Register.	<i>Offset: 024h</i> */
++	volatile uint32_t grxfsiz;
++	/**Non Periodic Transmit FIFO Size Register.  <i>Offset: 028h</i> */
++	volatile uint32_t gnptxfsiz;
++	/**Non Periodic Transmit FIFO/Queue Status Register (Read
++	 * Only). <i>Offset: 02Ch</i> */
++	volatile uint32_t gnptxsts;
++	/**I2C Access Register.	 <i>Offset: 030h</i> */
++	volatile uint32_t gi2cctl;
++	/**PHY Vendor Control Register.	 <i>Offset: 034h</i> */
++	volatile uint32_t gpvndctl;
++	/**General Purpose Input/Output Register.  <i>Offset: 038h</i> */
++	volatile uint32_t ggpio;
++	/**User ID Register.  <i>Offset: 03Ch</i> */
++	volatile uint32_t guid;
++	/**Synopsys ID Register (Read Only).  <i>Offset: 040h</i> */
++	volatile uint32_t gsnpsid;
++	/**User HW Config1 Register (Read Only).  <i>Offset: 044h</i> */
++	volatile uint32_t ghwcfg1;
++	/**User HW Config2 Register (Read Only).  <i>Offset: 048h</i> */
++	volatile uint32_t ghwcfg2;
++#define FH_SLAVE_ONLY_ARCH 0
++#define FH_EXT_DMA_ARCH 1
++#define FH_INT_DMA_ARCH 2
++
++#define FH_MODE_HNP_SRP_CAPABLE	0
++#define FH_MODE_SRP_ONLY_CAPABLE	1
++#define FH_MODE_NO_HNP_SRP_CAPABLE		2
++#define FH_MODE_SRP_CAPABLE_DEVICE		3
++#define FH_MODE_NO_SRP_CAPABLE_DEVICE	4
++#define FH_MODE_SRP_CAPABLE_HOST	5
++#define FH_MODE_NO_SRP_CAPABLE_HOST	6
++
++	/**User HW Config3 Register (Read Only).  <i>Offset: 04Ch</i> */
++	volatile uint32_t ghwcfg3;
++	/**User HW Config4 Register (Read Only).  <i>Offset: 050h</i>*/
++	volatile uint32_t ghwcfg4;
++	/** Core LPM Configuration register <i>Offset: 054h</i>*/
++	volatile uint32_t glpmcfg;
++	/** Global PowerDn Register <i>Offset: 058h</i> */
++	volatile uint32_t gpwrdn;
++	/** Global DFIFO SW Config Register  <i>Offset: 05Ch</i> */
++	volatile uint32_t gdfifocfg;
++	/** ADP Control Register  <i>Offset: 060h</i> */
++	volatile uint32_t adpctl;
++	/** Reserved  <i>Offset: 064h-0FFh</i> */
++	volatile uint32_t reserved39[39];
++	/** Host Periodic Transmit FIFO Size Register. <i>Offset: 100h</i> */
++	volatile uint32_t hptxfsiz;
++	/** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled,
++		otherwise Device Transmit FIFO#n Register.
++	 * <i>Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15).</i> */
++	volatile uint32_t dtxfsiz[15];
++
++} fh_otg_core_global_regs_t;
++
++/**
++ * This union represents the bit fields of the Core OTG Control
++ * and Status Register (GOTGCTL).  Set the bits using the bit
++ * fields then write the <i>d32</i> value to the register.
++ */
++typedef union gotgctl_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned sesreqscs:1;
++		unsigned sesreq:1;
++		unsigned vbvalidoven:1;
++		unsigned vbvalidovval:1;
++		unsigned avalidoven:1;
++		unsigned avalidovval:1;
++		unsigned bvalidoven:1;
++		unsigned bvalidovval:1;
++		unsigned hstnegscs:1;
++		unsigned hnpreq:1;
++		unsigned hstsethnpen:1;
++		unsigned devhnpen:1;
++		unsigned reserved12_15:4;
++		unsigned conidsts:1;
++		unsigned dbnctime:1;
++		unsigned asesvld:1;
++		unsigned bsesvld:1;
++		unsigned otgver:1;
++		unsigned reserved1:1;
++		unsigned multvalidbc:5;
++		unsigned chirpen:1;
++		unsigned reserved28_31:4;
++	} b;
++} gotgctl_data_t;
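++
++#if 0
++/*
++ * Usage sketch (not part of the original file): the read/modify/write
++ * pattern these unions are designed for - read the raw d32 value,
++ * adjust a bit field, then write d32 back. The global_regs pointer is
++ * a hypothetical, already-mapped register block.
++ */
++static void sketch_request_session(fh_otg_core_global_regs_t *global_regs)
++{
++	gotgctl_data_t gotgctl;
++
++	gotgctl.d32 = FH_READ_REG32(&global_regs->gotgctl);
++	gotgctl.b.sesreq = 1;	/* start a session request (SRP) */
++	FH_WRITE_REG32(&global_regs->gotgctl, gotgctl.d32);
++}
++#endif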
++
++/**
++ * This union represents the bit fields of the Core OTG Interrupt Register
++ * (GOTGINT).  Set/clear the bits using the bit fields then write the <i>d32</i>
++ * value to the register.
++ */
++typedef union gotgint_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Current Mode */
++		unsigned reserved0_1:2;
++
++		/** Session End Detected */
++		unsigned sesenddet:1;
++
++		unsigned reserved3_7:5;
++
++		/** Session Request Success Status Change */
++		unsigned sesreqsucstschng:1;
++		/** Host Negotiation Success Status Change */
++		unsigned hstnegsucstschng:1;
++
++		unsigned reserved10_16:7;
++
++		/** Host Negotiation Detected */
++		unsigned hstnegdet:1;
++		/** A-Device Timeout Change */
++		unsigned adevtoutchng:1;
++		/** Debounce Done */
++		unsigned debdone:1;
++		/** Multi-Valued input changed */
++		unsigned mvic:1;
++
++		unsigned reserved31_21:11;
++
++	} b;
++} gotgint_data_t;
++
++/**
++ * This union represents the bit fields of the Core AHB Configuration
++ * Register (GAHBCFG). Set/clear the bits using the bit fields then
++ * write the <i>d32</i> value to the register.
++ */
++typedef union gahbcfg_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned glblintrmsk:1;
++#define FH_GAHBCFG_GLBINT_ENABLE		1
++
++		unsigned hburstlen:4;
++#define FH_GAHBCFG_INT_DMA_BURST_SINGLE	0
++#define FH_GAHBCFG_INT_DMA_BURST_INCR		1
++#define FH_GAHBCFG_INT_DMA_BURST_INCR4		3
++#define FH_GAHBCFG_INT_DMA_BURST_INCR8		5
++#define FH_GAHBCFG_INT_DMA_BURST_INCR16	7
++
++		unsigned dmaenable:1;
++#define FH_GAHBCFG_DMAENABLE			1
++		unsigned reserved:1;
++		unsigned nptxfemplvl_txfemplvl:1;
++		unsigned ptxfemplvl:1;
++#define FH_GAHBCFG_TXFEMPTYLVL_EMPTY		1
++#define FH_GAHBCFG_TXFEMPTYLVL_HALFEMPTY	0
++		unsigned reserved9_20:12;
++		unsigned remmemsupp:1;
++		unsigned notialldmawrit:1;
++		unsigned ahbsingle:1;
++		unsigned reserved24_31:8;
++	} b;
++} gahbcfg_data_t;
++
++/**
++ * This union represents the bit fields of the Core USB Configuration
++ * Register (GUSBCFG). Set the bits using the bit fields then write
++ * the <i>d32</i> value to the register.
++ */
++typedef union gusbcfg_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned toutcal:3;
++		unsigned phyif:1;
++		unsigned ulpi_utmi_sel:1;
++		unsigned fsintf:1;
++		unsigned physel:1;
++		unsigned ddrsel:1;
++		unsigned srpcap:1;
++		unsigned hnpcap:1;
++		unsigned usbtrdtim:4;
++		unsigned reserved1:1;
++		unsigned phylpwrclksel:1;
++		unsigned otgutmifssel:1;
++		unsigned ulpi_fsls:1;
++		unsigned ulpi_auto_res:1;
++		unsigned ulpi_clk_sus_m:1;
++		unsigned ulpi_ext_vbus_drv:1;
++		unsigned ulpi_int_vbus_indicator:1;
++		unsigned term_sel_dl_pulse:1;
++		unsigned indicator_complement:1;
++		unsigned indicator_pass_through:1;
++		unsigned ulpi_int_prot_dis:1;
++		unsigned ic_usb_cap:1;
++		unsigned ic_traffic_pull_remove:1;
++		unsigned tx_end_delay:1;
++		unsigned force_host_mode:1;
++		unsigned force_dev_mode:1;
++		unsigned reserved31:1;
++	} b;
++} gusbcfg_data_t;
++
++/**
++ * This union represents the bit fields of the Core Reset Register
++ * (GRSTCTL).  Set/clear the bits using the bit fields then write the
++ * <i>d32</i> value to the register.
++ */
++typedef union grstctl_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Core Soft Reset (CSftRst) (Device and Host)
++		 *
++		 * The application can flush the control logic in the
++		 * entire core using this bit. This bit resets the
++		 * pipelines in the AHB Clock domain as well as the
++		 * PHY Clock domain.
++		 *
++		 * The state machines are reset to an IDLE state, the
++		 * control bits in the CSRs are cleared, all the
++		 * transmit FIFOs and the receive FIFO are flushed.
++		 *
++		 * The status mask bits that control the generation of
++		 * the interrupt, are cleared, to clear the
++		 * interrupt. The interrupt status bits are not
++		 * cleared, so the application can get the status of
++		 * any events that occurred in the core after it has
++		 * set this bit.
++		 *
++		 * Any transactions on the AHB are terminated as soon
++		 * as possible following the protocol. Any
++		 * transactions on the USB are terminated immediately.
++		 *
++		 * The configuration settings in the CSRs are
++		 * unchanged, so the software doesn't have to
++		 * reprogram these registers (Device
++		 * Configuration/Host Configuration/Core System
++		 * Configuration/Core PHY Configuration).
++		 *
++		 * The application can write to this bit, any time it
++		 * wants to reset the core. This is a self clearing
++		 * bit and the core clears this bit after all the
++		 * necessary logic is reset in the core, which may
++		 * take several clocks, depending on the current state
++		 * of the core.
++		 */
++		unsigned csftrst:1;
++		/** Hclk Soft Reset
++		 *
++		 * The application uses this bit to reset the control logic in
++		 * the AHB clock domain. Only AHB clock domain pipelines are
++		 * reset.
++		 */
++		unsigned hsftrst:1;
++		/** Host Frame Counter Reset (Host Only)<br>
++		 *
++		 * The application can reset the (micro)frame number
++		 * counter inside the core, using this bit. When the
++		 * (micro)frame counter is reset, the subsequent SOF
++		 * sent out by the core, will have a (micro)frame
++		 * number of 0.
++		 */
++		unsigned hstfrm:1;
++		/** In Token Sequence Learning Queue Flush
++		 * (INTknQFlsh) (Device Only)
++		 */
++		unsigned intknqflsh:1;
++		/** RxFIFO Flush (RxFFlsh) (Device and Host)
++		 *
++		 * The application can flush the entire Receive FIFO
++		 * using this bit. The application must first
++		 * ensure that the core is not in the middle of a
++		 * transaction. The application should write into
++		 * this bit, only after making sure that neither the
++		 * DMA engine is reading from the RxFIFO nor the MAC
++		 * is writing data into the FIFO. The
++		 * application should wait until the bit is cleared
++		 * before performing any other operations. This bit
++		 * takes 8 clocks (the slower of the PHY or AHB clock)
++		 * to clear.
++		 */
++		unsigned rxfflsh:1;
++		/** TxFIFO Flush (TxFFlsh) (Device and Host). 
++		 *
++		 * This bit is used to selectively flush a single or
++		 * all transmit FIFOs. The application must first
++		 * ensure that the core is not in the middle of a
++		 * transaction. The application should write into
++		 * this bit, only after making sure that neither the
++		 * DMA engine is writing into the TxFIFO nor the MAC
++		 * is reading the data out of the FIFO. The
++		 * application should wait until the core clears this
++		 * bit, before performing any operations. This bit
++		 * takes 8 clocks (the slower of the PHY or AHB clock)
++		 * to clear.
++		 */
++		unsigned txfflsh:1;
++
++		/** TxFIFO Number (TxFNum) (Device and Host).
++		 *
++		 * This is the FIFO number which needs to be flushed,
++		 * using the TxFIFO Flush bit. This field should not
++		 * be changed until the TxFIFO Flush bit is cleared by
++		 * the core.
++		 *	 - 0x0 : Non Periodic TxFIFO Flush
++		 *	 - 0x1 : Periodic TxFIFO #1 Flush in device mode
++		 *	   or Periodic TxFIFO in host mode
++		 *	 - 0x2 : Periodic TxFIFO #2 Flush in device mode.
++		 *	 - ...
++		 *	 - 0xF : Periodic TxFIFO #15 Flush in device mode
++		 *	 - 0x10: Flush all the Transmit NonPeriodic and
++		 *	   Transmit Periodic FIFOs in the core
++		 */
++		unsigned txfnum:5;
++		/** Reserved */
++		unsigned reserved11_29:19;
++		/** DMA Request Signal. Indicates that a DMA request is in
++		 * progress. Used for debug purposes. */
++		unsigned dmareq:1;
++		/** AHB Master Idle.  Indicates the AHB Master State
++		 * Machine is in IDLE condition. */
++		unsigned ahbidle:1;
++	} b;
++} grstctl_t;
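++
++/*
++ * Illustrative sketch (not part of the original driver): the soft-reset
++ * sequence the CSftRst description above implies -- set the bit, wait
++ * for the core to self-clear it, then wait for the AHB master to return
++ * to IDLE. Plain volatile accesses stand in for the driver's register
++ * accessors, and the retry limit is an arbitrary example value.
++ */
++static inline int fh_core_soft_reset(volatile uint32_t *grstctl_reg)
++{
++	grstctl_t greset = {.d32 = 0};
++	int count;
++
++	greset.b.csftrst = 1;
++	*grstctl_reg = greset.d32;
++
++	/* Wait for the core to clear CSftRst. */
++	for (count = 10000; count > 0; count--) {
++		greset.d32 = *grstctl_reg;
++		if (!greset.b.csftrst)
++			break;
++	}
++	if (count == 0)
++		return -1;	/* reset did not complete */
++
++	/* Wait for the AHB master state machine to go IDLE. */
++	for (count = 10000; count > 0; count--) {
++		greset.d32 = *grstctl_reg;
++		if (greset.b.ahbidle)
++			break;
++	}
++	return count ? 0 : -1;
++}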
++
++/**
++ * This union represents the bit fields of the Core Interrupt Mask
++ * Register (GINTMSK). Set/clear the bits using the bit fields then
++ * write the <i>d32</i> value to the register.
++ */
++typedef union gintmsk_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned reserved0:1;
++		unsigned modemismatch:1;
++		unsigned otgintr:1;
++		unsigned sofintr:1;
++		unsigned rxstsqlvl:1;
++		unsigned nptxfempty:1;
++		unsigned ginnakeff:1;
++		unsigned goutnakeff:1;
++		unsigned ulpickint:1;
++		unsigned i2cintr:1;
++		unsigned erlysuspend:1;
++		unsigned usbsuspend:1;
++		unsigned usbreset:1;
++		unsigned enumdone:1;
++		unsigned isooutdrop:1;
++		unsigned eopframe:1;
++		unsigned restoredone:1;
++		unsigned epmismatch:1;
++		unsigned inepintr:1;
++		unsigned outepintr:1;
++		unsigned incomplisoin:1;
++		unsigned incomplisoout:1;
++		unsigned fetsusp:1;
++		unsigned resetdet:1;
++		unsigned portintr:1;
++		unsigned hcintr:1;
++		unsigned ptxfempty:1;
++		unsigned lpmtranrcvd:1;
++		unsigned conidstschng:1;
++		unsigned disconnect:1;
++		unsigned sessreqintr:1;
++		unsigned wkupintr:1;
++	} b;
++} gintmsk_data_t;
++/**
++ * This union represents the bit fields of the Core Interrupt Register
++ * (GINTSTS).  Set/clear the bits using the bit fields then write the
++ * <i>d32</i> value to the register.
++ */
++typedef union gintsts_data {
++	/** raw register data */
++	uint32_t d32;
++#define FH_SOF_INTR_MASK 0x0008
++	/** register bits */
++	struct {
++#define FH_HOST_MODE 1
++		unsigned curmode:1;
++		unsigned modemismatch:1;
++		unsigned otgintr:1;
++		unsigned sofintr:1;
++		unsigned rxstsqlvl:1;
++		unsigned nptxfempty:1;
++		unsigned ginnakeff:1;
++		unsigned goutnakeff:1;
++		unsigned ulpickint:1;
++		unsigned i2cintr:1;
++		unsigned erlysuspend:1;
++		unsigned usbsuspend:1;
++		unsigned usbreset:1;
++		unsigned enumdone:1;
++		unsigned isooutdrop:1;
++		unsigned eopframe:1;
++		unsigned restoredone:1;
++		unsigned epmismatch:1;
++		unsigned inepint:1;
++		unsigned outepintr:1;
++		unsigned incomplisoin:1;
++		unsigned incomplisoout:1;
++		unsigned fetsusp:1;
++		unsigned resetdet:1;
++		unsigned portintr:1;
++		unsigned hcintr:1;
++		unsigned ptxfempty:1;
++		unsigned lpmtranrcvd:1;
++		unsigned conidstschng:1;
++		unsigned disconnect:1;
++		unsigned sessreqintr:1;
++		unsigned wkupintr:1;
++	} b;
++} gintsts_data_t;
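++
++/*
++ * Illustrative sketch (not part of the original driver): how GINTSTS and
++ * GINTMSK are meant to be combined in an interrupt handler. Only causes
++ * that are both pending and enabled survive the AND; status bits are
++ * typically cleared by writing 1 back to GINTSTS.
++ */
++static inline uint32_t fh_pending_core_intrs(volatile uint32_t *gintsts_reg,
++					     volatile uint32_t *gintmsk_reg)
++{
++	gintsts_data_t gintsts;
++	gintmsk_data_t gintmsk;
++
++	gintsts.d32 = *gintsts_reg;
++	gintmsk.d32 = *gintmsk_reg;
++	return gintsts.d32 & gintmsk.d32;	/* enabled and pending */
++}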
++
++/**
++ * This union represents the bit fields in the Device Receive Status Read and
++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i>
++ * element then read out the bits using the <i>b</i>it elements.
++ */
++typedef union device_grxsts_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned epnum:4;
++		unsigned bcnt:11;
++		unsigned dpid:2;
++
++#define FH_STS_DATA_UPDT		0x2	// OUT Data Packet
++#define FH_STS_XFER_COMP		0x3	// OUT Data Transfer Complete
++
++#define FH_DSTS_GOUT_NAK		0x1	// Global OUT NAK
++#define FH_DSTS_SETUP_COMP		0x4	// Setup Phase Complete
++#define FH_DSTS_SETUP_UPDT		0x6	// SETUP Packet
++		unsigned pktsts:4;
++		unsigned fn:4;
++		unsigned reserved25_31:7;
++	} b;
++} device_grxsts_data_t;
++
++/**
++ * This union represents the bit fields in the Host Receive Status Read and
++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i>
++ * element then read out the bits using the <i>b</i>it elements.
++ */
++typedef union host_grxsts_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned chnum:4;
++		unsigned bcnt:11;
++		unsigned dpid:2;
++
++		unsigned pktsts:4;
++#define FH_GRXSTS_PKTSTS_IN			  0x2
++#define FH_GRXSTS_PKTSTS_IN_XFER_COMP	  0x3
++#define FH_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5
++#define FH_GRXSTS_PKTSTS_CH_HALTED		  0x7
++
++		unsigned reserved21_31:11;
++	} b;
++} host_grxsts_data_t;
++
++/**
++ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ,
++ * GNPTXFSIZ, DPTXFSIZn, DIEPTXFn). Read the register into the <i>d32</i> element 
++ * then read out the bits using the <i>b</i>it elements.
++ */
++typedef union fifosize_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned startaddr:16;
++		unsigned depth:16;
++	} b;
++} fifosize_data_t;
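++
++/*
++ * Illustrative sketch (not part of the original driver): programming one
++ * of the FIFO size registers from a start address and depth. For this
++ * register family both fields are in 32-bit words (an assumption worth
++ * checking against the databook for a given configuration).
++ */
++static inline void fh_set_fifo_size(volatile uint32_t *fifosize_reg,
++				    uint16_t startaddr, uint16_t depth)
++{
++	fifosize_data_t fifosize = {.d32 = 0};
++
++	fifosize.b.startaddr = startaddr;	/* FIFO start, in words */
++	fifosize.b.depth = depth;		/* FIFO depth, in words */
++	*fifosize_reg = fifosize.d32;
++}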
++
++/**
++ * This union represents the bit fields in the Non-Periodic Transmit
++ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the
++ * <i>d32</i> element then read out the bits using the <i>b</i>it
++ * elements.
++ */
++typedef union gnptxsts_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned nptxfspcavail:16;
++		unsigned nptxqspcavail:8;
++		/** Top of the Non-Periodic Transmit Request Queue
++		 *	- bit 24 - Terminate (Last entry for the selected
++		 *	  channel/EP)
++		 *	- bits 26:25 - Token Type
++		 *	  - 2'b00 - IN/OUT
++		 *	  - 2'b01 - Zero Length OUT
++		 *	  - 2'b10 - PING/Complete Split
++		 *	  - 2'b11 - Channel Halt
++		 *	- bits 30:27 - Channel/EP Number
++		 */
++		unsigned nptxqtop_terminate:1;
++		unsigned nptxqtop_token:2;
++		unsigned nptxqtop_chnep:4;
++		unsigned reserved:1;
++	} b;
++} gnptxsts_data_t;
++
++/**
++ * This union represents the bit fields in the Transmit
++ * FIFO Status Register (DTXFSTS). Read the register into the
++ * <i>d32</i> element then read out the bits using the <i>b</i>it
++ * elements.
++ */
++typedef union dtxfsts_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned txfspcavail:16;
++		unsigned reserved:16;
++	} b;
++} dtxfsts_data_t;
++
++/**
++ * This union represents the bit fields in the I2C Control Register
++ * (I2CCTL). Read the register into the <i>d32</i> element then read out the
++ * bits using the <i>b</i>it elements.
++ */
++typedef union gi2cctl_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned rwdata:8;
++		unsigned regaddr:8;
++		unsigned addr:7;
++		unsigned i2cen:1;
++		unsigned ack:1;
++		unsigned i2csuspctl:1;
++		unsigned i2cdevaddr:2;
++		unsigned i2cdatse0:1;
++		unsigned reserved:1;
++		unsigned rw:1;
++		unsigned bsydne:1;
++	} b;
++} gi2cctl_data_t;
++
++/**
++ * This union represents the bit fields in the PHY Vendor Control Register
++ * (GPVNDCTL). Read the register into the <i>d32</i> element then read out the
++ * bits using the <i>b</i>it elements.
++ */
++typedef union gpvndctl_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned regdata:8;
++		unsigned vctrl:8;
++		unsigned regaddr16_21:6;
++		unsigned regwr:1;
++		unsigned reserved23_24:2;
++		unsigned newregreq:1;
++		unsigned vstsbsy:1;
++		unsigned vstsdone:1;
++		unsigned reserved28_30:3;
++		unsigned disulpidrvr:1;
++	} b;
++} gpvndctl_data_t;
++
++/**
++ * This union represents the bit fields in the General Purpose 
++ * Input/Output Register (GGPIO).
++ * Read the register into the <i>d32</i> element then read out the
++ * bits using the <i>b</i>it elements.
++ */
++typedef union ggpio_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned gpi:16;
++		unsigned gpo:16;
++	} b;
++} ggpio_data_t;
++
++/**
++ * This union represents the bit fields in the User ID Register
++ * (GUID). Read the register into the <i>d32</i> element then read out the
++ * bits using the <i>b</i>it elements.
++ */
++typedef union guid_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned rwdata:32;
++	} b;
++} guid_data_t;
++
++/**
++ * This union represents the bit fields in the Synopsys ID Register
++ * (GSNPSID). Read the register into the <i>d32</i> element then read out the
++ * bits using the <i>b</i>it elements.
++ */
++typedef union gsnpsid_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned rwdata:32;
++	} b;
++} gsnpsid_data_t;
++
++/**
++ * This union represents the bit fields in the User HW Config1
++ * Register.  Read the register into the <i>d32</i> element then read
++ * out the bits using the <i>b</i>it elements.
++ */
++typedef union hwcfg1_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned ep_dir0:2;
++		unsigned ep_dir1:2;
++		unsigned ep_dir2:2;
++		unsigned ep_dir3:2;
++		unsigned ep_dir4:2;
++		unsigned ep_dir5:2;
++		unsigned ep_dir6:2;
++		unsigned ep_dir7:2;
++		unsigned ep_dir8:2;
++		unsigned ep_dir9:2;
++		unsigned ep_dir10:2;
++		unsigned ep_dir11:2;
++		unsigned ep_dir12:2;
++		unsigned ep_dir13:2;
++		unsigned ep_dir14:2;
++		unsigned ep_dir15:2;
++	} b;
++} hwcfg1_data_t;
++
++/**
++ * This union represents the bit fields in the User HW Config2
++ * Register.  Read the register into the <i>d32</i> element then read
++ * out the bits using the <i>b</i>it elements.
++ */
++typedef union hwcfg2_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/* GHWCFG2 */
++		unsigned op_mode:3;
++#define FH_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0
++#define FH_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1
++#define FH_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2
++#define FH_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
++#define FH_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
++#define FH_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
++#define FH_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
++
++		unsigned architecture:2;
++		unsigned point2point:1;
++		unsigned hs_phy_type:2;
++#define FH_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
++#define FH_HWCFG2_HS_PHY_TYPE_UTMI 1
++#define FH_HWCFG2_HS_PHY_TYPE_ULPI 2
++#define FH_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
++
++		unsigned fs_phy_type:2;
++		unsigned num_dev_ep:4;
++		unsigned num_host_chan:4;
++		unsigned perio_ep_supported:1;
++		unsigned dynamic_fifo:1;
++		unsigned multi_proc_int:1;
++		unsigned reserved21:1;
++		unsigned nonperio_tx_q_depth:2;
++		unsigned host_perio_tx_q_depth:2;
++		unsigned dev_token_q_depth:5;
++		unsigned otg_enable_ic_usb:1;
++	} b;
++} hwcfg2_data_t;
++
++/**
++ * This union represents the bit fields in the User HW Config3
++ * Register.  Read the register into the <i>d32</i> element then read
++ * out the bits using the <i>b</i>it elements.
++ */
++typedef union hwcfg3_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/* GHWCFG3 */
++		unsigned xfer_size_cntr_width:4;
++		unsigned packet_size_cntr_width:3;
++		unsigned otg_func:1;
++		unsigned i2c:1;
++		unsigned vendor_ctrl_if:1;
++		unsigned optional_features:1;
++		unsigned synch_reset_type:1;
++		unsigned adp_supp:1;
++		unsigned otg_enable_hsic:1;
++		unsigned bc_support:1;
++		unsigned otg_lpm_en:1;
++		unsigned dfifo_depth:16;
++	} b;
++} hwcfg3_data_t;
++
++/**
++ * This union represents the bit fields in the User HW Config4
++ * Register.  Read the register into the <i>d32</i> element then read
++ * out the bits using the <i>b</i>it elements.
++ */
++typedef union hwcfg4_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned num_dev_perio_in_ep:4;
++		unsigned power_optimiz:1;
++		unsigned min_ahb_freq:1;
++		unsigned hiber:1;
++		unsigned xhiber:1;
++		unsigned reserved:6;
++		unsigned utmi_phy_data_width:2;
++		unsigned num_dev_mode_ctrl_ep:4;
++		unsigned iddig_filt_en:1;
++		unsigned vbus_valid_filt_en:1;
++		unsigned a_valid_filt_en:1;
++		unsigned b_valid_filt_en:1;
++		unsigned session_end_filt_en:1;
++		unsigned ded_fifo_en:1;
++		unsigned num_in_eps:4;
++		unsigned desc_dma:1;
++		unsigned desc_dma_dyn:1;
++	} b;
++} hwcfg4_data_t;
++
++/**
++ * This union represents the bit fields of the Core LPM Configuration
++ * Register (GLPMCFG). Set the bits using bit fields then write
++ * the <i>d32</i> value to the register.
++ */
++typedef union glpmctl_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** LPM-Capable (LPMCap) (Device and Host)
++		 * The application uses this bit to control
++		 * the FH_otg core LPM capabilities.
++		 */
++		unsigned lpm_cap_en:1;
++		/** LPM response programmed by application (AppL1Res) (Device)
++		 * Handshake response to LPM token pre-programmed
++		 * by device application software.
++		 */
++		unsigned appl_resp:1;
++		/** Host Initiated Resume Duration (HIRD) (Device and Host)
++		 * In Host mode this field indicates the value of HIRD
++		 * to be sent in an LPM transaction.
++		 * In Device mode this field is updated with the
++		 * Received LPM Token HIRD bmAttribute
++		 * when an ACK/NYET/STALL response is sent
++		 * to an LPM transaction.
++		 */
++		unsigned hird:4;
++		/** RemoteWakeEnable (bRemoteWake) (Device and Host)
++		 * In Host mode this bit indicates the value of remote
++		 * wake up to be sent in wIndex field of LPM transaction.
++		 * In Device mode this field is updated with the
++		 * Received LPM Token bRemoteWake bmAttribute
++		 * when an ACK/NYET/STALL response is sent
++		 * to an LPM transaction.
++		 */
++		unsigned rem_wkup_en:1;
++		/** Enable utmi_sleep_n (EnblSlpM) (Device and Host)
++		 * The application uses this bit to control
++		 * the utmi_sleep_n assertion to the PHY when in L1 state.
++		 */
++		unsigned en_utmi_sleep:1;
++		/** HIRD Threshold (HIRD_Thres) (Device and Host)
++		 */
++		unsigned hird_thres:5;
++		/** LPM Response (CoreL1Res) (Device and Host)
++		 * In Host mode this field contains the handshake response to
++		 * the LPM transaction.
++		 * In Device mode the response of the core to
++		 * the LPM transaction received is reflected in these two bits.
++		 *	- 0x0 : ERROR (No handshake response)
++		 *	- 0x1 : STALL
++		 *	- 0x2 : NYET
++		 *	- 0x3 : ACK
++		 */
++		unsigned lpm_resp:2;
++		/** Port Sleep Status (SlpSts) (Device and Host)
++		 * This bit is set as long as a Sleep condition
++		 * is present on the USB bus.
++		 */
++		unsigned prt_sleep_sts:1;
++		/** Sleep State Resume OK (L1ResumeOK) (Device and Host)
++		 * Indicates that the application or host
++		 * can start resume from Sleep state.
++		 */
++		unsigned sleep_state_resumeok:1;
++		/** LPM channel Index (LPM_Chnl_Indx) (Host)
++		 * The channel number on which the LPM transaction
++		 * has to be applied while sending
++		 * an LPM transaction to the local device.
++		 */
++		unsigned lpm_chan_index:4;
++		/** LPM Retry Count (LPM_Retry_Cnt) (Host)
++		 * Number of host retries to be performed if the device
++		 * response was not a valid response.
++		 */
++		unsigned retry_count:3;
++		/** Send LPM Transaction (SndLPM) (Host)
++		 * When set by application software,
++		 * an LPM transaction containing two tokens
++		 * is sent.
++		 */
++		unsigned send_lpm:1;
++		/** LPM Retry status (LPM_RetryCnt_Sts) (Host)
++		 * Number of LPM Host Retries still remaining
++		 * to be transmitted for the current LPM sequence
++		 */
++		unsigned retry_count_sts:3;
++		/** Enable Best Effort Service Latency (BESL) (Device and Host)
++		 *  This bit enables the BESL features as defined in the LPM errata
++		 */
++		unsigned en_besl:1;
++		
++		unsigned reserved29:1;
++		/** In host mode once this bit is set, the host
++		 * configures to drive the HSIC Idle state on the bus.
++		 * It then waits for the device to initiate the Connect sequence.
++		 * In device mode once this bit is set, the device waits for
++		 * the HSIC Idle line state on the bus. Upon receiving the Idle
++		 * line state, it initiates the HSIC Connect sequence.
++		 */
++		unsigned hsic_connect:1;
++		/** This bit overrides and functionally inverts
++		 * the if_select_hsic input port signal.
++		 */
++		unsigned inv_sel_hsic:1;
++	} b;
++} glpmcfg_data_t;
++
++/**
++ * This union represents the bit fields of the Core ADP Timer, Control and
++ * Status Register (ADPTIMCTLSTS). Set the bits using bit fields then write
++ * the <i>d32</i> value to the register.
++ */
++typedef union adpctl_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Probe Discharge (PRB_DSCHG)
++		 *  These bits set the times for TADP_DSCHG. 
++		 *  These bits are defined as follows:
++		 *  2'b00 - 4 msec
++		 *  2'b01 - 8 msec
++		 *  2'b10 - 16 msec
++		 *  2'b11 - 32 msec
++		 */
++		unsigned prb_dschg:2;
++		/** Probe Delta (PRB_DELTA)
++		 *  These bits set the resolution for the RTIM value.
++		 *  The bits are defined in units of 32 kHz clock cycles as follows:
++		 *  2'b00 - 1 cycle
++		 *  2'b01 - 2 cycles
++		 *  2'b10 - 3 cycles
++		 *  2'b11 - 4 cycles
++		 *  For example, if this field is set to 2'b01, RTIM increments
++		 *  once every two 32 kHz clock cycles.
++		 */
++		unsigned prb_delta:2;
++		/** Probe Period (PRB_PER)
++		 *  These bits set the TADP_PRD as follows:
++		 *  2'b00  -  0.625 to 0.925 sec (typical 0.775 sec)
++		 *  2'b01  -  1.25 to 1.85 sec (typical 1.55 sec)
++		 *  2'b10  -  1.9 to 2.6 sec (typical 2.275 sec)
++		 *  2'b11  -  Reserved
++		 */
++		unsigned prb_per:2;
++		/** These bits capture the latest time it took for VBUS to ramp from 
++		 *  VADP_SINK to VADP_PRB. 
++		 *  0x000  -  1 cycle
++		 *  0x001  -  2 cycles
++		 *  0x002  -  3 cycles
++		 *  etc
++		 *  0x7FF  -  2048 cycles
++		 *  A time of 1024 cycles at 32 kHz corresponds to a time of 32 msec.
++		*/
++		unsigned rtim:11;
++		/** Enable Probe (EnaPrb)
++		 *  When programmed to 1'b1, the core performs a probe operation.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++		 */
++		unsigned enaprb:1;
++		/** Enable Sense (EnaSns)
++		 *  When programmed to 1'b1, the core performs a Sense operation.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++		 */
++		unsigned enasns:1;
++		/** ADP Reset (ADPRes)
++		 *  When set, ADP controller is reset.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++ 		 */
++		unsigned adpres:1;
++		/** ADP Enable (ADPEn)
++		 *  When set, the core performs either ADP probing or sensing
++		 *  based on EnaPrb or EnaSns.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++		 */
++		unsigned adpen:1;
++		/** ADP Probe Interrupt (ADP_PRB_INT)
++		 *  When this bit is set, it means that the VBUS
++		 *  voltage is greater than VADP_PRB or VADP_PRB is reached.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++		 */
++		unsigned adp_prb_int:1;
++		/**
++		 *  ADP Sense Interrupt (ADP_SNS_INT)
++		 *  When this bit is set, it means that the VBUS voltage is greater than 
++		 *  VADP_SNS value or VADP_SNS is reached.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++		 */
++		unsigned adp_sns_int:1;
++		/** ADP Timeout Interrupt (ADP_TMOUT_INT)
++		 *  This bit is relevant only for an ADP probe.
++		 *  When this bit is set, it means that the ramp time has
++		 *  completed, i.e. ADPCTL.RTIM has reached its terminal value
++		 *  of 0x7FF.  This is a debug feature that allows software
++		 *  to read the ramp time after each cycle.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++		 */
++		unsigned adp_tmout_int:1;
++		/** ADP Probe Interrupt Mask (ADP_PRB_INT_MSK)
++		 *  When this bit is set, it unmasks the interrupt due to ADP_PRB_INT.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++		 */
++		unsigned adp_prb_int_msk:1;
++		/** ADP Sense Interrupt Mask (ADP_SNS_INT_MSK)
++		 *  When this bit is set, it unmasks the interrupt due to ADP_SNS_INT.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++		 */
++		unsigned adp_sns_int_msk:1;
++		/** ADP Timeout Interrupt Mask (ADP_TMOUT_MSK)
++		 *  When this bit is set, it unmasks the interrupt due to ADP_TMOUT_INT.
++		 *  This bit is valid only if OTG_Ver = 1'b1.
++		 */
++		unsigned adp_tmout_int_msk:1;
++		/** Access Request
++		 * 2'b00 - Read/Write Valid (updated by the core)
++		 * 2'b01 - Read
++		 * 2'b10 - Write
++		 * 2'b11 - Reserved
++		 */
++		unsigned ar:2;
++		 /** Reserved */
++		unsigned reserved29_31:3;
++	} b;
++} adpctl_data_t;
++
++////////////////////////////////////////////
++// Device Registers
++/**
++ * Device Global Registers. <i>Offsets 800h-BFFh</i>
++ *
++ * The following structures define the size and relative field offsets
++ * for the Device Mode Registers.
++ *
++ * <i>These registers are visible only in Device mode and must not be
++ * accessed in Host mode, as the results are unknown.</i>
++ */
++typedef struct fh_otg_dev_global_regs {
++	/** Device Configuration Register. <i>Offset 800h</i> */
++	volatile uint32_t dcfg;
++	/** Device Control Register. <i>Offset: 804h</i> */
++	volatile uint32_t dctl;
++	/** Device Status Register (Read Only). <i>Offset: 808h</i> */
++	volatile uint32_t dsts;
++	/** Reserved. <i>Offset: 80Ch</i> */
++	uint32_t unused;
++	/** Device IN Endpoint Common Interrupt Mask
++	 * Register. <i>Offset: 810h</i> */
++	volatile uint32_t diepmsk;
++	/** Device OUT Endpoint Common Interrupt Mask
++	 * Register. <i>Offset: 814h</i> */
++	volatile uint32_t doepmsk;
++	/** Device All Endpoints Interrupt Register.  <i>Offset: 818h</i> */
++	volatile uint32_t daint;
++	/** Device All Endpoints Interrupt Mask Register.  <i>Offset:
++	 * 81Ch</i> */
++	volatile uint32_t daintmsk;
++	/** Device IN Token Queue Read Register-1 (Read Only).
++	 * <i>Offset: 820h</i> */
++	volatile uint32_t dtknqr1;
++	/** Device IN Token Queue Read Register-2 (Read Only).
++	 * <i>Offset: 824h</i> */
++	volatile uint32_t dtknqr2;
++	/** Device VBUS Discharge Register. <i>Offset: 828h</i> */
++	volatile uint32_t dvbusdis;
++	/** Device VBUS Pulse Register. <i>Offset: 82Ch</i> */
++	volatile uint32_t dvbuspulse;
++	/** Device IN Token Queue Read Register-3 (Read Only). /
++	 *	Device Thresholding control register (Read/Write)
++	 * <i>Offset: 830h</i> */
++	volatile uint32_t dtknqr3_dthrctl;
++	/** Device IN Token Queue Read Register-4 (Read Only). /
++	 *	Device IN EPs empty Intr. Mask Register (Read/Write)
++	 * <i>Offset: 834h</i> */
++	volatile uint32_t dtknqr4_fifoemptymsk;
++	/** Device Each Endpoint Interrupt Register (Read Only). /
++	 * <i>Offset: 838h</i> */
++	volatile uint32_t deachint;
++	/** Device Each Endpoint Interrupt mask Register (Read/Write). /
++	 * <i>Offset: 83Ch</i> */
++	volatile uint32_t deachintmsk;
++	/** Device Each In Endpoint Interrupt mask Register (Read/Write). /
++	 * <i>Offset: 840h</i> */
++	volatile uint32_t diepeachintmsk[MAX_EPS_CHANNELS];
++	/** Device Each Out Endpoint Interrupt mask Register (Read/Write). /
++	 * <i>Offset: 880h</i> */
++	volatile uint32_t doepeachintmsk[MAX_EPS_CHANNELS];
++} fh_otg_device_global_regs_t;
++
++/**
++ * This union represents the bit fields in the Device Configuration
++ * Register.  Read the register into the <i>d32</i> member then
++ * set/clear the bits using the <i>b</i>it elements.  Write the
++ * <i>d32</i> member to the dcfg register.
++ */
++typedef union dcfg_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Device Speed */
++		unsigned devspd:2;
++		/** Non Zero Length Status OUT Handshake */
++		unsigned nzstsouthshk:1;
++#define FH_DCFG_SEND_STALL 1
++
++		unsigned ena32khzs:1;
++		/** Device Addresses */
++		unsigned devaddr:7;
++		/** Periodic Frame Interval */
++		unsigned perfrint:2;
++#define FH_DCFG_FRAME_INTERVAL_80 0
++#define FH_DCFG_FRAME_INTERVAL_85 1
++#define FH_DCFG_FRAME_INTERVAL_90 2
++#define FH_DCFG_FRAME_INTERVAL_95 3
++
++		/** Enable Device OUT NAK for bulk in DDMA mode */
++		unsigned endevoutnak:1;
++
++		unsigned reserved14_17:4;
++		/** In Endpoint Mis-match count */
++		unsigned epmscnt:5;
++		/** Enable Descriptor DMA in Device mode */
++		unsigned descdma:1;
++		unsigned perschintvl:2;
++		unsigned resvalid:6;
++	} b;
++} dcfg_data_t;
++
++/**
++ * This union represents the bit fields in the Device Control
++ * Register.  Read the register into the <i>d32</i> member then
++ * set/clear the bits using the <i>b</i>it elements.
++ */
++typedef union dctl_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Remote Wakeup */
++		unsigned rmtwkupsig:1;
++		/** Soft Disconnect */
++		unsigned sftdiscon:1;
++		/** Global Non-Periodic IN NAK Status */
++		unsigned gnpinnaksts:1;
++		/** Global OUT NAK Status */
++		unsigned goutnaksts:1;
++		/** Test Control */
++		unsigned tstctl:3;
++		/** Set Global Non-Periodic IN NAK */
++		unsigned sgnpinnak:1;
++		/** Clear Global Non-Periodic IN NAK */
++		unsigned cgnpinnak:1;
++		/** Set Global OUT NAK */
++		unsigned sgoutnak:1;
++		/** Clear Global OUT NAK */
++		unsigned cgoutnak:1;
++		/** Power-On Programming Done */
++		unsigned pwronprgdone:1;
++		/** Reserved */
++		unsigned reserved:1;
++		/** Global Multi Count */
++		unsigned gmc:2;
++		/** Ignore Frame Number for ISOC EPs */
++		unsigned ifrmnum:1;
++		/** NAK on Babble */
++		unsigned nakonbble:1;
++		/** Enable Continue on BNA */
++		unsigned encontonbna:1;
++		/** Enable deep sleep besl reject feature*/
++		unsigned besl_reject:1;
++
++		unsigned reserved17_31:13;
++	} b;
++} dctl_data_t;
++
++/**
++ * This union represents the bit fields in the Device Status
++ * Register.  Read the register into the <i>d32</i> member then
++ * set/clear the bits using the <i>b</i>it elements.
++ */
++typedef union dsts_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Suspend Status */
++		unsigned suspsts:1;
++		/** Enumerated Speed */
++		unsigned enumspd:2;
++#define FH_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0
++#define FH_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1
++#define FH_DSTS_ENUMSPD_LS_PHY_6MHZ		   2
++#define FH_DSTS_ENUMSPD_FS_PHY_48MHZ		   3
++		/** Erratic Error */
++		unsigned errticerr:1;
++		unsigned reserved4_7:4;
++		/** Frame or Microframe Number of the received SOF */
++		unsigned soffn:14;
++		unsigned reserved22_31:10;
++	} b;
++} dsts_data_t;
++
++/**
++ * This union represents the bit fields in the Device IN EP Interrupt
++ * Register and the Device IN EP Common Mask Register.
++ *
++ * - Read the register into the <i>d32</i> member then set/clear the
++ *	 bits using the <i>b</i>it elements.
++ */
++typedef union diepint_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Transfer complete mask */
++		unsigned xfercompl:1;
++		/** Endpoint disable mask */
++		unsigned epdisabled:1;
++		/** AHB Error mask */
++		unsigned ahberr:1;
++		/** TimeOUT Handshake mask (non-ISOC EPs) */
++		unsigned timeout:1;
++		/** IN Token received with TxF Empty mask */
++		unsigned intktxfemp:1;
++		/** IN Token Received with EP mismatch mask */
++		unsigned intknepmis:1;
++		/** IN Endpoint NAK Effective mask */
++		unsigned inepnakeff:1;
++		/** Transmit FIFO Empty */
++		unsigned emptyintr:1;
++
++		unsigned txfifoundrn:1;
++
++		/** BNA Interrupt mask */
++		unsigned bna:1;
++
++		unsigned reserved10_12:3;
++		/** NAK Interrupt mask */
++		unsigned nak:1;
++
++		unsigned reserved14_31:18;
++	} b;
++} diepint_data_t;
++
++/**
++ * This union represents the bit fields in the Device IN EP
++ * Common/Dedicated Interrupt Mask Register.
++ */
++typedef union diepint_data diepmsk_data_t;
++
++/**
++ * This union represents the bit fields in the Device OUT EP Interrupt
++ * Register and the Device OUT EP Common Interrupt Mask Register.
++ *
++ * - Read the register into the <i>d32</i> member then set/clear the
++ *	 bits using the <i>b</i>it elements.
++ */
++typedef union doepint_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Transfer complete */
++		unsigned xfercompl:1;
++		/** Endpoint disable  */
++		unsigned epdisabled:1;
++		/** AHB Error */
++		unsigned ahberr:1;
++		/** Setup Phase Done (control EPs) */
++		unsigned setup:1;
++		/** OUT Token Received when Endpoint Disabled */
++		unsigned outtknepdis:1;
++
++		unsigned stsphsercvd:1;
++		/** Back-to-Back SETUP Packets Received */
++		unsigned back2backsetup:1;
++
++		unsigned reserved7:1;
++		/** OUT packet Error */
++		unsigned outpkterr:1;
++		/** BNA Interrupt */
++		unsigned bna:1;
++
++		unsigned reserved10:1;
++		/** Packet Drop Status */
++		unsigned pktdrpsts:1;
++		/** Babble Interrupt */
++		unsigned babble:1;
++		/** NAK Interrupt */
++		unsigned nak:1;
++		/** NYET Interrupt */
++		unsigned nyet:1;
++		/** Bit indicating setup packet received */
++		unsigned sr:1;
++
++		unsigned reserved16_31:16;
++	} b;
++} doepint_data_t;
++
++/**
++ * This union represents the bit fields in the Device OUT EP
++ * Common/Dedicated Interrupt Mask Register.
++ */
++typedef union doepint_data doepmsk_data_t;
++
++/**
++ * This union represents the bit fields in the Device All EP Interrupt
++ * and Mask Registers.
++ * - Read the register into the <i>d32</i> member then set/clear the
++ *	 bits using the <i>b</i>it elements.
++ */
++typedef union daint_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** IN Endpoint bits */
++		unsigned in:16;
++		/** OUT Endpoint bits */
++		unsigned out:16;
++	} ep;
++	struct {
++		/** IN Endpoint bits */
++		unsigned inep0:1;
++		unsigned inep1:1;
++		unsigned inep2:1;
++		unsigned inep3:1;
++		unsigned inep4:1;
++		unsigned inep5:1;
++		unsigned inep6:1;
++		unsigned inep7:1;
++		unsigned inep8:1;
++		unsigned inep9:1;
++		unsigned inep10:1;
++		unsigned inep11:1;
++		unsigned inep12:1;
++		unsigned inep13:1;
++		unsigned inep14:1;
++		unsigned inep15:1;
++		/** OUT Endpoint bits */
++		unsigned outep0:1;
++		unsigned outep1:1;
++		unsigned outep2:1;
++		unsigned outep3:1;
++		unsigned outep4:1;
++		unsigned outep5:1;
++		unsigned outep6:1;
++		unsigned outep7:1;
++		unsigned outep8:1;
++		unsigned outep9:1;
++		unsigned outep10:1;
++		unsigned outep11:1;
++		unsigned outep12:1;
++		unsigned outep13:1;
++		unsigned outep14:1;
++		unsigned outep15:1;
++	} b;
++} daint_data_t;
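++
++/*
++ * Illustrative sketch (not part of the original driver): scanning the
++ * <i>ep</i> view of DAINT -- the low halfword covers IN endpoints, the
++ * high halfword OUT endpoints. Returns the lowest pending IN endpoint,
++ * or -1 if none is pending.
++ */
++static inline int fh_first_pending_in_ep(uint32_t daint_val)
++{
++	daint_data_t daint;
++	int epnum;
++
++	daint.d32 = daint_val;
++	for (epnum = 0; epnum < 16; epnum++)
++		if (daint.ep.in & (1u << epnum))
++			return epnum;
++	return -1;
++}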
++
++/**
++ * This union represents the bit fields in the Device IN Token Queue
++ * Read Registers.
++ * - Read the register into the <i>d32</i> member.
++ * - READ-ONLY Register
++ */
++typedef union dtknq1_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** In Token Queue Write Pointer */
++		unsigned intknwptr:5;
++		/** Reserved */
++		unsigned reserved05_06:2;
++		/** write pointer has wrapped. */
++		unsigned wrap_bit:1;
++		/** EP Numbers of IN Tokens 0 ... 5 */
++		unsigned epnums0_5:24;
++	} b;
++} dtknq1_data_t;
++
++/**
++ * This union represents Threshold control Register
++ * - Read and write the register into the <i>d32</i> member.
++ * - READ-WRITABLE Register
++ */
++typedef union dthrctl_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** non ISO Tx Thr. Enable */
++		unsigned non_iso_thr_en:1;
++		/** ISO Tx Thr. Enable */
++		unsigned iso_thr_en:1;
++		/** Tx Thr. Length */
++		unsigned tx_thr_len:9;
++		/** AHB Threshold ratio */
++		unsigned ahb_thr_ratio:2;
++		/** Reserved */
++		unsigned reserved13_15:3;
++		/** Rx Thr. Enable */
++		unsigned rx_thr_en:1;
++		/** Rx Thr. Length */
++		unsigned rx_thr_len:9;
++		unsigned reserved26:1;
++		/** Arbiter Parking Enable*/
++		unsigned arbprken:1;
++		/** Reserved */
++		unsigned reserved28_31:4;
++	} b;
++} dthrctl_data_t;
++
++/**
++ * Device Logical IN Endpoint-Specific Registers. <i>Offsets
++ * 900h-AFCh</i>
++ *
++ * There will be one set of endpoint registers per logical endpoint
++ * implemented.
++ *
++ * <i>These registers are visible only in Device mode and must not be
++ * accessed in Host mode, as the results are unknown.</i>
++ */
++typedef struct fh_otg_dev_in_ep_regs {
++	/** Device IN Endpoint Control Register. <i>Offset:900h +
++	 * (ep_num * 20h) + 00h</i> */
++	volatile uint32_t diepctl;
++	/** Reserved. <i>Offset:900h + (ep_num * 20h) + 04h</i> */
++	uint32_t reserved04;
++	/** Device IN Endpoint Interrupt Register. <i>Offset:900h +
++	 * (ep_num * 20h) + 08h</i> */
++	volatile uint32_t diepint;
++	/** Reserved. <i>Offset:900h + (ep_num * 20h) + 0Ch</i> */
++	uint32_t reserved0C;
++	/** Device IN Endpoint Transfer Size
++	 * Register. <i>Offset:900h + (ep_num * 20h) + 10h</i> */
++	volatile uint32_t dieptsiz;
++	/** Device IN Endpoint DMA Address Register. <i>Offset:900h +
++	 * (ep_num * 20h) + 14h</i> */
++	volatile uint32_t diepdma;
++	/** Device IN Endpoint Transmit FIFO Status Register. <i>Offset:900h +
++	 * (ep_num * 20h) + 18h</i> */
++	volatile uint32_t dtxfsts;
++	/** Device IN Endpoint DMA Buffer Register. <i>Offset:900h +
++	 * (ep_num * 20h) + 1Ch</i> */
++	volatile uint32_t diepdmab;
++} fh_otg_dev_in_ep_regs_t;
++
++/**
++ * Device Logical OUT Endpoint-Specific Registers. <i>Offsets:
++ * B00h-CFCh</i>
++ *
++ * There will be one set of endpoint registers per logical endpoint
++ * implemented.
++ *
++ * <i>These registers are visible only in Device mode and must not be
++ * accessed in Host mode, as the results are unknown.</i>
++ */
++typedef struct fh_otg_dev_out_ep_regs {
++	/** Device OUT Endpoint Control Register. <i>Offset:B00h +
++	 * (ep_num * 20h) + 00h</i> */
++	volatile uint32_t doepctl;
++	/** Reserved. <i>Offset:B00h + (ep_num * 20h) + 04h</i> */
++	uint32_t reserved04;
++	/** Device OUT Endpoint Interrupt Register. <i>Offset:B00h +
++	 * (ep_num * 20h) + 08h</i> */
++	volatile uint32_t doepint;
++	/** Reserved. <i>Offset:B00h + (ep_num * 20h) + 0Ch</i> */
++	uint32_t reserved0C;
++	/** Device OUT Endpoint Transfer Size Register. <i>Offset:
++	 * B00h + (ep_num * 20h) + 10h</i> */
++	volatile uint32_t doeptsiz;
++	/** Device OUT Endpoint DMA Address Register. <i>Offset:B00h
++	 * + (ep_num * 20h) + 14h</i> */
++	volatile uint32_t doepdma;
++	/** Reserved. <i>Offset:B00h + (ep_num * 20h) + 18h</i> */
++	uint32_t unused;
++	/** Device OUT Endpoint DMA Buffer Register. <i>Offset:B00h
++	 * + (ep_num * 20h) + 1Ch</i> */
++	uint32_t doepdmab;
++} fh_otg_dev_out_ep_regs_t;
++
++/**
++ * This union represents the bit fields in the Device EP Control
++ * Register.  Read the register into the <i>d32</i> member then
++ * set/clear the bits using the <i>b</i>it elements.
++ */
++typedef union depctl_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Maximum Packet Size
++		 * IN/OUT EPn
++		 * IN/OUT EP0 - 2 bits
++		 *	 2'b00: 64 Bytes
++		 *	 2'b01: 32
++		 *	 2'b10: 16
++		 *	 2'b11: 8 */
++		unsigned mps:11;
++#define FH_DEP0CTL_MPS_64	 0
++#define FH_DEP0CTL_MPS_32	 1
++#define FH_DEP0CTL_MPS_16	 2
++#define FH_DEP0CTL_MPS_8	 3
++
++		/** Next Endpoint
++		 * IN EPn/IN EP0
++		 * OUT EPn/OUT EP0 - reserved */
++		unsigned nextep:4;
++
++		/** USB Active Endpoint */
++		unsigned usbactep:1;
++
++		/** Endpoint DPID (INTR/Bulk IN and OUT endpoints)
++		 * This field contains the PID of the packet going to
++		 * be received or transmitted on this endpoint. The
++		 * application should program the PID of the first
++		 * packet going to be received or transmitted on this
++		 * endpoint, after the endpoint is
++		 * activated. The application uses the SetD1PID and
++		 * SetD0PID fields of this register to program either
++		 * D0 or D1 PID.
++		 *
++		 * The encoding for this field is
++		 *	 - 0: D0
++		 *	 - 1: D1
++		 */
++		unsigned dpid:1;
++
++		/** NAK Status */
++		unsigned naksts:1;
++
++		/** Endpoint Type
++		 *	2'b00: Control
++		 *	2'b01: Isochronous
++		 *	2'b10: Bulk
++		 *	2'b11: Interrupt */
++		unsigned eptype:2;
++
++		/** Snoop Mode
++		 * OUT EPn/OUT EP0
++		 * IN EPn/IN EP0 - reserved */
++		unsigned snp:1;
++
++		/** Stall Handshake */
++		unsigned stall:1;
++
++		/** Tx Fifo Number
++		 * IN EPn/IN EP0
++		 * OUT EPn/OUT EP0 - reserved */
++		unsigned txfnum:4;
++
++		/** Clear NAK */
++		unsigned cnak:1;
++		/** Set NAK */
++		unsigned snak:1;
++		/** Set DATA0 PID (INTR/Bulk IN and OUT endpoints)
++		 * Writing to this field sets the Endpoint DPID (DPID)
++		 * field in this register to DATA0. Set Even
++		 * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints)
++		 * Writing to this field sets the Even/Odd
++		 * (micro)frame (EO_FrNum) field to even (micro)
++		 * frame.
++		 */
++		unsigned setd0pid:1;
++		/** Set DATA1 PID (INTR/Bulk IN and OUT endpoints)
++		 * Writing to this field sets the Endpoint DPID (DPID)
++		 * field in this register to DATA1. Set Odd
++		 * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints)
++		 * Writing to this field sets the Even/Odd
++		 * (micro)frame (EO_FrNum) field to odd (micro) frame.
++		 */
++		unsigned setd1pid:1;
++
++		/** Endpoint Disable */
++		unsigned epdis:1;
++		/** Endpoint Enable */
++		unsigned epena:1;
++	} b;
++} depctl_data_t;
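++
++/*
++ * Illustrative sketch (not part of the original driver): arming an
++ * endpoint for a new transfer with the control bits described above --
++ * clear NAK and set Endpoint Enable in one write, after the transfer
++ * size and DMA registers have been programmed (not shown). A plain
++ * volatile access stands in for the driver's register accessors.
++ */
++static inline void fh_ep_arm(volatile uint32_t *depctl_reg)
++{
++	depctl_data_t depctl;
++
++	depctl.d32 = *depctl_reg;
++	depctl.b.cnak = 1;	/* clear NAK status */
++	depctl.b.epena = 1;	/* enable the endpoint */
++	*depctl_reg = depctl.d32;
++}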
++
++/**
++ * This union represents the bit fields in the Device EP Transfer
++ * Size Register.  Read the register into the <i>d32</i> member then
++ * set/clear the bits using the <i>b</i>it elements.
++ */
++typedef union deptsiz_data {
++		/** raw register data */
++	uint32_t d32;
++		/** register bits */
++	struct {
++		/** Transfer size */
++		unsigned xfersize:19;
++/** Max packet count for EP (pow(2,10)-1) */
++#define MAX_PKT_CNT 1023
++		/** Packet Count */
++		unsigned pktcnt:10;
++		/** Multi Count - Periodic IN endpoints */
++		unsigned mc:2;
++		unsigned reserved:1;
++	} b;
++} deptsiz_data_t;
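++
++/*
++ * Illustrative sketch (not part of the original driver): packing a
++ * transfer of pktcnt packets covering xfersize bytes into DEPTSIZ. The
++ * caller is assumed to have split the transfer so pktcnt does not
++ * exceed MAX_PKT_CNT.
++ */
++static inline void fh_ep_set_xfer_size(volatile uint32_t *deptsiz_reg,
++				       uint32_t xfersize, uint32_t pktcnt)
++{
++	deptsiz_data_t deptsiz = {.d32 = 0};
++
++	deptsiz.b.xfersize = xfersize;	/* total bytes to transfer */
++	deptsiz.b.pktcnt = pktcnt;	/* packets making up the transfer */
++	*deptsiz_reg = deptsiz.d32;
++}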
++
++/**
++ * This union represents the bit fields in the Device EP 0 Transfer
++ * Size Register.  Read the register into the <i>d32</i> member then
++ * set/clear the bits using the <i>b</i>it elements.
++ */
++typedef union deptsiz0_data {
++		/** raw register data */
++	uint32_t d32;
++		/** register bits */
++	struct {
++		/** Transfer size */
++		unsigned xfersize:7;
++				/** Reserved */
++		unsigned reserved7_18:12;
++		/** Packet Count */
++		unsigned pktcnt:2;
++				/** Reserved */
++		unsigned reserved21_28:8;
++				/**Setup Packet Count (DOEPTSIZ0 Only) */
++		unsigned supcnt:2;
++		unsigned reserved31:1;
++	} b;
++} deptsiz0_data_t;
++
++/////////////////////////////////////////////////
++// DMA Descriptor Specific Structures
++//
++
++/** Buffer status definitions */
++
++#define BS_HOST_READY	0x0
++#define BS_DMA_BUSY		0x1
++#define BS_DMA_DONE		0x2
++#define BS_HOST_BUSY	0x3
++
++/** Receive/Transmit status definitions */
++
++#define RTS_SUCCESS		0x0
++#define RTS_BUFFLUSH	0x1
++#define RTS_RESERVED	0x2
++#define RTS_BUFERR		0x3
++
++/**
++ * This union represents the bit fields in the DMA Descriptor
++ * status quadlet. Read the quadlet into the <i>d32</i> member then
++ * set/clear the bits using the <i>b</i>it, <i>b_iso_out</i> and
++ * <i>b_iso_in</i> elements.
++ */
++typedef union dev_dma_desc_sts {
++		/** raw register data */
++	uint32_t d32;
++		/** quadlet bits */
++	struct {
++		/** Received number of bytes */
++		unsigned bytes:16;
++		/** NAK bit - only for OUT EPs */
++		unsigned nak:1;
++		unsigned reserved17_22:6;
++		/** Multiple Transfer - only for OUT EPs */
++		unsigned mtrf:1;
++		/** Setup Packet received - only for OUT EPs */
++		unsigned sr:1;
++		/** Interrupt On Complete */
++		unsigned ioc:1;
++		/** Short Packet */
++		unsigned sp:1;
++		/** Last */
++		unsigned l:1;
++		/** Receive Status */
++		unsigned sts:2;
++		/** Buffer Status */
++		unsigned bs:2;
++	} b;
++
++//#ifdef FH_EN_ISOC
++		/** iso out quadlet bits */
++	struct {
++		/** Received number of bytes */
++		unsigned rxbytes:11;
++
++		unsigned reserved11:1;
++		/** Frame Number */
++		unsigned framenum:11;
++		/** Received ISO Data PID */
++		unsigned pid:2;
++		/** Interrupt On Complete */
++		unsigned ioc:1;
++		/** Short Packet */
++		unsigned sp:1;
++		/** Last */
++		unsigned l:1;
++		/** Receive Status */
++		unsigned rxsts:2;
++		/** Buffer Status */
++		unsigned bs:2;
++	} b_iso_out;
++
++		/** iso in quadlet bits */
++	struct {
++		/** Transmitted number of bytes */
++		unsigned txbytes:12;
++		/** Frame Number */
++		unsigned framenum:11;
++		/** Transmitted ISO Data PID */
++		unsigned pid:2;
++		/** Interrupt On Complete */
++		unsigned ioc:1;
++		/** Short Packet */
++		unsigned sp:1;
++		/** Last */
++		unsigned l:1;
++		/** Transmit Status */
++		unsigned txsts:2;
++		/** Buffer Status */
++		unsigned bs:2;
++	} b_iso_in;
++//#endif                                /* FH_EN_ISOC */
++} dev_dma_desc_sts_t;
++
++/**
++ * DMA Descriptor structure
++ *
++ * DMA Descriptor structure contains two quadlets:
++ * Status quadlet and Data buffer pointer.
++ */
++typedef struct fh_otg_dev_dma_desc {
++	/** DMA Descriptor status quadlet */
++	dev_dma_desc_sts_t status;
++	/** DMA Descriptor data buffer pointer */
++	uint32_t buf;
++} fh_otg_dev_dma_desc_t;
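++
++/*
++ * Illustrative sketch (not part of the original driver): preparing one
++ * device-mode DMA descriptor. The entry is marked BS_HOST_BUSY while its
++ * fields are filled in and handed to the core as BS_HOST_READY last, so
++ * the core never sees a half-written descriptor. buf_dma is assumed to
++ * be the bus address of the data buffer.
++ */
++static inline void fh_init_dev_dma_desc(fh_otg_dev_dma_desc_t *desc,
++					uint32_t buf_dma, uint16_t len,
++					int last)
++{
++	desc->status.d32 = 0;
++	desc->status.b.bs = BS_HOST_BUSY;	/* keep the core off the entry */
++	desc->buf = buf_dma;
++	desc->status.b.bytes = len;
++	desc->status.b.ioc = 1;			/* interrupt on completion */
++	desc->status.b.l = last ? 1 : 0;	/* last descriptor in the chain */
++	desc->status.b.sts = RTS_SUCCESS;
++	desc->status.b.bs = BS_HOST_READY;	/* hand ownership to the core */
++}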
++
++/**
++ * The fh_otg_dev_if structure contains information needed to manage
++ * the FH_otg controller acting in device mode. It represents the
++ * programming view of the device-specific aspects of the controller.
++ */
++typedef struct fh_otg_dev_if {
++	/** Pointer to device Global registers.
++	 * Device Global Registers starting at offset 800h
++	 */
++	fh_otg_device_global_regs_t *dev_global_regs;
++#define FH_DEV_GLOBAL_REG_OFFSET 0x800
++
++	/**
++	 * Device Logical IN Endpoint-Specific Registers 900h-AFCh
++	 */
++	fh_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS];
++#define FH_DEV_IN_EP_REG_OFFSET 0x900
++#define FH_EP_REG_OFFSET 0x20
++
++	/** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */
++	fh_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS];
++#define FH_DEV_OUT_EP_REG_OFFSET 0xB00
++
++	/* Device configuration information */
++	uint8_t speed;		 /**< Device Speed: 0: Unknown, 1: LS, 2: FS, 3: HS */
++	uint8_t num_in_eps;	 /**< Number of IN (Tx) EPs, range 0-15, excluding EP0 */
++	uint8_t num_out_eps;	 /**< Number of OUT (Rx) EPs, range 0-15, excluding EP0 */
++
++	/** Size of periodic FIFOs (Bytes) */
++	uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS];
++
++	/** Size of Tx FIFOs (Bytes) */
++	uint16_t tx_fifo_size[MAX_TX_FIFOS];
++
++	/** Thresholding enable flags and length variables */
++	uint16_t rx_thr_en;
++	uint16_t iso_tx_thr_en;
++	uint16_t non_iso_tx_thr_en;
++
++	uint16_t rx_thr_length;
++	uint16_t tx_thr_length;
++
++	/**
++	 * Pointers to the DMA Descriptors for EP0 Control
++	 * transfers (virtual and physical)
++	 */
++
++	/** 2 descriptors for SETUP packets */
++	fh_dma_t dma_setup_desc_addr[2];
++	fh_otg_dev_dma_desc_t *setup_desc_addr[2];
++
++	/** Pointer to Descriptor with latest SETUP packet */
++	fh_otg_dev_dma_desc_t *psetup;
++
++	/** Index of current SETUP handler descriptor */
++	uint32_t setup_desc_index;
++
++	/** Descriptor for Data In or Status In phases */
++	fh_dma_t dma_in_desc_addr;
++	fh_otg_dev_dma_desc_t *in_desc_addr;
++
++	/** Descriptor for Data Out or Status Out phases */
++	fh_dma_t dma_out_desc_addr;
++	fh_otg_dev_dma_desc_t *out_desc_addr;
++
++	/** Setup Packet Detected - if set clear NAK when queueing */
++	uint32_t spd;
++	/** Isoc ep pointer on which incomplete happens */
++	void *isoc_ep;
++
++} fh_otg_dev_if_t;
++
++/////////////////////////////////////////////////
++// Host Mode Register Structures
++//
++/**
++ * The Host Global Registers structure defines the size and relative
++ * field offsets for the Host Mode Global Registers.  Host Global
++ * Registers offsets 400h-7FFh.
++*/
++typedef struct fh_otg_host_global_regs {
++	/** Host Configuration Register.   <i>Offset: 400h</i> */
++	volatile uint32_t hcfg;
++	/** Host Frame Interval Register.	<i>Offset: 404h</i> */
++	volatile uint32_t hfir;
++	/** Host Frame Number / Frame Remaining Register. <i>Offset: 408h</i> */
++	volatile uint32_t hfnum;
++	/** Reserved.	<i>Offset: 40Ch</i> */
++	uint32_t reserved40C;
++	/** Host Periodic Transmit FIFO/ Queue Status Register. <i>Offset: 410h</i> */
++	volatile uint32_t hptxsts;
++	/** Host All Channels Interrupt Register. <i>Offset: 414h</i> */
++	volatile uint32_t haint;
++	/** Host All Channels Interrupt Mask Register. <i>Offset: 418h</i> */
++	volatile uint32_t haintmsk;
++	/** Host Frame List Base Address Register . <i>Offset: 41Ch</i> */
++	volatile uint32_t hflbaddr;
++} fh_otg_host_global_regs_t;
++
++/**
++ * This union represents the bit fields in the Host Configuration Register.
++ * Read the register into the <i>d32</i> member then set/clear the bits using
++ * the <i>b</i>it elements. Write the <i>d32</i> member to the hcfg register.
++ */
++typedef union hcfg_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		/** FS/LS Phy Clock Select */
++		unsigned fslspclksel:2;
++#define FH_HCFG_30_60_MHZ 0
++#define FH_HCFG_48_MHZ	   1
++#define FH_HCFG_6_MHZ	   2
++
++		/** FS/LS Only Support */
++		unsigned fslssupp:1;
++		unsigned reserved3_6:4;
++		/** Enable 32-KHz Suspend Mode */
++		unsigned ena32khzs:1;
++		/** Resume Validation Period */
++		unsigned resvalid:8;
++		unsigned reserved16_22:7;
++		/** Enable Scatter/gather DMA in Host mode */
++		unsigned descdma:1;
++		/** Frame List Entries */
++		unsigned frlisten:2;
++		/** Enable Periodic Scheduling */
++		unsigned perschedena:1;
++		unsigned reserved27_30:4;
++		unsigned modechtimen:1;
++	} b;
++} hcfg_data_t;
++
++/**
++ * This union represents the bit fields in the Host Frame Interval
++ * Register (HFIR).
++ */
++typedef union hfir_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		unsigned frint:16;
++		unsigned hfirrldctrl:1;
++		unsigned reserved:15;
++	} b;
++} hfir_data_t;
++
++/**
++ * This union represents the bit fields in the Host Frame Number /
++ * Frame Remaining Register (HFNUM).
++ */
++typedef union hfnum_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		unsigned frnum:16;
++#define FH_HFNUM_MAX_FRNUM 0x3FFF
++		unsigned frrem:16;
++	} b;
++} hfnum_data_t;
++
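++/**
++ * This union represents the bit fields in the Host Periodic Transmit
++ * FIFO/Queue Status Register (HPTXSTS).
++ */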
++typedef union hptxsts_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		unsigned ptxfspcavail:16;
++		unsigned ptxqspcavail:8;
++		/** Top of the Periodic Transmit Request Queue
++		 *	- bit 24 - Terminate (last entry for the selected channel)
++		 *	- bits 26:25 - Token Type
++		 *	  - 2'b00 - Zero length
++		 *	  - 2'b01 - Ping
++		 *	  - 2'b10 - Disable
++		 *	- bits 30:27 - Channel Number
++		 *	- bit 31 - Odd/even microframe
++		 */
++		unsigned ptxqtop_terminate:1;
++		unsigned ptxqtop_token:2;
++		unsigned ptxqtop_chnum:4;
++		unsigned ptxqtop_odd:1;
++	} b;
++} hptxsts_data_t;
++
++/**
++ * This union represents the bit fields in the Host Port Control and Status
++ * Register. Read the register into the <i>d32</i> member then set/clear the
++ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
++ * hprt0 register.
++ */
++typedef union hprt0_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned prtconnsts:1;
++		unsigned prtconndet:1;
++		unsigned prtena:1;
++		unsigned prtenchng:1;
++		unsigned prtovrcurract:1;
++		unsigned prtovrcurrchng:1;
++		unsigned prtres:1;
++		unsigned prtsusp:1;
++		unsigned prtrst:1;
++		unsigned reserved9:1;
++		unsigned prtlnsts:2;
++		unsigned prtpwr:1;
++		unsigned prttstctl:4;
++		unsigned prtspd:2;
++#define FH_HPRT0_PRTSPD_HIGH_SPEED 0
++#define FH_HPRT0_PRTSPD_FULL_SPEED 1
++#define FH_HPRT0_PRTSPD_LOW_SPEED	2
++		unsigned reserved19_31:13;
++	} b;
++} hprt0_data_t;
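++
++/*
++ * Illustrative sketch (not part of the original driver): HPRT0 mixes
++ * control bits with write-1-to-clear change bits (prtconndet, prtenchng,
++ * prtovrcurrchng), and prtena is also cleared by writing 1 to it, so a
++ * blind read-modify-write can clear state by accident. The usual pattern
++ * is to zero those bits in the read-back value before setting the bit of
++ * interest.
++ */
++static inline void fh_hprt0_set_reset(volatile uint32_t *hprt0_reg, int on)
++{
++	hprt0_data_t hprt0;
++
++	hprt0.d32 = *hprt0_reg;
++	hprt0.b.prtena = 0;		/* writing 1 would disable the port */
++	hprt0.b.prtconndet = 0;		/* don't clear pending change bits */
++	hprt0.b.prtenchng = 0;
++	hprt0.b.prtovrcurrchng = 0;
++	hprt0.b.prtrst = on ? 1 : 0;	/* drive or release port reset */
++	*hprt0_reg = hprt0.d32;
++}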
++
++/**
++ * This union represents the bit fields in the Host All Channels
++ * Interrupt Register (HAINT).
++ */
++typedef union haint_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned ch0:1;
++		unsigned ch1:1;
++		unsigned ch2:1;
++		unsigned ch3:1;
++		unsigned ch4:1;
++		unsigned ch5:1;
++		unsigned ch6:1;
++		unsigned ch7:1;
++		unsigned ch8:1;
++		unsigned ch9:1;
++		unsigned ch10:1;
++		unsigned ch11:1;
++		unsigned ch12:1;
++		unsigned ch13:1;
++		unsigned ch14:1;
++		unsigned ch15:1;
++		unsigned reserved:16;
++	} b;
++
++	struct {
++		unsigned chint:16;
++		unsigned reserved:16;
++	} b2;
++} haint_data_t;
++
++/**
++ * This union represents the bit fields in the Host All Channels
++ * Interrupt Mask Register (HAINTMSK).
++ */
++typedef union haintmsk_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned ch0:1;
++		unsigned ch1:1;
++		unsigned ch2:1;
++		unsigned ch3:1;
++		unsigned ch4:1;
++		unsigned ch5:1;
++		unsigned ch6:1;
++		unsigned ch7:1;
++		unsigned ch8:1;
++		unsigned ch9:1;
++		unsigned ch10:1;
++		unsigned ch11:1;
++		unsigned ch12:1;
++		unsigned ch13:1;
++		unsigned ch14:1;
++		unsigned ch15:1;
++		unsigned reserved:16;
++	} b;
++
++	struct {
++		unsigned chint:16;
++		unsigned reserved:16;
++	} b2;
++} haintmsk_data_t;
++
++/**
++ * Host Channel Specific Registers. <i>500h-5FCh</i>
++ */
++typedef struct fh_otg_hc_regs {
++	/** Host Channel 0 Characteristic Register. <i>Offset: 500h + (chan_num * 20h) + 00h</i> */
++	volatile uint32_t hcchar;
++	/** Host Channel 0 Split Control Register. <i>Offset: 500h + (chan_num * 20h) + 04h</i> */
++	volatile uint32_t hcsplt;
++	/** Host Channel 0 Interrupt Register. <i>Offset: 500h + (chan_num * 20h) + 08h</i> */
++	volatile uint32_t hcint;
++	/** Host Channel 0 Interrupt Mask Register. <i>Offset: 500h + (chan_num * 20h) + 0Ch</i> */
++	volatile uint32_t hcintmsk;
++	/** Host Channel 0 Transfer Size Register. <i>Offset: 500h + (chan_num * 20h) + 10h</i> */
++	volatile uint32_t hctsiz;
++	/** Host Channel 0 DMA Address Register. <i>Offset: 500h + (chan_num * 20h) + 14h</i> */
++	volatile uint32_t hcdma;
++	volatile uint32_t reserved;
++	/** Host Channel 0 DMA Buffer Address Register. <i>Offset: 500h + (chan_num * 20h) + 1Ch</i> */
++	volatile uint32_t hcdmab;
++} fh_otg_hc_regs_t;
++
++/**
++ * This union represents the bit fields in the Host Channel Characteristics
++ * Register. Read the register into the <i>d32</i> member then set/clear the
++ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
++ * hcchar register.
++ */
++typedef union hcchar_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		/** Maximum packet size in bytes */
++		unsigned mps:11;
++
++		/** Endpoint number */
++		unsigned epnum:4;
++
++		/** 0: OUT, 1: IN */
++		unsigned epdir:1;
++
++		unsigned reserved:1;
++
++		/** 0: Full/high speed device, 1: Low speed device */
++		unsigned lspddev:1;
++
++		/** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */
++		unsigned eptype:2;
++
++		/** Packets per frame for periodic transfers. 0 is reserved. */
++		unsigned multicnt:2;
++
++		/** Device address */
++		unsigned devaddr:7;
++
++		/**
++		 * Frame to transmit periodic transaction.
++		 * 0: even, 1: odd
++		 */
++		unsigned oddfrm:1;
++
++		/** Channel disable */
++		unsigned chdis:1;
++
++		/** Channel enable */
++		unsigned chen:1;
++	} b;
++} hcchar_data_t;
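++
++/*
++ * Illustrative sketch (not part of the original driver): filling HCCHAR
++ * for a bulk IN transfer to a full/high-speed device, using the field
++ * encodings documented above (epdir 1 = IN, eptype 2 = Bulk). The value
++ * is returned for the caller to write once HCTSIZ/HCDMA are set up.
++ */
++static inline uint32_t fh_hcchar_bulk_in(uint8_t devaddr, uint8_t epnum,
++					 uint16_t mps)
++{
++	hcchar_data_t hcchar = {.d32 = 0};
++
++	hcchar.b.devaddr = devaddr;
++	hcchar.b.epnum = epnum;
++	hcchar.b.epdir = 1;	/* 1: IN */
++	hcchar.b.eptype = 2;	/* 2: Bulk */
++	hcchar.b.mps = mps;
++	hcchar.b.multicnt = 1;	/* 0 is reserved */
++	return hcchar.d32;
++}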
++
++typedef union hcsplt_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		/** Port Address */
++		unsigned prtaddr:7;
++
++		/** Hub Address */
++		unsigned hubaddr:7;
++
++		/** Transaction Position */
++		unsigned xactpos:2;
++#define FH_HCSPLIT_XACTPOS_MID 0
++#define FH_HCSPLIT_XACTPOS_END 1
++#define FH_HCSPLIT_XACTPOS_BEGIN 2
++#define FH_HCSPLIT_XACTPOS_ALL 3
++
++		/** Do Complete Split */
++		unsigned compsplt:1;
++
++		/** Reserved */
++		unsigned reserved:14;
++
++		/** Split Enable */
++		unsigned spltena:1;
++	} b;
++} hcsplt_data_t;
++
++/**
++ * This union represents the bit fields in the Host Channel Interrupt
++ * Register (HCINT).
++ */
++typedef union hcint_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** Transfer Complete */
++		unsigned xfercomp:1;
++		/** Channel Halted */
++		unsigned chhltd:1;
++		/** AHB Error */
++		unsigned ahberr:1;
++		/** STALL Response Received */
++		unsigned stall:1;
++		/** NAK Response Received */
++		unsigned nak:1;
++		/** ACK Response Received */
++		unsigned ack:1;
++		/** NYET Response Received */
++		unsigned nyet:1;
++		/** Transaction Err */
++		unsigned xacterr:1;
++		/** Babble Error */
++		unsigned bblerr:1;
++		/** Frame Overrun */
++		unsigned frmovrun:1;
++		/** Data Toggle Error */
++		unsigned datatglerr:1;
++		/** Buffer Not Available (only for DDMA mode) */
++		unsigned bna:1;
++		/** Excessive transaction error (only for DDMA mode) */
++		unsigned xcs_xact:1;
++		/** Frame List Rollover interrupt */
++		unsigned frm_list_roll:1;
++		/** Reserved */
++		unsigned reserved14_31:18;
++	} b;
++} hcint_data_t;
++
++/**
++ * This union represents the bit fields in the Host Channel Interrupt Mask
++ * Register. Read the register into the <i>d32</i> member then set/clear the
++ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
++ * hcintmsk register.
++ */
++typedef union hcintmsk_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		unsigned xfercompl:1;
++		unsigned chhltd:1;
++		unsigned ahberr:1;
++		unsigned stall:1;
++		unsigned nak:1;
++		unsigned ack:1;
++		unsigned nyet:1;
++		unsigned xacterr:1;
++		unsigned bblerr:1;
++		unsigned frmovrun:1;
++		unsigned datatglerr:1;
++		unsigned bna:1;
++		unsigned xcs_xact:1;
++		unsigned frm_list_roll:1;
++		unsigned reserved14_31:18;
++	} b;
++} hcintmsk_data_t;
++
++/**
++ * This union represents the bit fields in the Host Channel Transfer Size
++ * Register. Read the register into the <i>d32</i> member then set/clear the
++ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
++ * hctsiz register.
++ */
++typedef union hctsiz_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		/** Total transfer size in bytes */
++		unsigned xfersize:19;
++
++		/** Data packets to transfer */
++		unsigned pktcnt:10;
++
++		/**
++		 * Packet ID for next data packet
++		 * 0: DATA0
++		 * 1: DATA2
++		 * 2: DATA1
++		 * 3: MDATA (non-Control), SETUP (Control)
++		 */
++		unsigned pid:2;
++#define FH_HCTSIZ_DATA0 0
++#define FH_HCTSIZ_DATA1 2
++#define FH_HCTSIZ_DATA2 1
++#define FH_HCTSIZ_MDATA 3
++#define FH_HCTSIZ_SETUP 3
++
++		/** Do PING protocol when 1 */
++		unsigned dopng:1;
++	} b;
++
++	/** register bits */
++	struct {
++		/** Scheduling information */
++		unsigned schinfo:8;
++
++		/** Number of transfer descriptors.
++		 * Max value:
++		 * 64 in general,
++		 * 256 only for HS isochronous endpoint.
++		 */
++		unsigned ntd:8;
++
++		/** Reserved */
++		unsigned reserved16_28:13;
++
++		/**
++		 * Packet ID for next data packet
++		 * 0: DATA0
++		 * 1: DATA2
++		 * 2: DATA1
++		 * 3: MDATA (non-Control)
++		 */
++		unsigned pid:2;
++
++		/** Do PING protocol when 1 */
++		unsigned dopng:1;
++	} b_ddma;
++} hctsiz_data_t;
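++
++/*
++ * Example (editor's illustrative sketch): programming HCTSIZ for a
++ * 512-byte bulk transfer with a 64-byte max packet size; pktcnt is
++ * DIV_ROUND_UP(512, 64) = 8 packets, and the first packet starts on DATA0.
++ *
++ *	hctsiz_data_t hctsiz = { .d32 = 0 };
++ *
++ *	hctsiz.b.xfersize = 512;
++ *	hctsiz.b.pktcnt = 8;
++ *	hctsiz.b.pid = FH_HCTSIZ_DATA0;
++ *	writel(hctsiz.d32, &hc_regs->hctsiz);
++ */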
++
++/**
++ * This union represents the bit fields in the Host Channel DMA Address
++ * Register used in Descriptor DMA mode.
++ */
++typedef union hcdma_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		unsigned reserved0_2:3;
++		/** Current Transfer Descriptor. Not used for ISOC */
++		unsigned ctd:8;
++		/** Start Address of Descriptor List */
++		unsigned dma_addr:21;
++	} b;
++} hcdma_data_t;
++
++/**
++ * This union represents the bit fields in the DMA Descriptor
++ * status quadlet for host mode. Read the quadlet into the <i>d32</i> member then
++ * set/clear the bits using the <i>b</i>it elements.
++ */
++typedef union host_dma_desc_sts {
++	/** raw register data */
++	uint32_t d32;
++	/** quadlet bits */
++
++	/* for non-isochronous transfers */
++	struct {
++		/** Number of bytes */
++		unsigned n_bytes:17;
++		/** QTD offset to jump when Short Packet received - only for IN EPs */
++		unsigned qtd_offset:6;
++		/**
++		 * Set to request the core to jump to alternate QTD if
++		 * Short Packet received - only for IN EPs
++		 */
++		unsigned a_qtd:1;
++		 /**
++		  * Setup Packet bit. When set indicates that buffer contains
++		  * setup packet.
++		  */
++		unsigned sup:1;
++		/** Interrupt On Complete */
++		unsigned ioc:1;
++		/** End of List */
++		unsigned eol:1;
++		unsigned reserved27:1;
++		/** Rx/Tx Status */
++		unsigned sts:2;
++#define DMA_DESC_STS_PKTERR	1
++		unsigned reserved30:1;
++		/** Active Bit */
++		unsigned a:1;
++	} b;
++	/* for isochronous transfers */
++	struct {
++		/** Number of bytes */
++		unsigned n_bytes:12;
++		unsigned reserved12_24:13;
++		/** Interrupt On Complete */
++		unsigned ioc:1;
++		unsigned reserved26_27:2;
++		/** Rx/Tx Status */
++		unsigned sts:2;
++		unsigned reserved30:1;
++		/** Active Bit */
++		unsigned a:1;
++	} b_isoc;
++} host_dma_desc_sts_t;
++
++#define MAX_DMA_DESC_SIZE		131071
++#define MAX_DMA_DESC_NUM_GENERIC	64
++#define MAX_DMA_DESC_NUM_HS_ISOC	256
++#define MAX_FRLIST_EN_NUM		64
++/**
++ * Host-mode DMA Descriptor structure
++ *
++ * DMA Descriptor structure contains two quadlets:
++ * Status quadlet and Data buffer pointer.
++ */
++typedef struct fh_otg_host_dma_desc {
++	/** DMA Descriptor status quadlet */
++	host_dma_desc_sts_t status;
++	/** DMA Descriptor data buffer pointer */
++	uint32_t buf;
++} fh_otg_host_dma_desc_t;
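++
++/*
++ * Example (editor's illustrative sketch): filling the last descriptor of a
++ * non-isochronous list in Descriptor DMA mode. `desc` is assumed to point
++ * into a DMA-coherent array of fh_otg_host_dma_desc_t and `buf_dma` is the
++ * bus address of the data buffer.
++ *
++ *	desc->status.d32 = 0;
++ *	desc->status.b.n_bytes = len;
++ *	desc->status.b.ioc = 1;		// interrupt when this one completes
++ *	desc->status.b.eol = 1;		// last descriptor in the list
++ *	desc->status.b.a = 1;		// hand it to the core
++ *	desc->buf = buf_dma;
++ */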
++
++/** OTG Host Interface Structure.
++ *
++ * The OTG Host Interface structure contains information
++ * needed to manage the FH_otg controller acting in host mode. It
++ * represents the programming view of the host-specific aspects of the
++ * controller.
++ */
++typedef struct fh_otg_host_if {
++	/** Host Global Registers starting at offset 400h.*/
++	fh_otg_host_global_regs_t *host_global_regs;
++#define FH_OTG_HOST_GLOBAL_REG_OFFSET 0x400
++
++	/** Host Port 0 Control and Status Register */
++	volatile uint32_t *hprt0;
++#define FH_OTG_HOST_PORT_REGS_OFFSET 0x440
++
++	/** Host Channel Specific Registers at offsets 500h-5FCh. */
++	fh_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS];
++#define FH_OTG_HOST_CHAN_REGS_OFFSET 0x500
++#define FH_OTG_CHAN_REGS_OFFSET 0x20
++
++	/* Host configuration information */
++	/** Number of Host Channels (range: 1-16) */
++	uint8_t num_host_channels;
++	/** Periodic EPs supported (0: no, 1: yes) */
++	uint8_t perio_eps_supported;
++	/** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */
++	uint16_t perio_tx_fifo_size;
++
++} fh_otg_host_if_t;
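++
++/*
++ * Example (editor's illustrative sketch): how the per-channel register
++ * pointers are presumably derived from the offsets above, given `base`,
++ * the ioremapped start of the core's register space:
++ *
++ *	host_if->host_global_regs = (fh_otg_host_global_regs_t *)
++ *		(base + FH_OTG_HOST_GLOBAL_REG_OFFSET);
++ *	host_if->hprt0 = (volatile uint32_t *)
++ *		(base + FH_OTG_HOST_PORT_REGS_OFFSET);
++ *	for (i = 0; i < MAX_EPS_CHANNELS; i++)
++ *		host_if->hc_regs[i] = (fh_otg_hc_regs_t *)
++ *			(base + FH_OTG_HOST_CHAN_REGS_OFFSET +
++ *			 i * FH_OTG_CHAN_REGS_OFFSET);
++ */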
++
++/**
++ * This union represents the bit fields in the Power and Clock Gating Control
++ * Register. Read the register into the <i>d32</i> member then set/clear the
++ * bits using the <i>b</i>it elements.
++ */
++typedef union pcgcctl_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		/** Stop Pclk */
++		unsigned stoppclk:1;
++		/** Gate Hclk */
++		unsigned gatehclk:1;
++		/** Power Clamp */
++		unsigned pwrclmp:1;
++		/** Reset Power Down Modules */
++		unsigned rstpdwnmodule:1;
++		/** Reserved */
++		unsigned reserved:1;
++		/** Enable Sleep Clock Gating (Enbl_L1Gating) */
++		unsigned enbl_sleep_gating:1;
++		/** PHY In Sleep (PhySleep) */
++		unsigned phy_in_sleep:1;
++		/** Deep Sleep*/
++		unsigned deep_sleep:1;
++		unsigned resetaftsusp:1;
++		unsigned restoremode:1;
++		unsigned enbl_extnd_hiber:1;
++		unsigned extnd_hiber_pwrclmp:1;
++		unsigned extnd_hiber_switch:1;
++		unsigned ess_reg_restored:1;
++		unsigned prt_clk_sel:2;
++		unsigned port_power:1;
++		unsigned max_xcvrselect:2;
++		unsigned max_termsel:1;
++		unsigned mac_dev_addr:7;
++		unsigned p2hd_dev_enum_spd:2;
++		unsigned p2hd_prt_spd:2;
++		unsigned if_dev_mode:1;
++	} b;
++} pcgcctl_data_t;
++
++/**
++ * This union represents the bit fields in the Global Data FIFO Software
++ * Configuration Register. Read the register into the <i>d32</i> member then
++ * set/clear the bits using the <i>b</i>it elements.
++ */
++typedef union gdfifocfg_data {
++	/** raw register data */
++	uint32_t d32;
++	/** register bits */
++	struct {
++		/** OTG Data FIFO depth */
++		unsigned gdfifocfg:16;
++		/** Start address of EP info controller */
++		unsigned epinfobase:16;
++	} b;
++} gdfifocfg_data_t;
++
++/**
++ * This union represents the bit fields in the Global Power Down
++ * Register. Read the register into the <i>d32</i> member then set/clear the
++ * bits using the <i>b</i>it elements.
++ */
++typedef union gpwrdn_data {
++	/** raw register data */
++	uint32_t d32;
++
++	/** register bits */
++	struct {
++		/** PMU Interrupt Select */
++		unsigned pmuintsel:1;
++		/** PMU Active */
++		unsigned pmuactv:1;
++		/** Restore */
++		unsigned restore:1;
++		/** Power Down Clamp */
++		unsigned pwrdnclmp:1;
++		/** Power Down Reset */
++		unsigned pwrdnrstn:1;
++		/** Power Down Switch */
++		unsigned pwrdnswtch:1;
++		/** Disable VBUS */
++		unsigned dis_vbus:1;
++		/** Line State Change */
++		unsigned lnstschng:1;
++		/** Line state change mask */
++		unsigned lnstchng_msk:1;
++		/** Reset Detected */
++		unsigned rst_det:1;
++		/** Reset Detect mask */
++		unsigned rst_det_msk:1;
++		/** Disconnect Detected */
++		unsigned disconn_det:1;
++		/** Disconnect Detect mask */
++		unsigned disconn_det_msk:1;
++		/** Connect Detected*/
++		unsigned connect_det:1;
++		/** Connect Detected Mask*/
++		unsigned connect_det_msk:1;
++		/** SRP Detected */
++		unsigned srp_det:1;
++		/** SRP Detect mask */
++		unsigned srp_det_msk:1;
++		/** Status Change Interrupt */
++		unsigned sts_chngint:1;
++		/** Status Change Interrupt Mask */
++		unsigned sts_chngint_msk:1;
++		/** Line State */
++		unsigned linestate:2;
++		/** Indicates current mode (status of IDDIG signal) */
++		unsigned idsts:1;
++		/** B Session Valid signal status*/
++		unsigned bsessvld:1;
++		/** ADP Event Detected */
++		unsigned adp_int:1;
++		/** Multi Valued ID pin */
++		unsigned mult_val_id_bc:5;
++		/** Reserved 29_31 */
++		unsigned reserved29_31:3;
++	} b;
++} gpwrdn_data_t;
++
++#endif
+diff --git a/drivers/usb/host/fh_otg/fh_otg/test/Makefile b/drivers/usb/host/fh_otg/fh_otg/test/Makefile
+new file mode 100644
+index 00000000..fc453759
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/test/Makefile
+@@ -0,0 +1,16 @@
++
++PERL=/usr/bin/perl
++PL_TESTS=test_sysfs.pl test_mod_param.pl
++
++.PHONY : test
++test : perl_tests
++
++perl_tests :
++	@echo
++	@echo Running perl tests
++	@for test in $(PL_TESTS); do \
++	  if $(PERL) ./$$test ; then \
++	    echo "=======> $$test, PASSED" ; \
++	  else echo "=======> $$test, FAILED" ; \
++	  fi \
++	done
+diff --git a/drivers/usb/host/fh_otg/fh_otg/test/fh_otg_test.pm b/drivers/usb/host/fh_otg/fh_otg/test/fh_otg_test.pm
+new file mode 100644
+index 00000000..b4b4c294
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/test/fh_otg_test.pm
+@@ -0,0 +1,337 @@
++package fh_otg_test;
++
++use strict;
++use Exporter ();
++
++use vars qw(@ISA @EXPORT
++$sysfsdir $paramdir $errors $params 
++);
++
++@ISA = qw(Exporter);
++
++#
++# Globals
++#
++$sysfsdir = "/sys/devices/lm0";
++$paramdir = "/sys/module/fh_otg";
++$errors = 0;
++
++$params = [
++	   {
++	    NAME => "otg_cap",
++	    DEFAULT => 0,
++	    ENUM => [],
++	    LOW => 0,
++	    HIGH => 2
++	   },
++	   {
++	    NAME => "dma_enable",
++	    DEFAULT => 0,
++	    ENUM => [],
++	    LOW => 0,
++	    HIGH => 1
++	   },
++	   { 
++	    NAME => "dma_burst_size",
++	    DEFAULT => 32,
++	    ENUM => [1, 4, 8, 16, 32, 64, 128, 256],
++	    LOW => 1,
++	    HIGH => 256
++	   },
++	   { 
++	    NAME => "host_speed",
++	    DEFAULT => 0,
++	    ENUM => [],
++	    LOW => 0,
++	    HIGH => 1
++	   },
++	   { 
++	    NAME => "host_support_fs_ls_low_power",
++	    DEFAULT => 0,
++	    ENUM => [],
++	    LOW => 0,
++	    HIGH => 1
++	   },
++	   { 
++	    NAME => "host_ls_low_power_phy_clk",
++	    DEFAULT => 0,
++	    ENUM => [],
++	    LOW => 0,
++	    HIGH => 1
++	   },
++	   { 
++	    NAME => "dev_speed",
++	    DEFAULT => 0,
++	    ENUM => [],
++	    LOW => 0,
++	    HIGH => 1
++	   },
++	   { 
++	    NAME => "enable_dynamic_fifo",
++	    DEFAULT => 1,
++	    ENUM => [],
++	    LOW => 0,
++	    HIGH => 1
++	   },
++	   { 
++	    NAME => "data_fifo_size",
++	    DEFAULT => 8192,
++	    ENUM => [],
++	    LOW => 32,
++	    HIGH => 32768
++	   },
++	   { 
++	    NAME => "dev_rx_fifo_size",
++	    DEFAULT => 1064,
++	    ENUM => [],
++	    LOW => 16,
++	    HIGH => 32768
++	   },
++	   { 
++	    NAME => "dev_nperio_tx_fifo_size",
++	    DEFAULT => 1024,
++	    ENUM => [],
++	    LOW => 16,
++	    HIGH => 32768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_1",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_2",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_3",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_4",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_5",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_6",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_7",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_8",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_9",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_10",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_11",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_12",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_13",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_14",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "dev_perio_tx_fifo_size_15",
++	    DEFAULT => 256,
++	    ENUM => [],
++	    LOW => 4,
++	    HIGH => 768
++	   },
++	   { 
++	    NAME => "host_rx_fifo_size",
++	    DEFAULT => 1024,
++	    ENUM => [],
++	    LOW => 16,
++	    HIGH => 32768
++	   },
++	   { 
++	    NAME => "host_nperio_tx_fifo_size",
++	    DEFAULT => 1024,
++	    ENUM => [],
++	    LOW => 16,
++	    HIGH => 32768
++	   },
++	   { 
++	    NAME => "host_perio_tx_fifo_size",
++	    DEFAULT => 1024,
++	    ENUM => [],
++	    LOW => 16,
++	    HIGH => 32768
++	   },
++	   { 
++	    NAME => "max_transfer_size",
++	    DEFAULT => 65535,
++	    ENUM => [],
++	    LOW => 2047,
++	    HIGH => 65535
++	   },
++	   { 
++	    NAME => "max_packet_count",
++	    DEFAULT => 511,
++	    ENUM => [],
++	    LOW => 15,
++	    HIGH => 511
++	   },
++	   { 
++	    NAME => "host_channels",
++	    DEFAULT => 12,
++	    ENUM => [],
++	    LOW => 1,
++	    HIGH => 16
++	   },
++	   { 
++	    NAME => "dev_endpoints",
++	    DEFAULT => 6,
++	    ENUM => [],
++	    LOW => 1,
++	    HIGH => 15
++	   },
++	   { 
++	    NAME => "phy_type",
++	    DEFAULT => 1,
++	    ENUM => [],
++	    LOW => 0,
++	    HIGH => 2
++	   },
++	   { 
++	    NAME => "phy_utmi_width",
++	    DEFAULT => 16,
++	    ENUM => [8, 16],
++	    LOW => 8,
++	    HIGH => 16
++	   },
++	   { 
++	    NAME => "phy_ulpi_ddr",
++	    DEFAULT => 0,
++	    ENUM => [],
++	    LOW => 0,
++	    HIGH => 1
++	   },
++	  ];
++
++
++#
++#
++sub check_arch {
++  $_ = `uname -m`;
++  chomp;
++  unless (m/armv4tl/) {
++    warn "# \n# Can't execute on $_.  Run on integrator platform.\n# \n";
++    return 0;
++  }
++  return 1;
++}
++
++#
++#
++sub load_module {
++  my $params = shift;
++  print "\nRemoving Module\n";
++  system "rmmod fh_otg";
++  print "Loading Module\n";
++  if ($params ne "") {
++    print "Module Parameters: $params\n";
++  }
++  if (system("modprobe fh_otg $params")) { 
++    warn "Unable to load module\n";
++    return 0; 
++  }
++  return 1;
++}
++
++#
++#
++sub test_status {
++  my $arg = shift;
++
++  print "\n";
++
++  if (defined $arg) {
++    warn "WARNING: $arg\n";
++  }
++  
++  if ($errors > 0) {
++    warn "TEST FAILED with $errors errors\n";
++    return 0;
++  } else {
++    print "TEST PASSED\n";
++    return 0 if (defined $arg);
++  }
++  return 1;
++}
++
++#
++#
++@EXPORT = qw(
++$sysfsdir
++$paramdir
++$params
++$errors
++check_arch
++load_module
++test_status
++);
++
++1;
+diff --git a/drivers/usb/host/fh_otg/fh_otg/test/test_mod_param.pl b/drivers/usb/host/fh_otg/fh_otg/test/test_mod_param.pl
+new file mode 100644
+index 00000000..f7c6549c
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/test/test_mod_param.pl
+@@ -0,0 +1,133 @@
++#!/usr/bin/perl -w
++# 
++# Run this program on the integrator.
++#
++# - Tests module parameter default values.
++# - Tests setting of valid module parameter values via modprobe.
++# - Tests invalid module parameter values.
++# -----------------------------------------------------------------------------
++use strict;
++use fh_otg_test;
++
++check_arch() or die;
++
++#
++#
++sub test {
++  my ($param,$expected) = @_;
++  my $value = get($param);
++
++  if ($value == $expected) {
++    print "$param = $value, okay\n";
++  }
++
++  else {
++    warn "ERROR: value of $param != $expected, $value\n";
++    $errors ++;
++  }
++}
++
++#
++#
++sub get {
++  my $param = shift;
++  my $tmp = `cat $paramdir/$param`;
++  chomp $tmp;
++  return $tmp;
++}
++
++#
++#
++sub test_main {
++
++  print "\nTesting Module Parameters\n";
++
++  load_module("") or die;
++
++  # Test initial values
++  print "\nTesting Default Values\n";
++  foreach (@{$params}) {
++    test ($_->{NAME}, $_->{DEFAULT});
++  }
++
++  # Test low value
++  print "\nTesting Low Value\n";
++  my $cmd_params = "";
++  foreach (@{$params}) {
++    $cmd_params = $cmd_params . "$_->{NAME}=$_->{LOW} ";
++  }
++  load_module($cmd_params) or die;
++
++  foreach (@{$params}) {
++    test ($_->{NAME}, $_->{LOW});
++  }
++
++  # Test high value
++  print "\nTesting High Value\n";
++  $cmd_params = "";
++  foreach (@{$params}) {
++    $cmd_params = $cmd_params . "$_->{NAME}=$_->{HIGH} ";
++  }
++  load_module($cmd_params) or die;
++
++  foreach (@{$params}) {
++    test ($_->{NAME}, $_->{HIGH});
++  }
++
++  # Test Enum
++  print "\nTesting Enumerated\n";
++  foreach (@{$params}) {
++    if (defined $_->{ENUM}) {
++      my $value;
++      foreach $value (@{$_->{ENUM}}) {
++	$cmd_params = "$_->{NAME}=$value";
++	load_module($cmd_params) or die;
++	test ($_->{NAME}, $value);
++      }
++    }
++  }
++
++  # Test Invalid Values
++  print "\nTesting Invalid Values\n";
++  $cmd_params = "";
++  foreach (@{$params}) {
++    $cmd_params = $cmd_params . sprintf "$_->{NAME}=%d ", $_->{LOW}-1;
++  }
++  load_module($cmd_params) or die;
++
++  foreach (@{$params}) {
++    test ($_->{NAME}, $_->{DEFAULT});
++  }
++
++  $cmd_params = "";
++  foreach (@{$params}) {
++    $cmd_params = $cmd_params . sprintf "$_->{NAME}=%d ", $_->{HIGH}+1;
++  }
++  load_module($cmd_params) or die;
++
++  foreach (@{$params}) {
++    test ($_->{NAME}, $_->{DEFAULT});
++  }
++
++  print "\nTesting Enumerated\n";
++  foreach (@{$params}) {
++    if (defined $_->{ENUM}) {
++      my $value;
++      foreach $value (@{$_->{ENUM}}) {
++	$value = $value + 1;
++	$cmd_params = "$_->{NAME}=$value";
++	load_module($cmd_params) or die;
++	test ($_->{NAME}, $_->{DEFAULT});
++	$value = $value - 2;
++	$cmd_params = "$_->{NAME}=$value";
++	load_module($cmd_params) or die;
++	test ($_->{NAME}, $_->{DEFAULT});
++      }
++    }
++  }
++
++  test_status() or die;
++}
++
++test_main();
++0;
+diff --git a/drivers/usb/host/fh_otg/fh_otg/test/test_sysfs.pl b/drivers/usb/host/fh_otg/fh_otg/test/test_sysfs.pl
+new file mode 100644
+index 00000000..0eecbc7f
+--- /dev/null
++++ b/drivers/usb/host/fh_otg/fh_otg/test/test_sysfs.pl
+@@ -0,0 +1,193 @@
++#!/usr/bin/perl -w
++# 
++# Run this program on the integrator
++# - Tests select sysfs attributes.
++# - Todo ... test more attributes, hnp/srp, buspower/bussuspend, etc.
++# -----------------------------------------------------------------------------
++use strict;
++use fh_otg_test;
++
++check_arch() or die;
++
++#
++#
++sub test {
++  my ($attr,$expected) = @_;
++  my $string = get($attr);
++
++  if ($string eq $expected) {
++    printf("$attr = $string, okay\n");
++  }
++  else {
++    warn "ERROR: value of $attr != $expected, $string\n";
++    $errors ++;
++  }
++}
++
++#
++#
++sub set {
++  my ($reg, $value) = @_;
++  system "echo $value > $sysfsdir/$reg";
++}
++
++#
++#
++sub get {
++  my $attr = shift;
++  my $string = `cat $sysfsdir/$attr`;
++  chomp $string;
++  if ($string =~ m/\s\=\s/) {
++    my $tmp;
++    ($tmp, $string) = split /\s=\s/, $string;
++  }
++  return $string;
++}
++
++#
++#
++sub test_main {
++  print("\nTesting Sysfs Attributes\n");
++
++  load_module("") or die;
++
++  # Test initial values of regoffset/regvalue/guid/gsnpsid
++  print("\nTesting Default Values\n");
++
++  test("regoffset", "0xffffffff");
++  test("regvalue", "invalid offset");
++  test("guid", "0x12345678");	# this will fail if it has been changed
++  test("gsnpsid", "0x4f54200a");
++
++  # Test operation of regoffset/regvalue
++  print("\nTesting regoffset\n");
++  set('regoffset', '5a5a5a5a');
++  test("regoffset", "0xffffffff");
++
++  set('regoffset', '0');
++  test("regoffset", "0x00000000");
++
++  set('regoffset', '40000');
++  test("regoffset", "0x00000000");
++
++  set('regoffset', '3ffff');
++  test("regoffset", "0x0003ffff");
++
++  set('regoffset', '1');
++  test("regoffset", "0x00000001");
++
++  print("\nTesting regvalue\n");
++  set('regoffset', '3c');
++  test("regvalue", "0x12345678");
++  set('regvalue', '5a5a5a5a');
++  test("regvalue", "0x5a5a5a5a");
++  set('regvalue','a5a5a5a5');
++  test("regvalue", "0xa5a5a5a5");
++  set('guid','12345678');
++
++  # Test HNP Capable
++  print("\nTesting HNP Capable bit\n");
++  set('hnpcapable', '1');
++  test("hnpcapable", "0x1");
++  set('hnpcapable','0');
++  test("hnpcapable", "0x0");
++
++  set('regoffset','0c');
++
++  my $old = get('gusbcfg');
++  print("setting hnpcapable\n");
++  set('hnpcapable', '1');
++  test("hnpcapable", "0x1");
++  test('gusbcfg', sprintf "0x%08x", (oct ($old) | (1<<9)));
++  test('regvalue', sprintf "0x%08x", (oct ($old) | (1<<9)));
++
++  $old = get('gusbcfg');
++  print("clearing hnpcapable\n");
++  set('hnpcapable', '0');
++  test("hnpcapable", "0x0");
++  test ('gusbcfg', sprintf "0x%08x", oct ($old) & (~(1<<9)));
++  test ('regvalue', sprintf "0x%08x", oct ($old) & (~(1<<9)));
++
++  # Test SRP Capable
++  print("\nTesting SRP Capable bit\n");
++  set('srpcapable', '1');
++  test("srpcapable", "0x1");
++  set('srpcapable','0');
++  test("srpcapable", "0x0");
++
++  set('regoffset','0c');
++
++  $old = get('gusbcfg');
++  print("setting srpcapable\n");
++  set('srpcapable', '1');
++  test("srpcapable", "0x1");
++  test('gusbcfg', sprintf "0x%08x", (oct ($old) | (1<<8)));
++  test('regvalue', sprintf "0x%08x", (oct ($old) | (1<<8)));
++
++  $old = get('gusbcfg');
++  print("clearing srpcapable\n");
++  set('srpcapable', '0');
++  test("srpcapable", "0x0");
++  test('gusbcfg', sprintf "0x%08x", oct ($old) & (~(1<<8)));
++  test('regvalue', sprintf "0x%08x", oct ($old) & (~(1<<8)));
++
++  # Test GGPIO
++  print("\nTesting GGPIO\n");
++  set('ggpio','5a5a5a5a');
++  test('ggpio','0x5a5a0000');
++  set('ggpio','a5a5a5a5');
++  test('ggpio','0xa5a50000');
++  set('ggpio','11110000');
++  test('ggpio','0x11110000');
++  set('ggpio','00001111');
++  test('ggpio','0x00000000');
++
++  # Test DEVSPEED
++  print("\nTesting DEVSPEED\n");
++  set('regoffset','800');
++  $old = get('regvalue');
++  set('devspeed','0');
++  test('devspeed','0x0');
++  test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3)));
++  set('devspeed','1');
++  test('devspeed','0x1');
++  test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3) | 1));
++  set('devspeed','2');
++  test('devspeed','0x2');
++  test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3) | 2));
++  set('devspeed','3');
++  test('devspeed','0x3');
++  test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3) | 3));
++  set('devspeed','4');
++  test('devspeed','0x0');
++  test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3)));
++  set('devspeed','5');
++  test('devspeed','0x1');
++  test('regvalue',sprintf("0x%08x", oct($old) & ~(0x3) | 1));
++
++
++  #  mode	Returns the current mode: 0 for device mode, 1 for host mode	Read
++  #  hnp	Initiate the Host Negotiation Protocol.  Read returns the status.	Read/Write
++  #  srp	Initiate the Session Request Protocol.  Read returns the status.	Read/Write
++  #  buspower	Get or Set the Power State of the bus (0 - Off or 1 - On) 	Read/Write
++  #  bussuspend	Suspend the USB bus.	Read/Write
++  #  busconnected	Get the connection status of the bus 	Read
++
++  #  gotgctl	Get or set the Core Control Status Register.	Read/Write
++  ##  gusbcfg	Get or set the Core USB Configuration Register	Read/Write
++  #  grxfsiz	Get or set the Receive FIFO Size Register	Read/Write
++  #  gnptxfsiz	Get or set the non-periodic Transmit Size Register	Read/Write
++  #  gpvndctl	Get or set the PHY Vendor Control Register	Read/Write
++  ##  ggpio	Get the value in the lower 16-bits of the General Purpose IO Register or Set the upper 16 bits.	Read/Write
++  ##  guid	Get or set the value of the User ID Register	Read/Write
++  ##  gsnpsid	Get the value of the Synopsys ID Register	Read
++  ##  devspeed	Get or set the device speed setting in the DCFG register	Read/Write
++  #  enumspeed	Gets the device enumeration Speed.	Read
++  #  hptxfsiz	Get the value of the Host Periodic Transmit FIFO	Read
++  #  hprt0	Get or Set the value in the Host Port Control and Status Register	Read/Write
++
++  test_status("TEST NYI") or die;
++}
++
++test_main();
++0;
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index f9cf3f04..49a9d3a6 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -1003,7 +1003,7 @@ MODULE_LICENSE ("GPL");
+ #define SA1111_DRIVER		ohci_hcd_sa1111_driver
+ #endif
+ 
+-#if defined(CONFIG_ARCH_S3C2410) || defined(CONFIG_ARCH_S3C64XX)
++#if defined(CONFIG_ARCH_S3C2410) || defined(CONFIG_ARCH_S3C64XX)
+ #include "ohci-s3c2410.c"
+ #define PLATFORM_DRIVER		ohci_hcd_s3c2410_driver
+ #endif
+diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
+index 7c9a4d55..7aeb729b 100644
+--- a/drivers/usb/host/ohci-s3c2410.c
++++ b/drivers/usb/host/ohci-s3c2410.c
+@@ -21,14 +21,21 @@
+ 
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+-#include <plat/usb-control.h>
++
++#include <mach/hardware.h>
++#include <mach/usb-control.h>
+ 
+ #define valid_port(idx) ((idx) == 1 || (idx) == 2)
+ 
++extern void usb_host_clk_en(void);
++
+ /* clock device associated with the hcd */
+ 
+ static struct clk *clk;
++
++#if defined(CONFIG_ARCH_2410)
+ static struct clk *usb_clk;
++#endif
+ 
+ /* forward definitions */
+ 
+@@ -47,8 +54,10 @@ static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
+ 
+ 	dev_dbg(&dev->dev, "s3c2410_start_hc:\n");
+ 
++#if defined(CONFIG_ARCH_2410)
+ 	clk_enable(usb_clk);
+ 	mdelay(2);			/* let the bus clock stabilise */
++#endif
+ 
+ 	clk_enable(clk);
+ 
+@@ -56,8 +65,9 @@ static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
+ 		info->hcd	= hcd;
+ 		info->report_oc = s3c2410_hcd_oc;
+ 
+-		if (info->enable_oc != NULL)
++		if (info->enable_oc != NULL) {
+ 			(info->enable_oc)(info, 1);
++		}
+ 	}
+ }
+ 
+@@ -71,12 +81,15 @@ static void s3c2410_stop_hc(struct platform_device *dev)
+ 		info->report_oc = NULL;
+ 		info->hcd	= NULL;
+ 
+-		if (info->enable_oc != NULL)
++		if (info->enable_oc != NULL) {
+ 			(info->enable_oc)(info, 0);
++		}
+ 	}
+ 
+ 	clk_disable(clk);
++#if defined(CONFIG_ARCH_2410)
+ 	clk_disable(usb_clk);
++#endif
+ }
+ 
+ /* ohci_s3c2410_hub_status_data
+@@ -86,14 +99,14 @@ static void s3c2410_stop_hc(struct platform_device *dev)
+ */
+ 
+ static int
+-ohci_s3c2410_hub_status_data(struct usb_hcd *hcd, char *buf)
++ohci_s3c2410_hub_status_data (struct usb_hcd *hcd, char *buf)
+ {
+ 	struct s3c2410_hcd_info *info = to_s3c2410_info(hcd);
+ 	struct s3c2410_hcd_port *port;
+ 	int orig;
+ 	int portno;
+ 
+-	orig  = ohci_hub_status_data(hcd, buf);
++	orig  = ohci_hub_status_data (hcd, buf);
+ 
+ 	if (info == NULL)
+ 		return orig;
+@@ -143,7 +156,7 @@ static void s3c2410_usb_set_power(struct s3c2410_hcd_info *info,
+  * request.
+ */
+ 
+-static int ohci_s3c2410_hub_control(
++static int ohci_s3c2410_hub_control (
+ 	struct usb_hcd	*hcd,
+ 	u16		typeReq,
+ 	u16		wValue,
+@@ -197,8 +210,9 @@ static int ohci_s3c2410_hub_control(
+ 			dev_dbg(hcd->self.controller,
+ 				"ClearPortFeature: OVER_CURRENT\n");
+ 
+-			if (valid_port(wIndex))
++			if (valid_port(wIndex)) {
+ 				info->port[wIndex-1].oc_status = 0;
++			}
+ 
+ 			goto out;
+ 
+@@ -239,11 +253,8 @@ static int ohci_s3c2410_hub_control(
+ 		desc->wHubCharacteristics |= cpu_to_le16(0x0001);
+ 
+ 		if (info->enable_oc) {
+-			desc->wHubCharacteristics &= ~cpu_to_le16(
+-				HUB_CHAR_OCPM);
+-			desc->wHubCharacteristics |=  cpu_to_le16(
+-				0x0008 |
+-				0x0001);
++			desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
++			desc->wHubCharacteristics |=  cpu_to_le16(0x0008|0x0001);
+ 		}
+ 
+ 		dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
+@@ -257,11 +268,13 @@ static int ohci_s3c2410_hub_control(
+ 		dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
+ 
+ 		if (valid_port(wIndex)) {
+-			if (info->port[wIndex-1].oc_changed)
++			if (info->port[wIndex-1].oc_changed) {
+ 				*data |= cpu_to_le32(RH_PS_OCIC);
++			}
+ 
+-			if (info->port[wIndex-1].oc_status)
++			if (info->port[wIndex-1].oc_status) {
+ 				*data |= cpu_to_le32(RH_PS_POCI);
++			}
+ 		}
+ 	}
+ 
+@@ -319,7 +332,7 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc)
+ */
+ 
+ static void
+-usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
++usb_hcd_s3c2410_remove (struct usb_hcd *hcd, struct platform_device *dev)
+ {
+ 	usb_remove_hcd(hcd);
+ 	s3c2410_stop_hc(dev);
+@@ -337,12 +350,16 @@ usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
+  * through the hotplug entry's driver_data.
+  *
+  */
+-static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
++static int usb_hcd_s3c2410_probe (const struct hc_driver *driver,
+ 				  struct platform_device *dev)
+ {
+ 	struct usb_hcd *hcd = NULL;
+ 	int retval;
+ 
++#if !defined(CONFIG_ARCH_2410)
++        usb_host_clk_en();
++#endif
++
+ 	s3c2410_usb_set_power(dev->dev.platform_data, 1, 1);
+ 	s3c2410_usb_set_power(dev->dev.platform_data, 2, 1);
+ 
+@@ -351,7 +368,7 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
+ 		return -ENOMEM;
+ 
+ 	hcd->rsrc_start = dev->resource[0].start;
+-	hcd->rsrc_len	= resource_size(&dev->resource[0]);
++	hcd->rsrc_len   = dev->resource[0].end - dev->resource[0].start + 1;
+ 
+ 	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ 		dev_err(&dev->dev, "request_mem_region failed\n");
+@@ -362,16 +379,18 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
+ 	clk = clk_get(&dev->dev, "usb-host");
+ 	if (IS_ERR(clk)) {
+ 		dev_err(&dev->dev, "cannot get usb-host clock\n");
+-		retval = PTR_ERR(clk);
++		retval = -ENOENT;
+ 		goto err_mem;
+ 	}
+ 
++#if defined(CONFIG_ARCH_2410)
+ 	usb_clk = clk_get(&dev->dev, "usb-bus-host");
+ 	if (IS_ERR(usb_clk)) {
+-		dev_err(&dev->dev, "cannot get usb-bus-host clock\n");
+-		retval = PTR_ERR(usb_clk);
++		dev_err(&dev->dev, "cannot get usb-host clock\n");
++		retval = -ENOENT;
+ 		goto err_clk;
+ 	}
++#endif
+ 
+ 	s3c2410_start_hc(dev, hcd);
+ 
+@@ -393,10 +412,13 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
+  err_ioremap:
+ 	s3c2410_stop_hc(dev);
+ 	iounmap(hcd->regs);
++
++#if defined(CONFIG_ARCH_2410)
+ 	clk_put(usb_clk);
+ 
+  err_clk:
+ 	clk_put(clk);
++#endif
+ 
+  err_mem:
+ 	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+@@ -409,19 +431,17 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
+ /*-------------------------------------------------------------------------*/
+ 
+ static int
+-ohci_s3c2410_start(struct usb_hcd *hcd)
++ohci_s3c2410_start (struct usb_hcd *hcd)
+ {
+-	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
++	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+ 	int ret;
+ 
+-	ret = ohci_init(ohci);
+-	if (ret < 0)
++	if ((ret = ohci_init(ohci)) < 0)
+ 		return ret;
+ 
+-	ret = ohci_run(ohci);
+-	if (ret < 0) {
+-		err("can't start %s", hcd->self.bus_name);
+-		ohci_stop(hcd);
++	if ((ret = ohci_run (ohci)) < 0) {
++		err ("can't start %s", hcd->self.bus_name);
++		ohci_stop (hcd);
+ 		return ret;
+ 	}
+ 
+@@ -473,12 +493,12 @@ static const struct hc_driver ohci_s3c2410_hc_driver = {
+ 
+ /* device driver */
+ 
+-static int __devinit ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
++static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
+ {
+ 	return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev);
+ }
+ 
+-static int __devexit ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
++static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
+ {
+ 	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ 
+@@ -488,7 +508,7 @@ static int __devexit ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
+ 
+ static struct platform_driver ohci_hcd_s3c2410_driver = {
+ 	.probe		= ohci_hcd_s3c2410_drv_probe,
+-	.remove		= __devexit_p(ohci_hcd_s3c2410_drv_remove),
++	.remove		= ohci_hcd_s3c2410_drv_remove,
+ 	.shutdown	= usb_hcd_platform_shutdown,
+ 	/*.suspend	= ohci_hcd_s3c2410_drv_suspend, */
+ 	/*.resume	= ohci_hcd_s3c2410_drv_resume, */
+diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
+index 21d816e9..673ee6b0 100644
+--- a/drivers/watchdog/Kconfig
++++ b/drivers/watchdog/Kconfig
+@@ -606,7 +606,14 @@ config HP_WATCHDOG
+ 	  will only load on an HP ProLiant system with a minimum of iLO2 support.
+ 	  To compile this driver as a module, choose M here: the module will be
+ 	  called hpwdt.
+-
++
++config FH_WATCHDOG
++	tristate "Fullhan watchdog driver"
++	help
++	  A software monitoring watchdog and NMI sourcing driver for
++	  Fullhan SoCs.
++
+ config HPWDT_NMI_DECODING
+ 	bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
+ 	depends on HP_WATCHDOG
+diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
+index ed26f709..0cb0e134 100644
+--- a/drivers/watchdog/Makefile
++++ b/drivers/watchdog/Makefile
+@@ -49,6 +49,7 @@ obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
+ obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
+ obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
+ obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
++obj-$(CONFIG_FH_WATCHDOG) += fh_wdt.o
+ 
+ # AVR32 Architecture
+ obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
+diff --git a/drivers/watchdog/fh_wdt.c b/drivers/watchdog/fh_wdt.c
+new file mode 100644
+index 00000000..b24b9186
+--- /dev/null
++++ b/drivers/watchdog/fh_wdt.c
+@@ -0,0 +1,461 @@
++/*
++ * Copyright 2010-2011 Picochip Ltd., Jamie Iles
++ * http://www.picochip.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * This file implements a driver for the Synopsys DesignWare watchdog device
++ * in the many ARM subsystems. The watchdog has 16 different timeout periods
++ * and these are a function of the input clock frequency.
++ *
++ * The DesignWare watchdog cannot be stopped once it has been started so we
++ * use a software timer to implement a ping that will keep the watchdog alive.
++ * If we receive an expected close for the watchdog then we keep the timer
++ * running, otherwise the timer is stopped and the watchdog will expire.
++ */
++#define pr_fmt(fmt) "fh_wdt: " fmt
++
++#include <linux/bitops.h>
++#include <linux/clk.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/fs.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/pm.h>
++#include <linux/platform_device.h>
++#include <linux/spinlock.h>
++#include <linux/timer.h>
++#include <linux/uaccess.h>
++#include <linux/watchdog.h>
++#include <linux/interrupt.h>
++#include <mach/pmu.h>
++#include <mach/fh_wdt.h>
++
++#define WDT_RESPONSE_MODE
++
++#define WDOG_CONTROL_REG_OFFSET			0x00
++#define WDOG_CONTROL_REG_WDT_EN_MASK		0x01
++#define WDOG_CONTROL_REG_RMOD_MASK		0x02
++#define WDOG_TIMEOUT_RANGE_REG_OFFSET		0x04
++#define WDOG_CURRENT_COUNT_REG_OFFSET		0x08
++#define WDOG_COUNTER_RESTART_REG_OFFSET		0x0c
++#define WDOG_COUNTER_RESTART_KICK_VALUE		0x76
++
++/* Hardware timeout in seconds */
++#define WDT_HW_TIMEOUT 2
++/* Userland timeout in seconds */
++#define WDT_HEARTBEAT 15
++static int heartbeat = WDT_HEARTBEAT;
++
++/* The maximum TOP (timeout period) value that can be set in the watchdog. */
++#define FH_WDT_MAX_TOP		15
++
++static int curr_top_val = FH_WDT_MAX_TOP;
++static int curr_clk_rate = 1800000;
++static int nowayout = WATCHDOG_NOWAYOUT;
++module_param(nowayout, int, 0);
++MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
++		 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
++
++#define WDT_TIMEOUT		(HZ / 2)
++
++static struct {
++	spinlock_t		lock;
++	void __iomem		*regs;
++	struct clk		*clk;
++	unsigned long		in_use;
++	unsigned long		next_heartbeat;
++	struct timer_list	timer;
++	int			expect_close;
++	struct fh_wdt_platform_data *plat_data;
++} fh_wdt;
++
++static inline int fh_wdt_is_enabled(void)
++{
++	return readl(fh_wdt.regs + WDOG_CONTROL_REG_OFFSET) &
++		WDOG_CONTROL_REG_WDT_EN_MASK;
++}
++
++//#define WDT_CLOCK 1800000
++#define WDT_CLOCK clk_get_rate(fh_wdt.clk)
++
++static inline int fh_wdt_top_in_seconds(unsigned top)
++{
++	/*
++	 * There are 16 possible timeout values in 0..15 where the number of
++	 * cycles is 2 ^ (16 + i) and the watchdog counts down.
++	 */
++	return (1 << (16 + top)) / WDT_CLOCK;
++}
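++
++/*
++ * Worked example (editor's note): assuming clk_get_rate() returns the
++ * 1 MHz rate set in probe, top = 4 gives 2^20 / 1000000 = 1 s, top = 5
++ * gives 2^21 / 1000000 = 2 s (integer division), and top = 15 gives
++ * 2^31 / 1000000 = 2147 s; so WDT_HW_TIMEOUT (2 s) makes fh_wdt_set_top()
++ * below pick top_val = 5.
++ */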
++
++static inline void fh_wdt_set_next_heartbeat(void)
++{
++	fh_wdt.next_heartbeat = jiffies + heartbeat * HZ;
++}
++
++static int fh_wdt_set_top(unsigned top_s)
++{
++	int i, top_val = FH_WDT_MAX_TOP;
++
++	/*
++	 * Iterate over the timeout values until we find the closest match. We
++	 * always look for >=.
++	 */
++
++	for (i = 0; i <= FH_WDT_MAX_TOP; ++i)
++		if (fh_wdt_top_in_seconds(i) >= top_s) {
++			top_val = i;
++			break;
++		}
++
++	/* Set the new value in the watchdog. */
++	printk("[wdt] set topval: %d", top_val);
++	writel(top_val, fh_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
++
++	fh_wdt_set_next_heartbeat();
++
++	return fh_wdt_top_in_seconds(top_val);
++}
++
++static void fh_wdt_keepalive(void)
++{
++	writel(WDOG_COUNTER_RESTART_KICK_VALUE, fh_wdt.regs +
++	       WDOG_COUNTER_RESTART_REG_OFFSET);
++}
++
++static void fh_wdt_ping(unsigned long data)
++{
++	if (time_before(jiffies, fh_wdt.next_heartbeat) ||
++	    (!nowayout && !fh_wdt.in_use)) {
++		fh_wdt_keepalive();
++		mod_timer(&fh_wdt.timer, jiffies + WDT_TIMEOUT);
++	} else {
++		pr_crit("keepalive missed, machine will reset\n");
++	}
++}
++
++static int fh_wdt_open(struct inode *inode, struct file *filp)
++{
++	if (test_and_set_bit(0, &fh_wdt.in_use))
++		return -EBUSY;
++
++	/* Make sure we don't get unloaded. */
++	__module_get(THIS_MODULE);
++
++	spin_lock(&fh_wdt.lock);
++
++	if (fh_wdt.plat_data && fh_wdt.plat_data->resume)
++		fh_wdt.plat_data->resume();
++
++	fh_wdt_set_top(WDT_HW_TIMEOUT);
++	if (!fh_wdt_is_enabled()) {
++		/*
++		 * The watchdog is not currently enabled. Enable it (in
++		 * interrupt/response mode when WDT_RESPONSE_MODE is set)
++		 * and kick it once.
++		 */
++		u32 value;
++
++		value = WDOG_CONTROL_REG_WDT_EN_MASK;
++#ifdef WDT_RESPONSE_MODE
++		value |= WDOG_CONTROL_REG_RMOD_MASK;
++#endif
++		writel(value, fh_wdt.regs + WDOG_CONTROL_REG_OFFSET);
++		fh_wdt_keepalive();
++	}
++
++	fh_wdt_set_next_heartbeat();
++
++	spin_unlock(&fh_wdt.lock);
++
++	return nonseekable_open(inode, filp);
++}
++
++static ssize_t fh_wdt_write(struct file *filp, const char __user *buf, size_t len,
++		     loff_t *offset)
++{
++	if (!len)
++		return 0;
++
++	if (!nowayout) {
++		size_t i;
++
++		fh_wdt.expect_close = 0;
++
++		for (i = 0; i < len; ++i) {
++			char c;
++
++			if (get_user(c, buf + i))
++				return -EFAULT;
++
++			if (c == 'V') {
++				fh_wdt.expect_close = 1;
++				break;
++			}
++		}
++	}
++
++	fh_wdt_set_next_heartbeat();
++	mod_timer(&fh_wdt.timer, jiffies + WDT_TIMEOUT);
++
++	return len;
++}
++
++static u32 fh_wdt_time_left(void)
++{
++	return readl(fh_wdt.regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
++			WDT_CLOCK;
++}
++
++static const struct watchdog_info fh_wdt_ident = {
++	.options	= WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
++			  WDIOF_MAGICCLOSE,
++	.identity	= "Synopsys DesignWare Watchdog",
++};
++
++static long fh_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++	unsigned long val;
++
++	switch (cmd) {
++	case WDIOC_GETSUPPORT:
++		return copy_to_user((struct watchdog_info __user *)arg, &fh_wdt_ident,
++				    sizeof(fh_wdt_ident)) ? -EFAULT : 0;
++
++	case WDIOC_GETSTATUS:
++	case WDIOC_GETBOOTSTATUS:
++		return put_user(0, (int __user *)arg);
++
++	case WDIOC_KEEPALIVE:
++		fh_wdt_set_next_heartbeat();
++		return 0;
++
++	case WDIOC_SETTIMEOUT:
++		if (get_user(val, (int __user *)arg))
++			return -EFAULT;
++
++		pr_debug("[wdt] settime value %lu", val);
++		heartbeat =  val;
++		fh_wdt_keepalive();
++		fh_wdt_set_next_heartbeat();
++
++		return put_user(val, (int __user *)arg);
++
++	case WDIOC_GETTIMEOUT:
++		return put_user(heartbeat, (int __user *)arg);
++
++	case WDIOC_GETTIMELEFT:
++		/* Get the time left until expiry. */
++		if (get_user(val, (int __user *)arg))
++			return -EFAULT;
++		return put_user(fh_wdt_time_left(), (int __user *)arg);
++
++	case WDIOC_SETOPTIONS:
++		if (get_user(val, (int __user *)arg))
++			return -EFAULT;
++
++		if (val & WDIOS_DISABLECARD) {
++			if (fh_wdt.plat_data && fh_wdt.plat_data->pause)
++				fh_wdt.plat_data->pause();
++			else
++				return -EPERM;
++		}
++
++		if (val & WDIOS_ENABLECARD) {
++			if (fh_wdt.plat_data && fh_wdt.plat_data->resume)
++				fh_wdt.plat_data->resume();
++			else
++				return -EPERM;
++		}
++
++		return 0;
++
++	default:
++		return -ENOTTY;
++	}
++}
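++
++/*
++ * Example (editor's illustrative sketch): a minimal userspace client for
++ * the ioctl interface above. work_left() is a stand-in for the
++ * application's own loop condition; the 'V' write arms the magic-close
++ * path in fh_wdt_release() so the ping timer keeps running after close
++ * (when nowayout is off).
++ *
++ *	#include <fcntl.h>
++ *	#include <unistd.h>
++ *	#include <sys/ioctl.h>
++ *	#include <linux/watchdog.h>
++ *
++ *	int fd = open("/dev/watchdog", O_WRONLY);
++ *	int timeout = 30;
++ *
++ *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	// userland heartbeat, seconds
++ *	while (work_left()) {
++ *		ioctl(fd, WDIOC_KEEPALIVE, 0);	// push next_heartbeat forward
++ *		sleep(5);
++ *	}
++ *	write(fd, "V", 1);	// magic close: expect_close = 1
++ *	close(fd);
++ */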
++
++#ifdef WDT_RESPONSE_MODE
++static irqreturn_t fh_wdt_interrupt(int this_irq, void *dev_id)
++{
++	fh_pmu_stop();
++	return IRQ_HANDLED;
++}
++#endif
++
++static int fh_wdt_release(struct inode *inode, struct file *filp)
++{
++	clear_bit(0, &fh_wdt.in_use);
++
++	if (!fh_wdt.expect_close) {
++		del_timer(&fh_wdt.timer);
++		if (!nowayout)
++			pr_crit("unexpected close, system will reboot soon\n");
++		else
++			pr_crit("watchdog cannot be disabled, system will reboot soon\n");
++	}
++
++	fh_wdt.expect_close = 0;
++
++	return 0;
++}
++
++#ifdef CONFIG_PM
++static int fh_wdt_suspend(struct device *dev)
++{
++	clk_disable(fh_wdt.clk);
++	curr_top_val = readl(fh_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xf;
++	writel(FH_WDT_MAX_TOP, fh_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
++	curr_clk_rate = WDT_CLOCK;
++	clk_set_rate(fh_wdt.clk, 843750);	/* slow the WDT clock while suspended */
++	fh_wdt_keepalive();
++
++	return 0;
++}
++
++static int fh_wdt_resume(struct device *dev)
++{
++	int err;
++
++	clk_set_rate(fh_wdt.clk, curr_clk_rate);
++	err = clk_enable(fh_wdt.clk);
++	writel(curr_top_val, fh_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
++
++	if (err) {
++		pr_err("an error occurred during wdt resume, error no: %d\n", err);
++		return err;
++	}
++
++	fh_wdt_keepalive();
++
++	return 0;
++}
++
++static const struct dev_pm_ops fh_wdt_pm_ops = {
++	.suspend	= fh_wdt_suspend,
++	.resume		= fh_wdt_resume,
++};
++#endif /* CONFIG_PM */
++
++static const struct file_operations wdt_fops = {
++	.owner		= THIS_MODULE,
++	.llseek		= no_llseek,
++	.open		= fh_wdt_open,
++	.write		= fh_wdt_write,
++	.unlocked_ioctl	= fh_wdt_ioctl,
++	.release	= fh_wdt_release
++};
++
++static struct miscdevice fh_wdt_miscdev = {
++	.fops		= &wdt_fops,
++	.name		= "watchdog",
++	.minor		= WATCHDOG_MINOR,
++};
++
++static int __devinit fh_wdt_drv_probe(struct platform_device *pdev)
++{
++	int ret, irq;
++	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++	if (!mem)
++		return -EINVAL;
++
++	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
++				     "fh_wdt"))
++		return -ENOMEM;
++
++	fh_wdt.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
++	if (!fh_wdt.regs)
++		return -ENOMEM;
++#ifdef WDT_RESPONSE_MODE
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0) {
++		dev_err(&pdev->dev, "no irq resource\n");
++		return -ENXIO;
++	}
++
++	ret = request_irq(irq, fh_wdt_interrupt, IRQF_DISABLED, pdev->name,
++			  &fh_wdt);
++	if (ret) {
++		dev_err(&pdev->dev, "failure requesting irq %i\n", irq);
++		return -ENXIO;
++	}
++#endif
++
++	fh_wdt.plat_data = dev_get_platdata(&pdev->dev);
++	fh_wdt.clk = clk_get(NULL, "wdt_clk");
++	if (IS_ERR(fh_wdt.clk))
++		return PTR_ERR(fh_wdt.clk);
++
++	clk_set_rate(fh_wdt.clk, 1000000);
++
++	ret = clk_enable(fh_wdt.clk);
++	if (ret)
++		goto out_put_clk;
++
++	spin_lock_init(&fh_wdt.lock);
++
++	ret = misc_register(&fh_wdt_miscdev);
++	if (ret)
++		goto out_disable_clk;
++
++	fh_wdt_set_next_heartbeat();
++	setup_timer(&fh_wdt.timer, fh_wdt_ping, 0);
++	mod_timer(&fh_wdt.timer, jiffies + WDT_TIMEOUT);
++
++	return 0;
++
++out_disable_clk:
++	clk_disable(fh_wdt.clk);
++out_put_clk:
++	clk_put(fh_wdt.clk);
++
++	return ret;
++}
++
++static int __devexit fh_wdt_drv_remove(struct platform_device *pdev)
++{
++	misc_deregister(&fh_wdt_miscdev);
++
++	clk_disable(fh_wdt.clk);
++	clk_put(fh_wdt.clk);
++
++	return 0;
++}
++
++static struct platform_driver fh_wdt_driver = {
++	.probe		= fh_wdt_drv_probe,
++	.remove		= __devexit_p(fh_wdt_drv_remove),
++	.driver		= {
++		.name	= "fh_wdt",
++		.owner	= THIS_MODULE,
++#ifdef CONFIG_PM
++		.pm	= &fh_wdt_pm_ops,
++#endif /* CONFIG_PM */
++	},
++};
++
++static int __init fh_wdt_watchdog_init(void)
++{
++	return platform_driver_register(&fh_wdt_driver);
++}
++module_init(fh_wdt_watchdog_init);
++
++static void __exit fh_wdt_watchdog_exit(void)
++{
++	platform_driver_unregister(&fh_wdt_driver);
++}
++module_exit(fh_wdt_watchdog_exit);
++
++MODULE_AUTHOR("fullhan");
++MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+diff --git a/fs/Kconfig b/fs/Kconfig
+index 19891aab..77c5e665 100644
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -192,6 +192,7 @@ source "fs/hfsplus/Kconfig"
+ source "fs/befs/Kconfig"
+ source "fs/bfs/Kconfig"
+ source "fs/efs/Kconfig"
++source "fs/yaffs2/Kconfig"
+ source "fs/jffs2/Kconfig"
+ # UBIFS File system configuration
+ source "fs/ubifs/Kconfig"
+diff --git a/fs/Kconfig.pre.yaffs b/fs/Kconfig.pre.yaffs
+new file mode 100644
+index 00000000..19891aab
+--- /dev/null
++++ b/fs/Kconfig.pre.yaffs
+@@ -0,0 +1,273 @@
++#
++# File system configuration
++#
++
++menu "File systems"
++
++if BLOCK
++
++source "fs/ext2/Kconfig"
++source "fs/ext3/Kconfig"
++source "fs/ext4/Kconfig"
++
++config FS_XIP
++# execute in place
++	bool
++	depends on EXT2_FS_XIP
++	default y
++
++source "fs/jbd/Kconfig"
++source "fs/jbd2/Kconfig"
++
++config FS_MBCACHE
++# Meta block cache for Extended Attributes (ext2/ext3/ext4)
++	tristate
++	default y if EXT2_FS=y && EXT2_FS_XATTR
++	default y if EXT3_FS=y && EXT3_FS_XATTR
++	default y if EXT4_FS=y && EXT4_FS_XATTR
++	default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS_XATTR
++
++source "fs/reiserfs/Kconfig"
++source "fs/jfs/Kconfig"
++
++source "fs/xfs/Kconfig"
++source "fs/gfs2/Kconfig"
++source "fs/ocfs2/Kconfig"
++source "fs/btrfs/Kconfig"
++source "fs/nilfs2/Kconfig"
++
++endif # BLOCK
++
++# Posix ACL utility routines
++#
++# Note: Posix ACLs can be implemented without these helpers.  Never use
++# this symbol for ifdefs in core code.
++#
++config FS_POSIX_ACL
++	def_bool n
++
++config EXPORTFS
++	tristate
++
++config FILE_LOCKING
++	bool "Enable POSIX file locking API" if EXPERT
++	default y
++	help
++	  This option enables standard file locking support, required
++          for filesystems like NFS and for the flock() system
++          call. Disabling this option saves about 11k.
++
++source "fs/notify/Kconfig"
++
++source "fs/quota/Kconfig"
++
++source "fs/autofs4/Kconfig"
++source "fs/fuse/Kconfig"
++
++config CUSE
++	tristate "Character device in Userspace support"
++	depends on FUSE_FS
++	help
++	  This FUSE extension allows character devices to be
++	  implemented in userspace.
++
++	  If you want to develop or use userspace character device
++	  based on CUSE, answer Y or M.
++
++config GENERIC_ACL
++	bool
++	select FS_POSIX_ACL
++
++menu "Caches"
++
++source "fs/fscache/Kconfig"
++source "fs/cachefiles/Kconfig"
++
++endmenu
++
++if BLOCK
++menu "CD-ROM/DVD Filesystems"
++
++source "fs/isofs/Kconfig"
++source "fs/udf/Kconfig"
++
++endmenu
++endif # BLOCK
++
++if BLOCK
++menu "DOS/FAT/NT Filesystems"
++
++source "fs/fat/Kconfig"
++source "fs/ntfs/Kconfig"
++
++endmenu
++endif # BLOCK
++
++menu "Pseudo filesystems"
++
++source "fs/proc/Kconfig"
++source "fs/sysfs/Kconfig"
++
++config TMPFS
++	bool "Virtual memory file system support (former shm fs)"
++	depends on SHMEM
++	help
++	  Tmpfs is a file system which keeps all files in virtual memory.
++
++	  Everything in tmpfs is temporary in the sense that no files will be
++	  created on your hard drive. The files live in memory and swap
++	  space. If you unmount a tmpfs instance, everything stored therein is
++	  lost.
++
++	  See <file:Documentation/filesystems/tmpfs.txt> for details.
++
++config TMPFS_POSIX_ACL
++	bool "Tmpfs POSIX Access Control Lists"
++	depends on TMPFS
++	select TMPFS_XATTR
++	select GENERIC_ACL
++	help
++	  POSIX Access Control Lists (ACLs) support permissions for users and
++	  groups beyond the owner/group/world scheme.
++
++	  To learn more about Access Control Lists, visit the POSIX ACLs for
++	  Linux website <http://acl.bestbits.at/>.
++
++	  If you don't know what Access Control Lists are, say N.
++
++config TMPFS_XATTR
++	bool "Tmpfs extended attributes"
++	depends on TMPFS
++	default n
++	help
++	  Extended attributes are name:value pairs associated with inodes by
++	  the kernel or by users (see the attr(5) manual page, or visit
++	  <http://acl.bestbits.at/> for details).
++
++	  Currently this enables support for the trusted.* and
++	  security.* namespaces.
++
++	  You need this for POSIX ACL support on tmpfs.
++
++	  If unsure, say N.
++
++config HUGETLBFS
++	bool "HugeTLB file system support"
++	depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \
++		   SYS_SUPPORTS_HUGETLBFS || BROKEN
++	help
++	  hugetlbfs is a filesystem backing for HugeTLB pages, based on
++	  ramfs. For architectures that support it, say Y here and read
++	  <file:Documentation/vm/hugetlbpage.txt> for details.
++
++	  If unsure, say N.
++
++config HUGETLB_PAGE
++	def_bool HUGETLBFS
++
++source "fs/configfs/Kconfig"
++
++endmenu
++
++menuconfig MISC_FILESYSTEMS
++	bool "Miscellaneous filesystems"
++	default y
++	---help---
++	  Say Y here to get to see options for various miscellaneous
++	  filesystems, such as filesystems that came from other
++	  operating systems.
++
++	  This option alone does not add any kernel code.
++
++	  If you say N, all options in this submenu will be skipped and
++	  disabled; if unsure, say Y here.
++
++if MISC_FILESYSTEMS
++
++source "fs/adfs/Kconfig"
++source "fs/affs/Kconfig"
++source "fs/ecryptfs/Kconfig"
++source "fs/hfs/Kconfig"
++source "fs/hfsplus/Kconfig"
++source "fs/befs/Kconfig"
++source "fs/bfs/Kconfig"
++source "fs/efs/Kconfig"
++source "fs/jffs2/Kconfig"
++# UBIFS File system configuration
++source "fs/ubifs/Kconfig"
++source "fs/logfs/Kconfig"
++source "fs/cramfs/Kconfig"
++source "fs/squashfs/Kconfig"
++source "fs/freevxfs/Kconfig"
++source "fs/minix/Kconfig"
++source "fs/omfs/Kconfig"
++source "fs/hpfs/Kconfig"
++source "fs/qnx4/Kconfig"
++source "fs/romfs/Kconfig"
++source "fs/pstore/Kconfig"
++source "fs/sysv/Kconfig"
++source "fs/ufs/Kconfig"
++source "fs/exofs/Kconfig"
++
++endif # MISC_FILESYSTEMS
++
++menuconfig NETWORK_FILESYSTEMS
++	bool "Network File Systems"
++	default y
++	depends on NET
++	---help---
++	  Say Y here to get to see options for network filesystems and
++	  filesystem-related networking code, such as NFS daemon and
++	  RPCSEC security modules.
++
++	  This option alone does not add any kernel code.
++
++	  If you say N, all options in this submenu will be skipped and
++	  disabled; if unsure, say Y here.
++
++if NETWORK_FILESYSTEMS
++
++source "fs/nfs/Kconfig"
++source "fs/nfsd/Kconfig"
++
++config LOCKD
++	tristate
++	depends on FILE_LOCKING
++
++config LOCKD_V4
++	bool
++	depends on NFSD_V3 || NFS_V3
++	depends on FILE_LOCKING
++	default y
++
++config NFS_ACL_SUPPORT
++	tristate
++	select FS_POSIX_ACL
++
++config NFS_COMMON
++	bool
++	depends on NFSD || NFS_FS
++	default y
++
++source "net/sunrpc/Kconfig"
++source "fs/ceph/Kconfig"
++source "fs/cifs/Kconfig"
++source "fs/ncpfs/Kconfig"
++source "fs/coda/Kconfig"
++source "fs/afs/Kconfig"
++source "fs/9p/Kconfig"
++
++endif # NETWORK_FILESYSTEMS
++
++if BLOCK
++menu "Partition Types"
++
++source "fs/partitions/Kconfig"
++
++endmenu
++endif
++
++source "fs/nls/Kconfig"
++source "fs/dlm/Kconfig"
++
++endmenu
+diff --git a/fs/Makefile b/fs/Makefile
+index fb68c2b8..dbd3eb7e 100644
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -124,3 +124,4 @@ obj-$(CONFIG_GFS2_FS)           += gfs2/
+ obj-$(CONFIG_EXOFS_FS)          += exofs/
+ obj-$(CONFIG_CEPH_FS)		+= ceph/
+ obj-$(CONFIG_PSTORE)		+= pstore/
++obj-$(CONFIG_YAFFS_FS)		+= yaffs2/
+diff --git a/fs/Makefile.pre.yaffs b/fs/Makefile.pre.yaffs
+new file mode 100644
+index 00000000..fb68c2b8
+--- /dev/null
++++ b/fs/Makefile.pre.yaffs
+@@ -0,0 +1,126 @@
++#
++# Makefile for the Linux filesystems.
++#
++# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
++# Rewritten to use lists instead of if-statements.
++# 
++
++obj-y :=	open.o read_write.o file_table.o super.o \
++		char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
++		ioctl.o readdir.o select.o fifo.o dcache.o inode.o \
++		attr.o bad_inode.o file.o filesystems.o namespace.o \
++		seq_file.o xattr.o libfs.o fs-writeback.o \
++		pnode.o drop_caches.o splice.o sync.o utimes.o \
++		stack.o fs_struct.o statfs.o
++
++ifeq ($(CONFIG_BLOCK),y)
++obj-y +=	buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
++else
++obj-y +=	no-block.o
++endif
++
++obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
++obj-y				+= notify/
++obj-$(CONFIG_EPOLL)		+= eventpoll.o
++obj-$(CONFIG_ANON_INODES)	+= anon_inodes.o
++obj-$(CONFIG_SIGNALFD)		+= signalfd.o
++obj-$(CONFIG_TIMERFD)		+= timerfd.o
++obj-$(CONFIG_EVENTFD)		+= eventfd.o
++obj-$(CONFIG_AIO)               += aio.o
++obj-$(CONFIG_FILE_LOCKING)      += locks.o
++obj-$(CONFIG_COMPAT)		+= compat.o compat_ioctl.o
++obj-$(CONFIG_NFSD_DEPRECATED)	+= nfsctl.o
++obj-$(CONFIG_BINFMT_AOUT)	+= binfmt_aout.o
++obj-$(CONFIG_BINFMT_EM86)	+= binfmt_em86.o
++obj-$(CONFIG_BINFMT_MISC)	+= binfmt_misc.o
++
++# binfmt_script is always there
++obj-y				+= binfmt_script.o
++
++obj-$(CONFIG_BINFMT_ELF)	+= binfmt_elf.o
++obj-$(CONFIG_COMPAT_BINFMT_ELF)	+= compat_binfmt_elf.o
++obj-$(CONFIG_BINFMT_ELF_FDPIC)	+= binfmt_elf_fdpic.o
++obj-$(CONFIG_BINFMT_SOM)	+= binfmt_som.o
++obj-$(CONFIG_BINFMT_FLAT)	+= binfmt_flat.o
++
++obj-$(CONFIG_FS_MBCACHE)	+= mbcache.o
++obj-$(CONFIG_FS_POSIX_ACL)	+= posix_acl.o xattr_acl.o
++obj-$(CONFIG_NFS_COMMON)	+= nfs_common/
++obj-$(CONFIG_GENERIC_ACL)	+= generic_acl.o
++
++obj-$(CONFIG_FHANDLE)		+= fhandle.o
++
++obj-y				+= quota/
++
++obj-$(CONFIG_PROC_FS)		+= proc/
++obj-y				+= partitions/
++obj-$(CONFIG_SYSFS)		+= sysfs/
++obj-$(CONFIG_CONFIGFS_FS)	+= configfs/
++obj-y				+= devpts/
++
++obj-$(CONFIG_PROFILING)		+= dcookies.o
++obj-$(CONFIG_DLM)		+= dlm/
++ 
++# Do not add any filesystems before this line
++obj-$(CONFIG_FSCACHE)		+= fscache/
++obj-$(CONFIG_REISERFS_FS)	+= reiserfs/
++obj-$(CONFIG_EXT3_FS)		+= ext3/ # Before ext2 so root fs can be ext3
++obj-$(CONFIG_EXT2_FS)		+= ext2/
++# We place ext4 after ext2 so plain ext2 root fs's are mounted using ext2
++# unless explicitly requested by rootfstype
++obj-$(CONFIG_EXT4_FS)		+= ext4/
++obj-$(CONFIG_JBD)		+= jbd/
++obj-$(CONFIG_JBD2)		+= jbd2/
++obj-$(CONFIG_CRAMFS)		+= cramfs/
++obj-$(CONFIG_SQUASHFS)		+= squashfs/
++obj-y				+= ramfs/
++obj-$(CONFIG_HUGETLBFS)		+= hugetlbfs/
++obj-$(CONFIG_CODA_FS)		+= coda/
++obj-$(CONFIG_MINIX_FS)		+= minix/
++obj-$(CONFIG_FAT_FS)		+= fat/
++obj-$(CONFIG_BFS_FS)		+= bfs/
++obj-$(CONFIG_ISO9660_FS)	+= isofs/
++obj-$(CONFIG_HFSPLUS_FS)	+= hfsplus/ # Before hfs to find wrapped HFS+
++obj-$(CONFIG_HFS_FS)		+= hfs/
++obj-$(CONFIG_ECRYPT_FS)		+= ecryptfs/
++obj-$(CONFIG_VXFS_FS)		+= freevxfs/
++obj-$(CONFIG_NFS_FS)		+= nfs/
++obj-$(CONFIG_EXPORTFS)		+= exportfs/
++obj-$(CONFIG_NFSD)		+= nfsd/
++obj-$(CONFIG_LOCKD)		+= lockd/
++obj-$(CONFIG_NLS)		+= nls/
++obj-$(CONFIG_SYSV_FS)		+= sysv/
++obj-$(CONFIG_CIFS)		+= cifs/
++obj-$(CONFIG_NCP_FS)		+= ncpfs/
++obj-$(CONFIG_HPFS_FS)		+= hpfs/
++obj-$(CONFIG_NTFS_FS)		+= ntfs/
++obj-$(CONFIG_UFS_FS)		+= ufs/
++obj-$(CONFIG_EFS_FS)		+= efs/
++obj-$(CONFIG_JFFS2_FS)		+= jffs2/
++obj-$(CONFIG_LOGFS)		+= logfs/
++obj-$(CONFIG_UBIFS_FS)		+= ubifs/
++obj-$(CONFIG_AFFS_FS)		+= affs/
++obj-$(CONFIG_ROMFS_FS)		+= romfs/
++obj-$(CONFIG_QNX4FS_FS)		+= qnx4/
++obj-$(CONFIG_AUTOFS4_FS)	+= autofs4/
++obj-$(CONFIG_ADFS_FS)		+= adfs/
++obj-$(CONFIG_FUSE_FS)		+= fuse/
++obj-$(CONFIG_UDF_FS)		+= udf/
++obj-$(CONFIG_SUN_OPENPROMFS)	+= openpromfs/
++obj-$(CONFIG_OMFS_FS)		+= omfs/
++obj-$(CONFIG_JFS_FS)		+= jfs/
++obj-$(CONFIG_XFS_FS)		+= xfs/
++obj-$(CONFIG_9P_FS)		+= 9p/
++obj-$(CONFIG_AFS_FS)		+= afs/
++obj-$(CONFIG_NILFS2_FS)		+= nilfs2/
++obj-$(CONFIG_BEFS_FS)		+= befs/
++obj-$(CONFIG_HOSTFS)		+= hostfs/
++obj-$(CONFIG_HPPFS)		+= hppfs/
++obj-$(CONFIG_CACHEFILES)	+= cachefiles/
++obj-$(CONFIG_DEBUG_FS)		+= debugfs/
++obj-$(CONFIG_OCFS2_FS)		+= ocfs2/
++obj-$(CONFIG_BTRFS_FS)		+= btrfs/
++obj-$(CONFIG_GFS2_FS)           += gfs2/
++obj-$(CONFIG_EXOFS_FS)          += exofs/
++obj-$(CONFIG_CEPH_FS)		+= ceph/
++obj-$(CONFIG_PSTORE)		+= pstore/
+diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig
+new file mode 100644
+index 00000000..658feea5
+--- /dev/null
++++ b/fs/yaffs2/Kconfig
+@@ -0,0 +1,161 @@
++#
++# yaffs file system configurations
++#
++
++config YAFFS_FS
++	tristate "yaffs2 file system support"
++	default n
++	depends on MTD_BLOCK
++	select YAFFS_YAFFS1
++	select YAFFS_YAFFS2
++	help
++	  yaffs2, or Yet Another Flash File System, is a file system
++	  optimised for NAND Flash chips.
++
++	  To compile the yaffs2 file system support as a module, choose M
++	  here: the module will be called yaffs2.
++
++	  If unsure, say N.
++
++	  Further information on yaffs2 is available at
++	  <http://www.aleph1.co.uk/yaffs/>.
++
++config YAFFS_YAFFS1
++	bool "512 byte / page devices"
++	depends on YAFFS_FS
++	default y
++	help
++	  Enable yaffs1 support -- yaffs for 512 byte / page devices
++
++	  Not needed for 2K-page devices.
++
++	  If unsure, say Y.
++
++config YAFFS_9BYTE_TAGS
++	bool "Use older-style on-NAND data format with pageStatus byte"
++	depends on YAFFS_YAFFS1
++	default n
++	help
++
++	  Older-style on-NAND data format has a "pageStatus" byte to record
++	  chunk/page state.  This byte is zero when the page is discarded.
++	  Choose this option if you have existing on-NAND data using this
++	  format that you need to continue to support.  New data written
++	  also uses the older-style format.  Note: Use of this option
++	  generally requires that MTD's oob layout be adjusted to use the
++	  older-style format.  See notes on tags formats and MTD versions
++	  in yaffs_mtdif1.c.
++
++	  If unsure, say N.
++
++config YAFFS_DOES_ECC
++	bool "Lets yaffs do its own ECC"
++	depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
++	default n
++	help
++	  This enables yaffs to use its own ECC functions instead of using
++	  the ones from the generic MTD-NAND driver.
++
++	  If unsure, say N.
++
++config YAFFS_ECC_WRONG_ORDER
++	bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
++	depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
++	default n
++	help
++	  This makes yaffs_ecc.c use the same ecc byte order as Steven
++	  Hill's nand_ecc.c. If not set, then you get the same ecc byte
++	  order as SmartMedia.
++
++	  If unsure, say N.
++
++config YAFFS_YAFFS2
++	bool "2048 byte (or larger) / page devices"
++	depends on YAFFS_FS
++	default y
++	help
++	  Enable yaffs2 support -- yaffs for >= 2K bytes per page devices
++
++	  If unsure, say Y.
++
++config YAFFS_AUTO_YAFFS2
++	bool "Autoselect yaffs2 format"
++	depends on YAFFS_YAFFS2
++	default y
++	help
++	  Without this, you need to explicitly use yaffs2 as the file
++	  system type. With this, you can say "yaffs" and yaffs or yaffs2
++	  will be used depending on the device page size (yaffs on
++	  512-byte page devices, yaffs2 on 2K page devices).
++
++	  If unsure, say Y.
++
++config YAFFS_DISABLE_TAGS_ECC
++	bool "Disable yaffs from doing ECC on tags by default"
++	depends on YAFFS_FS && YAFFS_YAFFS2
++	default n
++	help
++	  This stops yaffs from doing its own ECC calculations on tags by
++	  default, leaving tags ECC to the MTD layer instead.
++	  This behavior can also be overridden with the tags_ecc_on and
++	  tags_ecc_off mount options.
++
++	  If unsure, say N.
++
++config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++	bool "Force chunk erase check"
++	depends on YAFFS_FS
++	default n
++	help
++	  Normally yaffs checks chunks before writing only until an erased
++	  chunk is found. This helps to detect any partially written
++	  chunks that might have been left behind by a power loss.
++
++	  Enabling this forces on the test that chunks are erased in flash
++	  before writing to them. This takes more time but is potentially
++	  a bit more secure.
++
++	  Say Y during development, while ironing out driver issues;
++	  say N if you want faster writing.
++
++	  If unsure, say Y.
++
++config YAFFS_EMPTY_LOST_AND_FOUND
++	bool "Empty lost and found on boot"
++	depends on YAFFS_FS
++	default n
++	help
++	  If this is enabled then the contents of lost+found are
++	  automatically discarded at mount.
++
++	  If unsure, say N.
++
++config YAFFS_DISABLE_BLOCK_REFRESHING
++	bool "Disable yaffs2 block refreshing"
++	depends on YAFFS_FS
++	default n
++	help
++	  If this is set, then block refreshing is disabled.
++	  Block refreshing infrequently rewrites the oldest block in
++	  a yaffs2 file system. This mechanism refreshes the flash to
++	  mitigate against data loss, and is particularly useful for MLC.
++
++	  If unsure, say N.
++
++config YAFFS_DISABLE_BACKGROUND
++	bool "Disable yaffs2 background processing"
++	depends on YAFFS_FS
++	default n
++	help
++	  If this is set, then background processing is disabled.
++	  Background processing makes many foreground activities faster.
++
++	  If unsure, say N.
++
++config YAFFS_XATTR
++	bool "Enable yaffs2 xattr support"
++	depends on YAFFS_FS
++	default y
++	help
++	  If this is set then yaffs2 will provide xattr support.
++	  If unsure, say Y.
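
For orientation, a kernel configuration fragment that switches on the options declared above might look like the sketch below. Every symbol comes from this Kconfig file; which values you actually want depends on the NAND page size and on whether legacy yaffs1 data must remain readable.

CONFIG_YAFFS_FS=y
CONFIG_YAFFS_YAFFS1=y
# CONFIG_YAFFS_9BYTE_TAGS is not set
# CONFIG_YAFFS_DOES_ECC is not set
CONFIG_YAFFS_YAFFS2=y
CONFIG_YAFFS_AUTO_YAFFS2=y
# CONFIG_YAFFS_DISABLE_TAGS_ECC is not set
# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set
# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set
# CONFIG_YAFFS_DISABLE_BACKGROUND is not set
CONFIG_YAFFS_XATTR=y
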
+diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile
+new file mode 100644
+index 00000000..f9a9fb1b
+--- /dev/null
++++ b/fs/yaffs2/Makefile
+@@ -0,0 +1,18 @@
++#
++# Makefile for the linux YAFFS filesystem routines.
++#
++
++obj-$(CONFIG_YAFFS_FS) += yaffs.o
++
++yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
++yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
++yaffs-y += yaffs_tagscompat.o yaffs_tagsmarshall.o
++yaffs-y += yaffs_mtdif.o
++yaffs-y += yaffs_nameval.o yaffs_attribs.o
++yaffs-y += yaffs_allocator.o
++yaffs-y += yaffs_yaffs1.o
++yaffs-y += yaffs_yaffs2.o
++yaffs-y += yaffs_bitmap.o
++yaffs-y += yaffs_summary.o
++yaffs-y += yaffs_verify.o
++
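
Since the Makefile above links every object into a single yaffs.o, building with CONFIG_YAFFS_FS=m produces one yaffs module. A rough usage sketch (the mtdblock number is illustrative only, not part of this patch):

    make fs/yaffs2/                      # in-tree: rebuild just this directory
    modprobe yaffs                       # module name follows obj-$(CONFIG_YAFFS_FS) += yaffs.o
    mount -t yaffs2 /dev/mtdblock3 /mnt  # MTD_BLOCK (see Kconfig) provides the block device
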
+diff --git a/fs/yaffs2/yaffs_allocator.c b/fs/yaffs2/yaffs_allocator.c
+new file mode 100644
+index 00000000..c8f2861c
+--- /dev/null
++++ b/fs/yaffs2/yaffs_allocator.c
+@@ -0,0 +1,357 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_allocator.h"
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yportenv.h"
++
++/*
++ * Each entry in yaffs_tnode_list and yaffs_obj_list holds blocks
++ * of approx 100 objects that are then allocated singly.
++ * This is basically a simplified slab allocator.
++ *
++ * We don't use the Linux slab allocator because slab does not allow
++ * us to dump all the objects in one hit when we do a umount and tear
++ * down all the tnodes and objects. slab requires that we first free
++ * the individual objects.
++ *
++ * Once yaffs has been mainlined I shall try to motivate for a change
++ * to slab to provide the extra features we need here.
++ */
++
++struct yaffs_tnode_list {
++	struct yaffs_tnode_list *next;
++	struct yaffs_tnode *tnodes;
++};
++
++struct yaffs_obj_list {
++	struct yaffs_obj_list *next;
++	struct yaffs_obj *objects;
++};
++
++struct yaffs_allocator {
++	int n_tnodes_created;
++	struct yaffs_tnode *free_tnodes;
++	int n_free_tnodes;
++	struct yaffs_tnode_list *alloc_tnode_list;
++
++	int n_obj_created;
++	struct list_head free_objs;
++	int n_free_objects;
++
++	struct yaffs_obj_list *allocated_obj_list;
++};
++
++static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
++{
++	struct yaffs_allocator *allocator =
++	    (struct yaffs_allocator *)dev->allocator;
++	struct yaffs_tnode_list *tmp;
++
++	if (!allocator) {
++		BUG();
++		return;
++	}
++
++	while (allocator->alloc_tnode_list) {
++		tmp = allocator->alloc_tnode_list->next;
++
++		kfree(allocator->alloc_tnode_list->tnodes);
++		kfree(allocator->alloc_tnode_list);
++		allocator->alloc_tnode_list = tmp;
++	}
++
++	allocator->free_tnodes = NULL;
++	allocator->n_free_tnodes = 0;
++	allocator->n_tnodes_created = 0;
++}
++
++static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
++{
++	struct yaffs_allocator *allocator = dev->allocator;
++
++	if (!allocator) {
++		BUG();
++		return;
++	}
++
++	allocator->alloc_tnode_list = NULL;
++	allocator->free_tnodes = NULL;
++	allocator->n_free_tnodes = 0;
++	allocator->n_tnodes_created = 0;
++}
++
++static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
++{
++	struct yaffs_allocator *allocator =
++	    (struct yaffs_allocator *)dev->allocator;
++	int i;
++	struct yaffs_tnode *new_tnodes;
++	u8 *mem;
++	struct yaffs_tnode *curr;
++	struct yaffs_tnode *next;
++	struct yaffs_tnode_list *tnl;
++
++	if (!allocator) {
++		BUG();
++		return YAFFS_FAIL;
++	}
++
++	if (n_tnodes < 1)
++		return YAFFS_OK;
++
++	/* make these things */
++	new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
++	mem = (u8 *) new_tnodes;
++
++	if (!new_tnodes) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"yaffs: Could not allocate Tnodes");
++		return YAFFS_FAIL;
++	}
++
++	/* New hookup for wide tnodes */
++	for (i = 0; i < n_tnodes - 1; i++) {
++		curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
++		next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
++		curr->internal[0] = next;
++	}
++
++	curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
++	curr->internal[0] = allocator->free_tnodes;
++	allocator->free_tnodes = (struct yaffs_tnode *)mem;
++
++	allocator->n_free_tnodes += n_tnodes;
++	allocator->n_tnodes_created += n_tnodes;
++
++	/* Now add this bunch of tnodes to a list for freeing up.
++	 * NB If we can't add this to the management list it isn't fatal
++	 * but it just means we can't free this bunch of tnodes later.
++	 */
++	tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
++	if (!tnl) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"Could not add tnodes to management list");
++		return YAFFS_FAIL;
++	} else {
++		tnl->tnodes = new_tnodes;
++		tnl->next = allocator->alloc_tnode_list;
++		allocator->alloc_tnode_list = tnl;
++	}
++
++	yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
++
++	return YAFFS_OK;
++}
++
++struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
++{
++	struct yaffs_allocator *allocator =
++	    (struct yaffs_allocator *)dev->allocator;
++	struct yaffs_tnode *tn = NULL;
++
++	if (!allocator) {
++		BUG();
++		return NULL;
++	}
++
++	/* If there are none left make more */
++	if (!allocator->free_tnodes)
++		yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
++
++	if (allocator->free_tnodes) {
++		tn = allocator->free_tnodes;
++		allocator->free_tnodes = allocator->free_tnodes->internal[0];
++		allocator->n_free_tnodes--;
++	}
++
++	return tn;
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
++{
++	struct yaffs_allocator *allocator = dev->allocator;
++
++	if (!allocator) {
++		BUG();
++		return;
++	}
++
++	if (tn) {
++		tn->internal[0] = allocator->free_tnodes;
++		allocator->free_tnodes = tn;
++		allocator->n_free_tnodes++;
++	}
++	dev->checkpoint_blocks_required = 0;	/* force recalculation */
++}
++
++/*--------------- yaffs_obj allocation ------------------------
++ *
++ * Free yaffs_objs are stored in a list using obj->siblings.
++ * The blocks of allocated objects are stored in a linked list.
++ */
++
++static void yaffs_init_raw_objs(struct yaffs_dev *dev)
++{
++	struct yaffs_allocator *allocator = dev->allocator;
++
++	if (!allocator) {
++		BUG();
++		return;
++	}
++
++	allocator->allocated_obj_list = NULL;
++	INIT_LIST_HEAD(&allocator->free_objs);
++	allocator->n_free_objects = 0;
++}
++
++static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
++{
++	struct yaffs_allocator *allocator = dev->allocator;
++	struct yaffs_obj_list *tmp;
++
++	if (!allocator) {
++		BUG();
++		return;
++	}
++
++	while (allocator->allocated_obj_list) {
++		tmp = allocator->allocated_obj_list->next;
++		kfree(allocator->allocated_obj_list->objects);
++		kfree(allocator->allocated_obj_list);
++		allocator->allocated_obj_list = tmp;
++	}
++
++	INIT_LIST_HEAD(&allocator->free_objs);
++	allocator->n_free_objects = 0;
++	allocator->n_obj_created = 0;
++}
++
++static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
++{
++	struct yaffs_allocator *allocator = dev->allocator;
++	int i;
++	struct yaffs_obj *new_objs;
++	struct yaffs_obj_list *list;
++
++	if (!allocator) {
++		BUG();
++		return YAFFS_FAIL;
++	}
++
++	if (n_obj < 1)
++		return YAFFS_OK;
++
++	/* make these things */
++	new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
++	list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
++
++	if (!new_objs || !list) {
++		kfree(new_objs);
++		new_objs = NULL;
++		kfree(list);
++		list = NULL;
++		yaffs_trace(YAFFS_TRACE_ALLOCATE,
++			"Could not allocate more objects");
++		return YAFFS_FAIL;
++	}
++
++	/* Hook them into the free list */
++	for (i = 0; i < n_obj; i++)
++		list_add(&new_objs[i].siblings, &allocator->free_objs);
++
++	allocator->n_free_objects += n_obj;
++	allocator->n_obj_created += n_obj;
++
++	/* Now add this bunch of Objects to a list for freeing up. */
++
++	list->objects = new_objs;
++	list->next = allocator->allocated_obj_list;
++	allocator->allocated_obj_list = list;
++
++	return YAFFS_OK;
++}
++
++struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
++{
++	struct yaffs_obj *obj = NULL;
++	struct list_head *lh;
++	struct yaffs_allocator *allocator = dev->allocator;
++
++	if (!allocator) {
++		BUG();
++		return obj;
++	}
++
++	/* If there are none left make more */
++	if (list_empty(&allocator->free_objs))
++		yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
++
++	if (!list_empty(&allocator->free_objs)) {
++		lh = allocator->free_objs.next;
++		obj = list_entry(lh, struct yaffs_obj, siblings);
++		list_del_init(lh);
++		allocator->n_free_objects--;
++	}
++
++	return obj;
++}
++
++void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
++{
++
++	struct yaffs_allocator *allocator = dev->allocator;
++
++	if (!allocator) {
++		BUG();
++		return;
++	}
++
++	/* Link into the free list. */
++	list_add(&obj->siblings, &allocator->free_objs);
++	allocator->n_free_objects++;
++}
++
++void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
++{
++
++	if (!dev->allocator) {
++		BUG();
++		return;
++	}
++
++	yaffs_deinit_raw_tnodes(dev);
++	yaffs_deinit_raw_objs(dev);
++	kfree(dev->allocator);
++	dev->allocator = NULL;
++}
++
++void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
++{
++	struct yaffs_allocator *allocator;
++
++	if (dev->allocator) {
++		BUG();
++		return;
++	}
++
++	allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
++	if (allocator) {
++		dev->allocator = allocator;
++		yaffs_init_raw_tnodes(dev);
++		yaffs_init_raw_objs(dev);
++	}
++}
++
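
The header comment of yaffs_allocator.c explains the design: grab tnodes and objects roughly 100 at a time, thread the free ones onto a list, and remember each batch so that unmount can free whole batches instead of individual objects. A minimal userspace sketch of the same pattern follows; all names in it are illustrative and not part of yaffs.

#include <stdlib.h>

struct item {
	struct item *next_free;
	/* payload would go here */
};

struct batch {
	struct batch *next;
	struct item *items;
};

struct pool {
	struct batch *batches;
	struct item *free_list;
};

/* Grab n items in one allocation and push them all onto the free list,
 * as yaffs_create_tnodes()/yaffs_create_free_objs() do. */
int pool_grow(struct pool *p, int n)
{
	struct batch *b = malloc(sizeof(*b));
	int i;

	if (!b)
		return 0;
	b->items = calloc(n, sizeof(struct item));
	if (!b->items) {
		free(b);
		return 0;
	}
	for (i = 0; i < n; i++) {
		b->items[i].next_free = p->free_list;
		p->free_list = &b->items[i];
	}
	b->next = p->batches;
	p->batches = b;
	return 1;
}

/* Unmount-style teardown: free whole batches, never individual items,
 * which is exactly why yaffs avoids the kernel slab here. */
void pool_destroy(struct pool *p)
{
	while (p->batches) {
		struct batch *b = p->batches;

		p->batches = b->next;
		free(b->items);
		free(b);
	}
	p->free_list = NULL;
}
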
+diff --git a/fs/yaffs2/yaffs_allocator.h b/fs/yaffs2/yaffs_allocator.h
+new file mode 100644
+index 00000000..a8cc3226
+--- /dev/null
++++ b/fs/yaffs2/yaffs_allocator.h
+@@ -0,0 +1,30 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_ALLOCATOR_H__
++#define __YAFFS_ALLOCATOR_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
++void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
++
++struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
++void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
++
++struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
++void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
++
++#endif
+diff --git a/fs/yaffs2/yaffs_attribs.c b/fs/yaffs2/yaffs_attribs.c
+new file mode 100644
+index 00000000..3d778f22
+--- /dev/null
++++ b/fs/yaffs2/yaffs_attribs.c
+@@ -0,0 +1,124 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_attribs.h"
++
++void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
++{
++	obj->yst_uid = oh->yst_uid;
++	obj->yst_gid = oh->yst_gid;
++	obj->yst_atime = oh->yst_atime;
++	obj->yst_mtime = oh->yst_mtime;
++	obj->yst_ctime = oh->yst_ctime;
++	obj->yst_rdev = oh->yst_rdev;
++}
++
++void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
++{
++	oh->yst_uid = obj->yst_uid;
++	oh->yst_gid = obj->yst_gid;
++	oh->yst_atime = obj->yst_atime;
++	oh->yst_mtime = obj->yst_mtime;
++	oh->yst_ctime = obj->yst_ctime;
++	oh->yst_rdev = obj->yst_rdev;
++
++}
++
++void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
++{
++	obj->yst_mtime = Y_CURRENT_TIME;
++	if (do_a)
++		obj->yst_atime = obj->yst_mtime;
++	if (do_c)
++		obj->yst_ctime = obj->yst_mtime;
++}
++
++void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
++{
++	yaffs_load_current_time(obj, 1, 1);
++	obj->yst_rdev = rdev;
++	obj->yst_uid = uid;
++	obj->yst_gid = gid;
++}
++
++static loff_t yaffs_get_file_size(struct yaffs_obj *obj)
++{
++	YCHAR *alias = NULL;
++	obj = yaffs_get_equivalent_obj(obj);
++
++	switch (obj->variant_type) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		return obj->variant.file_variant.file_size;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		alias = obj->variant.symlink_variant.alias;
++		if (!alias)
++			return 0;
++		return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
++	default:
++		return 0;
++	}
++}
++
++int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
++{
++	unsigned int valid = attr->ia_valid;
++
++	if (valid & ATTR_MODE)
++		obj->yst_mode = attr->ia_mode;
++	if (valid & ATTR_UID)
++		obj->yst_uid = attr->ia_uid;
++	if (valid & ATTR_GID)
++		obj->yst_gid = attr->ia_gid;
++
++	if (valid & ATTR_ATIME)
++		obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
++	if (valid & ATTR_CTIME)
++		obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
++	if (valid & ATTR_MTIME)
++		obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
++
++	if (valid & ATTR_SIZE)
++		yaffs_resize_file(obj, attr->ia_size);
++
++	yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
++
++	return YAFFS_OK;
++
++}
++
++int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
++{
++	unsigned int valid = 0;
++
++	attr->ia_mode = obj->yst_mode;
++	valid |= ATTR_MODE;
++	attr->ia_uid = obj->yst_uid;
++	valid |= ATTR_UID;
++	attr->ia_gid = obj->yst_gid;
++	valid |= ATTR_GID;
++
++	Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
++	valid |= ATTR_ATIME;
++	Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
++	valid |= ATTR_CTIME;
++	Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
++	valid |= ATTR_MTIME;
++
++	attr->ia_size = yaffs_get_file_size(obj);
++	valid |= ATTR_SIZE;
++
++	attr->ia_valid = valid;
++
++	return YAFFS_OK;
++}
+diff --git a/fs/yaffs2/yaffs_attribs.h b/fs/yaffs2/yaffs_attribs.h
+new file mode 100644
+index 00000000..5b21b085
+--- /dev/null
++++ b/fs/yaffs2/yaffs_attribs.h
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_ATTRIBS_H__
++#define __YAFFS_ATTRIBS_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
++void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
++void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
++void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
++int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
++int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
++
++#endif
+diff --git a/fs/yaffs2/yaffs_bitmap.c b/fs/yaffs2/yaffs_bitmap.c
+new file mode 100644
+index 00000000..4440e930
+--- /dev/null
++++ b/fs/yaffs2/yaffs_bitmap.c
+@@ -0,0 +1,97 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_bitmap.h"
++#include "yaffs_trace.h"
++/*
++ * Chunk bitmap manipulations
++ */
++
++static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
++{
++	if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"BlockBits block %d is not valid",
++			blk);
++		BUG();
++	}
++	return dev->chunk_bits +
++	    (dev->chunk_bit_stride * (blk - dev->internal_start_block));
++}
++
++void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
++{
++	if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
++	    chunk < 0 || chunk >= dev->param.chunks_per_block) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"Chunk Id (%d:%d) invalid",
++			blk, chunk);
++		BUG();
++	}
++}
++
++void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
++{
++	u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++	memset(blk_bits, 0, dev->chunk_bit_stride);
++}
++
++void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
++{
++	u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++	yaffs_verify_chunk_bit_id(dev, blk, chunk);
++	blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
++}
++
++void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
++{
++	u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++	yaffs_verify_chunk_bit_id(dev, blk, chunk);
++	blk_bits[chunk / 8] |= (1 << (chunk & 7));
++}
++
++int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
++{
++	u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++	yaffs_verify_chunk_bit_id(dev, blk, chunk);
++	return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
++}
++
++int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
++{
++	u8 *blk_bits = yaffs_block_bits(dev, blk);
++	int i;
++
++	for (i = 0; i < dev->chunk_bit_stride; i++) {
++		if (*blk_bits)
++			return 1;
++		blk_bits++;
++	}
++	return 0;
++}
++
++int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
++{
++	u8 *blk_bits = yaffs_block_bits(dev, blk);
++	int i;
++	int n = 0;
++
++	for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
++		n += hweight8(*blk_bits);
++
++	return n;
++}
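
The bitmap above packs one bit per chunk, chunk_bit_stride bytes per block: chunk / 8 selects the byte and chunk & 7 the bit within it, as in yaffs_set_chunk_bit() and friends. A self-contained illustration of that indexing:

#include <stdio.h>

int main(void)
{
	unsigned char bits[8] = { 0 };	/* one bit per chunk, room for 64 */
	int chunk = 21;

	/* set: byte 21 / 8 == 2, bit 21 & 7 == 5, mask 0x20 */
	bits[chunk / 8] |= 1 << (chunk & 7);
	printf("byte %d mask 0x%02x -> bit is %d\n",
	       chunk / 8, 1 << (chunk & 7),
	       (bits[chunk / 8] >> (chunk & 7)) & 1);

	/* clear again, as yaffs_clear_chunk_bit() does */
	bits[chunk / 8] &= ~(1 << (chunk & 7));
	return 0;
}
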
+diff --git a/fs/yaffs2/yaffs_bitmap.h b/fs/yaffs2/yaffs_bitmap.h
+new file mode 100644
+index 00000000..e26b37d8
+--- /dev/null
++++ b/fs/yaffs2/yaffs_bitmap.h
+@@ -0,0 +1,33 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * Chunk bitmap manipulations
++ */
++
++#ifndef __YAFFS_BITMAP_H__
++#define __YAFFS_BITMAP_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
++void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
++void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
++void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
++int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
++int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
++int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
++
++#endif
+diff --git a/fs/yaffs2/yaffs_checkptrw.c b/fs/yaffs2/yaffs_checkptrw.c
+new file mode 100644
+index 00000000..e739fb4a
+--- /dev/null
++++ b/fs/yaffs2/yaffs_checkptrw.c
+@@ -0,0 +1,474 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_checkptrw.h"
++#include "yaffs_getblockinfo.h"
++
++struct yaffs_checkpt_chunk_hdr {
++	int version;
++	int seq;
++	u32 sum;
++	u32 xor;
++};
++
++
++static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
++{
++	return chunk - dev->chunk_offset;
++}
++
++static int apply_block_offset(struct yaffs_dev *dev, int block)
++{
++	return block - dev->block_offset;
++}
++
++static void yaffs2_checkpt_init_chunk_hdr(struct yaffs_dev *dev)
++{
++	struct yaffs_checkpt_chunk_hdr hdr;
++
++	hdr.version = YAFFS_CHECKPOINT_VERSION;
++	hdr.seq = dev->checkpt_page_seq;
++	hdr.sum = dev->checkpt_sum;
++	hdr.xor = dev->checkpt_xor;
++
++	dev->checkpt_byte_offs = sizeof(hdr);
++
++	memcpy(dev->checkpt_buffer, &hdr, sizeof(hdr));
++}
++
++static int yaffs2_checkpt_check_chunk_hdr(struct yaffs_dev *dev)
++{
++	struct yaffs_checkpt_chunk_hdr hdr;
++
++	memcpy(&hdr, dev->checkpt_buffer, sizeof(hdr));
++
++	dev->checkpt_byte_offs = sizeof(hdr);
++
++	return hdr.version == YAFFS_CHECKPOINT_VERSION &&
++		hdr.seq == dev->checkpt_page_seq &&
++		hdr.sum == dev->checkpt_sum &&
++		hdr.xor == dev->checkpt_xor;
++}
++
++static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
++{
++	int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"checkpt blocks_avail = %d", blocks_avail);
++
++	return (blocks_avail <= 0) ? 0 : 1;
++}
++
++static int yaffs_checkpt_erase(struct yaffs_dev *dev)
++{
++	int i;
++
++	if (!dev->drv.drv_erase_fn)
++		return 0;
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"checking blocks %d to %d",
++		dev->internal_start_block, dev->internal_end_block);
++
++	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++		struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
++		int offset_i = apply_block_offset(dev, i);
++		int result;
++
++		if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
++			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"erasing checkpt block %d", i);
++
++			dev->n_erasures++;
++
++			result = dev->drv.drv_erase_fn(dev, offset_i);
++			if (result) {
++				bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++				dev->n_erased_blocks++;
++				dev->n_free_chunks +=
++				    dev->param.chunks_per_block;
++			} else {
++				dev->drv.drv_mark_bad_fn(dev, offset_i);
++				bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++			}
++		}
++	}
++
++	dev->blocks_in_checkpt = 0;
++
++	return 1;
++}
++
++static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
++{
++	int i;
++	int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"allocating checkpt block: erased %d reserved %d avail %d next %d ",
++		dev->n_erased_blocks, dev->param.n_reserved_blocks,
++		blocks_avail, dev->checkpt_next_block);
++
++	if (dev->checkpt_next_block >= 0 &&
++	    dev->checkpt_next_block <= dev->internal_end_block &&
++	    blocks_avail > 0) {
++
++		for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
++		     i++) {
++			struct yaffs_block_info *bi;
++
++			bi = yaffs_get_block_info(dev, i);
++			if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++				dev->checkpt_next_block = i + 1;
++				dev->checkpt_cur_block = i;
++				yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++					"allocating checkpt block %d", i);
++				return;
++			}
++		}
++	}
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
++
++	dev->checkpt_next_block = -1;
++	dev->checkpt_cur_block = -1;
++}
++
++static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
++{
++	int i;
++	struct yaffs_ext_tags tags;
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"find next checkpt block: start:  blocks %d next %d",
++		dev->blocks_in_checkpt, dev->checkpt_next_block);
++
++	if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
++		for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
++		     i++) {
++			int chunk = i * dev->param.chunks_per_block;
++			enum yaffs_block_state state;
++			u32 seq;
++
++			dev->tagger.read_chunk_tags_fn(dev,
++					apply_chunk_offset(dev, chunk),
++					NULL, &tags);
++			dev->tagger.query_block_fn(dev,
++						apply_block_offset(dev, i),
++						&state, &seq);
++			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++				"find next checkpt block: search: block %d state %d oid %d seq %d eccr %d",
++				i, (int) state,
++				tags.obj_id, tags.seq_number,
++				tags.ecc_result);
++
++			if (tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA)
++				continue;
++
++			if (state == YAFFS_BLOCK_STATE_DEAD)
++				continue;
++
++			/* Right kind of block */
++			dev->checkpt_next_block = tags.obj_id;
++			dev->checkpt_cur_block = i;
++			dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
++			dev->blocks_in_checkpt++;
++			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++				"found checkpt block %d", i);
++			return;
++		}
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
++
++	dev->checkpt_next_block = -1;
++	dev->checkpt_cur_block = -1;
++}
++
++int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
++{
++	int i;
++
++	dev->checkpt_open_write = writing;
++
++	/* Got the functions we need? */
++	if (!dev->tagger.write_chunk_tags_fn ||
++	    !dev->tagger.read_chunk_tags_fn ||
++	    !dev->drv.drv_erase_fn ||
++	    !dev->drv.drv_mark_bad_fn)
++		return 0;
++
++	if (writing && !yaffs2_checkpt_space_ok(dev))
++		return 0;
++
++	if (!dev->checkpt_buffer)
++		dev->checkpt_buffer =
++		    kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
++	if (!dev->checkpt_buffer)
++		return 0;
++
++	dev->checkpt_page_seq = 0;
++	dev->checkpt_byte_count = 0;
++	dev->checkpt_sum = 0;
++	dev->checkpt_xor = 0;
++	dev->checkpt_cur_block = -1;
++	dev->checkpt_cur_chunk = -1;
++	dev->checkpt_next_block = dev->internal_start_block;
++
++	if (writing) {
++		memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
++		yaffs2_checkpt_init_chunk_hdr(dev);
++		return yaffs_checkpt_erase(dev);
++	}
++
++	/* Opening for a read */
++	/* Set to a value that will kick off a read */
++	dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
++	/* A checkpoint block list of 1 checkpoint block per 16 blocks is
++	 * (hopefully) going to be way more than we need */
++	dev->blocks_in_checkpt = 0;
++	dev->checkpt_max_blocks =
++	    (dev->internal_end_block - dev->internal_start_block) / 16 + 2;
++	dev->checkpt_block_list =
++	    kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
++
++	if (!dev->checkpt_block_list)
++		return 0;
++
++	for (i = 0; i < dev->checkpt_max_blocks; i++)
++		dev->checkpt_block_list[i] = -1;
++
++	return 1;
++}
++
++int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum)
++{
++	u32 composite_sum;
++
++	composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xff);
++	*sum = composite_sum;
++	return 1;
++}
++
++static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
++{
++	int chunk;
++	int offset_chunk;
++	struct yaffs_ext_tags tags;
++
++	if (dev->checkpt_cur_block < 0) {
++		yaffs2_checkpt_find_erased_block(dev);
++		dev->checkpt_cur_chunk = 0;
++	}
++
++	if (dev->checkpt_cur_block < 0)
++		return 0;
++
++	tags.is_deleted = 0;
++	tags.obj_id = dev->checkpt_next_block;	/* Hint to next place to look */
++	tags.chunk_id = dev->checkpt_page_seq + 1;
++	tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
++	tags.n_bytes = dev->data_bytes_per_chunk;
++	if (dev->checkpt_cur_chunk == 0) {
++		/* First chunk we write for the block? Set block state to
++		   checkpoint */
++		struct yaffs_block_info *bi =
++		    yaffs_get_block_info(dev, dev->checkpt_cur_block);
++		bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++		dev->blocks_in_checkpt++;
++	}
++
++	chunk =
++	    dev->checkpt_cur_block * dev->param.chunks_per_block +
++	    dev->checkpt_cur_chunk;
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"checkpoint write buffer nand %d(%d:%d) objid %d chId %d",
++		chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
++		tags.obj_id, tags.chunk_id);
++
++	offset_chunk = apply_chunk_offset(dev, chunk);
++
++	dev->n_page_writes++;
++
++	dev->tagger.write_chunk_tags_fn(dev, offset_chunk,
++				       dev->checkpt_buffer, &tags);
++	dev->checkpt_page_seq++;
++	dev->checkpt_cur_chunk++;
++	if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
++		dev->checkpt_cur_chunk = 0;
++		dev->checkpt_cur_block = -1;
++	}
++	memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
++
++	yaffs2_checkpt_init_chunk_hdr(dev);
++
++
++	return 1;
++}
++
++int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
++{
++	int i = 0;
++	int ok = 1;
++	u8 *data_bytes = (u8 *) data;
++
++	if (!dev->checkpt_buffer)
++		return 0;
++
++	if (!dev->checkpt_open_write)
++		return -1;
++
++	while (i < n_bytes && ok) {
++		dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
++		dev->checkpt_sum += *data_bytes;
++		dev->checkpt_xor ^= *data_bytes;
++
++		dev->checkpt_byte_offs++;
++		i++;
++		data_bytes++;
++		dev->checkpt_byte_count++;
++
++		if (dev->checkpt_byte_offs < 0 ||
++		    dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
++			ok = yaffs2_checkpt_flush_buffer(dev);
++	}
++
++	return i;
++}
++
++int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
++{
++	int i = 0;
++	int ok = 1;
++	struct yaffs_ext_tags tags;
++	int chunk;
++	int offset_chunk;
++	u8 *data_bytes = (u8 *) data;
++
++	if (!dev->checkpt_buffer)
++		return 0;
++
++	if (dev->checkpt_open_write)
++		return -1;
++
++	while (i < n_bytes && ok) {
++
++		if (dev->checkpt_byte_offs < 0 ||
++		    dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
++
++			if (dev->checkpt_cur_block < 0) {
++				yaffs2_checkpt_find_block(dev);
++				dev->checkpt_cur_chunk = 0;
++			}
++
++			if (dev->checkpt_cur_block < 0) {
++				ok = 0;
++				break;
++			}
++
++			chunk = dev->checkpt_cur_block *
++			    dev->param.chunks_per_block +
++			    dev->checkpt_cur_chunk;
++
++			offset_chunk = apply_chunk_offset(dev, chunk);
++			dev->n_page_reads++;
++
++			/* read in the next chunk */
++			dev->tagger.read_chunk_tags_fn(dev,
++						offset_chunk,
++						dev->checkpt_buffer,
++						&tags);
++
++			if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
++			    tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
++			    tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) {
++				ok = 0;
++				break;
++			}
++			if(!yaffs2_checkpt_check_chunk_hdr(dev)) {
++				ok = 0;
++				break;
++			}
++
++			dev->checkpt_page_seq++;
++			dev->checkpt_cur_chunk++;
++
++			if (dev->checkpt_cur_chunk >=
++					dev->param.chunks_per_block)
++				dev->checkpt_cur_block = -1;
++
++		}
++
++		*data_bytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
++		dev->checkpt_sum += *data_bytes;
++		dev->checkpt_xor ^= *data_bytes;
++		dev->checkpt_byte_offs++;
++		i++;
++		data_bytes++;
++		dev->checkpt_byte_count++;
++	}
++
++	return i;
++}
++
++int yaffs_checkpt_close(struct yaffs_dev *dev)
++{
++	int i;
++
++	if (dev->checkpt_open_write) {
++		if (dev->checkpt_byte_offs !=
++			sizeof(struct yaffs_checkpt_chunk_hdr))
++			yaffs2_checkpt_flush_buffer(dev);
++	} else if (dev->checkpt_block_list) {
++		for (i = 0;
++		     i < dev->blocks_in_checkpt &&
++		     dev->checkpt_block_list[i] >= 0; i++) {
++			int blk = dev->checkpt_block_list[i];
++			struct yaffs_block_info *bi = NULL;
++
++			if (dev->internal_start_block <= blk &&
++			    blk <= dev->internal_end_block)
++				bi = yaffs_get_block_info(dev, blk);
++			if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
++				bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++		}
++		kfree(dev->checkpt_block_list);
++		dev->checkpt_block_list = NULL;
++	}
++
++	dev->n_free_chunks -=
++		dev->blocks_in_checkpt * dev->param.chunks_per_block;
++	dev->n_erased_blocks -= dev->blocks_in_checkpt;
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "checkpoint byte count %d",
++		dev->checkpt_byte_count);
++
++	if (dev->checkpt_buffer) {
++		/* free the buffer */
++		kfree(dev->checkpt_buffer);
++		dev->checkpt_buffer = NULL;
++		return 1;
++	} else {
++		return 0;
++	}
++}
++
++int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
++{
++	/* Erase the checkpoint data */
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"checkpoint invalidate of %d blocks",
++		dev->blocks_in_checkpt);
++
++	return yaffs_checkpt_erase(dev);
++}
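
From a caller's point of view the checkpoint is a byte stream: open it, read or write any number of bytes, close it. A hedged sketch of the write side, assuming an already initialised struct yaffs_dev; the helper name save_checkpt_blob is hypothetical:

#include "yaffs_checkptrw.h"

int save_checkpt_blob(struct yaffs_dev *dev, const void *blob, int n_bytes)
{
	int ok;

	if (!yaffs2_checkpt_open(dev, 1))	/* 1 == open for writing */
		return 0;

	/* yaffs2_checkpt_wr() buffers into checkpt_buffer; each full chunk
	 * goes to NAND behind a yaffs_checkpt_chunk_hdr carrying the
	 * version, sequence number and running sum/xor. */
	ok = (yaffs2_checkpt_wr(dev, blob, n_bytes) == n_bytes);

	/* close flushes any partial chunk and updates block accounting */
	return yaffs_checkpt_close(dev) && ok;
}
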
+diff --git a/fs/yaffs2/yaffs_checkptrw.h b/fs/yaffs2/yaffs_checkptrw.h
+new file mode 100644
+index 00000000..cdbaba71
+--- /dev/null
++++ b/fs/yaffs2/yaffs_checkptrw.h
+@@ -0,0 +1,33 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_CHECKPTRW_H__
++#define __YAFFS_CHECKPTRW_H__
++
++#include "yaffs_guts.h"
++
++int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
++
++int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
++
++int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
++
++int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum);
++
++int yaffs_checkpt_close(struct yaffs_dev *dev);
++
++int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
++
++#endif
+diff --git a/fs/yaffs2/yaffs_ecc.c b/fs/yaffs2/yaffs_ecc.c
+new file mode 100644
+index 00000000..9294107c
+--- /dev/null
++++ b/fs/yaffs2/yaffs_ecc.c
+@@ -0,0 +1,281 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bits are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two
++ * such ECC blocks are used on a 512-byte NAND page.
++ *
++ */
++
++#include "yportenv.h"
++
++#include "yaffs_ecc.h"
++
++/* Table generated by gen-ecc.c
++ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
++ * for each byte of data. These are instead provided in a table in bits 7..2.
++ * Bit 0 of each entry indicates whether the entry has odd or even parity,
++ * and therefore this byte's influence on the line parity.
++ */
++
++static const unsigned char column_parity_table[] = {
++	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++};
++
++
++/* Calculate the ECC for a 256-byte block of data */
++void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc)
++{
++	unsigned int i;
++	unsigned char col_parity = 0;
++	unsigned char line_parity = 0;
++	unsigned char line_parity_prime = 0;
++	unsigned char t;
++	unsigned char b;
++
++	for (i = 0; i < 256; i++) {
++		b = column_parity_table[*data++];
++		col_parity ^= b;
++
++		if (b & 0x01) {	/* odd number of bits in the byte */
++			line_parity ^= i;
++			line_parity_prime ^= ~i;
++		}
++	}
++
++	ecc[2] = (~col_parity) | 0x03;
++
++	t = 0;
++	if (line_parity & 0x80)
++		t |= 0x80;
++	if (line_parity_prime & 0x80)
++		t |= 0x40;
++	if (line_parity & 0x40)
++		t |= 0x20;
++	if (line_parity_prime & 0x40)
++		t |= 0x10;
++	if (line_parity & 0x20)
++		t |= 0x08;
++	if (line_parity_prime & 0x20)
++		t |= 0x04;
++	if (line_parity & 0x10)
++		t |= 0x02;
++	if (line_parity_prime & 0x10)
++		t |= 0x01;
++	ecc[1] = ~t;
++
++	t = 0;
++	if (line_parity & 0x08)
++		t |= 0x80;
++	if (line_parity_prime & 0x08)
++		t |= 0x40;
++	if (line_parity & 0x04)
++		t |= 0x20;
++	if (line_parity_prime & 0x04)
++		t |= 0x10;
++	if (line_parity & 0x02)
++		t |= 0x08;
++	if (line_parity_prime & 0x02)
++		t |= 0x04;
++	if (line_parity & 0x01)
++		t |= 0x02;
++	if (line_parity_prime & 0x01)
++		t |= 0x01;
++	ecc[0] = ~t;
++
++}
++
++/* Correct the ECC on a 256 byte block of data */
++
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
++		      const unsigned char *test_ecc)
++{
++	unsigned char d0, d1, d2;	/* deltas */
++
++	d0 = read_ecc[0] ^ test_ecc[0];
++	d1 = read_ecc[1] ^ test_ecc[1];
++	d2 = read_ecc[2] ^ test_ecc[2];
++
++	if ((d0 | d1 | d2) == 0)
++		return 0;	/* no error */
++
++	if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
++	    ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
++	    ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
++		/* Single bit (recoverable) error in data */
++
++		unsigned byte;
++		unsigned bit;
++
++		bit = byte = 0;
++
++		if (d1 & 0x80)
++			byte |= 0x80;
++		if (d1 & 0x20)
++			byte |= 0x40;
++		if (d1 & 0x08)
++			byte |= 0x20;
++		if (d1 & 0x02)
++			byte |= 0x10;
++		if (d0 & 0x80)
++			byte |= 0x08;
++		if (d0 & 0x20)
++			byte |= 0x04;
++		if (d0 & 0x08)
++			byte |= 0x02;
++		if (d0 & 0x02)
++			byte |= 0x01;
++
++		if (d2 & 0x80)
++			bit |= 0x04;
++		if (d2 & 0x20)
++			bit |= 0x02;
++		if (d2 & 0x08)
++			bit |= 0x01;
++
++		data[byte] ^= (1 << bit);
++
++		return 1;	/* Corrected the error */
++	}
++
++	if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
++		/* Recoverable error in ECC */
++
++		read_ecc[0] = test_ecc[0];
++		read_ecc[1] = test_ecc[1];
++		read_ecc[2] = test_ecc[2];
++
++		return 1;	/* Corrected the error */
++	}
++
++	/* Unrecoverable error */
++
++	return -1;
++
++}
++
++/*
++ * The *_other variants do ECC calculations on an arbitrary n bytes of data
++ */
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
++			  struct yaffs_ecc_other *ecc_other)
++{
++	unsigned int i;
++	unsigned char col_parity = 0;
++	unsigned line_parity = 0;
++	unsigned line_parity_prime = 0;
++	unsigned char b;
++
++	for (i = 0; i < n_bytes; i++) {
++		b = column_parity_table[*data++];
++		col_parity ^= b;
++
++		if (b & 0x01) {
++			/* odd number of bits in the byte */
++			line_parity ^= i;
++			line_parity_prime ^= ~i;
++		}
++
++	}
++
++	ecc_other->col_parity = (col_parity >> 2) & 0x3f;
++	ecc_other->line_parity = line_parity;
++	ecc_other->line_parity_prime = line_parity_prime;
++}
++
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
++			    struct yaffs_ecc_other *read_ecc,
++			    const struct yaffs_ecc_other *test_ecc)
++{
++	unsigned char delta_col;	/* column parity delta */
++	unsigned delta_line;	/* line parity delta */
++	unsigned delta_line_prime;	/* line parity delta */
++	unsigned bit;
++
++	delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
++	delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
++	delta_line_prime =
++	    read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
++
++	if ((delta_col | delta_line | delta_line_prime) == 0)
++		return 0;	/* no error */
++
++	if (delta_line == ~delta_line_prime &&
++	    (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
++		/* Single bit (recoverable) error in data */
++
++		bit = 0;
++
++		if (delta_col & 0x20)
++			bit |= 0x04;
++		if (delta_col & 0x08)
++			bit |= 0x02;
++		if (delta_col & 0x02)
++			bit |= 0x01;
++
++		if (delta_line >= n_bytes)
++			return -1;
++
++		data[delta_line] ^= (1 << bit);
++
++		return 1;	/* corrected */
++	}
++
++	if ((hweight32(delta_line) +
++	     hweight32(delta_line_prime) +
++	     hweight8(delta_col)) == 1) {
++		/* Recoverable error in ECC */
++
++		*read_ecc = *test_ecc;
++		return 1;	/* corrected */
++	}
++
++	/* Unrecoverable error */
++
++	return -1;
++}
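
A round trip through the two routines above shows the intended protocol: compute an ECC when the 256-byte page is written, recompute it on read, and let yaffs_ecc_correct() repair a single flipped bit from the pair. The harness below is a sketch; in the kernel yaffs_ecc.c compiles as-is, while a userspace build would need small shims for yportenv.h and hweight8().

#include <stdio.h>

#include "yaffs_ecc.h"

int main(void)
{
	unsigned char page[256];
	unsigned char ecc_stored[3];	/* ECC computed at write time */
	unsigned char ecc_now[3];	/* ECC recomputed after the read */
	int i;

	for (i = 0; i < 256; i++)
		page[i] = (unsigned char)i;

	yaffs_ecc_calc(page, ecc_stored);

	page[100] ^= 0x10;		/* simulate a single-bit flip */
	yaffs_ecc_calc(page, ecc_now);

	/* yaffs_ecc_correct() returns 0 = clean, 1 = corrected, -1 = unrecoverable */
	printf("correct -> %d, byte restored: %s\n",
	       yaffs_ecc_correct(page, ecc_stored, ecc_now),
	       page[100] == 100 ? "yes" : "no");
	return 0;
}
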
+diff --git a/fs/yaffs2/yaffs_ecc.h b/fs/yaffs2/yaffs_ecc.h
+new file mode 100644
+index 00000000..17d47bd8
+--- /dev/null
++++ b/fs/yaffs2/yaffs_ecc.h
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bits are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data.
++ * Thus, two such ECC blocks are used on a 512-byte NAND page.
++ *
++ */
++
++#ifndef __YAFFS_ECC_H__
++#define __YAFFS_ECC_H__
++
++struct yaffs_ecc_other {
++	unsigned char col_parity;
++	unsigned line_parity;
++	unsigned line_parity_prime;
++};
++
++void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc);
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
++		      const unsigned char *test_ecc);
++
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
++			  struct yaffs_ecc_other *ecc);
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
++			    struct yaffs_ecc_other *read_ecc,
++			    const struct yaffs_ecc_other *test_ecc);
++#endif
+diff --git a/fs/yaffs2/yaffs_getblockinfo.h b/fs/yaffs2/yaffs_getblockinfo.h
+new file mode 100644
+index 00000000..8fd0802b
+--- /dev/null
++++ b/fs/yaffs2/yaffs_getblockinfo.h
+@@ -0,0 +1,35 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GETBLOCKINFO_H__
++#define __YAFFS_GETBLOCKINFO_H__
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++
++/* Function to manipulate block info */
++static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
++							      *dev, int blk)
++{
++	if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"**>> yaffs: get_block_info block %d is not valid",
++			blk);
++		BUG();
++	}
++	return &dev->block_info[blk - dev->internal_start_block];
++}
++
++#endif
+diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
+new file mode 100644
+index 00000000..794bef80
+--- /dev/null
++++ b/fs/yaffs2/yaffs_guts.c
+@@ -0,0 +1,5059 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yportenv.h"
++#include "yaffs_trace.h"
++
++#include "yaffs_guts.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_tagsmarshall.h"
++#include "yaffs_nand.h"
++#include "yaffs_yaffs1.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_verify.h"
++#include "yaffs_nand.h"
++#include "yaffs_packedtags2.h"
++#include "yaffs_nameval.h"
++#include "yaffs_allocator.h"
++#include "yaffs_attribs.h"
++#include "yaffs_summary.h"
++
++/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
++#define YAFFS_GC_GOOD_ENOUGH 2
++#define YAFFS_GC_PASSIVE_THRESHOLD 4
++
++#include "yaffs_ecc.h"
++
++/* Forward declarations */
++
++static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
++			     const u8 *buffer, int n_bytes, int use_reserve);
++
++static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
++				int buffer_size);
++
++/* Function to calculate chunk and offset */
++
++void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
++				int *chunk_out, u32 *offset_out)
++{
++	int chunk;
++	u32 offset;
++
++	chunk = (u32) (addr >> dev->chunk_shift);
++
++	if (dev->chunk_div == 1) {
++		/* easy power of 2 case */
++		offset = (u32) (addr & dev->chunk_mask);
++	} else {
++		/* Non power-of-2 case */
++
++		loff_t chunk_base;
++
++		chunk /= dev->chunk_div;
++
++		chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
++		offset = (u32) (addr - chunk_base);
++	}
++
++	*chunk_out = chunk;
++	*offset_out = offset;
++}
++
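
For the common power-of-2 chunk size, yaffs_addr_to_chunk() above reduces to a shift and a mask. A standalone check of that path, taking a 2048-byte chunk (chunk_shift == 11, chunk_mask == 0x7ff) as the assumed geometry:

#include <assert.h>

int main(void)
{
	/* address 123 bytes into chunk 5, mirroring the chunk_div == 1 path */
	unsigned long long addr = 5ULL * 2048 + 123;
	int chunk = (int)(addr >> 11);
	unsigned int offset = (unsigned int)(addr & 0x7ff);

	assert(chunk == 5 && offset == 123);
	return 0;
}
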
++/* Function to return the number of shifts for a power of 2 greater than or
++ * equal to the given number
++ * Note we don't try to cater for all possible numbers and this does not have to
++ * be hellishly efficient.
++ */
++
++static inline u32 calc_shifts_ceiling(u32 x)
++{
++	int extra_bits;
++	int shifts;
++
++	shifts = extra_bits = 0;
++
++	while (x > 1) {
++		if (x & 1)
++			extra_bits++;
++		x >>= 1;
++		shifts++;
++	}
++
++	if (extra_bits)
++		shifts++;
++
++	return shifts;
++}
++
++/* Function to return the number of shifts to get a 1 in bit 0
++ */
++
++static inline u32 calc_shifts(u32 x)
++{
++	u32 shifts;
++
++	shifts = 0;
++
++	if (!x)
++		return 0;
++
++	while (!(x & 1)) {
++		x >>= 1;
++		shifts++;
++	}
++
++	return shifts;
++}
++
++/*
++ * Temporary buffer manipulations.
++ */
++
++static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
++{
++	int i;
++	u8 *buf = (u8 *) 1;	/* dummy non-NULL value so the loop below starts */
++
++	memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
++
++	for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
++		dev->temp_buffer[i].in_use = 0;
++		buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
++		dev->temp_buffer[i].buffer = buf;
++	}
++
++	return buf ? YAFFS_OK : YAFFS_FAIL;
++}
++
++u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev)
++{
++	int i;
++
++	dev->temp_in_use++;
++	if (dev->temp_in_use > dev->max_temp)
++		dev->max_temp = dev->temp_in_use;
++
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++		if (dev->temp_buffer[i].in_use == 0) {
++			dev->temp_buffer[i].in_use = 1;
++			return dev->temp_buffer[i].buffer;
++		}
++	}
++
++	yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
++	/*
++	 * If we got here then we have to allocate an unmanaged one.
++	 * This is not good.
++	 */
++
++	dev->unmanaged_buffer_allocs++;
++	return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
++
++}
++
++void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
++{
++	int i;
++
++	dev->temp_in_use--;
++
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++		if (dev->temp_buffer[i].buffer == buffer) {
++			dev->temp_buffer[i].in_use = 0;
++			return;
++		}
++	}
++
++	if (buffer) {
++		/* assume it is an unmanaged one. */
++		yaffs_trace(YAFFS_TRACE_BUFFERS,
++			"Releasing unmanaged temp buffer");
++		kfree(buffer);
++		dev->unmanaged_buffer_deallocs++;
++	}
++
++}
++
++/*
++ * Functions for robustness handling. TODO.
++ *
++ */
++
++static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
++				     const u8 *data,
++				     const struct yaffs_ext_tags *tags)
++{
++	(void) dev;
++	(void) nand_chunk;
++	(void) data;
++	(void) tags;
++}
++
++static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
++				      const struct yaffs_ext_tags *tags)
++{
++	(void) dev;
++	(void) nand_chunk;
++	(void) tags;
++}
++
++void yaffs_handle_chunk_error(struct yaffs_dev *dev,
++			      struct yaffs_block_info *bi)
++{
++	if (!bi->gc_prioritise) {
++		bi->gc_prioritise = 1;
++		dev->has_pending_prioritised_gc = 1;
++		bi->chunk_error_strikes++;
++
++		if (bi->chunk_error_strikes > 3) {
++			bi->needs_retiring = 1;	/* Too many strikes, so retire */
++			yaffs_trace(YAFFS_TRACE_ALWAYS,
++				"yaffs: Block struck out");
++
++		}
++	}
++}
++
++static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
++					int erased_ok)
++{
++	int flash_block = nand_chunk / dev->param.chunks_per_block;
++	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
++
++	yaffs_handle_chunk_error(dev, bi);
++
++	if (erased_ok) {
++		/* Was an actual write failure,
++		 * so mark the block for retirement.*/
++		bi->needs_retiring = 1;
++		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++		  "**>> Block %d needs retiring", flash_block);
++	}
++
++	/* Delete the chunk */
++	yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++	yaffs_skip_rest_of_block(dev);
++}
++
++/*
++ * Verification code
++ */
++
++/*
++ *  Simple hash function. Needs to have a reasonable spread
++ */
++
++static inline int yaffs_hash_fn(int n)
++{
++	if (n < 0)
++		n = -n;
++	return n % YAFFS_NOBJECT_BUCKETS;
++}
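++
++/*
++ * Example (assuming the usual YAFFS_NOBJECT_BUCKETS of 256): object ids
++ * 300 and -300 both land in bucket 44, since the sign is stripped before
++ * the modulo.
++ */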
++
++/*
++ * Access functions to useful fake objects.
++ * Note that root might have a presence in NAND if permissions are set.
++ */
++
++struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
++{
++	return dev->root_dir;
++}
++
++struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
++{
++	return dev->lost_n_found;
++}
++
++/*
++ *  Erased NAND checking functions
++ */
++
++int yaffs_check_ff(u8 *buffer, int n_bytes)
++{
++	/* Horrible, slow implementation */
++	while (n_bytes--) {
++		if (*buffer != 0xff)
++			return 0;
++		buffer++;
++	}
++	return 1;
++}
++
++static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
++{
++	int retval = YAFFS_OK;
++	u8 *data = yaffs_get_temp_buffer(dev);
++	struct yaffs_ext_tags tags;
++	int result;
++
++	result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
++
++	if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
++		retval = YAFFS_FAIL;
++
++	if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
++		tags.chunk_used) {
++		yaffs_trace(YAFFS_TRACE_NANDACCESS,
++			"Chunk %d not erased", nand_chunk);
++		retval = YAFFS_FAIL;
++	}
++
++	yaffs_release_temp_buffer(dev, data);
++
++	return retval;
++
++}
++
++static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
++				      int nand_chunk,
++				      const u8 *data,
++				      struct yaffs_ext_tags *tags)
++{
++	int retval = YAFFS_OK;
++	struct yaffs_ext_tags temp_tags;
++	u8 *buffer = yaffs_get_temp_buffer(dev);
++	int result;
++
++	result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
++	if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
++	    temp_tags.obj_id != tags->obj_id ||
++	    temp_tags.chunk_id != tags->chunk_id ||
++	    temp_tags.n_bytes != tags->n_bytes)
++		retval = YAFFS_FAIL;
++
++	yaffs_release_temp_buffer(dev, buffer);
++
++	return retval;
++}
++
++
++int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
++{
++	int reserved_chunks;
++	int reserved_blocks = dev->param.n_reserved_blocks;
++	int checkpt_blocks;
++
++	checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
++
++	reserved_chunks =
++	    (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
++
++	return (dev->n_free_chunks > (reserved_chunks + n_chunks));
++}
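++
++/*
++ * Worked example (numbers are illustrative): with 5 reserved blocks,
++ * 2 checkpoint blocks and 64 chunks per block, reserved_chunks is
++ * (5 + 2) * 64 = 448, so an allocation of n_chunks succeeds only while
++ * more than 448 + n_chunks free chunks remain.
++ */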
++
++static int yaffs_find_alloc_block(struct yaffs_dev *dev)
++{
++	int i;
++	struct yaffs_block_info *bi;
++
++	if (dev->n_erased_blocks < 1) {
++		/* Hoosterman, we've got a problem.
++		 * We can't get space to gc.
++		 */
++		yaffs_trace(YAFFS_TRACE_ERROR,
++		  "yaffs tragedy: no more erased blocks");
++
++		return -1;
++	}
++
++	/* Find an empty block. */
++
++	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++		dev->alloc_block_finder++;
++		if (dev->alloc_block_finder < dev->internal_start_block
++		    || dev->alloc_block_finder > dev->internal_end_block) {
++			dev->alloc_block_finder = dev->internal_start_block;
++		}
++
++		bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
++
++		if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++			bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
++			dev->seq_number++;
++			bi->seq_number = dev->seq_number;
++			dev->n_erased_blocks--;
++			yaffs_trace(YAFFS_TRACE_ALLOCATE,
++			  "Allocated block %d, seq %d, %d left",
++			   dev->alloc_block_finder, dev->seq_number,
++			   dev->n_erased_blocks);
++			return dev->alloc_block_finder;
++		}
++	}
++
++	yaffs_trace(YAFFS_TRACE_ALWAYS,
++		"yaffs tragedy: no more erased blocks, but there should have been %d",
++		dev->n_erased_blocks);
++
++	return -1;
++}
++
++static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
++			     struct yaffs_block_info **block_ptr)
++{
++	int ret_val;
++	struct yaffs_block_info *bi;
++
++	if (dev->alloc_block < 0) {
++		/* Get next block to allocate off */
++		dev->alloc_block = yaffs_find_alloc_block(dev);
++		dev->alloc_page = 0;
++	}
++
++	if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
++		/* No space unless we're allowed to use the reserve. */
++		return -1;
++	}
++
++	if (dev->n_erased_blocks < dev->param.n_reserved_blocks
++	    && dev->alloc_page == 0)
++		yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
++
++	/* Next page please.... */
++	if (dev->alloc_block >= 0) {
++		bi = yaffs_get_block_info(dev, dev->alloc_block);
++
++		ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
++		    dev->alloc_page;
++		bi->pages_in_use++;
++		yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
++
++		dev->alloc_page++;
++
++		dev->n_free_chunks--;
++
++		/* If the block is full set the state to full */
++		if (dev->alloc_page >= dev->param.chunks_per_block) {
++			bi->block_state = YAFFS_BLOCK_STATE_FULL;
++			dev->alloc_block = -1;
++		}
++
++		if (block_ptr)
++			*block_ptr = bi;
++
++		return ret_val;
++	}
++
++	yaffs_trace(YAFFS_TRACE_ERROR,
++		"!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
++
++	return -1;
++}
++
++static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
++{
++	int n;
++
++	n = dev->n_erased_blocks * dev->param.chunks_per_block;
++
++	if (dev->alloc_block > 0)
++		n += (dev->param.chunks_per_block - dev->alloc_page);
++
++	return n;
++
++}
++
++/*
++ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
++ * if we don't want to write to it.
++ */
++void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
++{
++	struct yaffs_block_info *bi;
++
++	if (dev->alloc_block > 0) {
++		bi = yaffs_get_block_info(dev, dev->alloc_block);
++		if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
++			bi->block_state = YAFFS_BLOCK_STATE_FULL;
++			dev->alloc_block = -1;
++		}
++	}
++}
++
++static int yaffs_write_new_chunk(struct yaffs_dev *dev,
++				 const u8 *data,
++				 struct yaffs_ext_tags *tags, int use_reserver)
++{
++	int attempts = 0;
++	int write_ok = 0;
++	int chunk;
++
++	yaffs2_checkpt_invalidate(dev);
++
++	do {
++		struct yaffs_block_info *bi = 0;
++		int erased_ok = 0;
++
++		chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
++		if (chunk < 0) {
++			/* no space */
++			break;
++		}
++
++		/* First check this chunk is erased, if it needs
++		 * checking.  The checking policy (unless forced
++		 * always on) is as follows:
++		 *
++		 * Check the first page we try to write in a block.
++		 * If the check passes then we don't need to check any
++		 * more. If the check fails, we check again...
++		 * If the block has been erased, we don't need to check.
++		 *
++		 * However, if the block has been prioritised for gc,
++		 * then we think there might be something odd about
++		 * this block and stop using it.
++		 *
++		 * Rationale: We should only ever see chunks that have
++		 * not been erased if there was a partially written
++		 * chunk due to power loss.  This checking policy should
++		 * catch that case with very few checks and thus save a
++		 * lot of checks that are most likely not needed.
++		 *
++		 * Mods to the above
++		 * If an erase check fails or the write fails we skip the
++		 * rest of the block.
++		 */
++
++		/* let's give it a try */
++		attempts++;
++
++		if (dev->param.always_check_erased)
++			bi->skip_erased_check = 0;
++
++		if (!bi->skip_erased_check) {
++			erased_ok = yaffs_check_chunk_erased(dev, chunk);
++			if (erased_ok != YAFFS_OK) {
++				yaffs_trace(YAFFS_TRACE_ERROR,
++				  "**>> yaffs chunk %d was not erased",
++				  chunk);
++
++				/* If not erased, delete this one,
++				 * skip rest of block and
++				 * try another chunk */
++				yaffs_chunk_del(dev, chunk, 1, __LINE__);
++				yaffs_skip_rest_of_block(dev);
++				continue;
++			}
++		}
++
++		write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
++
++		if (!bi->skip_erased_check)
++			write_ok =
++			    yaffs_verify_chunk_written(dev, chunk, data, tags);
++
++		if (write_ok != YAFFS_OK) {
++			/* Clean up aborted write, skip to next block and
++			 * try another chunk */
++			yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
++			continue;
++		}
++
++		bi->skip_erased_check = 1;
++
++		/* Copy the data into the robustification buffer */
++		yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
++
++	} while (write_ok != YAFFS_OK &&
++		 (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
++
++	if (!write_ok)
++		chunk = -1;
++
++	if (attempts > 1) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"**>> yaffs write required %d attempts",
++			attempts);
++		dev->n_retried_writes += (attempts - 1);
++	}
++
++	return chunk;
++}
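++
++/*
++ * Retry bound note: the limit is checked after each attempt, so with
++ * yaffs_wr_attempts = 3 up to four candidate chunks are tried before the
++ * write is abandoned; a value <= 0 removes the bound entirely, and the
++ * loop only stops when a write verifies or the allocator runs dry.
++ */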
++
++/*
++ * Block retiring for handling a broken block.
++ */
++
++static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
++{
++	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
++
++	yaffs2_checkpt_invalidate(dev);
++
++	yaffs2_clear_oldest_dirty_seq(dev, bi);
++
++	if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
++		if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
++			yaffs_trace(YAFFS_TRACE_ALWAYS,
++				"yaffs: Failed to mark bad and erase block %d",
++				flash_block);
++		} else {
++			struct yaffs_ext_tags tags;
++			int chunk_id =
++			    flash_block * dev->param.chunks_per_block;
++
++			u8 *buffer = yaffs_get_temp_buffer(dev);
++
++			memset(buffer, 0xff, dev->data_bytes_per_chunk);
++			memset(&tags, 0, sizeof(tags));
++			tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
++			if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
++							dev->chunk_offset,
++							buffer,
++							&tags) != YAFFS_OK)
++				yaffs_trace(YAFFS_TRACE_ALWAYS,
++					"yaffs: Failed to write bad block marker to block %d",
++					flash_block);
++
++			yaffs_release_temp_buffer(dev, buffer);
++		}
++	}
++
++	bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++	bi->gc_prioritise = 0;
++	bi->needs_retiring = 0;
++
++	dev->n_retired_blocks++;
++}
++
++/*---------------- Name handling functions ------------*/
++
++static u16 yaffs_calc_name_sum(const YCHAR *name)
++{
++	u16 sum = 0;
++	u16 i = 1;
++
++	if (!name)
++		return 0;
++
++	while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
++
++		/* 0x1f mask is case insensitive */
++		sum += ((*name) & 0x1f) * i;
++		i++;
++		name++;
++	}
++	return sum;
++}
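++
++/*
++ * Worked example (illustrative): for the name "ab" the sum is
++ * ('a' & 0x1f) * 1 + ('b' & 0x1f) * 2 = 1 + 4 = 5, and "AB" gives the
++ * same value, which is what makes the sum case insensitive.
++ */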
++
++
++void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
++{
++	memset(obj->short_name, 0, sizeof(obj->short_name));
++
++	if (name && !name[0]) {
++		yaffs_fix_null_name(obj, obj->short_name,
++				YAFFS_SHORT_NAME_LENGTH);
++		name = obj->short_name;
++	} else if (name &&
++		strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
++		YAFFS_SHORT_NAME_LENGTH)  {
++		strcpy(obj->short_name, name);
++	}
++
++	obj->sum = yaffs_calc_name_sum(name);
++}
++
++void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
++				const struct yaffs_obj_hdr *oh)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++	YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
++	memset(tmp_name, 0, sizeof(tmp_name));
++	yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
++				YAFFS_MAX_NAME_LENGTH + 1);
++	yaffs_set_obj_name(obj, tmp_name);
++#else
++	yaffs_set_obj_name(obj, oh->name);
++#endif
++}
++
++loff_t yaffs_max_file_size(struct yaffs_dev *dev)
++{
++	if(sizeof(loff_t) < 8)
++		return YAFFS_MAX_FILE_SIZE_32;
++	else
++		return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
++}
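++
++/*
++ * Example (constants are illustrative; see yaffs_guts.h for the real
++ * YAFFS_MAX_CHUNK_ID): if YAFFS_MAX_CHUNK_ID were 2^20 - 1 and chunks
++ * held 2048 data bytes, a 64-bit loff_t build would cap files at
++ * 1048575 * 2048 bytes, just under 2 GiB.
++ */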
++
++/*-------------------- TNODES -------------------
++
++ * List of spare tnodes
++ * The list is hooked together using the first pointer
++ * in the tnode.
++ */
++
++struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
++{
++	struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
++
++	if (tn) {
++		memset(tn, 0, dev->tnode_size);
++		dev->n_tnodes++;
++	}
++
++	dev->checkpoint_blocks_required = 0;	/* force recalculation */
++
++	return tn;
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
++{
++	yaffs_free_raw_tnode(dev, tn);
++	dev->n_tnodes--;
++	dev->checkpoint_blocks_required = 0;	/* force recalculation */
++}
++
++static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
++{
++	yaffs_deinit_raw_tnodes_and_objs(dev);
++	dev->n_obj = 0;
++	dev->n_tnodes = 0;
++}
++
++static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
++			unsigned pos, unsigned val)
++{
++	u32 *map = (u32 *) tn;
++	u32 bit_in_map;
++	u32 bit_in_word;
++	u32 word_in_map;
++	u32 mask;
++
++	pos &= YAFFS_TNODES_LEVEL0_MASK;
++	val >>= dev->chunk_grp_bits;
++
++	bit_in_map = pos * dev->tnode_width;
++	word_in_map = bit_in_map / 32;
++	bit_in_word = bit_in_map & (32 - 1);
++
++	mask = dev->tnode_mask << bit_in_word;
++
++	map[word_in_map] &= ~mask;
++	map[word_in_map] |= (mask & (val << bit_in_word));
++
++	if (dev->tnode_width > (32 - bit_in_word)) {
++		bit_in_word = (32 - bit_in_word);
++		word_in_map++;
++		mask =
++		    dev->tnode_mask >> bit_in_word;
++		map[word_in_map] &= ~mask;
++		map[word_in_map] |= (mask & (val >> bit_in_word));
++	}
++}
++
++u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
++			 unsigned pos)
++{
++	u32 *map = (u32 *) tn;
++	u32 bit_in_map;
++	u32 bit_in_word;
++	u32 word_in_map;
++	u32 val;
++
++	pos &= YAFFS_TNODES_LEVEL0_MASK;
++
++	bit_in_map = pos * dev->tnode_width;
++	word_in_map = bit_in_map / 32;
++	bit_in_word = bit_in_map & (32 - 1);
++
++	val = map[word_in_map] >> bit_in_word;
++
++	if (dev->tnode_width > (32 - bit_in_word)) {
++		bit_in_word = (32 - bit_in_word);
++		word_in_map++;
++		val |= (map[word_in_map] << bit_in_word);
++	}
++
++	val &= dev->tnode_mask;
++	val <<= dev->chunk_grp_bits;
++
++	return val;
++}
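++
++/*
++ * Worked example (illustrative): with tnode_width = 18 and pos = 3,
++ * bit_in_map = 54, so word_in_map = 1 and bit_in_word = 22. Since
++ * 18 > (32 - 22), the low 10 bits of the value live in word 1 and the
++ * remaining 8 bits spill into word 2; this is exactly the split handled
++ * by the second half of yaffs_load_tnode_0() and yaffs_get_group_base().
++ */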
++
++/* ------------------- End of individual tnode manipulation -----------------*/
++
++/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
++ * The look up tree is represented by the top tnode and the number of top_level
++ * in the tree. 0 means only the level 0 tnode is in the tree.
++ */
++
++/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
++struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
++				       struct yaffs_file_var *file_struct,
++				       u32 chunk_id)
++{
++	struct yaffs_tnode *tn = file_struct->top;
++	u32 i;
++	int required_depth;
++	int level = file_struct->top_level;
++
++	(void) dev;
++
++	/* Check sane level and chunk Id */
++	if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
++		return NULL;
++
++	if (chunk_id > YAFFS_MAX_CHUNK_ID)
++		return NULL;
++
++	/* First check we're tall enough (ie enough top_level) */
++
++	i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++	required_depth = 0;
++	while (i) {
++		i >>= YAFFS_TNODES_INTERNAL_BITS;
++		required_depth++;
++	}
++
++	if (required_depth > file_struct->top_level)
++		return NULL;	/* Not tall enough, so we can't find it */
++
++	/* Traverse down to level 0 */
++	while (level > 0 && tn) {
++		tn = tn->internal[(chunk_id >>
++				   (YAFFS_TNODES_LEVEL0_BITS +
++				    (level - 1) *
++				    YAFFS_TNODES_INTERNAL_BITS)) &
++				  YAFFS_TNODES_INTERNAL_MASK];
++		level--;
++	}
++
++	return tn;
++}
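++
++/*
++ * Depth example (assuming the usual YAFFS_TNODES_LEVEL0_BITS = 4 and
++ * YAFFS_TNODES_INTERNAL_BITS = 3): for chunk_id 1280, i starts at
++ * 1280 >> 4 = 80 and needs three 3-bit shifts to reach zero, so
++ * required_depth is 3 and the file's top_level must be at least 3 for
++ * the chunk to be reachable.
++ */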
++
++/* add_find_tnode_0 finds the level 0 tnode if it exists,
++ * otherwise first expands the tree.
++ * This happens in two steps:
++ *  1. If the tree isn't tall enough, then make it taller.
++ *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
++ *
++ * Used when modifying the tree.
++ *
++ *  If the passed_tn argument is NULL, then a fresh tnode will be added;
++ *  otherwise the specified tnode will be plugged into the tree.
++ */
++
++struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
++					   struct yaffs_file_var *file_struct,
++					   u32 chunk_id,
++					   struct yaffs_tnode *passed_tn)
++{
++	int required_depth;
++	int i;
++	int l;
++	struct yaffs_tnode *tn;
++	u32 x;
++
++	/* Check sane level and page Id */
++	if (file_struct->top_level < 0 ||
++	    file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
++		return NULL;
++
++	if (chunk_id > YAFFS_MAX_CHUNK_ID)
++		return NULL;
++
++	/* First check we're tall enough (ie enough top_level) */
++
++	x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++	required_depth = 0;
++	while (x) {
++		x >>= YAFFS_TNODES_INTERNAL_BITS;
++		required_depth++;
++	}
++
++	if (required_depth > file_struct->top_level) {
++		/* Not tall enough, gotta make the tree taller */
++		for (i = file_struct->top_level; i < required_depth; i++) {
++
++			tn = yaffs_get_tnode(dev);
++
++			if (tn) {
++				tn->internal[0] = file_struct->top;
++				file_struct->top = tn;
++				file_struct->top_level++;
++			} else {
++				yaffs_trace(YAFFS_TRACE_ERROR,
++					"yaffs: no more tnodes");
++				return NULL;
++			}
++		}
++	}
++
++	/* Traverse down to level 0, adding anything we need */
++
++	l = file_struct->top_level;
++	tn = file_struct->top;
++
++	if (l > 0) {
++		while (l > 0 && tn) {
++			x = (chunk_id >>
++			     (YAFFS_TNODES_LEVEL0_BITS +
++			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
++			    YAFFS_TNODES_INTERNAL_MASK;
++
++			if ((l > 1) && !tn->internal[x]) {
++				/* Add missing non-level-zero tnode */
++				tn->internal[x] = yaffs_get_tnode(dev);
++				if (!tn->internal[x])
++					return NULL;
++			} else if (l == 1) {
++				/* Looking from level 1 at level 0 */
++				if (passed_tn) {
++					/* If we already have one, release it */
++					if (tn->internal[x])
++						yaffs_free_tnode(dev,
++							tn->internal[x]);
++					tn->internal[x] = passed_tn;
++
++				} else if (!tn->internal[x]) {
++					/* Don't have one, none passed in */
++					tn->internal[x] = yaffs_get_tnode(dev);
++					if (!tn->internal[x])
++						return NULL;
++				}
++			}
++
++			tn = tn->internal[x];
++			l--;
++		}
++	} else {
++		/* top is level 0 */
++		if (passed_tn) {
++			memcpy(tn, passed_tn,
++			       (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
++			yaffs_free_tnode(dev, passed_tn);
++		}
++	}
++
++	return tn;
++}
++
++static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
++			    int chunk_obj)
++{
++	return (tags->chunk_id == chunk_obj &&
++		tags->obj_id == obj_id &&
++		!tags->is_deleted) ? 1 : 0;
++
++}
++
++static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
++					struct yaffs_ext_tags *tags, int obj_id,
++					int inode_chunk)
++{
++	int j;
++
++	for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
++		if (yaffs_check_chunk_bit
++		    (dev, the_chunk / dev->param.chunks_per_block,
++		     the_chunk % dev->param.chunks_per_block)) {
++
++			if (dev->chunk_grp_size == 1)
++				return the_chunk;
++			else {
++				yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
++							 tags);
++				if (yaffs_tags_match(tags,
++							obj_id, inode_chunk)) {
++					/* found it; */
++					return the_chunk;
++				}
++			}
++		}
++		the_chunk++;
++	}
++	return -1;
++}
++
++static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++				    struct yaffs_ext_tags *tags)
++{
++	/* Get the Tnode, then get the level 0 chunk offset. */
++	struct yaffs_tnode *tn;
++	int the_chunk = -1;
++	struct yaffs_ext_tags local_tags;
++	int ret_val = -1;
++	struct yaffs_dev *dev = in->my_dev;
++
++	if (!tags) {
++		/* Passed a NULL, so use our own tags space */
++		tags = &local_tags;
++	}
++
++	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
++
++	if (!tn)
++		return ret_val;
++
++	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
++
++	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
++					      inode_chunk);
++	return ret_val;
++}
++
++static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
++				     struct yaffs_ext_tags *tags)
++{
++	/* Get the Tnode, then get the level 0 chunk offset. */
++	struct yaffs_tnode *tn;
++	int the_chunk = -1;
++	struct yaffs_ext_tags local_tags;
++	struct yaffs_dev *dev = in->my_dev;
++	int ret_val = -1;
++
++	if (!tags) {
++		/* Passed a NULL, so use our own tags space */
++		tags = &local_tags;
++	}
++
++	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
++
++	if (!tn)
++		return ret_val;
++
++	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
++
++	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
++					      inode_chunk);
++
++	/* Delete the entry in the filestructure (if found) */
++	if (ret_val != -1)
++		yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
++
++	return ret_val;
++}
++
++int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++			    int nand_chunk, int in_scan)
++{
++	/* NB in_scan is zero unless scanning.
++	 * For forward scanning, in_scan is > 0;
++	 * for backward scanning in_scan is < 0
++	 *
++	 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
++	 */
++
++	struct yaffs_tnode *tn;
++	struct yaffs_dev *dev = in->my_dev;
++	int existing_chunk;
++	struct yaffs_ext_tags existing_tags;
++	struct yaffs_ext_tags new_tags;
++	unsigned existing_serial, new_serial;
++
++	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
++		/* Just ignore an attempt at putting a chunk into a non-file
++		 * during scanning.
++		 * If it is not during Scanning then something went wrong!
++		 */
++		if (!in_scan) {
++			yaffs_trace(YAFFS_TRACE_ERROR,
++				"yaffs tragedy:attempt to put data chunk into a non-file"
++				);
++			BUG();
++		}
++
++		yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++		return YAFFS_OK;
++	}
++
++	tn = yaffs_add_find_tnode_0(dev,
++				    &in->variant.file_variant,
++				    inode_chunk, NULL);
++	if (!tn)
++		return YAFFS_FAIL;
++
++	if (!nand_chunk)
++		/* Dummy insert, bail now */
++		return YAFFS_OK;
++
++	existing_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
++
++	if (in_scan != 0) {
++		/* If we're scanning then we need to test for duplicates
++		 * NB This does not need to be efficient since it should only
++		 * happen when the power fails during a write, then only one
++		 * happen when the power fails during a write, and then only one
++		 *
++		 * Correction for YAFFS2: This could happen quite a lot and we
++		 * need to think about efficiency! TODO
++		 * Update: For backward scanning we don't need to re-read tags
++		 * so this is quite cheap.
++		 */
++
++		if (existing_chunk > 0) {
++			/* NB Right now the existing chunk will not be the real
++			 * chunk_id if the chunk group size > 1, so we have to
++			 * do a FindChunkInFile to get the real chunk id.
++			 *
++			 * We have a duplicate, so we need to decide which
++			 * one to use:
++			 *
++			 * Backwards scanning YAFFS2: The old one is what
++			 * we use, dump the new one.
++			 * YAFFS1: Get both sets of tags and compare serial
++			 * numbers.
++			 */
++
++			if (in_scan > 0) {
++				/* Only do this for forward scanning */
++				yaffs_rd_chunk_tags_nand(dev,
++							 nand_chunk,
++							 NULL, &new_tags);
++
++				/* Do a proper find */
++				existing_chunk =
++				    yaffs_find_chunk_in_file(in, inode_chunk,
++							     &existing_tags);
++			}
++
++			if (existing_chunk <= 0) {
++				/* Hoosterman - how did this happen? */
++
++				yaffs_trace(YAFFS_TRACE_ERROR,
++					"yaffs tragedy: existing chunk < 0 in scan"
++					);
++
++			}
++
++			/* NB The deleted flags should be false, otherwise
++			 * the chunks will not be loaded during a scan
++			 */
++
++			if (in_scan > 0) {
++				new_serial = new_tags.serial_number;
++				existing_serial = existing_tags.serial_number;
++			}
++
++			if ((in_scan > 0) &&
++			    (existing_chunk <= 0 ||
++			     ((existing_serial + 1) & 3) == new_serial)) {
++				/* Forward scanning.
++				 * Use new
++				 * Delete the old one and drop through to
++				 * update the tnode
++				 */
++				yaffs_chunk_del(dev, existing_chunk, 1,
++						__LINE__);
++			} else {
++				/* Backward scanning or we want to use the
++				 * existing one
++				 * Delete the new one and return early so that
++				 * the tnode isn't changed
++				 */
++				yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++				return YAFFS_OK;
++			}
++		}
++
++	}
++
++	if (existing_chunk == 0)
++		in->n_data_chunks++;
++
++	yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
++
++	return YAFFS_OK;
++}
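++
++/*
++ * Serial number example: the duplicate test above,
++ * ((existing_serial + 1) & 3) == new_serial, treats the serial as a
++ * 2-bit counter, so 3 -> 0 is a valid increment; e.g. existing_serial
++ * = 3 with new_serial = 0 marks the new chunk as the fresher copy and
++ * the old one is deleted.
++ */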
++
++static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
++{
++	struct yaffs_block_info *the_block;
++	unsigned block_no;
++
++	yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
++
++	block_no = chunk / dev->param.chunks_per_block;
++	the_block = yaffs_get_block_info(dev, block_no);
++	if (the_block) {
++		the_block->soft_del_pages++;
++		dev->n_free_chunks++;
++		yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
++	}
++}
++
++/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
++ * the chunks in the file.
++ * All soft deleting does is increment the block's soft-delete count and pull
++ * the chunk out of the tnode.
++ * Thus, essentially this is the same as DeleteWorker except that the chunks
++ * are soft deleted.
++ */
++
++static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
++				 u32 level, int chunk_offset)
++{
++	int i;
++	int the_chunk;
++	int all_done = 1;
++	struct yaffs_dev *dev = in->my_dev;
++
++	if (!tn)
++		return 1;
++
++	if (level > 0) {
++		for (i = YAFFS_NTNODES_INTERNAL - 1;
++			all_done && i >= 0;
++			i--) {
++			if (tn->internal[i]) {
++				all_done =
++				    yaffs_soft_del_worker(in,
++					tn->internal[i],
++					level - 1,
++					(chunk_offset <<
++					YAFFS_TNODES_INTERNAL_BITS)
++					+ i);
++				if (all_done) {
++					yaffs_free_tnode(dev,
++						tn->internal[i]);
++					tn->internal[i] = NULL;
++				} else {
++					/* Can this happen? */
++				}
++			}
++		}
++		return (all_done) ? 1 : 0;
++	}
++
++	/* level 0 */
++	for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
++		the_chunk = yaffs_get_group_base(dev, tn, i);
++		if (the_chunk) {
++			yaffs_soft_del_chunk(dev, the_chunk);
++			yaffs_load_tnode_0(dev, tn, i, 0);
++		}
++	}
++	return 1;
++}
++
++static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
++{
++	struct yaffs_dev *dev = obj->my_dev;
++	struct yaffs_obj *parent;
++
++	yaffs_verify_obj_in_dir(obj);
++	parent = obj->parent;
++
++	yaffs_verify_dir(parent);
++
++	if (dev && dev->param.remove_obj_fn)
++		dev->param.remove_obj_fn(obj);
++
++	list_del_init(&obj->siblings);
++	obj->parent = NULL;
++
++	yaffs_verify_dir(parent);
++}
++
++void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
++{
++	if (!directory) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"tragedy: Trying to add an object to a null pointer directory"
++			);
++		BUG();
++		return;
++	}
++	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"tragedy: Trying to add an object to a non-directory"
++			);
++		BUG();
++	}
++
++	if (obj->siblings.prev == NULL) {
++		/* Not initialised */
++		BUG();
++	}
++
++	yaffs_verify_dir(directory);
++
++	yaffs_remove_obj_from_dir(obj);
++
++	/* Now add it */
++	list_add(&obj->siblings, &directory->variant.dir_variant.children);
++	obj->parent = directory;
++
++	if (directory == obj->my_dev->unlinked_dir
++	    || directory == obj->my_dev->del_dir) {
++		obj->unlinked = 1;
++		obj->my_dev->n_unlinked_files++;
++		obj->rename_allowed = 0;
++	}
++
++	yaffs_verify_dir(directory);
++	yaffs_verify_obj_in_dir(obj);
++}
++
++static int yaffs_change_obj_name(struct yaffs_obj *obj,
++				 struct yaffs_obj *new_dir,
++				 const YCHAR *new_name, int force, int shadows)
++{
++	int unlink_op;
++	int del_op;
++	struct yaffs_obj *existing_target;
++
++	if (new_dir == NULL)
++		new_dir = obj->parent;	/* use the old directory */
++
++	if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"tragedy: yaffs_change_obj_name: new_dir is not a directory"
++			);
++		BUG();
++	}
++
++	unlink_op = (new_dir == obj->my_dev->unlinked_dir);
++	del_op = (new_dir == obj->my_dev->del_dir);
++
++	existing_target = yaffs_find_by_name(new_dir, new_name);
++
++	/* If the object is a file going into the unlinked directory,
++	 *   then it is OK to just stuff it in since duplicate names are OK.
++	 *   else only proceed if the new name does not exist and we're putting
++	 *   it into a directory.
++	 */
++	if (!(unlink_op || del_op || force ||
++	      shadows > 0 || !existing_target) ||
++	      new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++		return YAFFS_FAIL;
++
++	yaffs_set_obj_name(obj, new_name);
++	obj->dirty = 1;
++	yaffs_add_obj_to_dir(new_dir, obj);
++
++	if (unlink_op)
++		obj->unlinked = 1;
++
++	/* If it is a deletion then we mark it as a shrink for gc  */
++	if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
++		return YAFFS_OK;
++
++	return YAFFS_FAIL;
++}
++
++/*------------------------ Short Operations Cache ------------------------------
++ *   In many situations where there is no high level buffering, a lot of
++ *   reads might be short sequential reads, and a lot of writes may be short
++ *   sequential writes, e.g. scanning/writing a jpeg file.
++ *   In these cases, a short read/write cache can provide a huge performance
++ *   benefit with dumb-as-a-rock code.
++ *   In Linux, the page cache provides read buffering and the short op cache
++ *   provides write buffering.
++ *
++ *   There are a small number (~10) of cache chunks per device so that we don't
++ *   need a very intelligent search.
++ */
++
++static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
++{
++	struct yaffs_dev *dev = obj->my_dev;
++	int i;
++	struct yaffs_cache *cache;
++	int n_caches = obj->my_dev->param.n_caches;
++
++	for (i = 0; i < n_caches; i++) {
++		cache = &dev->cache[i];
++		if (cache->object == obj && cache->dirty)
++			return 1;
++	}
++
++	return 0;
++}
++
++static void yaffs_flush_file_cache(struct yaffs_obj *obj)
++{
++	struct yaffs_dev *dev = obj->my_dev;
++	int lowest = -99;	/* Stop compiler whining. */
++	int i;
++	struct yaffs_cache *cache;
++	int chunk_written = 0;
++	int n_caches = obj->my_dev->param.n_caches;
++
++	if (n_caches < 1)
++		return;
++	do {
++		cache = NULL;
++
++		/* Find the lowest dirty chunk for this object */
++		for (i = 0; i < n_caches; i++) {
++			if (dev->cache[i].object == obj &&
++			    dev->cache[i].dirty) {
++				if (!cache ||
++				    dev->cache[i].chunk_id < lowest) {
++					cache = &dev->cache[i];
++					lowest = cache->chunk_id;
++				}
++			}
++		}
++
++		if (cache && !cache->locked) {
++			/* Write it out and free it up */
++			chunk_written =
++			    yaffs_wr_data_obj(cache->object,
++					      cache->chunk_id,
++					      cache->data,
++					      cache->n_bytes, 1);
++			cache->dirty = 0;
++			cache->object = NULL;
++		}
++	} while (cache && chunk_written > 0);
++
++	if (cache)
++		/* Hoosterman, disk full while writing cache out. */
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"yaffs tragedy: no space during cache write");
++}
++
++/*
++ * yaffs_flush_whole_cache() flushes the dirty cache entries of every
++ * cached object until no dirty entries remain.
++ */
++
++void yaffs_flush_whole_cache(struct yaffs_dev *dev)
++{
++	struct yaffs_obj *obj;
++	int n_caches = dev->param.n_caches;
++	int i;
++
++	/* Find a dirty object in the cache and flush it...
++	 * until there are no further dirty objects.
++	 */
++	do {
++		obj = NULL;
++		for (i = 0; i < n_caches && !obj; i++) {
++			if (dev->cache[i].object && dev->cache[i].dirty)
++				obj = dev->cache[i].object;
++		}
++		if (obj)
++			yaffs_flush_file_cache(obj);
++	} while (obj);
++
++}
++
++/* Grab us a cache chunk for use.
++ * First look for an empty one.
++ * Then look for the least recently used non-dirty one.
++ * Then look for the least recently used dirty one...., flush and look again.
++ */
++static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
++{
++	int i;
++
++	if (dev->param.n_caches > 0) {
++		for (i = 0; i < dev->param.n_caches; i++) {
++			if (!dev->cache[i].object)
++				return &dev->cache[i];
++		}
++	}
++	return NULL;
++}
++
++static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
++{
++	struct yaffs_cache *cache;
++	struct yaffs_obj *the_obj;
++	int usage;
++	int i;
++
++	if (dev->param.n_caches < 1)
++		return NULL;
++
++	/* Try to find a non-dirty one... */
++
++	cache = yaffs_grab_chunk_worker(dev);
++
++	if (!cache) {
++		/* They were all dirty, find the LRU object and flush
++		 * its cache, then find again.
++		 * NB: what's here is not very accurate; we actually
++		 * flush the object that owns the LRU chunk.
++		 */
++
++		/* With locking we can't assume we can use entry zero,
++		 * Set the_obj to a valid pointer for Coverity. */
++		the_obj = dev->cache[0].object;
++		usage = -1;
++		cache = NULL;
++
++		for (i = 0; i < dev->param.n_caches; i++) {
++			if (dev->cache[i].object &&
++			    !dev->cache[i].locked &&
++			    (dev->cache[i].last_use < usage ||
++			    !cache)) {
++				usage = dev->cache[i].last_use;
++				the_obj = dev->cache[i].object;
++				cache = &dev->cache[i];
++			}
++		}
++
++		if (!cache || cache->dirty) {
++			/* Flush and try again */
++			yaffs_flush_file_cache(the_obj);
++			cache = yaffs_grab_chunk_worker(dev);
++		}
++	}
++	return cache;
++}
++
++/* Find a cached chunk */
++static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
++						  int chunk_id)
++{
++	struct yaffs_dev *dev = obj->my_dev;
++	int i;
++
++	if (dev->param.n_caches < 1)
++		return NULL;
++
++	for (i = 0; i < dev->param.n_caches; i++) {
++		if (dev->cache[i].object == obj &&
++		    dev->cache[i].chunk_id == chunk_id) {
++			dev->cache_hits++;
++
++			return &dev->cache[i];
++		}
++	}
++	return NULL;
++}
++
++/* Mark the chunk for the least recently used algorithm */
++static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
++			    int is_write)
++{
++	int i;
++
++	if (dev->param.n_caches < 1)
++		return;
++
++	if (dev->cache_last_use < 0 ||
++		dev->cache_last_use > 100000000) {
++		/* Reset the cache usages */
++		for (i = 0; i < dev->param.n_caches; i++)
++			dev->cache[i].last_use = 0;
++
++		dev->cache_last_use = 0;
++	}
++	dev->cache_last_use++;
++	cache->last_use = dev->cache_last_use;
++
++	if (is_write)
++		cache->dirty = 1;
++}
++
++/* Invalidate a single cache page.
++ * Do this when a whole page gets written,
++ * ie the short cache for this page is no longer valid.
++ */
++static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
++{
++	struct yaffs_cache *cache;
++
++	if (object->my_dev->param.n_caches > 0) {
++		cache = yaffs_find_chunk_cache(object, chunk_id);
++
++		if (cache)
++			cache->object = NULL;
++	}
++}
++
++/* Invalidate all the cache pages associated with this object
++ * Do this whenever the file is deleted or resized.
++ */
++static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
++{
++	int i;
++	struct yaffs_dev *dev = in->my_dev;
++
++	if (dev->param.n_caches > 0) {
++		/* Invalidate it. */
++		for (i = 0; i < dev->param.n_caches; i++) {
++			if (dev->cache[i].object == in)
++				dev->cache[i].object = NULL;
++		}
++	}
++}
++
++static void yaffs_unhash_obj(struct yaffs_obj *obj)
++{
++	int bucket;
++	struct yaffs_dev *dev = obj->my_dev;
++
++	/* If it is still linked into the bucket list, free from the list */
++	if (!list_empty(&obj->hash_link)) {
++		list_del_init(&obj->hash_link);
++		bucket = yaffs_hash_fn(obj->obj_id);
++		dev->obj_bucket[bucket].count--;
++	}
++}
++
++/* FreeObject frees up an object and puts it back on the free list. */
++static void yaffs_free_obj(struct yaffs_obj *obj)
++{
++	struct yaffs_dev *dev;
++
++	if (!obj) {
++		BUG();
++		return;
++	}
++	dev = obj->my_dev;
++	yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
++		obj, obj->my_inode);
++	if (obj->parent)
++		BUG();
++	if (!list_empty(&obj->siblings))
++		BUG();
++
++	if (obj->my_inode) {
++		/* We're still hooked up to a cached inode.
++		 * Don't delete now, but mark for later deletion
++		 */
++		obj->defered_free = 1;
++		return;
++	}
++
++	yaffs_unhash_obj(obj);
++
++	yaffs_free_raw_obj(dev, obj);
++	dev->n_obj--;
++	dev->checkpoint_blocks_required = 0;	/* force recalculation */
++}
++
++void yaffs_handle_defered_free(struct yaffs_obj *obj)
++{
++	if (obj->defered_free)
++		yaffs_free_obj(obj);
++}
++
++static int yaffs_generic_obj_del(struct yaffs_obj *in)
++{
++	/* Invalidate the file's data in the cache, without flushing. */
++	yaffs_invalidate_whole_cache(in);
++
++	if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
++		/* Move to unlinked directory so we have a deletion record */
++		yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
++				      0);
++	}
++
++	yaffs_remove_obj_from_dir(in);
++	yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
++	in->hdr_chunk = 0;
++
++	yaffs_free_obj(in);
++	return YAFFS_OK;
++
++}
++
++static void yaffs_soft_del_file(struct yaffs_obj *obj)
++{
++	if (!obj->deleted ||
++	    obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
++	    obj->soft_del)
++		return;
++
++	if (obj->n_data_chunks <= 0) {
++		/* Empty file with no duplicate object headers,
++		 * just delete it immediately */
++		yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
++		obj->variant.file_variant.top = NULL;
++		yaffs_trace(YAFFS_TRACE_TRACING,
++			"yaffs: Deleting empty file %d",
++			obj->obj_id);
++		yaffs_generic_obj_del(obj);
++	} else {
++		yaffs_soft_del_worker(obj,
++				      obj->variant.file_variant.top,
++				      obj->variant.
++				      file_variant.top_level, 0);
++		obj->soft_del = 1;
++	}
++}
++
++/* Pruning removes any part of the file structure tree that is beyond the
++ * bounds of the file (ie that does not point to chunks).
++ *
++ * A file should only get pruned when its size is reduced.
++ *
++ * Before pruning, the chunks must be pulled from the tree and the
++ * level 0 tnode entries must be zeroed out.
++ * Could also use this for file deletion, but that's probably better handled
++ * by a special case.
++ *
++ * This function is recursive. For levels > 0 the function is called again on
++ * any sub-tree. For level == 0 we just check if the sub-tree has data.
++ * If there is no data in a subtree then it is pruned.
++ */
++
++static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
++					      struct yaffs_tnode *tn, u32 level,
++					      int del0)
++{
++	int i;
++	int has_data;
++
++	if (!tn)
++		return tn;
++
++	has_data = 0;
++
++	if (level > 0) {
++		for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
++			if (tn->internal[i]) {
++				tn->internal[i] =
++				    yaffs_prune_worker(dev,
++						tn->internal[i],
++						level - 1,
++						(i == 0) ? del0 : 1);
++			}
++
++			if (tn->internal[i])
++				has_data++;
++		}
++	} else {
++		int tnode_size_u32 = dev->tnode_size / sizeof(u32);
++		u32 *map = (u32 *) tn;
++
++		for (i = 0; !has_data && i < tnode_size_u32; i++) {
++			if (map[i])
++				has_data++;
++		}
++	}
++
++	if (has_data == 0 && del0) {
++		/* Free and return NULL */
++		yaffs_free_tnode(dev, tn);
++		tn = NULL;
++	}
++	return tn;
++}
++
++static int yaffs_prune_tree(struct yaffs_dev *dev,
++			    struct yaffs_file_var *file_struct)
++{
++	int i;
++	int has_data;
++	int done = 0;
++	struct yaffs_tnode *tn;
++
++	if (file_struct->top_level < 1)
++		return YAFFS_OK;
++
++	file_struct->top =
++	   yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
++
++	/* Now we have a tree with all the data-free branches NULL but
++	 * the height is the same as it was.
++	 * Let's see if we can trim internal tnodes to shorten the tree.
++	 * We can do this if only the 0th element in the tnode is in use
++	 * (ie all the slots other than slot 0 are NULL)
++	 */
++
++	while (file_struct->top_level && !done) {
++		tn = file_struct->top;
++
++		has_data = 0;
++		for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
++			if (tn->internal[i])
++				has_data++;
++		}
++
++		if (!has_data) {
++			file_struct->top = tn->internal[0];
++			file_struct->top_level--;
++			yaffs_free_tnode(dev, tn);
++		} else {
++			done = 1;
++		}
++	}
++
++	return YAFFS_OK;
++}
++
++/*-------------------- End of File Structure functions.-------------------*/
++
++/* alloc_empty_obj gets us a clean Object.*/
++static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
++{
++	struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
++
++	if (!obj)
++		return obj;
++
++	dev->n_obj++;
++
++	/* Now sweeten it up... */
++
++	memset(obj, 0, sizeof(struct yaffs_obj));
++	obj->being_created = 1;
++
++	obj->my_dev = dev;
++	obj->hdr_chunk = 0;
++	obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
++	INIT_LIST_HEAD(&(obj->hard_links));
++	INIT_LIST_HEAD(&(obj->hash_link));
++	INIT_LIST_HEAD(&obj->siblings);
++
++	/* Now make the directory sane */
++	if (dev->root_dir) {
++		obj->parent = dev->root_dir;
++		list_add(&(obj->siblings),
++			 &dev->root_dir->variant.dir_variant.children);
++	}
++
++	/* Add it to the lost and found directory.
++	 * NB Can't put root or lost-n-found in lost-n-found so
++	 * check if lost-n-found exists first
++	 */
++	if (dev->lost_n_found)
++		yaffs_add_obj_to_dir(dev->lost_n_found, obj);
++
++	obj->being_created = 0;
++
++	dev->checkpoint_blocks_required = 0;	/* force recalculation */
++
++	return obj;
++}
++
++static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
++{
++	int i;
++	int l = 999;
++	int lowest = 999999;
++
++	/* Search for the shortest list or one that
++	 * isn't too long.
++	 */
++
++	for (i = 0; i < 10 && lowest > 4; i++) {
++		dev->bucket_finder++;
++		dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
++		if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
++			lowest = dev->obj_bucket[dev->bucket_finder].count;
++			l = dev->bucket_finder;
++		}
++	}
++
++	return l;
++}
++
++static int yaffs_new_obj_id(struct yaffs_dev *dev)
++{
++	int bucket = yaffs_find_nice_bucket(dev);
++	int found = 0;
++	struct list_head *i;
++	u32 n = (u32) bucket;
++
++	/* Now find an object value that has not already been taken
++	 * by scanning the list.
++	 */
++
++	while (!found) {
++		found = 1;
++		n += YAFFS_NOBJECT_BUCKETS;
++		if (dev->obj_bucket[bucket].count > 0) {
++			list_for_each(i, &dev->obj_bucket[bucket].list) {
++				/* If there is already one in the list */
++				if (i && list_entry(i, struct yaffs_obj,
++						    hash_link)->obj_id == n) {
++					found = 0;
++				}
++			}
++		}
++	}
++	return n;
++}
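++
++/*
++ * Example (assuming YAFFS_NOBJECT_BUCKETS = 256): for bucket 7 the
++ * candidate ids are 263, 519, 775 and so on, all congruent to 7 mod 256,
++ * so a chosen id always hashes back to the bucket that was just searched.
++ */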
++
++static void yaffs_hash_obj(struct yaffs_obj *in)
++{
++	int bucket = yaffs_hash_fn(in->obj_id);
++	struct yaffs_dev *dev = in->my_dev;
++
++	list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
++	dev->obj_bucket[bucket].count++;
++}
++
++struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
++{
++	int bucket = yaffs_hash_fn(number);
++	struct list_head *i;
++	struct yaffs_obj *in;
++
++	list_for_each(i, &dev->obj_bucket[bucket].list) {
++		/* Look if it is in the list */
++		in = list_entry(i, struct yaffs_obj, hash_link);
++		if (in->obj_id == number) {
++			/* Don't show it if it is awaiting a deferred free */
++			if (in->defered_free)
++				return NULL;
++			return in;
++		}
++	}
++
++	return NULL;
++}
++
++static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
++				enum yaffs_obj_type type)
++{
++	struct yaffs_obj *the_obj = NULL;
++	struct yaffs_tnode *tn = NULL;
++
++	if (number < 0)
++		number = yaffs_new_obj_id(dev);
++
++	if (type == YAFFS_OBJECT_TYPE_FILE) {
++		tn = yaffs_get_tnode(dev);
++		if (!tn)
++			return NULL;
++	}
++
++	the_obj = yaffs_alloc_empty_obj(dev);
++	if (!the_obj) {
++		if (tn)
++			yaffs_free_tnode(dev, tn);
++		return NULL;
++	}
++
++	the_obj->fake = 0;
++	the_obj->rename_allowed = 1;
++	the_obj->unlink_allowed = 1;
++	the_obj->obj_id = number;
++	yaffs_hash_obj(the_obj);
++	the_obj->variant_type = type;
++	yaffs_load_current_time(the_obj, 1, 1);
++
++	switch (type) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		the_obj->variant.file_variant.file_size = 0;
++		the_obj->variant.file_variant.scanned_size = 0;
++		the_obj->variant.file_variant.shrink_size =
++						yaffs_max_file_size(dev);
++		the_obj->variant.file_variant.top_level = 0;
++		the_obj->variant.file_variant.top = tn;
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
++		INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
++		break;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		/* No action required */
++		break;
++	case YAFFS_OBJECT_TYPE_UNKNOWN:
++		/* TODO: this should not happen */
++		break;
++	}
++	return the_obj;
++}
++
++static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
++					       int number, u32 mode)
++{
++
++	struct yaffs_obj *obj =
++	    yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
++
++	if (!obj)
++		return NULL;
++
++	obj->fake = 1;	/* it is fake so it might not use NAND */
++	obj->rename_allowed = 0;
++	obj->unlink_allowed = 0;
++	obj->deleted = 0;
++	obj->unlinked = 0;
++	obj->yst_mode = mode;
++	obj->my_dev = dev;
++	obj->hdr_chunk = 0;	/* Not a valid chunk. */
++	return obj;
++
++}
++
++
++static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
++{
++	int i;
++
++	dev->n_obj = 0;
++	dev->n_tnodes = 0;
++	yaffs_init_raw_tnodes_and_objs(dev);
++
++	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++		INIT_LIST_HEAD(&dev->obj_bucket[i].list);
++		dev->obj_bucket[i].count = 0;
++	}
++}
++
++struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
++						 int number,
++						 enum yaffs_obj_type type)
++{
++	struct yaffs_obj *the_obj = NULL;
++
++	if (number > 0)
++		the_obj = yaffs_find_by_number(dev, number);
++
++	if (!the_obj)
++		the_obj = yaffs_new_obj(dev, number, type);
++
++	return the_obj;
++
++}
++
++YCHAR *yaffs_clone_str(const YCHAR *str)
++{
++	YCHAR *new_str = NULL;
++	int len;
++
++	if (!str)
++		str = _Y("");
++
++	len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
++	new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
++	if (new_str) {
++		strncpy(new_str, str, len);
++		new_str[len] = 0;
++	}
++	return new_str;
++
++}
++/*
++ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
++ * new link (ie. name) is created or deleted in the directory.
++ *
++ * ie.
++ *   create dir/a : update dir's mtime/ctime
++ *   rm dir/a:   update dir's mtime/ctime
++ *   modify dir/a: don't update dir's mtime/ctime
++ *
++ * This can be handled immediately or deferred. Deferring helps reduce the
++ * number of updates when many files in a directory are changed within a
++ * brief period.
++ *
++ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
++ * called periodically.
++ */
++
++static void yaffs_update_parent(struct yaffs_obj *obj)
++{
++	struct yaffs_dev *dev;
++
++	if (!obj)
++		return;
++	dev = obj->my_dev;
++	obj->dirty = 1;
++	yaffs_load_current_time(obj, 0, 1);
++	if (dev->param.defered_dir_update) {
++		struct list_head *link = &obj->variant.dir_variant.dirty;
++
++		if (list_empty(link)) {
++			list_add(link, &dev->dirty_dirs);
++			yaffs_trace(YAFFS_TRACE_BACKGROUND,
++			  "Added object %d to dirty directories",
++			   obj->obj_id);
++		}
++
++	} else {
++		yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
++	}
++}
++
++void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
++{
++	struct list_head *link;
++	struct yaffs_obj *obj;
++	struct yaffs_dir_var *d_s;
++	union yaffs_obj_var *o_v;
++
++	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
++
++	while (!list_empty(&dev->dirty_dirs)) {
++		link = dev->dirty_dirs.next;
++		list_del_init(link);
++
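++		/* Recover the enclosing object from the embedded list head:
++		 * dirty -> yaffs_dir_var -> yaffs_obj_var union -> yaffs_obj,
++		 * one container_of-style step per list_entry() below.
++		 */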
++		d_s = list_entry(link, struct yaffs_dir_var, dirty);
++		o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
++		obj = list_entry(o_v, struct yaffs_obj, variant);
++
++		yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
++			obj->obj_id);
++
++		if (obj->dirty)
++			yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
++	}
++}
++
++/*
++ * Mknod (create) a new object.
++ * equiv_obj only has meaning for a hard link;
++ * alias_str only has meaning for a symlink.
++ * rdev only has meaning for devices (a subset of special objects)
++ */
++
++static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
++					  struct yaffs_obj *parent,
++					  const YCHAR *name,
++					  u32 mode,
++					  u32 uid,
++					  u32 gid,
++					  struct yaffs_obj *equiv_obj,
++					  const YCHAR *alias_str, u32 rdev)
++{
++	struct yaffs_obj *in;
++	YCHAR *str = NULL;
++	struct yaffs_dev *dev = parent->my_dev;
++
++	/* Check if the entry exists.
++	 * If it does then fail the call since we don't want a dup. */
++	if (yaffs_find_by_name(parent, name))
++		return NULL;
++
++	if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
++		str = yaffs_clone_str(alias_str);
++		if (!str)
++			return NULL;
++	}
++
++	in = yaffs_new_obj(dev, -1, type);
++
++	if (!in) {
++		kfree(str);
++		return NULL;
++	}
++
++	in->hdr_chunk = 0;
++	in->valid = 1;
++	in->variant_type = type;
++
++	in->yst_mode = mode;
++
++	yaffs_attribs_init(in, gid, uid, rdev);
++
++	in->n_data_chunks = 0;
++
++	yaffs_set_obj_name(in, name);
++	in->dirty = 1;
++
++	yaffs_add_obj_to_dir(parent, in);
++
++	in->my_dev = parent->my_dev;
++
++	switch (type) {
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		in->variant.symlink_variant.alias = str;
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		in->variant.hardlink_variant.equiv_obj = equiv_obj;
++		in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
++		list_add(&in->hard_links, &equiv_obj->hard_links);
++		break;
++	case YAFFS_OBJECT_TYPE_FILE:
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++	case YAFFS_OBJECT_TYPE_UNKNOWN:
++		/* do nothing */
++		break;
++	}
++
++	if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
++		/* Could not create the object header, fail */
++		yaffs_del_obj(in);
++		in = NULL;
++	}
++
++	if (in)
++		yaffs_update_parent(parent);
++
++	return in;
++}
++
++struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
++				    const YCHAR *name, u32 mode, u32 uid,
++				    u32 gid)
++{
++	return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
++				uid, gid, NULL, NULL, 0);
++}
++
++struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
++				   u32 mode, u32 uid, u32 gid)
++{
++	return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
++				mode, uid, gid, NULL, NULL, 0);
++}
++
++struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
++				       const YCHAR *name, u32 mode, u32 uid,
++				       u32 gid, u32 rdev)
++{
++	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
++				uid, gid, NULL, NULL, rdev);
++}
++
++struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
++				       const YCHAR *name, u32 mode, u32 uid,
++				       u32 gid, const YCHAR *alias)
++{
++	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
++				uid, gid, NULL, alias, 0);
++}
++
++/* yaffs_link_obj returns the object id of the equivalent object.*/
++struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
++				 struct yaffs_obj *equiv_obj)
++{
++	/* Get the real object in case we were fed a hard link obj */
++	equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
++
++	if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
++			parent, name, 0, 0, 0,
++			equiv_obj, NULL, 0))
++		return equiv_obj;
++
++	return NULL;
++
++}
++
++
++
++/*---------------------- Block Management and Page Allocation -------------*/
++
++static void yaffs_deinit_blocks(struct yaffs_dev *dev)
++{
++	if (dev->block_info_alt && dev->block_info)
++		vfree(dev->block_info);
++	else
++		kfree(dev->block_info);
++
++	dev->block_info_alt = 0;
++
++	dev->block_info = NULL;
++
++	if (dev->chunk_bits_alt && dev->chunk_bits)
++		vfree(dev->chunk_bits);
++	else
++		kfree(dev->chunk_bits);
++	dev->chunk_bits_alt = 0;
++	dev->chunk_bits = NULL;
++}
++
++static int yaffs_init_blocks(struct yaffs_dev *dev)
++{
++	int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
++
++	dev->block_info = NULL;
++	dev->chunk_bits = NULL;
++	dev->alloc_block = -1;	/* force it to get a new one */
++
++	/* If the first allocation strategy fails, try the alternate one */
++	dev->block_info =
++		kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
++	if (!dev->block_info) {
++		dev->block_info =
++		    vmalloc(n_blocks * sizeof(struct yaffs_block_info));
++		dev->block_info_alt = 1;
++	} else {
++		dev->block_info_alt = 0;
++	}
++
++	if (!dev->block_info)
++		goto alloc_error;
++
++	/* Set up dynamic blockinfo stuff. Round up bytes. */
++	dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
++	dev->chunk_bits =
++		kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
++	if (!dev->chunk_bits) {
++		dev->chunk_bits =
++		    vmalloc(dev->chunk_bit_stride * n_blocks);
++		dev->chunk_bits_alt = 1;
++	} else {
++		dev->chunk_bits_alt = 0;
++	}
++	if (!dev->chunk_bits)
++		goto alloc_error;
++
++
++	memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
++	memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
++	return YAFFS_OK;
++
++alloc_error:
++	yaffs_deinit_blocks(dev);
++	return YAFFS_FAIL;
++}
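++
++/*
++ * Stride example: with 64 chunks per block the bitmap needs
++ * (64 + 7) / 8 = 8 bytes per block, so a 1024-block partition uses an
++ * 8 KiB chunk_bits array.
++ */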
++
++
++void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
++{
++	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
++	int erased_ok = 0;
++	int i;
++
++	/* If the block is still healthy erase it and mark as clean.
++	 * If the block has had a data failure, then retire it.
++	 */
++
++	yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
++		"yaffs_block_became_dirty block %d state %d %s",
++		block_no, bi->block_state,
++		(bi->needs_retiring) ? "needs retiring" : "");
++
++	yaffs2_clear_oldest_dirty_seq(dev, bi);
++
++	bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
++
++	/* If this is the block being garbage collected then stop gc'ing */
++	if (block_no == dev->gc_block)
++		dev->gc_block = 0;
++
++	/* If this block is currently the best candidate for gc
++	 * then drop as a candidate */
++	if (block_no == dev->gc_dirtiest) {
++		dev->gc_dirtiest = 0;
++		dev->gc_pages_in_use = 0;
++	}
++
++	if (!bi->needs_retiring) {
++		yaffs2_checkpt_invalidate(dev);
++		erased_ok = yaffs_erase_block(dev, block_no);
++		if (!erased_ok) {
++			dev->n_erase_failures++;
++			yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++			  "**>> Erasure failed %d", block_no);
++		}
++	}
++
++	/* Verify erasure if needed */
++	if (erased_ok &&
++	    ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
++	     !yaffs_skip_verification(dev))) {
++		for (i = 0; i < dev->param.chunks_per_block; i++) {
++			if (!yaffs_check_chunk_erased(dev,
++				block_no * dev->param.chunks_per_block + i)) {
++				yaffs_trace(YAFFS_TRACE_ERROR,
++					">>Block %d erasure supposedly OK, but chunk %d not erased",
++					block_no, i);
++			}
++		}
++	}
++
++	if (!erased_ok) {
++		/* We lost a block of free space */
++		dev->n_free_chunks -= dev->param.chunks_per_block;
++		yaffs_retire_block(dev, block_no);
++		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++			"**>> Block %d retired", block_no);
++		return;
++	}
++
++	/* Clean it up... */
++	bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++	bi->seq_number = 0;
++	dev->n_erased_blocks++;
++	bi->pages_in_use = 0;
++	bi->soft_del_pages = 0;
++	bi->has_shrink_hdr = 0;
++	bi->skip_erased_check = 1;	/* Clean, so no need to check */
++	bi->gc_prioritise = 0;
++	bi->has_summary = 0;
++
++	yaffs_clear_chunk_bits(dev, block_no);
++
++	yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
++}
++
++static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
++					struct yaffs_block_info *bi,
++					int old_chunk, u8 *buffer)
++{
++	int new_chunk;
++	int mark_flash = 1;
++	struct yaffs_ext_tags tags;
++	struct yaffs_obj *object;
++	int matching_chunk;
++	int ret_val = YAFFS_OK;
++
++	memset(&tags, 0, sizeof(tags));
++	yaffs_rd_chunk_tags_nand(dev, old_chunk,
++				 buffer, &tags);
++	object = yaffs_find_by_number(dev, tags.obj_id);
++
++	yaffs_trace(YAFFS_TRACE_GC_DETAIL,
++		"Collecting chunk in block %d, %d %d %d ",
++		dev->gc_chunk, tags.obj_id,
++		tags.chunk_id, tags.n_bytes);
++
++	if (object && !yaffs_skip_verification(dev)) {
++		if (tags.chunk_id == 0)
++			matching_chunk =
++			    object->hdr_chunk;
++		else if (object->soft_del)
++			/* Defeat the test */
++			matching_chunk = old_chunk;
++		else
++			matching_chunk =
++			    yaffs_find_chunk_in_file
++			    (object, tags.chunk_id,
++			     NULL);
++
++		if (old_chunk != matching_chunk)
++			yaffs_trace(YAFFS_TRACE_ERROR,
++				"gc: page in gc mismatch: %d %d %d %d",
++				old_chunk,
++				matching_chunk,
++				tags.obj_id,
++				tags.chunk_id);
++	}
++
++	if (!object) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"page %d in gc has no object: %d %d %d ",
++			old_chunk,
++			tags.obj_id, tags.chunk_id,
++			tags.n_bytes);
++	}
++
++	if (object &&
++	    object->deleted &&
++	    object->soft_del && tags.chunk_id != 0) {
++		/* Data chunk in a soft deleted file:
++		 * throw it away. No need to copy it,
++		 * just forget about it and fix up the
++		 * object.
++		 */
++
++		/* Free chunks already includes
++		 * soft-deleted chunks; however, this
++		 * chunk is soon going to be really
++		 * deleted, which will increment free
++		 * chunks. We have to decrement free
++		 * chunks so this works out properly.
++		 */
++		dev->n_free_chunks--;
++		bi->soft_del_pages--;
++
++		object->n_data_chunks--;
++		if (object->n_data_chunks <= 0) {
++			/* remember to clean up the object */
++			dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
++			dev->n_clean_ups++;
++		}
++		mark_flash = 0;
++	} else if (object) {
++		/* It's either a data chunk in a live
++		 * file or an ObjectHeader, so we're
++		 * interested in it.
++		 * NB Need to keep the ObjectHeaders of
++		 * deleted files until the whole file
++		 * has been deleted off
++		 */
++		tags.serial_number++;
++		dev->n_gc_copies++;
++
++		if (tags.chunk_id == 0) {
++			/* It is an object header.
++			 * We need to nuke the
++			 * shrink-header flag since its
++			 * work is done.
++			 * Also need to clean up
++			 * shadowing.
++			 */
++			struct yaffs_obj_hdr *oh;
++			oh = (struct yaffs_obj_hdr *) buffer;
++
++			oh->is_shrink = 0;
++			tags.extra_is_shrink = 0;
++			oh->shadows_obj = 0;
++			oh->inband_shadowed_obj_id = 0;
++			tags.extra_shadows = 0;
++
++			/* Update file size */
++			if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
++				yaffs_oh_size_load(oh,
++				    object->variant.file_variant.file_size);
++				tags.extra_file_size =
++				    object->variant.file_variant.file_size;
++			}
++
++			yaffs_verify_oh(object, oh, &tags, 1);
++			new_chunk =
++			    yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
++		} else {
++			new_chunk =
++			    yaffs_write_new_chunk(dev, buffer, &tags, 1);
++		}
++
++		if (new_chunk < 0) {
++			ret_val = YAFFS_FAIL;
++		} else {
++
++			/* Now fix up the Tnodes etc. */
++
++			if (tags.chunk_id == 0) {
++				/* It's a header */
++				object->hdr_chunk = new_chunk;
++				object->serial = tags.serial_number;
++			} else {
++				/* It's a data chunk */
++				yaffs_put_chunk_in_file(object, tags.chunk_id,
++							new_chunk, 0);
++			}
++		}
++	}
++	if (ret_val == YAFFS_OK)
++		yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
++	return ret_val;
++}
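++
++/* To summarise the copy decision above (a restatement, not new logic):
++ * - a data chunk of a soft-deleted file is dropped, and the owning object
++ *   is queued on gc_cleanup_list once its last data chunk goes;
++ * - a live data chunk or object header is rewritten elsewhere with a
++ *   bumped serial number and the tnode tree or hdr_chunk is repointed;
++ * - in both cases yaffs_chunk_del() then releases the old chunk.
++ */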
++
++static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
++{
++	int old_chunk;
++	int ret_val = YAFFS_OK;
++	int i;
++	int is_checkpt_block;
++	int max_copies;
++	int chunks_before = yaffs_get_erased_chunks(dev);
++	int chunks_after;
++	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
++
++	is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
++
++	yaffs_trace(YAFFS_TRACE_TRACING,
++		"Collecting block %d, in use %d, shrink %d, whole_block %d",
++		block, bi->pages_in_use, bi->has_shrink_hdr,
++		whole_block);
++
++	/*yaffs_verify_free_chunks(dev); */
++
++	if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
++		bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
++
++	bi->has_shrink_hdr = 0;	/* clear the flag so that the block can erase */
++
++	dev->gc_disable = 1;
++
++	yaffs_summary_gc(dev, block);
++
++	if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
++		yaffs_trace(YAFFS_TRACE_TRACING,
++			"Collecting block %d that has no chunks in use",
++			block);
++		yaffs_block_became_dirty(dev, block);
++	} else {
++
++		u8 *buffer = yaffs_get_temp_buffer(dev);
++
++		yaffs_verify_blk(dev, bi, block);
++
++		max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
++		old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
++
++		for (/* init already done */ ;
++		     ret_val == YAFFS_OK &&
++		     dev->gc_chunk < dev->param.chunks_per_block &&
++		     (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
++		     max_copies > 0;
++		     dev->gc_chunk++, old_chunk++) {
++			if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
++				/* Page is in use and might need to be copied */
++				max_copies--;
++				ret_val = yaffs_gc_process_chunk(dev, bi,
++							old_chunk, buffer);
++			}
++		}
++		yaffs_release_temp_buffer(dev, buffer);
++	}
++
++	yaffs_verify_collected_blk(dev, bi, block);
++
++	if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++		/*
++		 * The gc did not complete. Set block state back to FULL
++		 * because checkpointing does not restore gc.
++		 */
++		bi->block_state = YAFFS_BLOCK_STATE_FULL;
++	} else {
++		/* The gc completed. */
++		/* Do any required cleanups */
++		for (i = 0; i < dev->n_clean_ups; i++) {
++			/* Time to delete the file too */
++			struct yaffs_obj *object =
++			    yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
++			if (object) {
++				yaffs_free_tnode(dev,
++					  object->variant.file_variant.top);
++				object->variant.file_variant.top = NULL;
++				yaffs_trace(YAFFS_TRACE_GC,
++					"yaffs: About to finally delete object %d",
++					object->obj_id);
++				yaffs_generic_obj_del(object);
++				object->my_dev->n_deleted_files--;
++			}
++
++		}
++		chunks_after = yaffs_get_erased_chunks(dev);
++		if (chunks_before >= chunks_after)
++			yaffs_trace(YAFFS_TRACE_GC,
++				"gc did not increase free chunks before %d after %d",
++				chunks_before, chunks_after);
++		dev->gc_block = 0;
++		dev->gc_chunk = 0;
++		dev->n_clean_ups = 0;
++	}
++
++	dev->gc_disable = 0;
++
++	return ret_val;
++}
++
++/*
++ * find_gc_block() selects the dirtiest block (or close enough)
++ * for garbage collection.
++ */
++
++static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
++				    int aggressive, int background)
++{
++	int i;
++	int iterations;
++	unsigned selected = 0;
++	int prioritised = 0;
++	int prioritised_exist = 0;
++	struct yaffs_block_info *bi;
++	int threshold;
++
++	/* First let's see if we need to grab a prioritised block */
++	if (dev->has_pending_prioritised_gc && !aggressive) {
++		dev->gc_dirtiest = 0;
++		bi = dev->block_info;
++		for (i = dev->internal_start_block;
++		     i <= dev->internal_end_block && !selected; i++) {
++
++			if (bi->gc_prioritise) {
++				prioritised_exist = 1;
++				if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++				    yaffs_block_ok_for_gc(dev, bi)) {
++					selected = i;
++					prioritised = 1;
++				}
++			}
++			bi++;
++		}
++
++		/*
++		 * If there is a prioritised block and none was selected then
++		 * this happened because there is at least one old dirty block
++		 * gumming up the works. Let's gc the oldest dirty block.
++		 */
++
++		if (prioritised_exist &&
++		    !selected && dev->oldest_dirty_block > 0)
++			selected = dev->oldest_dirty_block;
++
++		if (!prioritised_exist)	/* None found, so we can clear this */
++			dev->has_pending_prioritised_gc = 0;
++	}
++
++	/* If we're doing aggressive GC then we are happy to take a less-dirty
++	 * block, and search harder.
++	 * Otherwise (leisurely gc) we only bother to do this if the
++	 * block has only a few pages in use.
++	 */
++
++	if (!selected) {
++		int pages_used;
++		int n_blocks =
++		    dev->internal_end_block - dev->internal_start_block + 1;
++		if (aggressive) {
++			threshold = dev->param.chunks_per_block;
++			iterations = n_blocks;
++		} else {
++			int max_threshold;
++
++			if (background)
++				max_threshold = dev->param.chunks_per_block / 2;
++			else
++				max_threshold = dev->param.chunks_per_block / 8;
++
++			if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
++				max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
++
++			threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
++			if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
++				threshold = YAFFS_GC_PASSIVE_THRESHOLD;
++			if (threshold > max_threshold)
++				threshold = max_threshold;
++
++			iterations = n_blocks / 16 + 1;
++			if (iterations > 100)
++				iterations = 100;
++		}
++
++		for (i = 0;
++		     i < iterations &&
++		     (dev->gc_dirtiest < 1 ||
++		      dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
++		     i++) {
++			dev->gc_block_finder++;
++			if (dev->gc_block_finder < dev->internal_start_block ||
++			    dev->gc_block_finder > dev->internal_end_block)
++				dev->gc_block_finder =
++				    dev->internal_start_block;
++
++			bi = yaffs_get_block_info(dev, dev->gc_block_finder);
++
++			pages_used = bi->pages_in_use - bi->soft_del_pages;
++
++			if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++			    pages_used < dev->param.chunks_per_block &&
++			    (dev->gc_dirtiest < 1 ||
++			     pages_used < dev->gc_pages_in_use) &&
++			    yaffs_block_ok_for_gc(dev, bi)) {
++				dev->gc_dirtiest = dev->gc_block_finder;
++				dev->gc_pages_in_use = pages_used;
++			}
++		}
++
++		if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
++			selected = dev->gc_dirtiest;
++	}
++
++	/*
++	 * If nothing has been selected for a while, try the oldest dirty
++	 * because that's gumming up the works.
++	 */
++
++	if (!selected && dev->param.is_yaffs2 &&
++	    dev->gc_not_done >= (background ? 10 : 20)) {
++		yaffs2_find_oldest_dirty_seq(dev);
++		if (dev->oldest_dirty_block > 0) {
++			selected = dev->oldest_dirty_block;
++			dev->gc_dirtiest = selected;
++			dev->oldest_dirty_gc_count++;
++			bi = yaffs_get_block_info(dev, selected);
++			dev->gc_pages_in_use =
++			    bi->pages_in_use - bi->soft_del_pages;
++		} else {
++			dev->gc_not_done = 0;
++		}
++	}
++
++	if (selected) {
++		yaffs_trace(YAFFS_TRACE_GC,
++			"GC Selected block %d with %d free, prioritised:%d",
++			selected,
++			dev->param.chunks_per_block - dev->gc_pages_in_use,
++			prioritised);
++
++		dev->n_gc_blocks++;
++		if (background)
++			dev->bg_gcs++;
++
++		dev->gc_dirtiest = 0;
++		dev->gc_pages_in_use = 0;
++		dev->gc_not_done = 0;
++		if (dev->refresh_skip > 0)
++			dev->refresh_skip--;
++	} else {
++		dev->gc_not_done++;
++		yaffs_trace(YAFFS_TRACE_GC,
++			"GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
++			dev->gc_block_finder, dev->gc_not_done, threshold,
++			dev->gc_dirtiest, dev->gc_pages_in_use,
++			dev->oldest_dirty_block, background ? " bg" : "");
++	}
++
++	return selected;
++}
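++
++/* Worked example for the threshold logic above (illustrative values,
++ * assuming chunks_per_block == 64 and YAFFS_GC_PASSIVE_THRESHOLD == 4):
++ * - background gc with gc_not_done == 4:
++ *     max_threshold = 64 / 2 = 32
++ *     threshold = (4 + 2) * 2 = 12, clamped to [4, 32] -> 12
++ *   so any FULL block with <= 12 live pages is good enough;
++ * - foreground (non-background) passive gc:
++ *     max_threshold = 64 / 8 = 8, threshold = 0 -> clamped up to 4
++ *   so only blocks with <= 4 live pages are taken.
++ */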
++
++/* New garbage collector
++ * If we're very low on erased blocks then we do aggressive garbage collection
++ * otherwise we do "leisurely" garbage collection.
++ * Aggressive gc looks further (the whole array) and will accept less-dirty
++ * blocks. Passive gc only inspects smaller areas and only accepts dirtier
++ * blocks.
++ *
++ * The idea is to help clear out space in a more spread-out manner.
++ * It is not clear how much this really helps in practice.
++ */
++static int yaffs_check_gc(struct yaffs_dev *dev, int background)
++{
++	int aggressive = 0;
++	int gc_ok = YAFFS_OK;
++	int max_tries = 0;
++	int min_erased;
++	int erased_chunks;
++	int checkpt_block_adjust;
++
++	if (dev->param.gc_control_fn &&
++		(dev->param.gc_control_fn(dev) & 1) == 0)
++		return YAFFS_OK;
++
++	if (dev->gc_disable)
++		/* Bail out so we don't get recursive gc */
++		return YAFFS_OK;
++
++	/* This loop should pass the first time.
++	 * It only loops here if the collection does not increase space.
++	 */
++
++	do {
++		max_tries++;
++
++		checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
++
++		min_erased =
++		    dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
++		erased_chunks =
++		    dev->n_erased_blocks * dev->param.chunks_per_block;
++
++		/* If we need a block soon then do aggressive gc. */
++		if (dev->n_erased_blocks < min_erased)
++			aggressive = 1;
++		else {
++			if (!background
++			    && erased_chunks > (dev->n_free_chunks / 4))
++				break;
++
++			if (dev->gc_skip > 20)
++				dev->gc_skip = 20;
++			if (erased_chunks < dev->n_free_chunks / 2 ||
++			    dev->gc_skip < 1 || background)
++				aggressive = 0;
++			else {
++				dev->gc_skip--;
++				break;
++			}
++		}
++
++		dev->gc_skip = 5;
++
++		/* If we don't already have a block being gc'd then see if we
++		 * should start another */
++
++		if (dev->gc_block < 1 && !aggressive) {
++			dev->gc_block = yaffs2_find_refresh_block(dev);
++			dev->gc_chunk = 0;
++			dev->n_clean_ups = 0;
++		}
++		if (dev->gc_block < 1) {
++			dev->gc_block =
++			    yaffs_find_gc_block(dev, aggressive, background);
++			dev->gc_chunk = 0;
++			dev->n_clean_ups = 0;
++		}
++
++		if (dev->gc_block > 0) {
++			dev->all_gcs++;
++			if (!aggressive)
++				dev->passive_gc_count++;
++
++			yaffs_trace(YAFFS_TRACE_GC,
++				"yaffs: GC n_erased_blocks %d aggressive %d",
++				dev->n_erased_blocks, aggressive);
++
++			gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
++		}
++
++		if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
++		    dev->gc_block > 0) {
++			yaffs_trace(YAFFS_TRACE_GC,
++				"yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
++				dev->n_erased_blocks, max_tries,
++				dev->gc_block);
++		}
++	} while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
++		 (dev->gc_block > 0) && (max_tries < 2));
++
++	return aggressive ? gc_ok : YAFFS_OK;
++}
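++
++/* Worked example for the aggression test above (illustrative values):
++ * with n_reserved_blocks == 5 and yaffs_calc_checkpt_blocks_required()
++ * returning 2, min_erased = 5 + 2 + 1 = 8, so gc turns aggressive as
++ * soon as fewer than 8 erased blocks remain; otherwise the erased-chunk
++ * ratio and the gc_skip back-off decide whether to collect passively
++ * or skip this round entirely.
++ */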
++
++/*
++ * yaffs_bg_gc()
++ * Garbage collects. Intended to be called from a background thread.
++ * Returns non-zero if at least half the free chunks are erased.
++ */
++int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
++{
++	int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
++
++	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
++
++	yaffs_check_gc(dev, 1);
++	return erased_chunks > dev->n_free_chunks / 2;
++}
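++
++/* Note: erased_chunks above is sampled before yaffs_check_gc() runs, so
++ * the return value reflects the state on entry rather than the state
++ * after this pass of collection.
++ */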
++
++/*-------------------- Data file manipulation -----------------*/
++
++static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 *buffer)
++{
++	int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
++
++	if (nand_chunk >= 0)
++		return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
++						buffer, NULL);
++	else {
++		yaffs_trace(YAFFS_TRACE_NANDACCESS,
++			"Chunk %d not found, zero instead",
++			nand_chunk);
++		/* get sane (zero) data if you read a hole */
++		memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
++		return 0;
++	}
++
++}
++
++void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
++		     int lyn)
++{
++	int block;
++	int page;
++	struct yaffs_ext_tags tags;
++	struct yaffs_block_info *bi;
++
++	if (chunk_id <= 0)
++		return;
++
++	dev->n_deletions++;
++	block = chunk_id / dev->param.chunks_per_block;
++	page = chunk_id % dev->param.chunks_per_block;
++
++	if (!yaffs_check_chunk_bit(dev, block, page))
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Deleting invalid chunk %d", chunk_id);
++
++	bi = yaffs_get_block_info(dev, block);
++
++	yaffs2_update_oldest_dirty_seq(dev, block, bi);
++
++	yaffs_trace(YAFFS_TRACE_DELETION,
++		"line %d delete of chunk %d",
++		lyn, chunk_id);
++
++	if (!dev->param.is_yaffs2 && mark_flash &&
++	    bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
++
++		memset(&tags, 0, sizeof(tags));
++		tags.is_deleted = 1;
++		yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
++		yaffs_handle_chunk_update(dev, chunk_id, &tags);
++	} else {
++		dev->n_unmarked_deletions++;
++	}
++
++	/* Pull out of the management area.
++	 * If the whole block became dirty, this will kick off an erasure.
++	 */
++	if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
++	    bi->block_state == YAFFS_BLOCK_STATE_FULL ||
++	    bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
++	    bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++		dev->n_free_chunks++;
++		yaffs_clear_chunk_bit(dev, block, page);
++		bi->pages_in_use--;
++
++		if (bi->pages_in_use == 0 &&
++		    !bi->has_shrink_hdr &&
++		    bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
++		    bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++			yaffs_block_became_dirty(dev, block);
++		}
++	}
++}
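++
++/* Worked example for the chunk arithmetic above (illustrative values):
++ * with chunks_per_block == 64 and chunk_id == 1234,
++ *   block = 1234 / 64 = 19 and page = 1234 % 64 = 18,
++ * i.e. NAND chunk 1234 is page 18 of block 19.
++ */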
++
++static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
++			     const u8 *buffer, int n_bytes, int use_reserve)
++{
++	/* Find the old chunk. We need to do this to get the serial number.
++	 * Write the new one and patch it into the tree.
++	 * Invalidate the old tags.
++	 */
++
++	int prev_chunk_id;
++	struct yaffs_ext_tags prev_tags;
++	int new_chunk_id;
++	struct yaffs_ext_tags new_tags;
++	struct yaffs_dev *dev = in->my_dev;
++
++	yaffs_check_gc(dev, 0);
++
++	/* Get the previous chunk at this location in the file if it exists.
++	 * If it does not exist then put a zero into the tree. This creates
++	 * the tnode now, rather than later when it is harder to clean up.
++	 */
++	prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
++	if (prev_chunk_id < 1 &&
++	    !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
++		return 0;
++
++	/* Set up new tags */
++	memset(&new_tags, 0, sizeof(new_tags));
++
++	new_tags.chunk_id = inode_chunk;
++	new_tags.obj_id = in->obj_id;
++	new_tags.serial_number =
++	    (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
++	new_tags.n_bytes = n_bytes;
++
++	if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++		  "Writing %d bytes to chunk!!!!!!!!!",
++		   n_bytes);
++		BUG();
++	}
++
++	new_chunk_id =
++	    yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
++
++	if (new_chunk_id > 0) {
++		yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
++
++		if (prev_chunk_id > 0)
++			yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
++
++		yaffs_verify_file_sane(in);
++	}
++	return new_chunk_id;
++
++}
++
++
++
++static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
++				const YCHAR *name, const void *value, int size,
++				int flags)
++{
++	struct yaffs_xattr_mod xmod;
++	int result;
++
++	xmod.set = set;
++	xmod.name = name;
++	xmod.data = value;
++	xmod.size = size;
++	xmod.flags = flags;
++	xmod.result = -ENOSPC;
++
++	result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
++
++	if (result > 0)
++		return xmod.result;
++	else
++		return -ENOSPC;
++}
++
++static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
++				   struct yaffs_xattr_mod *xmod)
++{
++	int retval = 0;
++	int x_offs = sizeof(struct yaffs_obj_hdr);
++	struct yaffs_dev *dev = obj->my_dev;
++	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
++	char *x_buffer = buffer + x_offs;
++
++	if (xmod->set)
++		retval =
++		    nval_set(x_buffer, x_size, xmod->name, xmod->data,
++			     xmod->size, xmod->flags);
++	else
++		retval = nval_del(x_buffer, x_size, xmod->name);
++
++	obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++	obj->xattr_known = 1;
++	xmod->result = retval;
++
++	return retval;
++}
++
++static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
++				  void *value, int size)
++{
++	char *buffer = NULL;
++	int result;
++	struct yaffs_ext_tags tags;
++	struct yaffs_dev *dev = obj->my_dev;
++	int x_offs = sizeof(struct yaffs_obj_hdr);
++	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
++	char *x_buffer;
++	int retval = 0;
++
++	if (obj->hdr_chunk < 1)
++		return -ENODATA;
++
++	/* If we know that the object has no xattribs then don't do all the
++	 * reading and parsing.
++	 */
++	if (obj->xattr_known && !obj->has_xattr) {
++		if (name)
++			return -ENODATA;
++		else
++			return 0;
++	}
++
++	buffer = (char *)yaffs_get_temp_buffer(dev);
++	if (!buffer)
++		return -ENOMEM;
++
++	result =
++	    yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
++
++	if (result != YAFFS_OK)
++		retval = -ENOENT;
++	else {
++		x_buffer = buffer + x_offs;
++
++		if (!obj->xattr_known) {
++			obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++			obj->xattr_known = 1;
++		}
++
++		if (name)
++			retval = nval_get(x_buffer, x_size, name, value, size);
++		else
++			retval = nval_list(x_buffer, x_size, value, size);
++	}
++	yaffs_release_temp_buffer(dev, (u8 *) buffer);
++	return retval;
++}
++
++int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
++		      const void *value, int size, int flags)
++{
++	return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
++}
++
++int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name)
++{
++	return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
++}
++
++int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
++		      int size)
++{
++	return yaffs_do_xattrib_fetch(obj, name, value, size);
++}
++
++int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
++{
++	return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
++}
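++
++/* A note on the xattr layout used above (sizes illustrative): xattribs
++ * are stored as an nval list in the tail of the object header chunk,
++ * i.e. at x_offs = sizeof(struct yaffs_obj_hdr) with
++ * x_size = data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr) bytes of
++ * room. For instance, a 2048-byte data chunk with a (hypothetical)
++ * 512-byte header would leave 1536 bytes for name/value pairs.
++ */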
++
++static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
++{
++	u8 *buf;
++	struct yaffs_obj_hdr *oh;
++	struct yaffs_dev *dev;
++	struct yaffs_ext_tags tags;
++	int result;
++	int alloc_failed = 0;
++
++	if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
++		return;
++
++	dev = in->my_dev;
++	in->lazy_loaded = 0;
++	buf = yaffs_get_temp_buffer(dev);
++
++	result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
++	oh = (struct yaffs_obj_hdr *)buf;
++
++	in->yst_mode = oh->yst_mode;
++	yaffs_load_attribs(in, oh);
++	yaffs_set_obj_name_from_oh(in, oh);
++
++	if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
++		in->variant.symlink_variant.alias =
++		    yaffs_clone_str(oh->alias);
++		if (!in->variant.symlink_variant.alias)
++			alloc_failed = 1;	/* Not returned */
++	}
++	yaffs_release_temp_buffer(dev, buf);
++}
++
++static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
++				    const YCHAR *oh_name, int buff_size)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++	if (dev->param.auto_unicode) {
++		if (*oh_name) {
++			/* It is an ASCII name, do an ASCII to
++			 * unicode conversion */
++			const char *ascii_oh_name = (const char *)oh_name;
++			int n = buff_size - 1;
++			while (n > 0 && *ascii_oh_name) {
++				*name = *ascii_oh_name;
++				name++;
++				ascii_oh_name++;
++				n--;
++			}
++		} else {
++			strncpy(name, oh_name + 1, buff_size - 1);
++		}
++	} else {
++#else
++	(void) dev;
++	{
++#endif
++		strncpy(name, oh_name, buff_size - 1);
++	}
++}
++
++static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
++				    const YCHAR *name)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++
++	int is_ascii;
++	YCHAR *w;
++
++	if (dev->param.auto_unicode) {
++
++		is_ascii = 1;
++		w = name;
++
++		/* Figure out if the name will fit in ascii character set */
++		while (is_ascii && *w) {
++			if ((*w) & 0xff00)
++				is_ascii = 0;
++			w++;
++		}
++
++		if (is_ascii) {
++			/* It is an ASCII name, so convert unicode to ascii */
++			char *ascii_oh_name = (char *)oh_name;
++			int n = YAFFS_MAX_NAME_LENGTH - 1;
++			while (n > 0 && *name) {
++				*ascii_oh_name = *name;
++				name++;
++				ascii_oh_name++;
++				n--;
++			}
++		} else {
++			/* Unicode name, so save starting at the second YCHAR */
++			*oh_name = 0;
++			strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
++		}
++	} else {
++#else
++	(void) dev;
++	{
++#endif
++		strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
++	}
++}
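++
++/* Example of the auto-unicode encoding above (illustrative names):
++ * - "log.txt" fits the ASCII space, so it is stored as plain bytes from
++ *   oh_name[0] and loaded back via the byte-to-YCHAR loop;
++ * - a name containing a YCHAR with any of bits 0xff00 set is stored as
++ *   Unicode from oh_name[1], with oh_name[0] == 0 acting as the marker
++ *   that yaffs_load_name_from_oh() tests with "if (*oh_name)".
++ */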
++
++/* UpdateObjectHeader updates the header on NAND for an object.
++ * If name is not NULL, then that new name is used.
++ */
++int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
++		    int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
++{
++
++	struct yaffs_block_info *bi;
++	struct yaffs_dev *dev = in->my_dev;
++	int prev_chunk_id;
++	int ret_val = 0;
++	int result = 0;
++	int new_chunk_id;
++	struct yaffs_ext_tags new_tags;
++	struct yaffs_ext_tags old_tags;
++	const YCHAR *alias = NULL;
++	u8 *buffer = NULL;
++	YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
++	struct yaffs_obj_hdr *oh = NULL;
++	loff_t file_size = 0;
++
++	strcpy(old_name, _Y("silly old name"));
++
++	if (in->fake && in != dev->root_dir && !force && !xmod)
++		return ret_val;
++
++	yaffs_check_gc(dev, 0);
++	yaffs_check_obj_details_loaded(in);
++
++	buffer = yaffs_get_temp_buffer(in->my_dev);
++	oh = (struct yaffs_obj_hdr *)buffer;
++
++	prev_chunk_id = in->hdr_chunk;
++
++	if (prev_chunk_id > 0) {
++		result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
++						  buffer, &old_tags);
++
++		yaffs_verify_oh(in, oh, &old_tags, 0);
++		memcpy(old_name, oh->name, sizeof(oh->name));
++		memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
++	} else {
++		memset(buffer, 0xff, dev->data_bytes_per_chunk);
++	}
++
++	oh->type = in->variant_type;
++	oh->yst_mode = in->yst_mode;
++	oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
++
++	yaffs_load_attribs_oh(oh, in);
++
++	if (in->parent)
++		oh->parent_obj_id = in->parent->obj_id;
++	else
++		oh->parent_obj_id = 0;
++
++	if (name && *name) {
++		memset(oh->name, 0, sizeof(oh->name));
++		yaffs_load_oh_from_name(dev, oh->name, name);
++	} else if (prev_chunk_id > 0) {
++		memcpy(oh->name, old_name, sizeof(oh->name));
++	} else {
++		memset(oh->name, 0, sizeof(oh->name));
++	}
++
++	oh->is_shrink = is_shrink;
++
++	switch (in->variant_type) {
++	case YAFFS_OBJECT_TYPE_UNKNOWN:
++		/* Should not happen */
++		break;
++	case YAFFS_OBJECT_TYPE_FILE:
++		if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
++		    oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
++			file_size = in->variant.file_variant.file_size;
++		yaffs_oh_size_load(oh, file_size);
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		oh->equiv_id = in->variant.hardlink_variant.equiv_id;
++		break;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		/* Do nothing */
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		/* Do nothing */
++		break;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		alias = in->variant.symlink_variant.alias;
++		if (!alias)
++			alias = _Y("no alias");
++		strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
++		oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
++		break;
++	}
++
++	/* process any xattrib modifications */
++	if (xmod)
++		yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
++
++	/* Tags */
++	memset(&new_tags, 0, sizeof(new_tags));
++	in->serial++;
++	new_tags.chunk_id = 0;
++	new_tags.obj_id = in->obj_id;
++	new_tags.serial_number = in->serial;
++
++	/* Add extra info for file header */
++	new_tags.extra_available = 1;
++	new_tags.extra_parent_id = oh->parent_obj_id;
++	new_tags.extra_file_size = file_size;
++	new_tags.extra_is_shrink = oh->is_shrink;
++	new_tags.extra_equiv_id = oh->equiv_id;
++	new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
++	new_tags.extra_obj_type = in->variant_type;
++	yaffs_verify_oh(in, oh, &new_tags, 1);
++
++	/* Create new chunk in NAND */
++	new_chunk_id =
++	    yaffs_write_new_chunk(dev, buffer, &new_tags,
++				  (prev_chunk_id > 0) ? 1 : 0);
++
++	if (buffer)
++		yaffs_release_temp_buffer(dev, buffer);
++
++	if (new_chunk_id < 0)
++		return new_chunk_id;
++
++	in->hdr_chunk = new_chunk_id;
++
++	if (prev_chunk_id > 0)
++		yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
++
++	if (!yaffs_obj_cache_dirty(in))
++		in->dirty = 0;
++
++	/* If this was a shrink, then mark the block
++	 * that the chunk lives on */
++	if (is_shrink) {
++		bi = yaffs_get_block_info(in->my_dev,
++					  new_chunk_id /
++					  in->my_dev->param.chunks_per_block);
++		bi->has_shrink_hdr = 1;
++	}
++
++
++	return new_chunk_id;
++}
++
++/*--------------------- File read/write ------------------------
++ * Read and write have very similar structures.
++ * In general the read/write has three parts to it
++ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
++ * Some complete chunks
++ * An incomplete chunk to end off with
++ *
++ * Curve-balls: the first chunk might also be the last chunk.
++ */
++
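++/* Worked example of the three-part structure (illustrative values): with
++ * data_bytes_per_chunk == 2048, a 5000-byte read at offset 1000 becomes
++ *   chunk 1: start 1000, copy 2048 - 1000 = 1048 bytes (partial head),
++ *   chunk 2: copy a full 2048 bytes,
++ *   chunk 3: copy the remaining 1904 bytes (partial tail),
++ * and 1048 + 2048 + 1904 = 5000. Inode chunks are 1-based here, hence
++ * the chunk++ after yaffs_addr_to_chunk().
++ */
++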
++int yaffs_file_rd(struct yaffs_obj *in, u8 *buffer, loff_t offset, int n_bytes)
++{
++	int chunk;
++	u32 start;
++	int n_copy;
++	int n = n_bytes;
++	int n_done = 0;
++	struct yaffs_cache *cache;
++	struct yaffs_dev *dev;
++
++	dev = in->my_dev;
++
++	while (n > 0) {
++		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
++		chunk++;
++
++		/* OK now check for the curveball where the start and end are in
++		 * the same chunk.
++		 */
++		if ((start + n) < dev->data_bytes_per_chunk)
++			n_copy = n;
++		else
++			n_copy = dev->data_bytes_per_chunk - start;
++
++		cache = yaffs_find_chunk_cache(in, chunk);
++
++		/* If the chunk is already in the cache, or it is less than
++		 * a whole chunk, or we're using inband tags, then use the
++		 * cache (if there is caching); else bypass the cache.
++		 */
++		if (cache || n_copy != dev->data_bytes_per_chunk ||
++		    dev->param.inband_tags) {
++			if (dev->param.n_caches > 0) {
++
++				/* If we can't find the data in the cache,
++				 * then load it up. */
++
++				if (!cache) {
++					cache =
++					    yaffs_grab_chunk_cache(in->my_dev);
++					cache->object = in;
++					cache->chunk_id = chunk;
++					cache->dirty = 0;
++					cache->locked = 0;
++					yaffs_rd_data_obj(in, chunk,
++							  cache->data);
++					cache->n_bytes = 0;
++				}
++
++				yaffs_use_cache(dev, cache, 0);
++
++				cache->locked = 1;
++
++				memcpy(buffer, &cache->data[start], n_copy);
++
++				cache->locked = 0;
++			} else {
++				/* Read into the local buffer then copy. */
++
++				u8 *local_buffer =
++				    yaffs_get_temp_buffer(dev);
++				yaffs_rd_data_obj(in, chunk, local_buffer);
++
++				memcpy(buffer, &local_buffer[start], n_copy);
++
++				yaffs_release_temp_buffer(dev, local_buffer);
++			}
++		} else {
++			/* A full chunk. Read directly into the buffer. */
++			yaffs_rd_data_obj(in, chunk, buffer);
++		}
++		n -= n_copy;
++		offset += n_copy;
++		buffer += n_copy;
++		n_done += n_copy;
++	}
++	return n_done;
++}
++
++int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
++		     int n_bytes, int write_through)
++{
++
++	int chunk;
++	u32 start;
++	int n_copy;
++	int n = n_bytes;
++	int n_done = 0;
++	int n_writeback;
++	loff_t start_write = offset;
++	int chunk_written = 0;
++	u32 n_bytes_read;
++	loff_t chunk_start;
++	struct yaffs_dev *dev;
++
++	dev = in->my_dev;
++
++	while (n > 0 && chunk_written >= 0) {
++		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
++
++		if (((loff_t)chunk) *
++		    dev->data_bytes_per_chunk + start != offset ||
++		    start >= dev->data_bytes_per_chunk) {
++			yaffs_trace(YAFFS_TRACE_ERROR,
++				"AddrToChunk of offset %lld gives chunk %d start %d",
++				offset, chunk, start);
++		}
++		chunk++;	/* File pos to chunk in file offset */
++
++		/* OK now check for the curveball where the start and end are in
++		 * the same chunk.
++		 */
++
++		if ((start + n) < dev->data_bytes_per_chunk) {
++			n_copy = n;
++
++			/* Now calculate how many bytes to write back...
++			 * If we're overwriting and not writing to the end of
++			 * the file, then we need to write back as much as was
++			 * there before.
++			 */
++
++			chunk_start = (((loff_t)(chunk - 1)) *
++					dev->data_bytes_per_chunk);
++
++			if (chunk_start > in->variant.file_variant.file_size)
++				n_bytes_read = 0;	/* Past end of file */
++			else
++				n_bytes_read =
++				    in->variant.file_variant.file_size -
++				    chunk_start;
++
++			if (n_bytes_read > dev->data_bytes_per_chunk)
++				n_bytes_read = dev->data_bytes_per_chunk;
++
++			n_writeback =
++			    (n_bytes_read >
++			     (start + n)) ? n_bytes_read : (start + n);
++
++			if (n_writeback < 0 ||
++			    n_writeback > dev->data_bytes_per_chunk)
++				BUG();
++
++		} else {
++			n_copy = dev->data_bytes_per_chunk - start;
++			n_writeback = dev->data_bytes_per_chunk;
++		}
++
++		if (n_copy != dev->data_bytes_per_chunk ||
++		    !dev->param.cache_bypass_aligned ||
++		    dev->param.inband_tags) {
++			/* An incomplete start or end chunk (or maybe both
++			 * start and end chunk), or we're using inband tags,
++			 * or we're forcing writes through the cache,
++			 * so we want to use the cache buffers.
++			 */
++			if (dev->param.n_caches > 0) {
++				struct yaffs_cache *cache;
++
++				/* If we can't find the data in the cache, then
++				 * load the cache */
++				cache = yaffs_find_chunk_cache(in, chunk);
++
++				if (!cache &&
++				    yaffs_check_alloc_available(dev, 1)) {
++					cache = yaffs_grab_chunk_cache(dev);
++					cache->object = in;
++					cache->chunk_id = chunk;
++					cache->dirty = 0;
++					cache->locked = 0;
++					yaffs_rd_data_obj(in, chunk,
++							  cache->data);
++				} else if (cache &&
++					   !cache->dirty &&
++					   !yaffs_check_alloc_available(dev,
++									1)) {
++					/* Drop the cache if it was a read cache
++					 * item and no space check has been made
++					 * for it.
++					 */
++					cache = NULL;
++				}
++
++				if (cache) {
++					yaffs_use_cache(dev, cache, 1);
++					cache->locked = 1;
++
++					memcpy(&cache->data[start], buffer,
++					       n_copy);
++
++					cache->locked = 0;
++					cache->n_bytes = n_writeback;
++
++					if (write_through) {
++						chunk_written =
++						    yaffs_wr_data_obj
++						    (cache->object,
++						     cache->chunk_id,
++						     cache->data,
++						     cache->n_bytes, 1);
++						cache->dirty = 0;
++					}
++				} else {
++					chunk_written = -1;	/* fail write */
++				}
++			} else {
++				/* An incomplete start or end chunk (or maybe
++				 * both start and end chunk). Read into the
++				 * local buffer then copy over and write back.
++				 */
++
++				u8 *local_buffer = yaffs_get_temp_buffer(dev);
++
++				yaffs_rd_data_obj(in, chunk, local_buffer);
++				memcpy(&local_buffer[start], buffer, n_copy);
++
++				chunk_written =
++				    yaffs_wr_data_obj(in, chunk,
++						      local_buffer,
++						      n_writeback, 0);
++
++				yaffs_release_temp_buffer(dev, local_buffer);
++			}
++		} else {
++			/* A full chunk. Write directly from the buffer. */
++
++			chunk_written =
++			    yaffs_wr_data_obj(in, chunk, buffer,
++					      dev->data_bytes_per_chunk, 0);
++
++			/* Since we've overwritten the cached data,
++			 * we'd better invalidate it. */
++			yaffs_invalidate_chunk_cache(in, chunk);
++		}
++
++		if (chunk_written >= 0) {
++			n -= n_copy;
++			offset += n_copy;
++			buffer += n_copy;
++			n_done += n_copy;
++		}
++	}
++
++	/* Update file object */
++
++	if ((start_write + n_done) > in->variant.file_variant.file_size)
++		in->variant.file_variant.file_size = (start_write + n_done);
++
++	in->dirty = 1;
++	return n_done;
++}
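++
++/* Worked example for the write-back sizing above (illustrative values):
++ * with data_bytes_per_chunk == 2048, file_size == 5000, and a 100-byte
++ * write at offset 4196: chunk (1-based) = 3, start = 100,
++ * chunk_start = 2 * 2048 = 4096, n_bytes_read = 5000 - 4096 = 904, and
++ * n_writeback = max(904, 100 + 100) = 904, so the whole 904 valid bytes
++ * of the chunk are written back, not just the 100 modified ones.
++ */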
++
++int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
++		  int n_bytes, int write_through)
++{
++	yaffs2_handle_hole(in, offset);
++	return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
++}
++
++/* ---------------------- File resizing stuff ------------------ */
++
++static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
++{
++
++	struct yaffs_dev *dev = in->my_dev;
++	loff_t old_size = in->variant.file_variant.file_size;
++	int i;
++	int chunk_id;
++	u32 dummy;
++	int last_del;
++	int start_del;
++
++	if (old_size > 0)
++		yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
++	else
++		last_del = 0;
++
++	yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
++				&start_del, &dummy);
++	last_del++;
++	start_del++;
++
++	/* Delete backwards so that we don't end up with holes if
++	 * power is lost part-way through the operation.
++	 */
++	for (i = last_del; i >= start_del; i--) {
++		/* NB this could be optimised somewhat,
++		 * eg. could retrieve the tags and write them without
++		 * using yaffs_chunk_del
++		 */
++
++		chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
++
++		if (chunk_id < 1)
++			continue;
++
++		if (chunk_id <
++		    (dev->internal_start_block * dev->param.chunks_per_block) ||
++		    chunk_id >=
++		    ((dev->internal_end_block + 1) *
++		      dev->param.chunks_per_block)) {
++			yaffs_trace(YAFFS_TRACE_ALWAYS,
++				"Found daft chunk_id %d for %d",
++				chunk_id, i);
++		} else {
++			in->n_data_chunks--;
++			yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
++		}
++	}
++}
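++
++/* Worked example for the pruning above (illustrative values): with
++ * data_bytes_per_chunk == 2048, old_size == 10000 and new_size == 3000,
++ *   last_del  = chunk(9999) + 1 = 4 + 1 = 5,
++ *   start_del = chunk(3000 + 2047) + 1 = 2 + 1 = 3,
++ * so inode chunks 5, 4, 3 are deleted (backwards), leaving chunks 1-2,
++ * which still cover bytes 0..4095 and hence the new 3000-byte size.
++ */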
++
++void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
++{
++	int new_full;
++	u32 new_partial;
++	struct yaffs_dev *dev = obj->my_dev;
++
++	yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
++
++	yaffs_prune_chunks(obj, new_size);
++
++	if (new_partial != 0) {
++		int last_chunk = 1 + new_full;
++		u8 *local_buffer = yaffs_get_temp_buffer(dev);
++
++		/* Rewrite the last chunk with its new size and zero pad */
++		yaffs_rd_data_obj(obj, last_chunk, local_buffer);
++		memset(local_buffer + new_partial, 0,
++		       dev->data_bytes_per_chunk - new_partial);
++
++		yaffs_wr_data_obj(obj, last_chunk, local_buffer,
++				  new_partial, 1);
++
++		yaffs_release_temp_buffer(dev, local_buffer);
++	}
++
++	obj->variant.file_variant.file_size = new_size;
++
++	yaffs_prune_tree(dev, &obj->variant.file_variant);
++}
++
++int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
++{
++	struct yaffs_dev *dev = in->my_dev;
++	loff_t old_size = in->variant.file_variant.file_size;
++
++	yaffs_flush_file_cache(in);
++	yaffs_invalidate_whole_cache(in);
++
++	yaffs_check_gc(dev, 0);
++
++	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
++		return YAFFS_FAIL;
++
++	if (new_size == old_size)
++		return YAFFS_OK;
++
++	if (new_size > old_size) {
++		yaffs2_handle_hole(in, new_size);
++		in->variant.file_variant.file_size = new_size;
++	} else {
++		/* new_size < old_size */
++		yaffs_resize_file_down(in, new_size);
++	}
++
++	/* Write a new object header to reflect the resize,
++	 * showing that we've shrunk the file if need be.
++	 * Do this only if the file is not in the deleted directories
++	 * and is not shadowed.
++	 */
++	if (in->parent &&
++	    !in->is_shadowed &&
++	    in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++	    in->parent->obj_id != YAFFS_OBJECTID_DELETED)
++		yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
++
++	return YAFFS_OK;
++}
++
++int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
++{
++	if (!in->dirty)
++		return YAFFS_OK;
++
++	yaffs_flush_file_cache(in);
++
++	if (data_sync)
++		return YAFFS_OK;
++
++	if (update_time)
++		yaffs_load_current_time(in, 0, 0);
++
++	return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
++				YAFFS_OK : YAFFS_FAIL;
++}
++
++
++/* yaffs_del_file deletes the whole file data
++ * and the inode associated with the file.
++ * It does not delete the links associated with the file.
++ */
++static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
++{
++	int ret_val;
++	int del_now = 0;
++	struct yaffs_dev *dev = in->my_dev;
++
++	if (!in->my_inode)
++		del_now = 1;
++
++	if (del_now) {
++		ret_val =
++		    yaffs_change_obj_name(in, in->my_dev->del_dir,
++					  _Y("deleted"), 0, 0);
++		yaffs_trace(YAFFS_TRACE_TRACING,
++			"yaffs: immediate deletion of file %d",
++			in->obj_id);
++		in->deleted = 1;
++		in->my_dev->n_deleted_files++;
++		if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++			yaffs_resize_file(in, 0);
++		yaffs_soft_del_file(in);
++	} else {
++		ret_val =
++		    yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
++					  _Y("unlinked"), 0, 0);
++	}
++	return ret_val;
++}
++
++static int yaffs_del_file(struct yaffs_obj *in)
++{
++	int ret_val = YAFFS_OK;
++	int deleted;	/* Need to cache value on stack if in is freed */
++	struct yaffs_dev *dev = in->my_dev;
++
++	if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++		yaffs_resize_file(in, 0);
++
++	if (in->n_data_chunks > 0) {
++		/* Use soft deletion if there is data in the file.
++		 * That won't be the case if it has been resized to zero.
++		 */
++		if (!in->unlinked)
++			ret_val = yaffs_unlink_file_if_needed(in);
++
++		deleted = in->deleted;
++
++		if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
++			in->deleted = 1;
++			deleted = 1;
++			in->my_dev->n_deleted_files++;
++			yaffs_soft_del_file(in);
++		}
++		return deleted ? YAFFS_OK : YAFFS_FAIL;
++	} else {
++		/* The file has no data chunks so we toss it immediately */
++		yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
++		in->variant.file_variant.top = NULL;
++		yaffs_generic_obj_del(in);
++
++		return YAFFS_OK;
++	}
++}
++
++int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
++{
++	return (obj &&
++		obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
++		!(list_empty(&obj->variant.dir_variant.children));
++}
++
++static int yaffs_del_dir(struct yaffs_obj *obj)
++{
++	/* First check that the directory is empty. */
++	if (yaffs_is_non_empty_dir(obj))
++		return YAFFS_FAIL;
++
++	return yaffs_generic_obj_del(obj);
++}
++
++static int yaffs_del_symlink(struct yaffs_obj *in)
++{
++	kfree(in->variant.symlink_variant.alias);
++	in->variant.symlink_variant.alias = NULL;
++
++	return yaffs_generic_obj_del(in);
++}
++
++static int yaffs_del_link(struct yaffs_obj *in)
++{
++	/* remove this hardlink from the list associated with the equivalent
++	 * object
++	 */
++	list_del_init(&in->hard_links);
++	return yaffs_generic_obj_del(in);
++}
++
++int yaffs_del_obj(struct yaffs_obj *obj)
++{
++	int ret_val = -1;
++
++	switch (obj->variant_type) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		ret_val = yaffs_del_file(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		if (!list_empty(&obj->variant.dir_variant.dirty)) {
++			yaffs_trace(YAFFS_TRACE_BACKGROUND,
++				"Remove object %d from dirty directories",
++				obj->obj_id);
++			list_del_init(&obj->variant.dir_variant.dirty);
++		}
++		return yaffs_del_dir(obj);
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		ret_val = yaffs_del_symlink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		ret_val = yaffs_del_link(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		ret_val = yaffs_generic_obj_del(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_UNKNOWN:
++		ret_val = 0;
++		break;		/* should not happen. */
++	}
++	return ret_val;
++}
++
++static int yaffs_unlink_worker(struct yaffs_obj *obj)
++{
++	int del_now = 0;
++
++	if (!obj)
++		return YAFFS_FAIL;
++
++	if (!obj->my_inode)
++		del_now = 1;
++
++	yaffs_update_parent(obj->parent);
++
++	if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++		return yaffs_del_link(obj);
++	} else if (!list_empty(&obj->hard_links)) {
++		/* Curve ball: We're unlinking an object that has a hardlink.
++		 *
++		 * This problem arises because we are not strictly following
++		 * The Linux link/inode model.
++		 *
++		 * We can't really delete the object.
++		 * Instead, we do the following:
++		 * - Select a hardlink.
++		 * - Unhook it from the hard links
++		 * - Move it from its parent directory so that the rename works.
++		 * - Rename the object to the hardlink's name.
++		 * - Delete the hardlink
++		 */
++
++		struct yaffs_obj *hl;
++		struct yaffs_obj *parent;
++		int ret_val;
++		YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++		hl = list_entry(obj->hard_links.next, struct yaffs_obj,
++				hard_links);
++
++		yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
++		parent = hl->parent;
++
++		list_del_init(&hl->hard_links);
++
++		yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
++
++		ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
++
++		if (ret_val == YAFFS_OK)
++			ret_val = yaffs_generic_obj_del(hl);
++
++		return ret_val;
++
++	} else if (del_now) {
++		switch (obj->variant_type) {
++		case YAFFS_OBJECT_TYPE_FILE:
++			return yaffs_del_file(obj);
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++			list_del_init(&obj->variant.dir_variant.dirty);
++			return yaffs_del_dir(obj);
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++			return yaffs_del_symlink(obj);
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++			return yaffs_generic_obj_del(obj);
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++		default:
++			return YAFFS_FAIL;
++		}
++	} else if (yaffs_is_non_empty_dir(obj)) {
++		return YAFFS_FAIL;
++	} else {
++		return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
++						_Y("unlinked"), 0, 0);
++	}
++}
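++
++/* Worked example for the hardlink curve ball above (illustrative names):
++ * unlinking "a" when "b" is a hard link to the same object picks hl = "b",
++ * remembers its name and parent, parks hl in unlinked_dir, renames the
++ * object itself to "b" in that parent, and finally deletes the hl object.
++ * The net effect is that the data survives under the remaining link's
++ * name and only one directory entry goes away.
++ */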
++
++static int yaffs_unlink_obj(struct yaffs_obj *obj)
++{
++	if (obj && obj->unlink_allowed)
++		return yaffs_unlink_worker(obj);
++
++	return YAFFS_FAIL;
++}
++
++int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
++{
++	struct yaffs_obj *obj;
++
++	obj = yaffs_find_by_name(dir, name);
++	return yaffs_unlink_obj(obj);
++}
++
++/* Note:
++ * If old_name is NULL then we take old_dir as the object to be renamed.
++ */
++int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
++		     struct yaffs_obj *new_dir, const YCHAR *new_name)
++{
++	struct yaffs_obj *obj = NULL;
++	struct yaffs_obj *existing_target = NULL;
++	int force = 0;
++	int result;
++	struct yaffs_dev *dev;
++
++	if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		BUG();
++		return YAFFS_FAIL;
++	}
++	if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		BUG();
++		return YAFFS_FAIL;
++	}
++
++	dev = old_dir->my_dev;
++
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++	/* Special case for case-insensitive systems.
++	 * While look-up is case insensitive, the name isn't.
++	 * Therefore we might want to change x.txt to X.txt
++	 */
++	if (old_dir == new_dir &&
++		old_name && new_name &&
++		strcmp(old_name, new_name) == 0)
++		force = 1;
++#endif
++
++	if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
++	    YAFFS_MAX_NAME_LENGTH)
++		/* ENAMETOOLONG */
++		return YAFFS_FAIL;
++
++	if (old_name)
++		obj = yaffs_find_by_name(old_dir, old_name);
++	else {
++		obj = old_dir;
++		old_dir = obj->parent;
++	}
++
++	if (obj && obj->rename_allowed) {
++		/* Now handle an existing target, if there is one */
++		existing_target = yaffs_find_by_name(new_dir, new_name);
++		if (yaffs_is_non_empty_dir(existing_target)) {
++			return YAFFS_FAIL;	/* ENOTEMPTY */
++		} else if (existing_target && existing_target != obj) {
++			/* Nuke the target first, using shadowing,
++			 * but only if it isn't the same object.
++			 *
++			 * Note we must disable gc here otherwise it can mess
++			 * up the shadowing.
++			 *
++			 */
++			dev->gc_disable = 1;
++			yaffs_change_obj_name(obj, new_dir, new_name, force,
++					      existing_target->obj_id);
++			existing_target->is_shadowed = 1;
++			yaffs_unlink_obj(existing_target);
++			dev->gc_disable = 0;
++		}
++
++		result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
++
++		yaffs_update_parent(old_dir);
++		if (new_dir != old_dir)
++			yaffs_update_parent(new_dir);
++
++		return result;
++	}
++	return YAFFS_FAIL;
++}
++
++/*----------------------- Initialisation Scanning ---------------------- */
++
++void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
++			       int backward_scanning)
++{
++	struct yaffs_obj *obj;
++
++	if (backward_scanning) {
++		/* Handle YAFFS2 case (backward scanning)
++		 * If the shadowed object exists then ignore.
++		 */
++		obj = yaffs_find_by_number(dev, obj_id);
++		if (obj)
++			return;
++	}
++
++	/* Let's create it (if it does not exist) assuming it is a file so that
++	 * it can do shrinking etc.
++	 * We put it in the unlinked directory to be cleaned up after scanning.
++	 */
++	obj =
++	    yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
++	if (!obj)
++		return;
++	obj->is_shadowed = 1;
++	yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
++	obj->variant.file_variant.shrink_size = 0;
++	obj->valid = 1;		/* So that we don't read any other info. */
++}
++
++void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
++{
++	struct list_head *lh;
++	struct list_head *save;
++	struct yaffs_obj *hl;
++	struct yaffs_obj *in;
++
++	list_for_each_safe(lh, save, hard_list) {
++		hl = list_entry(lh, struct yaffs_obj, hard_links);
++		in = yaffs_find_by_number(dev,
++					hl->variant.hardlink_variant.equiv_id);
++
++		if (in) {
++			/* Add the hardlink pointers */
++			hl->variant.hardlink_variant.equiv_obj = in;
++			list_add(&hl->hard_links, &in->hard_links);
++		} else {
++			/* TODO: Need to report/handle this better.
++			 * Got a problem: hardlink to a non-existent object
++			 */
++			hl->variant.hardlink_variant.equiv_obj = NULL;
++			INIT_LIST_HEAD(&hl->hard_links);
++		}
++	}
++}
++
++static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
++{
++	/*
++	 *  Sort out state of unlinked and deleted objects after scanning.
++	 */
++	struct list_head *i;
++	struct list_head *n;
++	struct yaffs_obj *l;
++
++	if (dev->read_only)
++		return;
++
++	/* Soft delete all the unlinked files */
++	list_for_each_safe(i, n,
++			   &dev->unlinked_dir->variant.dir_variant.children) {
++		l = list_entry(i, struct yaffs_obj, siblings);
++		yaffs_del_obj(l);
++	}
++
++	list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
++		l = list_entry(i, struct yaffs_obj, siblings);
++		yaffs_del_obj(l);
++	}
++}
++
++/*
++ * This code iterates through all the objects making sure that they are rooted.
++ * Any unrooted objects are re-rooted in lost+found.
++ * An object needs to be in one of:
++ * - Directly under deleted, unlinked
++ * - Directly or indirectly under root.
++ *
++ * Note:
++ *  This code assumes that we don't ever change the current relationships
++ *  between directories:
++ *   root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
++ *   lost-n-found->parent == root_dir
++ *
++ * This fixes the problem where directories might have inadvertently been
++ * deleted leaving the object "hanging" without being rooted in the
++ * directory tree.
++ */
++
++static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
++{
++	return (obj == dev->del_dir ||
++		obj == dev->unlinked_dir || obj == dev->root_dir);
++}
++
++static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
++{
++	struct yaffs_obj *obj;
++	struct yaffs_obj *parent;
++	int i;
++	struct list_head *lh;
++	struct list_head *n;
++	int depth_limit;
++	int hanging;
++
++	if (dev->read_only)
++		return;
++
++	/* Iterate through the objects in each hash entry,
++	 * looking at each object.
++	 * Make sure it is rooted.
++	 */
++
++	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++		list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
++			obj = list_entry(lh, struct yaffs_obj, hash_link);
++			parent = obj->parent;
++
++			if (yaffs_has_null_parent(dev, obj)) {
++				/* These directories are not hanging */
++				hanging = 0;
++			} else if (!parent ||
++				   parent->variant_type !=
++				   YAFFS_OBJECT_TYPE_DIRECTORY) {
++				hanging = 1;
++			} else if (yaffs_has_null_parent(dev, parent)) {
++				hanging = 0;
++			} else {
++				/*
++				 * Need to follow the parent chain to
++				 * see if it is hanging.
++				 */
++				hanging = 0;
++				depth_limit = 100;
++
++				while (parent != dev->root_dir &&
++				       parent->parent &&
++				       parent->parent->variant_type ==
++				       YAFFS_OBJECT_TYPE_DIRECTORY &&
++				       depth_limit > 0) {
++					parent = parent->parent;
++					depth_limit--;
++				}
++				if (parent != dev->root_dir)
++					hanging = 1;
++			}
++			if (hanging) {
++				yaffs_trace(YAFFS_TRACE_SCAN,
++					"Hanging object %d moved to lost and found",
++					obj->obj_id);
++				yaffs_add_obj_to_dir(dev->lost_n_found, obj);
++			}
++		}
++	}
++}
++
++/*
++ * Delete directory contents for cleaning up lost and found.
++ */
++static void yaffs_del_dir_contents(struct yaffs_obj *dir)
++{
++	struct yaffs_obj *obj;
++	struct list_head *lh;
++	struct list_head *n;
++
++	if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++		BUG();
++
++	list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
++		obj = list_entry(lh, struct yaffs_obj, siblings);
++		if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
++			yaffs_del_dir_contents(obj);
++		yaffs_trace(YAFFS_TRACE_SCAN,
++			"Deleting lost_found object %d",
++			obj->obj_id);
++		yaffs_unlink_obj(obj);
++	}
++}
++
++static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
++{
++	yaffs_del_dir_contents(dev->lost_n_found);
++}
++
++
++struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
++				     const YCHAR *name)
++{
++	int sum;
++	struct list_head *i;
++	YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
++	struct yaffs_obj *l;
++
++	if (!name)
++		return NULL;
++
++	if (!directory) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"tragedy: yaffs_find_by_name: null pointer directory"
++			);
++		BUG();
++		return NULL;
++	}
++	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"tragedy: yaffs_find_by_name: non-directory"
++			);
++		BUG();
++	}
++
++	sum = yaffs_calc_name_sum(name);
++
++	list_for_each(i, &directory->variant.dir_variant.children) {
++		l = list_entry(i, struct yaffs_obj, siblings);
++
++		if (l->parent != directory)
++			BUG();
++
++		yaffs_check_obj_details_loaded(l);
++
++		/* Special case for lost-n-found */
++		if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
++			if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
++				return l;
++		} else if (l->sum == sum || l->hdr_chunk <= 0) {
++			/* Name sum matches, or the header is missing
++			 * (e.g. a lost-n-found chunk called Objnnn),
++			 * so do a real name comparison.
++			 */
++			yaffs_get_obj_name(l, buffer,
++				YAFFS_MAX_NAME_LENGTH + 1);
++			if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
++				return l;
++		}
++	}
++	return NULL;
++}
++
++/* GetEquivalentObject dereferences any hard links to get to the
++ * actual object.
++ */
++
++struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
++{
++	if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++		obj = obj->variant.hardlink_variant.equiv_obj;
++		yaffs_check_obj_details_loaded(obj);
++	}
++	return obj;
++}
++
++/*
++ * A note or two on object names:
++ *  - If the object name is missing, we make one up in the form objnnn.
++ *  - ASCII names are stored in the object header's name field from byte zero.
++ *  - Unicode names are historically stored starting from byte zero.
++ *
++ * Then there are automatic Unicode names...
++ * The purpose of these is to save names in a way that can be read as
++ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
++ * system to share files.
++ *
++ * These automatic unicode names are stored slightly differently:
++ *  - If the name can fit in the ASCII character space then it is saved as
++ *    an ascii name as per above.
++ *  - If the name needs Unicode then the name is saved in Unicode
++ *    starting at oh->name[1].
++ */
++static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
++				int buffer_size)
++{
++	/* Create an object name if we could not find one. */
++	if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
++		YCHAR local_name[20];
++		YCHAR num_string[20];
++		YCHAR *x = &num_string[19];
++		unsigned v = obj->obj_id;
++		num_string[19] = 0;
++		while (v > 0) {
++			x--;
++			*x = '0' + (v % 10);
++			v /= 10;
++		}
++		/* make up a name */
++		strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
++		strcat(local_name, x);
++		strncpy(name, local_name, buffer_size - 1);
++	}
++}
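++
++/* Example of the generated name above (assuming YAFFS_LOSTNFOUND_PREFIX
++ * is "obj"): an object with obj_id == 42 gets num_string "42" built
++ * backwards from its digits, giving the made-up name "obj42".
++ */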
++
++int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
++{
++	memset(name, 0, buffer_size * sizeof(YCHAR));
++	yaffs_check_obj_details_loaded(obj);
++	if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
++		strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
++	} else if (obj->short_name[0]) {
++		strcpy(name, obj->short_name);
++	} else if (obj->hdr_chunk > 0) {
++		int result;
++		u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
++
++		struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
++
++		memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
++
++		if (obj->hdr_chunk > 0) {
++			result = yaffs_rd_chunk_tags_nand(obj->my_dev,
++							  obj->hdr_chunk,
++							  buffer, NULL);
++		}
++		yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
++					buffer_size);
++
++		yaffs_release_temp_buffer(obj->my_dev, buffer);
++	}
++
++	yaffs_fix_null_name(obj, name, buffer_size);
++
++	return strnlen(name, YAFFS_MAX_NAME_LENGTH);
++}
++
++loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
++{
++	/* Dereference any hard linking */
++	obj = yaffs_get_equivalent_obj(obj);
++
++	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++		return obj->variant.file_variant.file_size;
++	if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
++		if (!obj->variant.symlink_variant.alias)
++			return 0;
++		return strnlen(obj->variant.symlink_variant.alias,
++				     YAFFS_MAX_ALIAS_LENGTH);
++	} else {
++		/* Only a directory should drop through to here */
++		return obj->my_dev->data_bytes_per_chunk;
++	}
++}
++
++int yaffs_get_obj_link_count(struct yaffs_obj *obj)
++{
++	int count = 0;
++	struct list_head *i;
++
++	if (!obj->unlinked)
++		count++;	/* the object itself */
++
++	list_for_each(i, &obj->hard_links)
++	    count++;		/* add the hard links; */
++
++	return count;
++}
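++
++/* For example: a file linked into a directory that also has two extra
++ * hard links reports a link count of 3 (the object itself plus the two
++ * entries on its hard_links list), while an unlinked file with no hard
++ * links reports 0.
++ */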
++
++int yaffs_get_obj_inode(struct yaffs_obj *obj)
++{
++	obj = yaffs_get_equivalent_obj(obj);
++
++	return obj->obj_id;
++}
++
++unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
++{
++	obj = yaffs_get_equivalent_obj(obj);
++
++	switch (obj->variant_type) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		return DT_REG;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		return DT_DIR;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		return DT_LNK;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		return DT_REG;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		if (S_ISFIFO(obj->yst_mode))
++			return DT_FIFO;
++		if (S_ISCHR(obj->yst_mode))
++			return DT_CHR;
++		if (S_ISBLK(obj->yst_mode))
++			return DT_BLK;
++		if (S_ISSOCK(obj->yst_mode))
++			return DT_SOCK;
++		return DT_REG;
++	default:
++		return DT_REG;
++	}
++}
++
++YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
++{
++	obj = yaffs_get_equivalent_obj(obj);
++	if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
++		return yaffs_clone_str(obj->variant.symlink_variant.alias);
++	else
++		return yaffs_clone_str(_Y(""));
++}
++
++/*--------------------------- Initialisation code -------------------------- */
++
++static int yaffs_check_dev_fns(struct yaffs_dev *dev)
++{
++	struct yaffs_driver *drv = &dev->drv;
++	struct yaffs_tags_handler *tagger = &dev->tagger;
++
++	/* Common functions, gotta have */
++	if (!drv->drv_read_chunk_fn ||
++	    !drv->drv_write_chunk_fn ||
++	    !drv->drv_erase_fn)
++		return 0;
++
++	if (dev->param.is_yaffs2 &&
++	     (!drv->drv_mark_bad_fn  || !drv->drv_check_bad_fn))
++		return 0;
++
++	/* Install the default tags marshalling functions if needed. */
++	yaffs_tags_compat_install(dev);
++	yaffs_tags_marshall_install(dev);
++
++	/* Check we now have the marshalling functions required. */
++	if (!tagger->write_chunk_tags_fn ||
++	    !tagger->read_chunk_tags_fn ||
++	    !tagger->query_block_fn ||
++	    !tagger->mark_bad_fn)
++		return 0;
++
++	return 1;
++}
++
++static int yaffs_create_initial_dir(struct yaffs_dev *dev)
++{
++	/* Initialise the unlinked, deleted, root and lost+found directories */
++	dev->lost_n_found = dev->root_dir = NULL;
++	dev->unlinked_dir = dev->del_dir = NULL;
++	dev->unlinked_dir =
++	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
++	dev->del_dir =
++	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
++	dev->root_dir =
++	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
++				  YAFFS_ROOT_MODE | S_IFDIR);
++	dev->lost_n_found =
++	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
++				  YAFFS_LOSTNFOUND_MODE | S_IFDIR);
++
++	if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
++	    && dev->del_dir) {
++		yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
++		return YAFFS_OK;
++	}
++	return YAFFS_FAIL;
++}
++
++int yaffs_guts_initialise(struct yaffs_dev *dev)
++{
++	int init_failed = 0;
++	unsigned x;
++	int bits;
++
++	yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()");
++
++	/* Check stuff that must be set */
++
++	if (!dev) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"yaffs: Need a device"
++			);
++		return YAFFS_FAIL;
++	}
++
++	if (dev->is_mounted) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
++		return YAFFS_FAIL;
++	}
++
++	dev->internal_start_block = dev->param.start_block;
++	dev->internal_end_block = dev->param.end_block;
++	dev->block_offset = 0;
++	dev->chunk_offset = 0;
++	dev->n_free_chunks = 0;
++
++	dev->gc_block = 0;
++
++	if (dev->param.start_block == 0) {
++		dev->internal_start_block = dev->param.start_block + 1;
++		dev->internal_end_block = dev->param.end_block + 1;
++		dev->block_offset = 1;
++		dev->chunk_offset = dev->param.chunks_per_block;
++	}
++
++	/* Check geometry parameters. */
++
++	if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
++		dev->param.total_bytes_per_chunk < 1024) ||
++		(!dev->param.is_yaffs2 &&
++			dev->param.total_bytes_per_chunk < 512) ||
++		(dev->param.inband_tags && !dev->param.is_yaffs2) ||
++		 dev->param.chunks_per_block < 2 ||
++		 dev->param.n_reserved_blocks < 2 ||
++		dev->internal_start_block <= 0 ||
++		dev->internal_end_block <= 0 ||
++		dev->internal_end_block <=
++		(dev->internal_start_block + dev->param.n_reserved_blocks + 2)
++		) {
++		/* otherwise it is too small */
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
++			dev->param.total_bytes_per_chunk,
++			dev->param.is_yaffs2 ? "2" : "",
++			dev->param.inband_tags);
++		return YAFFS_FAIL;
++	}
++
++	if (yaffs_init_nand(dev) != YAFFS_OK) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
++		return YAFFS_FAIL;
++	}
++
++	/* Sort out space for inband tags, if required */
++	if (dev->param.inband_tags)
++		dev->data_bytes_per_chunk =
++		    dev->param.total_bytes_per_chunk -
++		    sizeof(struct yaffs_packed_tags2_tags_only);
++	else
++		dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
++
++	/* Got the right mix of functions? */
++	if (!yaffs_check_dev_fns(dev)) {
++		/* Function missing */
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"device function(s) missing or wrong");
++
++		return YAFFS_FAIL;
++	}
++
++	/* Finished with most checks. Further checks happen later on too. */
++
++	dev->is_mounted = 1;
++
++	/* OK now calculate a few things for the device */
++
++	/*
++	 *  Calculate all the chunk size manipulation numbers:
++	 */
++	x = dev->data_bytes_per_chunk;
++	/* We always use dev->chunk_shift and dev->chunk_div */
++	dev->chunk_shift = calc_shifts(x);
++	x >>= dev->chunk_shift;
++	dev->chunk_div = x;
++	/* We only use chunk mask if chunk_div is 1 */
++	dev->chunk_mask = (1 << dev->chunk_shift) - 1;
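++
++	/* Worked example (illustrative): with 2048-byte data chunks,
++	 * chunk_shift = 11, chunk_div = 1 and chunk_mask = 0x7ff, so an
++	 * address maps to chunk (addr >> 11) and offset (addr & 0x7ff).
++	 * With a non-power-of-2 chunk such as 1984 bytes (inband tags),
++	 * chunk_shift = 6 and chunk_div = 31, so the chunk becomes
++	 * (addr >> 6) / 31 instead.
++	 */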
++
++	/*
++	 * Calculate chunk_grp_bits.
++	 * We need to find the next power of 2 greater than the total number
++	 * of chunks (chunks_per_block * (internal_end_block + 1)).
++	 */
++
++	x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
++
++	bits = calc_shifts_ceiling(x);
++
++	/* Set up tnode width if wide tnodes are enabled. */
++	if (!dev->param.wide_tnodes_disabled) {
++		/* bits must be even so that we end up with 32-bit words */
++		if (bits & 1)
++			bits++;
++		if (bits < 16)
++			dev->tnode_width = 16;
++		else
++			dev->tnode_width = bits;
++	} else {
++		dev->tnode_width = 16;
++	}
++
++	dev->tnode_mask = (1 << dev->tnode_width) - 1;
++
++	/* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
++	 * so if the bitwidth of the
++	 * chunk range we're using is greater than 16 we need
++	 * to figure out chunk shift and chunk_grp_size
++	 */
++
++	if (bits <= dev->tnode_width)
++		dev->chunk_grp_bits = 0;
++	else
++		dev->chunk_grp_bits = bits - dev->tnode_width;
++
++	dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
++	if (dev->tnode_size < sizeof(struct yaffs_tnode))
++		dev->tnode_size = sizeof(struct yaffs_tnode);
++
++	dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
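++
++	/* Illustrative example: a device with 4096 blocks of 64 chunks
++	 * spans 2^18 chunk ids, so bits = 18. With wide tnodes enabled,
++	 * tnode_width becomes 18 and chunk_grp_bits stays 0; with 16-bit
++	 * tnodes forced, chunk_grp_bits = 18 - 16 = 2, so each level-0
++	 * entry names a group of chunk_grp_size = 4 candidate chunks that
++	 * must be disambiguated by reading their tags.
++	 */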
++
++	if (dev->param.chunks_per_block < dev->chunk_grp_size) {
++		/* We have a problem because the soft delete won't work if
++		 * the chunk group size > chunks per block.
++		 * This can be remedied by using larger "virtual blocks".
++		 */
++		yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
++
++		return YAFFS_FAIL;
++	}
++
++	/* Finished verifying the device, continue with initialisation */
++
++	/* More device initialisation */
++	dev->all_gcs = 0;
++	dev->passive_gc_count = 0;
++	dev->oldest_dirty_gc_count = 0;
++	dev->bg_gcs = 0;
++	dev->gc_block_finder = 0;
++	dev->buffered_block = -1;
++	dev->doing_buffered_block_rewrite = 0;
++	dev->n_deleted_files = 0;
++	dev->n_bg_deletions = 0;
++	dev->n_unlinked_files = 0;
++	dev->n_ecc_fixed = 0;
++	dev->n_ecc_unfixed = 0;
++	dev->n_tags_ecc_fixed = 0;
++	dev->n_tags_ecc_unfixed = 0;
++	dev->n_erase_failures = 0;
++	dev->n_erased_blocks = 0;
++	dev->gc_disable = 0;
++	dev->has_pending_prioritised_gc = 1;
++		/* Assume the worst for now, will get fixed on first GC */
++	INIT_LIST_HEAD(&dev->dirty_dirs);
++	dev->oldest_dirty_seq = 0;
++	dev->oldest_dirty_block = 0;
++
++	/* Initialise temporary buffers and caches. */
++	if (!yaffs_init_tmp_buffers(dev))
++		init_failed = 1;
++
++	dev->cache = NULL;
++	dev->gc_cleanup_list = NULL;
++
++	if (!init_failed && dev->param.n_caches > 0) {
++		int i;
++		void *buf;
++		int cache_bytes =
++		    dev->param.n_caches * sizeof(struct yaffs_cache);
++
++		if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
++			dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
++
++		dev->cache = kmalloc(cache_bytes, GFP_NOFS);
++
++		buf = (u8 *) dev->cache;
++
++		if (dev->cache)
++			memset(dev->cache, 0, cache_bytes);
++
++		for (i = 0; i < dev->param.n_caches && buf; i++) {
++			dev->cache[i].object = NULL;
++			dev->cache[i].last_use = 0;
++			dev->cache[i].dirty = 0;
++			dev->cache[i].data = buf =
++			    kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
++		}
++		if (!buf)
++			init_failed = 1;
++
++		dev->cache_last_use = 0;
++	}
++
++	dev->cache_hits = 0;
++
++	if (!init_failed) {
++		dev->gc_cleanup_list =
++		    kmalloc(dev->param.chunks_per_block * sizeof(u32),
++					GFP_NOFS);
++		if (!dev->gc_cleanup_list)
++			init_failed = 1;
++	}
++
++	if (dev->param.is_yaffs2)
++		dev->param.use_header_file_size = 1;
++
++	if (!init_failed && !yaffs_init_blocks(dev))
++		init_failed = 1;
++
++	yaffs_init_tnodes_and_objs(dev);
++
++	if (!init_failed && !yaffs_create_initial_dir(dev))
++		init_failed = 1;
++
++	if (!init_failed && dev->param.is_yaffs2 &&
++		!dev->param.disable_summary &&
++		!yaffs_summary_init(dev))
++		init_failed = 1;
++
++	if (!init_failed) {
++		/* Now scan the flash. */
++		if (dev->param.is_yaffs2) {
++			if (yaffs2_checkpt_restore(dev)) {
++				yaffs_check_obj_details_loaded(dev->root_dir);
++				yaffs_trace(YAFFS_TRACE_CHECKPOINT |
++					YAFFS_TRACE_MOUNT,
++					"yaffs: restored from checkpoint"
++					);
++			} else {
++
++				/* Clean up the mess caused by an aborted
++				 * checkpoint load then scan backwards.
++				 */
++				yaffs_deinit_blocks(dev);
++
++				yaffs_deinit_tnodes_and_objs(dev);
++
++				dev->n_erased_blocks = 0;
++				dev->n_free_chunks = 0;
++				dev->alloc_block = -1;
++				dev->alloc_page = -1;
++				dev->n_deleted_files = 0;
++				dev->n_unlinked_files = 0;
++				dev->n_bg_deletions = 0;
++
++				if (!init_failed && !yaffs_init_blocks(dev))
++					init_failed = 1;
++
++				yaffs_init_tnodes_and_objs(dev);
++
++				if (!init_failed
++				    && !yaffs_create_initial_dir(dev))
++					init_failed = 1;
++
++				if (!init_failed && !yaffs2_scan_backwards(dev))
++					init_failed = 1;
++			}
++		} else if (!yaffs1_scan(dev)) {
++			init_failed = 1;
++		}
++
++		yaffs_strip_deleted_objs(dev);
++		yaffs_fix_hanging_objs(dev);
++		if (dev->param.empty_lost_n_found)
++			yaffs_empty_l_n_f(dev);
++	}
++
++	if (init_failed) {
++		/* Clean up the mess */
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++		  "yaffs: yaffs_guts_initialise() aborted.");
++
++		yaffs_deinitialise(dev);
++		return YAFFS_FAIL;
++	}
++
++	/* Zero out stats */
++	dev->n_page_reads = 0;
++	dev->n_page_writes = 0;
++	dev->n_erasures = 0;
++	dev->n_gc_copies = 0;
++	dev->n_retried_writes = 0;
++
++	dev->n_retired_blocks = 0;
++
++	yaffs_verify_free_chunks(dev);
++	yaffs_verify_blocks(dev);
++
++	/* Clean up any aborted checkpoint data */
++	if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
++		yaffs2_checkpt_invalidate(dev);
++
++	yaffs_trace(YAFFS_TRACE_ALWAYS,
++	  "yaffs: yaffs_guts_initialise() done.");
++	return YAFFS_OK;
++}
++
++void yaffs_deinitialise(struct yaffs_dev *dev)
++{
++	if (dev->is_mounted) {
++		int i;
++
++		yaffs_deinit_blocks(dev);
++		yaffs_deinit_tnodes_and_objs(dev);
++		yaffs_summary_deinit(dev);
++
++		if (dev->param.n_caches > 0 && dev->cache) {
++
++			for (i = 0; i < dev->param.n_caches; i++) {
++				kfree(dev->cache[i].data);
++				dev->cache[i].data = NULL;
++			}
++
++			kfree(dev->cache);
++			dev->cache = NULL;
++		}
++
++		kfree(dev->gc_cleanup_list);
++
++		for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
++			kfree(dev->temp_buffer[i].buffer);
++
++		dev->is_mounted = 0;
++
++		yaffs_deinit_nand(dev);
++	}
++}
++
++int yaffs_count_free_chunks(struct yaffs_dev *dev)
++{
++	int n_free = 0;
++	int b;
++	struct yaffs_block_info *blk;
++
++	blk = dev->block_info;
++	for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
++		switch (blk->block_state) {
++		case YAFFS_BLOCK_STATE_EMPTY:
++		case YAFFS_BLOCK_STATE_ALLOCATING:
++		case YAFFS_BLOCK_STATE_COLLECTING:
++		case YAFFS_BLOCK_STATE_FULL:
++			n_free +=
++			    (dev->param.chunks_per_block - blk->pages_in_use +
++			     blk->soft_del_pages);
++			break;
++		default:
++			break;
++		}
++		blk++;
++	}
++	return n_free;
++}
++
++int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
++{
++	/* This is what we report to the outside world */
++	int n_free;
++	int n_dirty_caches;
++	int blocks_for_checkpt;
++	int i;
++
++	n_free = dev->n_free_chunks;
++	n_free += dev->n_deleted_files;
++
++	/* Now count and subtract the number of dirty chunks in the cache. */
++
++	for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
++		if (dev->cache[i].dirty)
++			n_dirty_caches++;
++	}
++
++	n_free -= n_dirty_caches;
++
++	n_free -=
++	    ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
++
++	/* Now figure checkpoint space and report that... */
++	blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
++
++	n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
++
++	if (n_free < 0)
++		n_free = 0;
++
++	return n_free;
++}
++
++
++int yaffs_format_dev(struct yaffs_dev *dev)
++{
++	int i;
++	enum yaffs_block_state state;
++	u32 dummy;
++
++	if(dev->is_mounted)
++		return YAFFS_FAIL;
++
++	/*
++	* The runtime variables might not have been set up,
++	* so set up what we need.
++	*/
++	dev->internal_start_block = dev->param.start_block;
++	dev->internal_end_block = dev->param.end_block;
++	dev->block_offset = 0;
++	dev->chunk_offset = 0;
++
++	if (dev->param.start_block == 0) {
++		dev->internal_start_block = dev->param.start_block + 1;
++		dev->internal_end_block = dev->param.end_block + 1;
++		dev->block_offset = 1;
++		dev->chunk_offset = dev->param.chunks_per_block;
++	}
++
++	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++		yaffs_query_init_block_state(dev, i, &state, &dummy);
++		if (state != YAFFS_BLOCK_STATE_DEAD)
++			yaffs_erase_block(dev, i);
++	}
++
++	return YAFFS_OK;
++}
++
++
++/*
++ * Marshalling functions to get loff_t file sizes into and out of
++ * object headers.
++ */
++void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)
++{
++	oh->file_size_low = (fsize & 0xFFFFFFFF);
++	oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
++}
++
++loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)
++{
++	loff_t retval;
++
++	if (sizeof(loff_t) >= 8 && ~(oh->file_size_high))
++		retval = (((loff_t) oh->file_size_high) << 32) |
++			(((loff_t) oh->file_size_low) & 0xFFFFFFFF);
++	else
++		retval = (loff_t) oh->file_size_low;
++
++	return retval;
++}
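++
++/* Usage sketch (illustrative): for fsize = 0x123456789 the loader stores
++ * file_size_low = 0x23456789 and file_size_high = 0x1, and
++ * yaffs_oh_to_size() reassembles the same value. When loff_t is only
++ * 32 bits, or when file_size_high is all ones (a header that never
++ * stored a high word), just the low word is used.
++ */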
+diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h
+new file mode 100644
+index 00000000..64929ed3
+--- /dev/null
++++ b/fs/yaffs2/yaffs_guts.h
+@@ -0,0 +1,990 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GUTS_H__
++#define __YAFFS_GUTS_H__
++
++#include "yportenv.h"
++
++#define YAFFS_OK	1
++#define YAFFS_FAIL  0
++
++/* Give us a  Y=0x59,
++ * Give us an A=0x41,
++ * Give us an FF=0xff
++ * Give us an S=0x53
++ * And what have we got...
++ */
++#define YAFFS_MAGIC			0x5941ff53
++
++/*
++ * Tnodes form a tree with the tnodes in "levels"
++ * Levels greater than 0 hold 8 slots which point to other tnodes.
++ * Those at level 0 hold 16 slots which point to chunks in NAND.
++ *
++ * A tree with a maximum level of MAX_LEVEL supports files with up to
++ *
++ * 2^(3*MAX_LEVEL + 4)
++ *
++ * chunks. Thus the max level of 8 supports files with up to 2^28 chunks,
++ * which gives a maximum file size of around 512 Gbytes with 2k chunks.
++ */
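++
++/* To spell out the arithmetic: level 0 resolves 4 bits (16 slots) and
++ * each of the up-to-8 internal levels resolves 3 bits (8 slots), so a
++ * full tree addresses 4 + 3*8 = 28 bits of chunk id, and
++ * 2^28 chunks * 2048 bytes/chunk = 2^39 bytes = 512 Gbytes.
++ */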
++#define YAFFS_NTNODES_LEVEL0		16
++#define YAFFS_TNODES_LEVEL0_BITS	4
++#define YAFFS_TNODES_LEVEL0_MASK	0xf
++
++#define YAFFS_NTNODES_INTERNAL		(YAFFS_NTNODES_LEVEL0 / 2)
++#define YAFFS_TNODES_INTERNAL_BITS	(YAFFS_TNODES_LEVEL0_BITS - 1)
++#define YAFFS_TNODES_INTERNAL_MASK	0x7
++#define YAFFS_TNODES_MAX_LEVEL		8
++#define YAFFS_TNODES_MAX_BITS		(YAFFS_TNODES_LEVEL0_BITS + \
++					YAFFS_TNODES_INTERNAL_BITS * \
++					YAFFS_TNODES_MAX_LEVEL)
++#define YAFFS_MAX_CHUNK_ID		((1 << YAFFS_TNODES_MAX_BITS) - 1)
++
++#define YAFFS_MAX_FILE_SIZE_32		0x7fffffff
++
++/* Constants for YAFFS1 mode */
++#define YAFFS_BYTES_PER_SPARE		16
++#define YAFFS_BYTES_PER_CHUNK		512
++#define YAFFS_CHUNK_SIZE_SHIFT		9
++#define YAFFS_CHUNKS_PER_BLOCK		32
++#define YAFFS_BYTES_PER_BLOCK	(YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
++
++#define YAFFS_MIN_YAFFS2_CHUNK_SIZE	1024
++#define YAFFS_MIN_YAFFS2_SPARE_SIZE	32
++
++
++
++#define YAFFS_ALLOCATION_NOBJECTS	100
++#define YAFFS_ALLOCATION_NTNODES	100
++#define YAFFS_ALLOCATION_NLINKS		100
++
++#define YAFFS_NOBJECT_BUCKETS		256
++
++#define YAFFS_OBJECT_SPACE		0x40000
++#define YAFFS_MAX_OBJECT_ID		(YAFFS_OBJECT_SPACE - 1)
++
++/* Binary data version stamps */
++#define YAFFS_SUMMARY_VERSION		1
++#define YAFFS_CHECKPOINT_VERSION	7
++
++#ifdef CONFIG_YAFFS_UNICODE
++#define YAFFS_MAX_NAME_LENGTH		127
++#define YAFFS_MAX_ALIAS_LENGTH		79
++#else
++#define YAFFS_MAX_NAME_LENGTH		255
++#define YAFFS_MAX_ALIAS_LENGTH		159
++#endif
++
++#define YAFFS_SHORT_NAME_LENGTH		15
++
++/* Some special object ids for pseudo objects */
++#define YAFFS_OBJECTID_ROOT		1
++#define YAFFS_OBJECTID_LOSTNFOUND	2
++#define YAFFS_OBJECTID_UNLINKED		3
++#define YAFFS_OBJECTID_DELETED		4
++
++/* Fake object Id for summary data */
++#define YAFFS_OBJECTID_SUMMARY		0x10
++
++/* Pseudo object ids for checkpointing */
++#define YAFFS_OBJECTID_CHECKPOINT_DATA	0x20
++#define YAFFS_SEQUENCE_CHECKPOINT_DATA	0x21
++
++#define YAFFS_MAX_SHORT_OP_CACHES	20
++
++#define YAFFS_N_TEMP_BUFFERS		6
++
++/* We limit the number of attempts at successfully saving a chunk of data.
++ * Small-page devices have 32 pages per block; large-page devices have 64.
++ * Default to something in the order of 5 to 10 blocks worth of chunks.
++ */
++#define YAFFS_WR_ATTEMPTS		(5*64)
++
++/* Sequence numbers are used in YAFFS2 to determine block allocation order.
++ * The range is limited slightly to help distinguish bad numbers from good.
++ * This also allows us to perhaps in the future use special numbers for
++ * special purposes.
++ * EFFFFF00 allows the allocation of 8 blocks/second (~1Mbytes) for 15 years,
++ * and is a larger number than the lifetime of a 2GB device.
++ */
++#define YAFFS_LOWEST_SEQUENCE_NUMBER	0x00001000
++#define YAFFS_HIGHEST_SEQUENCE_NUMBER	0xefffff00
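++
++/* Sanity check on the claim above (illustrative arithmetic): the usable
++ * range is 0xefffff00 - 0x00001000, about 4.0e9 sequence numbers; at
++ * 8 block allocations per second that is ~5.0e8 seconds, roughly
++ * 16 years.
++ */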
++
++/* Special sequence number for bad block that failed to be marked bad */
++#define YAFFS_SEQUENCE_BAD_BLOCK	0xffff0000
++
++/* ChunkCache is used for short read/write operations.*/
++struct yaffs_cache {
++	struct yaffs_obj *object;
++	int chunk_id;
++	int last_use;
++	int dirty;
++	int n_bytes;		/* Only valid if the cache is dirty */
++	int locked;		/* Can't push out or flush while locked. */
++	u8 *data;
++};
++
++/* yaffs1 tags structures in RAM
++ * NB This uses bitfield. Bitfields should not straddle a u32 boundary
++ * otherwise the structure size will get blown out.
++ */
++
++struct yaffs_tags {
++	unsigned chunk_id:20;
++	unsigned serial_number:2;
++	unsigned n_bytes_lsb:10;
++	unsigned obj_id:18;
++	unsigned ecc:12;
++	unsigned n_bytes_msb:2;
++};
++
++union yaffs_tags_union {
++	struct yaffs_tags as_tags;
++	u8 as_bytes[8];
++};
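++
++/* Packing note: chunk_id(20) + serial_number(2) + n_bytes_lsb(10) fill
++ * one 32-bit word and obj_id(18) + ecc(12) + n_bytes_msb(2) fill the
++ * other, giving exactly the 8 bytes mirrored by as_bytes[8]; no field
++ * straddles a u32 boundary.
++ */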
++
++
++/* Stuff used for extended tags in YAFFS2 */
++
++enum yaffs_ecc_result {
++	YAFFS_ECC_RESULT_UNKNOWN,
++	YAFFS_ECC_RESULT_NO_ERROR,
++	YAFFS_ECC_RESULT_FIXED,
++	YAFFS_ECC_RESULT_UNFIXED
++};
++
++enum yaffs_obj_type {
++	YAFFS_OBJECT_TYPE_UNKNOWN,
++	YAFFS_OBJECT_TYPE_FILE,
++	YAFFS_OBJECT_TYPE_SYMLINK,
++	YAFFS_OBJECT_TYPE_DIRECTORY,
++	YAFFS_OBJECT_TYPE_HARDLINK,
++	YAFFS_OBJECT_TYPE_SPECIAL
++};
++
++#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
++
++struct yaffs_ext_tags {
++	unsigned chunk_used;	/*  Status of the chunk: used or unused */
++	unsigned obj_id;	/* If 0 this is not used */
++	unsigned chunk_id;	/* If 0 this is a header, else a data chunk */
++	unsigned n_bytes;	/* Only valid for data chunks */
++
++	/* The following stuff only has meaning when we read */
++	enum yaffs_ecc_result ecc_result;
++	unsigned block_bad;
++
++	/* YAFFS 1 stuff */
++	unsigned is_deleted;	/* The chunk is marked deleted */
++	unsigned serial_number;	/* Yaffs1 2-bit serial number */
++
++	/* YAFFS2 stuff */
++	unsigned seq_number;	/* The sequence number of this block */
++
++	/* Extra info if this is an object header (YAFFS2 only) */
++
++	unsigned extra_available;	/* Extra info available if not zero */
++	unsigned extra_parent_id;	/* The parent object */
++	unsigned extra_is_shrink;	/* Is it a shrink header? */
++	unsigned extra_shadows;	/* Does this shadow another object? */
++
++	enum yaffs_obj_type extra_obj_type;	/* What object type? */
++
++	loff_t extra_file_size;		/* Length if it is a file */
++	unsigned extra_equiv_id;	/* Equivalent object for a hard link */
++};
++
++/* Spare structure for YAFFS1 */
++struct yaffs_spare {
++	u8 tb0;
++	u8 tb1;
++	u8 tb2;
++	u8 tb3;
++	u8 page_status;		/* set to 0 to delete the chunk */
++	u8 block_status;
++	u8 tb4;
++	u8 tb5;
++	u8 ecc1[3];
++	u8 tb6;
++	u8 tb7;
++	u8 ecc2[3];
++};
++
++/*Special structure for passing through to mtd */
++struct yaffs_nand_spare {
++	struct yaffs_spare spare;
++	int eccres1;
++	int eccres2;
++};
++
++/* Block data in RAM */
++
++enum yaffs_block_state {
++	YAFFS_BLOCK_STATE_UNKNOWN = 0,
++
++	YAFFS_BLOCK_STATE_SCANNING,
++	/* Being scanned */
++
++	YAFFS_BLOCK_STATE_NEEDS_SCAN,
++	/* The block might have something on it (i.e. it is allocating or
++	 * full, perhaps empty) but it needs to be scanned to determine its
++	 * true state.
++	 * This state is only valid during scanning.
++	 * NB We tolerate empty because the pre-scanner might be incapable
++	 * of deciding.
++	 * However, if this state is returned on a YAFFS2 device,
++	 * then we expect a sequence number.
++	 */
++
++	YAFFS_BLOCK_STATE_EMPTY,
++	/* This block is empty */
++
++	YAFFS_BLOCK_STATE_ALLOCATING,
++	/* This block is partially allocated.
++	 * At least one page holds valid data.
++	 * This is the one currently being used for page
++	 * allocation. Should never be more than one of these.
++	 * If a block is only partially allocated at mount it is treated as
++	 * full.
++	 */
++
++	YAFFS_BLOCK_STATE_FULL,
++	/* All the pages in this block have been allocated.
++	 * If a block was only partially allocated when mounted we treat
++	 * it as fully allocated.
++	 */
++
++	YAFFS_BLOCK_STATE_DIRTY,
++	/* The block was full and now all chunks have been deleted.
++	 * Erase me, reuse me.
++	 */
++
++	YAFFS_BLOCK_STATE_CHECKPOINT,
++	/* This block is assigned to holding checkpoint data. */
++
++	YAFFS_BLOCK_STATE_COLLECTING,
++	/* This block is being garbage collected */
++
++	YAFFS_BLOCK_STATE_DEAD
++	    /* This block has failed and is not in use */
++};
++
++#define	YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
++
++struct yaffs_block_info {
++
++	int soft_del_pages:10;	/* number of soft deleted pages */
++	int pages_in_use:10;	/* number of pages in use */
++	unsigned block_state:4;	/* One of the above block states. */
++				/* NB use unsigned because enum is sometimes
++				 * an int */
++	u32 needs_retiring:1;	/* Data has failed on this block, */
++				/*need to get valid data off and retire*/
++	u32 skip_erased_check:1;/* Skip the erased check on this block */
++	u32 gc_prioritise:1;	/* An ECC check or blank check has failed.
++				   Block should be prioritised for GC */
++	u32 chunk_error_strikes:3;	/* How many times we've had ecc etc
++				failures on this block and tried to reuse it */
++	u32 has_summary:1;	/* The block has a summary */
++
++	u32 has_shrink_hdr:1;	/* This block has at least one shrink header */
++	u32 seq_number;		/* block sequence number for yaffs2 */
++
++};
++
++/* -------------------------- Object structure -------------------------------*/
++/* This is the object structure as stored on NAND */
++
++struct yaffs_obj_hdr {
++	enum yaffs_obj_type type;
++
++	/* Apply to everything  */
++	int parent_obj_id;
++	u16 sum_no_longer_used;	/* checksum of name. No longer used */
++	YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++	/* The following apply to all object types except for hard links */
++	u32 yst_mode;		/* protection */
++
++	u32 yst_uid;
++	u32 yst_gid;
++	u32 yst_atime;
++	u32 yst_mtime;
++	u32 yst_ctime;
++
++	/* File size  applies to files only */
++	u32 file_size_low;
++
++	/* Equivalent object id applies to hard links only. */
++	int equiv_id;
++
++	/* Alias is for symlinks only. */
++	YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
++
++	u32 yst_rdev;	/* stuff for block and char devices (major/min) */
++
++	u32 win_ctime[2];
++	u32 win_atime[2];
++	u32 win_mtime[2];
++
++	u32 inband_shadowed_obj_id;
++	u32 inband_is_shrink;
++
++	u32 file_size_high;
++	u32 reserved[1];
++	int shadows_obj;	/* This object header shadows the
++				specified object if > 0 */
++
++	/* is_shrink applies to object headers written when we make a hole. */
++	u32 is_shrink;
++
++};
++
++/*--------------------------- Tnode -------------------------- */
++
++struct yaffs_tnode {
++	struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
++};
++
++/*------------------------  Object -----------------------------*/
++/* An object can be one of:
++ * - a directory (no data, has children links)
++ * - a regular file (data.... not prunes :->).
++ * - a symlink [symbolic link] (the alias).
++ * - a hard link
++ */
++
++struct yaffs_file_var {
++	loff_t file_size;
++	loff_t scanned_size;
++	loff_t shrink_size;
++	int top_level;
++	struct yaffs_tnode *top;
++};
++
++struct yaffs_dir_var {
++	struct list_head children;	/* list of child links */
++	struct list_head dirty;	/* Entry for list of dirty directories */
++};
++
++struct yaffs_symlink_var {
++	YCHAR *alias;
++};
++
++struct yaffs_hardlink_var {
++	struct yaffs_obj *equiv_obj;
++	u32 equiv_id;
++};
++
++union yaffs_obj_var {
++	struct yaffs_file_var file_variant;
++	struct yaffs_dir_var dir_variant;
++	struct yaffs_symlink_var symlink_variant;
++	struct yaffs_hardlink_var hardlink_variant;
++};
++
++struct yaffs_obj {
++	u8 deleted:1;		/* This should only apply to unlinked files. */
++	u8 soft_del:1;		/* it has also been soft deleted */
++	u8 unlinked:1;		/* An unlinked file.*/
++	u8 fake:1;		/* A fake object has no presence on NAND. */
++	u8 rename_allowed:1;	/* Some objects cannot be renamed. */
++	u8 unlink_allowed:1;
++	u8 dirty:1;		/* the object needs to be written to flash */
++	u8 valid:1;		/* When the file system is being loaded up,
++				 * this object might be created before the
++				 * data is available, i.e. file data chunks
++				 * are encountered before the header.
++				 */
++	u8 lazy_loaded:1;	/* This object has been lazy loaded and
++				 * is missing some detail */
++
++	u8 defered_free:1;	/* Object is removed from NAND, but is
++				 * still in the inode cache.
++				 * Freeing of the object is deferred
++				 * until the inode is released.
++				 */
++	u8 being_created:1;	/* This object is still being created
++				 * so skip some verification checks. */
++	u8 is_shadowed:1;	/* This object is shadowed on the way
++				 * to being renamed. */
++
++	u8 xattr_known:1;	/* We know whether this object has xattribs
++				 * or not. */
++	u8 has_xattr:1;		/* This object has xattribs.
++				 * Only valid if xattr_known. */
++
++	u8 serial;		/* serial number of chunk in NAND.*/
++	u16 sum;		/* sum of the name to speed searching */
++
++	struct yaffs_dev *my_dev;	/* The device I'm on */
++
++	struct list_head hash_link;	/* list of objects in hash bucket */
++
++	struct list_head hard_links;	/* hard linked object chain*/
++
++	/* directory structure stuff */
++	/* also used for linking up the free list */
++	struct yaffs_obj *parent;
++	struct list_head siblings;
++
++	/* Where's my object header in NAND? */
++	int hdr_chunk;
++
++	int n_data_chunks;	/* Number of data chunks for this file. */
++
++	u32 obj_id;		/* the object id value */
++
++	u32 yst_mode;
++
++	YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
++
++#ifdef CONFIG_YAFFS_WINCE
++	u32 win_ctime[2];
++	u32 win_mtime[2];
++	u32 win_atime[2];
++#else
++	u32 yst_uid;
++	u32 yst_gid;
++	u32 yst_atime;
++	u32 yst_mtime;
++	u32 yst_ctime;
++#endif
++
++	u32 yst_rdev;
++
++	void *my_inode;
++
++	enum yaffs_obj_type variant_type;
++
++	union yaffs_obj_var variant;
++
++};
++
++struct yaffs_obj_bucket {
++	struct list_head list;
++	int count;
++};
++
++/* yaffs_checkpt_obj holds the definition of an object as dumped
++ * by checkpointing.
++ */
++
++struct yaffs_checkpt_obj {
++	int struct_type;
++	u32 obj_id;
++	u32 parent_id;
++	int hdr_chunk;
++	enum yaffs_obj_type variant_type:3;
++	u8 deleted:1;
++	u8 soft_del:1;
++	u8 unlinked:1;
++	u8 fake:1;
++	u8 rename_allowed:1;
++	u8 unlink_allowed:1;
++	u8 serial;
++	int n_data_chunks;
++	loff_t size_or_equiv_obj;
++};
++
++/*--------------------- Temporary buffers ----------------
++ *
++ * These are chunk-sized working buffers. Each device has a few.
++ */
++
++struct yaffs_buffer {
++	u8 *buffer;
++	int in_use;
++};
++
++/*----------------- Device ---------------------------------*/
++
++struct yaffs_param {
++	const YCHAR *name;
++
++	/*
++	 * Entry parameters set up way early. Yaffs sets up the rest.
++	 * The structure should be zeroed out before use so that unused
++	 * and default values are zero.
++	 */
++
++	int inband_tags;	/* Use inband tags */
++	u32 total_bytes_per_chunk;	/* Should be >= 512, does not need to
++					 be a power of 2 */
++	int chunks_per_block;	/* does not need to be a power of 2 */
++	int spare_bytes_per_chunk;	/* spare area size */
++	int start_block;	/* Start block we're allowed to use */
++	int end_block;		/* End block we're allowed to use */
++	int n_reserved_blocks;	/* Tuneable so that we can reduce
++				 * reserved blocks on NOR and RAM. */
++
++	int n_caches;		/* If <= 0, then short op caching is disabled,
++				 * else the number of short op caches.
++				 */
++	int cache_bypass_aligned; /* If non-zero then bypass the cache for
++				   * aligned writes.
++				   */
++
++	int use_nand_ecc;	/* Flag to decide whether or not to use
++				 * NAND driver ECC on data (yaffs1) */
++	int tags_9bytes;	/* Use 9 byte tags */
++	int no_tags_ecc;	/* Flag to decide whether or not to do ECC
++				 * on packed tags (yaffs2) */
++
++	int is_yaffs2;		/* Use yaffs2 mode on this device */
++
++	int empty_lost_n_found;	/* Auto-empty lost+found directory on mount */
++
++	int refresh_period;	/* How often to check for a block refresh */
++
++	/* Checkpoint control. Can be set before or after initialisation */
++	u8 skip_checkpt_rd;
++	u8 skip_checkpt_wr;
++
++	int enable_xattr;	/* Enable xattribs */
++
++	int max_objects;	/*
++				 * Set to limit the number of objects created.
++				 * 0 = no limit.
++				*/
++
++	/* The remove_obj_fn function must be supplied by OS flavours that
++	 * need it.
++	 * yaffs direct uses it to implement the faster readdir.
++	 * Linux uses it to protect the directory during unlocking.
++	 */
++	void (*remove_obj_fn) (struct yaffs_obj *obj);
++
++	/* Callback to mark the superblock dirty */
++	void (*sb_dirty_fn) (struct yaffs_dev *dev);
++
++	/*  Callback to control garbage collection. */
++	unsigned (*gc_control_fn) (struct yaffs_dev *dev);
++
++	/* Debug control flags. Don't use unless you know what you're doing */
++	int use_header_file_size;	/* Flag to determine if we should use
++					 * file sizes from the header */
++	int disable_lazy_load;	/* Disable lazy loading on this device */
++	int wide_tnodes_disabled;	/* Set to disable wide tnodes */
++	int disable_soft_del;	/* yaffs 1 only: Set to disable the use of
++				 * softdeletion. */
++
++	int defered_dir_update;	/* Set to defer directory updates */
++
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++	int auto_unicode;
++#endif
++	int always_check_erased;	/* Force chunk erased check always on */
++
++	int disable_summary;
++
++};
++
++struct yaffs_driver {
++	int (*drv_write_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
++				   const u8 *data, int data_len,
++				   const u8 *oob, int oob_len);
++	int (*drv_read_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
++				   u8 *data, int data_len,
++				   u8 *oob, int oob_len,
++				   enum yaffs_ecc_result *ecc_result);
++	int (*drv_erase_fn) (struct yaffs_dev *dev, int block_no);
++	int (*drv_mark_bad_fn) (struct yaffs_dev *dev, int block_no);
++	int (*drv_check_bad_fn) (struct yaffs_dev *dev, int block_no);
++	int (*drv_initialise_fn) (struct yaffs_dev *dev);
++	int (*drv_deinitialise_fn) (struct yaffs_dev *dev);
++};
++
++struct yaffs_tags_handler {
++	int (*write_chunk_tags_fn) (struct yaffs_dev *dev,
++				    int nand_chunk, const u8 *data,
++				    const struct yaffs_ext_tags *tags);
++	int (*read_chunk_tags_fn) (struct yaffs_dev *dev,
++				   int nand_chunk, u8 *data,
++				   struct yaffs_ext_tags *tags);
++
++	int (*query_block_fn) (struct yaffs_dev *dev, int block_no,
++			       enum yaffs_block_state *state,
++			       u32 *seq_number);
++	int (*mark_bad_fn) (struct yaffs_dev *dev, int block_no);
++};
++
++struct yaffs_dev {
++	struct yaffs_param param;
++	struct yaffs_driver drv;
++	struct yaffs_tags_handler tagger;
++
++	/* Context storage. Holds extra OS specific data for this device */
++
++	void *os_context;
++	void *driver_context;
++
++	struct list_head dev_list;
++
++	/* Runtime parameters. Set up by YAFFS. */
++	int data_bytes_per_chunk;
++
++	/* Non-wide tnode stuff */
++	u16 chunk_grp_bits;	/* Number of bits that need to be resolved if
++				 * the tnodes are not wide enough.
++				 */
++	u16 chunk_grp_size;	/* == 2^chunk_grp_bits */
++
++	/* Stuff to support wide tnodes */
++	u32 tnode_width;
++	u32 tnode_mask;
++	u32 tnode_size;
++
++	/* Stuff for figuring out file offset to chunk conversions */
++	u32 chunk_shift;	/* Shift value */
++	u32 chunk_div;		/* Divisor after shifting: 1 for 2^n sizes */
++	u32 chunk_mask;		/* Mask to use for power-of-2 case */
++
++	int is_mounted;
++	int read_only;
++	int is_checkpointed;
++
++	/* Stuff to support block offsetting to support start block zero */
++	int internal_start_block;
++	int internal_end_block;
++	int block_offset;
++	int chunk_offset;
++
++	/* Runtime checkpointing stuff */
++	int checkpt_page_seq;	/* running sequence number of checkpt pages */
++	int checkpt_byte_count;
++	int checkpt_byte_offs;
++	u8 *checkpt_buffer;
++	int checkpt_open_write;
++	int blocks_in_checkpt;
++	int checkpt_cur_chunk;
++	int checkpt_cur_block;
++	int checkpt_next_block;
++	int *checkpt_block_list;
++	int checkpt_max_blocks;
++	u32 checkpt_sum;
++	u32 checkpt_xor;
++
++	int checkpoint_blocks_required;	/* Number of blocks needed to store
++					 * current checkpoint set */
++
++	/* Block Info */
++	struct yaffs_block_info *block_info;
++	u8 *chunk_bits;		/* bitmap of chunks in use */
++	unsigned block_info_alt:1;	/* allocated using alternative alloc */
++	unsigned chunk_bits_alt:1;	/* allocated using alternative alloc */
++	int chunk_bit_stride;	/* Number of bytes of chunk_bits per block.
++				 * Must be consistent with chunks_per_block.
++				 */
++
++	int n_erased_blocks;
++	int alloc_block;	/* Current block being allocated off */
++	u32 alloc_page;
++	int alloc_block_finder;	/* Used to search for next allocation block */
++
++	/* Object and Tnode memory management */
++	void *allocator;
++	int n_obj;
++	int n_tnodes;
++
++	int n_hardlinks;
++
++	struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
++	u32 bucket_finder;
++
++	int n_free_chunks;
++
++	/* Garbage collection control */
++	u32 *gc_cleanup_list;	/* objects to delete at the end of a GC. */
++	u32 n_clean_ups;
++
++	unsigned has_pending_prioritised_gc;	/* We think this device might
++						have pending prioritised gcs */
++	unsigned gc_disable;
++	unsigned gc_block_finder;
++	unsigned gc_dirtiest;
++	unsigned gc_pages_in_use;
++	unsigned gc_not_done;
++	unsigned gc_block;
++	unsigned gc_chunk;
++	unsigned gc_skip;
++	struct yaffs_summary_tags *gc_sum_tags;
++
++	/* Special directories */
++	struct yaffs_obj *root_dir;
++	struct yaffs_obj *lost_n_found;
++
++	int buffered_block;	/* Which block is buffered here? */
++	int doing_buffered_block_rewrite;
++
++	struct yaffs_cache *cache;
++	int cache_last_use;
++
++	/* Stuff for background deletion and unlinked files. */
++	struct yaffs_obj *unlinked_dir;	/* Directory where unlinked and deleted
++					 files live. */
++	struct yaffs_obj *del_dir;	/* Directory where deleted objects are
++					sent to disappear. */
++	struct yaffs_obj *unlinked_deletion;	/* Current file being
++							background deleted. */
++	int n_deleted_files;	/* Count of files awaiting deletion; */
++	int n_unlinked_files;	/* Count of unlinked files. */
++	int n_bg_deletions;	/* Count of background deletions. */
++
++	/* Temporary buffer management */
++	struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
++	int max_temp;
++	int temp_in_use;
++	int unmanaged_buffer_allocs;
++	int unmanaged_buffer_deallocs;
++
++	/* yaffs2 runtime stuff */
++	unsigned seq_number;	/* Sequence number of currently
++					allocating block */
++	unsigned oldest_dirty_seq;
++	unsigned oldest_dirty_block;
++
++	/* Block refreshing */
++	int refresh_skip;	/* A skip down counter.
++				 * Refresh happens when this gets to zero. */
++
++	/* Dirty directory handling */
++	struct list_head dirty_dirs;	/* List of dirty directories */
++
++	/* Summary */
++	int chunks_per_summary;
++	struct yaffs_summary_tags *sum_tags;
++
++	/* Statistics */
++	u32 n_page_writes;
++	u32 n_page_reads;
++	u32 n_erasures;
++	u32 n_bad_markings;
++	u32 n_erase_failures;
++	u32 n_gc_copies;
++	u32 all_gcs;
++	u32 passive_gc_count;
++	u32 oldest_dirty_gc_count;
++	u32 n_gc_blocks;
++	u32 bg_gcs;
++	u32 n_retried_writes;
++	u32 n_retired_blocks;
++	u32 n_ecc_fixed;
++	u32 n_ecc_unfixed;
++	u32 n_tags_ecc_fixed;
++	u32 n_tags_ecc_unfixed;
++	u32 n_deletions;
++	u32 n_unmarked_deletions;
++	u32 refresh_count;
++	u32 cache_hits;
++	u32 tags_used;
++	u32 summary_used;
++
++};
++
++/* The CheckpointDevice structure holds the device information that changes
++ * at runtime and must be preserved over unmount/mount cycles.
++ */
++struct yaffs_checkpt_dev {
++	int struct_type;
++	int n_erased_blocks;
++	int alloc_block;	/* Current block being allocated off */
++	u32 alloc_page;
++	int n_free_chunks;
++
++	int n_deleted_files;	/* Count of files awaiting deletion; */
++	int n_unlinked_files;	/* Count of unlinked files. */
++	int n_bg_deletions;	/* Count of background deletions. */
++
++	/* yaffs2 runtime stuff */
++	unsigned seq_number;	/* Sequence number of currently
++				 * allocating block */
++
++};
++
++struct yaffs_checkpt_validity {
++	int struct_type;
++	u32 magic;
++	u32 version;
++	u32 head;
++};
++
++struct yaffs_shadow_fixer {
++	int obj_id;
++	int shadowed_id;
++	struct yaffs_shadow_fixer *next;
++};
++
++/* Structure for doing xattr modifications */
++struct yaffs_xattr_mod {
++	int set;		/* If 0 then this is a deletion */
++	const YCHAR *name;
++	const void *data;
++	int size;
++	int flags;
++	int result;
++};
++
++/*----------------------- YAFFS Functions -----------------------*/
++
++int yaffs_guts_initialise(struct yaffs_dev *dev);
++void yaffs_deinitialise(struct yaffs_dev *dev);
++
++int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
++
++int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
++		     struct yaffs_obj *new_dir, const YCHAR * new_name);
++
++int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
++int yaffs_del_obj(struct yaffs_obj *obj);
++
++int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
++loff_t yaffs_get_obj_length(struct yaffs_obj *obj);
++int yaffs_get_obj_inode(struct yaffs_obj *obj);
++unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
++int yaffs_get_obj_link_count(struct yaffs_obj *obj);
++
++/* File operations */
++int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
++		  int n_bytes);
++int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
++		  int n_bytes, int write_trhrough);
++int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
++
++struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
++				    const YCHAR *name, u32 mode, u32 uid,
++				    u32 gid);
++
++int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
++
++/* Flushing and checkpointing */
++void yaffs_flush_whole_cache(struct yaffs_dev *dev);
++
++int yaffs_checkpoint_save(struct yaffs_dev *dev);
++int yaffs_checkpoint_restore(struct yaffs_dev *dev);
++
++/* Directory operations */
++struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
++				   u32 mode, u32 uid, u32 gid);
++struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
++				     const YCHAR *name);
++struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
++
++/* Link operations */
++struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
++				 struct yaffs_obj *equiv_obj);
++
++struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
++
++/* Symlink operations */
++struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
++				       const YCHAR *name, u32 mode, u32 uid,
++				       u32 gid, const YCHAR *alias);
++YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
++
++/* Special inodes (fifos, sockets and devices) */
++struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
++				       const YCHAR *name, u32 mode, u32 uid,
++				       u32 gid, u32 rdev);
++
++int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
++		      const void *value, int size, int flags);
++int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
++		      int size);
++int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
++int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name);
++
++/* Special directories */
++struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
++struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
++
++void yaffs_handle_defered_free(struct yaffs_obj *obj);
++
++void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
++
++int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
++
++/* Debug dump  */
++int yaffs_dump_obj(struct yaffs_obj *obj);
++
++void yaffs_guts_test(struct yaffs_dev *dev);
++
++/* A few useful functions to be used within the core files*/
++void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
++		     int lyn);
++int yaffs_check_ff(u8 *buffer, int n_bytes);
++void yaffs_handle_chunk_error(struct yaffs_dev *dev,
++			      struct yaffs_block_info *bi);
++
++u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev);
++void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer);
++
++struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
++						 int number,
++						 enum yaffs_obj_type type);
++int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++			    int nand_chunk, int in_scan);
++void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name);
++void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
++				const struct yaffs_obj_hdr *oh);
++void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
++YCHAR *yaffs_clone_str(const YCHAR *str);
++void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
++void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
++int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
++		    int force, int is_shrink, int shadows,
++		    struct yaffs_xattr_mod *xop);
++void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
++			       int backward_scanning);
++int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
++struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
++struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
++					   struct yaffs_file_var *file_struct,
++					   u32 chunk_id,
++					   struct yaffs_tnode *passed_tn);
++
++int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
++		     int n_bytes, int write_trhrough);
++void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
++void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
++
++int yaffs_count_free_chunks(struct yaffs_dev *dev);
++
++struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
++				       struct yaffs_file_var *file_struct,
++				       u32 chunk_id);
++
++u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
++			 unsigned pos);
++
++int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
++
++int yaffs_format_dev(struct yaffs_dev *dev);
++
++void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
++				int *chunk_out, u32 *offset_out);
++/*
++ * Marshalling functions to get loff_t file sizes into and out of
++ * object headers.
++ */
++void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize);
++loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh);
++loff_t yaffs_max_file_size(struct yaffs_dev *dev);
++
++
++#endif
+diff --git a/fs/yaffs2/yaffs_linux.h b/fs/yaffs2/yaffs_linux.h
+new file mode 100644
+index 00000000..c20ab14b
+--- /dev/null
++++ b/fs/yaffs2/yaffs_linux.h
+@@ -0,0 +1,48 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_LINUX_H__
++#define __YAFFS_LINUX_H__
++
++#include "yportenv.h"
++
++struct yaffs_linux_context {
++	struct list_head context_list;	/* List of these we have mounted */
++	struct yaffs_dev *dev;
++	struct super_block *super;
++	struct task_struct *bg_thread;	/* Background thread for this device */
++	int bg_running;
++	struct mutex gross_lock;	/* Gross locking mutex*/
++	u8 *spare_buffer;	/* For mtdif2 use. Don't know the buffer size
++				 * at compile time so we have to allocate it.
++				 */
++	struct list_head search_contexts;
++	struct task_struct *readdir_process;
++	unsigned mount_id;
++	int dirty;
++};
++
++#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
++#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++#define WRITE_SIZE_STR "writesize"
++#define WRITE_SIZE(mtd) ((mtd)->writesize)
++#else
++#define WRITE_SIZE_STR "oobblock"
++#define WRITE_SIZE(mtd) ((mtd)->oobblock)
++#endif
++
++#endif
+diff --git a/fs/yaffs2/yaffs_mtdif.c b/fs/yaffs2/yaffs_mtdif.c
+new file mode 100644
+index 00000000..bd63855a
+--- /dev/null
++++ b/fs/yaffs2/yaffs_mtdif.c
+@@ -0,0 +1,294 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yportenv.h"
++
++#include "yaffs_mtdif.h"
++
++#include "linux/mtd/mtd.h"
++#include "linux/types.h"
++#include "linux/time.h"
++#include "linux/mtd/nand.h"
++#include "linux/kernel.h"
++#include "linux/version.h"
++#include "linux/types.h"
++
++#include "yaffs_trace.h"
++#include "yaffs_guts.h"
++#include "yaffs_linux.h"
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
++#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
++#define mtd_erase(m, ei) (m)->erase(m, ei)
++#define mtd_write_oob(m, addr, pops) (m)->write_oob(m, addr, pops)
++#define mtd_read_oob(m, addr, pops) (m)->read_oob(m, addr, pops)
++#define mtd_block_isbad(m, offs) (m)->block_isbad(m, offs)
++#define mtd_block_markbad(m, offs) (m)->block_markbad(m, offs)
++#endif
++
++
++
++int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
++{
++	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++	loff_t addr =
++	    ((loff_t) block_no) * dev->param.total_bytes_per_chunk *
++	    dev->param.chunks_per_block;
++	struct erase_info ei;
++	int retval = 0;
++
++	ei.mtd = mtd;
++	ei.addr = addr;
++	ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
++	ei.time = 1000;
++	ei.retries = 2;
++	ei.callback = NULL;
++	ei.priv = (u_long) dev;
++
++	retval = mtd_erase(mtd, &ei);
++
++	if (retval == 0)
++		return YAFFS_OK;
++
++	return YAFFS_FAIL;
++}
++
++
++static 	int yaffs_mtd_write(struct yaffs_dev *dev, int nand_chunk,
++				   const u8 *data, int data_len,
++				   const u8 *oob, int oob_len)
++{
++	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++	loff_t addr;
++	struct mtd_oob_ops ops;
++	int retval;
++
++	addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
++	memset(&ops, 0, sizeof(ops));
++	ops.mode = MTD_OPS_AUTO_OOB;
++	ops.len = (data) ? data_len : 0;
++	ops.ooblen = oob_len;
++	ops.datbuf = (u8 *)data;
++	ops.oobbuf = (u8 *)oob;
++
++	retval = mtd_write_oob(mtd, addr, &ops);
++	if (retval) {
++		yaffs_trace(YAFFS_TRACE_MTD,
++			"write_oob failed, chunk %d, mtd error %d",
++			nand_chunk, retval);
++	}
++	return retval ? YAFFS_FAIL : YAFFS_OK;
++}
++
++static int yaffs_mtd_read(struct yaffs_dev *dev, int nand_chunk,
++				   u8 *data, int data_len,
++				   u8 *oob, int oob_len,
++				   enum yaffs_ecc_result *ecc_result)
++{
++	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++	loff_t addr;
++	struct mtd_oob_ops ops;
++	int retval;
++
++	/* Address by the physical chunk size, as in yaffs_mtd_write();
++	 * data_bytes_per_chunk is smaller when inband tags are in use. */
++	addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
++	memset(&ops, 0, sizeof(ops));
++	ops.mode = MTD_OPS_AUTO_OOB;
++	ops.len = (data) ? data_len : 0;
++	ops.ooblen = oob_len;
++	ops.datbuf = data;
++	ops.oobbuf = oob;
++
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
++	/* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
++	 * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
++	 */
++	ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
++#endif
++	/* Read page and oob using MTD.
++	 * Check status and determine ECC result.
++	 */
++	retval = mtd_read_oob(mtd, addr, &ops);
++	if (retval)
++		yaffs_trace(YAFFS_TRACE_MTD,
++			"read_oob failed, chunk %d, mtd error %d",
++			nand_chunk, retval);
++
++	switch (retval) {
++	case 0:
++		/* no error */
++		if(ecc_result)
++			*ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++		break;
++
++	case -EUCLEAN:
++		/* MTD's ECC fixed the data */
++		if(ecc_result)
++			*ecc_result = YAFFS_ECC_RESULT_FIXED;
++		dev->n_ecc_fixed++;
++		break;
++
++	case -EBADMSG:
++	default:
++		/* MTD's ECC could not fix the data */
++		dev->n_ecc_unfixed++;
++		if(ecc_result)
++			*ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++		return YAFFS_FAIL;
++	}
++
++	return YAFFS_OK;
++}
++
++static 	int yaffs_mtd_erase(struct yaffs_dev *dev, int block_no)
++{
++	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++
++	loff_t addr;
++	struct erase_info ei;
++	int retval = 0;
++	u32 block_size;
++
++	block_size = dev->param.total_bytes_per_chunk *
++		     dev->param.chunks_per_block;
++	addr = ((loff_t) block_no) * block_size;
++
++	ei.mtd = mtd;
++	ei.addr = addr;
++	ei.len = block_size;
++	ei.time = 1000;
++	ei.retries = 2;
++	ei.callback = NULL;
++	ei.priv = (u_long) dev;
++
++	retval = mtd_erase(mtd, &ei);
++
++	if (retval == 0)
++		return YAFFS_OK;
++
++	return YAFFS_FAIL;
++}
++
++static int yaffs_mtd_mark_bad(struct yaffs_dev *dev, int block_no)
++{
++	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++	int blocksize = dev->param.chunks_per_block *
++			dev->param.total_bytes_per_chunk;
++	int retval;
++
++	yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no);
++
++	retval = mtd_block_markbad(mtd, (loff_t) blocksize * block_no);
++	return (retval) ? YAFFS_FAIL : YAFFS_OK;
++}
++
++static int yaffs_mtd_check_bad(struct yaffs_dev *dev, int block_no)
++{
++	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++	int blocksize = dev->param.chunks_per_block *
++			dev->param.total_bytes_per_chunk;
++	int retval;
++
++	yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "checking block %d bad", block_no);
++
++	retval = mtd_block_isbad(mtd, (loff_t) blocksize * block_no);
++	return (retval) ? YAFFS_FAIL : YAFFS_OK;
++}
++
++static int yaffs_mtd_initialise(struct yaffs_dev *dev)
++{
++	return YAFFS_OK;
++}
++
++static int yaffs_mtd_deinitialise(struct yaffs_dev *dev)
++{
++	return YAFFS_OK;
++}
++
++
++void yaffs_mtd_drv_install(struct yaffs_dev *dev)
++{
++	struct yaffs_driver *drv = &dev->drv;
++
++	drv->drv_write_chunk_fn = yaffs_mtd_write;
++	drv->drv_read_chunk_fn = yaffs_mtd_read;
++	drv->drv_erase_fn = yaffs_mtd_erase;
++	drv->drv_mark_bad_fn = yaffs_mtd_mark_bad;
++	drv->drv_check_bad_fn = yaffs_mtd_check_bad;
++	drv->drv_initialise_fn = yaffs_mtd_initialise;
++	drv->drv_deinitialise_fn = yaffs_mtd_deinitialise;
++}
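++
++/* Usage sketch (illustrative, not part of the original source): an OS
++ * glue layer would typically do something like
++ *
++ *	dev->driver_context = mtd;
++ *	dev->param.total_bytes_per_chunk = mtd->writesize;
++ *	yaffs_mtd_drv_install(dev);
++ *
++ * before calling yaffs_guts_initialise(dev), so the drv_* hooks are in
++ * place when yaffs_check_dev_fns() runs.
++ */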
++
++
++struct mtd_info *yaffs_get_mtd_device(dev_t sdev)
++{
++	struct mtd_info *mtd;
++
++	/* Check it's an mtd device..... */
++	if (MAJOR(sdev) != MTD_BLOCK_MAJOR)
++		return NULL;	/* This isn't an mtd device */
++
++	/* Fetch the device from the MTD core. (As written, this function
++	 * called itself here, which would recurse forever.) */
++	mtd = get_mtd_device(NULL, MINOR(sdev));
++	if (IS_ERR_OR_NULL(mtd))
++		return NULL;
++
++	/* Check it's NAND */
++	if (mtd->type != MTD_NANDFLASH) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"yaffs: MTD device is not NAND it's type %d",
++			mtd->type);
++		return NULL;
++	}
++
++	yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
++	yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
++	yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++	yaffs_trace(YAFFS_TRACE_OS, " size %u", mtd->size);
++#else
++	yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
++#endif
++
++	return mtd;
++}
++
++int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags)
++{
++	if (yaffs_version == 2) {
++		if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++		     mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
++		    !inband_tags) {
++			yaffs_trace(YAFFS_TRACE_ALWAYS,
++				"MTD device does not have the right page sizes"
++			);
++			return -1;
++		}
++	} else {
++		if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
++		    mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
++			yaffs_trace(YAFFS_TRACE_ALWAYS,
++				"MTD device does not support have the right page sizes"
++			);
++			return -1;
++		}
++	}
++
++	return 0;
++}
++
++
++void yaffs_put_mtd_device(struct mtd_info *mtd)
++{
++	if(mtd)
++		put_mtd_device(mtd);
++}
+diff --git a/fs/yaffs2/yaffs_mtdif.h b/fs/yaffs2/yaffs_mtdif.h
+new file mode 100644
+index 00000000..9cff224c
+--- /dev/null
++++ b/fs/yaffs2/yaffs_mtdif.h
+@@ -0,0 +1,25 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_MTDIF_H__
++#define __YAFFS_MTDIF_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_mtd_drv_install(struct yaffs_dev *dev);
++struct mtd_info * yaffs_get_mtd_device(dev_t sdev);
++void yaffs_put_mtd_device(struct mtd_info *mtd);
++int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags);
++#endif
+diff --git a/fs/yaffs2/yaffs_nameval.c b/fs/yaffs2/yaffs_nameval.c
+new file mode 100644
+index 00000000..4bdf4ed7
+--- /dev/null
++++ b/fs/yaffs2/yaffs_nameval.c
+@@ -0,0 +1,208 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This simple implementation of a name-value store assumes a small number of
++ * values and fits into a small finite buffer.
++ *
++ * Each attribute is stored as a record:
++ *  sizeof(int) bytes:  total record size.
++ *  strnlen+1 bytes:    name, NUL terminated.
++ *  nbytes:             value.
++ *  ----------
++ *  The total of the above is stored in the record size field.
++ *
++ * This code has not been tested with unicode yet.
++ */
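++
++/* Worked example (illustrative, assuming 4-byte int): storing the attribute
++ * "user.a" with the 3-byte value "xyz" yields a 14-byte record:
++ *
++ *   bytes  0..3   record size = 14 (4 + 7 + 3)
++ *   bytes  4..10  "user.a" plus the terminating NUL (7 bytes)
++ *   bytes 11..13  "xyz"
++ *
++ * Records are packed back to back from the start of the buffer.
++ */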
++
++#include "yaffs_nameval.h"
++
++#include "yportenv.h"
++
++static int nval_find(const char *xb, int xb_size, const YCHAR *name,
++		     int *exist_size)
++{
++	int pos = 0;
++	int size;
++
++	memcpy(&size, xb, sizeof(int));
++	while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
++		if (!strncmp((YCHAR *) (xb + pos + sizeof(int)),
++				name, size)) {
++			if (exist_size)
++				*exist_size = size;
++			return pos;
++		}
++		pos += size;
++		if (pos < xb_size - sizeof(int))
++			memcpy(&size, xb + pos, sizeof(int));
++		else
++			size = 0;
++	}
++	if (exist_size)
++		*exist_size = 0;
++	return -ENODATA;
++}
++
++static int nval_used(const char *xb, int xb_size)
++{
++	int pos = 0;
++	int size;
++
++	memcpy(&size, xb + pos, sizeof(int));
++	while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
++		pos += size;
++		if (pos < xb_size - sizeof(int))
++			memcpy(&size, xb + pos, sizeof(int));
++		else
++			size = 0;
++	}
++	return pos;
++}
++
++int nval_del(char *xb, int xb_size, const YCHAR *name)
++{
++	int pos = nval_find(xb, xb_size, name, NULL);
++	int size;
++
++	if (pos < 0 || pos >= xb_size)
++		return -ENODATA;
++
++	/* Find the size, shift the rest over this record (the regions
++	 * overlap, so memmove), then zero out the tail of the buffer */
++	memcpy(&size, xb + pos, sizeof(int));
++	memmove(xb + pos, xb + pos + size, xb_size - (pos + size));
++	memset(xb + (xb_size - size), 0, size);
++	return 0;
++}
++
++int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf,
++		int bsize, int flags)
++{
++	int pos;
++	int namelen = strnlen(name, xb_size);
++	int reclen;
++	int size_exist = 0;
++	int space;
++	int start;
++
++	pos = nval_find(xb, xb_size, name, &size_exist);
++
++	if (flags & XATTR_CREATE && pos >= 0)
++		return -EEXIST;
++	if (flags & XATTR_REPLACE && pos < 0)
++		return -ENODATA;
++
++	start = nval_used(xb, xb_size);
++	space = xb_size - start + size_exist;
++
++	reclen = (sizeof(int) + namelen + 1 + bsize);
++
++	if (reclen > space)
++		return -ENOSPC;
++
++	if (pos >= 0) {
++		nval_del(xb, xb_size, name);
++		start = nval_used(xb, xb_size);
++	}
++
++	pos = start;
++
++	memcpy(xb + pos, &reclen, sizeof(int));
++	pos += sizeof(int);
++	strncpy((YCHAR *) (xb + pos), name, namelen + 1);
++	pos += (namelen + 1);
++	memcpy(xb + pos, buf, bsize);
++	return 0;
++}
++
++int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
++	     int bsize)
++{
++	int pos = nval_find(xb, xb_size, name, NULL);
++	int size;
++
++	if (pos >= 0 && pos < xb_size) {
++
++		memcpy(&size, xb + pos, sizeof(int));
++		pos += sizeof(int);	/* advance past record length */
++		size -= sizeof(int);
++
++		/* Advance over name string */
++		while (xb[pos] && size > 0 && pos < xb_size) {
++			pos++;
++			size--;
++		}
++		/*Advance over NUL */
++		pos++;
++		size--;
++
++		/* If bsize is zero then this is a size query.
++		 * Return the size, but don't copy.
++		 */
++		if (!bsize)
++			return size;
++
++		if (size <= bsize) {
++			memcpy(buf, xb + pos, size);
++			return size;
++		}
++	}
++	if (pos >= 0)
++		return -ERANGE;
++
++	return -ENODATA;
++}
++
++int nval_list(const char *xb, int xb_size, char *buf, int bsize)
++{
++	int pos = 0;
++	int size;
++	int name_len;
++	int ncopied = 0;
++	int filled = 0;
++
++	memcpy(&size, xb + pos, sizeof(int));
++	while (size > sizeof(int) &&
++		size <= xb_size &&
++		(pos + size) < xb_size &&
++		!filled) {
++		pos += sizeof(int);
++		size -= sizeof(int);
++		name_len = strnlen((YCHAR *) (xb + pos), size);
++		if (ncopied + name_len + 1 < bsize) {
++			memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
++			buf += name_len;
++			*buf = '\0';
++			buf++;
++			if (sizeof(YCHAR) > 1) {
++				*buf = '\0';
++				buf++;
++			}
++			ncopied += (name_len + 1);
++		} else {
++			filled = 1;
++		}
++		pos += size;
++		if (pos < xb_size - sizeof(int))
++			memcpy(&size, xb + pos, sizeof(int));
++		else
++			size = 0;
++	}
++	return ncopied;
++}
++
++int nval_hasvalues(const char *xb, int xb_size)
++{
++	return nval_used(xb, xb_size) > 0;
++}
+diff --git a/fs/yaffs2/yaffs_nameval.h b/fs/yaffs2/yaffs_nameval.h
+new file mode 100644
+index 00000000..951e64f8
+--- /dev/null
++++ b/fs/yaffs2/yaffs_nameval.h
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __NAMEVAL_H__
++#define __NAMEVAL_H__
++
++#include "yportenv.h"
++
++int nval_del(char *xb, int xb_size, const YCHAR * name);
++int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
++	     int bsize, int flags);
++int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
++	     int bsize);
++int nval_list(const char *xb, int xb_size, char *buf, int bsize);
++int nval_hasvalues(const char *xb, int xb_size);
++#endif
+diff --git a/fs/yaffs2/yaffs_nand.c b/fs/yaffs2/yaffs_nand.c
+new file mode 100644
+index 00000000..9afd5ec8
+--- /dev/null
++++ b/fs/yaffs2/yaffs_nand.c
+@@ -0,0 +1,118 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_nand.h"
++#include "yaffs_tagscompat.h"
++
++#include "yaffs_getblockinfo.h"
++#include "yaffs_summary.h"
++
++static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
++{
++	return chunk - dev->chunk_offset;
++}
++
++int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
++			     u8 *buffer, struct yaffs_ext_tags *tags)
++{
++	int result;
++	struct yaffs_ext_tags local_tags;
++	int flash_chunk = apply_chunk_offset(dev, nand_chunk);
++
++	dev->n_page_reads++;
++
++	/* If there are no tags provided use local tags. */
++	if (!tags)
++		tags = &local_tags;
++
++	result = dev->tagger.read_chunk_tags_fn(dev, flash_chunk, buffer, tags);
++	if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
++
++		struct yaffs_block_info *bi;
++		bi = yaffs_get_block_info(dev,
++					  nand_chunk /
++					  dev->param.chunks_per_block);
++		yaffs_handle_chunk_error(dev, bi);
++	}
++	return result;
++}
++
++int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
++				int nand_chunk,
++				const u8 *buffer, struct yaffs_ext_tags *tags)
++{
++	int result;
++	int flash_chunk = apply_chunk_offset(dev, nand_chunk);
++
++	dev->n_page_writes++;
++
++	if (!tags) {
++		yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
++		BUG();
++		return YAFFS_FAIL;
++	}
++
++	tags->seq_number = dev->seq_number;
++	tags->chunk_used = 1;
++	yaffs_trace(YAFFS_TRACE_WRITE,
++		"Writing chunk %d tags %d %d",
++		nand_chunk, tags->obj_id, tags->chunk_id);
++
++	result = dev->tagger.write_chunk_tags_fn(dev, flash_chunk,
++							buffer, tags);
++
++	yaffs_summary_add(dev, tags, nand_chunk);
++
++	return result;
++}
++
++int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
++{
++	block_no -= dev->block_offset;
++	dev->n_bad_markings++;
++	return dev->tagger.mark_bad_fn(dev, block_no);
++}
++
++
++int yaffs_query_init_block_state(struct yaffs_dev *dev,
++				 int block_no,
++				 enum yaffs_block_state *state,
++				 u32 *seq_number)
++{
++	block_no -= dev->block_offset;
++	return dev->tagger.query_block_fn(dev, block_no, state, seq_number);
++}
++
++int yaffs_erase_block(struct yaffs_dev *dev, int block_no)
++{
++	int result;
++
++	block_no -= dev->block_offset;
++	dev->n_erasures++;
++	result = dev->drv.drv_erase_fn(dev, block_no);
++	return result;
++}
++
++int yaffs_init_nand(struct yaffs_dev *dev)
++{
++	if (dev->drv.drv_initialise_fn)
++		return dev->drv.drv_initialise_fn(dev);
++	return YAFFS_OK;
++}
++
++int yaffs_deinit_nand(struct yaffs_dev *dev)
++{
++	if (dev->drv.drv_deinitialise_fn)
++		return dev->drv.drv_deinitialise_fn(dev);
++	return YAFFS_OK;
++}
+diff --git a/fs/yaffs2/yaffs_nand.h b/fs/yaffs2/yaffs_nand.h
+new file mode 100644
+index 00000000..804e97ad
+--- /dev/null
++++ b/fs/yaffs2/yaffs_nand.h
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_NAND_H__
++#define __YAFFS_NAND_H__
++#include "yaffs_guts.h"
++
++int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
++			     u8 *buffer, struct yaffs_ext_tags *tags);
++
++int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
++			     int nand_chunk,
++			     const u8 *buffer, struct yaffs_ext_tags *tags);
++
++int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
++
++int yaffs_query_init_block_state(struct yaffs_dev *dev,
++				 int block_no,
++				 enum yaffs_block_state *state,
++				 unsigned *seq_number);
++
++int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
++
++int yaffs_init_nand(struct yaffs_dev *dev);
++int yaffs_deinit_nand(struct yaffs_dev *dev);
++
++#endif
+diff --git a/fs/yaffs2/yaffs_packedtags1.c b/fs/yaffs2/yaffs_packedtags1.c
+new file mode 100644
+index 00000000..dd9a331d
+--- /dev/null
++++ b/fs/yaffs2/yaffs_packedtags1.c
+@@ -0,0 +1,56 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags1.h"
++#include "yportenv.h"
++
++static const u8 all_ff[20] = {
++	0xff, 0xff, 0xff, 0xff,
++	0xff, 0xff, 0xff, 0xff,
++	0xff, 0xff, 0xff, 0xff,
++	0xff, 0xff, 0xff, 0xff,
++	0xff, 0xff, 0xff, 0xff
++};
++
++void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
++		      const struct yaffs_ext_tags *t)
++{
++	pt->chunk_id = t->chunk_id;
++	pt->serial_number = t->serial_number;
++	pt->n_bytes = t->n_bytes;
++	pt->obj_id = t->obj_id;
++	pt->ecc = 0;
++	pt->deleted = (t->is_deleted) ? 0 : 1;
++	pt->unused_stuff = 0;
++	pt->should_be_ff = 0xffffffff;
++}
++
++void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
++			const struct yaffs_packed_tags1 *pt)
++{
++
++	if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
++		t->block_bad = 0;
++		if (pt->should_be_ff != 0xffffffff)
++			t->block_bad = 1;
++		t->chunk_used = 1;
++		t->obj_id = pt->obj_id;
++		t->chunk_id = pt->chunk_id;
++		t->n_bytes = pt->n_bytes;
++		t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++		t->is_deleted = (pt->deleted) ? 0 : 1;
++		t->serial_number = pt->serial_number;
++	} else {
++		memset(t, 0, sizeof(struct yaffs_ext_tags));
++	}
++}
+diff --git a/fs/yaffs2/yaffs_packedtags1.h b/fs/yaffs2/yaffs_packedtags1.h
+new file mode 100644
+index 00000000..b80f0a5b
+--- /dev/null
++++ b/fs/yaffs2/yaffs_packedtags1.h
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
++
++#ifndef __YAFFS_PACKEDTAGS1_H__
++#define __YAFFS_PACKEDTAGS1_H__
++
++#include "yaffs_guts.h"
++
++struct yaffs_packed_tags1 {
++	unsigned chunk_id:20;
++	unsigned serial_number:2;
++	unsigned n_bytes:10;
++	unsigned obj_id:18;
++	unsigned ecc:12;
++	unsigned deleted:1;
++	unsigned unused_stuff:1;
++	unsigned should_be_ff;
++
++};
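++
++/* Size sketch (assuming 32-bit unsigned): the bitfields above total
++ * 20 + 2 + 10 + 18 + 12 + 1 + 1 = 64 bits, so with the 32-bit should_be_ff
++ * marker the packed tags occupy 12 bytes, small enough to fit in the
++ * 16-byte spare area of 512-byte-page NAND.
++ */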
++
++void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
++		      const struct yaffs_ext_tags *t);
++void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
++			const struct yaffs_packed_tags1 *pt);
++#endif
+diff --git a/fs/yaffs2/yaffs_packedtags2.c b/fs/yaffs2/yaffs_packedtags2.c
+new file mode 100644
+index 00000000..e1d18cc3
+--- /dev/null
++++ b/fs/yaffs2/yaffs_packedtags2.c
+@@ -0,0 +1,197 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags2.h"
++#include "yportenv.h"
++#include "yaffs_trace.h"
++
++/* This code packs a set of extended tags into a binary structure for
++ * NAND storage
++ */
++
++/* Some of the information is "extra" stuff which can be packed in to
++ * speed up scanning.
++ * This is signalled by setting the EXTRA_HEADER_INFO_FLAG.
++ */
++
++/* Extra flags applied to chunk_id */
++
++#define EXTRA_HEADER_INFO_FLAG	0x80000000
++#define EXTRA_SHRINK_FLAG	0x40000000
++#define EXTRA_SHADOWS_FLAG	0x20000000
++#define EXTRA_SPARE_FLAGS	0x10000000
++
++#define ALL_EXTRA_FLAGS		0xf0000000
++
++/* Also, the top 4 bits of the object Id are set to the object type. */
++#define EXTRA_OBJECT_TYPE_SHIFT (28)
++#define EXTRA_OBJECT_TYPE_MASK  ((0x0f) << EXTRA_OBJECT_TYPE_SHIFT)
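++
++/* Packing example (illustrative): the header chunk of a file whose parent
++ * directory has obj_id 2 is stored with
++ *   chunk_id = EXTRA_HEADER_INFO_FLAG | 2 (= 0x80000002)
++ *   obj_id |= YAFFS_OBJECT_TYPE_FILE << EXTRA_OBJECT_TYPE_SHIFT
++ * and, because the type is FILE, n_bytes carries the file size (when it
++ * fits in 31 bits).
++ */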
++
++static void yaffs_dump_packed_tags2_tags_only(
++				const struct yaffs_packed_tags2_tags_only *ptt)
++{
++	yaffs_trace(YAFFS_TRACE_MTD,
++		"packed tags obj %d chunk %d byte %d seq %d",
++		ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
++}
++
++static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
++{
++	yaffs_dump_packed_tags2_tags_only(&pt->t);
++}
++
++static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
++{
++	yaffs_trace(YAFFS_TRACE_MTD,
++		"ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
++		t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
++		t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
++		t->seq_number);
++
++}
++
++static int yaffs_check_tags_extra_packable(const struct yaffs_ext_tags *t)
++{
++	if (t->chunk_id != 0 || !t->extra_available)
++		return 0;
++
++	/* Check if the file size is too long to store */
++	if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE &&
++	    (t->extra_file_size >> 31) != 0)
++		return 0;
++	return 1;
++}
++
++void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
++				const struct yaffs_ext_tags *t)
++{
++	ptt->chunk_id = t->chunk_id;
++	ptt->seq_number = t->seq_number;
++	ptt->n_bytes = t->n_bytes;
++	ptt->obj_id = t->obj_id;
++
++	/* Only store extra tags for object headers.
++	 * If it is a file then only store the extra tags if the file size
++	 * is short enough to fit.
++	 */
++	if (yaffs_check_tags_extra_packable(t)) {
++		/* Store the extra header info instead */
++		/* We save the parent object in the chunk_id */
++		ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
++		if (t->extra_is_shrink)
++			ptt->chunk_id |= EXTRA_SHRINK_FLAG;
++		if (t->extra_shadows)
++			ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
++
++		ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
++		ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
++
++		if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++			ptt->n_bytes = t->extra_equiv_id;
++		else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
++			ptt->n_bytes = (unsigned) t->extra_file_size;
++		else
++			ptt->n_bytes = 0;
++	}
++
++	yaffs_dump_packed_tags2_tags_only(ptt);
++	yaffs_dump_tags2(t);
++}
++
++void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
++		      const struct yaffs_ext_tags *t, int tags_ecc)
++{
++	yaffs_pack_tags2_tags_only(&pt->t, t);
++
++	if (tags_ecc)
++		yaffs_ecc_calc_other((unsigned char *)&pt->t,
++				    sizeof(struct yaffs_packed_tags2_tags_only),
++				    &pt->ecc);
++}
++
++void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
++				  struct yaffs_packed_tags2_tags_only *ptt)
++{
++	memset(t, 0, sizeof(struct yaffs_ext_tags));
++
++	if (ptt->seq_number == 0xffffffff)
++		return;
++
++	t->block_bad = 0;
++	t->chunk_used = 1;
++	t->obj_id = ptt->obj_id;
++	t->chunk_id = ptt->chunk_id;
++	t->n_bytes = ptt->n_bytes;
++	t->is_deleted = 0;
++	t->serial_number = 0;
++	t->seq_number = ptt->seq_number;
++
++	/* Do extra header info stuff */
++	if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
++		t->chunk_id = 0;
++		t->n_bytes = 0;
++
++		t->extra_available = 1;
++		t->extra_parent_id = ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
++		t->extra_is_shrink = ptt->chunk_id & EXTRA_SHRINK_FLAG ? 1 : 0;
++		t->extra_shadows = ptt->chunk_id & EXTRA_SHADOWS_FLAG ? 1 : 0;
++		t->extra_obj_type = ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
++		t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
++
++		if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++			t->extra_equiv_id = ptt->n_bytes;
++		else
++			t->extra_file_size = ptt->n_bytes;
++	}
++	yaffs_dump_packed_tags2_tags_only(ptt);
++	yaffs_dump_tags2(t);
++}
++
++void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
++			int tags_ecc)
++{
++	enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++
++	if (pt->t.seq_number != 0xffffffff && tags_ecc) {
++		/* Chunk is in use and we need to do ECC */
++
++		struct yaffs_ecc_other ecc;
++		int result;
++		yaffs_ecc_calc_other((unsigned char *)&pt->t,
++				sizeof(struct yaffs_packed_tags2_tags_only),
++				&ecc);
++		result =
++		    yaffs_ecc_correct_other((unsigned char *)&pt->t,
++				sizeof(struct yaffs_packed_tags2_tags_only),
++				&pt->ecc, &ecc);
++		switch (result) {
++		case 0:
++			ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++			break;
++		case 1:
++			ecc_result = YAFFS_ECC_RESULT_FIXED;
++			break;
++		case -1:
++			ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++			break;
++		default:
++			ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
++		}
++	}
++	yaffs_unpack_tags2_tags_only(t, &pt->t);
++
++	t->ecc_result = ecc_result;
++
++	yaffs_dump_packed_tags2(pt);
++	yaffs_dump_tags2(t);
++}
+diff --git a/fs/yaffs2/yaffs_packedtags2.h b/fs/yaffs2/yaffs_packedtags2.h
+new file mode 100644
+index 00000000..675e7194
+--- /dev/null
++++ b/fs/yaffs2/yaffs_packedtags2.h
+@@ -0,0 +1,47 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
++
++#ifndef __YAFFS_PACKEDTAGS2_H__
++#define __YAFFS_PACKEDTAGS2_H__
++
++#include "yaffs_guts.h"
++#include "yaffs_ecc.h"
++
++struct yaffs_packed_tags2_tags_only {
++	unsigned seq_number;
++	unsigned obj_id;
++	unsigned chunk_id;
++	unsigned n_bytes;
++};
++
++struct yaffs_packed_tags2 {
++	struct yaffs_packed_tags2_tags_only t;
++	struct yaffs_ecc_other ecc;
++};
++
++/* Full packed tags with ECC, used for oob tags */
++void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
++		      const struct yaffs_ext_tags *t, int tags_ecc);
++void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
++			int tags_ecc);
++
++/* Only the tags part (no ECC), for use with inband tags */
++void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
++				const struct yaffs_ext_tags *t);
++void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
++				  struct yaffs_packed_tags2_tags_only *pt);
++#endif
+diff --git a/fs/yaffs2/yaffs_summary.c b/fs/yaffs2/yaffs_summary.c
+new file mode 100644
+index 00000000..6f3c7839
+--- /dev/null
++++ b/fs/yaffs2/yaffs_summary.c
+@@ -0,0 +1,313 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Summaries write the useful part of the tags for the chunks in a block
++ * into an array which is written to the last n chunks of the block.
++ * Reading the summaries gives all the tags for the block in one read. Much
++ * faster.
++ *
++ * Chunks holding summaries are marked with tags making it look like
++ * they are part of a fake file.
++ *
++ * The summary could also be used during gc.
++ *
++ */
++
++#include "yaffs_summary.h"
++#include "yaffs_packedtags2.h"
++#include "yaffs_nand.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_bitmap.h"
++
++/*
++ * The summary is built up in an array of summary tags.
++ * This gets written to the last one or two (maybe more) chunks in a block.
++ * A summary header is written as the first part of each chunk of summary data.
++ * The summary header must match or the summary is rejected.
++ */
++
++/* Summary tags don't need the sequence number because that is redundant. */
++struct yaffs_summary_tags {
++	unsigned obj_id;
++	unsigned chunk_id;
++	unsigned n_bytes;
++};
++
++/* Summary header */
++struct yaffs_summary_header {
++	unsigned version;	/* Must match current version */
++	unsigned block;		/* Must be this block */
++	unsigned seq;		/* Must be this sequence number */
++	unsigned sum;		/* Just add up all the bytes in the tags */
++};
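++
++/* Sizing sketch (assuming 4-byte unsigned): on NAND with 64 chunks of 2048
++ * data bytes per block, the tags array needs 64 * 12 = 768 bytes. Each
++ * summary chunk carries 2048 - 16 = 2032 payload bytes after its header, so
++ * yaffs_summary_init() uses one chunk and chunks_per_summary becomes 63.
++ */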
++
++
++static void yaffs_summary_clear(struct yaffs_dev *dev)
++{
++	if (!dev->sum_tags)
++		return;
++	memset(dev->sum_tags, 0, dev->chunks_per_summary *
++		sizeof(struct yaffs_summary_tags));
++}
++
++
++void yaffs_summary_deinit(struct yaffs_dev *dev)
++{
++	kfree(dev->sum_tags);
++	dev->sum_tags = NULL;
++	kfree(dev->gc_sum_tags);
++	dev->gc_sum_tags = NULL;
++	dev->chunks_per_summary = 0;
++}
++
++int yaffs_summary_init(struct yaffs_dev *dev)
++{
++	int sum_bytes;
++	int chunks_used; /* Number of chunks used by summary */
++	int sum_tags_bytes;
++
++	sum_bytes = dev->param.chunks_per_block *
++			sizeof(struct yaffs_summary_tags);
++
++	chunks_used = (sum_bytes + dev->data_bytes_per_chunk - 1)/
++			(dev->data_bytes_per_chunk -
++				sizeof(struct yaffs_summary_header));
++
++	dev->chunks_per_summary = dev->param.chunks_per_block - chunks_used;
++	sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
++				dev->chunks_per_summary;
++	dev->sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
++	dev->gc_sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
++	if (!dev->sum_tags || !dev->gc_sum_tags) {
++		yaffs_summary_deinit(dev);
++		return YAFFS_FAIL;
++	}
++
++	yaffs_summary_clear(dev);
++
++	return YAFFS_OK;
++}
++
++static unsigned yaffs_summary_sum(struct yaffs_dev *dev)
++{
++	u8 *sum_buffer = (u8 *)dev->sum_tags;
++	int i;
++	unsigned sum = 0;
++
++	i = sizeof(struct yaffs_summary_tags) *
++				dev->chunks_per_summary;
++	while (i > 0) {
++		sum += *sum_buffer;
++		sum_buffer++;
++		i--;
++	}
++
++	return sum;
++}
++
++static int yaffs_summary_write(struct yaffs_dev *dev, int blk)
++{
++	struct yaffs_ext_tags tags;
++	u8 *buffer;
++	u8 *sum_buffer = (u8 *)dev->sum_tags;
++	int n_bytes;
++	int chunk_in_nand;
++	int chunk_in_block;
++	int result;
++	int this_tx;
++	struct yaffs_summary_header hdr;
++	int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
++	struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
++
++	buffer = yaffs_get_temp_buffer(dev);
++	n_bytes = sizeof(struct yaffs_summary_tags) *
++				dev->chunks_per_summary;
++	memset(&tags, 0, sizeof(struct yaffs_ext_tags));
++	tags.obj_id = YAFFS_OBJECTID_SUMMARY;
++	tags.chunk_id = 1;
++	chunk_in_block = dev->chunks_per_summary;
++	chunk_in_nand = dev->alloc_block * dev->param.chunks_per_block +
++						dev->chunks_per_summary;
++	hdr.version = YAFFS_SUMMARY_VERSION;
++	hdr.block = blk;
++	hdr.seq = bi->seq_number;
++	hdr.sum = yaffs_summary_sum(dev);
++
++	do {
++		this_tx = n_bytes;
++		if (this_tx > sum_bytes_per_chunk)
++			this_tx = sum_bytes_per_chunk;
++		memcpy(buffer, &hdr, sizeof(hdr));
++		memcpy(buffer + sizeof(hdr), sum_buffer, this_tx);
++		tags.n_bytes = this_tx + sizeof(hdr);
++		result = yaffs_wr_chunk_tags_nand(dev, chunk_in_nand,
++						buffer, &tags);
++
++		if (result != YAFFS_OK)
++			break;
++		yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++		bi->pages_in_use++;
++		dev->n_free_chunks--;
++
++		n_bytes -= this_tx;
++		sum_buffer += this_tx;
++		chunk_in_nand++;
++		chunk_in_block++;
++		tags.chunk_id++;
++	} while (result == YAFFS_OK && n_bytes > 0);
++	yaffs_release_temp_buffer(dev, buffer);
++
++
++	if (result == YAFFS_OK)
++		bi->has_summary = 1;
++
++
++	return result;
++}
++
++int yaffs_summary_read(struct yaffs_dev *dev,
++			struct yaffs_summary_tags *st,
++			int blk)
++{
++	struct yaffs_ext_tags tags;
++	u8 *buffer;
++	u8 *sum_buffer = (u8 *)st;
++	int n_bytes;
++	int chunk_id;
++	int chunk_in_nand;
++	int chunk_in_block;
++	int result;
++	int this_tx;
++	struct yaffs_summary_header hdr;
++	struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
++	int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
++	int sum_tags_bytes;
++
++	sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
++				dev->chunks_per_summary;
++	buffer = yaffs_get_temp_buffer(dev);
++	n_bytes = sizeof(struct yaffs_summary_tags) * dev->chunks_per_summary;
++	chunk_in_block = dev->chunks_per_summary;
++	chunk_in_nand = blk * dev->param.chunks_per_block +
++							dev->chunks_per_summary;
++	chunk_id = 1;
++	do {
++		this_tx = n_bytes;
++		if (this_tx > sum_bytes_per_chunk)
++			this_tx = sum_bytes_per_chunk;
++		result = yaffs_rd_chunk_tags_nand(dev, chunk_in_nand,
++						buffer, &tags);
++
++		if (tags.chunk_id != chunk_id ||
++			tags.obj_id != YAFFS_OBJECTID_SUMMARY ||
++			tags.chunk_used == 0 ||
++			tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
++			tags.n_bytes != (this_tx + sizeof(hdr)))
++				result = YAFFS_FAIL;
++		if (result != YAFFS_OK)
++			break;
++
++		if (st == dev->sum_tags) {
++			/* If we're scanning then update the block info */
++			yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++			bi->pages_in_use++;
++		}
++		memcpy(&hdr, buffer, sizeof(hdr));
++		memcpy(sum_buffer, buffer + sizeof(hdr), this_tx);
++		n_bytes -= this_tx;
++		sum_buffer += this_tx;
++		chunk_in_nand++;
++		chunk_in_block++;
++		chunk_id++;
++	} while (result == YAFFS_OK && n_bytes > 0);
++	yaffs_release_temp_buffer(dev, buffer);
++
++	if (result == YAFFS_OK) {
++		/* Verify header */
++		if (hdr.version != YAFFS_SUMMARY_VERSION ||
++		    hdr.block != blk ||
++		    hdr.seq != bi->seq_number ||
++		    hdr.sum != yaffs_summary_sum(dev))
++			result = YAFFS_FAIL;
++	}
++
++	if (st == dev->sum_tags && result == YAFFS_OK)
++		bi->has_summary = 1;
++
++	return result;
++}
++
++int yaffs_summary_add(struct yaffs_dev *dev,
++			struct yaffs_ext_tags *tags,
++			int chunk_in_nand)
++{
++	struct yaffs_packed_tags2_tags_only tags_only;
++	struct yaffs_summary_tags *sum_tags;
++	int block_in_nand = chunk_in_nand / dev->param.chunks_per_block;
++	int chunk_in_block = chunk_in_nand % dev->param.chunks_per_block;
++
++	if (!dev->sum_tags)
++		return YAFFS_OK;
++
++	if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
++		yaffs_pack_tags2_tags_only(&tags_only, tags);
++		sum_tags = &dev->sum_tags[chunk_in_block];
++		sum_tags->chunk_id = tags_only.chunk_id;
++		sum_tags->n_bytes = tags_only.n_bytes;
++		sum_tags->obj_id = tags_only.obj_id;
++
++		if (chunk_in_block == dev->chunks_per_summary - 1) {
++			/* Time to write out the summary */
++			yaffs_summary_write(dev, block_in_nand);
++			yaffs_summary_clear(dev);
++			yaffs_skip_rest_of_block(dev);
++		}
++	}
++	return YAFFS_OK;
++}
++
++int yaffs_summary_fetch(struct yaffs_dev *dev,
++			struct yaffs_ext_tags *tags,
++			int chunk_in_block)
++{
++	struct yaffs_packed_tags2_tags_only tags_only;
++	struct yaffs_summary_tags *sum_tags;
++	if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
++		sum_tags = &dev->sum_tags[chunk_in_block];
++		tags_only.chunk_id = sum_tags->chunk_id;
++		tags_only.n_bytes = sum_tags->n_bytes;
++		tags_only.obj_id = sum_tags->obj_id;
++		yaffs_unpack_tags2_tags_only(tags, &tags_only);
++		return YAFFS_OK;
++	}
++	return YAFFS_FAIL;
++}
++
++void yaffs_summary_gc(struct yaffs_dev *dev, int blk)
++{
++	struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
++	int i;
++
++	if (!bi->has_summary)
++		return;
++
++	for (i = dev->chunks_per_summary;
++	     i < dev->param.chunks_per_block;
++	     i++) {
++		if (yaffs_check_chunk_bit(dev, blk, i)) {
++			yaffs_clear_chunk_bit(dev, blk, i);
++			bi->pages_in_use--;
++			dev->n_free_chunks++;
++		}
++	}
++}
+diff --git a/fs/yaffs2/yaffs_summary.h b/fs/yaffs2/yaffs_summary.h
+new file mode 100644
+index 00000000..be141d07
+--- /dev/null
++++ b/fs/yaffs2/yaffs_summary.h
+@@ -0,0 +1,37 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_SUMMARY_H__
++#define __YAFFS_SUMMARY_H__
++
++#include "yaffs_packedtags2.h"
++
++
++int yaffs_summary_init(struct yaffs_dev *dev);
++void yaffs_summary_deinit(struct yaffs_dev *dev);
++
++int yaffs_summary_add(struct yaffs_dev *dev,
++			struct yaffs_ext_tags *tags,
++			int chunk_in_block);
++int yaffs_summary_fetch(struct yaffs_dev *dev,
++			struct yaffs_ext_tags *tags,
++			int chunk_in_block);
++int yaffs_summary_read(struct yaffs_dev *dev,
++			struct yaffs_summary_tags *st,
++			int blk);
++void yaffs_summary_gc(struct yaffs_dev *dev, int blk);
++
++
++#endif
+diff --git a/fs/yaffs2/yaffs_tagscompat.c b/fs/yaffs2/yaffs_tagscompat.c
+new file mode 100644
+index 00000000..092430be
+--- /dev/null
++++ b/fs/yaffs2/yaffs_tagscompat.c
+@@ -0,0 +1,381 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_ecc.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_trace.h"
++
++static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
++
++
++/********** Tags ECC calculations  *********/
++
++
++void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
++{
++	/* Calculate an ecc */
++	unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
++	unsigned i, j;
++	unsigned ecc = 0;
++	unsigned bit = 0;
++
++	tags->ecc = 0;
++
++	for (i = 0; i < 8; i++) {
++		for (j = 1; j & 0xff; j <<= 1) {
++			bit++;
++			if (b[i] & j)
++				ecc ^= bit;
++		}
++	}
++	tags->ecc = ecc;
++}
++
++int yaffs_check_tags_ecc(struct yaffs_tags *tags)
++{
++	unsigned ecc = tags->ecc;
++
++	yaffs_calc_tags_ecc(tags);
++
++	ecc ^= tags->ecc;
++
++	if (ecc && ecc <= 64) {
++		/* TODO: Handle the failure better. Retire? */
++		unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
++
++		ecc--;
++
++		b[ecc / 8] ^= (1 << (ecc & 7));
++
++		/* Now recalc the ecc */
++		yaffs_calc_tags_ecc(tags);
++
++		return 1;	/* recovered error */
++	} else if (ecc) {
++		/* Weird ecc failure value */
++		/* TODO: Need to do something here */
++		return -1;	/* unrecovered error */
++	}
++	return 0;
++}
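++
++/* Example of the scheme above: if only bits 3 and 5 (numbering from 1) of
++ * the tag bytes are set, the stored ecc is 3 ^ 5 = 6. Flipping any single
++ * bit b xors b into the recalculated ecc, so old_ecc ^ new_ecc names the
++ * bad bit, which yaffs_check_tags_ecc() repairs after adjusting to a
++ * 0-based position.
++ */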
++
++/********** Tags **********/
++
++static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
++				     struct yaffs_tags *tags_ptr)
++{
++	union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
++
++	yaffs_calc_tags_ecc(tags_ptr);
++
++	spare_ptr->tb0 = tu->as_bytes[0];
++	spare_ptr->tb1 = tu->as_bytes[1];
++	spare_ptr->tb2 = tu->as_bytes[2];
++	spare_ptr->tb3 = tu->as_bytes[3];
++	spare_ptr->tb4 = tu->as_bytes[4];
++	spare_ptr->tb5 = tu->as_bytes[5];
++	spare_ptr->tb6 = tu->as_bytes[6];
++	spare_ptr->tb7 = tu->as_bytes[7];
++}
++
++static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
++				      struct yaffs_spare *spare_ptr,
++				      struct yaffs_tags *tags_ptr)
++{
++	union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
++	int result;
++
++	tu->as_bytes[0] = spare_ptr->tb0;
++	tu->as_bytes[1] = spare_ptr->tb1;
++	tu->as_bytes[2] = spare_ptr->tb2;
++	tu->as_bytes[3] = spare_ptr->tb3;
++	tu->as_bytes[4] = spare_ptr->tb4;
++	tu->as_bytes[5] = spare_ptr->tb5;
++	tu->as_bytes[6] = spare_ptr->tb6;
++	tu->as_bytes[7] = spare_ptr->tb7;
++
++	result = yaffs_check_tags_ecc(tags_ptr);
++	if (result > 0)
++		dev->n_tags_ecc_fixed++;
++	else if (result < 0)
++		dev->n_tags_ecc_unfixed++;
++}
++
++static void yaffs_spare_init(struct yaffs_spare *spare)
++{
++	memset(spare, 0xff, sizeof(struct yaffs_spare));
++}
++
++static int yaffs_wr_nand(struct yaffs_dev *dev,
++			 int nand_chunk, const u8 *data,
++			 struct yaffs_spare *spare)
++{
++	int data_size = dev->data_bytes_per_chunk;
++
++	return dev->drv.drv_write_chunk_fn(dev, nand_chunk,
++				data, data_size,
++				(u8 *) spare, sizeof(*spare));
++}
++
++static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
++			       int nand_chunk,
++			       u8 *data,
++			       struct yaffs_spare *spare,
++			       enum yaffs_ecc_result *ecc_result,
++			       int correct_errors)
++{
++	int ret_val;
++	struct yaffs_spare local_spare;
++	int data_size;
++	int spare_size;
++	int ecc_result1, ecc_result2;
++	u8 calc_ecc[3];
++
++	if (!spare) {
++		/* If we don't have a real spare, then we use a local one. */
++		/* Need this for the calculation of the ecc */
++		spare = &local_spare;
++	}
++	data_size = dev->data_bytes_per_chunk;
++	spare_size = sizeof(struct yaffs_spare);
++
++	if (dev->param.use_nand_ecc)
++		return dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++						data, data_size,
++						(u8 *) spare, spare_size,
++						ecc_result);
++
++
++	/* Handle the ECC at this level. */
++
++	ret_val = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++						 data, data_size,
++						 (u8 *)spare, spare_size,
++						NULL);
++	if (!data || !correct_errors)
++		return ret_val;
++
++	/* Do ECC correction if needed. */
++	yaffs_ecc_calc(data, calc_ecc);
++	ecc_result1 = yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
++	yaffs_ecc_calc(&data[256], calc_ecc);
++	ecc_result2 = yaffs_ecc_correct(&data[256], spare->ecc2, calc_ecc);
++
++	if (ecc_result1 > 0) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"**>>yaffs ecc error fix performed on chunk %d:0",
++			nand_chunk);
++		dev->n_ecc_fixed++;
++	} else if (ecc_result1 < 0) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"**>>yaffs ecc error unfixed on chunk %d:0",
++			nand_chunk);
++		dev->n_ecc_unfixed++;
++	}
++
++	if (ecc_result2 > 0) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"**>>yaffs ecc error fix performed on chunk %d:1",
++			nand_chunk);
++		dev->n_ecc_fixed++;
++	} else if (ecc_result2 < 0) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"**>>yaffs ecc error unfixed on chunk %d:1",
++			nand_chunk);
++		dev->n_ecc_unfixed++;
++	}
++
++	if (ecc_result1 || ecc_result2) {
++		/* We had a data problem on this page */
++		yaffs_handle_rd_data_error(dev, nand_chunk);
++	}
++
++	if (ecc_result1 < 0 || ecc_result2 < 0)
++		*ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++	else if (ecc_result1 > 0 || ecc_result2 > 0)
++		*ecc_result = YAFFS_ECC_RESULT_FIXED;
++	else
++		*ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++
++	return ret_val;
++}
++
++/*
++ * Functions for robustness handling
++ */
++
++static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
++{
++	int flash_block = nand_chunk / dev->param.chunks_per_block;
++
++	/* Mark the block for retirement */
++	yaffs_get_block_info(dev, flash_block + dev->block_offset)->
++		needs_retiring = 1;
++	yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++		"**>>Block %d marked for retirement",
++		flash_block);
++
++	/* TODO:
++	 * Just do a garbage collection on the affected block
++	 * then retire the block
++	 * NB recursion
++	 */
++}
++
++static int yaffs_tags_compat_wr(struct yaffs_dev *dev,
++			 int nand_chunk,
++			 const u8 *data, const struct yaffs_ext_tags *ext_tags)
++{
++	struct yaffs_spare spare;
++	struct yaffs_tags tags;
++
++	yaffs_spare_init(&spare);
++
++	if (ext_tags->is_deleted)
++		spare.page_status = 0;
++	else {
++		tags.obj_id = ext_tags->obj_id;
++		tags.chunk_id = ext_tags->chunk_id;
++
++		tags.n_bytes_lsb = ext_tags->n_bytes & (1024 - 1);
++
++		if (dev->data_bytes_per_chunk >= 1024)
++			tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
++		else
++			tags.n_bytes_msb = 3;
++
++		tags.serial_number = ext_tags->serial_number;
++
++		if (!dev->param.use_nand_ecc && data) {
++			yaffs_ecc_calc(data, spare.ecc1);
++			yaffs_ecc_calc(&data[256], spare.ecc2);
++		}
++
++		yaffs_load_tags_to_spare(&spare, &tags);
++	}
++	return yaffs_wr_nand(dev, nand_chunk, data, &spare);
++}
++
++static int yaffs_tags_compat_rd(struct yaffs_dev *dev,
++			 int nand_chunk,
++			 u8 *data, struct yaffs_ext_tags *ext_tags)
++{
++	struct yaffs_spare spare;
++	struct yaffs_tags tags;
++	enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
++	static struct yaffs_spare spare_ff;
++	static int init;
++	int deleted;
++
++	if (!init) {
++		memset(&spare_ff, 0xff, sizeof(spare_ff));
++		init = 1;
++	}
++
++	if (!yaffs_rd_chunk_nand(dev, nand_chunk,
++					data, &spare, &ecc_result, 1))
++		return YAFFS_FAIL;
++
++	/* ext_tags may be NULL */
++	if (!ext_tags)
++		return YAFFS_OK;
++
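++	/* page_status starts as 0xff and is zeroed to mark deletion; counting
++	 * the surviving set bits (fewer than 7 of 8) tolerates a single
++	 * flipped bit in the marker. */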
++	deleted = (hweight8(spare.page_status) < 7) ? 1 : 0;
++
++	ext_tags->is_deleted = deleted;
++	ext_tags->ecc_result = ecc_result;
++	ext_tags->block_bad = 0;	/* We're reading it, so it's not bad */
++	ext_tags->chunk_used =
++		memcmp(&spare_ff, &spare, sizeof(spare_ff)) ? 1 : 0;
++
++	if (ext_tags->chunk_used) {
++		yaffs_get_tags_from_spare(dev, &spare, &tags);
++		ext_tags->obj_id = tags.obj_id;
++		ext_tags->chunk_id = tags.chunk_id;
++		ext_tags->n_bytes = tags.n_bytes_lsb;
++
++		if (dev->data_bytes_per_chunk >= 1024)
++			ext_tags->n_bytes |=
++				(((unsigned)tags.n_bytes_msb) << 10);
++
++		ext_tags->serial_number = tags.serial_number;
++	}
++
++	return YAFFS_OK;
++}
++
++static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
++{
++	struct yaffs_spare spare;
++
++	memset(&spare, 0xff, sizeof(struct yaffs_spare));
++
++	spare.block_status = 'Y';
++
++	yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
++		      &spare);
++	yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
++		      NULL, &spare);
++
++	return YAFFS_OK;
++}
++
++static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
++				  int block_no,
++				  enum yaffs_block_state *state,
++				  u32 *seq_number)
++{
++	struct yaffs_spare spare0, spare1;
++	static struct yaffs_spare spare_ff;
++	static int init;
++	enum yaffs_ecc_result dummy;
++
++	if (!init) {
++		memset(&spare_ff, 0xff, sizeof(spare_ff));
++		init = 1;
++	}
++
++	*seq_number = 0;
++
++	/* Look for bad block markers in the first two chunks */
++	yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block,
++			    NULL, &spare0, &dummy, 0);
++	yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
++			    NULL, &spare1, &dummy, 0);
++
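++	/* A good block keeps block_status at 0xff in both spare areas;
++	 * yaffs_tags_compat_mark_bad() writes 'Y' (0x59), so ANDing the two
++	 * bytes and counting set bits (< 7) detects the marker even if one
++	 * copy is damaged. */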
++	if (hweight8(spare0.block_status & spare1.block_status) < 7)
++		*state = YAFFS_BLOCK_STATE_DEAD;
++	else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
++		*state = YAFFS_BLOCK_STATE_EMPTY;
++	else
++		*state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
++
++	return YAFFS_OK;
++}
++
++void yaffs_tags_compat_install(struct yaffs_dev *dev)
++{
++	if (dev->param.is_yaffs2)
++		return;
++	if (!dev->tagger.write_chunk_tags_fn)
++		dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_wr;
++	if (!dev->tagger.read_chunk_tags_fn)
++		dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_rd;
++	if (!dev->tagger.query_block_fn)
++		dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
++	if (!dev->tagger.mark_bad_fn)
++		dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
++}
+diff --git a/fs/yaffs2/yaffs_tagscompat.h b/fs/yaffs2/yaffs_tagscompat.h
+new file mode 100644
+index 00000000..92d298a6
+--- /dev/null
++++ b/fs/yaffs2/yaffs_tagscompat.h
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_TAGSCOMPAT_H__
++#define __YAFFS_TAGSCOMPAT_H__
++
++
++#include "yaffs_guts.h"
++
++#if 0
++
++
++int yaffs_tags_compat_wr(struct yaffs_dev *dev,
++			 int nand_chunk,
++			 const u8 *data, const struct yaffs_ext_tags *tags);
++int yaffs_tags_compat_rd(struct yaffs_dev *dev,
++			 int nand_chunk,
++			 u8 *data, struct yaffs_ext_tags *tags);
++int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
++int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
++				  int block_no,
++				  enum yaffs_block_state *state,
++				  u32 *seq_number);
++
++#endif
++
++
++void yaffs_tags_compat_install(struct yaffs_dev *dev);
++void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
++int yaffs_check_tags_ecc(struct yaffs_tags *tags);
++
++#endif
+diff --git a/fs/yaffs2/yaffs_tagsmarshall.c b/fs/yaffs2/yaffs_tagsmarshall.c
+new file mode 100644
+index 00000000..44a83b12
+--- /dev/null
++++ b/fs/yaffs2/yaffs_tagsmarshall.c
+@@ -0,0 +1,199 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yaffs_packedtags2.h"
++
++static int yaffs_tags_marshall_write(struct yaffs_dev *dev,
++				    int nand_chunk, const u8 *data,
++				    const struct yaffs_ext_tags *tags)
++{
++	struct yaffs_packed_tags2 pt;
++	int retval;
++
++	int packed_tags_size =
++	    dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++	void *packed_tags_ptr =
++	    dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
++
++	yaffs_trace(YAFFS_TRACE_MTD,
++		"yaffs_tags_marshall_write chunk %d data %p tags %p",
++		nand_chunk, data, tags);
++
++	/* For yaffs2 writing there must be both data and tags.
++	 * If we're using inband tags, then the tags are stuffed into
++	 * the end of the data buffer.
++	 */
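++	/* Inband layout sketch: the driver then sees a single buffer of
++	 *   [ data_bytes_per_chunk bytes of data |
++	 *     struct yaffs_packed_tags2_tags_only ]
++	 * whose total length is total_bytes_per_chunk.
++	 */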
++	if (!data || !tags)
++		BUG();
++	else if (dev->param.inband_tags) {
++		struct yaffs_packed_tags2_tags_only *pt2tp;
++		pt2tp =
++		    (struct yaffs_packed_tags2_tags_only *)(data +
++							dev->
++							data_bytes_per_chunk);
++		yaffs_pack_tags2_tags_only(pt2tp, tags);
++	} else {
++		yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
++	}
++
++	retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
++			data, dev->param.total_bytes_per_chunk,
++			(dev->param.inband_tags) ? NULL : packed_tags_ptr,
++			(dev->param.inband_tags) ? 0 : packed_tags_size);
++
++	return retval;
++}
++
++static int yaffs_tags_marshall_read(struct yaffs_dev *dev,
++				   int nand_chunk, u8 *data,
++				   struct yaffs_ext_tags *tags)
++{
++	int retval = 0;
++	int local_data = 0;
++	u8 spare_buffer[100];
++	enum yaffs_ecc_result ecc_result;
++
++	struct yaffs_packed_tags2 pt;
++
++	int packed_tags_size =
++	    dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++	void *packed_tags_ptr =
++	    dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
++
++	yaffs_trace(YAFFS_TRACE_MTD,
++		"yaffs_tags_marshall_read chunk %d data %p tags %p",
++		nand_chunk, data, tags);
++
++	if (dev->param.inband_tags) {
++		if (!data) {
++			local_data = 1;
++			data = yaffs_get_temp_buffer(dev);
++		}
++	}
++
++	if (dev->param.inband_tags || (data && !tags))
++		retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++					data, dev->param.total_bytes_per_chunk,
++					NULL, 0,
++					&ecc_result);
++	else if (tags)
++		retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++					data, dev->param.total_bytes_per_chunk,
++					spare_buffer, packed_tags_size,
++					&ecc_result);
++	else
++		BUG();
++
++
++	if (dev->param.inband_tags) {
++		if (tags) {
++			struct yaffs_packed_tags2_tags_only *pt2tp;
++			pt2tp =
++				(struct yaffs_packed_tags2_tags_only *)
++				&data[dev->data_bytes_per_chunk];
++			yaffs_unpack_tags2_tags_only(tags, pt2tp);
++		}
++	} else if (tags) {
++		memcpy(packed_tags_ptr, spare_buffer, packed_tags_size);
++		yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
++	}
++
++	if (local_data)
++		yaffs_release_temp_buffer(dev, data);
++
++	if (tags && ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
++		tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++		dev->n_ecc_unfixed++;
++	}
++
++	if (tags && ecc_result == YAFFS_ECC_RESULT_FIXED) {
++		if (tags->ecc_result <= YAFFS_ECC_RESULT_NO_ERROR)
++			tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
++		dev->n_ecc_fixed++;
++	}
++
++	if (ecc_result < YAFFS_ECC_RESULT_UNFIXED)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++static int yaffs_tags_marshall_query_block(struct yaffs_dev *dev, int block_no,
++			       enum yaffs_block_state *state,
++			       u32 *seq_number)
++{
++	int retval;
++
++	yaffs_trace(YAFFS_TRACE_MTD, "yaffs_tags_marshall_query_block %d",
++			block_no);
++
++	retval = dev->drv.drv_check_bad_fn(dev, block_no);
++
++	if (retval == YAFFS_FAIL) {
++		yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
++
++		*state = YAFFS_BLOCK_STATE_DEAD;
++		*seq_number = 0;
++	} else {
++		struct yaffs_ext_tags t;
++
++		yaffs_tags_marshall_read(dev,
++				    block_no * dev->param.chunks_per_block,
++				    NULL, &t);
++
++		if (t.chunk_used) {
++			*seq_number = t.seq_number;
++			*state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
++		} else {
++			*seq_number = 0;
++			*state = YAFFS_BLOCK_STATE_EMPTY;
++		}
++	}
++
++	yaffs_trace(YAFFS_TRACE_MTD,
++		"block query returns  seq %d state %d",
++		*seq_number, *state);
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++static int yaffs_tags_marshall_mark_bad(struct yaffs_dev *dev, int block_no)
++{
++	return dev->drv.drv_mark_bad_fn(dev, block_no);
++
++}
++
++
++void yaffs_tags_marshall_install(struct yaffs_dev *dev)
++{
++	if (!dev->param.is_yaffs2)
++		return;
++
++	if (!dev->tagger.write_chunk_tags_fn)
++		dev->tagger.write_chunk_tags_fn = yaffs_tags_marshall_write;
++
++	if (!dev->tagger.read_chunk_tags_fn)
++		dev->tagger.read_chunk_tags_fn = yaffs_tags_marshall_read;
++
++	if (!dev->tagger.query_block_fn)
++		dev->tagger.query_block_fn = yaffs_tags_marshall_query_block;
++
++	if (!dev->tagger.mark_bad_fn)
++		dev->tagger.mark_bad_fn = yaffs_tags_marshall_mark_bad;
++
++}
+diff --git a/fs/yaffs2/yaffs_tagsmarshall.h b/fs/yaffs2/yaffs_tagsmarshall.h
+new file mode 100644
+index 00000000..bf3e68a1
+--- /dev/null
++++ b/fs/yaffs2/yaffs_tagsmarshall.h
+@@ -0,0 +1,22 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_TAGSMARSHALL_H__
++#define __YAFFS_TAGSMARSHALL_H__
++
++#include "yaffs_guts.h"
++void yaffs_tags_marshall_install(struct yaffs_dev *dev);
++
++#endif
+diff --git a/fs/yaffs2/yaffs_trace.h b/fs/yaffs2/yaffs_trace.h
+new file mode 100644
+index 00000000..fd26054d
+--- /dev/null
++++ b/fs/yaffs2/yaffs_trace.h
+@@ -0,0 +1,57 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YTRACE_H__
++#define __YTRACE_H__
++
++extern unsigned int yaffs_trace_mask;
++extern unsigned int yaffs_wr_attempts;
++
++/*
++ * Tracing flags.
++ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
++ */
++
++#define YAFFS_TRACE_OS			0x00000002
++#define YAFFS_TRACE_ALLOCATE		0x00000004
++#define YAFFS_TRACE_SCAN		0x00000008
++#define YAFFS_TRACE_BAD_BLOCKS		0x00000010
++#define YAFFS_TRACE_ERASE		0x00000020
++#define YAFFS_TRACE_GC			0x00000040
++#define YAFFS_TRACE_WRITE		0x00000080
++#define YAFFS_TRACE_TRACING		0x00000100
++#define YAFFS_TRACE_DELETION		0x00000200
++#define YAFFS_TRACE_BUFFERS		0x00000400
++#define YAFFS_TRACE_NANDACCESS		0x00000800
++#define YAFFS_TRACE_GC_DETAIL		0x00001000
++#define YAFFS_TRACE_SCAN_DEBUG		0x00002000
++#define YAFFS_TRACE_MTD			0x00004000
++#define YAFFS_TRACE_CHECKPOINT		0x00008000
++
++#define YAFFS_TRACE_VERIFY		0x00010000
++#define YAFFS_TRACE_VERIFY_NAND		0x00020000
++#define YAFFS_TRACE_VERIFY_FULL		0x00040000
++#define YAFFS_TRACE_VERIFY_ALL		0x000f0000
++
++#define YAFFS_TRACE_SYNC		0x00100000
++#define YAFFS_TRACE_BACKGROUND		0x00200000
++#define YAFFS_TRACE_LOCK		0x00400000
++#define YAFFS_TRACE_MOUNT		0x00800000
++
++#define YAFFS_TRACE_ERROR		0x40000000
++#define YAFFS_TRACE_BUG			0x80000000
++#define YAFFS_TRACE_ALWAYS		0xf0000000
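++
++/* Usage sketch: to trace mount activity and bad-block handling (in addition
++ * to the always-on flags) one could set
++ *   yaffs_trace_mask = YAFFS_TRACE_ALWAYS | YAFFS_TRACE_MOUNT |
++ *                      YAFFS_TRACE_BAD_BLOCKS;
++ */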
++
++#endif
+diff --git a/fs/yaffs2/yaffs_verify.c b/fs/yaffs2/yaffs_verify.c
+new file mode 100644
+index 00000000..e8f2f0a6
+--- /dev/null
++++ b/fs/yaffs2/yaffs_verify.c
+@@ -0,0 +1,529 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_verify.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++
++int yaffs_skip_verification(struct yaffs_dev *dev)
++{
++	(void) dev;
++	return !(yaffs_trace_mask &
++		 (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_full_verification(struct yaffs_dev *dev)
++{
++	(void) dev;
++	return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
++{
++	(void) dev;
++	return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
++}
++
++static const char * const block_state_name[] = {
++	"Unknown",
++	"Needs scan",
++	"Scanning",
++	"Empty",
++	"Allocating",
++	"Full",
++	"Dirty",
++	"Checkpoint",
++	"Collecting",
++	"Dead"
++};
++
++void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
++{
++	int actually_used;
++	int in_use;
++
++	if (yaffs_skip_verification(dev))
++		return;
++
++	/* Report illegal runtime states */
++	if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Block %d has undefined state %d",
++			n, bi->block_state);
++
++	switch (bi->block_state) {
++	case YAFFS_BLOCK_STATE_UNKNOWN:
++	case YAFFS_BLOCK_STATE_SCANNING:
++	case YAFFS_BLOCK_STATE_NEEDS_SCAN:
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Block %d has bad run-state %s",
++			n, block_state_name[bi->block_state]);
++	}
++
++	/* Check pages in use and soft deletions are legal */
++
++	actually_used = bi->pages_in_use - bi->soft_del_pages;
++
++	if (bi->pages_in_use < 0 ||
++	    bi->pages_in_use > dev->param.chunks_per_block ||
++	    bi->soft_del_pages < 0 ||
++	    bi->soft_del_pages > dev->param.chunks_per_block ||
++	    actually_used < 0 || actually_used > dev->param.chunks_per_block)
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Block %d has illegal values pages_in_used %d soft_del_pages %d",
++			n, bi->pages_in_use, bi->soft_del_pages);
++
++	/* Check chunk bitmap legal */
++	in_use = yaffs_count_chunk_bits(dev, n);
++	if (in_use != bi->pages_in_use)
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
++			n, bi->pages_in_use, in_use);
++}
++
++void yaffs_verify_collected_blk(struct yaffs_dev *dev,
++				struct yaffs_block_info *bi, int n)
++{
++	yaffs_verify_blk(dev, bi, n);
++
++	/* After collection the block should be in the erased state */
++
++	if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
++	    bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"Block %d is in state %d after gc, should be erased",
++			n, bi->block_state);
++	}
++}
++
++void yaffs_verify_blocks(struct yaffs_dev *dev)
++{
++	int i;
++	int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
++	int illegal_states = 0;
++
++	if (yaffs_skip_verification(dev))
++		return;
++
++	memset(state_count, 0, sizeof(state_count));
++
++	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++		struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
++		yaffs_verify_blk(dev, bi, i);
++
++		if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
++			state_count[bi->block_state]++;
++		else
++			illegal_states++;
++	}
++
++	yaffs_trace(YAFFS_TRACE_VERIFY,	"Block summary");
++
++	yaffs_trace(YAFFS_TRACE_VERIFY,
++		"%d blocks have illegal states",
++		illegal_states);
++	if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Too many allocating blocks");
++
++	for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"%s %d blocks",
++			block_state_name[i], state_count[i]);
++
++	if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Checkpoint block count wrong dev %d count %d",
++			dev->blocks_in_checkpt,
++			state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
++
++	if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Erased block count wrong dev %d count %d",
++			dev->n_erased_blocks,
++			state_count[YAFFS_BLOCK_STATE_EMPTY]);
++
++	if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Too many collecting blocks %d (max is 1)",
++			state_count[YAFFS_BLOCK_STATE_COLLECTING]);
++}
++
++/*
++ * Verify the object header. oh must be valid, but obj and tags may be NULL in
++ * which case those tests will not be performed.
++ */
++void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
++		     struct yaffs_ext_tags *tags, int parent_check)
++{
++	if (obj && yaffs_skip_verification(obj->my_dev))
++		return;
++
++	if (!(tags && obj && oh)) {
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Verifying object header tags %p obj %p oh %p",
++			tags, obj, oh);
++		return;
++	}
++
++	if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
++	    oh->type > YAFFS_OBJECT_TYPE_MAX)
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d header type is illegal value 0x%x",
++			tags->obj_id, oh->type);
++
++	if (tags->obj_id != obj->obj_id)
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d header mismatch obj_id %d",
++			tags->obj_id, obj->obj_id);
++
++	/*
++	 * Check that the object's parent ids match if parent_check requested.
++	 *
++	 * Tests do not apply to the root object.
++	 */
++
++	if (parent_check && tags->obj_id > 1 && !obj->parent)
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d header mismatch parent_id %d obj->parent is NULL",
++			tags->obj_id, oh->parent_obj_id);
++
++	if (parent_check && obj->parent &&
++	    oh->parent_obj_id != obj->parent->obj_id &&
++	    (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
++	     obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d header mismatch parent_id %d parent_obj_id %d",
++			tags->obj_id, oh->parent_obj_id,
++			obj->parent->obj_id);
++
++	if (tags->obj_id > 1 && oh->name[0] == 0)	/* Null name */
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d header name is NULL",
++			obj->obj_id);
++
++	if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff)	/* Junk name */
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d header name is 0xff",
++			obj->obj_id);
++}
++
++void yaffs_verify_file(struct yaffs_obj *obj)
++{
++	u32 x;
++	int required_depth;
++	int actual_depth;
++	int last_chunk;
++	u32 offset_in_chunk;
++	u32 the_chunk;
++
++	u32 i;
++	struct yaffs_dev *dev;
++	struct yaffs_ext_tags tags;
++	struct yaffs_tnode *tn;
++	u32 obj_id;
++
++	if (!obj)
++		return;
++
++	if (yaffs_skip_verification(obj->my_dev))
++		return;
++
++	dev = obj->my_dev;
++	obj_id = obj->obj_id;
++
++	/* Check file size is consistent with tnode depth */
++	yaffs_addr_to_chunk(dev, obj->variant.file_variant.file_size,
++				&last_chunk, &offset_in_chunk);
++	last_chunk++;
++	x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
++	required_depth = 0;
++	while (x > 0) {
++		x >>= YAFFS_TNODES_INTERNAL_BITS;
++		required_depth++;
++	}
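++	/*
++	 * Example, assuming the default tnode geometry (16 level-0
++	 * entries, so YAFFS_TNODES_LEVEL0_BITS == 4, and 8-way internal
++	 * nodes, so YAFFS_TNODES_INTERNAL_BITS == 3): a file ending in
++	 * chunk 100 gives x = 101 >> 4 = 6, a single 3-bit shift clears
++	 * x, and required_depth comes out as 1.
++	 */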
++
++	actual_depth = obj->variant.file_variant.top_level;
++
++	/* Check that the chunks in the tnode tree are all correct.
++	 * We do this by scanning through the tnode tree and
++	 * checking the tags for every chunk match.
++	 */
++
++	if (yaffs_skip_nand_verification(dev))
++		return;
++
++	for (i = 1; i <= last_chunk; i++) {
++		tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
++
++		if (!tn)
++			continue;
++
++		the_chunk = yaffs_get_group_base(dev, tn, i);
++		if (the_chunk > 0) {
++			yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
++						 &tags);
++			if (tags.obj_id != obj_id || tags.chunk_id != i)
++				yaffs_trace(YAFFS_TRACE_VERIFY,
++					"Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
++					obj_id, i, the_chunk,
++					tags.obj_id, tags.chunk_id);
++		}
++	}
++}
++
++void yaffs_verify_link(struct yaffs_obj *obj)
++{
++	if (obj && yaffs_skip_verification(obj->my_dev))
++		return;
++
++	/* Verify sane equivalent object */
++}
++
++void yaffs_verify_symlink(struct yaffs_obj *obj)
++{
++	if (obj && yaffs_skip_verification(obj->my_dev))
++		return;
++
++	/* Verify symlink string */
++}
++
++void yaffs_verify_special(struct yaffs_obj *obj)
++{
++	if (obj && yaffs_skip_verification(obj->my_dev))
++		return;
++}
++
++void yaffs_verify_obj(struct yaffs_obj *obj)
++{
++	struct yaffs_dev *dev;
++	u32 chunk_min;
++	u32 chunk_max;
++	u32 chunk_id_ok;
++	u32 chunk_in_range;
++	u32 chunk_wrongly_deleted;
++	u32 chunk_valid;
++
++	if (!obj)
++		return;
++
++	if (obj->being_created)
++		return;
++
++	dev = obj->my_dev;
++
++	if (yaffs_skip_verification(dev))
++		return;
++
++	/* Check sane object header chunk */
++
++	chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
++	chunk_max =
++	    (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
++
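++	/*
++	 * Example: with 64 chunks per block and internal blocks 1..1023,
++	 * chunk_min = 64 and chunk_max = 65535. hdr_chunk == 0 is also
++	 * treated as legal below (an object with no header chunk).
++	 */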
++	chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
++			  ((unsigned)(obj->hdr_chunk)) <= chunk_max);
++	chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
++	chunk_valid = chunk_in_range &&
++	    yaffs_check_chunk_bit(dev,
++				  obj->hdr_chunk / dev->param.chunks_per_block,
++				  obj->hdr_chunk % dev->param.chunks_per_block);
++	chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
++
++	if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d has chunk_id %d %s %s",
++			obj->obj_id, obj->hdr_chunk,
++			chunk_id_ok ? "" : ",out of range",
++			chunk_wrongly_deleted ? ",marked as deleted" : "");
++
++	if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
++		struct yaffs_ext_tags tags;
++		struct yaffs_obj_hdr *oh;
++		u8 *buffer = yaffs_get_temp_buffer(dev);
++
++		oh = (struct yaffs_obj_hdr *)buffer;
++
++		yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
++
++		yaffs_verify_oh(obj, oh, &tags, 1);
++
++		yaffs_release_temp_buffer(dev, buffer);
++	}
++
++	/* Verify it has a parent */
++	if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d has parent pointer %p which does not look like an object",
++			obj->obj_id, obj->parent);
++	}
++
++	/* Verify parent is a directory */
++	if (obj->parent &&
++	    obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d's parent is not a directory (type %d)",
++			obj->obj_id, obj->parent->variant_type);
++	}
++
++	switch (obj->variant_type) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		yaffs_verify_file(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		yaffs_verify_symlink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		yaffs_verify_dir(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		yaffs_verify_link(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		yaffs_verify_special(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_UNKNOWN:
++	default:
++		yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d has illegal type %d",
++			obj->obj_id, obj->variant_type);
++		break;
++	}
++}
++
++void yaffs_verify_objects(struct yaffs_dev *dev)
++{
++	struct yaffs_obj *obj;
++	int i;
++	struct list_head *lh;
++
++	if (yaffs_skip_verification(dev))
++		return;
++
++	/* Iterate through the objects in each hash entry */
++
++	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++		list_for_each(lh, &dev->obj_bucket[i].list) {
++			obj = list_entry(lh, struct yaffs_obj, hash_link);
++			yaffs_verify_obj(obj);
++		}
++	}
++}
++
++void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
++{
++	struct list_head *lh;
++	struct yaffs_obj *list_obj;
++	int count = 0;
++
++	if (!obj) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
++		BUG();
++		return;
++	}
++
++	if (yaffs_skip_verification(obj->my_dev))
++		return;
++
++	if (!obj->parent) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent");
++		BUG();
++		return;
++	}
++
++	if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
++		BUG();
++	}
++
++	/* Iterate through the objects in each hash entry */
++
++	list_for_each(lh, &obj->parent->variant.dir_variant.children) {
++		list_obj = list_entry(lh, struct yaffs_obj, siblings);
++		yaffs_verify_obj(list_obj);
++		if (obj == list_obj)
++			count++;
++	}
++
++	if (count != 1) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"Object in directory %d times",
++			count);
++		BUG();
++	}
++}
++
++void yaffs_verify_dir(struct yaffs_obj *directory)
++{
++	struct list_head *lh;
++	struct yaffs_obj *list_obj;
++
++	if (!directory) {
++		BUG();
++		return;
++	}
++
++	if (yaffs_skip_full_verification(directory->my_dev))
++		return;
++
++	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"Directory has wrong type: %d",
++			directory->variant_type);
++		BUG();
++	}
++
++	/* Iterate through the objects in each hash entry */
++
++	list_for_each(lh, &directory->variant.dir_variant.children) {
++		list_obj = list_entry(lh, struct yaffs_obj, siblings);
++		if (list_obj->parent != directory) {
++			yaffs_trace(YAFFS_TRACE_ALWAYS,
++				"Object in directory list has wrong parent %p",
++				list_obj->parent);
++			BUG();
++		}
++		yaffs_verify_obj_in_dir(list_obj);
++	}
++}
++
++static int yaffs_free_verification_failures;
++
++void yaffs_verify_free_chunks(struct yaffs_dev *dev)
++{
++	int counted;
++	int difference;
++
++	if (yaffs_skip_verification(dev))
++		return;
++
++	counted = yaffs_count_free_chunks(dev);
++
++	difference = dev->n_free_chunks - counted;
++
++	if (difference) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"Freechunks verification failure %d %d %d",
++			dev->n_free_chunks, counted, difference);
++		yaffs_free_verification_failures++;
++	}
++}
++
++int yaffs_verify_file_sane(struct yaffs_obj *in)
++{
++	(void) in;
++	return YAFFS_OK;
++}
+diff --git a/fs/yaffs2/yaffs_verify.h b/fs/yaffs2/yaffs_verify.h
+new file mode 100644
+index 00000000..4f4af8d2
+--- /dev/null
++++ b/fs/yaffs2/yaffs_verify.h
+@@ -0,0 +1,43 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_VERIFY_H__
++#define __YAFFS_VERIFY_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
++		      int n);
++void yaffs_verify_collected_blk(struct yaffs_dev *dev,
++				struct yaffs_block_info *bi, int n);
++void yaffs_verify_blocks(struct yaffs_dev *dev);
++
++void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
++		     struct yaffs_ext_tags *tags, int parent_check);
++void yaffs_verify_file(struct yaffs_obj *obj);
++void yaffs_verify_link(struct yaffs_obj *obj);
++void yaffs_verify_symlink(struct yaffs_obj *obj);
++void yaffs_verify_special(struct yaffs_obj *obj);
++void yaffs_verify_obj(struct yaffs_obj *obj);
++void yaffs_verify_objects(struct yaffs_dev *dev);
++void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
++void yaffs_verify_dir(struct yaffs_obj *directory);
++void yaffs_verify_free_chunks(struct yaffs_dev *dev);
++
++int yaffs_verify_file_sane(struct yaffs_obj *obj);
++
++int yaffs_skip_verification(struct yaffs_dev *dev);
++
++#endif
+diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
+new file mode 100644
+index 00000000..75e8ef20
+--- /dev/null
++++ b/fs/yaffs2/yaffs_vfs.c
+@@ -0,0 +1,3354 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ * Acknowledgements:
++ * Luc van OostenRyck for numerous patches.
++ * Nick Bane for numerous patches.
++ * Nick Bane for 2.5/2.6 integration.
++ * Andras Toth for mknod rdev issue.
++ * Michael Fischer for finding the problem with inode inconsistency.
++ * Some code bodily lifted from JFFS
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ *
++ * This is the file system front-end to YAFFS that hooks it up to
++ * the VFS.
++ *
++ * Special notes:
++ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
++ *         this superblock
++ * >> 2.6: sb->s_fs_info  points to the struct yaffs_dev associated with this
++ *         superblock
++ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
++ */
++
++/*
++ * There are two variants of the VFS glue code. This variant should compile
++ * for any version of Linux.
++ */
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10))
++#define YAFFS_COMPILE_BACKGROUND
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23))
++#define YAFFS_COMPILE_FREEZER
++#endif
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
++#define YAFFS_COMPILE_EXPORTFS
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
++#define YAFFS_USE_SETATTR_COPY
++#define YAFFS_USE_TRUNCATE_SETSIZE
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
++#define YAFFS_HAS_EVICT_INODE
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++#define YAFFS_NEW_FOLLOW_LINK 1
++#else
++#define YAFFS_NEW_FOLLOW_LINK 0
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
++#define YAFFS_HAS_WRITE_SUPER
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
++#include <linux/smp_lock.h>
++#endif
++#include <linux/pagemap.h>
++#include <linux/mtd/mtd.h>
++#include <linux/interrupt.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++#include <linux/namei.h>
++#endif
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++#include <linux/exportfs.h>
++#endif
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#endif
++#ifdef YAFFS_COMPILE_FREEZER
++#include <linux/freezer.h>
++#endif
++
++#include <asm/div64.h>
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++#include <linux/statfs.h>
++
++#define UnlockPage(p) unlock_page(p)
++#define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
++
++/* FIXME: use sb->s_id instead ? */
++#define yaffs_devname(sb, buf)	bdevname(sb->s_bdev, buf)
++
++#else
++
++#include <linux/locks.h>
++#define	BDEVNAME_SIZE		0
++#define	yaffs_devname(sb, buf)	kdevname(sb->s_dev)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
++/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
++#define __user
++#endif
++
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define YPROC_ROOT  (&proc_root)
++#else
++#define YPROC_ROOT  NULL
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define Y_INIT_TIMER(a)	init_timer(a)
++#else
++#define Y_INIT_TIMER(a)	init_timer_on_stack(a)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
++#define YAFFS_USE_WRITE_BEGIN_END 1
++#else
++#define YAFFS_USE_WRITE_BEGIN_END 0
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
++#define YAFFS_SUPER_HAS_DIRTY
++#endif
++
++
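++/*
++ * Kernels from 3.2 onward provide set_nlink() and discourage assigning
++ * i_nlink directly, so only older kernels get this local fallback.
++ */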
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
++#define set_nlink(inode, count) do { (inode)->i_nlink = (count); } while (0)
++#endif
++
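++/*
++ * The function form below uses do_div() because a plain 64-bit '/' is
++ * not available on all 32-bit kernel targets; either form gives e.g.
++ * YCALCBLOCKS(128 * 1024 * 1024, 128 * 1024) == 1024.
++ */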
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
++static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
++{
++	uint64_t result = partition_size;
++	do_div(result, block_size);
++	return (uint32_t) result;
++}
++#else
++#define YCALCBLOCKS(s, b) ((s)/(b))
++#endif
++
++#include <linux/uaccess.h>
++#include <linux/mtd/mtd.h>
++
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_guts.h"
++#include "yaffs_attribs.h"
++
++#include "yaffs_linux.h"
++
++#include "yaffs_mtdif.h"
++
++unsigned int yaffs_trace_mask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS;
++unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
++unsigned int yaffs_auto_checkpoint = 1;
++unsigned int yaffs_gc_control = 1;
++unsigned int yaffs_bg_enable = 1;
++unsigned int yaffs_auto_select = 1;
++/* Module Parameters */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++module_param(yaffs_trace_mask, uint, 0644);
++module_param(yaffs_wr_attempts, uint, 0644);
++module_param(yaffs_auto_checkpoint, uint, 0644);
++module_param(yaffs_gc_control, uint, 0644);
++module_param(yaffs_bg_enable, uint, 0644);
++#else
++MODULE_PARM(yaffs_trace_mask, "i");
++MODULE_PARM(yaffs_wr_attempts, "i");
++MODULE_PARM(yaffs_auto_checkpoint, "i");
++MODULE_PARM(yaffs_gc_control, "i");
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++/* use iget and read_inode */
++#define Y_IGET(sb, inum) iget((sb), (inum))
++
++#else
++/* Call local equivalent */
++#define YAFFS_USE_OWN_IGET
++#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
++
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
++#else
++#define yaffs_inode_to_obj_lv(iptr) ((iptr)->u.generic_ip)
++#endif
++
++#define yaffs_inode_to_obj(iptr) \
++	((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
++#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define yaffs_super_to_dev(sb)	((struct yaffs_dev *)sb->s_fs_info)
++#else
++#define yaffs_super_to_dev(sb)	((struct yaffs_dev *)sb->u.generic_sbp)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++#define Y_CLEAR_INODE(i) clear_inode(i)
++#else
++#define Y_CLEAR_INODE(i) end_writeback(i)
++#endif
++
++
++#define update_dir_time(dir) do {\
++			(dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
++		} while (0)
++
++static void yaffs_fill_inode_from_obj(struct inode *inode,
++				      struct yaffs_obj *obj);
++
++
++static void yaffs_gross_lock(struct yaffs_dev *dev)
++{
++	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
++	mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
++	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
++}
++
++static void yaffs_gross_unlock(struct yaffs_dev *dev)
++{
++	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
++	mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
++}
++
++
++static int yaffs_readpage_nolock(struct file *f, struct page *pg)
++{
++	/* Lifted from jffs2 */
++
++	struct yaffs_obj *obj;
++	unsigned char *pg_buf;
++	int ret;
++	loff_t pos = ((loff_t) pg->index) << PAGE_CACHE_SHIFT;
++	struct yaffs_dev *dev;
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_readpage_nolock at %lld, size %08x",
++		(long long)pos,
++		(unsigned)PAGE_CACHE_SIZE);
++
++	obj = yaffs_dentry_to_obj(f->f_dentry);
++
++	dev = obj->my_dev;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++	BUG_ON(!PageLocked(pg));
++#else
++	if (!PageLocked(pg))
++		PAGE_BUG(pg);
++#endif
++
++	pg_buf = kmap(pg);
++	/* FIXME: Can kmap fail? */
++
++	yaffs_gross_lock(dev);
++
++	ret = yaffs_file_rd(obj, pg_buf, pos, PAGE_CACHE_SIZE);
++
++	yaffs_gross_unlock(dev);
++
++	if (ret >= 0)
++		ret = 0;
++
++	if (ret) {
++		ClearPageUptodate(pg);
++		SetPageError(pg);
++	} else {
++		SetPageUptodate(pg);
++		ClearPageError(pg);
++	}
++
++	flush_dcache_page(pg);
++	kunmap(pg);
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
++	return ret;
++}
++
++static int yaffs_readpage_unlock(struct file *f, struct page *pg)
++{
++	int ret = yaffs_readpage_nolock(f, pg);
++	UnlockPage(pg);
++	return ret;
++}
++
++static int yaffs_readpage(struct file *f, struct page *pg)
++{
++	int ret;
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
++	ret = yaffs_readpage_unlock(f, pg);
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
++	return ret;
++}
++
++
++static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val)
++{
++	struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
++
++	if (lc)
++		lc->dirty = val;
++
++#ifdef YAFFS_SUPER_HAS_DIRTY
++	{
++		struct super_block *sb = lc->super;
++
++		if (sb)
++			sb->s_dirt = val;
++	}
++#endif
++
++}
++
++static void yaffs_set_super_dirty(struct yaffs_dev *dev)
++{
++	yaffs_set_super_dirty_val(dev, 1);
++}
++
++static void yaffs_clear_super_dirty(struct yaffs_dev *dev)
++{
++	yaffs_set_super_dirty_val(dev, 0);
++}
++
++static int yaffs_check_super_dirty(struct yaffs_dev *dev)
++{
++	struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
++
++	if (lc && lc->dirty)
++		return 1;
++
++#ifdef YAFFS_SUPER_HAS_DIRTY
++	{
++		struct super_block *sb = lc->super;
++
++		if (sb && sb->s_dirt)
++			return 1;
++	}
++#endif
++	return 0;
++
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
++#else
++static int yaffs_writepage(struct page *page)
++#endif
++{
++	struct yaffs_dev *dev;
++	struct address_space *mapping = page->mapping;
++	struct inode *inode;
++	unsigned long end_index;
++	char *buffer;
++	struct yaffs_obj *obj;
++	int n_written = 0;
++	unsigned n_bytes;
++	loff_t i_size;
++
++	if (!mapping)
++		BUG();
++	inode = mapping->host;
++	if (!inode)
++		BUG();
++	i_size = i_size_read(inode);
++
++	end_index = i_size >> PAGE_CACHE_SHIFT;
++
++	if (page->index < end_index)
++		n_bytes = PAGE_CACHE_SIZE;
++	else {
++		n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
++
++		if (page->index > end_index || !n_bytes) {
++			yaffs_trace(YAFFS_TRACE_OS,
++				"yaffs_writepage at %lld, inode size = %lld!!",
++				((loff_t)page->index) << PAGE_CACHE_SHIFT,
++				inode->i_size);
++			yaffs_trace(YAFFS_TRACE_OS,
++				"                -> don't care!!");
++
++			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
++			set_page_writeback(page);
++			unlock_page(page);
++			end_page_writeback(page);
++			return 0;
++		}
++	}
++
++	if (n_bytes != PAGE_CACHE_SIZE)
++		zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
++
++	get_page(page);
++
++	buffer = kmap(page);
++
++	obj = yaffs_inode_to_obj(inode);
++	dev = obj->my_dev;
++	yaffs_gross_lock(dev);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_writepage at %lld, size %08x",
++		((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes);
++	yaffs_trace(YAFFS_TRACE_OS,
++		"writepag0: obj = %lld, ino = %lld",
++		obj->variant.file_variant.file_size, inode->i_size);
++
++	n_written = yaffs_wr_file(obj, buffer,
++				  ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes, 0);
++
++	yaffs_set_super_dirty(dev);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"writepag1: obj = %lld, ino = %lld",
++		obj->variant.file_variant.file_size, inode->i_size);
++
++	yaffs_gross_unlock(dev);
++
++	kunmap(page);
++	set_page_writeback(page);
++	unlock_page(page);
++	end_page_writeback(page);
++	put_page(page);
++
++	return (n_written == n_bytes) ? 0 : -ENOSPC;
++}
++
++/*
++ * Space holding and freeing is done to ensure we have space available
++ * for write_begin/end.
++ *
++ * For now we just assume few parallel writes and check against a small
++ * number.
++ *
++ * Todo: need to do this with a counter to handle parallel reads better.
++ */
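++/*
++ * One possible counter-based scheme (a sketch for the todo above, not
++ * implemented here): yaffs_hold_space() would atomically add a
++ * reservation to a per-device held-chunk count and compare free chunks
++ * against the running total, with yaffs_release_space() subtracting the
++ * reservation again.
++ */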
++
++static ssize_t yaffs_hold_space(struct file *f)
++{
++	struct yaffs_obj *obj;
++	struct yaffs_dev *dev;
++
++	int n_free_chunks;
++
++	obj = yaffs_dentry_to_obj(f->f_dentry);
++
++	dev = obj->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	n_free_chunks = yaffs_get_n_free_chunks(dev);
++
++	yaffs_gross_unlock(dev);
++
++	return (n_free_chunks > 20) ? 1 : 0;
++}
++
++static void yaffs_release_space(struct file *f)
++{
++	struct yaffs_obj *obj;
++	struct yaffs_dev *dev;
++
++	obj = yaffs_dentry_to_obj(f->f_dentry);
++
++	dev = obj->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	yaffs_gross_unlock(dev);
++}
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++			     loff_t pos, unsigned len, unsigned flags,
++			     struct page **pagep, void **fsdata)
++{
++	struct page *pg = NULL;
++	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
++
++	int ret = 0;
++	int space_held = 0;
++
++	/* Get a page */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++	pg = grab_cache_page_write_begin(mapping, index, flags);
++#else
++	pg = __grab_cache_page(mapping, index);
++#endif
++
++	*pagep = pg;
++	if (!pg) {
++		ret = -ENOMEM;
++		goto out;
++	}
++	yaffs_trace(YAFFS_TRACE_OS,
++		"start yaffs_write_begin index %d(%x) uptodate %d",
++		(int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
++
++	/* Get fs space */
++	space_held = yaffs_hold_space(filp);
++
++	if (!space_held) {
++		ret = -ENOSPC;
++		goto out;
++	}
++
++	/* Update page if required */
++
++	if (!Page_Uptodate(pg))
++		ret = yaffs_readpage_nolock(filp, pg);
++
++	if (ret)
++		goto out;
++
++	/* Happy path return */
++	yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
++
++	return 0;
++
++out:
++	yaffs_trace(YAFFS_TRACE_OS,
++		"end yaffs_write_begin fail returning %d", ret);
++	if (space_held)
++		yaffs_release_space(filp);
++	if (pg) {
++		unlock_page(pg);
++		page_cache_release(pg);
++	}
++	return ret;
++}
++
++#else
++
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++			       unsigned offset, unsigned to)
++{
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_prepare_write");
++
++	if (!Page_Uptodate(pg))
++		return yaffs_readpage_nolock(f, pg);
++	return 0;
++}
++#endif
++
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++				loff_t *pos)
++{
++	struct yaffs_obj *obj;
++	int n_written;
++	loff_t ipos;
++	struct inode *inode;
++	struct yaffs_dev *dev;
++
++	obj = yaffs_dentry_to_obj(f->f_dentry);
++
++	if (!obj) {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_file_write: hey obj is null!");
++		return -EINVAL;
++	}
++
++	dev = obj->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	inode = f->f_dentry->d_inode;
++
++	if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
++		ipos = inode->i_size;
++	else
++		ipos = *pos;
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_file_write about to write %u(%x) bytes to object %d at %lld",
++		(unsigned)n, (unsigned)n, obj->obj_id, ipos);
++
++	n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
++
++	yaffs_set_super_dirty(dev);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_file_write: %d(%x) bytes written",
++		(unsigned)n, (unsigned)n);
++
++	if (n_written > 0) {
++		ipos += n_written;
++		*pos = ipos;
++		if (ipos > inode->i_size) {
++			inode->i_size = ipos;
++			inode->i_blocks = (ipos + 511) >> 9;
++
++			yaffs_trace(YAFFS_TRACE_OS,
++				"yaffs_file_write size updated to %lld bytes, %d blocks",
++				ipos, (int)(inode->i_blocks));
++		}
++
++	}
++	yaffs_gross_unlock(dev);
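++	/*
++	 * Match VFS write semantics: a short write reports the bytes
++	 * actually stored, while storing nothing when n > 0 bytes were
++	 * requested reports -ENOSPC.
++	 */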
++	return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
++}
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++			   loff_t pos, unsigned len, unsigned copied,
++			   struct page *pg, void *fsdadata)
++{
++	int ret = 0;
++	void *addr, *kva;
++	uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
++
++	kva = kmap(pg);
++	addr = kva + offset_into_page;
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_write_end addr %p pos %lld n_bytes %d",
++		addr, pos, copied);
++
++	ret = yaffs_file_write(filp, addr, copied, &pos);
++
++	if (ret != copied) {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_write_end not same size ret %d  copied %d",
++			ret, copied);
++		SetPageError(pg);
++	}
++
++	kunmap(pg);
++
++	yaffs_release_space(filp);
++	unlock_page(pg);
++	page_cache_release(pg);
++	return ret;
++}
++#else
++
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++			      unsigned to)
++{
++	void *addr, *kva;
++
++	loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
++	int n_bytes = to - offset;
++	int n_written;
++
++	kva = kmap(pg);
++	addr = kva + offset;
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_commit_write addr %p pos %lld n_bytes %d",
++		addr, pos, n_bytes);
++
++	n_written = yaffs_file_write(f, addr, n_bytes, &pos);
++
++	if (n_written != n_bytes) {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_commit_write not same size n_written %d  n_bytes %d",
++			n_written, n_bytes);
++		SetPageError(pg);
++	}
++	kunmap(pg);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_commit_write returning %d",
++		n_written == n_bytes ? 0 : n_written);
++
++	return n_written == n_bytes ? 0 : n_written;
++}
++#endif
++
++static struct address_space_operations yaffs_file_address_operations = {
++	.readpage = yaffs_readpage,
++	.writepage = yaffs_writepage,
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++	.write_begin = yaffs_write_begin,
++	.write_end = yaffs_write_end,
++#else
++	.prepare_write = yaffs_prepare_write,
++	.commit_write = yaffs_commit_write,
++#endif
++};
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id)
++#else
++static int yaffs_file_flush(struct file *file)
++#endif
++{
++	struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
++
++	struct yaffs_dev *dev = obj->my_dev;
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_file_flush object %d (%s)",
++		obj->obj_id,
++		obj->dirty ? "dirty" : "clean");
++
++	yaffs_gross_lock(dev);
++
++	yaffs_flush_file(obj, 1, 0);
++
++	yaffs_gross_unlock(dev);
++
++	return 0;
++}
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++static int yaffs_sync_object(struct file *file, int datasync)
++#else
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++			     int datasync)
++#endif
++{
++	struct yaffs_obj *obj;
++	struct yaffs_dev *dev;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++	struct dentry *dentry = file->f_path.dentry;
++#endif
++
++	obj = yaffs_dentry_to_obj(dentry);
++
++	dev = obj->my_dev;
++
++	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++		"yaffs_sync_object");
++	yaffs_gross_lock(dev);
++	yaffs_flush_file(obj, 1, datasync);
++	yaffs_gross_unlock(dev);
++	return 0;
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
++static const struct file_operations yaffs_file_operations = {
++	.read = do_sync_read,
++	.write = do_sync_write,
++	.aio_read = generic_file_aio_read,
++	.aio_write = generic_file_aio_write,
++	.mmap = generic_file_mmap,
++	.flush = yaffs_file_flush,
++	.fsync = yaffs_sync_object,
++	.splice_read = generic_file_splice_read,
++	.splice_write = generic_file_splice_write,
++	.llseek = generic_file_llseek,
++};
++
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++
++static const struct file_operations yaffs_file_operations = {
++	.read = do_sync_read,
++	.write = do_sync_write,
++	.aio_read = generic_file_aio_read,
++	.aio_write = generic_file_aio_write,
++	.mmap = generic_file_mmap,
++	.flush = yaffs_file_flush,
++	.fsync = yaffs_sync_object,
++	.sendfile = generic_file_sendfile,
++};
++
++#else
++
++static const struct file_operations yaffs_file_operations = {
++	.read = generic_file_read,
++	.write = generic_file_write,
++	.mmap = generic_file_mmap,
++	.flush = yaffs_file_flush,
++	.fsync = yaffs_sync_object,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++	.sendfile = generic_file_sendfile,
++#endif
++};
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++static void zero_user_segment(struct page *page, unsigned start, unsigned end)
++{
++	void *kaddr = kmap_atomic(page, KM_USER0);
++	memset(kaddr + start, 0, end - start);
++	kunmap_atomic(kaddr, KM_USER0);
++	flush_dcache_page(page);
++}
++#endif
++
++
++static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize)
++{
++#ifdef YAFFS_USE_TRUNCATE_SETSIZE
++	truncate_setsize(inode, newsize);
++	return 0;
++#else
++	truncate_inode_pages(&inode->i_data, newsize);
++	return 0;
++#endif
++
++}
++
++
++static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr)
++{
++#ifdef YAFFS_USE_SETATTR_COPY
++	setattr_copy(inode, attr);
++	return 0;
++#else
++	return inode_setattr(inode, attr);
++#endif
++
++}
++
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++	struct inode *inode = dentry->d_inode;
++	int error = 0;
++	struct yaffs_dev *dev;
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_setattr of object %d",
++		yaffs_inode_to_obj(inode)->obj_id);
++#if 0
++	/* Fail if a requested resize >= 2GB */
++	if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
++		error = -EINVAL;
++#endif
++
++	if (error == 0)
++		error = inode_change_ok(inode, attr);
++	if (error == 0) {
++		int result;
++		if (!error) {
++			error = yaffs_vfs_setattr(inode, attr);
++			yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
++			if (attr->ia_valid & ATTR_SIZE) {
++				yaffs_vfs_setsize(inode, attr->ia_size);
++				inode->i_blocks = (inode->i_size + 511) >> 9;
++			}
++		}
++		dev = yaffs_inode_to_obj(inode)->my_dev;
++		if (attr->ia_valid & ATTR_SIZE) {
++			yaffs_trace(YAFFS_TRACE_OS,
++				"resize to %d(%x)",
++				(int)(attr->ia_size),
++				(int)(attr->ia_size));
++		}
++		yaffs_gross_lock(dev);
++		result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
++		if (result == YAFFS_OK) {
++			error = 0;
++		} else {
++			error = -EPERM;
++		}
++		yaffs_gross_unlock(dev);
++
++	}
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
++
++	return error;
++}
++
++static int yaffs_setxattr(struct dentry *dentry, const char *name,
++		   const void *value, size_t size, int flags)
++{
++	struct inode *inode = dentry->d_inode;
++	int error = 0;
++	struct yaffs_dev *dev;
++	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
++
++	if (error == 0) {
++		int result;
++		dev = obj->my_dev;
++		yaffs_gross_lock(dev);
++		result = yaffs_set_xattrib(obj, name, value, size, flags);
++		if (result == YAFFS_OK)
++			error = 0;
++		else if (result < 0)
++			error = result;
++		yaffs_gross_unlock(dev);
++
++	}
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
++
++	return error;
++}
++
++static ssize_t yaffs_getxattr(struct dentry *dentry, const char *name,
++			void *buff, size_t size)
++{
++	struct inode *inode = dentry->d_inode;
++	int error = 0;
++	struct yaffs_dev *dev;
++	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_getxattr \"%s\" from object %d",
++		name, obj->obj_id);
++
++	if (error == 0) {
++		dev = obj->my_dev;
++		yaffs_gross_lock(dev);
++		error = yaffs_get_xattrib(obj, name, buff, size);
++		yaffs_gross_unlock(dev);
++
++	}
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
++
++	return error;
++}
++
++static int yaffs_removexattr(struct dentry *dentry, const char *name)
++{
++	struct inode *inode = dentry->d_inode;
++	int error = 0;
++	struct yaffs_dev *dev;
++	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_removexattr of object %d", obj->obj_id);
++
++	if (error == 0) {
++		int result;
++		dev = obj->my_dev;
++		yaffs_gross_lock(dev);
++		result = yaffs_remove_xattrib(obj, name);
++		if (result == YAFFS_OK)
++			error = 0;
++		else if (result < 0)
++			error = result;
++		yaffs_gross_unlock(dev);
++
++	}
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_removexattr done returning %d", error);
++
++	return error;
++}
++
++static ssize_t yaffs_listxattr(struct dentry *dentry, char *buff, size_t size)
++{
++	struct inode *inode = dentry->d_inode;
++	int error = 0;
++	struct yaffs_dev *dev;
++	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_listxattr of object %d", obj->obj_id);
++
++	if (error == 0) {
++		dev = obj->my_dev;
++		yaffs_gross_lock(dev);
++		error = yaffs_list_xattrib(obj, buff, size);
++		yaffs_gross_unlock(dev);
++
++	}
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_listxattr done returning %d", error);
++
++	return error;
++}
++
++
++static const struct inode_operations yaffs_file_inode_operations = {
++	.setattr = yaffs_setattr,
++	.setxattr = yaffs_setxattr,
++	.getxattr = yaffs_getxattr,
++	.listxattr = yaffs_listxattr,
++	.removexattr = yaffs_removexattr,
++};
++
++
++static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
++			  int buflen)
++{
++	unsigned char *alias;
++	int ret;
++
++	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++
++	yaffs_gross_unlock(dev);
++
++	if (!alias)
++		return -ENOMEM;
++
++	ret = vfs_readlink(dentry, buffer, buflen, alias);
++	kfree(alias);
++	return ret;
++}
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++	void *ret;
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++	int ret;
++#endif
++	unsigned char *alias;
++	int ret_int = 0;
++	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++	yaffs_gross_unlock(dev);
++
++	if (!alias) {
++		ret_int = -ENOMEM;
++		goto out;
++	}
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++	nd_set_link(nd, alias);
++	ret = alias;
++out:
++	if (ret_int)
++		ret = ERR_PTR(ret_int);
++	return ret;
++#else
++	ret = vfs_follow_link(nd, alias);
++	kfree(alias);
++out:
++	if (ret_int)
++		ret = ret_int;
++	return ret;
++#endif
++}
++
++
++#ifdef YAFFS_HAS_PUT_INODE
++
++/* For now put inode is just for debugging
++ * Put inode is called when the inode **structure** is put.
++ */
++static void yaffs_put_inode(struct inode *inode)
++{
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_put_inode: ino %d, count %d",
++		(int)inode->i_ino, atomic_read(&inode->i_count));
++
++}
++#endif
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
++{
++	kfree(alias);
++}
++#endif
++
++static const struct inode_operations yaffs_symlink_inode_operations = {
++	.readlink = yaffs_readlink,
++	.follow_link = yaffs_follow_link,
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++	.put_link = yaffs_put_link,
++#endif
++	.setattr = yaffs_setattr,
++	.setxattr = yaffs_setxattr,
++	.getxattr = yaffs_getxattr,
++	.listxattr = yaffs_listxattr,
++	.removexattr = yaffs_removexattr,
++};
++
++#ifdef YAFFS_USE_OWN_IGET
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
++{
++	struct inode *inode;
++	struct yaffs_obj *obj;
++	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
++
++	inode = iget_locked(sb, ino);
++	if (!inode)
++		return ERR_PTR(-ENOMEM);
++	if (!(inode->i_state & I_NEW))
++		return inode;
++
++	/* NB This is called as a side effect of other functions, but
++	 * we had to release the lock to prevent deadlocks, so
++	 * need to lock again.
++	 */
++
++	yaffs_gross_lock(dev);
++
++	obj = yaffs_find_by_number(dev, inode->i_ino);
++
++	yaffs_fill_inode_from_obj(inode, obj);
++
++	yaffs_gross_unlock(dev);
++
++	unlock_new_inode(inode);
++	return inode;
++}
++
++#else
++
++static void yaffs_read_inode(struct inode *inode)
++{
++	/* NB This is called as a side effect of other functions, but
++	 * we had to release the lock to prevent deadlocks, so
++	 * need to lock again.
++	 */
++
++	struct yaffs_obj *obj;
++	struct yaffs_dev *dev = yaffs_super_to_dev(inode->i_sb);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_read_inode for %d", (int)inode->i_ino);
++
++	if (current != yaffs_dev_to_lc(dev)->readdir_process)
++		yaffs_gross_lock(dev);
++
++	obj = yaffs_find_by_number(dev, inode->i_ino);
++
++	yaffs_fill_inode_from_obj(inode, obj);
++
++	if (current != yaffs_dev_to_lc(dev)->readdir_process)
++		yaffs_gross_unlock(dev);
++}
++
++#endif
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++			      struct yaffs_obj *obj)
++{
++	struct inode *inode;
++
++	if (!sb) {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_get_inode for NULL super_block!!");
++		return NULL;
++
++	}
++
++	if (!obj) {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_get_inode for NULL object!!");
++		return NULL;
++
++	}
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_get_inode for object %d", obj->obj_id);
++
++	inode = Y_IGET(sb, obj->obj_id);
++	if (IS_ERR(inode))
++		return NULL;
++
++	/* NB Side effect: iget calls back to yaffs_read_inode(). */
++	/* iget also increments the inode's i_count */
++	/* NB You can't be holding gross_lock or deadlock will happen! */
++
++	return inode;
++}
++
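++/*
++ * From 2.6.29 the fsuid/fsgid live in the task's struct cred, so
++ * YCRED(current)->fsuid works on both sides of the split.
++ */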
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++#define YCRED(x) x
++#else
++#define YCRED(x) (x->cred)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++		       dev_t rdev)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++		       dev_t rdev)
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++		       int rdev)
++#endif
++{
++	struct inode *inode;
++
++	struct yaffs_obj *obj = NULL;
++	struct yaffs_dev *dev;
++
++	struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
++
++	int error = -ENOSPC;
++	uid_t uid = YCRED(current)->fsuid;
++	gid_t gid =
++	    (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
++
++	if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
++		mode |= S_ISGID;
++
++	if (parent) {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_mknod: parent object %d type %d",
++			parent->obj_id, parent->variant_type);
++	} else {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_mknod: could not get parent object");
++		return -EPERM;
++	}
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_mknod: making object for %s, mode %x dev %x",
++		dentry->d_name.name, mode, rdev);
++
++	dev = parent->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	switch (mode & S_IFMT) {
++	default:
++		/* Special (socket, fifo, device...) */
++		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++		obj =
++		    yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++					 gid, old_encode_dev(rdev));
++#else
++		obj =
++		    yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++					 gid, rdev);
++#endif
++		break;
++	case S_IFREG:		/* file          */
++		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
++		obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
++					gid);
++		break;
++	case S_IFDIR:		/* directory */
++		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
++		obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
++				       uid, gid);
++		break;
++	case S_IFLNK:		/* symlink */
++		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
++		obj = NULL;	/* Do we ever get here? */
++		break;
++	}
++
++	/* Can not call yaffs_get_inode() with gross lock held */
++	yaffs_gross_unlock(dev);
++
++	if (obj) {
++		inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
++		d_instantiate(dentry, inode);
++		update_dir_time(dir);
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_mknod created object %d count = %d",
++			obj->obj_id, atomic_read(&inode->i_count));
++		error = 0;
++		yaffs_fill_inode_from_obj(dir, parent);
++	} else {
++		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
++		error = -ENOMEM;
++	}
++
++	return error;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
++#else
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++	int ret_val;
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_mkdir");
++	ret_val = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
++	return ret_val;
++}
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++			bool dummy)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++			struct nameidata *n)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++			struct nameidata *n)
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_create");
++	return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++				   unsigned int dummy)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++				   struct nameidata *n)
++#else
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
++#endif
++{
++	struct yaffs_obj *obj;
++	struct inode *inode = NULL;	/* NCB 2.5/2.6 needs NULL here */
++
++	struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
++
++	if (current != yaffs_dev_to_lc(dev)->readdir_process)
++		yaffs_gross_lock(dev);
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup for %d:%s",
++		yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
++
++	obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
++
++	obj = yaffs_get_equivalent_obj(obj);	/* in case it was a hardlink */
++
++	/* Can't hold gross lock when calling yaffs_get_inode() */
++	if (current != yaffs_dev_to_lc(dev)->readdir_process)
++		yaffs_gross_unlock(dev);
++
++	if (obj) {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_lookup found %d", obj->obj_id);
++
++		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++	} else {
++		yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
++
++	}
++
++/* added NCB for 2.5/6 compatibility - forces add even if inode is
++ * NULL which creates dentry hash */
++	d_add(dentry, inode);
++
++	return NULL;
++}
++
++/*
++ * Create a link...
++ */
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++		      struct dentry *dentry)
++{
++	struct inode *inode = old_dentry->d_inode;
++	struct yaffs_obj *obj = NULL;
++	struct yaffs_obj *link = NULL;
++	struct yaffs_dev *dev;
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
++
++	obj = yaffs_inode_to_obj(inode);
++	dev = obj->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	if (!S_ISDIR(inode->i_mode))	/* Don't link directories */
++		link =
++		    yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
++				   obj);
++
++	if (link) {
++		set_nlink(old_dentry->d_inode, yaffs_get_obj_link_count(obj));
++		d_instantiate(dentry, old_dentry->d_inode);
++		atomic_inc(&old_dentry->d_inode->i_count);
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_link link count %d i_count %d",
++			old_dentry->d_inode->i_nlink,
++			atomic_read(&old_dentry->d_inode->i_count));
++	}
++
++	yaffs_gross_unlock(dev);
++
++	if (link) {
++		update_dir_time(dir);
++		return 0;
++	}
++
++	return -EPERM;
++}
++
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++			 const char *symname)
++{
++	struct yaffs_obj *obj;
++	struct yaffs_dev *dev;
++	uid_t uid = YCRED(current)->fsuid;
++	gid_t gid =
++	    (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
++
++	if (strnlen(dentry->d_name.name, YAFFS_MAX_NAME_LENGTH + 1) >
++				YAFFS_MAX_NAME_LENGTH)
++		return -ENAMETOOLONG;
++
++	if (strnlen(symname, YAFFS_MAX_ALIAS_LENGTH + 1) >
++				YAFFS_MAX_ALIAS_LENGTH)
++		return -ENAMETOOLONG;
++
++	dev = yaffs_inode_to_obj(dir)->my_dev;
++	yaffs_gross_lock(dev);
++	obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
++				   S_IFLNK | S_IRWXUGO, uid, gid, symname);
++	yaffs_gross_unlock(dev);
++
++	if (obj) {
++		struct inode *inode;
++
++		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++		d_instantiate(dentry, inode);
++		update_dir_time(dir);
++		yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
++		return 0;
++	} else {
++		yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
++	}
++
++	return -ENOMEM;
++}
++
++/*
++ * The VFS layer already does all the dentry stuff for rename.
++ *
++ * NB: POSIX says you can rename an object over an old object of the same name
++ */
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++			struct inode *new_dir, struct dentry *new_dentry)
++{
++	struct yaffs_dev *dev;
++	int ret_val = YAFFS_FAIL;
++	struct yaffs_obj *target;
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
++	dev = yaffs_inode_to_obj(old_dir)->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	/* Check if the target is an existing directory that is not empty. */
++	target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
++				    new_dentry->d_name.name);
++
++	if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
++	    !list_empty(&target->variant.dir_variant.children)) {
++
++		yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
++
++		ret_val = YAFFS_FAIL;
++	} else {
++		/* Now does unlinking internally using shadowing mechanism */
++		yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
++
++		ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
++					   old_dentry->d_name.name,
++					   yaffs_inode_to_obj(new_dir),
++					   new_dentry->d_name.name);
++	}
++	yaffs_gross_unlock(dev);
++
++	if (ret_val == YAFFS_OK) {
++		if (target)
++			inode_dec_link_count(new_dentry->d_inode);
++
++		update_dir_time(old_dir);
++		if (old_dir != new_dir)
++			update_dir_time(new_dir);
++		return 0;
++	} else {
++		return -ENOTEMPTY;
++	}
++}
++
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
++{
++	int ret_val;
++
++	struct yaffs_dev *dev;
++	struct yaffs_obj *obj;
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_unlink %d:%s",
++		(int)(dir->i_ino), dentry->d_name.name);
++	obj = yaffs_inode_to_obj(dir);
++	dev = obj->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	ret_val = yaffs_unlinker(obj, dentry->d_name.name);
++
++	if (ret_val == YAFFS_OK) {
++		inode_dec_link_count(dentry->d_inode);
++		dir->i_version++;
++		yaffs_gross_unlock(dev);
++		update_dir_time(dir);
++		return 0;
++	}
++	yaffs_gross_unlock(dev);
++	return -ENOTEMPTY;
++}
++
++static const struct inode_operations yaffs_dir_inode_operations = {
++	.create = yaffs_create,
++	.lookup = yaffs_lookup,
++	.link = yaffs_link,
++	.unlink = yaffs_unlink,
++	.symlink = yaffs_symlink,
++	.mkdir = yaffs_mkdir,
++	.rmdir = yaffs_unlink,
++	.mknod = yaffs_mknod,
++	.rename = yaffs_rename,
++	.setattr = yaffs_setattr,
++	.setxattr = yaffs_setxattr,
++	.getxattr = yaffs_getxattr,
++	.listxattr = yaffs_listxattr,
++	.removexattr = yaffs_removexattr,
++};
++
++/*-----------------------------------------------------------------*/
++/* Directory search context allows us to unlock access to yaffs during
++ * filldir without causing problems with the directory being modified.
++ * This is similar to the tried and tested mechanism used in yaffs direct.
++ *
++ * A search context iterates along a doubly linked list of siblings in the
++ * directory. If the iterating object is deleted then this would corrupt
++ * the list iteration, likely causing a crash. The search context avoids
++ * this by using the remove_obj_fn to move the search context to the
++ * next object before the object is deleted.
++ *
++ * Many readdirs (and thus search contexts) may be alive simultaneously so
++ * each struct yaffs_dev has a list of these.
++ *
++ * A search context lives for the duration of a readdir.
++ *
++ * All these functions must be called while yaffs is locked.
++ */
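++
++/*
++ * Typical lifecycle, as used by yaffs_readdir() below:
++ *
++ *   sc = yaffs_new_search(dir_obj);
++ *   while (sc->next_return) {
++ *           ...emit sc->next_return as a directory entry...
++ *           yaffs_search_advance(sc);
++ *   }
++ *   yaffs_search_end(sc);
++ */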
++
++struct yaffs_search_context {
++	struct yaffs_dev *dev;
++	struct yaffs_obj *dir_obj;
++	struct yaffs_obj *next_return;
++	struct list_head others;
++};
++
++/*
++ * yaffs_new_search() creates a new search context, initialises it and
++ * adds it to the device's search context list.
++ *
++ * Called at start of readdir.
++ */
++static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
++{
++	struct yaffs_dev *dev = dir->my_dev;
++	struct yaffs_search_context *sc =
++	    kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
++	if (sc) {
++		sc->dir_obj = dir;
++		sc->dev = dev;
++		if (list_empty(&sc->dir_obj->variant.dir_variant.children))
++			sc->next_return = NULL;
++		else
++			sc->next_return =
++			    list_entry(dir->variant.dir_variant.children.next,
++				       struct yaffs_obj, siblings);
++		INIT_LIST_HEAD(&sc->others);
++		list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
++	}
++	return sc;
++}
++
++/*
++ * yaffs_search_end() disposes of a search context and cleans up.
++ */
++static void yaffs_search_end(struct yaffs_search_context *sc)
++{
++	if (sc) {
++		list_del(&sc->others);
++		kfree(sc);
++	}
++}
++
++/*
++ * yaffs_search_advance() moves a search context to the next object.
++ * Called when the search iterates or when an object removal causes
++ * the search context to be moved to the next object.
++ */
++static void yaffs_search_advance(struct yaffs_search_context *sc)
++{
++	if (!sc)
++		return;
++
++	if (sc->next_return == NULL ||
++	    list_empty(&sc->dir_obj->variant.dir_variant.children))
++		sc->next_return = NULL;
++	else {
++		struct list_head *next = sc->next_return->siblings.next;
++
++		if (next == &sc->dir_obj->variant.dir_variant.children)
++			sc->next_return = NULL;	/* end of list */
++		else
++			sc->next_return =
++			    list_entry(next, struct yaffs_obj, siblings);
++	}
++}
++
++/*
++ * yaffs_remove_obj_callback() is called when an object is unlinked.
++ * We check open search contexts and advance any which are currently
++ * on the object being iterated.
++ */
++static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
++{
++
++	struct list_head *i;
++	struct yaffs_search_context *sc;
++	struct list_head *search_contexts =
++	    &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
++
++	/* Iterate through the directory search contexts.
++	 * If any are currently on the object being removed, then advance
++	 * the search context to the next object to prevent a hanging pointer.
++	 */
++	list_for_each(i, search_contexts) {
++		sc = list_entry(i, struct yaffs_search_context, others);
++		if (sc->next_return == obj)
++			yaffs_search_advance(sc);
++	}
++
++}
++
++
++/*-----------------------------------------------------------------*/
++
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
++{
++	struct yaffs_obj *obj;
++	struct yaffs_dev *dev;
++	struct yaffs_search_context *sc;
++	struct inode *inode = f->f_dentry->d_inode;
++	unsigned long offset, curoffs;
++	struct yaffs_obj *l;
++	int ret_val = 0;
++
++	char name[YAFFS_MAX_NAME_LENGTH + 1];
++
++	obj = yaffs_dentry_to_obj(f->f_dentry);
++	dev = obj->my_dev;
++
++	yaffs_gross_lock(dev);
++
++	yaffs_dev_to_lc(dev)->readdir_process = current;
++
++	offset = f->f_pos;
++
++	sc = yaffs_new_search(obj);
++	if (!sc) {
++		ret_val = -ENOMEM;
++		goto out;
++	}
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_readdir: starting at %d", (int)offset);
++
++	if (offset == 0) {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_readdir: entry . ino %d",
++			(int)inode->i_ino);
++		yaffs_gross_unlock(dev);
++		if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
++			yaffs_gross_lock(dev);
++			goto out;
++		}
++		yaffs_gross_lock(dev);
++		offset++;
++		f->f_pos++;
++	}
++	if (offset == 1) {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_readdir: entry .. ino %d",
++			(int)f->f_dentry->d_parent->d_inode->i_ino);
++		yaffs_gross_unlock(dev);
++		if (filldir(dirent, "..", 2, offset,
++			    f->f_dentry->d_parent->d_inode->i_ino,
++			    DT_DIR) < 0) {
++			yaffs_gross_lock(dev);
++			goto out;
++		}
++		yaffs_gross_lock(dev);
++		offset++;
++		f->f_pos++;
++	}
++
++	curoffs = 1;
++
++	/* If the directory has changed since the open or last call to
++	   readdir, rewind to after the 2 canned entries. */
++	if (f->f_version != inode->i_version) {
++		offset = 2;
++		f->f_pos = offset;
++		f->f_version = inode->i_version;
++	}
++
++	while (sc->next_return) {
++		curoffs++;
++		l = sc->next_return;
++		if (curoffs >= offset) {
++			int this_inode = yaffs_get_obj_inode(l);
++			int this_type = yaffs_get_obj_type(l);
++
++			yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
++			yaffs_trace(YAFFS_TRACE_OS,
++				"yaffs_readdir: %s inode %d",
++				name, yaffs_get_obj_inode(l));
++
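++			/* Drop the gross lock across the filldir callback:
++			 * filldir copies to user space and may fault, and
++			 * servicing the fault could re-enter yaffs and
++			 * deadlock on the same lock (rationale inferred
++			 * from the code, not stated in the source). */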
++			yaffs_gross_unlock(dev);
++
++			if (filldir(dirent,
++				    name,
++				    strlen(name),
++				    offset, this_inode, this_type) < 0) {
++				yaffs_gross_lock(dev);
++				goto out;
++			}
++
++			yaffs_gross_lock(dev);
++
++			offset++;
++			f->f_pos++;
++		}
++		yaffs_search_advance(sc);
++	}
++
++out:
++	yaffs_search_end(sc);
++	yaffs_dev_to_lc(dev)->readdir_process = NULL;
++	yaffs_gross_unlock(dev);
++
++	return ret_val;
++}
++
++static const struct file_operations yaffs_dir_operations = {
++	.read = generic_read_dir,
++	.readdir = yaffs_readdir,
++	.fsync = yaffs_sync_object,
++	.llseek = generic_file_llseek,
++};
++
++static void yaffs_fill_inode_from_obj(struct inode *inode,
++				      struct yaffs_obj *obj)
++{
++	if (inode && obj) {
++
++		/* Check mode against the variant type and attempt to repair if broken. */
++		u32 mode = obj->yst_mode;
++		switch (obj->variant_type) {
++		case YAFFS_OBJECT_TYPE_FILE:
++			if (!S_ISREG(mode)) {
++				obj->yst_mode &= ~S_IFMT;
++				obj->yst_mode |= S_IFREG;
++			}
++
++			break;
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++			if (!S_ISLNK(mode)) {
++				obj->yst_mode &= ~S_IFMT;
++				obj->yst_mode |= S_IFLNK;
++			}
++
++			break;
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++			if (!S_ISDIR(mode)) {
++				obj->yst_mode &= ~S_IFMT;
++				obj->yst_mode |= S_IFDIR;
++			}
++
++			break;
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++		default:
++			/* TODO? */
++			break;
++		}
++
++		inode->i_flags |= S_NOATIME;
++
++		inode->i_ino = obj->obj_id;
++		inode->i_mode = obj->yst_mode;
++		inode->i_uid = obj->yst_uid;
++		inode->i_gid = obj->yst_gid;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++		inode->i_blksize = inode->i_sb->s_blocksize;
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++		inode->i_rdev = old_decode_dev(obj->yst_rdev);
++		inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
++		inode->i_atime.tv_nsec = 0;
++		inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
++		inode->i_mtime.tv_nsec = 0;
++		inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
++		inode->i_ctime.tv_nsec = 0;
++#else
++		inode->i_rdev = obj->yst_rdev;
++		inode->i_atime = obj->yst_atime;
++		inode->i_mtime = obj->yst_mtime;
++		inode->i_ctime = obj->yst_ctime;
++#endif
++		inode->i_size = yaffs_get_obj_length(obj);
++		inode->i_blocks = (inode->i_size + 511) >> 9;
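++		/* i_blocks counts 512-byte sectors, hence the round-up
++		 * shift: a 1000-byte file reports two sectors. */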
++
++		set_nlink(inode, yaffs_get_obj_link_count(obj));
++
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_fill_inode mode %x uid %d gid %d size %lld count %d",
++			inode->i_mode, inode->i_uid, inode->i_gid,
++			inode->i_size, atomic_read(&inode->i_count));
++
++		switch (obj->yst_mode & S_IFMT) {
++		default:	/* fifo, device or socket */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++			init_special_inode(inode, obj->yst_mode,
++					   old_decode_dev(obj->yst_rdev));
++#else
++			init_special_inode(inode, obj->yst_mode,
++					   (dev_t) (obj->yst_rdev));
++#endif
++			break;
++		case S_IFREG:	/* file */
++			inode->i_op = &yaffs_file_inode_operations;
++			inode->i_fop = &yaffs_file_operations;
++			inode->i_mapping->a_ops =
++			    &yaffs_file_address_operations;
++			break;
++		case S_IFDIR:	/* directory */
++			inode->i_op = &yaffs_dir_inode_operations;
++			inode->i_fop = &yaffs_dir_operations;
++			break;
++		case S_IFLNK:	/* symlink */
++			inode->i_op = &yaffs_symlink_inode_operations;
++			break;
++		}
++
++		yaffs_inode_to_obj_lv(inode) = obj;
++
++		obj->my_inode = inode;
++
++	} else {
++		yaffs_trace(YAFFS_TRACE_OS,
++			"yaffs_fill_inode invalid parameters");
++	}
++
++}
++
++
++
++/*
++ * yaffs background thread functions:
++ * yaffs_bg_thread_fn() is the thread function.
++ * yaffs_bg_start() launches the background thread.
++ * yaffs_bg_stop() cleans up the background thread.
++ *
++ * NB:
++ * The thread should only run after yaffs has been initialised.
++ * The thread should be stopped before yaffs is unmounted.
++ * The thread should not do any writing while the fs is read only.
++ */
++
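++/*
++ * Worked example (illustrative numbers, not from the source): with
++ * 64 chunks per block, 30 erased blocks and 6000 free chunks,
++ * erased_chunks = 1920 and scattered = 4080. scattered exceeds
++ * 2 * 64 = 128, and 1920 is below 6000/2 but above 6000/4, so the
++ * urgency returned is 1 (moderate background gc).
++ */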
++static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
++{
++	unsigned erased_chunks =
++	    dev->n_erased_blocks * dev->param.chunks_per_block;
++	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
++	unsigned scattered = 0;	/* Free chunks not in an erased block */
++
++	if (erased_chunks < dev->n_free_chunks)
++		scattered = (dev->n_free_chunks - erased_chunks);
++
++	if (!context->bg_running)
++		return 0;
++	else if (scattered < (dev->param.chunks_per_block * 2))
++		return 0;
++	else if (erased_chunks > dev->n_free_chunks / 2)
++		return 0;
++	else if (erased_chunks > dev->n_free_chunks / 4)
++		return 1;
++	else
++		return 2;
++}
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++
++void yaffs_background_waker(unsigned long data)
++{
++	wake_up_process((struct task_struct *)data);
++}
++
++static int yaffs_bg_thread_fn(void *data)
++{
++	struct yaffs_dev *dev = (struct yaffs_dev *)data;
++	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
++	unsigned long now = jiffies;
++	unsigned long next_dir_update = now;
++	unsigned long next_gc = now;
++	unsigned long expires;
++	unsigned int urgency;
++
++	int gc_result;
++	struct timer_list timer;
++
++	yaffs_trace(YAFFS_TRACE_BACKGROUND,
++		"yaffs_background starting for dev %p", (void *)dev);
++
++#ifdef YAFFS_COMPILE_FREEZER
++	set_freezable();
++#endif
++	while (context->bg_running) {
++		yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
++
++		if (kthread_should_stop())
++			break;
++
++#ifdef YAFFS_COMPILE_FREEZER
++		if (try_to_freeze())
++			continue;
++#endif
++		yaffs_gross_lock(dev);
++
++		now = jiffies;
++
++		if (time_after(now, next_dir_update) && yaffs_bg_enable) {
++			yaffs_update_dirty_dirs(dev);
++			next_dir_update = now + HZ;
++		}
++
++		if (time_after(now, next_gc) && yaffs_bg_enable) {
++			if (!dev->is_checkpointed) {
++				urgency = yaffs_bg_gc_urgency(dev);
++				gc_result = yaffs_bg_gc(dev, urgency);
++				if (urgency > 1)
++					next_gc = now + HZ / 20 + 1;
++				else if (urgency > 0)
++					next_gc = now + HZ / 10 + 1;
++				else
++					next_gc = now + HZ * 2;
++		} else {
++			/*
++			 * gc not running so set to next_dir_update
++			 * to cut down on wake ups
++			 */
++			next_gc = next_dir_update;
++		}
++		}
++		yaffs_gross_unlock(dev);
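++		/*
++		 * Sleep until the earlier of the gc and dir-update
++		 * deadlines using a one-shot timer rather than the
++		 * msleep(10) poll kept in the #else branch; kthread_stop()
++		 * can still end the sleep early via wake_up_process().
++		 */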
++#if 1
++		expires = next_dir_update;
++		if (time_before(next_gc, expires))
++			expires = next_gc;
++		if (time_before(expires, now))
++			expires = now + HZ;
++
++		Y_INIT_TIMER(&timer);
++		timer.expires = expires + 1;
++		timer.data = (unsigned long)current;
++		timer.function = yaffs_background_waker;
++
++		set_current_state(TASK_INTERRUPTIBLE);
++		add_timer(&timer);
++		schedule();
++		del_timer_sync(&timer);
++#else
++		msleep(10);
++#endif
++	}
++
++	return 0;
++}
++
++static int yaffs_bg_start(struct yaffs_dev *dev)
++{
++	int retval = 0;
++	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
++
++	if (dev->read_only)
++		return -1;
++
++	context->bg_running = 1;
++
++	context->bg_thread = kthread_run(yaffs_bg_thread_fn,
++					 (void *)dev, "yaffs-bg-%d",
++					 context->mount_id);
++
++	if (IS_ERR(context->bg_thread)) {
++		retval = PTR_ERR(context->bg_thread);
++		context->bg_thread = NULL;
++		context->bg_running = 0;
++	}
++	return retval;
++}
++
++static void yaffs_bg_stop(struct yaffs_dev *dev)
++{
++	struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
++
++	ctxt->bg_running = 0;
++
++	if (ctxt->bg_thread) {
++		kthread_stop(ctxt->bg_thread);
++		ctxt->bg_thread = NULL;
++	}
++}
++#else
++static int yaffs_bg_thread_fn(void *data)
++{
++	return 0;
++}
++
++static int yaffs_bg_start(struct yaffs_dev *dev)
++{
++	return 0;
++}
++
++static void yaffs_bg_stop(struct yaffs_dev *dev)
++{
++}
++#endif
++
++
++static void yaffs_flush_inodes(struct super_block *sb)
++{
++	struct inode *iptr;
++	struct yaffs_obj *obj;
++
++	list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
++		obj = yaffs_inode_to_obj(iptr);
++		if (obj) {
++			yaffs_trace(YAFFS_TRACE_OS,
++				"flushing obj %d",
++				obj->obj_id);
++			yaffs_flush_file(obj, 1, 0);
++		}
++	}
++}
++
++static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
++{
++	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++	if (!dev)
++		return;
++
++	yaffs_flush_inodes(sb);
++	yaffs_update_dirty_dirs(dev);
++	yaffs_flush_whole_cache(dev);
++	if (do_checkpoint)
++		yaffs_checkpoint_save(dev);
++}
++
++static LIST_HEAD(yaffs_context_list);
++struct mutex yaffs_context_lock;
++
++static void yaffs_put_super(struct super_block *sb)
++{
++	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++
++	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
++			"yaffs_put_super");
++
++	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++		"Shutting down yaffs background thread");
++	yaffs_bg_stop(dev);
++	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++		"yaffs background thread shut down");
++
++	yaffs_gross_lock(dev);
++
++	yaffs_flush_super(sb, 1);
++
++	yaffs_deinitialise(dev);
++
++	yaffs_gross_unlock(dev);
++
++	mutex_lock(&yaffs_context_lock);
++	list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
++	mutex_unlock(&yaffs_context_lock);
++
++	if (yaffs_dev_to_lc(dev)->spare_buffer) {
++		kfree(yaffs_dev_to_lc(dev)->spare_buffer);
++		yaffs_dev_to_lc(dev)->spare_buffer = NULL;
++	}
++
++	kfree(dev);
++
++	yaffs_put_mtd_device(mtd);
++
++	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
++			"yaffs_put_super done");
++}
++
++
++static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
++{
++	return yaffs_gc_control;
++}
++
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++
++static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
++					  uint32_t generation)
++{
++	return Y_IGET(sb, ino);
++}
++
++static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
++					  struct fid *fid, int fh_len,
++					  int fh_type)
++{
++	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
++				    yaffs2_nfs_get_inode);
++}
++
++static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
++					  struct fid *fid, int fh_len,
++					  int fh_type)
++{
++	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
++				    yaffs2_nfs_get_inode);
++}
++
++struct dentry *yaffs2_get_parent(struct dentry *dentry)
++{
++
++	struct super_block *sb = dentry->d_inode->i_sb;
++	struct dentry *parent = ERR_PTR(-ENOENT);
++	struct inode *inode;
++	unsigned long parent_ino;
++	struct yaffs_obj *d_obj;
++	struct yaffs_obj *parent_obj;
++
++	d_obj = yaffs_inode_to_obj(dentry->d_inode);
++
++	if (d_obj) {
++		parent_obj = d_obj->parent;
++		if (parent_obj) {
++			parent_ino = yaffs_get_obj_inode(parent_obj);
++			inode = Y_IGET(sb, parent_ino);
++
++			if (IS_ERR(inode)) {
++				parent = ERR_CAST(inode);
++			} else {
++				parent = d_obtain_alias(inode);
++				if (!parent) {
++					parent = ERR_PTR(-ENOMEM);
++					iput(inode);
++				}
++			}
++		}
++	}
++
++	return parent;
++}
++
++/* Any function left NULL in this structure implies using the
++ * default exportfs implementation for that operation.
++ */
++
++static struct export_operations yaffs_export_ops = {
++	.fh_to_dentry = yaffs2_fh_to_dentry,
++	.fh_to_parent = yaffs2_fh_to_parent,
++	.get_parent = yaffs2_get_parent,
++};
++
++#endif
++
++static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
++{
++	/* Clear the association between the inode and
++	 * the struct yaffs_obj.
++	 */
++	obj->my_inode = NULL;
++	yaffs_inode_to_obj_lv(inode) = NULL;
++
++	/* If the object freeing was deferred, then the real
++	 * free happens now.
++	 * This should fix the inode inconsistency problem.
++	 */
++	yaffs_handle_defered_free(obj);
++}
++
++#ifdef YAFFS_HAS_EVICT_INODE
++/* yaffs_evict_inode combines into one operation what was previously done in
++ * yaffs_clear_inode() and yaffs_delete_inode()
++ *
++ */
++static void yaffs_evict_inode(struct inode *inode)
++{
++	struct yaffs_obj *obj;
++	struct yaffs_dev *dev;
++	int deleteme = 0;
++
++	obj = yaffs_inode_to_obj(inode);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_evict_inode: ino %d, count %d %s",
++		(int)inode->i_ino, atomic_read(&inode->i_count),
++		obj ? "object exists" : "null object");
++
++	if (!inode->i_nlink && !is_bad_inode(inode))
++		deleteme = 1;
++	truncate_inode_pages(&inode->i_data, 0);
++	Y_CLEAR_INODE(inode);
++
++	if (deleteme && obj) {
++		dev = obj->my_dev;
++		yaffs_gross_lock(dev);
++		yaffs_del_obj(obj);
++		yaffs_gross_unlock(dev);
++	}
++	if (obj) {
++		dev = obj->my_dev;
++		yaffs_gross_lock(dev);
++		yaffs_unstitch_obj(inode, obj);
++		yaffs_gross_unlock(dev);
++	}
++}
++#else
++
++/* clear is called to tell the fs to release any per-inode data it holds.
++ * The object might still exist on disk and is just being thrown out of the cache
++ * or else the object has actually been deleted and we're being called via
++ * the chain
++ *   yaffs_delete_inode() -> clear_inode()->yaffs_clear_inode()
++ */
++
++static void yaffs_clear_inode(struct inode *inode)
++{
++	struct yaffs_obj *obj;
++	struct yaffs_dev *dev;
++
++	obj = yaffs_inode_to_obj(inode);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_clear_inode: ino %d, count %d %s",
++		(int)inode->i_ino, atomic_read(&inode->i_count),
++		obj ? "object exists" : "null object");
++
++	if (obj) {
++		dev = obj->my_dev;
++		yaffs_gross_lock(dev);
++		yaffs_unstitch_obj(inode, obj);
++		yaffs_gross_unlock(dev);
++	}
++
++}
++
++/* delete is called when the link count is zero and the inode
++ * is put (ie. nobody wants to know about it anymore, time to
++ * delete the file).
++ * NB Must call clear_inode()
++ */
++static void yaffs_delete_inode(struct inode *inode)
++{
++	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++	struct yaffs_dev *dev;
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_delete_inode: ino %d, count %d %s",
++		(int)inode->i_ino, atomic_read(&inode->i_count),
++		obj ? "object exists" : "null object");
++
++	if (obj) {
++		dev = obj->my_dev;
++		yaffs_gross_lock(dev);
++		yaffs_del_obj(obj);
++		yaffs_gross_unlock(dev);
++	}
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++	truncate_inode_pages(&inode->i_data, 0);
++#endif
++	clear_inode(inode);
++}
++#endif
++
++
++
++
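++/*
++ * Illustrative arithmetic (numbers are assumptions, not from the source):
++ * with 2048-byte chunks, 64 chunks per block and a 4096-byte
++ * sb->s_blocksize, the chunk size is a power of two and smaller than the
++ * fs block size, so the middle branch below applies:
++ *   f_blocks = n_blocks * 64 / (4096 / 2048)
++ * and a device spanning blocks 0..1023 reports 32768 f_blocks.
++ */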
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++	struct super_block *sb = dentry->d_sb;
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
++{
++	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
++{
++	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++#endif
++
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
++
++	yaffs_gross_lock(dev);
++
++	buf->f_type = YAFFS_MAGIC;
++	buf->f_bsize = sb->s_blocksize;
++	buf->f_namelen = 255;
++
++	if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
++		/* Do this if chunk size is not a power of 2 */
++
++		uint64_t bytes_in_dev;
++		uint64_t bytes_free;
++
++		bytes_in_dev =
++		    ((uint64_t)
++		     ((dev->param.end_block - dev->param.start_block +
++		       1))) * ((uint64_t) (dev->param.chunks_per_block *
++					   dev->data_bytes_per_chunk));
++
++		do_div(bytes_in_dev, sb->s_blocksize);	/* bytes_in_dev becomes the number of blocks */
++		buf->f_blocks = bytes_in_dev;
++
++		bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
++		    ((uint64_t) (dev->data_bytes_per_chunk));
++
++		do_div(bytes_free, sb->s_blocksize);
++
++		buf->f_bfree = bytes_free;
++
++	} else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
++
++		buf->f_blocks =
++		    (dev->param.end_block - dev->param.start_block + 1) *
++		    dev->param.chunks_per_block /
++		    (sb->s_blocksize / dev->data_bytes_per_chunk);
++		buf->f_bfree =
++		    yaffs_get_n_free_chunks(dev) /
++		    (sb->s_blocksize / dev->data_bytes_per_chunk);
++	} else {
++		buf->f_blocks =
++		    (dev->param.end_block - dev->param.start_block + 1) *
++		    dev->param.chunks_per_block *
++		    (dev->data_bytes_per_chunk / sb->s_blocksize);
++
++		buf->f_bfree =
++		    yaffs_get_n_free_chunks(dev) *
++		    (dev->data_bytes_per_chunk / sb->s_blocksize);
++	}
++
++	buf->f_files = 0;
++	buf->f_ffree = 0;
++	buf->f_bavail = buf->f_bfree;
++
++	yaffs_gross_unlock(dev);
++	return 0;
++}
++
++
++
++static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
++{
++
++	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++	unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
++	unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
++	int do_checkpoint;
++	int dirty = yaffs_check_super_dirty(dev);
++
++	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++		"yaffs_do_sync_fs: gc-urgency %d %s %s%s",
++		gc_urgent,
++		dirty ? "dirty" : "clean",
++		request_checkpoint ? "checkpoint requested" : "no checkpoint",
++		oneshot_checkpoint ? " one-shot" : "");
++
++	yaffs_gross_lock(dev);
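++	/* Checkpoint only if one was requested and gc is not urgent, or if
++	 * a one-shot was armed (yaffs_auto_checkpoint bit 2), and never
++	 * when the device is already checkpointed. */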
++	do_checkpoint = ((request_checkpoint && !gc_urgent) ||
++			 oneshot_checkpoint) && !dev->is_checkpointed;
++
++	if (dirty || do_checkpoint) {
++		yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
++		yaffs_clear_super_dirty(dev);
++		if (oneshot_checkpoint)
++			yaffs_auto_checkpoint &= ~4;
++	}
++	yaffs_gross_unlock(dev);
++
++	return 0;
++}
++
++
++#ifdef YAFFS_HAS_WRITE_SUPER
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static void yaffs_write_super(struct super_block *sb)
++#else
++static int yaffs_write_super(struct super_block *sb)
++#endif
++{
++	unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
++
++	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++		"yaffs_write_super %s",
++		request_checkpoint ? " checkpt" : "");
++
++	yaffs_do_sync_fs(sb, request_checkpoint);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
++	return 0;
++#endif
++}
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_sync_fs(struct super_block *sb, int wait)
++#else
++static int yaffs_sync_fs(struct super_block *sb)
++#endif
++{
++	unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
++
++	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++		"yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
++
++	yaffs_do_sync_fs(sb, request_checkpoint);
++
++	return 0;
++}
++
++
++
++static const struct super_operations yaffs_super_ops = {
++	.statfs = yaffs_statfs,
++
++#ifndef YAFFS_USE_OWN_IGET
++	.read_inode = yaffs_read_inode,
++#endif
++#ifdef YAFFS_HAS_PUT_INODE
++	.put_inode = yaffs_put_inode,
++#endif
++	.put_super = yaffs_put_super,
++#ifdef YAFFS_HAS_EVICT_INODE
++	.evict_inode = yaffs_evict_inode,
++#else
++	.delete_inode = yaffs_delete_inode,
++	.clear_inode = yaffs_clear_inode,
++#endif
++	.sync_fs = yaffs_sync_fs,
++#ifdef YAFFS_HAS_WRITE_SUPER
++	.write_super = yaffs_write_super,
++#endif
++};
++
++struct yaffs_options {
++	int inband_tags;
++	int skip_checkpoint_read;
++	int skip_checkpoint_write;
++	int no_cache;
++	int tags_ecc_on;
++	int tags_ecc_overridden;
++	int lazy_loading_enabled;
++	int lazy_loading_overridden;
++	int empty_lost_and_found;
++	int empty_lost_and_found_overridden;
++	int disable_summary;
++};
++
++#define MAX_OPT_LEN 30
++static int yaffs_parse_options(struct yaffs_options *options,
++			       const char *options_str)
++{
++	char cur_opt[MAX_OPT_LEN + 1];
++	int p;
++	int error = 0;
++
++	/* Parse the options, which are a comma-separated list */
++
++	while (options_str && *options_str && !error) {
++		memset(cur_opt, 0, MAX_OPT_LEN + 1);
++		p = 0;
++
++		while (*options_str == ',')
++			options_str++;
++
++		while (*options_str && *options_str != ',') {
++			if (p < MAX_OPT_LEN) {
++				cur_opt[p] = *options_str;
++				p++;
++			}
++			options_str++;
++		}
++
++		if (!strcmp(cur_opt, "inband-tags")) {
++			options->inband_tags = 1;
++		} else if (!strcmp(cur_opt, "tags-ecc-off")) {
++			options->tags_ecc_on = 0;
++			options->tags_ecc_overridden = 1;
++		} else if (!strcmp(cur_opt, "tags-ecc-on")) {
++			options->tags_ecc_on = 1;
++			options->tags_ecc_overridden = 1;
++		} else if (!strcmp(cur_opt, "lazy-loading-off")) {
++			options->lazy_loading_enabled = 0;
++			options->lazy_loading_overridden = 1;
++		} else if (!strcmp(cur_opt, "lazy-loading-on")) {
++			options->lazy_loading_enabled = 1;
++			options->lazy_loading_overridden = 1;
++		} else if (!strcmp(cur_opt, "disable-summary")) {
++			options->disable_summary = 1;
++		} else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
++			options->empty_lost_and_found = 0;
++			options->empty_lost_and_found_overridden = 1;
++		} else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
++			options->empty_lost_and_found = 1;
++			options->empty_lost_and_found_overridden = 1;
++		} else if (!strcmp(cur_opt, "no-cache")) {
++			options->no_cache = 1;
++		} else if (!strcmp(cur_opt, "no-checkpoint-read")) {
++			options->skip_checkpoint_read = 1;
++		} else if (!strcmp(cur_opt, "no-checkpoint-write")) {
++			options->skip_checkpoint_write = 1;
++		} else if (!strcmp(cur_opt, "no-checkpoint")) {
++			options->skip_checkpoint_read = 1;
++			options->skip_checkpoint_write = 1;
++		} else {
++			printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
++			       cur_opt);
++			error = 1;
++		}
++	}
++
++	return error;
++}
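++
++/*
++ * Illustrative use (device path assumed, not from the source): mounting
++ * with
++ *   mount -t yaffs2 -o inband-tags,no-checkpoint /dev/mtdblock3 /mnt
++ * sets inband_tags and both skip_checkpoint flags above; any
++ * unrecognised option makes the mount fail with "Bad mount option".
++ */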
++
++
++static struct dentry *yaffs_make_root(struct inode *inode)
++{
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
++	struct dentry *root = d_alloc_root(inode);
++
++	if (!root)
++		iput(inode);
++
++	return root;
++#else
++	return d_make_root(inode);
++#endif
++}
++
++
++
++
++static struct super_block *yaffs_internal_read_super(int yaffs_version,
++						     struct super_block *sb,
++						     void *data, int silent)
++{
++	int n_blocks;
++	struct inode *inode = NULL;
++	struct dentry *root;
++	struct yaffs_dev *dev = NULL;
++	char devname_buf[BDEVNAME_SIZE + 1];
++	struct mtd_info *mtd;
++	int err;
++	char *data_str = (char *)data;
++	struct yaffs_linux_context *context = NULL;
++	struct yaffs_param *param;
++
++	int read_only = 0;
++
++	struct yaffs_options options;
++
++	unsigned mount_id;
++	int found;
++	struct yaffs_linux_context *context_iterator;
++	struct list_head *l;
++
++	if (!sb) {
++		printk(KERN_INFO "yaffs: sb is NULL\n");
++		return NULL;
++	}
++
++	sb->s_magic = YAFFS_MAGIC;
++	sb->s_op = &yaffs_super_ops;
++	sb->s_flags |= MS_NOATIME;
++
++	read_only = ((sb->s_flags & MS_RDONLY) != 0);
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++	sb->s_export_op = &yaffs_export_ops;
++#endif
++
++	if (!sb->s_dev)
++		printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
++	else if (!yaffs_devname(sb, devname_buf))
++		printk(KERN_INFO "yaffs: devname is NULL\n");
++	else
++		printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
++		       sb->s_dev,
++		       yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
++
++	if (!data_str)
++		data_str = "";
++
++	printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
++
++	memset(&options, 0, sizeof(options));
++
++	if (yaffs_parse_options(&options, data_str)) {
++		/* Option parsing failed */
++		return NULL;
++	}
++
++	sb->s_blocksize = PAGE_CACHE_SIZE;
++	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_read_super: Using yaffs%d", yaffs_version);
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_read_super: block size %d", (int)(sb->s_blocksize));
++
++	yaffs_trace(YAFFS_TRACE_ALWAYS,
++		"yaffs: Attempting MTD mount of %u.%u,\"%s\"",
++		MAJOR(sb->s_dev), MINOR(sb->s_dev),
++		yaffs_devname(sb, devname_buf));
++
++	/* Get the device */
++	mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
++	if (!mtd) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"yaffs: MTD device %u either not valid or unavailable",
++			MINOR(sb->s_dev));
++		return NULL;
++	}
++
++	if (yaffs_auto_select && yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
++		yaffs_version = 2;
++	}
++
++	/* Added NCB 26/5/2006 for completeness */
++	if (yaffs_version == 2 && !options.inband_tags
++	    && WRITE_SIZE(mtd) == 512) {
++		yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
++		yaffs_version = 1;
++	}
++
++	if (yaffs_verify_mtd(mtd, yaffs_version, options.inband_tags) < 0)
++		return NULL;
++
++	/* OK, so if we got here, we have an MTD that's NAND and looks
++	 * like it has the right capabilities
++	 * Set the struct yaffs_dev up for mtd
++	 */
++
++	if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
++		read_only = 1;
++		printk(KERN_INFO
++		       "yaffs: mtd is read only, setting superblock read only\n"
++		);
++		sb->s_flags |= MS_RDONLY;
++	}
++
++	dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
++	context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
++
++	if (!dev || !context) {
++		if (dev)
++			kfree(dev);
++		if (context)
++			kfree(context);
++		dev = NULL;
++		context = NULL;
++	}
++
++	if (!dev) {
++		/* Deep shit could not allocate device structure */
++		yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"yaffs_read_super: Failed trying to allocate struct yaffs_dev."
++		);
++		return NULL;
++	}
++	memset(dev, 0, sizeof(struct yaffs_dev));
++	param = &(dev->param);
++
++	memset(context, 0, sizeof(struct yaffs_linux_context));
++	dev->os_context = context;
++	INIT_LIST_HEAD(&(context->context_list));
++	context->dev = dev;
++	context->super = sb;
++
++	dev->read_only = read_only;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++	sb->s_fs_info = dev;
++#else
++	sb->u.generic_sbp = dev;
++#endif
++
++
++	dev->driver_context = mtd;
++	param->name = mtd->name;
++
++	/* Set up the memory size parameters.... */
++
++
++	param->n_reserved_blocks = 5;
++	param->n_caches = (options.no_cache) ? 0 : 10;
++	param->inband_tags = options.inband_tags;
++
++	param->enable_xattr = 1;
++	if (options.lazy_loading_overridden)
++		param->disable_lazy_load = !options.lazy_loading_enabled;
++
++	param->defered_dir_update = 1;
++
++	if (options.tags_ecc_overridden)
++		param->no_tags_ecc = !options.tags_ecc_on;
++
++	param->empty_lost_n_found = 1;
++	param->refresh_period = 500;
++	param->disable_summary = options.disable_summary;
++
++	if (options.empty_lost_and_found_overridden)
++		param->empty_lost_n_found = options.empty_lost_and_found;
++
++	/* ... and the functions. */
++	if (yaffs_version == 2) {
++		param->is_yaffs2 = 1;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++		param->total_bytes_per_chunk = mtd->writesize;
++		param->chunks_per_block = mtd->erasesize / mtd->writesize;
++#else
++		param->total_bytes_per_chunk = mtd->oobblock;
++		param->chunks_per_block = mtd->erasesize / mtd->oobblock;
++#endif
++		n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
++
++		param->start_block = 0;
++		param->end_block = n_blocks - 1;
++	} else {
++		param->is_yaffs2 = 0;
++		n_blocks = YCALCBLOCKS(mtd->size,
++			     YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
++
++		param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
++		param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
++	}
++
++	param->start_block = 0;
++	param->end_block = n_blocks - 1;
++
++	yaffs_mtd_drv_install(dev);
++
++	param->sb_dirty_fn = yaffs_set_super_dirty;
++	param->gc_control_fn = yaffs_gc_control_callback;
++
++	yaffs_dev_to_lc(dev)->super = sb;
++
++	param->use_nand_ecc = 1;
++
++	param->skip_checkpt_rd = options.skip_checkpoint_read;
++	param->skip_checkpt_wr = options.skip_checkpoint_write;
++
++	mutex_lock(&yaffs_context_lock);
++	/* Get a mount id: the lowest id not used by any mounted context */
++	found = 0;
++	for (mount_id = 0; !found; mount_id++) {
++		found = 1;
++		list_for_each(l, &yaffs_context_list) {
++			context_iterator =
++			    list_entry(l, struct yaffs_linux_context,
++				       context_list);
++			if (context_iterator->mount_id == mount_id)
++				found = 0;
++		}
++	}
++	context->mount_id = mount_id;
++
++	list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
++		      &yaffs_context_list);
++	mutex_unlock(&yaffs_context_lock);
++
++	/* Directory search handling... */
++	INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
++	param->remove_obj_fn = yaffs_remove_obj_callback;
++
++	mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
++
++	yaffs_gross_lock(dev);
++
++	err = yaffs_guts_initialise(dev);
++
++	yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_read_super: guts initialised %s",
++		(err == YAFFS_OK) ? "OK" : "FAILED");
++
++	if (err == YAFFS_OK)
++		yaffs_bg_start(dev);
++
++	if (!context->bg_thread)
++		param->defered_dir_update = 0;
++
++	sb->s_maxbytes = yaffs_max_file_size(dev);
++
++	/* Release lock before yaffs_get_inode() */
++	yaffs_gross_unlock(dev);
++
++	/* Create root inode */
++	if (err == YAFFS_OK)
++		inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
++
++	if (!inode)
++		return NULL;
++
++	inode->i_op = &yaffs_dir_inode_operations;
++	inode->i_fop = &yaffs_dir_operations;
++
++	yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs_read_super: got root inode");
++
++	root = yaffs_make_root(inode);
++
++	if (!root)
++		return NULL;
++
++	sb->s_root = root;
++	if (!dev->is_checkpointed)
++		yaffs_set_super_dirty(dev);
++
++	yaffs_trace(YAFFS_TRACE_ALWAYS,
++		"yaffs_read_super: is_checkpointed %d",
++		dev->is_checkpointed);
++
++	yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs_read_super: done");
++	return sb;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
++					 int silent)
++{
++	return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags,
++				  const char *dev_name, void *data)
++{
++	return mount_bdev(fs_type, flags, dev_name, data,
++			  yaffs_internal_read_super_mtd);
++}
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_read_super(struct file_system_type *fs,
++			    int flags, const char *dev_name,
++			    void *data, struct vfsmount *mnt)
++{
++
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs_read_super(struct file_system_type *fs,
++					    int flags, const char *dev_name,
++					    void *data)
++{
++
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "yaffs",
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++	.mount = yaffs_mount,
++#else
++	.get_sb = yaffs_read_super,
++#endif
++	.kill_sb = kill_block_super,
++	.fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
++					    int silent)
++{
++	return yaffs_internal_read_super(1, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
++		      FS_REQUIRES_DEV);
++#endif
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
++					  int silent)
++{
++	return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags,
++				   const char *dev_name, void *data)
++{
++	return mount_bdev(fs_type, flags, dev_name, data,
++			  yaffs2_internal_read_super_mtd);
++}
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs2_read_super(struct file_system_type *fs,
++			     int flags, const char *dev_name, void *data,
++			     struct vfsmount *mnt)
++{
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs2_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs2_read_super(struct file_system_type *fs,
++					     int flags, const char *dev_name,
++					     void *data)
++{
++
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs2_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs2_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "yaffs2",
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++	.mount = yaffs2_mount,
++#else
++	.get_sb = yaffs2_read_super,
++#endif
++	.kill_sb = kill_block_super,
++	.fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs2_read_super(struct super_block *sb,
++					     void *data, int silent)
++{
++	return yaffs_internal_read_super(2, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
++		      FS_REQUIRES_DEV);
++#endif
++
++
++static struct proc_dir_entry *my_proc_entry;
++
++static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
++{
++	struct yaffs_param *param = &dev->param;
++
++	buf += sprintf(buf, "start_block.......... %d\n", param->start_block);
++	buf += sprintf(buf, "end_block............ %d\n", param->end_block);
++	buf += sprintf(buf, "total_bytes_per_chunk %d\n",
++				param->total_bytes_per_chunk);
++	buf += sprintf(buf, "use_nand_ecc......... %d\n", param->use_nand_ecc);
++	buf += sprintf(buf, "no_tags_ecc.......... %d\n", param->no_tags_ecc);
++	buf += sprintf(buf, "is_yaffs2............ %d\n", param->is_yaffs2);
++	buf += sprintf(buf, "inband_tags.......... %d\n", param->inband_tags);
++	buf += sprintf(buf, "empty_lost_n_found... %d\n",
++				param->empty_lost_n_found);
++	buf += sprintf(buf, "disable_lazy_load.... %d\n",
++				param->disable_lazy_load);
++	buf += sprintf(buf, "refresh_period....... %d\n",
++				param->refresh_period);
++	buf += sprintf(buf, "n_caches............. %d\n", param->n_caches);
++	buf += sprintf(buf, "n_reserved_blocks.... %d\n",
++				param->n_reserved_blocks);
++	buf += sprintf(buf, "always_check_erased.. %d\n",
++				param->always_check_erased);
++	buf += sprintf(buf, "\n");
++
++	return buf;
++}
++
++static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
++{
++	buf += sprintf(buf, "max file size....... %lld\n",
++				(long long) yaffs_max_file_size(dev));
++	buf += sprintf(buf, "data_bytes_per_chunk. %d\n",
++				dev->data_bytes_per_chunk);
++	buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits);
++	buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size);
++	buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks);
++	buf += sprintf(buf, "blocks_in_checkpt.... %d\n",
++				dev->blocks_in_checkpt);
++	buf += sprintf(buf, "\n");
++	buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes);
++	buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj);
++	buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks);
++	buf += sprintf(buf, "\n");
++	buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes);
++	buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads);
++	buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures);
++	buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies);
++	buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs);
++	buf += sprintf(buf, "passive_gc_count..... %u\n",
++				dev->passive_gc_count);
++	buf += sprintf(buf, "oldest_dirty_gc_count %u\n",
++				dev->oldest_dirty_gc_count);
++	buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks);
++	buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs);
++	buf += sprintf(buf, "n_retried_writes..... %u\n",
++				dev->n_retried_writes);
++	buf += sprintf(buf, "n_retired_blocks..... %u\n",
++				dev->n_retired_blocks);
++	buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed);
++	buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed);
++	buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n",
++				dev->n_tags_ecc_fixed);
++	buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n",
++				dev->n_tags_ecc_unfixed);
++	buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits);
++	buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files);
++	buf += sprintf(buf, "n_unlinked_files..... %u\n",
++				dev->n_unlinked_files);
++	buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count);
++	buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions);
++	buf += sprintf(buf, "tags_used............ %u\n", dev->tags_used);
++	buf += sprintf(buf, "summary_used......... %u\n", dev->summary_used);
++
++	return buf;
++}
++
++static int yaffs_proc_read(char *page,
++			   char **start,
++			   off_t offset, int count, int *eof, void *data)
++{
++	struct list_head *item;
++	char *buf = page;
++	int step = offset;
++	int n = 0;
++
++	/* Get proc_file_read() to step 'offset' by one on each successive call.
++	 * We use 'offset' (*ppos) to indicate where we are in dev_list.
++	 * This also assumes the user has posted a read buffer large
++	 * enough to hold the complete output; but that's life in /proc.
++	 */
++
++	*(int *)start = 1;
++
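++	/* Steps 0 and 1 emit the header; after that, step 2n+2 prints
++	 * part 0 of device n and step 2n+3 prints part 1, so each
++	 * mounted device consumes two read steps. */
++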
++	/* Print header first */
++	if (step == 0)
++		buf +=
++		    sprintf(buf,
++			    "Multi-version YAFFS built:" __DATE__ " " __TIME__
++			    "\n");
++	else if (step == 1)
++		buf += sprintf(buf, "\n");
++	else {
++		step -= 2;
++
++		mutex_lock(&yaffs_context_lock);
++
++		/* Locate and print the Nth entry.  Order N-squared but N is small. */
++		list_for_each(item, &yaffs_context_list) {
++			struct yaffs_linux_context *dc =
++			    list_entry(item, struct yaffs_linux_context,
++				       context_list);
++			struct yaffs_dev *dev = dc->dev;
++
++			if (n < (step & ~1)) {
++				n += 2;
++				continue;
++			}
++			if ((step & 1) == 0) {
++				buf +=
++				    sprintf(buf, "\nDevice %d \"%s\"\n", n,
++					    dev->param.name);
++				buf = yaffs_dump_dev_part0(buf, dev);
++			} else {
++				buf = yaffs_dump_dev_part1(buf, dev);
++			}
++
++			break;
++		}
++		mutex_unlock(&yaffs_context_lock);
++	}
++
++	return buf - page < count ? buf - page : count;
++}
++
++/**
++ * Set the verbosity of the warnings and error messages.
++ *
++ * Note that the names can only be a..z or _ with the current code.
++ */
++
++static struct {
++	char *mask_name;
++	unsigned mask_bitfield;
++} mask_flags[] = {
++	{"allocate", YAFFS_TRACE_ALLOCATE},
++	{"always", YAFFS_TRACE_ALWAYS},
++	{"background", YAFFS_TRACE_BACKGROUND},
++	{"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
++	{"buffers", YAFFS_TRACE_BUFFERS},
++	{"bug", YAFFS_TRACE_BUG},
++	{"checkpt", YAFFS_TRACE_CHECKPOINT},
++	{"deletion", YAFFS_TRACE_DELETION},
++	{"erase", YAFFS_TRACE_ERASE},
++	{"error", YAFFS_TRACE_ERROR},
++	{"gc_detail", YAFFS_TRACE_GC_DETAIL},
++	{"gc", YAFFS_TRACE_GC},
++	{"lock", YAFFS_TRACE_LOCK},
++	{"mtd", YAFFS_TRACE_MTD},
++	{"nandaccess", YAFFS_TRACE_NANDACCESS},
++	{"os", YAFFS_TRACE_OS},
++	{"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
++	{"scan", YAFFS_TRACE_SCAN},
++	{"mount", YAFFS_TRACE_MOUNT},
++	{"tracing", YAFFS_TRACE_TRACING},
++	{"sync", YAFFS_TRACE_SYNC},
++	{"write", YAFFS_TRACE_WRITE},
++	{"verify", YAFFS_TRACE_VERIFY},
++	{"verify_nand", YAFFS_TRACE_VERIFY_NAND},
++	{"verify_full", YAFFS_TRACE_VERIFY_FULL},
++	{"verify_all", YAFFS_TRACE_VERIFY_ALL},
++	{"all", 0xffffffff},
++	{"none", 0},
++	{NULL, 0},
++};
++
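++/*
++ * Illustrative use (proc path assumed): `echo "+scan -os" > /proc/yaffs`
++ * sets the scan bit and clears the os bit; a name or number without a
++ * prefix is OR-ed in, and "=none" clears everything except
++ * YAFFS_TRACE_ALWAYS, which is forced back on after parsing.
++ */
++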
++#define MAX_MASK_NAME_LENGTH 40
++static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
++					  unsigned long count, void *data)
++{
++	unsigned rg = 0, mask_bitfield;
++	char *end;
++	char *mask_name;
++	const char *x;
++	char substring[MAX_MASK_NAME_LENGTH + 1];
++	int i;
++	int done = 0;
++	int add, len = 0;
++	int pos = 0;
++
++	rg = yaffs_trace_mask;
++
++	while (!done && (pos < count)) {
++		done = 1;
++		while ((pos < count) && isspace(buf[pos]))
++			pos++;
++
++		switch (buf[pos]) {
++		case '+':
++		case '-':
++		case '=':
++			add = buf[pos];
++			pos++;
++			break;
++
++		default:
++			add = ' ';
++			break;
++		}
++		mask_name = NULL;
++
++		mask_bitfield = simple_strtoul(buf + pos, &end, 0);
++
++		if (end > buf + pos) {
++			mask_name = "numeral";
++			len = end - (buf + pos);
++			pos += len;
++			done = 0;
++		} else {
++			for (x = buf + pos, i = 0;
++			     (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
++			     i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
++				substring[i] = *x;
++			substring[i] = '\0';
++
++			for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++				if (strcmp(substring, mask_flags[i].mask_name)
++				    == 0) {
++					mask_name = mask_flags[i].mask_name;
++					mask_bitfield =
++					    mask_flags[i].mask_bitfield;
++					done = 0;
++					break;
++				}
++			}
++		}
++
++		if (mask_name != NULL) {
++			done = 0;
++			switch (add) {
++			case '-':
++				rg &= ~mask_bitfield;
++				break;
++			case '+':
++				rg |= mask_bitfield;
++				break;
++			case '=':
++				rg = mask_bitfield;
++				break;
++			default:
++				rg |= mask_bitfield;
++				break;
++			}
++		}
++	}
++
++	yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
++
++	printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
++
++	if (rg & YAFFS_TRACE_ALWAYS) {
++		for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++			char flag;
++			flag = ((rg & mask_flags[i].mask_bitfield) ==
++				mask_flags[i].mask_bitfield) ? '+' : '-';
++			printk(KERN_DEBUG "%c%s\n", flag,
++			       mask_flags[i].mask_name);
++		}
++	}
++
++	return count;
++}
++
++static int yaffs_proc_write(struct file *file, const char *buf,
++			    unsigned long count, void *data)
++{
++	return yaffs_proc_write_trace_options(file, buf, count, data);
++}
++
++/* Stuff to handle installation of file systems */
++struct file_system_to_install {
++	struct file_system_type *fst;
++	int installed;
++};
++
++static struct file_system_to_install fs_to_install[] = {
++	{&yaffs_fs_type, 0},
++	{&yaffs2_fs_type, 0},
++	{NULL, 0}
++};
++
++static int __init init_yaffs_fs(void)
++{
++	int error = 0;
++	struct file_system_to_install *fsinst;
++
++	yaffs_trace(YAFFS_TRACE_ALWAYS,
++		"yaffs built " __DATE__ " " __TIME__ " Installing.");
++
++	mutex_init(&yaffs_context_lock);
++
++	/* Install the proc_fs entries */
++	my_proc_entry = create_proc_entry("yaffs",
++					  S_IRUGO | S_IFREG, YPROC_ROOT);
++
++	if (my_proc_entry) {
++		my_proc_entry->write_proc = yaffs_proc_write;
++		my_proc_entry->read_proc = yaffs_proc_read;
++		my_proc_entry->data = NULL;
++	} else {
++		return -ENOMEM;
++	}
++
++	/* Now add the file system entries */
++
++	fsinst = fs_to_install;
++
++	while (fsinst->fst && !error) {
++		error = register_filesystem(fsinst->fst);
++		if (!error)
++			fsinst->installed = 1;
++		fsinst++;
++	}
++
++	/* Any errors? uninstall  */
++	if (error) {
++		fsinst = fs_to_install;
++
++		while (fsinst->fst) {
++			if (fsinst->installed) {
++				unregister_filesystem(fsinst->fst);
++				fsinst->installed = 0;
++			}
++			fsinst++;
++		}
++	}
++
++	return error;
++}
++
++static void __exit exit_yaffs_fs(void)
++{
++
++	struct file_system_to_install *fsinst;
++
++	yaffs_trace(YAFFS_TRACE_ALWAYS,
++		"yaffs built " __DATE__ " " __TIME__ " removing.");
++
++	remove_proc_entry("yaffs", YPROC_ROOT);
++
++	fsinst = fs_to_install;
++
++	while (fsinst->fst) {
++		if (fsinst->installed) {
++			unregister_filesystem(fsinst->fst);
++			fsinst->installed = 0;
++		}
++		fsinst++;
++	}
++}
++
++module_init(init_yaffs_fs)
++module_exit(exit_yaffs_fs)
++
++MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
++MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2011");
++MODULE_LICENSE("GPL");
+diff --git a/fs/yaffs2/yaffs_yaffs1.c b/fs/yaffs2/yaffs_yaffs1.c
+new file mode 100644
+index 00000000..d277e20e
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs1.c
+@@ -0,0 +1,422 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_yaffs1.h"
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++#include "yaffs_attribs.h"
++
++int yaffs1_scan(struct yaffs_dev *dev)
++{
++	struct yaffs_ext_tags tags;
++	int blk;
++	int result;
++	int chunk;
++	int c;
++	int deleted;
++	enum yaffs_block_state state;
++	LIST_HEAD(hard_list);
++	struct yaffs_block_info *bi;
++	u32 seq_number;
++	struct yaffs_obj_hdr *oh;
++	struct yaffs_obj *in;
++	struct yaffs_obj *parent;
++	int alloc_failed = 0;
++	struct yaffs_shadow_fixer *shadow_fixers = NULL;
++	u8 *chunk_data;
++
++	yaffs_trace(YAFFS_TRACE_SCAN,
++		"yaffs1_scan starts  intstartblk %d intendblk %d...",
++		dev->internal_start_block, dev->internal_end_block);
++
++	chunk_data = yaffs_get_temp_buffer(dev);
++
++	dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++	/* Scan all the blocks to determine their state */
++	bi = dev->block_info;
++	for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
++	     blk++) {
++		yaffs_clear_chunk_bits(dev, blk);
++		bi->pages_in_use = 0;
++		bi->soft_del_pages = 0;
++
++		yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++		bi->block_state = state;
++		bi->seq_number = seq_number;
++
++		if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++			bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
++
++		yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
++			"Block scanning block %d state %d seq %d",
++			blk, state, seq_number);
++
++		if (state == YAFFS_BLOCK_STATE_DEAD) {
++			yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
++				"block %d is bad", blk);
++		} else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++			yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
++			dev->n_erased_blocks++;
++			dev->n_free_chunks += dev->param.chunks_per_block;
++		}
++		bi++;
++	}
++
++	/* For each block.... */
++	for (blk = dev->internal_start_block;
++	     !alloc_failed && blk <= dev->internal_end_block; blk++) {
++
++		cond_resched();
++
++		bi = yaffs_get_block_info(dev, blk);
++		state = bi->block_state;
++
++		deleted = 0;
++
++		/* For each chunk in each block that needs scanning.... */
++		for (c = 0;
++			!alloc_failed && c < dev->param.chunks_per_block &&
++			state == YAFFS_BLOCK_STATE_NEEDS_SCAN; c++) {
++			/* Read the tags and decide what to do */
++			chunk = blk * dev->param.chunks_per_block + c;
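++			/* Chunks are addressed linearly: chunk n lives in
++			 * block n / chunks_per_block at page offset
++			 * n % chunks_per_block (e.g. with 64 chunks per
++			 * block, chunk 130 is block 2, page 2). */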
++
++			result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
++							  &tags);
++
++			/* Let's have a good look at this chunk... */
++
++			if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED ||
++			    tags.is_deleted) {
++				/* YAFFS1 only...
++				 * A deleted chunk
++				 */
++				deleted++;
++				dev->n_free_chunks++;
++			} else if (!tags.chunk_used) {
++				/* An unassigned chunk in the block
++				 * This means that either the block is empty or
++				 * this is the one being allocated from
++				 */
++
++				if (c == 0) {
++					/* We're looking at the first chunk in
++					 * the block so the block is unused */
++					state = YAFFS_BLOCK_STATE_EMPTY;
++					dev->n_erased_blocks++;
++				} else {
++					/* this is the block being allocated */
++					yaffs_trace(YAFFS_TRACE_SCAN,
++						" Allocating from %d %d",
++						blk, c);
++					state = YAFFS_BLOCK_STATE_ALLOCATING;
++					dev->alloc_block = blk;
++					dev->alloc_page = c;
++					dev->alloc_block_finder = blk;
++
++				}
++
++				dev->n_free_chunks +=
++				    (dev->param.chunks_per_block - c);
++			} else if (tags.chunk_id > 0) {
++				/* chunk_id > 0 so it is a data chunk... */
++				unsigned int endpos;
++
++				yaffs_set_chunk_bit(dev, blk, c);
++				bi->pages_in_use++;
++
++				in = yaffs_find_or_create_by_number(dev,
++							tags.obj_id,
++							YAFFS_OBJECT_TYPE_FILE);
++				/* PutChunkIntoFile checks for a clash
++				 * (two data chunks with the same chunk_id).
++				 */
++
++				if (!in)
++					alloc_failed = 1;
++
++				if (in) {
++					if (!yaffs_put_chunk_in_file
++					    (in, tags.chunk_id, chunk, 1))
++						alloc_failed = 1;
++				}
++
++				endpos =
++				    (tags.chunk_id - 1) *
++				    dev->data_bytes_per_chunk +
++				    tags.n_bytes;
++				if (in &&
++				    in->variant_type ==
++				     YAFFS_OBJECT_TYPE_FILE &&
++				    in->variant.file_variant.scanned_size <
++				      endpos) {
++					in->variant.file_variant.scanned_size =
++					    endpos;
++					if (!dev->param.use_header_file_size) {
++						in->variant.
++						    file_variant.file_size =
++						    in->variant.
++						    file_variant.scanned_size;
++					}
++
++				}
++			} else {
++				/* chunk_id == 0, so it is an ObjectHeader.
++				 * Make the object
++				 */
++				yaffs_set_chunk_bit(dev, blk, c);
++				bi->pages_in_use++;
++
++				result = yaffs_rd_chunk_tags_nand(dev, chunk,
++								  chunk_data,
++								  NULL);
++
++				oh = (struct yaffs_obj_hdr *)chunk_data;
++
++				in = yaffs_find_by_number(dev, tags.obj_id);
++				if (in && in->variant_type != oh->type) {
++					/* This should not happen, but somehow
++					 * we've ended up with an obj_id that
++					 * has been reused but not yet deleted,
++					 * and worse still it has changed type.
++					 * Delete the old object.
++					 */
++
++					yaffs_del_obj(in);
++					in = NULL;
++				}
++
++				in = yaffs_find_or_create_by_number(dev,
++								tags.obj_id,
++								oh->type);
++
++				if (!in)
++					alloc_failed = 1;
++
++				if (in && oh->shadows_obj > 0) {
++
++					struct yaffs_shadow_fixer *fixer;
++					fixer =
++						kmalloc(sizeof
++						(struct yaffs_shadow_fixer),
++						GFP_NOFS);
++					if (fixer) {
++						fixer->next = shadow_fixers;
++						shadow_fixers = fixer;
++						fixer->obj_id = tags.obj_id;
++						fixer->shadowed_id =
++						    oh->shadows_obj;
++						yaffs_trace(YAFFS_TRACE_SCAN,
++							" Shadow fixer: %d shadows %d",
++							fixer->obj_id,
++							fixer->shadowed_id);
++
++					}
++
++				}
++
++				if (in && in->valid) {
++					/* We have already filled this one.
++					 * We have a duplicate and need to
++					 * resolve it. */
++
++					unsigned existing_serial = in->serial;
++					unsigned new_serial =
++					    tags.serial_number;
++
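++					/* The serial number is a two-bit
++					 * wrap-around counter, so the copy
++					 * whose serial is one ahead (mod 4)
++					 * is the newer of the two. */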
++					if (((existing_serial + 1) & 3) ==
++					    new_serial) {
++						/* Use new one - destroy the
++						 * existing one */
++						yaffs_chunk_del(dev,
++								in->hdr_chunk,
++								1, __LINE__);
++						in->valid = 0;
++					} else {
++						/* Use existing - destroy
++						 * this one. */
++						yaffs_chunk_del(dev, chunk, 1,
++								__LINE__);
++					}
++				}
++
++				if (in && !in->valid &&
++				    (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++				     tags.obj_id ==
++				     YAFFS_OBJECTID_LOSTNFOUND)) {
++					/* We only load some info, don't fiddle
++					 * with directory structure */
++					in->valid = 1;
++					in->variant_type = oh->type;
++
++					in->yst_mode = oh->yst_mode;
++					yaffs_load_attribs(in, oh);
++					in->hdr_chunk = chunk;
++					in->serial = tags.serial_number;
++
++				} else if (in && !in->valid) {
++					/* we need to load this info */
++
++					in->valid = 1;
++					in->variant_type = oh->type;
++
++					in->yst_mode = oh->yst_mode;
++					yaffs_load_attribs(in, oh);
++					in->hdr_chunk = chunk;
++					in->serial = tags.serial_number;
++
++					yaffs_set_obj_name_from_oh(in, oh);
++					in->dirty = 0;
++
++					/* directory stuff...
++					 * hook up to parent
++					 */
++
++					parent =
++					    yaffs_find_or_create_by_number
++					    (dev, oh->parent_obj_id,
++					     YAFFS_OBJECT_TYPE_DIRECTORY);
++					if (!parent)
++						alloc_failed = 1;
++					if (parent && parent->variant_type ==
++					    YAFFS_OBJECT_TYPE_UNKNOWN) {
++						/* Set up as a directory */
++						parent->variant_type =
++						    YAFFS_OBJECT_TYPE_DIRECTORY;
++						INIT_LIST_HEAD(&parent->
++							variant.dir_variant.
++							children);
++					} else if (!parent ||
++						parent->variant_type !=
++						YAFFS_OBJECT_TYPE_DIRECTORY) {
++						/* Hoosterman, a problem....
++						 * We're trying to use a
++						 * non-directory as a directory
++						 */
++
++						yaffs_trace(YAFFS_TRACE_ERROR,
++							"yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++							);
++						parent = dev->lost_n_found;
++					}
++
++					yaffs_add_obj_to_dir(parent, in);
++
++					switch (in->variant_type) {
++					case YAFFS_OBJECT_TYPE_UNKNOWN:
++						/* Todo got a problem */
++						break;
++					case YAFFS_OBJECT_TYPE_FILE:
++						if (dev->param.
++						    use_header_file_size)
++							in->variant.
++							file_variant.file_size
++							= yaffs_oh_to_size(oh);
++						break;
++					case YAFFS_OBJECT_TYPE_HARDLINK:
++						in->variant.
++						    hardlink_variant.equiv_id =
++						    oh->equiv_id;
++						list_add(&in->hard_links,
++								&hard_list);
++						break;
++					case YAFFS_OBJECT_TYPE_DIRECTORY:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SPECIAL:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SYMLINK:
++						in->variant.symlink_variant.
++						    alias =
++						    yaffs_clone_str(oh->alias);
++						if (!in->variant.
++						    symlink_variant.alias)
++							alloc_failed = 1;
++						break;
++					}
++				}
++			}
++		}
++
++		if (state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++			/* If we got this far while scanning,
++			 * then the block is fully allocated. */
++			state = YAFFS_BLOCK_STATE_FULL;
++		}
++
++		if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
++			/* If the block was partially allocated then
++			 * treat it as fully allocated. */
++			state = YAFFS_BLOCK_STATE_FULL;
++			dev->alloc_block = -1;
++		}
++
++		bi->block_state = state;
++
++		/* Now let's see if it was dirty */
++		if (bi->pages_in_use == 0 &&
++		    !bi->has_shrink_hdr &&
++		    bi->block_state == YAFFS_BLOCK_STATE_FULL)
++			yaffs_block_became_dirty(dev, blk);
++	}
++
++	/* Ok, we've done all the scanning.
++	 * Fix up the hard link chains.
++	 * We should now have scanned all the objects, now it's time to add
++	 * these hardlinks.
++	 */
++
++	yaffs_link_fixup(dev, &hard_list);
++
++	/*
++	 * Fix up any shadowed objects.
++	 * There should not be more than one of these.
++	 */
++	{
++		struct yaffs_shadow_fixer *fixer;
++		struct yaffs_obj *obj;
++
++		while (shadow_fixers) {
++			fixer = shadow_fixers;
++			shadow_fixers = fixer->next;
++			/* Complete the rename transaction by deleting the
++			 * shadowed object then setting the object header
++			 * to unshadowed.
++			 */
++			obj = yaffs_find_by_number(dev, fixer->shadowed_id);
++			if (obj)
++				yaffs_del_obj(obj);
++
++			obj = yaffs_find_by_number(dev, fixer->obj_id);
++
++			if (obj)
++				yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
++
++			kfree(fixer);
++		}
++	}
++
++	yaffs_release_temp_buffer(dev, chunk_data);
++
++	if (alloc_failed)
++		return YAFFS_FAIL;
++
++	yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
++
++	return YAFFS_OK;
++}
+diff --git a/fs/yaffs2/yaffs_yaffs1.h b/fs/yaffs2/yaffs_yaffs1.h
+new file mode 100644
+index 00000000..97e2fdd0
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs1.h
+@@ -0,0 +1,22 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_YAFFS1_H__
++#define __YAFFS_YAFFS1_H__
++
++#include "yaffs_guts.h"
++int yaffs1_scan(struct yaffs_dev *dev);
++
++#endif
+diff --git a/fs/yaffs2/yaffs_yaffs2.c b/fs/yaffs2/yaffs_yaffs2.c
+new file mode 100644
+index 00000000..f1dc9722
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs2.c
+@@ -0,0 +1,1532 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_checkptrw.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_nand.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_verify.h"
++#include "yaffs_attribs.h"
++#include "yaffs_summary.h"
++
++/*
++ * Checkpoints are really no benefit on very small partitions.
++ *
++ * To save space on small partitions, don't bother with checkpoints unless
++ * the partition is at least this big.
++ */
++#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
++#define YAFFS_SMALL_HOLE_THRESHOLD 4
++
++/*
++ * Oldest Dirty Sequence Number handling.
++ */
++
++/* yaffs_calc_oldest_dirty_seq()
++ * yaffs2_find_oldest_dirty_seq()
++ * Calculate the oldest dirty sequence number if we don't know it.
++ */
++void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
++{
++	int i;
++	unsigned seq;
++	unsigned block_no = 0;
++	struct yaffs_block_info *b;
++
++	if (!dev->param.is_yaffs2)
++		return;
++
++	/* Find the oldest dirty sequence number. */
++	seq = dev->seq_number + 1;
++	b = dev->block_info;
++	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++		if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
++		    (b->pages_in_use - b->soft_del_pages) <
++		    dev->param.chunks_per_block &&
++		    b->seq_number < seq) {
++			seq = b->seq_number;
++			block_no = i;
++		}
++		b++;
++	}
++
++	if (block_no) {
++		dev->oldest_dirty_seq = seq;
++		dev->oldest_dirty_block = block_no;
++	}
++}
++
++void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
++{
++	if (!dev->param.is_yaffs2)
++		return;
++
++	if (!dev->oldest_dirty_seq)
++		yaffs_calc_oldest_dirty_seq(dev);
++}
++
++/*
++ * yaffs_clear_oldest_dirty_seq()
++ * Called when a block is erased or marked bad (i.e. when its seq_number
++ * becomes invalid). If the value matches the oldest, we clear
++ * dev->oldest_dirty_seq to force its recomputation.
++ */
++void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
++				   struct yaffs_block_info *bi)
++{
++
++	if (!dev->param.is_yaffs2)
++		return;
++
++	if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
++		dev->oldest_dirty_seq = 0;
++		dev->oldest_dirty_block = 0;
++	}
++}
++
++/*
++ * yaffs2_update_oldest_dirty_seq()
++ * Update the oldest dirty sequence number whenever we dirty a block.
++ * Only do this if the oldest_dirty_seq is actually being tracked.
++ */
++void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
++				    struct yaffs_block_info *bi)
++{
++	if (!dev->param.is_yaffs2)
++		return;
++
++	if (dev->oldest_dirty_seq) {
++		if (dev->oldest_dirty_seq > bi->seq_number) {
++			dev->oldest_dirty_seq = bi->seq_number;
++			dev->oldest_dirty_block = block_no;
++		}
++	}
++}
++
++int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
++{
++
++	if (!dev->param.is_yaffs2)
++		return 1;	/* disqualification only applies to yaffs2. */
++
++	if (!bi->has_shrink_hdr)
++		return 1;	/* can gc */
++
++	yaffs2_find_oldest_dirty_seq(dev);
++
++	/* Can't do gc of this block if there are any blocks older than this
++	 * one that have discarded pages.
++	 */
++	return (bi->seq_number <= dev->oldest_dirty_seq);
++}
++
++/*
++ * yaffs2_find_refresh_block()
++ * periodically finds the oldest full block by sequence number for refreshing.
++ * Only for yaffs2.
++ */
++u32 yaffs2_find_refresh_block(struct yaffs_dev *dev)
++{
++	u32 b;
++	u32 oldest = 0;
++	u32 oldest_seq = 0;
++	struct yaffs_block_info *bi;
++
++	if (!dev->param.is_yaffs2)
++		return oldest;
++
++	/*
++	 * If refresh period < 10 then refreshing is disabled.
++	 */
++	if (dev->param.refresh_period < 10)
++		return oldest;
++
++	/*
++	 * Fix broken values.
++	 */
++	if (dev->refresh_skip > dev->param.refresh_period)
++		dev->refresh_skip = dev->param.refresh_period;
++
++	if (dev->refresh_skip > 0)
++		return oldest;
++
++	/*
++	 * Refresh skip is now zero.
++	 * We'll do a refresh this time around....
++	 * Update the refresh skip and find the oldest block.
++	 */
++	dev->refresh_skip = dev->param.refresh_period;
++	dev->refresh_count++;
++	bi = dev->block_info;
++	for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
++
++		if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++
++			if (oldest < 1 || bi->seq_number < oldest_seq) {
++				oldest = b;
++				oldest_seq = bi->seq_number;
++			}
++		}
++		bi++;
++	}
++
++	if (oldest > 0) {
++		yaffs_trace(YAFFS_TRACE_GC,
++			"GC refresh count %d selected block %d with seq_number %d",
++			dev->refresh_count, oldest, oldest_seq);
++	}
++
++	return oldest;
++}
++
++int yaffs2_checkpt_required(struct yaffs_dev *dev)
++{
++	int nblocks;
++
++	if (!dev->param.is_yaffs2)
++		return 0;
++
++	nblocks = dev->internal_end_block - dev->internal_start_block + 1;
++
++	return !dev->param.skip_checkpt_wr &&
++	    !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
++}
++
++int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
++{
++	int retval;
++	int n_bytes = 0;
++	int n_blocks;
++	int dev_blocks;
++
++	if (!dev->param.is_yaffs2)
++		return 0;
++
++	if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
++		/* Not a valid value so recalculate */
++		dev_blocks = dev->param.end_block - dev->param.start_block + 1;
++		n_bytes += sizeof(struct yaffs_checkpt_validity);
++		n_bytes += sizeof(struct yaffs_checkpt_dev);
++		n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
++		n_bytes += dev_blocks * dev->chunk_bit_stride;
++		n_bytes +=
++		    (sizeof(struct yaffs_checkpt_obj) + sizeof(u32)) *
++		    dev->n_obj;
++		n_bytes += (dev->tnode_size + sizeof(u32)) * dev->n_tnodes;
++		n_bytes += sizeof(struct yaffs_checkpt_validity);
++		n_bytes += sizeof(u32);	/* checksum */
++
++		/* Round up and add 2 blocks to allow for some bad blocks,
++		 * so add 3 */
++
++		n_blocks =
++		    (n_bytes /
++		     (dev->data_bytes_per_chunk *
++		      dev->param.chunks_per_block)) + 3;
++
++		dev->checkpoint_blocks_required = n_blocks;
++	}
++
++	retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
++	if (retval < 0)
++		retval = 0;
++	return retval;
++}
++
++/*--------------------- Checkpointing --------------------*/
++
++static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
++{
++	struct yaffs_checkpt_validity cp;
++
++	memset(&cp, 0, sizeof(cp));
++
++	cp.struct_type = sizeof(cp);
++	cp.magic = YAFFS_MAGIC;
++	cp.version = YAFFS_CHECKPOINT_VERSION;
++	cp.head = (head) ? 1 : 0;
++
++	return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
++{
++	struct yaffs_checkpt_validity cp;
++	int ok;
++
++	ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++	if (ok)
++		ok = (cp.struct_type == sizeof(cp)) &&
++		    (cp.magic == YAFFS_MAGIC) &&
++		    (cp.version == YAFFS_CHECKPOINT_VERSION) &&
++		    (cp.head == ((head) ? 1 : 0));
++	return ok ? 1 : 0;
++}
++
++static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
++				      struct yaffs_dev *dev)
++{
++	cp->n_erased_blocks = dev->n_erased_blocks;
++	cp->alloc_block = dev->alloc_block;
++	cp->alloc_page = dev->alloc_page;
++	cp->n_free_chunks = dev->n_free_chunks;
++
++	cp->n_deleted_files = dev->n_deleted_files;
++	cp->n_unlinked_files = dev->n_unlinked_files;
++	cp->n_bg_deletions = dev->n_bg_deletions;
++	cp->seq_number = dev->seq_number;
++}
++
++static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
++				     struct yaffs_checkpt_dev *cp)
++{
++	dev->n_erased_blocks = cp->n_erased_blocks;
++	dev->alloc_block = cp->alloc_block;
++	dev->alloc_page = cp->alloc_page;
++	dev->n_free_chunks = cp->n_free_chunks;
++
++	dev->n_deleted_files = cp->n_deleted_files;
++	dev->n_unlinked_files = cp->n_unlinked_files;
++	dev->n_bg_deletions = cp->n_bg_deletions;
++	dev->seq_number = cp->seq_number;
++}
++
++static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
++{
++	struct yaffs_checkpt_dev cp;
++	u32 n_bytes;
++	u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
++	int ok;
++
++	/* Write device runtime values */
++	yaffs2_dev_to_checkpt_dev(&cp, dev);
++	cp.struct_type = sizeof(cp);
++
++	ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++	if (!ok)
++		return 0;
++
++	/* Write block info */
++	n_bytes = n_blocks * sizeof(struct yaffs_block_info);
++	ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
++	if (!ok)
++		return 0;
++
++	/* Write chunk bits */
++	n_bytes = n_blocks * dev->chunk_bit_stride;
++	ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
++
++	return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
++{
++	struct yaffs_checkpt_dev cp;
++	u32 n_bytes;
++	u32 n_blocks =
++	    (dev->internal_end_block - dev->internal_start_block + 1);
++	int ok;
++
++	ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++	if (!ok)
++		return 0;
++
++	if (cp.struct_type != sizeof(cp))
++		return 0;
++
++	yaffs_checkpt_dev_to_dev(dev, &cp);
++
++	n_bytes = n_blocks * sizeof(struct yaffs_block_info);
++
++	ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
++
++	if (!ok)
++		return 0;
++
++	n_bytes = n_blocks * dev->chunk_bit_stride;
++
++	ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
++
++	return ok ? 1 : 0;
++}
++
++static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
++				   struct yaffs_obj *obj)
++{
++	cp->obj_id = obj->obj_id;
++	cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
++	cp->hdr_chunk = obj->hdr_chunk;
++	cp->variant_type = obj->variant_type;
++	cp->deleted = obj->deleted;
++	cp->soft_del = obj->soft_del;
++	cp->unlinked = obj->unlinked;
++	cp->fake = obj->fake;
++	cp->rename_allowed = obj->rename_allowed;
++	cp->unlink_allowed = obj->unlink_allowed;
++	cp->serial = obj->serial;
++	cp->n_data_chunks = obj->n_data_chunks;
++
++	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++		cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
++	else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++		cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
++}
++
++static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
++				     struct yaffs_checkpt_obj *cp)
++{
++	struct yaffs_obj *parent;
++
++	if (obj->variant_type != cp->variant_type) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"Checkpoint read object %d type %d chunk %d does not match existing object type %d",
++			cp->obj_id, cp->variant_type, cp->hdr_chunk,
++			obj->variant_type);
++		return 0;
++	}
++
++	obj->obj_id = cp->obj_id;
++
++	if (cp->parent_id)
++		parent = yaffs_find_or_create_by_number(obj->my_dev,
++						cp->parent_id,
++						YAFFS_OBJECT_TYPE_DIRECTORY);
++	else
++		parent = NULL;
++
++	if (parent) {
++		if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++			yaffs_trace(YAFFS_TRACE_ALWAYS,
++				"Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
++				cp->obj_id, cp->parent_id,
++				cp->variant_type, cp->hdr_chunk,
++				parent->variant_type);
++			return 0;
++		}
++		yaffs_add_obj_to_dir(parent, obj);
++	}
++
++	obj->hdr_chunk = cp->hdr_chunk;
++	obj->variant_type = cp->variant_type;
++	obj->deleted = cp->deleted;
++	obj->soft_del = cp->soft_del;
++	obj->unlinked = cp->unlinked;
++	obj->fake = cp->fake;
++	obj->rename_allowed = cp->rename_allowed;
++	obj->unlink_allowed = cp->unlink_allowed;
++	obj->serial = cp->serial;
++	obj->n_data_chunks = cp->n_data_chunks;
++
++	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++		obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
++	else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++		obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
++
++	if (obj->hdr_chunk > 0)
++		obj->lazy_loaded = 1;
++	return 1;
++}
++
++static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
++				       struct yaffs_tnode *tn, u32 level,
++				       int chunk_offset)
++{
++	int i;
++	struct yaffs_dev *dev = in->my_dev;
++	int ok = 1;
++	u32 base_offset;
++
++	if (!tn)
++		return 1;
++
++	if (level > 0) {
++		for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
++			if (!tn->internal[i])
++				continue;
++			ok = yaffs2_checkpt_tnode_worker(in,
++				 tn->internal[i],
++				 level - 1,
++				 (chunk_offset <<
++				  YAFFS_TNODES_INTERNAL_BITS) + i);
++		}
++		return ok;
++	}
++
++	/* Level 0 tnode */
++	base_offset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
++	ok = (yaffs2_checkpt_wr(dev, &base_offset, sizeof(base_offset)) ==
++			sizeof(base_offset));
++	if (ok)
++		ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) ==
++			dev->tnode_size);
++
++	return ok;
++}
++
++static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
++{
++	u32 end_marker = ~0;
++	int ok = 1;
++
++	if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
++		return ok;
++
++	ok = yaffs2_checkpt_tnode_worker(obj,
++					 obj->variant.file_variant.top,
++					 obj->variant.file_variant.
++					 top_level, 0);
++	if (ok)
++		ok = (yaffs2_checkpt_wr(obj->my_dev, &end_marker,
++				sizeof(end_marker)) == sizeof(end_marker));
++
++	return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
++{
++	u32 base_chunk;
++	int ok = 1;
++	struct yaffs_dev *dev = obj->my_dev;
++	struct yaffs_file_var *file_struct_ptr = &obj->variant.file_variant;
++	struct yaffs_tnode *tn;
++	int nread = 0;
++
++	ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
++	      sizeof(base_chunk));
++
++	while (ok && (~base_chunk)) {
++		nread++;
++		/* Read level 0 tnode */
++
++		tn = yaffs_get_tnode(dev);
++		if (tn)
++			ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
++				dev->tnode_size);
++		else
++			ok = 0;
++
++		if (tn && ok)
++			ok = yaffs_add_find_tnode_0(dev,
++						    file_struct_ptr,
++						    base_chunk, tn) ? 1 : 0;
++
++		if (ok)
++			ok = (yaffs2_checkpt_rd
++			      (dev, &base_chunk,
++			       sizeof(base_chunk)) == sizeof(base_chunk));
++	}
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"Checkpoint read tnodes %d records, last %d. ok %d",
++		nread, base_chunk, ok);
++
++	return ok ? 1 : 0;
++}
++
++static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
++{
++	struct yaffs_obj *obj;
++	struct yaffs_checkpt_obj cp;
++	int i;
++	int ok = 1;
++	struct list_head *lh;
++
++	/* Iterate through the objects in each hash entry,
++	 * dumping them to the checkpointing stream.
++	 */
++
++	for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
++		list_for_each(lh, &dev->obj_bucket[i].list) {
++			obj = list_entry(lh, struct yaffs_obj, hash_link);
++			if (!obj->defered_free) {
++				yaffs2_obj_checkpt_obj(&cp, obj);
++				cp.struct_type = sizeof(cp);
++
++				yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++					"Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
++					cp.obj_id, cp.parent_id,
++					cp.variant_type, cp.hdr_chunk, obj);
++
++				ok = (yaffs2_checkpt_wr(dev, &cp,
++						sizeof(cp)) == sizeof(cp));
++
++				if (ok &&
++					obj->variant_type ==
++					YAFFS_OBJECT_TYPE_FILE)
++					ok = yaffs2_wr_checkpt_tnodes(obj);
++			}
++		}
++	}
++
++	/* Dump end of list */
++	memset(&cp, 0xff, sizeof(struct yaffs_checkpt_obj));
++	cp.struct_type = sizeof(cp);
++
++	if (ok)
++		ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++	return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
++{
++	struct yaffs_obj *obj;
++	struct yaffs_checkpt_obj cp;
++	int ok = 1;
++	int done = 0;
++	LIST_HEAD(hard_list);
++
++
++	while (ok && !done) {
++		ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++		if (cp.struct_type != sizeof(cp)) {
++			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++				"struct size %d instead of %d ok %d",
++				cp.struct_type, (int)sizeof(cp), ok);
++			ok = 0;
++		}
++
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"Checkpoint read object %d parent %d type %d chunk %d ",
++			cp.obj_id, cp.parent_id, cp.variant_type,
++			cp.hdr_chunk);
++
++		if (ok && cp.obj_id == ~0) {
++			done = 1;
++		} else if (ok) {
++			obj =
++			    yaffs_find_or_create_by_number(dev, cp.obj_id,
++							   cp.variant_type);
++			if (obj) {
++				ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
++				if (!ok)
++					break;
++				if (obj->variant_type ==
++					YAFFS_OBJECT_TYPE_FILE) {
++					ok = yaffs2_rd_checkpt_tnodes(obj);
++				} else if (obj->variant_type ==
++					YAFFS_OBJECT_TYPE_HARDLINK) {
++					list_add(&obj->hard_links, &hard_list);
++				}
++			} else {
++				ok = 0;
++			}
++		}
++	}
++
++	if (ok)
++		yaffs_link_fixup(dev, &hard_list);
++
++	return ok ? 1 : 0;
++}
++
++static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
++{
++	u32 checkpt_sum;
++	int ok;
++
++	yaffs2_get_checkpt_sum(dev, &checkpt_sum);
++
++	ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
++		sizeof(checkpt_sum));
++
++	if (!ok)
++		return 0;
++
++	return 1;
++}
++
++static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
++{
++	u32 checkpt_sum0;
++	u32 checkpt_sum1;
++	int ok;
++
++	yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
++
++	ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
++		sizeof(checkpt_sum1));
++
++	if (!ok)
++		return 0;
++
++	if (checkpt_sum0 != checkpt_sum1)
++		return 0;
++
++	return 1;
++}
++
++static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
++{
++	int ok = 1;
++
++	if (!yaffs2_checkpt_required(dev)) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"skipping checkpoint write");
++		ok = 0;
++	}
++
++	if (ok)
++		ok = yaffs2_checkpt_open(dev, 1);
++
++	if (ok) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"write checkpoint validity");
++		ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
++	}
++	if (ok) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"write checkpoint device");
++		ok = yaffs2_wr_checkpt_dev(dev);
++	}
++	if (ok) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"write checkpoint objects");
++		ok = yaffs2_wr_checkpt_objs(dev);
++	}
++	if (ok) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"write checkpoint validity");
++		ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
++	}
++
++	if (ok)
++		ok = yaffs2_wr_checkpt_sum(dev);
++
++	if (!yaffs_checkpt_close(dev))
++		ok = 0;
++
++	if (ok)
++		dev->is_checkpointed = 1;
++	else
++		dev->is_checkpointed = 0;
++
++	return dev->is_checkpointed;
++}
++
++static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
++{
++	int ok = 1;
++
++	if (!dev->param.is_yaffs2)
++		ok = 0;
++
++	if (ok && dev->param.skip_checkpt_rd) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"skipping checkpoint read");
++		ok = 0;
++	}
++
++	if (ok)
++		ok = yaffs2_checkpt_open(dev, 0); /* open for read */
++
++	if (ok) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"read checkpoint validity");
++		ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
++	}
++	if (ok) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"read checkpoint device");
++		ok = yaffs2_rd_checkpt_dev(dev);
++	}
++	if (ok) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"read checkpoint objects");
++		ok = yaffs2_rd_checkpt_objs(dev);
++	}
++	if (ok) {
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"read checkpoint validity");
++		ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
++	}
++
++	if (ok) {
++		ok = yaffs2_rd_checkpt_sum(dev);
++		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++			"read checkpoint checksum %d", ok);
++	}
++
++	if (!yaffs_checkpt_close(dev))
++		ok = 0;
++
++	if (ok)
++		dev->is_checkpointed = 1;
++	else
++		dev->is_checkpointed = 0;
++
++	return ok ? 1 : 0;
++}
++
++void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
++{
++	if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
++		dev->is_checkpointed = 0;
++		yaffs2_checkpt_invalidate_stream(dev);
++	}
++	if (dev->param.sb_dirty_fn)
++		dev->param.sb_dirty_fn(dev);
++}
++
++int yaffs2_checkpt_save(struct yaffs_dev *dev)
++{
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"save entry: is_checkpointed %d",
++		dev->is_checkpointed);
++
++	yaffs_verify_objects(dev);
++	yaffs_verify_blocks(dev);
++	yaffs_verify_free_chunks(dev);
++
++	if (!dev->is_checkpointed) {
++		yaffs2_checkpt_invalidate(dev);
++		yaffs2_wr_checkpt_data(dev);
++	}
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
++		"save exit: is_checkpointed %d",
++		dev->is_checkpointed);
++
++	return dev->is_checkpointed;
++}
++
++int yaffs2_checkpt_restore(struct yaffs_dev *dev)
++{
++	int retval;
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"restore entry: is_checkpointed %d",
++		dev->is_checkpointed);
++
++	retval = yaffs2_rd_checkpt_data(dev);
++
++	if (dev->is_checkpointed) {
++		yaffs_verify_objects(dev);
++		yaffs_verify_blocks(dev);
++		yaffs_verify_free_chunks(dev);
++	}
++
++	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"restore exit: is_checkpointed %d",
++		dev->is_checkpointed);
++
++	return retval;
++}
++
++int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
++{
++	/* if new_size > old_file_size.
++	 * We're going to be writing a hole.
++	 * If the hole is small then write zeros otherwise write a start
++	 * of hole marker.
++	 */
++	loff_t old_file_size;
++	loff_t increase;
++	int small_hole;
++	int result = YAFFS_OK;
++	struct yaffs_dev *dev = NULL;
++	u8 *local_buffer = NULL;
++	int small_increase_ok = 0;
++
++	if (!obj)
++		return YAFFS_FAIL;
++
++	if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
++		return YAFFS_FAIL;
++
++	dev = obj->my_dev;
++
++	/* Bail out if not yaffs2 mode */
++	if (!dev->param.is_yaffs2)
++		return YAFFS_OK;
++
++	old_file_size = obj->variant.file_variant.file_size;
++
++	if (new_size <= old_file_size)
++		return YAFFS_OK;
++
++	increase = new_size - old_file_size;
++
++	if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
++	    yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
++		small_hole = 1;
++	else
++		small_hole = 0;
++
++	if (small_hole)
++		local_buffer = yaffs_get_temp_buffer(dev);
++
++	if (local_buffer) {
++		/* fill hole with zero bytes */
++		loff_t pos = old_file_size;
++		int this_write;
++		int written;
++		memset(local_buffer, 0, dev->data_bytes_per_chunk);
++		small_increase_ok = 1;
++
++		while (increase > 0 && small_increase_ok) {
++			this_write = increase;
++			if (this_write > dev->data_bytes_per_chunk)
++				this_write = dev->data_bytes_per_chunk;
++			written =
++			    yaffs_do_file_wr(obj, local_buffer, pos, this_write,
++					     0);
++			if (written == this_write) {
++				pos += this_write;
++				increase -= this_write;
++			} else {
++				small_increase_ok = 0;
++			}
++		}
++
++		yaffs_release_temp_buffer(dev, local_buffer);
++
++		/* If out of space then reverse any chunks we've added */
++		if (!small_increase_ok)
++			yaffs_resize_file_down(obj, old_file_size);
++	}
++
++	if (!small_increase_ok &&
++	    obj->parent &&
++	    obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++	    obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
++		/* Write a hole start header with the old file size */
++		yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
++	}
++
++	return result;
++}
++
++struct yaffs_block_index {
++	int seq;
++	int block;
++};
++
++static int yaffs2_ybicmp(const void *a, const void *b)
++{
++	int aseq = ((struct yaffs_block_index *)a)->seq;
++	int bseq = ((struct yaffs_block_index *)b)->seq;
++	int ablock = ((struct yaffs_block_index *)a)->block;
++	int bblock = ((struct yaffs_block_index *)b)->block;
++
++	if (aseq == bseq)
++		return ablock - bblock;
++
++	return aseq - bseq;
++}
++
++static inline int yaffs2_scan_chunk(struct yaffs_dev *dev,
++		struct yaffs_block_info *bi,
++		int blk, int chunk_in_block,
++		int *found_chunks,
++		u8 *chunk_data,
++		struct list_head *hard_list,
++		int summary_available)
++{
++	struct yaffs_obj_hdr *oh;
++	struct yaffs_obj *in;
++	struct yaffs_obj *parent;
++	int equiv_id;
++	loff_t file_size;
++	int is_shrink;
++	int is_unlinked;
++	struct yaffs_ext_tags tags;
++	int result;
++	int alloc_failed = 0;
++	int chunk = blk * dev->param.chunks_per_block + chunk_in_block;
++	struct yaffs_file_var *file_var;
++	struct yaffs_hardlink_var *hl_var;
++	struct yaffs_symlink_var *sl_var;
++
++	if (summary_available) {
++		result = yaffs_summary_fetch(dev, &tags, chunk_in_block);
++		tags.seq_number = bi->seq_number;
++	}
++
++	if (!summary_available || tags.obj_id == 0) {
++		result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
++		dev->tags_used++;
++	} else {
++		dev->summary_used++;
++	}
++
++	/* Let's have a good look at this chunk... */
++
++	if (!tags.chunk_used) {
++		/* An unassigned chunk in the block.
++		 * If there are used chunks after this one, then
++		 * it is a chunk that was skipped due to failing
++		 * the erased check. Just skip it so that it can
++		 * be deleted.
++		 * But, more typically, we get here when this is
++		 * an unallocated chunk and this means that
++		 * either the block is empty or this is the one
++		 * being allocated from.
++		 */
++
++		if (*found_chunks) {
++			/* This is a chunk that was skipped due
++			 * to failing the erased check */
++		} else if (chunk_in_block == 0) {
++			/* We're looking at the first chunk in
++			 * the block so the block is unused */
++			bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++			dev->n_erased_blocks++;
++		} else {
++			if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
++			    bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
++				if (dev->seq_number == bi->seq_number) {
++					/* Allocating from this block*/
++					yaffs_trace(YAFFS_TRACE_SCAN,
++					    " Allocating from %d %d",
++					    blk, chunk_in_block);
++
++					bi->block_state =
++						YAFFS_BLOCK_STATE_ALLOCATING;
++					dev->alloc_block = blk;
++					dev->alloc_page = chunk_in_block;
++					dev->alloc_block_finder = blk;
++				} else {
++					/* This is a partially written block
++					 * that is not the current
++					 * allocation block.
++					 */
++					yaffs_trace(YAFFS_TRACE_SCAN,
++						"Partially written block %d detected. gc will fix this.",
++						blk);
++				}
++			}
++		}
++
++		dev->n_free_chunks++;
++
++	} else if (tags.ecc_result ==
++		YAFFS_ECC_RESULT_UNFIXED) {
++		yaffs_trace(YAFFS_TRACE_SCAN,
++			" Unfixed ECC in chunk(%d:%d), chunk ignored",
++			blk, chunk_in_block);
++		dev->n_free_chunks++;
++	} else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
++		   tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
++		   tags.obj_id == YAFFS_OBJECTID_SUMMARY ||
++		   (tags.chunk_id > 0 &&
++		     tags.n_bytes > dev->data_bytes_per_chunk) ||
++		   tags.seq_number != bi->seq_number) {
++		yaffs_trace(YAFFS_TRACE_SCAN,
++			"Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
++			blk, chunk_in_block, tags.obj_id,
++			tags.chunk_id, tags.n_bytes);
++		dev->n_free_chunks++;
++	} else if (tags.chunk_id > 0) {
++		/* chunk_id > 0 so it is a data chunk... */
++		loff_t endpos;
++		loff_t chunk_base = (tags.chunk_id - 1) *
++					dev->data_bytes_per_chunk;
++
++		*found_chunks = 1;
++
++		yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++		bi->pages_in_use++;
++
++		in = yaffs_find_or_create_by_number(dev,
++					tags.obj_id,
++					YAFFS_OBJECT_TYPE_FILE);
++		if (!in)
++			/* Out of memory */
++			alloc_failed = 1;
++
++		if (in &&
++		    in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
++		    chunk_base < in->variant.file_variant.shrink_size) {
++			/* This has not been invalidated by
++			 * a resize */
++			if (!yaffs_put_chunk_in_file(in, tags.chunk_id,
++								chunk, -1))
++				alloc_failed = 1;
++
++			/* File size is calculated by looking at
++			 * the data chunks if we have not
++			 * seen an object header yet.
++			 * Stop this practice once we find an
++			 * object header.
++			 */
++			endpos = chunk_base + tags.n_bytes;
++
++			if (!in->valid &&
++			    in->variant.file_variant.scanned_size < endpos) {
++				in->variant.file_variant.
++				    scanned_size = endpos;
++				in->variant.file_variant.
++				    file_size = endpos;
++			}
++		} else if (in) {
++			/* This chunk has been invalidated by a
++			 * resize, or a past file deletion
++			 * so delete the chunk*/
++			yaffs_chunk_del(dev, chunk, 1, __LINE__);
++		}
++	} else {
++		/* chunk_id == 0, so it is an ObjectHeader.
++		 * Thus, we read in the object header and make
++		 * the object
++		 */
++		*found_chunks = 1;
++
++		yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++		bi->pages_in_use++;
++
++		oh = NULL;
++		in = NULL;
++
++		if (tags.extra_available) {
++			in = yaffs_find_or_create_by_number(dev,
++					tags.obj_id,
++					tags.extra_obj_type);
++			if (!in)
++				alloc_failed = 1;
++		}
++
++		if (!in ||
++		    (!in->valid && dev->param.disable_lazy_load) ||
++		    tags.extra_shadows ||
++		    (!in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++				 tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
++
++			/* If we don't have valid info then we
++			 * need to read the chunk.
++			 * TODO: In future we can probably defer
++			 * reading the chunk and live with
++			 * invalid data until needed.
++			 */
++
++			result = yaffs_rd_chunk_tags_nand(dev,
++						  chunk,
++						  chunk_data,
++						  NULL);
++
++			oh = (struct yaffs_obj_hdr *)chunk_data;
++
++			if (dev->param.inband_tags) {
++				/* Fix up the header if they got
++				 * corrupted by inband tags */
++				oh->shadows_obj =
++				    oh->inband_shadowed_obj_id;
++				oh->is_shrink =
++				    oh->inband_is_shrink;
++			}
++
++			if (!in) {
++				in = yaffs_find_or_create_by_number(dev,
++							tags.obj_id, oh->type);
++				if (!in)
++					alloc_failed = 1;
++			}
++		}
++
++		if (!in) {
++			/* TODO: Hoosterman, we have a problem! */
++			yaffs_trace(YAFFS_TRACE_ERROR,
++				"yaffs tragedy: Could not make object for object %d at chunk %d during scan",
++				tags.obj_id, chunk);
++			return YAFFS_FAIL;
++		}
++
++		if (in->valid) {
++			/* We have already filled this one.
++			 * We have a duplicate that will be
++			 * discarded, but we first have to suck
++			 * out resize info if it is a file.
++			 */
++			if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
++				((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
++				 (tags.extra_available &&
++				  tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
++				)) {
++				loff_t this_size = (oh) ?
++					yaffs_oh_to_size(oh) :
++					tags.extra_file_size;
++				u32 parent_obj_id = (oh) ?
++					oh->parent_obj_id :
++					tags.extra_parent_id;
++
++				is_shrink = (oh) ?
++					oh->is_shrink :
++					tags.extra_is_shrink;
++
++				/* If it is deleted (unlinked
++				 * at start also means deleted)
++				 * we treat the file size as
++				 * being zeroed at this point.
++				 */
++				if (parent_obj_id == YAFFS_OBJECTID_DELETED ||
++				    parent_obj_id == YAFFS_OBJECTID_UNLINKED) {
++					this_size = 0;
++					is_shrink = 1;
++				}
++
++				if (is_shrink &&
++				    in->variant.file_variant.shrink_size >
++				    this_size)
++					in->variant.file_variant.shrink_size =
++					this_size;
++
++				if (is_shrink)
++					bi->has_shrink_hdr = 1;
++			}
++			/* Use existing - destroy this one. */
++			yaffs_chunk_del(dev, chunk, 1, __LINE__);
++		}
++
++		if (!in->valid && in->variant_type !=
++		    (oh ? oh->type : tags.extra_obj_type))
++			yaffs_trace(YAFFS_TRACE_ERROR,
++				"yaffs tragedy: Bad object type, %d != %d, for object %d at chunk %d during scan",
++				oh ? oh->type : tags.extra_obj_type,
++				in->variant_type, tags.obj_id,
++				chunk);
++
++		if (!in->valid &&
++		    (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++		     tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
++			/* We only load some info, don't fiddle
++			 * with directory structure */
++			in->valid = 1;
++
++			if (oh) {
++				in->yst_mode = oh->yst_mode;
++				yaffs_load_attribs(in, oh);
++				in->lazy_loaded = 0;
++			} else {
++				in->lazy_loaded = 1;
++			}
++			in->hdr_chunk = chunk;
++
++		} else if (!in->valid) {
++			/* we need to load this info */
++			in->valid = 1;
++			in->hdr_chunk = chunk;
++			if (oh) {
++				in->variant_type = oh->type;
++				in->yst_mode = oh->yst_mode;
++				yaffs_load_attribs(in, oh);
++
++				if (oh->shadows_obj > 0)
++					yaffs_handle_shadowed_obj(dev,
++					     oh->shadows_obj, 1);
++
++				yaffs_set_obj_name_from_oh(in, oh);
++				parent = yaffs_find_or_create_by_number(dev,
++						oh->parent_obj_id,
++						YAFFS_OBJECT_TYPE_DIRECTORY);
++				file_size = yaffs_oh_to_size(oh);
++				is_shrink = oh->is_shrink;
++				equiv_id = oh->equiv_id;
++			} else {
++				in->variant_type = tags.extra_obj_type;
++				parent = yaffs_find_or_create_by_number(dev,
++						tags.extra_parent_id,
++						YAFFS_OBJECT_TYPE_DIRECTORY);
++				file_size = tags.extra_file_size;
++				is_shrink = tags.extra_is_shrink;
++				equiv_id = tags.extra_equiv_id;
++				in->lazy_loaded = 1;
++			}
++			in->dirty = 0;
++
++			if (!parent)
++				alloc_failed = 1;
++
++			/* directory stuff...
++			 * hook up to parent
++			 */
++
++			if (parent &&
++			    parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) {
++				/* Set up as a directory */
++				parent->variant_type =
++					YAFFS_OBJECT_TYPE_DIRECTORY;
++				INIT_LIST_HEAD(&parent->
++						variant.dir_variant.children);
++			} else if (!parent ||
++				   parent->variant_type !=
++					YAFFS_OBJECT_TYPE_DIRECTORY) {
++				/* Hoosterman, another problem....
++				 * Trying to use a non-directory as a directory
++				 */
++
++				yaffs_trace(YAFFS_TRACE_ERROR,
++					"yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++					);
++				parent = dev->lost_n_found;
++			}
++			yaffs_add_obj_to_dir(parent, in);
++
++			is_unlinked = (parent == dev->del_dir) ||
++					(parent == dev->unlinked_dir);
++
++			if (is_shrink)
++				/* Mark the block */
++				bi->has_shrink_hdr = 1;
++
++			/* Note re hardlinks.
++			 * Since we might scan a hardlink before its equivalent
++			 * object is scanned we put them all in a list.
++			 * After scanning is complete, we should have all the
++			 * objects, so we run through this list and fix up all
++			 * the chains.
++			 */
++
++			switch (in->variant_type) {
++			case YAFFS_OBJECT_TYPE_UNKNOWN:
++				/* TODO: got a problem */
++				break;
++			case YAFFS_OBJECT_TYPE_FILE:
++				file_var = &in->variant.file_variant;
++				if (file_var->scanned_size < file_size) {
++					/* This covers the case where the file
++					 * size is greater than the data held.
++					 * This will happen if the file is
++					 * resized to be larger than its
++					 * current data extents.
++					 */
++					file_var->file_size = file_size;
++					file_var->scanned_size = file_size;
++				}
++
++				if (file_var->shrink_size > file_size)
++					file_var->shrink_size = file_size;
++
++				break;
++			case YAFFS_OBJECT_TYPE_HARDLINK:
++				hl_var = &in->variant.hardlink_variant;
++				if (!is_unlinked) {
++					hl_var->equiv_id = equiv_id;
++					list_add(&in->hard_links, hard_list);
++				}
++				break;
++			case YAFFS_OBJECT_TYPE_DIRECTORY:
++				/* Do nothing */
++				break;
++			case YAFFS_OBJECT_TYPE_SPECIAL:
++				/* Do nothing */
++				break;
++			case YAFFS_OBJECT_TYPE_SYMLINK:
++				sl_var = &in->variant.symlink_variant;
++				if (oh) {
++					sl_var->alias =
++					    yaffs_clone_str(oh->alias);
++					if (!sl_var->alias)
++						alloc_failed = 1;
++				}
++				break;
++			}
++		}
++	}
++	return alloc_failed ? YAFFS_FAIL : YAFFS_OK;
++}
++
++int yaffs2_scan_backwards(struct yaffs_dev *dev)
++{
++	int blk;
++	int block_iter;
++	int start_iter;
++	int end_iter;
++	int n_to_scan = 0;
++	enum yaffs_block_state state;
++	int c;
++	int deleted;
++	LIST_HEAD(hard_list);
++	struct yaffs_block_info *bi;
++	u32 seq_number;
++	int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
++	u8 *chunk_data;
++	int found_chunks;
++	int alloc_failed = 0;
++	struct yaffs_block_index *block_index = NULL;
++	int alt_block_index = 0;
++	int summary_available;
++
++	yaffs_trace(YAFFS_TRACE_SCAN,
++		"yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
++		dev->internal_start_block, dev->internal_end_block);
++
++	dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++	block_index =
++		kmalloc(n_blocks * sizeof(struct yaffs_block_index), GFP_NOFS);
++
++	if (!block_index) {
++		block_index =
++		    vmalloc(n_blocks * sizeof(struct yaffs_block_index));
++		alt_block_index = 1;
++	}
++
++	if (!block_index) {
++		yaffs_trace(YAFFS_TRACE_SCAN,
++			"yaffs2_scan_backwards() could not allocate block index!"
++			);
++		return YAFFS_FAIL;
++	}
++
++	dev->blocks_in_checkpt = 0;
++
++	chunk_data = yaffs_get_temp_buffer(dev);
++
++	/* Scan all the blocks to determine their state */
++	bi = dev->block_info;
++	for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
++	     blk++) {
++		yaffs_clear_chunk_bits(dev, blk);
++		bi->pages_in_use = 0;
++		bi->soft_del_pages = 0;
++
++		yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++		bi->block_state = state;
++		bi->seq_number = seq_number;
++
++		if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
++			bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++		if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++			bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++
++		yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
++			"Block scanning block %d state %d seq %d",
++			blk, bi->block_state, seq_number);
++
++		if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
++			dev->blocks_in_checkpt++;
++
++		} else if (bi->block_state == YAFFS_BLOCK_STATE_DEAD) {
++			yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
++				"block %d is bad", blk);
++		} else if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++			yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
++			dev->n_erased_blocks++;
++			dev->n_free_chunks += dev->param.chunks_per_block;
++		} else if (bi->block_state ==
++				YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++			/* Determine the highest sequence number */
++			if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++			    seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++				block_index[n_to_scan].seq = seq_number;
++				block_index[n_to_scan].block = blk;
++				n_to_scan++;
++				if (seq_number >= dev->seq_number)
++					dev->seq_number = seq_number;
++			} else {
++				/* TODO: Nasty sequence number! */
++				yaffs_trace(YAFFS_TRACE_SCAN,
++					"Block scanning block %d has bad sequence number %d",
++					blk, seq_number);
++			}
++		}
++		bi++;
++	}
++
++	yaffs_trace(YAFFS_TRACE_SCAN, "%d blocks to be sorted...", n_to_scan);
++
++	cond_resched();
++
++	/* Sort the blocks by sequence number */
++	sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
++		   yaffs2_ybicmp, NULL);
++
++	cond_resched();
++
++	yaffs_trace(YAFFS_TRACE_SCAN, "...done");
++
++	/* Now scan the blocks looking at the data. */
++	start_iter = 0;
++	end_iter = n_to_scan - 1;
++	yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
++
++	/* For each block.... backwards */
++	for (block_iter = end_iter;
++	     !alloc_failed && block_iter >= start_iter;
++	     block_iter--) {
++		/* Cooperative multitasking! This loop can run for so
++		 * long that watchdog timers expire. */
++		cond_resched();
++
++		/* get the block to scan in the correct order */
++		blk = block_index[block_iter].block;
++		bi = yaffs_get_block_info(dev, blk);
++		deleted = 0;
++
++		summary_available = yaffs_summary_read(dev, dev->sum_tags, blk);
++
++		/* For each chunk in each block that needs scanning.... */
++		found_chunks = 0;
++		if (summary_available)
++			c = dev->chunks_per_summary - 1;
++		else
++			c = dev->param.chunks_per_block - 1;
++
++		for (/* c is already initialised */;
++		     !alloc_failed && c >= 0 &&
++		     (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
++		      bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING);
++		      c--) {
++			/* Scan backwards...
++			 * Read the tags and decide what to do
++			 */
++			if (yaffs2_scan_chunk(dev, bi, blk, c,
++					&found_chunks, chunk_data,
++					&hard_list, summary_available) ==
++					YAFFS_FAIL)
++				alloc_failed = 1;
++		}
++
++		if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++			/* If we got this far while scanning, then the block
++			 * is fully allocated. */
++			bi->block_state = YAFFS_BLOCK_STATE_FULL;
++		}
++
++		/* Now let's see if it was dirty */
++		if (bi->pages_in_use == 0 &&
++		    !bi->has_shrink_hdr &&
++		    bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++			yaffs_block_became_dirty(dev, blk);
++		}
++	}
++
++	yaffs_skip_rest_of_block(dev);
++
++	if (alt_block_index)
++		vfree(block_index);
++	else
++		kfree(block_index);
++
++	/* Ok, we've done all the scanning.
++	 * Fix up the hard link chains.
++	 * We have scanned all the objects, so now it's time to add these
++	 * hardlinks.
++	 */
++	yaffs_link_fixup(dev, &hard_list);
++
++	yaffs_release_temp_buffer(dev, chunk_data);
++
++	if (alloc_failed)
++		return YAFFS_FAIL;
++
++	yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
++
++	return YAFFS_OK;
++}
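
yaffs2_scan_backwards() leans entirely on block sequence numbers: candidate
blocks are sorted ascending with yaffs2_ybicmp() and then walked from the
highest sequence number down, so the first chunk seen for any object is
always the newest copy and older duplicates can simply be deleted. A
standalone sketch of that ordering (userspace qsort() standing in for the
kernel's sort(); the data is made up):

    #include <stdio.h>
    #include <stdlib.h>

    struct block_index { int seq; int block; };

    /* Same shape as yaffs2_ybicmp(): order by seq, tie-break on block. */
    static int ybicmp(const void *a, const void *b)
    {
        const struct block_index *x = a, *y = b;
        return (x->seq == y->seq) ? x->block - y->block : x->seq - y->seq;
    }

    int main(void)
    {
        struct block_index idx[] = { { 40, 2 }, { 12, 5 }, { 40, 1 }, { 7, 3 } };
        int n = sizeof(idx) / sizeof(idx[0]);
        int i;

        qsort(idx, n, sizeof(idx[0]), ybicmp);

        /* Walk backwards: newest blocks first, as in the scan loop above. */
        for (i = n - 1; i >= 0; i--)
            printf("scan block %d (seq %d)\n", idx[i].block, idx[i].seq);
        return 0;
    }
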
+diff --git a/fs/yaffs2/yaffs_yaffs2.h b/fs/yaffs2/yaffs_yaffs2.h
+new file mode 100644
+index 00000000..2363bfd8
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs2.h
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_YAFFS2_H__
++#define __YAFFS_YAFFS2_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
++void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
++void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
++				   struct yaffs_block_info *bi);
++void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
++				    struct yaffs_block_info *bi);
++int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
++u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
++int yaffs2_checkpt_required(struct yaffs_dev *dev);
++int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
++
++void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
++int yaffs2_checkpt_save(struct yaffs_dev *dev);
++int yaffs2_checkpt_restore(struct yaffs_dev *dev);
++
++int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
++int yaffs2_scan_backwards(struct yaffs_dev *dev);
++
++#endif
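
The checkpoint API above exists to make mounting fast: a clean unmount
serialises device state (block info, chunk bitmaps, objects, tnodes) into
checkpoint blocks, and the next mount restores that state instead of
rescanning the whole medium. A hedged sketch of the call order a mount path
might use (the example_* names are hypothetical; the real wiring lives in
yaffs_guts.c):

    #include "yaffs_guts.h"
    #include "yaffs_yaffs2.h"

    static int example_mount(struct yaffs_dev *dev)
    {
        /* Fast path: restore the state written by the last clean unmount. */
        if (yaffs2_checkpt_restore(dev) == YAFFS_OK)
            return YAFFS_OK;

        /* No usable checkpoint: discard it and fall back to a full scan. */
        yaffs2_checkpt_invalidate(dev);
        return yaffs2_scan_backwards(dev);
    }

    static int example_unmount(struct yaffs_dev *dev)
    {
        /* Persist runtime state so the next mount can skip the scan. */
        return yaffs2_checkpt_save(dev);
    }
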
+diff --git a/fs/yaffs2/yportenv.h b/fs/yaffs2/yportenv.h
+new file mode 100644
+index 00000000..666d909b
+--- /dev/null
++++ b/fs/yaffs2/yportenv.h
+@@ -0,0 +1,82 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YPORTENV_H__
++#define __YPORTENV_H__
++
++/*
++ * Define the MTD version in terms of Linux Kernel versions.
++ * This allows yaffs to be used independently of the kernel
++ * as well as with it.
++ */
++
++#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
++
++#ifdef YAFFS_OUT_OF_TREE
++#include "moduleconfig.h"
++#endif
++
++#include <linux/version.h>
++#define MTD_VERSION_CODE LINUX_VERSION_CODE
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/xattr.h>
++#include <linux/list.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/sort.h>
++#include <linux/bitops.h>
++
++/* These type wrappings are used to support Unicode names in WinCE. */
++#define YCHAR char
++#define YUCHAR unsigned char
++#define _Y(x)     x
++
++#define YAFFS_LOSTNFOUND_NAME		"lost+found"
++#define YAFFS_LOSTNFOUND_PREFIX		"obj"
++
++
++#define YAFFS_ROOT_MODE			0755
++#define YAFFS_LOSTNFOUND_MODE		0700
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
++#define Y_TIME_CONVERT(x) (x).tv_sec
++#else
++#define Y_CURRENT_TIME CURRENT_TIME
++#define Y_TIME_CONVERT(x) (x)
++#endif
++
++#define compile_time_assertion(assertion) \
++	({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
++
++
++#define yaffs_trace(msk, fmt, ...) do { \
++	if (yaffs_trace_mask & (msk)) \
++		printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
++} while (0)
++
++
++#endif
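
The yaffs_trace() macro above is a masked printk: a message is emitted only
when its category bit is set in the global yaffs_trace_mask, so disabled
categories cost a single test-and-branch on hot paths. The same pattern in
plain userspace C (printf standing in for printk; the mask bits here are
hypothetical, analogous to YAFFS_TRACE_SCAN and friends):

    #include <stdio.h>

    #define TRACE_SCAN (1u << 0)
    #define TRACE_GC   (1u << 1)

    static unsigned int trace_mask = TRACE_SCAN; /* runtime-tunable in YAFFS */

    #define trace(msk, fmt, ...) do { \
        if (trace_mask & (msk)) \
            printf("yaffs: " fmt "\n", ##__VA_ARGS__); \
    } while (0)

    int main(void)
    {
        trace(TRACE_SCAN, "scanning block %d", 42); /* printed */
        trace(TRACE_GC, "gc pass %d", 1);           /* masked out */
        return 0;
    }
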
+diff --git a/include/linux/clk.h b/include/linux/clk.h
+index 1d37f42a..f49e434e 100644
+--- a/include/linux/clk.h
++++ b/include/linux/clk.h
+@@ -64,6 +64,11 @@ int clk_enable(struct clk *clk);
+  */
+ void clk_disable(struct clk *clk);
+ 
++/*
++ * clk_reset - reset the given clock.
++ * Vendor-specific extension; not part of the mainline clk API.
++ */
++void clk_reset(struct clk *clk);
++
+ /**
+  * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
+  *		  This is only valid once the clock source has been enabled.
+@@ -155,4 +160,6 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id);
+ int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
+ 			struct device *dev);
+ 
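++/*
++ * clk_change_parent - select a clock's parent by mux index.
++ * Vendor-specific extension; not part of the mainline clk API.
++ */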
++void clk_change_parent(struct clk *clk, int select);
++
+ #endif
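
clk_reset() and clk_change_parent() are vendor extensions to the generic clk
API added by this patch (presumably for the Fullhan SoC clock driver); their
exact semantics depend on the platform implementation. A hedged usage sketch,
assuming the usual clk_get()/clk_enable() flow and an invented clock name:

    #include <linux/clk.h>
    #include <linux/err.h>

    static int example_setup_clock(struct device *dev)
    {
        struct clk *clk = clk_get(dev, "sdc"); /* "sdc" is an assumption */
        int ret;

        if (IS_ERR(clk))
            return PTR_ERR(clk);

        clk_change_parent(clk, 1); /* assumed: select the second mux input */

        ret = clk_enable(clk);
        if (ret)
            return ret;

        clk_reset(clk); /* assumed: pulse the peripheral's reset */
        return 0;
    }
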
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index eee7adde..adff3291 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -18,12 +18,16 @@
+  * The full GNU General Public License is included in this distribution in the
+  * file called COPYING.
+  */
+-#ifndef DMAENGINE_H
+-#define DMAENGINE_H
++#ifndef LINUX_DMAENGINE_H
++#define LINUX_DMAENGINE_H
+ 
+ #include <linux/device.h>
+ #include <linux/uio.h>
+-#include <linux/dma-mapping.h>
++#include <linux/bug.h>
++#include <linux/scatterlist.h>
++#include <linux/bitmap.h>
++#include <linux/types.h>
++#include <asm/page.h>
+ 
+ /**
+  * typedef dma_cookie_t - an opaque DMA cookie
+@@ -31,8 +35,8 @@
+  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
+  */
+ typedef s32 dma_cookie_t;
+-#define DMA_MIN_COOKIE	1
+-#define DMA_MAX_COOKIE	INT_MAX
++#define DMA_MIN_COOKIE  1
++#define DMA_MAX_COOKIE  INT_MAX
+ 
+ #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
+ 
+@@ -44,10 +48,10 @@ typedef s32 dma_cookie_t;
+  * @DMA_ERROR: transaction failed
+  */
+ enum dma_status {
+-	DMA_SUCCESS,
+-	DMA_IN_PROGRESS,
+-	DMA_PAUSED,
+-	DMA_ERROR,
++    DMA_SUCCESS,
++    DMA_IN_PROGRESS,
++    DMA_PAUSED,
++    DMA_ERROR,
+ };
+ 
+ /**
+@@ -57,23 +61,105 @@ enum dma_status {
+  * automatically set as dma devices are registered.
+  */
+ enum dma_transaction_type {
+-	DMA_MEMCPY,
+-	DMA_XOR,
+-	DMA_PQ,
+-	DMA_XOR_VAL,
+-	DMA_PQ_VAL,
+-	DMA_MEMSET,
+-	DMA_INTERRUPT,
+-	DMA_SG,
+-	DMA_PRIVATE,
+-	DMA_ASYNC_TX,
+-	DMA_SLAVE,
+-	DMA_CYCLIC,
++    DMA_MEMCPY,
++    DMA_XOR,
++    DMA_PQ,
++    DMA_XOR_VAL,
++    DMA_PQ_VAL,
++    DMA_MEMSET,
++    DMA_INTERRUPT,
++    DMA_SG,
++    DMA_PRIVATE,
++    DMA_ASYNC_TX,
++    DMA_SLAVE,
++    DMA_CYCLIC,
++    DMA_INTERLEAVE,
++/* last transaction type for creation of the capabilities mask */
++    DMA_TX_TYPE_END,
+ };
+ 
+-/* last transaction type for creation of the capabilities mask */
+-#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
++/**
++ * enum dma_transfer_direction - dma transfer mode and direction indicator
++ * @DMA_MEM_TO_MEM: Async/Memcpy mode
++ * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
++ * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
++ * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
++ */
++enum dma_transfer_direction {
++    DMA_MEM_TO_MEM,
++    DMA_MEM_TO_DEV,
++    DMA_DEV_TO_MEM,
++    DMA_DEV_TO_DEV,
++    DMA_TRANS_NONE,
++};
++
++/**
++ * Interleaved Transfer Request
++ * ----------------------------
++ * A chunk is collection of contiguous bytes to be transfered.
++ * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
++ * ICGs may or maynot change between chunks.
++ * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
++ *  that when repeated an integral number of times, specifies the transfer.
++ * A transfer template is specification of a Frame, the number of times
++ *  it is to be repeated and other per-transfer attributes.
++ *
++ * Practically, a client driver would have ready a template for each
++ *  type of transfer it is going to need during its lifetime and
++ *  set only 'src_start' and 'dst_start' before submitting the requests.
++ *
++ *
++ *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
++ *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
++ *
++ *    ==  Chunk size
++ *    ... ICG
++ */
++
++/**
++ * struct data_chunk - Element of scatter-gather list that makes a frame.
++ * @size: Number of bytes to read from source.
++ *    size_dst := fn(op, size_src), so doesn't mean much for destination.
++ * @icg: Number of bytes to jump after last src/dst address of this
++ *   chunk and before first src/dst address for next chunk.
++ *   Ignored for dst (assumed 0), if dst_inc is true and dst_sgl is false.
++ *   Ignored for src (assumed 0), if src_inc is true and src_sgl is false.
++ */
++struct data_chunk {
++    size_t size;
++    size_t icg;
++};
+ 
++/**
++ * struct dma_interleaved_template - Template to convey the transfer pattern
++ *   and attributes to the DMAC.
++ * @src_start: Bus address of source for the first chunk.
++ * @dst_start: Bus address of destination for the first chunk.
++ * @dir: Specifies the type of Source and Destination.
++ * @src_inc: If the source address increments after reading from it.
++ * @dst_inc: If the destination address increments after writing to it.
++ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
++ *      Otherwise, source is read contiguously (icg ignored).
++ *      Ignored if src_inc is false.
++ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
++ *      Otherwise, destination is filled contiguously (icg ignored).
++ *      Ignored if dst_inc is false.
++ * @numf: Number of frames in this template.
++ * @frame_size: Number of chunks in a frame i.e, size of sgl[].
++ * @sgl: Array of {chunk,icg} pairs that make up a frame.
++ */
++struct dma_interleaved_template {
++    dma_addr_t src_start;
++    dma_addr_t dst_start;
++    enum dma_transfer_direction dir;
++    bool src_inc;
++    bool dst_inc;
++    bool src_sgl;
++    bool dst_sgl;
++    size_t numf;
++    size_t frame_size;
++    struct data_chunk sgl[0];
++};
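++
++/*
++ * Illustrative sketch only (not part of the original header): how a client
++ * might fill a template for a mem-to-mem transfer of 8 frames, each a
++ * 64-byte chunk followed by a 192-byte gap on the source side. Assumes xt
++ * was allocated with room for one struct data_chunk in sgl[].
++ */
++static inline void example_fill_template(struct dma_interleaved_template *xt,
++					 dma_addr_t src, dma_addr_t dst)
++{
++	xt->src_start = src;
++	xt->dst_start = dst;
++	xt->dir = DMA_MEM_TO_MEM;
++	xt->src_inc = true;
++	xt->src_sgl = true;	/* apply sgl[].icg when reading the source */
++	xt->dst_inc = true;
++	xt->dst_sgl = false;	/* write the destination contiguously */
++	xt->numf = 8;
++	xt->frame_size = 1;
++	xt->sgl[0].size = 64;
++	xt->sgl[0].icg = 192;
++}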
+ 
+ /**
+  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
+@@ -86,9 +172,9 @@ enum dma_transaction_type {
+  * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
+  * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
+  * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
+- * 	(if not set, do the source dma-unmapping as page)
++ *  (if not set, do the source dma-unmapping as page)
+  * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
+- * 	(if not set, do the destination dma-unmapping as page)
++ *  (if not set, do the destination dma-unmapping as page)
+  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
+  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
+  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
+@@ -98,16 +184,16 @@ enum dma_transaction_type {
+  *  on the result of this operation
+  */
+ enum dma_ctrl_flags {
+-	DMA_PREP_INTERRUPT = (1 << 0),
+-	DMA_CTRL_ACK = (1 << 1),
+-	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
+-	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
+-	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
+-	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
+-	DMA_PREP_PQ_DISABLE_P = (1 << 6),
+-	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
+-	DMA_PREP_CONTINUE = (1 << 8),
+-	DMA_PREP_FENCE = (1 << 9),
++    DMA_PREP_INTERRUPT = (1 << 0),
++    DMA_CTRL_ACK = (1 << 1),
++    DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
++    DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
++    DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
++    DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
++    DMA_PREP_PQ_DISABLE_P = (1 << 6),
++    DMA_PREP_PQ_DISABLE_Q = (1 << 7),
++    DMA_PREP_CONTINUE = (1 << 8),
++    DMA_PREP_FENCE = (1 << 9),
+ };
+ 
+ /**
+@@ -125,19 +211,19 @@ enum dma_ctrl_flags {
+  * into external start mode.
+  */
+ enum dma_ctrl_cmd {
+-	DMA_TERMINATE_ALL,
+-	DMA_PAUSE,
+-	DMA_RESUME,
+-	DMA_SLAVE_CONFIG,
+-	FSLDMA_EXTERNAL_START,
++    DMA_TERMINATE_ALL,
++    DMA_PAUSE,
++    DMA_RESUME,
++    DMA_SLAVE_CONFIG,
++    FSLDMA_EXTERNAL_START,
+ };
+ 
+ /**
+  * enum sum_check_bits - bit position of pq_check_flags
+  */
+ enum sum_check_bits {
+-	SUM_CHECK_P = 0,
+-	SUM_CHECK_Q = 1,
++    SUM_CHECK_P = 0,
++    SUM_CHECK_Q = 1,
+ };
+ 
+ /**
+@@ -146,8 +232,8 @@ enum sum_check_bits {
+  * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
+  */
+ enum sum_check_flags {
+-	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+-	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
++    SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
++    SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+ };
+ 
+ 
+@@ -164,15 +250,16 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
+  */
+ 
+ struct dma_chan_percpu {
+-	/* stats */
+-	unsigned long memcpy_count;
+-	unsigned long bytes_transferred;
++    /* stats */
++    unsigned long memcpy_count;
++    unsigned long bytes_transferred;
+ };
+ 
+ /**
+  * struct dma_chan - devices supply DMA channels, clients use them
+  * @device: ptr to the dma device who supplies this channel, always !%NULL
+  * @cookie: last cookie value returned to client
++ * @completed_cookie: last completed cookie for this channel
+  * @chan_id: channel ID for sysfs
+  * @dev: class device for sysfs
+  * @device_node: used to add this to the device chan list
+@@ -182,18 +269,19 @@ struct dma_chan_percpu {
+  * @private: private data for certain client-channel associations
+  */
+ struct dma_chan {
+-	struct dma_device *device;
+-	dma_cookie_t cookie;
+-
+-	/* sysfs */
+-	int chan_id;
+-	struct dma_chan_dev *dev;
+-
+-	struct list_head device_node;
+-	struct dma_chan_percpu __percpu *local;
+-	int client_count;
+-	int table_count;
+-	void *private;
++    struct dma_device *device;
++    dma_cookie_t cookie;
++    dma_cookie_t completed_cookie;
++
++    /* sysfs */
++    int chan_id;
++    struct dma_chan_dev *dev;
++
++    struct list_head device_node;
++    struct dma_chan_percpu __percpu *local;
++    int client_count;
++    int table_count;
++    void *private;
+ };
+ 
+ /**
+@@ -204,10 +292,10 @@ struct dma_chan {
+  * @idr_ref - reference count to gate release of dma_device dev_id
+  */
+ struct dma_chan_dev {
+-	struct dma_chan *chan;
+-	struct device device;
+-	int dev_id;
+-	atomic_t *idr_ref;
++    struct dma_chan *chan;
++    struct device device;
++    int dev_id;
++    atomic_t *idr_ref;
+ };
+ 
+ /**
+@@ -215,11 +303,11 @@ struct dma_chan_dev {
+  * device, source or target buses
+  */
+ enum dma_slave_buswidth {
+-	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+-	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+-	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+-	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+-	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
++    DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
++    DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
++    DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
++    DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
++    DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+ };
+ 
+ /**
+@@ -247,6 +335,12 @@ enum dma_slave_buswidth {
+  * may or may not be applicable on memory sources.
+  * @dst_maxburst: same as src_maxburst but for destination target
+  * mutatis mutandis.
++ * @device_fc: Flow controller settings. Only valid for slave channels. Set
++ * to 'true' if the peripheral should be the flow controller. The direction
++ * is selected at runtime.
++ * @slave_id: Slave requester id. Only valid for slave channels. The DMA
++ * slave peripheral has a unique id as DMA requester, which needs to be
++ * passed in the slave config.
+  *
+  * This struct is passed in as configuration data to a DMA engine
+  * in order to set up a certain channel for DMA transport at runtime.
+@@ -266,18 +360,20 @@ enum dma_slave_buswidth {
+  * struct, if applicable.
+  */
+ struct dma_slave_config {
+-	enum dma_data_direction direction;
+-	dma_addr_t src_addr;
+-	dma_addr_t dst_addr;
+-	enum dma_slave_buswidth src_addr_width;
+-	enum dma_slave_buswidth dst_addr_width;
+-	u32 src_maxburst;
+-	u32 dst_maxburst;
++    enum dma_transfer_direction direction;
++    dma_addr_t src_addr;
++    dma_addr_t dst_addr;
++    enum dma_slave_buswidth src_addr_width;
++    enum dma_slave_buswidth dst_addr_width;
++    u32 src_maxburst;
++    u32 dst_maxburst;
++    bool device_fc;
++    unsigned int slave_id;
+ };
+ 
+ static inline const char *dma_chan_name(struct dma_chan *chan)
+ {
+-	return dev_name(&chan->dev->device);
++    return dev_name(&chan->dev->device);
+ }
+ 
+ void dma_chan_cleanup(struct kref *kref);
+@@ -300,9 +396,9 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
+  * struct dma_async_tx_descriptor - async transaction descriptor
+  * ---dma generic offload fields---
+  * @cookie: tracking cookie for this transaction, set to -EBUSY if
+- *	this tx is sitting on a dependency list
++ *  this tx is sitting on a dependency list
+  * @flags: flags to augment operation preparation, control completion, and
+- * 	communicate status
++ *  communicate status
+  * @phys: physical address of the descriptor
+  * @chan: target channel for this operation
+  * @tx_submit: set the prepared descriptor(s) to be executed by the engine
+@@ -314,17 +410,17 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
+  * @lock: protect the parent and next pointers
+  */
+ struct dma_async_tx_descriptor {
+-	dma_cookie_t cookie;
+-	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
+-	dma_addr_t phys;
+-	struct dma_chan *chan;
+-	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+-	dma_async_tx_callback callback;
+-	void *callback_param;
++    dma_cookie_t cookie;
++    enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
++    dma_addr_t phys;
++    struct dma_chan *chan;
++    dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
++    dma_async_tx_callback callback;
++    void *callback_param;
+ #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+-	struct dma_async_tx_descriptor *next;
+-	struct dma_async_tx_descriptor *parent;
+-	spinlock_t lock;
++    struct dma_async_tx_descriptor *next;
++    struct dma_async_tx_descriptor *parent;
++    spinlock_t lock;
+ #endif
+ };
+ 
+@@ -337,7 +433,7 @@ static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+ }
+ static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+ {
+-	BUG();
++    BUG();
+ }
+ static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+ {
+@@ -347,42 +443,42 @@ static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+ }
+ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+ {
+-	return NULL;
++    return NULL;
+ }
+ static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+ {
+-	return NULL;
++    return NULL;
+ }
+ 
+ #else
+ static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+ {
+-	spin_lock_bh(&txd->lock);
++    spin_lock_bh(&txd->lock);
+ }
+ static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+ {
+-	spin_unlock_bh(&txd->lock);
++    spin_unlock_bh(&txd->lock);
+ }
+ static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+ {
+-	txd->next = next;
+-	next->parent = txd;
++    txd->next = next;
++    next->parent = txd;
+ }
+ static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+ {
+-	txd->parent = NULL;
++    txd->parent = NULL;
+ }
+ static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+ {
+-	txd->next = NULL;
++    txd->next = NULL;
+ }
+ static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+ {
+-	return txd->parent;
++    return txd->parent;
+ }
+ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+ {
+-	return txd->next;
++    return txd->next;
+ }
+ #endif
+ 
+@@ -392,13 +488,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
+  * @last: last completed DMA cookie
+  * @used: last issued DMA cookie (i.e. the one in progress)
+  * @residue: the remaining number of bytes left to transmit
+- *	on the selected transfer for states DMA_IN_PROGRESS and
+- *	DMA_PAUSED if this is implemented in the driver, else 0
++ *  on the selected transfer for states DMA_IN_PROGRESS and
++ *  DMA_PAUSED if this is implemented in the driver, else 0
+  */
+ struct dma_tx_state {
+-	dma_cookie_t last;
+-	dma_cookie_t used;
+-	u32 residue;
++    dma_cookie_t last;
++    dma_cookie_t used;
++    u32 residue;
+ };
+ 
+ /**
+@@ -417,7 +513,7 @@ struct dma_tx_state {
+  * @dev_id: unique device ID
+  * @dev: struct device reference for dma mapping api
+  * @device_alloc_chan_resources: allocate resources and return the
+- *	number of allocated descriptors
++ *  number of allocated descriptors
+  * @device_free_chan_resources: release DMA channel's resources
+  * @device_prep_dma_memcpy: prepares a memcpy operation
+  * @device_prep_dma_xor: prepares a xor operation
+@@ -428,179 +524,247 @@ struct dma_tx_state {
+  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+  * @device_prep_slave_sg: prepares a slave dma operation
+  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
+- *	The function takes a buffer of size buf_len. The callback function will
+- *	be called after period_len bytes have been transferred.
++ *  The function takes a buffer of size buf_len. The callback function will
++ *  be called after period_len bytes have been transferred.
++ * @device_prep_interleaved_dma: Transfer expression in a generic way.
+  * @device_control: manipulate all pending operations on a channel, returns
+- *	zero or error code
++ *  zero or error code
+  * @device_tx_status: poll for transaction completion, the optional
+- *	txstate parameter can be supplied with a pointer to get a
+- *	struct with auxiliary transfer status information, otherwise the call
+- *	will just return a simple status code
++ *  txstate parameter can be supplied with a pointer to get a
++ *  struct with auxiliary transfer status information, otherwise the call
++ *  will just return a simple status code
+  * @device_issue_pending: push pending transactions to hardware
+  */
+ struct dma_device {
+ 
+-	unsigned int chancnt;
+-	unsigned int privatecnt;
+-	struct list_head channels;
+-	struct list_head global_node;
+-	dma_cap_mask_t  cap_mask;
+-	unsigned short max_xor;
+-	unsigned short max_pq;
+-	u8 copy_align;
+-	u8 xor_align;
+-	u8 pq_align;
+-	u8 fill_align;
+-	#define DMA_HAS_PQ_CONTINUE (1 << 15)
+-
+-	int dev_id;
+-	struct device *dev;
+-
+-	int (*device_alloc_chan_resources)(struct dma_chan *chan);
+-	void (*device_free_chan_resources)(struct dma_chan *chan);
+-
+-	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+-		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+-		size_t len, unsigned long flags);
+-	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
+-		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+-		unsigned int src_cnt, size_t len, unsigned long flags);
+-	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
+-		struct dma_chan *chan, dma_addr_t *src,	unsigned int src_cnt,
+-		size_t len, enum sum_check_flags *result, unsigned long flags);
+-	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+-		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+-		unsigned int src_cnt, const unsigned char *scf,
+-		size_t len, unsigned long flags);
+-	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+-		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+-		unsigned int src_cnt, const unsigned char *scf, size_t len,
+-		enum sum_check_flags *pqres, unsigned long flags);
+-	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+-		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+-		unsigned long flags);
+-	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
+-		struct dma_chan *chan, unsigned long flags);
+-	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+-		struct dma_chan *chan,
+-		struct scatterlist *dst_sg, unsigned int dst_nents,
+-		struct scatterlist *src_sg, unsigned int src_nents,
+-		unsigned long flags);
+-
+-	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
+-		struct dma_chan *chan, struct scatterlist *sgl,
+-		unsigned int sg_len, enum dma_data_direction direction,
+-		unsigned long flags);
+-	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
+-		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+-		size_t period_len, enum dma_data_direction direction);
+-	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+-		unsigned long arg);
+-
+-	enum dma_status (*device_tx_status)(struct dma_chan *chan,
+-					    dma_cookie_t cookie,
+-					    struct dma_tx_state *txstate);
+-	void (*device_issue_pending)(struct dma_chan *chan);
++    unsigned int chancnt;
++    unsigned int privatecnt;
++    struct list_head channels;
++    struct list_head global_node;
++    dma_cap_mask_t  cap_mask;
++    unsigned short max_xor;
++    unsigned short max_pq;
++    u8 copy_align;
++    u8 xor_align;
++    u8 pq_align;
++    u8 fill_align;
++    #define DMA_HAS_PQ_CONTINUE (1 << 15)
++
++    int dev_id;
++    struct device *dev;
++
++    int (*device_alloc_chan_resources)(struct dma_chan *chan);
++    void (*device_free_chan_resources)(struct dma_chan *chan);
++
++    struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
++        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
++        size_t len, unsigned long flags);
++    struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
++        struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
++        unsigned int src_cnt, size_t len, unsigned long flags);
++    struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
++        struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
++        size_t len, enum sum_check_flags *result, unsigned long flags);
++    struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
++        struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
++        unsigned int src_cnt, const unsigned char *scf,
++        size_t len, unsigned long flags);
++    struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
++        struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
++        unsigned int src_cnt, const unsigned char *scf, size_t len,
++        enum sum_check_flags *pqres, unsigned long flags);
++    struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
++        struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
++        unsigned long flags);
++    struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
++        struct dma_chan *chan, unsigned long flags);
++    struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
++        struct dma_chan *chan,
++        struct scatterlist *dst_sg, unsigned int dst_nents,
++        struct scatterlist *src_sg, unsigned int src_nents,
++        unsigned long flags);
++
++    struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
++        struct dma_chan *chan, struct scatterlist *sgl,
++        unsigned int sg_len, enum dma_transfer_direction direction,
++        unsigned long flags, void *context);
++    struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
++        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
++        size_t period_len, enum dma_transfer_direction direction,
++        unsigned long flags, void *context);
++    struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
++        struct dma_chan *chan, struct dma_interleaved_template *xt,
++        unsigned long flags);
++    int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
++        unsigned long arg);
++
++    enum dma_status (*device_tx_status)(struct dma_chan *chan,
++                        dma_cookie_t cookie,
++                        struct dma_tx_state *txstate);
++    void (*device_issue_pending)(struct dma_chan *chan);
+ };
+ 
+ static inline int dmaengine_device_control(struct dma_chan *chan,
+-					   enum dma_ctrl_cmd cmd,
+-					   unsigned long arg)
++                       enum dma_ctrl_cmd cmd,
++                       unsigned long arg)
+ {
+-	return chan->device->device_control(chan, cmd, arg);
++    if (chan->device->device_control)
++        return chan->device->device_control(chan, cmd, arg);
++
++    return -ENOSYS;
+ }
+ 
+ static inline int dmaengine_slave_config(struct dma_chan *chan,
+-					  struct dma_slave_config *config)
++                      struct dma_slave_config *config)
++{
++    return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
++            (unsigned long)config);
++}
++
++static inline bool is_slave_direction(enum dma_transfer_direction direction)
++{
++    return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
++}
++
++static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
++    struct dma_chan *chan, dma_addr_t buf, size_t len,
++    enum dma_transfer_direction dir, unsigned long flags)
++{
++    struct scatterlist sg;
++    sg_init_table(&sg, 1);
++    sg_dma_address(&sg) = buf;
++    sg_dma_len(&sg) = len;
++
++    return chan->device->device_prep_slave_sg(chan, &sg, 1,
++                          dir, flags, NULL);
++}
++
++static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
++    struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
++    enum dma_transfer_direction dir, unsigned long flags)
+ {
+-	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
+-			(unsigned long)config);
++    return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
++                          dir, flags, NULL);
++}
++
++#ifdef CONFIG_RAPIDIO_DMA_ENGINE
++struct rio_dma_ext;
++static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
++    struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
++    enum dma_transfer_direction dir, unsigned long flags,
++    struct rio_dma_ext *rio_ext)
++{
++    return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
++                          dir, flags, rio_ext);
++}
++#endif
++
++static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
++        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
++        size_t period_len, enum dma_transfer_direction dir,
++        unsigned long flags)
++{
++    return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
++                        period_len, dir, flags, NULL);
++}
++
++static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
++        struct dma_chan *chan, struct dma_interleaved_template *xt,
++        unsigned long flags)
++{
++    return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+ }
+ 
+ static inline int dmaengine_terminate_all(struct dma_chan *chan)
+ {
+-	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
++    return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+ }
+ 
+ static inline int dmaengine_pause(struct dma_chan *chan)
+ {
+-	return dmaengine_device_control(chan, DMA_PAUSE, 0);
++    return dmaengine_device_control(chan, DMA_PAUSE, 0);
+ }
+ 
+ static inline int dmaengine_resume(struct dma_chan *chan)
+ {
+-	return dmaengine_device_control(chan, DMA_RESUME, 0);
++    return dmaengine_device_control(chan, DMA_RESUME, 0);
++}
++
++static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
++    dma_cookie_t cookie, struct dma_tx_state *state)
++{
++    return chan->device->device_tx_status(chan, cookie, state);
+ }
+ 
+ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
+ {
+-	return desc->tx_submit(desc);
++    return desc->tx_submit(desc);
+ }
+ 
+ static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+ {
+-	size_t mask;
+-
+-	if (!align)
+-		return true;
+-	mask = (1 << align) - 1;
+-	if (mask & (off1 | off2 | len))
+-		return false;
+-	return true;
++    size_t mask;
++
++    if (!align)
++        return true;
++    mask = (1 << align) - 1;
++    if (mask & (off1 | off2 | len))
++        return false;
++    return true;
+ }
+ 
+ static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+-				       size_t off2, size_t len)
++                       size_t off2, size_t len)
+ {
+-	return dmaengine_check_align(dev->copy_align, off1, off2, len);
++    return dmaengine_check_align(dev->copy_align, off1, off2, len);
+ }
+ 
+ static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+-				      size_t off2, size_t len)
++                      size_t off2, size_t len)
+ {
+-	return dmaengine_check_align(dev->xor_align, off1, off2, len);
++    return dmaengine_check_align(dev->xor_align, off1, off2, len);
+ }
+ 
+ static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+-				     size_t off2, size_t len)
++                     size_t off2, size_t len)
+ {
+-	return dmaengine_check_align(dev->pq_align, off1, off2, len);
++    return dmaengine_check_align(dev->pq_align, off1, off2, len);
+ }
+ 
+ static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+-				       size_t off2, size_t len)
++                       size_t off2, size_t len)
+ {
+-	return dmaengine_check_align(dev->fill_align, off1, off2, len);
++    return dmaengine_check_align(dev->fill_align, off1, off2, len);
+ }
+ 
+ static inline void
+ dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
+ {
+-	dma->max_pq = maxpq;
+-	if (has_pq_continue)
+-		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
++    dma->max_pq = maxpq;
++    if (has_pq_continue)
++        dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+ }
+ 
+ static inline bool dmaf_continue(enum dma_ctrl_flags flags)
+ {
+-	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
++    return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+ }
+ 
+ static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
+ {
+-	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
++    enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+ 
+-	return (flags & mask) == mask;
++    return (flags & mask) == mask;
+ }
+ 
+ static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
+ {
+-	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
++    return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+ }
+ 
+ static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+ {
+-	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
++    return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+ }
+ 
+ /* dma_maxpq - reduce maxpq in the face of continued operations
+@@ -618,13 +782,13 @@ static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+  */
+ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
+ {
+-	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+-		return dma_dev_to_maxpq(dma);
+-	else if (dmaf_p_disabled_continue(flags))
+-		return dma_dev_to_maxpq(dma) - 1;
+-	else if (dmaf_continue(flags))
+-		return dma_dev_to_maxpq(dma) - 3;
+-	BUG();
++    if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
++        return dma_dev_to_maxpq(dma);
++    else if (dmaf_p_disabled_continue(flags))
++        return dma_dev_to_maxpq(dma) - 1;
++    else if (dmaf_continue(flags))
++        return dma_dev_to_maxpq(dma) - 3;
++    BUG();
+ }
+ 
+ /* --- public DMA engine API --- */
+@@ -642,8 +806,8 @@ static inline void dmaengine_put(void)
+ #endif
+ 
+ #ifdef CONFIG_NET_DMA
+-#define net_dmaengine_get()	dmaengine_get()
+-#define net_dmaengine_put()	dmaengine_put()
++#define net_dmaengine_get() dmaengine_get()
++#define net_dmaengine_put() dmaengine_put()
+ #else
+ static inline void net_dmaengine_get(void)
+ {
+@@ -654,8 +818,8 @@ static inline void net_dmaengine_put(void)
+ #endif
+ 
+ #ifdef CONFIG_ASYNC_TX_DMA
+-#define async_dmaengine_get()	dmaengine_get()
+-#define async_dmaengine_put()	dmaengine_put()
++#define async_dmaengine_get()   dmaengine_get()
++#define async_dmaengine_put()   dmaengine_put()
+ #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
+ #else
+@@ -671,80 +835,64 @@ static inline void async_dmaengine_put(void)
+ static inline struct dma_chan *
+ async_dma_find_channel(enum dma_transaction_type type)
+ {
+-	return NULL;
++    return NULL;
+ }
+ #endif /* CONFIG_ASYNC_TX_DMA */
+ 
+ dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
+-	void *dest, void *src, size_t len);
++    void *dest, void *src, size_t len);
+ dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
+-	struct page *page, unsigned int offset, void *kdata, size_t len);
++    struct page *page, unsigned int offset, void *kdata, size_t len);
+ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
+-	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
+-	unsigned int src_off, size_t len);
++    struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
++    unsigned int src_off, size_t len);
+ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+-	struct dma_chan *chan);
++    struct dma_chan *chan);
+ 
+ static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
+ {
+-	tx->flags |= DMA_CTRL_ACK;
++    tx->flags |= DMA_CTRL_ACK;
+ }
+ 
+ static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
+ {
+-	tx->flags &= ~DMA_CTRL_ACK;
++    tx->flags &= ~DMA_CTRL_ACK;
+ }
+ 
+ static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
+ {
+-	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
+-}
+-
+-#define first_dma_cap(mask) __first_dma_cap(&(mask))
+-static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
+-{
+-	return min_t(int, DMA_TX_TYPE_END,
+-		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
+-}
+-
+-#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
+-static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
+-{
+-	return min_t(int, DMA_TX_TYPE_END,
+-		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
++    return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
+ }
+ 
+ #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
+ static inline void
+ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+ {
+-	set_bit(tx_type, dstp->bits);
++    set_bit(tx_type, dstp->bits);
+ }
+ 
+ #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+ static inline void
+ __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+ {
+-	clear_bit(tx_type, dstp->bits);
++    clear_bit(tx_type, dstp->bits);
+ }
+ 
+ #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
+ static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
+ {
+-	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
++    bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+ }
+ 
+ #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
+ static inline int
+ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
+ {
+-	return test_bit(tx_type, srcp->bits);
++    return test_bit(tx_type, srcp->bits);
+ }
+ 
+ #define for_each_dma_cap_mask(cap, mask) \
+-	for ((cap) = first_dma_cap(mask);	\
+-		(cap) < DMA_TX_TYPE_END;	\
+-		(cap) = next_dma_cap((cap), (mask)))
++    for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
+ 
+ /**
+  * dma_async_issue_pending - flush pending transactions to HW
+@@ -755,11 +903,9 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
+  */
+ static inline void dma_async_issue_pending(struct dma_chan *chan)
+ {
+-	chan->device->device_issue_pending(chan);
++    chan->device->device_issue_pending(chan);
+ }
+ 
+-#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
+-
+ /**
+  * dma_async_is_tx_complete - poll for transaction completion
+  * @chan: DMA channel
+@@ -772,72 +918,76 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
+  * the status of multiple cookies without re-checking hardware state.
+  */
+ static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
+-	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
++    dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+ {
+-	struct dma_tx_state state;
+-	enum dma_status status;
+-
+-	status = chan->device->device_tx_status(chan, cookie, &state);
+-	if (last)
+-		*last = state.last;
+-	if (used)
+-		*used = state.used;
+-	return status;
++    struct dma_tx_state state;
++    enum dma_status status;
++
++    status = chan->device->device_tx_status(chan, cookie, &state);
++    if (last)
++        *last = state.last;
++    if (used)
++        *used = state.used;
++    return status;
+ }
+ 
+-#define dma_async_memcpy_complete(chan, cookie, last, used)\
+-	dma_async_is_tx_complete(chan, cookie, last, used)
+-
+ /**
+  * dma_async_is_complete - test a cookie against chan state
+  * @cookie: transaction identifier to test status of
+  * @last_complete: last know completed transaction
+  * @last_used: last cookie value handed out
+  *
+- * dma_async_is_complete() is used in dma_async_memcpy_complete()
++ * dma_async_is_complete() is used in dma_async_is_tx_complete()
+  * the test logic is separated for lightweight testing of multiple cookies
+  */
+ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
+-			dma_cookie_t last_complete, dma_cookie_t last_used)
++            dma_cookie_t last_complete, dma_cookie_t last_used)
+ {
+-	if (last_complete <= last_used) {
+-		if ((cookie <= last_complete) || (cookie > last_used))
+-			return DMA_SUCCESS;
+-	} else {
+-		if ((cookie <= last_complete) && (cookie > last_used))
+-			return DMA_SUCCESS;
+-	}
+-	return DMA_IN_PROGRESS;
++    if (last_complete <= last_used) {
++        if ((cookie <= last_complete) || (cookie > last_used))
++            return DMA_SUCCESS;
++    } else {
++        if ((cookie <= last_complete) && (cookie > last_used))
++            return DMA_SUCCESS;
++    }
++    return DMA_IN_PROGRESS;
+ }
+ 
+ static inline void
+ dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+ {
+-	if (st) {
+-		st->last = last;
+-		st->used = used;
+-		st->residue = residue;
+-	}
++    if (st) {
++        st->last = last;
++        st->used = used;
++        st->residue = residue;
++    }
+ }
+ 
+ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+ #ifdef CONFIG_DMA_ENGINE
+ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+ void dma_issue_pending_all(void);
+-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
++struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
++                    dma_filter_fn fn, void *fn_param);
++struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+ void dma_release_channel(struct dma_chan *chan);
+ #else
+ static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+ {
+-	return DMA_SUCCESS;
++    return DMA_SUCCESS;
+ }
+ static inline void dma_issue_pending_all(void)
+ {
+ }
+-static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
+-					      dma_filter_fn fn, void *fn_param)
++static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
++                          dma_filter_fn fn, void *fn_param)
+ {
+-	return NULL;
++    return NULL;
++}
++static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
++                             const char *name)
++{
++    return NULL;
+ }
+ static inline void dma_release_channel(struct dma_chan *chan)
+ {
+@@ -850,28 +1000,45 @@ int dma_async_device_register(struct dma_device *device);
+ void dma_async_device_unregister(struct dma_device *device);
+ void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
++struct dma_chan *net_dma_find_channel(void);
+ #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
++#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
++    __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
++
++static inline struct dma_chan
++*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
++                  dma_filter_fn fn, void *fn_param,
++                  struct device *dev, const char *name)
++{
++    struct dma_chan *chan;
++
++    chan = dma_request_slave_channel(dev, name);
++    if (chan)
++        return chan;
++
++    return __dma_request_channel(mask, fn, fn_param);
++}
+ 
+ /* --- Helper iov-locking functions --- */
+ 
+ struct dma_page_list {
+-	char __user *base_address;
+-	int nr_pages;
+-	struct page **pages;
++    char __user *base_address;
++    int nr_pages;
++    struct page **pages;
+ };
+ 
+ struct dma_pinned_list {
+-	int nr_iovecs;
+-	struct dma_page_list page_list[0];
++    int nr_iovecs;
++    struct dma_page_list page_list[0];
+ };
+ 
+ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
+ void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
+ 
+ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
+-	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
++    struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
+ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
+-	struct dma_pinned_list *pinned_list, struct page *page,
+-	unsigned int offset, size_t len);
++    struct dma_pinned_list *pinned_list, struct page *page,
++    unsigned int offset, size_t len);
+ 
+ #endif /* DMAENGINE_H */
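
The dmaengine.h changes above move the slave and cyclic prep callbacks to
enum dma_transfer_direction, thread a context pointer through them, and add
the dmaengine_prep_*() wrappers. As a rough illustration only, a slave
consumer built against these declarations could drive a TX FIFO like the
sketch below; the channel, device, FIFO address and burst values are
placeholders, and error unwinding is trimmed.

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>

    static int example_tx(struct dma_chan *chan, struct device *dev,
                          void *buf, size_t len, dma_addr_t fifo_addr)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_MEM_TO_DEV,
                    .dst_addr       = fifo_addr,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 4,            /* placeholder */
            };
            struct dma_async_tx_descriptor *desc;
            dma_addr_t dma_buf;
            int ret;

            ret = dmaengine_slave_config(chan, &cfg);
            if (ret)
                    return ret;

            dma_buf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, dma_buf))
                    return -ENOMEM;

            /* wraps the buffer in a one-entry scatterlist and calls
             * device_prep_slave_sg(), as defined above */
            desc = dmaengine_prep_slave_single(chan, dma_buf, len,
                                               DMA_MEM_TO_DEV,
                                               DMA_PREP_INTERRUPT);
            if (!desc)
                    return -EIO;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }

The same flow works for RX by swapping DMA_MEM_TO_DEV for DMA_DEV_TO_MEM and
configuring src_addr/src_addr_width instead.
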
+diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
+deleted file mode 100644
+index 4bfe0a2f..00000000
+--- a/include/linux/dw_dmac.h
++++ /dev/null
+@@ -1,139 +0,0 @@
+-/*
+- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
+- * AVR32 systems.)
+- *
+- * Copyright (C) 2007 Atmel Corporation
+- * Copyright (C) 2010-2011 ST Microelectronics
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-#ifndef DW_DMAC_H
+-#define DW_DMAC_H
+-
+-#include <linux/dmaengine.h>
+-
+-/**
+- * struct dw_dma_platform_data - Controller configuration parameters
+- * @nr_channels: Number of channels supported by hardware (max 8)
+- * @is_private: The device channels should be marked as private and not for
+- *	by the general purpose DMA channel allocator.
+- */
+-struct dw_dma_platform_data {
+-	unsigned int	nr_channels;
+-	bool		is_private;
+-#define CHAN_ALLOCATION_ASCENDING	0	/* zero to seven */
+-#define CHAN_ALLOCATION_DESCENDING	1	/* seven to zero */
+-	unsigned char	chan_allocation_order;
+-#define CHAN_PRIORITY_ASCENDING		0	/* chan0 highest */
+-#define CHAN_PRIORITY_DESCENDING	1	/* chan7 highest */
+-	unsigned char	chan_priority;
+-};
+-
+-/**
+- * enum dw_dma_slave_width - DMA slave register access width.
+- * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
+- * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
+- * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
+- */
+-enum dw_dma_slave_width {
+-	DW_DMA_SLAVE_WIDTH_8BIT,
+-	DW_DMA_SLAVE_WIDTH_16BIT,
+-	DW_DMA_SLAVE_WIDTH_32BIT,
+-};
+-
+-/* bursts size */
+-enum dw_dma_msize {
+-	DW_DMA_MSIZE_1,
+-	DW_DMA_MSIZE_4,
+-	DW_DMA_MSIZE_8,
+-	DW_DMA_MSIZE_16,
+-	DW_DMA_MSIZE_32,
+-	DW_DMA_MSIZE_64,
+-	DW_DMA_MSIZE_128,
+-	DW_DMA_MSIZE_256,
+-};
+-
+-/* flow controller */
+-enum dw_dma_fc {
+-	DW_DMA_FC_D_M2M,
+-	DW_DMA_FC_D_M2P,
+-	DW_DMA_FC_D_P2M,
+-	DW_DMA_FC_D_P2P,
+-	DW_DMA_FC_P_P2M,
+-	DW_DMA_FC_SP_P2P,
+-	DW_DMA_FC_P_M2P,
+-	DW_DMA_FC_DP_P2P,
+-};
+-
+-/**
+- * struct dw_dma_slave - Controller-specific information about a slave
+- *
+- * @dma_dev: required DMA master device
+- * @tx_reg: physical address of data register used for
+- *	memory-to-peripheral transfers
+- * @rx_reg: physical address of data register used for
+- *	peripheral-to-memory transfers
+- * @reg_width: peripheral register width
+- * @cfg_hi: Platform-specific initializer for the CFG_HI register
+- * @cfg_lo: Platform-specific initializer for the CFG_LO register
+- * @src_master: src master for transfers on allocated channel.
+- * @dst_master: dest master for transfers on allocated channel.
+- * @src_msize: src burst size.
+- * @dst_msize: dest burst size.
+- * @fc: flow controller for DMA transfer
+- */
+-struct dw_dma_slave {
+-	struct device		*dma_dev;
+-	dma_addr_t		tx_reg;
+-	dma_addr_t		rx_reg;
+-	enum dw_dma_slave_width	reg_width;
+-	u32			cfg_hi;
+-	u32			cfg_lo;
+-	u8			src_master;
+-	u8			dst_master;
+-	u8			src_msize;
+-	u8			dst_msize;
+-	u8			fc;
+-};
+-
+-/* Platform-configurable bits in CFG_HI */
+-#define DWC_CFGH_FCMODE		(1 << 0)
+-#define DWC_CFGH_FIFO_MODE	(1 << 1)
+-#define DWC_CFGH_PROTCTL(x)	((x) << 2)
+-#define DWC_CFGH_SRC_PER(x)	((x) << 7)
+-#define DWC_CFGH_DST_PER(x)	((x) << 11)
+-
+-/* Platform-configurable bits in CFG_LO */
+-#define DWC_CFGL_LOCK_CH_XFER	(0 << 12)	/* scope of LOCK_CH */
+-#define DWC_CFGL_LOCK_CH_BLOCK	(1 << 12)
+-#define DWC_CFGL_LOCK_CH_XACT	(2 << 12)
+-#define DWC_CFGL_LOCK_BUS_XFER	(0 << 14)	/* scope of LOCK_BUS */
+-#define DWC_CFGL_LOCK_BUS_BLOCK	(1 << 14)
+-#define DWC_CFGL_LOCK_BUS_XACT	(2 << 14)
+-#define DWC_CFGL_LOCK_CH	(1 << 15)	/* channel lockout */
+-#define DWC_CFGL_LOCK_BUS	(1 << 16)	/* busmaster lockout */
+-#define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
+-#define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
+-
+-/* DMA API extensions */
+-struct dw_cyclic_desc {
+-	struct dw_desc	**desc;
+-	unsigned long	periods;
+-	void		(*period_callback)(void *param);
+-	void		*period_callback_param;
+-};
+-
+-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+-		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+-		enum dma_data_direction direction);
+-void dw_dma_cyclic_free(struct dma_chan *chan);
+-int dw_dma_cyclic_start(struct dma_chan *chan);
+-void dw_dma_cyclic_stop(struct dma_chan *chan);
+-
+-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+-
+-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+-
+-#endif /* DW_DMAC_H */
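
With dw_dmac.h deleted, what struct dw_dma_slave used to describe per
controller is expressed through the generic struct dma_slave_config from the
dmaengine.h hunks above. A rough, unverified mapping for a driver being
converted (buswidth and burst values are placeholders; dw_dma_msize was an
enum while maxburst counts data items):

    static void example_cfg_from_dw(struct dma_slave_config *cfg,
                                    dma_addr_t rx_reg, dma_addr_t tx_reg)
    {
            cfg->src_addr       = rx_reg;   /* was dw_dma_slave.rx_reg */
            cfg->dst_addr       = tx_reg;   /* was dw_dma_slave.tx_reg */
            cfg->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; /* was reg_width */
            cfg->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
            cfg->src_maxburst   = 8;        /* was src_msize */
            cfg->dst_maxburst   = 8;        /* was dst_msize */
            cfg->device_fc      = false;    /* was fc */
    }
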
+diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
+index bdd7ceeb..52040142 100644
+--- a/include/linux/mmc/dw_mmc.h
++++ b/include/linux/mmc/dw_mmc.h
+@@ -14,6 +14,12 @@
+ #ifndef _LINUX_MMC_DW_MMC_H_
+ #define _LINUX_MMC_DW_MMC_H_
+ 
++#include <linux/scatterlist.h>
++#include <linux/compiler.h>
++#include <linux/types.h>
++#include <linux/io.h>
++#include <linux/mmc/host.h>
++
+ #define MAX_MCI_SLOTS	2
+ 
+ enum dw_mci_state {
+@@ -117,6 +123,8 @@ struct dw_mci {
+ 
+ 	/* DMA interface members*/
+ 	int			use_dma;
++	int			using_dma;
++	unsigned int		prev_blksz;
+ 
+ 	dma_addr_t		sg_dma;
+ 	void			*sg_cpu;
+@@ -154,6 +162,9 @@ struct dw_mci {
+ 	u32			quirks;
+ 
+ 	struct regulator	*vmmc;	/* Power regulator */
++
++	int			dma_data_mapped;
++	int			data_error_flag;
+ };
+ 
+ /* DMA ops for Internal/External DMAC interface */
+@@ -200,7 +211,7 @@ struct dw_mci_board {
+ 	/* delay in mS before detecting cards after interrupt */
+ 	u32 detect_delay_ms;
+ 
+-	int (*init)(u32 slot_id, irq_handler_t , void *);
++	int (*init)(u32 slot_id, void *irq_handler_t, void *);
+ 	int (*get_ro)(u32 slot_id);
+ 	int (*get_cd)(u32 slot_id);
+ 	int (*get_ocr)(u32 slot_id);
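
The dw_mci_board init() hook above now takes its handler as an opaque
pointer rather than an irq_handler_t. A board callback matching the new
prototype would be shaped like this placeholder sketch (the body is an
assumption, not taken from the patch):

    static int example_slot_init(u32 slot_id, void *handler, void *data)
    {
            /* board-specific card-detect and slot IRQ wiring goes here */
            return 0;
    }
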
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 1ee44244..107b5174 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -280,6 +280,7 @@ struct mmc_host {
+ #endif
+ 
+ 	struct dentry		*debugfs_root;
++	unsigned int		rescan_count;
+ 
+ 	unsigned long		private[0] ____cacheline_aligned;
+ };
+diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
+index 245cdace..573ed64b 100644
+--- a/include/linux/mmc/sdio.h
++++ b/include/linux/mmc/sdio.h
+@@ -72,6 +72,7 @@
+ #define  SDIO_CCCR_REV_1_00	0	/* CCCR/FBR Version 1.00 */
+ #define  SDIO_CCCR_REV_1_10	1	/* CCCR/FBR Version 1.10 */
+ #define  SDIO_CCCR_REV_1_20	2	/* CCCR/FBR Version 1.20 */
++#define  SDIO_CCCR_REV_3_00	3	/* CCCR/FBR Version 3.00 (SDIO 3.0) */
+ 
+ #define  SDIO_SDIO_REV_1_00	0	/* SDIO Spec Version 1.00 */
+ #define  SDIO_SDIO_REV_1_10	1	/* SDIO Spec Version 1.10 */
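
With the CCCR 3.00 revision code defined, core code can classify newer cards
by reading the CCCR register. A sketch, assuming the mmc core's
mmc_io_rw_direct() helper from drivers/mmc/core/sdio_ops.h; the low nibble
of SDIO_CCCR_CCCR carries the CCCR/FBR format version:

    static bool example_cccr_is_v3(struct mmc_card *card)
    {
            u8 data;

            if (mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CCCR, 0, &data))
                    return false;
            return (data & 0x0f) >= SDIO_CCCR_REV_3_00;
    }
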
+diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
+index 57cc0e63..ec2c889b 100644
+--- a/include/linux/mtd/bbm.h
++++ b/include/linux/mtd/bbm.h
+@@ -101,10 +101,28 @@ struct nand_bbt_descr {
+ /* Chip stores bad block marker on BOTH 1st and 6th bytes of OOB */
+ #define NAND_BBT_SCANBYTE1AND6 0x00100000
+ /* The nand_bbt_descr was created dynamicaly and must be freed */
+-#define NAND_BBT_DYNAMICSTRUCT 0x00200000
++/*#define NAND_BBT_DYNAMICSTRUCT 0x00200000*/
++/*
++ * Use a flash based bad block table. By default, OOB identifier is saved in
++ * OOB area. This option is passed to the default bad block table function.
++ */
++#define NAND_BBT_USE_FLASH      0x00020000
++
+ /* The bad block table does not use OOB for marker */
+ #define NAND_BBT_NO_OOB		0x00400000
+ 
++/*
++ * Do not write new bad block markers to OOB; useful, e.g., when ECC covers
++ * entire spare area. Must be used with NAND_BBT_USE_FLASH.
++ */
++#define NAND_BBT_NO_OOB_BBM     0x00800000
++/*
++ * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
++ * was allocated dynamically and must be freed in nand_release(). Has no meaning
++ * in nand_chip.bbt_options.
++ */
++#define NAND_BBT_DYNAMICSTRUCT  0x80000000
++
+ /* The maximum number of blocks to scan for a bbt */
+ #define NAND_BBT_SCAN_MAXBLOCKS	4
+ 
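
The two flags reintroduced above are meant to be OR-ed into a chip's
bbt_options, as the spi-nand core added next in this patch does. A minimal
sketch using the spi_nand_chip structure that follows:

    static void example_enable_flash_bbt(struct spi_nand_chip *chip)
    {
            /* keep the BBT itself in flash and keep markers out of OOB */
            chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
    }
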
+diff --git a/include/linux/mtd/spi-nand.h b/include/linux/mtd/spi-nand.h
+new file mode 100644
+index 00000000..a12173d4
+--- /dev/null
++++ b/include/linux/mtd/spi-nand.h
+@@ -0,0 +1,334 @@
++/*-
++ *
++ * Copyright (c) 2009-2014 Micron Technology, Inc.
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * Peter Pan <peterpandong at micron.com>
++ *
++ * based on mt29f_spinand.h
++ */
++#ifndef __LINUX_MTD_SPI_NAND_H
++#define __LINUX_MTD_SPI_NAND_H
++
++#include <linux/wait.h>
++#include <linux/spinlock.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/flashchip.h>
++
++
++/*
++ * Standard SPI-NAND flash commands
++ */
++#define SPINAND_CMD_READ			0x13
++#define SPINAND_CMD_READ_RDM			0x03
++#define SPINAND_CMD_PROG_LOAD			0x02
++#define SPINAND_CMD_PROG_RDM			0x84
++#define SPINAND_CMD_PROG			0x10
++#define SPINAND_CMD_ERASE_BLK			0xd8
++#define SPINAND_CMD_WR_ENABLE			0x06
++#define SPINAND_CMD_WR_DISABLE			0x04
++#define SPINAND_CMD_READ_ID			0x9f
++#define SPINAND_CMD_RESET			0xff
++#define SPINAND_CMD_READ_REG			0x0f
++#define SPINAND_CMD_WRITE_REG			0x1f
++
++#define SPINAND_CMD_READ_CACHE_X2		0x3b
++#define SPINAND_CMD_READ_CACHE_X4		0x6b
++#define SPINAND_CMD_READ_CACHE_DUAL		0xbb
++#define SPINAND_CMD_READ_CACHE_QUAD		0xeb
++
++#define SPINAND_CMD_PROG_LOAD_X4		0x32
++#define SPINAND_CMD_PROG_RDM_X4			0xC4	/* or 0x34 */
++
++/* feature registers */
++#define REG_BLOCK_LOCK			0xa0
++#define REG_OTP				0xb0
++#define REG_STATUS			0xc0	/* timing */
++
++/* status */
++#define STATUS_OIP_MASK			0x01
++#define STATUS_READY			(0 << 0)
++#define STATUS_BUSY			(1 << 0)
++
++#define STATUS_E_FAIL_MASK		0x04
++#define STATUS_E_FAIL			(1 << 2)
++
++#define STATUS_P_FAIL_MASK		0x08
++#define STATUS_P_FAIL			(1 << 3)
++
++/*OTP register defines*/
++#define OTP_ECC_MASK			0X10
++#define OTP_ECC_ENABLE			(1 << 4)
++#define OTP_ENABLE			(1 << 6)
++#define OTP_LOCK			(1 << 7)
++
++/* block lock */
++#define BL_ALL_LOCKED      0x38
++#define BL_1_2_LOCKED      0x30
++#define BL_1_4_LOCKED      0x28
++#define BL_1_8_LOCKED      0x20
++#define BL_1_16_LOCKED     0x18
++#define BL_1_32_LOCKED     0x10
++#define BL_1_64_LOCKED     0x08
++#define BL_ALL_UNLOCKED    0
++
++#define SPI_NAND_ECC_SHIFT		4
++
++#define SPI_NAND_MT29F_ECC_MASK		3
++#define SPI_NAND_MT29F_ECC_CORRECTED	1
++#define SPI_NAND_MT29F_ECC_UNCORR	2
++#define SPI_NAND_MT29F_ECC_RESERVED	3
++#define SPI_NAND_MT29F_ECC_SHIFT	4
++
++#define SPI_NAND_GD5F_ECC_MASK		7
++#define SPI_NAND_GD5F_ECC_UNCORR	7
++#define SPI_NAND_GD5F_ECC_SHIFT		4
++
++struct spi_nand_onfi_params {
++	/* rev info and features block */
++	/* 'O' 'N' 'F' 'I'  */
++	u8		sig[4];				/*0-3*/
++	__le16		revision;			/*4-5*/
++	__le16		features;			/*6-7*/
++	__le16		opt_cmd;			/*8-9*/
++	u8		reserved0[22];			/*10-31*/
++
++	/* manufacturer information block */
++	char		manufacturer[12];		/*32-43*/
++	char		model[20];			/*44-63*/
++	u8		mfr_id;				/*64*/
++	__le16		date_code;			/*65-66*/
++	u8		reserved1[13];			/*67-79*/
++
++	/* memory organization block */
++	__le32		byte_per_page;			/*80-83*/
++	__le16		spare_bytes_per_page;		/*84-85*/
++	__le32		data_bytes_per_ppage;		/*86-89*/
++	__le16		spare_bytes_per_ppage;		/*90-91*/
++	__le32		pages_per_block;		/*92-95*/
++	__le32		blocks_per_lun;			/*96-99*/
++	u8		lun_count;			/*100*/
++	u8		addr_cycles;			/*101*/
++	u8		bits_per_cell;			/*102*/
++	__le16		bb_per_lun;			/*103-104*/
++	__le16		block_endurance;		/*105-106*/
++	u8		guaranteed_good_blocks;		/*107*/
++	__le16		guaranteed_block_endurance;	/*108-109*/
++	u8		programs_per_page;		/*110*/
++	u8		ppage_attr;			/*111*/
++	u8		ecc_bits;			/*112*/
++	u8		interleaved_bits;		/*113*/
++	u8		interleaved_ops;		/*114*/
++	u8		reserved2[13];			/*115-127*/
++
++	/* electrical parameter block */
++	u8		io_pin_capacitance_max;		/*128*/
++	__le16		timing_mode;			/*129-130*/
++	__le16		program_cache_timing_mode;	/*131-132*/
++	__le16		t_prog;				/*133-134*/
++	__le16		t_bers;				/*135-136*/
++	__le16		t_r;				/*137-138*/
++	__le16		t_ccs;				/*139-140*/
++	u8		reserved3[23];			/*141-163*/
++
++	/* vendor */
++	__le16		vendor_specific_revision;	/*164-165*/
++	u8		vendor_specific[88];		/*166-253*/
++
++	__le16		crc;				/*254-255*/
++} __packed;
++
++#define ONFI_CRC_BASE	0x4F4E
++
++#define SPINAND_MAX_ID_LEN		4
++
++/**
++ * struct spi_nand_chip - SPI-NAND Private Flash Chip Data
++ * @chip_lock:		[INTERN] protection lock
++ * @name:		name of the chip
++ * @wq:			[INTERN] wait queue to sleep on if a SPI-NAND operation
++ *			is in progress, used instead of the per chip wait queue
++ *			when a hw controller is available.
++ * @mfr_id:		[BOARDSPECIFIC] manufacturer id
++ * @dev_id:		[BOARDSPECIFIC] device id
++ * @state:		[INTERN] the current state of the SPI-NAND device
++ * @spi:		[INTERN] pointer to spi device structure
++ * @mtd:		[INTERN] pointer to MTD device structure
++ * @reset:		[REPLACEABLE] function to reset the device
++ * @read_id:		[REPLACEABLE] read manufacturer id and device id
++ * @load_page:		[REPLACEABLE] load page from NAND to cache
++ * @read_cache:		[REPLACEABLE] read data from cache
++ * @store_cache:	[REPLACEABLE] write data to cache
++ * @write_page:		[REPLACEABLE] program NAND with cache data
++ * @erase_block:	[REPLACEABLE] erase a given block
++ * @waitfunc:		[REPLACEABLE] wait for ready.
++ * @write_enable:	[REPLACEABLE] set write enable latch
++ * @get_ecc_status:	[REPLACEABLE] get ecc and bitflip status
++ * @enable_ecc:		[REPLACEABLE] enable on-die ecc
++ * @disable_ecc:	[REPLACEABLE] disable on-die ecc
++ * @buf:		[INTERN] buffer for read/write
++ * @oobbuf:		[INTERN] buffer for read/write oob
++ * @pagebuf:		[INTERN] holds the page number which is currently in
++ *			the page buffer (@buf).
++ * @pagebuf_bitflips:	[INTERN] holds the bitflip count for the page which is
++ *			currently in the page buffer.
++ * @size:		[INTERN] the size of chip
++ * @block_size:		[INTERN] the size of eraseblock
++ * @page_size:		[INTERN] the size of page
++ * @page_spare_size:	[INTERN] the size of page oob size
++ * @block_shift:	[INTERN] number of address bits in a eraseblock
++ * @page_shift:		[INTERN] number of address bits in a page (column
++ *			address bits).
++ * @page_mask:		[INTERN] page number mask = number of (pages / chip) - 1
++ * @options:		[BOARDSPECIFIC] various chip options. They can partly
++ *			be set to inform nand_scan about special functionality.
++ * @ecc_strength_ds:	[INTERN] ECC correctability from the datasheet.
++ *			Minimum amount of bit errors per @ecc_step_ds guaranteed
++ *			to be correctable. If unknown, set to zero.
++ * @ecc_step_ds:	[INTERN] ECC step required by the @ecc_strength_ds,
++ *                      also from the datasheet. It is the recommended ECC step
++ *			size, if known; if unknown, set to zero.
++ * @ecc_mask:		[INTERN] mask for the ECC status field in the status register
++ * @ecc_uncorr:		[INTERN] ECC status value that marks an uncorrectable error
++ * @bits_per_cell:	[INTERN] number of bits per cell. i.e., 1 means SLC.
++ * @ecclayout:		[BOARDSPECIFIC] ECC layout control structure
++ *			See the defines for further explanation.
++ * @bbt_options:	[INTERN] bad block specific options. All options used
++ *			here must come from bbm.h. By default, these options
++ *			will be copied to the appropriate nand_bbt_descr's.
++ * @bbt:		[INTERN] bad block table pointer
++ * @badblockpos:	[INTERN] position of the bad block marker in the oob
++ *			area.
++ * @bbt_td:		[REPLACEABLE] bad block table descriptor for flash
++ *			lookup.
++ * @bbt_md:		[REPLACEABLE] bad block table mirror descriptor
++ * @badblock_pattern:	[REPLACEABLE] bad block scan pattern used for initial
++ *			bad block scan.
++ * @onfi_params:	[INTERN] holds the ONFI page parameter when ONFI is
++ *			supported, 0 otherwise.
++ */
++struct spi_nand_chip {
++	spinlock_t	chip_lock;
++	char		*name;
++	wait_queue_head_t wq;
++	u8		dev_id_len;
++	u8		dev_id[SPINAND_MAX_ID_LEN];
++	flstate_t	state;
++	struct spi_device	*spi;
++	struct mtd_info	*mtd;
++
++	int (*reset)(struct spi_nand_chip *chip);
++	int (*read_id)(struct spi_nand_chip *chip, u8 *id);
++	int (*load_page)(struct spi_nand_chip *chip, unsigned int page_addr);
++	int (*read_cache)(struct spi_nand_chip *chip, unsigned int page_addr,
++		unsigned int page_offset,	size_t length, u8 *read_buf);
++	int (*store_cache)(struct spi_nand_chip *chip, unsigned int page_addr,
++		unsigned int page_offset,	size_t length, u8 *write_buf);
++	int (*write_page)(struct spi_nand_chip *chip, unsigned int page_addr);
++	int (*erase_block)(struct spi_nand_chip *chip, u32 page_addr);
++	int (*waitfunc)(struct spi_nand_chip *chip, u8 *status);
++	int (*write_enable)(struct spi_nand_chip *chip);
++	void (*get_ecc_status)(struct spi_nand_chip *chip, unsigned int status,
++						unsigned int *corrected,
++						unsigned int *ecc_errors);
++	int (*enable_ecc)(struct spi_nand_chip *chip);
++	int (*disable_ecc)(struct spi_nand_chip *chip);
++	int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
++
++	u8		*buf;
++	u8		*oobbuf;
++	int		pagebuf;
++	u32		pagebuf_bitflips;
++	u64		size;
++	u32		block_size;
++	u16		page_size;
++	u16		page_spare_size;
++	u8		block_shift;
++	u8		page_shift;
++	u16		page_mask;
++	u32		options;
++	u16		ecc_strength_ds;
++	u16		ecc_step_ds;
++	u8		ecc_mask;
++	u8		ecc_uncorr;
++	u8		bits_per_cell;
++	struct nand_ecclayout *ecclayout;
++	u32		bbt_options;
++	u8		*bbt;
++	int		badblockpos;
++	struct nand_bbt_descr *bbt_td;
++	struct nand_bbt_descr *bbt_md;
++	struct nand_bbt_descr *badblock_pattern;
++	struct spi_nand_onfi_params	 onfi_params;
++};
++
++
++struct spi_nand_id_info {
++#define SPI_NAND_ID_NO_DUMMY  (0xff)
++	u8 id_addr;
++	u8 id_len;
++};
++
++struct spi_nand_flash {
++	char		*name;
++	struct spi_nand_id_info id_info;
++	u8		dev_id[SPINAND_MAX_ID_LEN];
++	u32		page_size;
++	u32		page_spare_size;
++	u32		pages_per_blk;
++	u32		blks_per_chip;
++	u32		options;
++	u8		ecc_mask;
++	u8		ecc_uncorr;
++	struct nand_ecclayout *ecc_layout;
++};
++
++struct spi_nand_cmd {
++	u8		cmd;
++	u32		n_addr;		/* Number of address bytes */
++	u8		addr[3];	/* Reg Offset */
++	u32		n_tx;		/* Number of tx bytes */
++	u8		*tx_buf;	/* Tx buf */
++	u8		tx_nbits;
++	u32		n_rx;		/* Number of rx bytes */
++	u8		*rx_buf;	/* Rx buf */
++	u8		rx_nbits;
++};
++
++#define SPI_NAND_INFO(nm, mid, did, pagesz, sparesz, pg_per_blk,\
++	blk_per_chip, opts)				\
++	{ .name = (nm), .mfr_id = (mid), .dev_id = (did),\
++	.page_size = (pagesz), .page_spare_size = (sparesz),\
++	.pages_per_blk = (pg_per_blk), .blks_per_chip = (blk_per_chip),\
++	.options = (opts) }
++
++#define SPINAND_NEED_PLANE_SELECT	(1 << 0)
++
++#define SPINAND_MFR_MICRON		0x2C
++#define SPINAND_MFR_GIGADEVICE	0xC8
++
++int spi_nand_send_cmd(struct spi_device *spi, struct spi_nand_cmd *cmd);
++int spi_nand_read_from_cache(struct spi_nand_chip *chip,
++		u32 page_addr, u32 column, size_t len, u8 *rbuf);
++int spi_nand_read_from_cache_snor_protocol(struct spi_nand_chip *chip,
++		u32 page_addr, u32 column, size_t len, u8 *rbuf);
++int spi_nand_scan_ident(struct mtd_info *mtd);
++int spi_nand_scan_tail(struct mtd_info *mtd);
++int spi_nand_scan_ident_release(struct mtd_info *mtd);
++int spi_nand_scan_tail_release(struct mtd_info *mtd);
++int spi_nand_release(struct mtd_info *mtd);
++int __spi_nand_erase(struct mtd_info *mtd, struct erase_info *einfo,
++		int allowbbt);
++int spi_nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
++int spi_nand_default_bbt(struct mtd_info *mtd);
++int spi_nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
++#endif /* __LINUX_MTD_SPI_NAND_H */
++
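
As an illustration of the command interface this header declares, reading
the device ID could be phrased as below. Whether a dummy address byte is
required before the ID is part-specific (see spi_nand_id_info.id_addr); none
is assumed here.

    static int example_read_id(struct spi_device *spi, u8 *id)
    {
            struct spi_nand_cmd cmd = {
                    .cmd    = SPINAND_CMD_READ_ID,
                    .n_rx   = SPINAND_MAX_ID_LEN,
                    .rx_buf = id,
            };

            return spi_nand_send_cmd(spi, &cmd);
    }
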
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index 7c775751..31a1e273 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -1,8 +1,13 @@
+ #ifndef __LINUX_PWM_H
+ #define __LINUX_PWM_H
+ 
++#include <linux/err.h>
++#include <linux/of.h>
++
+ struct pwm_device;
++struct seq_file;
+ 
++#if defined(CONFIG_PWM) || defined(CONFIG_HAVE_PWM)
+ /*
+  * pwm_request - request a PWM device
+  */
+@@ -27,5 +32,251 @@ int pwm_enable(struct pwm_device *pwm);
+  * pwm_disable - stop a PWM output toggling
+  */
+ void pwm_disable(struct pwm_device *pwm);
++#else
++static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
++{
++	return ERR_PTR(-ENODEV);
++}
++
++static inline void pwm_free(struct pwm_device *pwm)
++{
++}
++
++static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
++{
++	return -EINVAL;
++}
++
++static inline int pwm_enable(struct pwm_device *pwm)
++{
++	return -EINVAL;
++}
++
++static inline void pwm_disable(struct pwm_device *pwm)
++{
++}
++#endif
++
++struct pwm_chip;
++
++/**
++ * enum pwm_polarity - polarity of a PWM signal
++ * @PWM_POLARITY_NORMAL: a high signal for the duration of the duty-
++ * cycle, followed by a low signal for the remainder of the pulse
++ * period
++ * @PWM_POLARITY_INVERSED: a low signal for the duration of the duty-
++ * cycle, followed by a high signal for the remainder of the pulse
++ * period
++ */
++enum pwm_polarity {
++	PWM_POLARITY_NORMAL,
++	PWM_POLARITY_INVERSED,
++};
++
++enum {
++	PWMF_REQUESTED = 1 << 0,
++	PWMF_ENABLED = 1 << 1,
++};
++
++struct pwm_device {
++	const char		*label;
++	unsigned long		flags;
++	unsigned int		hwpwm;
++	unsigned int		pwm;
++	struct pwm_chip		*chip;
++	void			*chip_data;
++
++	unsigned int		period; /* in nanoseconds */
++};
++
++static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
++{
++	if (pwm)
++		pwm->period = period;
++}
++
++static inline unsigned int pwm_get_period(struct pwm_device *pwm)
++{
++	return pwm ? pwm->period : 0;
++}
++
++/*
++ * pwm_set_polarity - configure the polarity of a PWM signal
++ */
++int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
++
++/**
++ * struct pwm_ops - PWM controller operations
++ * @request: optional hook for requesting a PWM
++ * @free: optional hook for freeing a PWM
++ * @config: configure duty cycles and period length for this PWM
++ * @set_polarity: configure the polarity of this PWM
++ * @enable: enable PWM output toggling
++ * @disable: disable PWM output toggling
++ * @dbg_show: optional routine to show contents in debugfs
++ * @owner: helps prevent removal of modules exporting active PWMs
++ */
++struct pwm_ops {
++	int			(*request)(struct pwm_chip *chip,
++					   struct pwm_device *pwm);
++	void			(*free)(struct pwm_chip *chip,
++					struct pwm_device *pwm);
++	int			(*config)(struct pwm_chip *chip,
++					  struct pwm_device *pwm,
++					  int duty_ns, int period_ns);
++	int			(*set_polarity)(struct pwm_chip *chip,
++					  struct pwm_device *pwm,
++					  enum pwm_polarity polarity);
++	int			(*enable)(struct pwm_chip *chip,
++					  struct pwm_device *pwm);
++	void			(*disable)(struct pwm_chip *chip,
++					   struct pwm_device *pwm);
++#ifdef CONFIG_DEBUG_FS
++	void			(*dbg_show)(struct pwm_chip *chip,
++					    struct seq_file *s);
++#endif
++	struct module		*owner;
++};
++
++/**
++ * struct pwm_chip - abstract a PWM controller
++ * @dev: device providing the PWMs
++ * @list: list node for internal use
++ * @ops: callbacks for this PWM controller
++ * @base: number of first PWM controlled by this chip
++ * @npwm: number of PWMs controlled by this chip
++ * @pwms: array of PWM devices allocated by the framework
++ * @can_sleep: must be true if the .config(), .enable() or .disable()
++ *             operations may sleep
++ */
++struct pwm_chip {
++	struct device		*dev;
++	struct list_head	list;
++	const struct pwm_ops	*ops;
++	int			base;
++	unsigned int		npwm;
++
++	struct pwm_device	*pwms;
++/*
++	struct pwm_device *	(*of_xlate)(struct pwm_chip *pc,
++					    const struct of_phandle_args *args);
++*/
++	unsigned int		of_pwm_n_cells;
++	bool			can_sleep;
++};
++
++#if defined(CONFIG_PWM)
++int pwm_set_chip_data(struct pwm_device *pwm, void *data);
++void *pwm_get_chip_data(struct pwm_device *pwm);
++
++int pwmchip_add(struct pwm_chip *chip);
++int pwmchip_remove(struct pwm_chip *chip);
++struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
++					 unsigned int index,
++					 const char *label);
++/*
++struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
++		const struct of_phandle_args *args);
++*/
++struct pwm_device *pwm_get(struct device *dev, const char *con_id);
++struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id);
++void pwm_put(struct pwm_device *pwm);
++
++struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
++struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
++				   const char *con_id);
++void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
++
++bool pwm_can_sleep(struct pwm_device *pwm);
++#else
++static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
++{
++	return -EINVAL;
++}
++
++static inline void *pwm_get_chip_data(struct pwm_device *pwm)
++{
++	return NULL;
++}
++
++static inline int pwmchip_add(struct pwm_chip *chip)
++{
++	return -EINVAL;
++}
++
++static inline int pwmchip_remove(struct pwm_chip *chip)
++{
++	return -EINVAL;
++}
++
++static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
++						       unsigned int index,
++						       const char *label)
++{
++	return ERR_PTR(-ENODEV);
++}
++
++static inline struct pwm_device *pwm_get(struct device *dev,
++					 const char *consumer)
++{
++	return ERR_PTR(-ENODEV);
++}
++
++static inline struct pwm_device *of_pwm_get(struct device_node *np,
++					    const char *con_id)
++{
++	return ERR_PTR(-ENODEV);
++}
++
++static inline void pwm_put(struct pwm_device *pwm)
++{
++}
++
++static inline struct pwm_device *devm_pwm_get(struct device *dev,
++					      const char *consumer)
++{
++	return ERR_PTR(-ENODEV);
++}
++
++static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
++						 struct device_node *np,
++						 const char *con_id)
++{
++	return ERR_PTR(-ENODEV);
++}
++
++static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
++{
++}
++
++static inline bool pwm_can_sleep(struct pwm_device *pwm)
++{
++	return false;
++}
++#endif
++
++struct pwm_lookup {
++	struct list_head list;
++	const char *provider;
++	unsigned int index;
++	const char *dev_id;
++	const char *con_id;
++};
++
++#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id)	\
++	{						\
++		.provider = _provider,			\
++		.index = _index,			\
++		.dev_id = _dev_id,			\
++		.con_id = _con_id,			\
++	}
++
++#if defined(CONFIG_PWM)
++void pwm_add_table(struct pwm_lookup *table, size_t num);
++#else
++static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
++{
++}
++#endif
+ 
+ #endif /* __LINUX_PWM_H */
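The pwm_lookup table and PWM_LOOKUP() macro above give board files a way to bind a PWM channel to a consumer device by name when no device tree is available. A minimal sketch of both sides follows; the provider name "fh-pwm", the consumer "backlight.0", and the use of the legacy pwm_config()/pwm_enable() helpers declared elsewhere in this header are assumptions for illustration.

    /* Board file: map channel 0 of the hypothetical "fh-pwm" provider
     * to the hypothetical "backlight.0" device. */
    static struct pwm_lookup board_pwm_lookup[] = {
    	PWM_LOOKUP("fh-pwm", 0, "backlight.0", "backlight"),
    };

    static int __init board_init(void)
    {
    	pwm_add_table(board_pwm_lookup, ARRAY_SIZE(board_pwm_lookup));
    	return 0;
    }

    /* Consumer driver: look the PWM up by con_id and program it. */
    static int backlight_setup(struct device *dev)
    {
    	struct pwm_device *pwm = devm_pwm_get(dev, "backlight");

    	if (IS_ERR(pwm))
    		return PTR_ERR(pwm);
    	pwm_config(pwm, 500000, 1000000);	/* 50% duty, 1 ms period */
    	return pwm_enable(pwm);
    }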
+diff --git a/include/linux/rtc.h b/include/linux/rtc.h
+index 93f4d035..75bae8a9 100644
+--- a/include/linux/rtc.h
++++ b/include/linux/rtc.h
+@@ -18,15 +18,15 @@
+  */
+ 
+ struct rtc_time {
+-	int tm_sec;
+-	int tm_min;
+-	int tm_hour;
+-	int tm_mday;
+-	int tm_mon;
+-	int tm_year;
+-	int tm_wday;
+-	int tm_yday;
+-	int tm_isdst;
++	int tm_sec;		/* 0~59 */
++	int tm_min;		/* 0~59 */
++	int tm_hour;	/* 0~23 */
++	int tm_mday;	/* 1~31 */
++	int tm_mon;		/* 0~11 */
++	int tm_year;	/* offset from 1900 */
++	int tm_wday;	/* 0~6, Sunday = 0 */
++	int tm_yday;	/* days since Jan 1, 0~365 */
++	int tm_isdst;	/* daylight saving time flag */
+ };
+ 
+ /*
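The annotations above follow the classic struct tm conventions: tm_mon is zero-based, tm_year counts from 1900, and tm_isdst is the daylight saving flag. A small sketch of the conversion a consumer of struct rtc_time has to perform:

    /* Sketch: print an rtc_time as a calendar date. */
    static void rtc_print(const struct rtc_time *tm)
    {
    	printk(KERN_INFO "RTC: %04d-%02d-%02d %02d:%02d:%02d\n",
    	       tm->tm_year + 1900,	/* stored as offset from 1900 */
    	       tm->tm_mon + 1,		/* stored as 0~11 */
    	       tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
    }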
+diff --git a/drivers/spi/dw_spi.h b/include/linux/spi/dw_spi.h
+similarity index 87%
+rename from drivers/spi/dw_spi.h
+rename to include/linux/spi/dw_spi.h
+index 7a5e78d2..3bf6c7f2 100644
+--- a/drivers/spi/dw_spi.h
++++ b/include/linux/spi/dw_spi.h
+@@ -4,6 +4,27 @@
+ #include <linux/io.h>
+ #include <linux/scatterlist.h>
+ 
++
++#define YU_ADD_ISR_TASKLET
++
++#ifdef CONFIG_JLINK_DEBUG
++#	define DEBUG_DW_SPI0
++
++#	ifdef DEBUG_DW_SPI0
++#		define DW_SPI0_REG_BASE		(0xf0500000)
++#		define DW_SPI0_CS_REG		(0xf0300000)
++
++#	else
++#		define DW_SPI0_REG_BASE		(0xfe400000)
++#		define DW_SPI0_CS_REG		(0xfe500000)
++#	endif
++
++#else
++#	define DW_SPI0_CS_REG			(0xfe500000)
++#endif
++
++
++
+ /* Bit fields in CTRLR0 */
+ #define SPI_DFS_OFFSET			0
+ 
+@@ -138,7 +159,7 @@ struct dw_spi {
+ 	u32			dma_width;
+ 	int			cs_change;
+ 	irqreturn_t		(*transfer_handler)(struct dw_spi *dws);
+-	void			(*cs_control)(u32 command);
++	void			(*cs_control)(struct spi_device *spi, u32 command);
+ 
+ 	/* Dma info */
+ 	int			dma_inited;
+@@ -152,9 +173,18 @@ struct dw_spi {
+ 	struct dw_spi_dma_ops	*dma_ops;
+ 	void			*dma_priv; /* platform relate info */
+ 	struct pci_dev		*dmac;
++	void			*dma_rx_dummy;
++	void			*dma_tx_dummy;
+ 
+ 	/* Bus interface info */
+ 	void			*priv;
++
++
++#ifdef YU_ADD_ISR_TASKLET
++	struct tasklet_struct	yu_add_isr_tasklet;
++
++#endif
++
+ #ifdef CONFIG_DEBUG_FS
+ 	struct dentry *debugfs;
+ #endif
+@@ -169,6 +199,10 @@ struct dw_spi {
+ #define dw_writew(dw, name, val) \
+ 	__raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name))
+ 
++
++#define yu_write(val,add)    __raw_writel((val), (add))
++
++
+ static inline void spi_enable_chip(struct dw_spi *dws, int enable)
+ {
+ 	dw_writel(dws, ssienr, (enable ? 1 : 0));
+@@ -179,17 +213,19 @@ static inline void spi_set_clk(struct dw_spi *dws, u16 div)
+ 	dw_writel(dws, baudr, div);
+ }
+ 
+-static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
++static inline void spi_chip_sel(struct dw_spi *dws, struct spi_device *spi)
+ {
++	u16 cs = spi->chip_select;
+ 	if (cs > dws->num_cs)
+ 		return;
+ 
+ 	if (dws->cs_control)
+-		dws->cs_control(1);
++		dws->cs_control(spi, 1);
+ 
+ 	dw_writel(dws, ser, 1 << cs);
+ }
+ 
++
+ /* Disable IRQ bits */
+ static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
+ {
+@@ -218,7 +254,8 @@ struct dw_spi_chip {
+ 	u8 poll_mode;	/* 0 for contoller polling mode */
+ 	u8 type;	/* SPI/SSP/Micrwire */
+ 	u8 enable_dma;
+-	void (*cs_control)(u32 command);
++	void *cs_control;
++//	void (*cs_control)(u32 command);
+ };
+ 
+ extern int dw_spi_add_host(struct dw_spi *dws);
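The key interface change in this header is that cs_control() now receives the struct spi_device, so a board-specific chip-select hook can derive per-slave state instead of relying on a global. A sketch of a callback matching the new prototype; carrying the GPIO number in controller_data is an assumption for illustration.

    /* Hypothetical board hook for the new cs_control() prototype. */
    static void board_cs_control(struct spi_device *spi, u32 command)
    {
    	unsigned gpio = (unsigned long)spi->controller_data;	/* assumed */

    	/* spi_chip_sel() above calls this with command == 1 to assert CS */
    	gpio_set_value(gpio, command ? 0 : 1);	/* active-low chip select */
    }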
+diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h
+index 4ebaf082..dab0e66b 100644
+--- a/include/linux/usb/ch11.h
++++ b/include/linux/usb/ch11.h
+@@ -18,6 +18,12 @@
+ #define USB_RT_HUB	(USB_TYPE_CLASS | USB_RECIP_DEVICE)
+ #define USB_RT_PORT	(USB_TYPE_CLASS | USB_RECIP_OTHER)
+ 
++#define HUB_CHAR_LPSM		0x0003 /* Logical Power Switching Mode mask */
++#define HUB_CHAR_COMMON_LPSM	0x0000 /* All ports power control at once */
++#define HUB_CHAR_INDV_PORT_LPSM	0x0001 /* per-port power control */
++#define HUB_CHAR_NO_LPSM	0x0002 /* no power switching */
++#define HUB_CHAR_COMPOUND	0x0004 /* hub is part of a compound device */
++
+ /*
+  * Hub class requests
+  * See USB 2.0 spec Table 11-16
+@@ -54,6 +64,7 @@
+ #define USB_PORT_FEAT_L1		5	/* L1 suspend */
+ #define USB_PORT_FEAT_POWER		8
+ #define USB_PORT_FEAT_LOWSPEED		9	/* Should never be used */
++#define USB_PORT_FEAT_HIGHSPEED		10
+ #define USB_PORT_FEAT_C_CONNECTION	16
+ #define USB_PORT_FEAT_C_ENABLE		17
+ #define USB_PORT_FEAT_C_SUSPEND		18
+@@ -168,6 +179,7 @@ struct usb_port_status {
+ #define HUB_CHAR_LPSM		0x0003 /* D1 .. D0 */
+ #define HUB_CHAR_COMPOUND	0x0004 /* D2       */
+ #define HUB_CHAR_OCPM		0x0018 /* D4 .. D3 */
++#define HUB_CHAR_INDV_PORT_OCPM	0x0008 /* per-port Over-current reporting */
+ #define HUB_CHAR_TTTT           0x0060 /* D6 .. D5 */
+ #define HUB_CHAR_PORTIND        0x0080 /* D7       */
+ 
+diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
+index dd1571db..6c180f78 100644
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -16,7 +16,7 @@
+ #define __LINUX_USB_GADGET_H
+ 
+ #include <linux/slab.h>
+-
++#include <linux/usb/ch9.h>
+ struct usb_ep;
+ 
+ /**
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 0097136b..7396dba9 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -19,10 +19,10 @@
+ #ifndef __USB_CORE_HCD_H
+ #define __USB_CORE_HCD_H
+ 
+-#ifdef __KERNEL__
++//#ifdef __KERNEL__
+ 
+ #include <linux/rwsem.h>
+-
++#include <linux/usb.h>
+ #define MAX_TOPO_LEVEL		6
+ 
+ /* This file contains declarations of usbcore internals that are mostly
+@@ -505,6 +505,11 @@ extern void usb_ep0_reinit(struct usb_device *);
+ /* class requests from USB 3.0 hub spec, table 10-5 */
+ #define SetHubDepth		(0x3000 | HUB_SET_DEPTH)
+ #define GetPortErrorCount	(0x8000 | HUB_GET_PORT_ERR_COUNT)
++/*-------------------------------------------------------------------------*/
++
++/* hub.h ... DeviceRemovable in 2.4.2-ac11, gone in 2.4.10 */
++/* bleech -- resurfaced in 2.4.11 or 2.4.12 */
++#define bitmap 	DeviceRemovable
+ 
+ /*
+  * Generic bandwidth allocation constants/support
+@@ -669,6 +674,6 @@ extern struct rw_semaphore ehci_cf_port_reset_rwsem;
+ #define USB_EHCI_LOADED		2
+ extern unsigned long usb_hcds_loaded;
+ 
+-#endif /* __KERNEL__ */
++//#endif /* __KERNEL__ */
+ 
+ #endif /* __USB_CORE_HCD_H */
+diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
+index d87f44f5..9e480472 100644
+--- a/include/linux/usb/otg.h
++++ b/include/linux/usb/otg.h
+@@ -11,6 +11,12 @@
+ 
+ #include <linux/notifier.h>
+ 
++enum usb_dr_mode {
++	USB_DR_MODE_UNKNOWN,
++	USB_DR_MODE_HOST,
++	USB_DR_MODE_PERIPHERAL,
++	USB_DR_MODE_OTG,
++};
+ /* OTG defines lots of enumeration states before device reset */
+ enum usb_otg_state {
+ 	OTG_STATE_UNDEFINED = 0,
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index 605b0aa8..3f1c2dee 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -33,6 +33,8 @@ struct usbnet {
+ 	wait_queue_head_t	*wait;
+ 	struct mutex		phy_mutex;
+ 	unsigned char		suspend_count;
++	unsigned char		pkt_cnt, pkt_err;
++	unsigned short		rx_qlen, tx_qlen;
+ 
+ 	/* i/o info: pipes etc */
+ 	unsigned		in, out;
+@@ -69,6 +71,7 @@ struct usbnet {
+ #		define EVENT_DEV_WAKING 6
+ #		define EVENT_DEV_ASLEEP 7
+ #		define EVENT_DEV_OPEN	8
++#		define EVENT_RX_KILL	10
+ };
+ 
+ static inline struct usb_driver *driver_of(struct usb_interface *intf)
+@@ -150,6 +153,10 @@ struct driver_info {
+ 	int		in;		/* rx endpoint */
+ 	int		out;		/* tx endpoint */
+ 
++	/* driver private info, added by zhangy on
++	 * 2018-10-31 to fix lost data in the asix rx fixup */
++	void			*driver_priv;
++
+ 	unsigned long	data;		/* Misc driver specific data */
+ };
+ 
+@@ -191,7 +198,8 @@ extern void usbnet_cdc_status(struct usbnet *, struct urb *);
+ enum skb_state {
+ 	illegal = 0,
+ 	tx_start, tx_done,
+-	rx_start, rx_done, rx_cleanup
++	rx_start, rx_done, rx_cleanup,
++	unlink_start
+ };
+ 
+ struct skb_data {	/* skb->cb is one of these */
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 396e8fc8..6ea76103 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -426,7 +426,8 @@ struct station_parameters {
+  * @STATION_INFO_RX_BITRATE: @rxrate fields are filled
+  * @STATION_INFO_BSS_PARAM: @bss_param filled
+  * @STATION_INFO_CONNECTED_TIME: @connected_time filled
+- */
++ * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
++ */
+ enum station_info_flags {
+ 	STATION_INFO_INACTIVE_TIME	= 1<<0,
+ 	STATION_INFO_RX_BYTES		= 1<<1,
+@@ -443,8 +444,9 @@ enum station_info_flags {
+ 	STATION_INFO_RX_DROP_MISC	= 1<<12,
+ 	STATION_INFO_SIGNAL_AVG		= 1<<13,
+ 	STATION_INFO_RX_BITRATE		= 1<<14,
+-	STATION_INFO_BSS_PARAM          = 1<<15,
+-	STATION_INFO_CONNECTED_TIME	= 1<<16
++	STATION_INFO_BSS_PARAM		= 1<<15,
++	STATION_INFO_CONNECTED_TIME	= 1<<16,
++	STATION_INFO_ASSOC_REQ_IES	= 1<<17
+ };
+ 
+ /**
+@@ -536,6 +539,11 @@ struct sta_bss_parameters {
+  *	This number should increase every time the list of stations
+  *	changes, i.e. when a station is added or removed, so that
+  *	userspace can tell whether it got a consistent snapshot.
++ * @assoc_req_ies: IEs from (Re)Association Request.
++ *	This is used only when in AP mode with drivers that do not use
++ *	user space MLME/SME implementation. The information is provided for
++ *	the cfg80211_new_sta() calls to notify user space of the IEs.
++ * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
+  */
+ struct station_info {
+ 	u32 filled;
+@@ -558,6 +566,9 @@ struct station_info {
+ 	struct sta_bss_parameters bss_param;
+ 
+ 	int generation;
++
++	const u8 *assoc_req_ies;
++	size_t assoc_req_ies_len;
+ };
+ 
+ /**
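With the new flag and fields, an AP-mode driver that handles association in the kernel can pass the (Re)Association Request IEs to userspace through cfg80211_new_sta(). A sketch of the producer side; where the driver captured assoc_ie is outside this patch.

    /* Hypothetical driver path: report a new station with its assoc IEs. */
    static void report_new_sta(struct net_device *dev, const u8 *mac,
    			   const u8 *assoc_ie, size_t assoc_ie_len)
    {
    	struct station_info sinfo;

    	memset(&sinfo, 0, sizeof(sinfo));
    	sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
    	sinfo.assoc_req_ies = assoc_ie;
    	sinfo.assoc_req_ies_len = assoc_ie_len;
    	cfg80211_new_sta(dev, mac, &sinfo, GFP_KERNEL);
    }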
+diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
+index eb51d76e..85872b2e 100644
+--- a/kernel/timeconst.pl
++++ b/kernel/timeconst.pl
+@@ -370,7 +370,7 @@ if ($hz eq '--can') {
+ 	}
+ 
+ 	@val = @{$canned_values{$hz}};
+-	if (!defined(@val)) {
++	if (!@val) {
+ 		@val = compute_values($hz);
+ 	}
+ 	output($hz, @val);
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index dd373c8e..dcb37b28 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1,4 +1,11 @@
+ 
++config TEST_BOOT_TIME
++	bool "Set GPIO level for measuring boot time"
++	default n
++	help
++	  This option sets GPIO 4 high at the beginning of the kernel
++	  decompression stage.
++
+ config PRINTK_TIME
+ 	bool "Show timing information on printks"
+ 	depends on PRINTK
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 46cbd28f..ef70bf94 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -185,10 +185,16 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
+ 	prefetchw(skb);
+ 
+ 	size = SKB_DATA_ALIGN(size);
+-	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+-			gfp_mask, node);
++	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++	data = kmalloc_node_track_caller(size, gfp_mask, node);
+ 	if (!data)
+ 		goto nodata;
++
++	/* kmalloc(size) might give us more room than requested.
++	 * Put skb_shared_info exactly at the end of allocated zone,
++	 * to allow max possible filling before reallocation.
++	 */
++	size = SKB_WITH_OVERHEAD(ksize(data));
+ 	prefetchw(data + size);
+ 
+ 	/*
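The hunk above rounds both halves of the allocation to cache lines and then re-derives the usable size from ksize(), so slack added by the slab allocator becomes packet data instead of being wasted. The arithmetic in isolation, with an assumed 32-byte cache line and assumed structure size and slab rounding:

    #include <stdio.h>

    #define CACHE 32	/* assumed SMP_CACHE_BYTES */
    #define ALIGN_UP(x) (((x) + CACHE - 1) & ~(CACHE - 1))

    int main(void)
    {
    	unsigned req = 100;	/* caller's requested data size */
    	unsigned shinfo = 200;	/* assumed sizeof(struct skb_shared_info) */
    	unsigned asked = ALIGN_UP(req) + ALIGN_UP(shinfo);	/* 128 + 224 */
    	unsigned got = 512;	/* slab rounds 352 up; ksize() reports this */

    	/* shared info sits at the very end; all bytes before it are data */
    	printf("asked %u, got %u, usable data %u\n",
    	       asked, got, got - ALIGN_UP(shinfo));
    	return 0;
    }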
+diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
+index 1f1ef70f..15a57575 100644
+--- a/net/wireless/Kconfig
++++ b/net/wireless/Kconfig
+@@ -1,5 +1,5 @@
+ config WIRELESS_EXT
+-	bool
++	def_bool y
+ 
+ config WEXT_CORE
+ 	def_bool y
+@@ -14,7 +14,7 @@ config WEXT_SPY
+ 	bool
+ 
+ config WEXT_PRIV
+-	bool
++	def_bool y
+ 
+ config CFG80211
+ 	tristate "cfg80211 - wireless configuration API"
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 1ac9443b..6496472e 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -2209,6 +2209,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
+ 	}
+ 	nla_nest_end(msg, sinfoattr);
+ 
++	if (sinfo->assoc_req_ies)
++		NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
++			sinfo->assoc_req_ies);
++
+ 	return genlmsg_end(msg, hdr);
+ 
+  nla_put_failure:
+@@ -2236,6 +2241,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
+ 	}
+ 
+ 	while (1) {
++		memset(&sinfo, 0, sizeof(sinfo));
+ 		err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
+ 					     mac_addr, &sinfo);
+ 		if (err == -ENOENT)
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 379574c3..7b0add2b 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -1759,6 +1759,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
+ static void restore_regulatory_settings(bool reset_user)
+ {
+ 	char alpha2[2];
++	char world_alpha2[2];
+ 	struct reg_beacon *reg_beacon, *btmp;
+ 	struct regulatory_request *reg_request, *tmp;
+ 	LIST_HEAD(tmp_reg_req_list);
+@@ -1809,11 +1810,13 @@ static void restore_regulatory_settings(bool reset_user)
+ 
+ 	/* First restore to the basic regulatory settings */
+ 	cfg80211_regdomain = cfg80211_world_regdom;
++	world_alpha2[0] = cfg80211_regdomain->alpha2[0];
++	world_alpha2[1] = cfg80211_regdomain->alpha2[1];
+ 
+ 	mutex_unlock(&reg_mutex);
+ 	mutex_unlock(&cfg80211_mutex);
+ 
+-	regulatory_hint_core(cfg80211_regdomain->alpha2);
++	regulatory_hint_core(world_alpha2);
+ 
+ 	/*
+ 	 * This restores the ieee80211_regdom module parameter
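The world_alpha2 copy is a lifetime fix: cfg80211_regdomain is protected by the mutexes released just below, so dereferencing cfg80211_regdomain->alpha2 inside regulatory_hint_core() after the unlock would read shared state without the lock. Snapshotting the two characters onto the stack while still locked removes the race. The general shape of the pattern, with hypothetical names:

    /* Sketch: copy shared data under the lock, use the copy afterwards. */
    static void hint_current_domain(void)
    {
    	char alpha2[2];

    	mutex_lock(&reg_mutex);			/* guards shared_regdomain */
    	alpha2[0] = shared_regdomain->alpha2[0];
    	alpha2[1] = shared_regdomain->alpha2[1];
    	mutex_unlock(&reg_mutex);

    	regulatory_hint_core(alpha2);		/* no shared dereference */
    }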
+diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
+index 885683a3..4eb638bd 100644
+--- a/sound/arm/Kconfig
++++ b/sound/arm/Kconfig
+@@ -27,7 +27,26 @@ config SND_PXA2XX_LIB
+ 
+ config SND_PXA2XX_LIB_AC97
+ 	bool
++
++config SND_FH_LIB
++	tristate
++	select SND_FH_CODEC if SND_FH_LIB_AC97
+ 
++config SND_FH_LIB_AC97
++	bool
++
++config SND_FH_PCM
++	bool
++
++config SND_FH_AC97
++	tristate "AC97 driver for the Fullhan FH81 chip"
++	select SND_FH_PCM
++	select SND_AC97_CODEC
++	select SND_FH_LIB
++	select SND_FH_LIB_AC97
++	help
++	  Say Y or M if you want to support any AC97 codec attached to
++	  the fh81 AC97 interface.
+ config SND_PXA2XX_AC97
+ 	tristate "AC97 driver for the Intel PXA2xx chip"
+ 	depends on ARCH_PXA
+diff --git a/sound/arm/Makefile b/sound/arm/Makefile
+index 8c0c851d..72cd341a 100644
+--- a/sound/arm/Makefile
++++ b/sound/arm/Makefile
+@@ -7,10 +7,16 @@ snd-aaci-objs			:= aaci.o
+ 
+ obj-$(CONFIG_SND_PXA2XX_PCM)	+= snd-pxa2xx-pcm.o
+ snd-pxa2xx-pcm-objs		:= pxa2xx-pcm.o
++obj-$(CONFIG_SND_FH_PCM)	+= snd-fh-pcm.o
++snd-fh-pcm-objs		:= fh_pcm.o
+ 
+ obj-$(CONFIG_SND_PXA2XX_LIB)	+= snd-pxa2xx-lib.o
++obj-$(CONFIG_SND_FH_LIB)	+= snd-fh-lib.o
+ snd-pxa2xx-lib-y		:= pxa2xx-pcm-lib.o
++snd-fh-lib-y		:= fh-pcm-lib.o
+ snd-pxa2xx-lib-$(CONFIG_SND_PXA2XX_LIB_AC97)	+= pxa2xx-ac97-lib.o
+-
++snd-fh-lib-$(CONFIG_SND_FH_LIB_AC97)	+= fh-ac97-lib.o
+ obj-$(CONFIG_SND_PXA2XX_AC97)	+= snd-pxa2xx-ac97.o
++obj-$(CONFIG_SND_FH_AC97)	+= snd-fh-ac97.o
+ snd-pxa2xx-ac97-objs		:= pxa2xx-ac97.o
++snd-fh-ac97-objs		:= fh-ac97.o
+\ No newline at end of file
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 98175a09..cd9b5329 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -119,6 +119,11 @@ config SND_SOC_AC97_CODEC
+ 	tristate
+ 	select SND_AC97_CODEC
+ 
++config SND_SOC_FH_CODEC
++	tristate
++	select SND_FH_CODEC
++	select SND_AC97_CODEC
++
+ config SND_SOC_AD1836
+ 	tristate
+ 
+@@ -222,6 +227,9 @@ config SND_SOC_STAC9766
+ config SND_SOC_TLV320AIC23
+ 	tristate
+ 
++config SND_SOC_FSH0LS029AA
++	tristate
++
+ config SND_SOC_TLV320AIC26
+ 	tristate "TI TLV320AIC26 Codec support" if SND_SOC_OF_SIMPLE
+ 	depends on SPI
+diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
+index fd855840..116edc8a 100644
+--- a/sound/soc/codecs/Makefile
++++ b/sound/soc/codecs/Makefile
+@@ -1,5 +1,6 @@
+ snd-soc-88pm860x-objs := 88pm860x-codec.o
+ snd-soc-ac97-objs := ac97.o
++snd-soc-fh-objs := fh.o
+ snd-soc-ad1836-objs := ad1836.o
+ snd-soc-ad193x-objs := ad193x.o
+ snd-soc-ad1980-objs := ad1980.o
+@@ -30,6 +31,7 @@ snd-soc-spdif-objs := spdif_transciever.o
+ snd-soc-ssm2602-objs := ssm2602.o
+ snd-soc-stac9766-objs := stac9766.o
+ snd-soc-tlv320aic23-objs := tlv320aic23.o
++snd-soc-fsh0ls029aa-objs := fsh0ls029aa.o
+ snd-soc-tlv320aic26-objs := tlv320aic26.o
+ snd-soc-tlv320aic3x-objs := tlv320aic3x.o
+ snd-soc-tlv320aic32x4-objs := tlv320aic32x4.o
+@@ -91,6 +93,7 @@ snd-soc-wm9090-objs := wm9090.o
+ 
+ obj-$(CONFIG_SND_SOC_88PM860X)	+= snd-soc-88pm860x.o
+ obj-$(CONFIG_SND_SOC_AC97_CODEC)	+= snd-soc-ac97.o
++obj-$(CONFIG_SND_SOC_FH_CODEC)	+= snd-soc-fh.o
+ obj-$(CONFIG_SND_SOC_AD1836)	+= snd-soc-ad1836.o
+ obj-$(CONFIG_SND_SOC_AD193X)	+= snd-soc-ad193x.o
+ obj-$(CONFIG_SND_SOC_AD1980)	+= snd-soc-ad1980.o
+@@ -122,6 +125,7 @@ obj-$(CONFIG_SND_SOC_SPDIF)	+= snd-soc-spdif.o
+ obj-$(CONFIG_SND_SOC_SSM2602)	+= snd-soc-ssm2602.o
+ obj-$(CONFIG_SND_SOC_STAC9766)	+= snd-soc-stac9766.o
+ obj-$(CONFIG_SND_SOC_TLV320AIC23)	+= snd-soc-tlv320aic23.o
++obj-$(CONFIG_SND_SOC_FSH0LS029AA)	+= snd-soc-fsh0ls029aa.o
+ obj-$(CONFIG_SND_SOC_TLV320AIC26)	+= snd-soc-tlv320aic26.o
+ obj-$(CONFIG_SND_SOC_TLV320AIC3X)	+= snd-soc-tlv320aic3x.o
+ obj-$(CONFIG_SND_SOC_TVL320AIC32X4)     += snd-soc-tlv320aic32x4.o
+diff --git a/sound/soc/codecs/fsh0ls029aa.c b/sound/soc/codecs/fsh0ls029aa.c
+new file mode 100644
+index 00000000..10dec074
+--- /dev/null
++++ b/sound/soc/codecs/fsh0ls029aa.c
+@@ -0,0 +1,102 @@
++/*
++ * fsh0ls029aa.c  --  ALSA SoC FSH0LS029AA codec support, based on ad73311.c
++ *
++ * Copyright:	Analog Device Inc.
++ * Author:	Cliff Cai <cliff.cai@analog.com>
++ *
++ *  This program is free software; you can redistribute  it and/or modify it
++ *  under  the terms of  the GNU General  Public License as published by the
++ *  Free Software Foundation;  either version 2 of the  License, or (at your
++ *  option) any later version.
++ */
++
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/ac97_codec.h>
++#include <sound/initval.h>
++#include <sound/soc.h>
++
++#include "fsh0ls029aa.h"
++static int ak4104_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
++{
++	return 0;
++}
++static int ak4104_hw_params(struct snd_pcm_substream *substream,
++			    struct snd_pcm_hw_params *params,
++			    struct snd_soc_dai *dai)
++{
++	return 0;
++}
++static int ak4104_sys_params(struct snd_soc_dai *dai, int clk_id,
++			     unsigned int freq, int dir)
++{
++	return 0;
++}
++static struct snd_soc_dai_ops ak4101_dai_ops = {
++	.hw_params = ak4104_hw_params,
++	.set_fmt = ak4104_set_dai_fmt,
++	.set_sysclk = ak4104_sys_params,
++};
++
++static struct snd_soc_dai_driver ad73311_dai = {
++	.name = "fh-acodec-hifi",
++	.playback = {
++		.stream_name = "Playback",
++		.channels_min = 2,
++		.channels_max = 2,
++		.rates = SNDRV_PCM_RATE_8000,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE, },
++	.capture = {
++		.stream_name = "Capture",
++		.channels_min = 2,
++		.channels_max = 2,
++		.rates = SNDRV_PCM_RATE_8000,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE, },
++	.ops = &ak4101_dai_ops,
++};
++
++static struct snd_soc_codec_driver soc_codec_dev_ad73311;
++
++static int ad73311_probe(struct platform_device *pdev)
++{
++//	printk("ad73311 probe \n");
++	return snd_soc_register_codec(&pdev->dev,
++			&soc_codec_dev_ad73311, &ad73311_dai, 1);
++}
++
++static int __devexit ad73311_remove(struct platform_device *pdev)
++{
++	snd_soc_unregister_codec(&pdev->dev);
++	return 0;
++}
++
++static struct platform_driver ad73311_codec_driver = {
++	.driver = {
++			.name = "fh-acodec",
++			.owner = THIS_MODULE,
++	},
++
++	.probe = ad73311_probe,
++	.remove = __devexit_p(ad73311_remove),
++};
++
++static int __init ad73311_init(void)
++{
++	return platform_driver_register(&ad73311_codec_driver);
++}
++module_init(ad73311_init);
++
++static void __exit ad73311_exit(void)
++{
++	platform_driver_unregister(&ad73311_codec_driver);
++}
++module_exit(ad73311_exit);
++
++MODULE_DESCRIPTION("ASoC FSH0LS029AA codec driver");
++MODULE_AUTHOR("Cliff Cai <cliff.cai@analog.com>");
++MODULE_LICENSE("GPL");
+diff --git a/sound/soc/codecs/fsh0ls029aa.h b/sound/soc/codecs/fsh0ls029aa.h
+new file mode 100644
+index 00000000..4b353eef
+--- /dev/null
++++ b/sound/soc/codecs/fsh0ls029aa.h
+@@ -0,0 +1,88 @@
++/*
++ * File:         sound/soc/codecs/fsh0ls029aa.h
++ * Based on:     sound/soc/codecs/ad73311.h
++ * Author:       Cliff Cai <cliff.cai@analog.com>
++ *
++ * Created:      Thur Sep 25, 2008
++ * Description:  definitions for AD73311 registers
++ *
++ *
++ * Modified:
++ *               Copyright 2006 Analog Devices Inc.
++ *
++ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see the file COPYING, or write
++ * to the Free Software Foundation, Inc.,
++ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++#ifndef __AD73311_H__
++#define __AD73311_H__
++
++#define AD_CONTROL	0x8000
++#define AD_DATA		0x0000
++#define AD_READ		0x4000
++#define AD_WRITE	0x0000
++
++/* Control register A */
++#define CTRL_REG_A	(0 << 8)
++
++#define REGA_MODE_PRO	0x00
++#define REGA_MODE_DATA	0x01
++#define REGA_MODE_MIXED	0x03
++#define REGA_DLB		0x04
++#define REGA_SLB		0x08
++#define REGA_DEVC(x)		((x & 0x7) << 4)
++#define REGA_RESET		0x80
++
++/* Control register B */
++#define CTRL_REG_B	(1 << 8)
++
++#define REGB_DIRATE(x)	(x & 0x3)
++#define REGB_SCDIV(x)	((x & 0x3) << 2)
++#define REGB_MCDIV(x)	((x & 0x7) << 4)
++#define REGB_CEE		(1 << 7)
++
++/* Control register C */
++#define CTRL_REG_C	(2 << 8)
++
++#define REGC_PUDEV		(1 << 0)
++#define REGC_PUADC		(1 << 3)
++#define REGC_PUDAC		(1 << 4)
++#define REGC_PUREF		(1 << 5)
++#define REGC_REFUSE		(1 << 6)
++
++/* Control register D */
++#define CTRL_REG_D	(3 << 8)
++
++#define REGD_IGS(x)		(x & 0x7)
++#define REGD_RMOD		(1 << 3)
++#define REGD_OGS(x)		((x & 0x7) << 4)
++#define REGD_MUTE		(1 << 7)
++
++/* Control register E */
++#define CTRL_REG_E	(4 << 8)
++
++#define REGE_DA(x)		(x & 0x1f)
++#define REGE_IBYP		(1 << 5)
++
++/* Control register F */
++#define CTRL_REG_F	(5 << 8)
++
++#define REGF_SEEN		(1 << 5)
++#define REGF_INV		(1 << 6)
++#define REGF_ALB		(1 << 7)
++
++#endif
+diff --git a/sound/soc/dwc/Kconfig b/sound/soc/dwc/Kconfig
+new file mode 100644
+index 00000000..ae37a7a6
+--- /dev/null
++++ b/sound/soc/dwc/Kconfig
+@@ -0,0 +1,54 @@
++config SND_FULLHAN_SOC
++	tristate "SoC Audio for the FULLHAN System-on-Chip"
++	help
++	  Say Y or M if you want to add support for codecs attached to
++	  the Fullhan SSC interface. You will also need
++	  to select the audio interfaces to support below.
++
++config SND_FULLHAN_SOC_SSC
++	tristate
++	depends on SND_FULLHAN_SOC
++	help
++	  Say Y or M if you want to add support for codecs attached to the
++	  Fullhan SSC interface. You will also need to select the individual
++	  machine drivers to support below.
++
++config SND_FULLHAN_SOC_SAM9G20_WM8731
++	tristate "SoC Audio support for WM8731-based At91sam9g20 evaluation board"
++	depends on ATMEL_SSC && ARCH_AT91SAM9G20 && SND_ATMEL_SOC && \
++                   AT91_PROGRAMMABLE_CLOCKS
++	select SND_FULLHAN_SOC_SSC
++	select SND_SOC_WM8731
++	help
++	  Say Y if you want to add support for SoC audio on WM8731-based
++	  AT91sam9g20 evaluation board.
++
++config SND_FULLHAN_SOC_PLAYPAQ
++        tristate "SoC Audio support for PlayPaq with WM8510"
++        depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
++        select SND_ATMEL_SOC_SSC
++        select SND_SOC_WM8510
++        help
++          Say Y or M here if you want to add support for SoC audio
++          on the LRS PlayPaq.
++config SND_FH_SOC_I2S
++	tristate
++
++
++config SND_FULLHAN_SOC_PLAYPAQ_SLAVE
++        bool "Run CODEC on PlayPaq in slave mode"
++        depends on SND_FULLHAN_SOC_PLAYPAQ
++        default n
++        help
++          Say Y if you want to run with the AT32 SSC generating the BCLK
++          and FRAME signals on the PlayPaq.  Unless you want to play
++          with the AT32 as the SSC master, you probably want to say N here,
++          as this will give you better sound quality.
++
++config SND_FULLHAN_SOC_FH
++	tristate "SoC Audio support for fullhan-81 board"
++	select SND_FULLHAN_SOC_SSC
++	select SND_SOC_FSH0LS029AA
++	select SND_FH_SOC_I2S
++	help
++	  Say Y here to support sound on the FH81 board.
+diff --git a/sound/soc/dwc/Makefile b/sound/soc/dwc/Makefile
+new file mode 100644
+index 00000000..96f58598
+--- /dev/null
++++ b/sound/soc/dwc/Makefile
+@@ -0,0 +1,20 @@
++# Fullhan Platform Support
++snd-soc-fullhan-pcm-objs := fullhan-pcm.o
++#snd-soc-fullhan_ssc_dai-objs := fh_i2s_dai.o
++
++obj-$(CONFIG_SND_FULLHAN_SOC) += snd-soc-fullhan-pcm.o
++#obj-$(CONFIG_SND_FULLHAN_SOC_SSC) += snd-soc-fullhan_ssc_dai.o
++
++# AT91 Machine Support
++snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
++
++# AT32 Machine Support
++snd-soc-playpaq-objs := playpaq_wm8510.o
++snd-soc-fh-objs := fh.o dma.o
++obj-$(CONFIG_SND_FH_SOC_I2S) += snd-soc-fh-i2s.o
++snd-soc-fh-i2s-objs := fh_i2s.o
++
++
++obj-$(CONFIG_SND_FULLHAN_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
++obj-$(CONFIG_SND_FULLHAN_SOC_PLAYPAQ) += snd-soc-playpaq.o
++obj-$(CONFIG_SND_FULLHAN_SOC_FH) += snd-soc-fh.o
+diff --git a/sound/soc/dwc/dma.c b/sound/soc/dwc/dma.c
+new file mode 100644
+index 00000000..4dd098ce
+--- /dev/null
++++ b/sound/soc/dwc/dma.c
+@@ -0,0 +1,636 @@
++
++/*******************************************
++ *
++ * new driver added by xuww
++ *
++ *
++ * **********************************/
++/**
++* @file		dma.c
++* @brief	register-level helpers for the FH DMA controller
++* @version
++* @author		xuww
++* @date
++* @note
++*
++* @copy
++*
++* This code is the property of Shanghai Fullhan Microelectronics Co., Ltd.
++* Unauthorized copying or distribution is prohibited; the company
++* reserves the right to pursue any infringement.
++*
++* <h1><center>&copy; COPYRIGHT 2013 fullhan</center></h1>
++*/
++/* Includes ------------------------------------------------------------------*/
++#include "dma.h"
++#include <mach/fh_predefined.h>
++
++#define		MAX_DMA_CHANS				(4)
++
++/* Private typedef -----------------------------------------------------------*/
++/* Private define ------------------------------------------------------------*/
++/* Private macro -------------------------------------------------------------*/
++/* Private variables ---------------------------------------------------------*/
++/* Private function prototypes -----------------------------------------------*/
++/* Private functions ---------------------------------------------------------*/
++/*******************************************************************************
++* Function Name  : Dma_GetChanStatus
++* Description    : get the channel status 
++* Input          : nChanID :channel ID 
++* Output         : None
++* Return         : None
++* 		                                      
++ *******************************************************************************/
++int Dma_GetChanStatus( int nChanID )
++{
++	return (int)(GET_REG( REG_DMAC_CHAN_EN ) & ( 1 << nChanID ));
++}
++/*******************************************************************************
++* Function Name  : Dma_EnableChan
++* Description    : enable channel
++* Input          : nChanID   :channel ID 
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_EnableChan( int nChanID )
++{
++	int nMask = (1 << (nChanID + 8)) | ( 1 << nChanID );
++
++	SET_REG_M( REG_DMAC_CHAN_EN, nMask, nMask );
++}
++/*******************************************************************************
++* Function Name  : Dma_DisableChan
++* Description    : disable channel  
++* Input          : nChanID :channel ID 
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_DisableChan( int nChanID )
++{
++	int nMask = ( 1 << ( nChanID + 8 ) );
++
++	SET_REG_M( REG_DMAC_CHAN_EN, nMask, (nMask + (1 << nChanID)) );
++	while( GET_REG( REG_DMAC_CHAN_EN) & (1 << nChanID) );
++}
++/*******************************************************************************
++* Function Name  : Dma_ClearIsrBit
++* Description    : clear the interrupt status bits
++* Input          : iChan: channel ID    nMask: interrupt type mask
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_ClearIsrBit( int iChan, int nMask )
++{
++	if( nMask & DMA_INTT_TXR)
++		SET_REG( REG_DMAC_INTCLR_TFR, 1 << iChan );
++	if( nMask & DMA_INTT_BLOCK)
++		SET_REG( REG_DMAC_INTCLR_BLK, 1 << iChan );
++	if( nMask & DMA_INTT_SOURCE )
++		SET_REG( REG_DMAC_INTCLR_SRCTXR, 1 << iChan );
++	if( nMask & DMA_INTT_DEST )
++		SET_REG( REG_DMAC_INTCLR_DSTTXR, 1 << iChan );
++	if( nMask & DMA_INTT_ERR )
++		SET_REG( REG_DMAC_INTCLR_ERR, 1 << iChan );
++}
++/*******************************************************************************
++* Function Name  : Dma_EnableIsrBit
++* Description    : enable interrupt bits
++* Input          : iChan: channel ID    nMask: interrupt type mask
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_EnableIsrBit( int iChan, int nMask )
++{
++	int nEnable = (1 << (iChan + 8)) | (1 << iChan);
++	
++	if( nMask & DMA_INTT_TXR) 
++		SET_REG_M( REG_DMAC_INTMSK_TFR, nEnable, nEnable );
++	if( nMask & DMA_INTT_BLOCK)
++		SET_REG_M( REG_DMAC_INTMSK_BLK, nEnable, nEnable );
++	if( nMask & DMA_INTT_SOURCE)
++		SET_REG_M( REG_DMAC_INTMSK_SRCTXR, nEnable, nEnable );
++	if( nMask & DMA_INTT_DEST)
++		SET_REG_M( REG_DMAC_INTMSK_DSTTXR, nEnable, nEnable );
++	if( nMask & DMA_INTT_ERR)
++		SET_REG_M( REG_DMAC_INTMSK_ERR, nEnable, nEnable );
++	
++	SET_REG_M( REG_DMAC_CTXi(iChan), 1, 1 );		// Enable isr.
++}
++/*******************************************************************************
++* Function Name  : Dma_DisableIsrBit
++* Description    : disable interrupt bits
++* Input          : iChan: channel ID    nMask: interrupt type mask
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++
++void Dma_DisableIsrBit( int iChan, int nMask )
++{
++	int nEnable = (1 << (iChan + 8)) | (1 << iChan);
++	
++	if( nMask & DMA_INTT_TXR) 
++		SET_REG_M( REG_DMAC_INTCLR_TFR, 0, nEnable );
++	if( nMask & DMA_INTT_BLOCK)
++		SET_REG_M( REG_DMAC_INTCLR_BLK, 0, nEnable );
++	if( nMask & DMA_INTT_SOURCE)
++		SET_REG_M( REG_DMAC_INTCLR_SRCTXR, 0, nEnable );
++	if( nMask & DMA_INTT_DEST)
++		SET_REG_M( REG_DMAC_INTCLR_DSTTXR, 0, nEnable );
++	if( nMask & DMA_INTT_ERR)
++		SET_REG_M( REG_DMAC_INTCLR_ERR, 0, nEnable );
++}
++/*******************************************************************************
++* Function Name  : Dma_QueryISRStatus
++* Description    : not used (stub, always returns 0)
++* Input          : None
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++int Dma_QueryISRStatus( int iChan )
++{
++	return 0;
++}
++/*******************************************************************************
++* Function Name  : Dma_ClearTfrDone
++* Description    : clear tfr Done bit
++* Input          : iChan:channel ID 
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_ClearTfrDone(int iChan )
++{
++	SET_REG_M( REG_DMAC_CTXi(iChan) + 4, 0, 1 << 12 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetTxrSize
++* Description    : set txr size 
++* Input          : iChan: channel ID   nBytes: transfer size in bytes
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetTxrSize( int iChan, int nBytes )
++{
++	if( nBytes > 4095 )
++		nBytes = 4095;
++	SET_REG_M( REG_DMAC_CTXi(iChan) + 4, (unsigned long long)nBytes, 0xfff );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetSrcWidth
++* Description    : set source width 
++* Input          : iChan:channel ID   nWidth :fifo width
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetSrcWidth( int iChan, int nWidth )
++{
++	SET_REG_M( REG_DMAC_CTXi(iChan), nWidth << 4, 0x70 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetDstWidth
++* Description    : set destination width
++* Input          : iChan:channel ID   nWidth :fifo width
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetDstWidth( int iChan, int nWidth )
++{
++	SET_REG_M( REG_DMAC_CTXi(iChan), nWidth << 1, 0xe );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetSrcSize
++* Description    : set source size  
++* Input          : iChan: channel ID  nSize: burst transaction size
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetSrcSize( int iChan, int nSize )		// burst size
++{
++	SET_REG_M( REG_DMAC_CTXi(iChan), nSize << 14, 0x1c000 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetDstSize
++* Description    : set  destination size 
++* Input          : iChan: channel ID  nSize: burst transaction size
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetDstSize( int iChan, int nSize )
++{
++	SET_REG_M( REG_DMAC_CTXi(iChan), nSize << 11, 0x3800 );
++}
++/*******************************************************************************
++* Function Name  : Dma_EnableSrcBlkChain
++* Description    : enable source  block chain 
++* Input          : iChan:channel ID 
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_EnableSrcBlkChain(int iChan )
++{
++	SET_REG_M( REG_DMAC_CTXi(iChan), 1 << 28, 1 << 28 );
++}
++/*******************************************************************************
++* Function Name  : Dma_EnableDstBlkChain
++* Description    : enable destination block chain
++* Input          : iChan:channel ID    
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_EnableDstBlkChain(int iChan )
++{
++	SET_REG_M( REG_DMAC_CTXi(iChan), 1 << 27, 1 << 27 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetTxrType
++* Description    : set transfer type
++* Input          : iChan: channel ID    nMode: transfer/flow-control mode
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetTxrType( int iChan, int nMode )
++{
++	if( nMode >= 0 && nMode < DMA_TTFC_INVALID )
++	{
++		SET_REG_M( REG_DMAC_CTXi(iChan), nMode << 20, 0x7 << 20 );	
++	}
++}
++/*******************************************************************************
++* Function Name  : Dma_SetSrcIncDirection
++* Description    : set source address to increment, decrement, or stay unchanged
++* Input          : iChan: channel ID    nDir: 0: increment  1: decrement  other: unchanged
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetSrcIncDirection( int iChan, int nDir )
++{
++	if( nDir == DMA_DIR_INC )
++		SET_REG_M( REG_DMAC_CTXi(iChan), 0, 0x600 );
++	else if( nDir == DMA_DIR_DEC )
++		SET_REG_M( REG_DMAC_CTXi(iChan), 0x200, 0x200 );
++	else
++		SET_REG_M( REG_DMAC_CTXi(iChan), 0x400, 0x400 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetDstIncDirection
++* Description    : set destination address to increment, decrement, or stay unchanged
++* Input          : iChan: channel ID    nDir: 0: increment  1: decrement  other: unchanged
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetDstIncDirection( int iChan, int nDir )
++{
++	if( nDir == DMA_DIR_INC )
++		SET_REG_M( REG_DMAC_CTXi(iChan), 0, 0x180 );
++	else if( nDir == DMA_DIR_DEC )
++		SET_REG_M( REG_DMAC_CTXi(iChan), 0x80, 0x080 );
++	else
++		SET_REG_M( REG_DMAC_CTXi(iChan), 0x100, 0x100 );
++}
++/*******************************************************************************
++* Function Name  : Dma_EnableGather
++* Description    : enable or disable gather mode
++* Input          : iChan :channel ID    bEnable :0 disable   1:enable
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_EnableGather(int iChan, int bEnable )
++{
++	int v;
++	if( bEnable ) v = 1;
++	else v = 0;
++	SET_REG_M( REG_DMAC_CTXi(iChan), v << 17, 1 << 17 );	
++}
++/*******************************************************************************
++* Function Name  : Dma_EnableScatter
++* Description    : enable or disable scatter mode
++* Input          : iChan :channel ID    bEnable :0 disable   1:enable 
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_EnableScatter(int iChan, int bEnable )
++{
++	int v = 0;
++	if( bEnable ) v = 1;
++	SET_REG_M( REG_DMAC_CTXi(iChan), v << 18, 1 << 18 );	
++}
++/*******************************************************************************
++* Function Name  : Dma_SetSrcHsMode
++* Description    : set the source handshaking  mode  
++* Input          : iChan:channe ID    nMode:0 hardware  1:software
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetSrcHsMode( int iChan, int nMode )
++{
++	nMode &= 0x1;
++	SET_REG_M( REG_DMAC_CFGi(iChan), nMode << 11, 1 << 11 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetDstHsMode
++* Description    : set the destination handshaking  mode  
++* Input          : iChan:channe ID    nMode:0 hardware  1:software
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetDstHsMode( int iChan, int nMode )
++{
++	nMode &= 1;
++	SET_REG_M( REG_DMAC_CFGi(iChan), nMode << 10, 1 << 10 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetFifoMode
++* Description    : set the FIFO request threshold mode
++* Input          : iChan: channel ID    nMode: 1: FIFO half empty/full  0: room for one burst
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetFifoMode( int iChan, int nMode )
++{
++	nMode &= 1;
++	SET_REG_M( REG_DMAC_CFGi(iChan) + 4, nMode << 1, 1 << 1 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetFlowCtrl
++* Description    : set DMA flow control to source or destination
++* Input          : iChan: channel ID      ctrl: 0: source   1: destination
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetFlowCtrl( int iChan, int ctrl )
++{
++	ctrl &= 1;
++	SET_REG_M( REG_DMAC_CFGi(iChan) + 4, ctrl, 1 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetSrcAutoload
++* Description    : enable source auto-reload of the initial address
++* Input          : iChan :channel ID    bEnable :enable or disable
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetSrcAutoload(int iChan, int bEnable )
++{
++	int v = bEnable ? 1 : 0;
++	SET_REG_M( REG_DMAC_CFGi(iChan), v << 30, 1 << 30 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetDstAutoload
++* Description    : enable destination auto-reload of the initial address
++* Input          : iChan :channel ID    bEnable :enable or disable 
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetDstAutoload(int iChan, int bEnable )
++{
++	int v = bEnable ? 1 : 0;
++	SET_REG_M( REG_DMAC_CFGi(iChan), v << 31, 1 << 31 );
++}
++/*******************************************************************************
++* Function Name  : Write_Regm
++* Description    : write the register, leaving bits outside the mask unchanged
++* Input          : addr: register address   value: new value    mask: bits to update
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++static void Write_Regm( unsigned int addr, unsigned int value, unsigned int mask )
++{
++	unsigned int tmp = GET_REG(addr);
++	tmp &= ~mask;
++	value &= mask;
++	tmp |= value;
++	SET_REG(addr, tmp);
++}
++/*******************************************************************************
++* Function Name  : Dma_SetMaxBurst
++* Description    : set the max burst size
++* Input          : iChan:channel ID     nSize :  burst size
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetMaxBurst( int iChan, int nSize )
++{
++	if( nSize > 1023 )
++		nSize = 1023; 
++	Write_Regm( REG_DMAC_CFGi(iChan), (nSize << 20), 0x3ff00000 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetSrcHsPol
++* Description    : set the source handshaking polarity
++* Input          : iChan:channel ID      nPol: polarity  high or low
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetSrcHsPol( int iChan, int nPol )
++{
++	nPol &= 1;
++	SET_REG_M( REG_DMAC_CFGi(iChan), nPol << 19, 1 << 19 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetDstHsPol
++* Description    : set the destination handshaking polarity
++* Input          : iChan:channel ID      nPol: polarity  high or low
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetDstHsPol( int iChan, int nPol )
++{
++	nPol &= 1;
++	SET_REG_M( REG_DMAC_CFGi(iChan), nPol << 18, 1 << 18 );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetLinkEntry
++* Description    : set the linked-list (LLP) entry address
++* Input          : iChan:channel ID    nAddr :link list address
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetLinkEntry( int iChan, unsigned int nAddr )
++{
++	nAddr &= 0xfffffffc;
++	// force to use AHB Master 0, for this is the only AHB Master.
++	SET_REG_M( REG_DMAC_LLPi(iChan), nAddr, 0xffffffff );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetSrcAddress
++* Description    : set source address 
++* Input          : iChan: channel ID     nAddr: source address
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetSrcAddress( int iChan, unsigned int nAddr )
++{
++	SET_REG_M( REG_DMAC_SARi(iChan), nAddr, 0xffffffff );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetDstAddress
++* Description    : set destination address 
++* Input          : iChan:channel ID     nAddr:destination address
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetDstAddress( int iChan, unsigned int nAddr )
++{
++	SET_REG_M( REG_DMAC_DARi(iChan), nAddr, 0xffffffff );
++}
++/*******************************************************************************
++* Function Name  : Dma_SetSrcPer
++* Description    : select the source handshaking interface
++* Input          : iChan: channel ID   nPer: handshaking interface
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetSrcPer( int iChan, unsigned int nPer )
++{
++	if( nPer < DMA_HSP_INVALID ) 
++	{
++		SET_REG_M( REG_DMAC_CFGi(iChan) + 4, nPer << 7, 0xf << 7 );
++	}
++}
++/*******************************************************************************
++* Function Name  : Dma_SetDstPer
++* Description    : select the destination handshaking interface
++* Input          : iChan:channel ID   nPer:handshaking interface
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_SetDstPer( int iChan, unsigned int nPer )
++{
++	if( nPer < DMA_HSP_INVALID )
++	{
++		SET_REG_M( REG_DMAC_CFGi(iChan) + 4, nPer << 11, 0xf << 11 );
++	}
++}
++/*******************************************************************************
++* Function Name  : Dma_GetIsrChan
++* Description    : get the DMA interrupt status for the given type
++* Input          : nMask: interrupt type mask
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++unsigned int Dma_GetIsrChan(unsigned int nMask)
++{
++	if( nMask & DMA_INTT_TXR )
++		return GET_REG(REG_DMAC_INTSTAT_TFR);
++	if( nMask & DMA_INTT_BLOCK )
++		return GET_REG(REG_DMAC_INTSTAT_BLK);
++	if( nMask & DMA_INTT_SOURCE )
++		return GET_REG(REG_DMAC_INTSTAT_SRCTXR);
++	if( nMask & DMA_INTT_DEST )
++		return GET_REG(REG_DMAC_INTSTAT_DSTTXR);
++	if( nMask & DMA_INTT_ERR )
++		return GET_REG(REG_DMAC_INTSTAT_ERR);
++	
++	return 0;
++}
++/*******************************************************************************
++* Function Name  : Dma_StartSrctfr
++* Description    : dma source start transaction 
++* Input          : iChan:channel
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_StartSrctfr( int iChan )
++{
++	int nMask = 0x101 << (iChan);
++
++	SET_REG( REG_DMAC_REQSRC, nMask );
++	SET_REG( REG_DMAC_SGLREQSRC, nMask );
++}
++/*******************************************************************************
++* Function Name  : Dma_StartDsttfr
++* Description    : destination transaction  request
++* Input          : iChan: channel ID
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_StartDsttfr( int iChan )
++{
++	int nMask = 0x101 << (iChan);
++	SET_REG( REG_DMAC_REQDST, nMask );
++	SET_REG( REG_DMAC_SGLREQDST, nMask );
++}
++/*******************************************************************************
++* Function Name  : fh_dma_init
++* Description    : dma init
++* Input          : None
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void fh_dma_init(void)
++{
++	Dma_Init();
++}
++/*******************************************************************************
++* Function Name  : Dma_Init
++* Description    : dma init 
++* Input          : None
++* Output         : None
++* Return         : None   	
++* 		                                      
++ *******************************************************************************/
++void Dma_Init(void)
++{
++	int i;
++	SET_REG( REG_DMAC_CFG_REG, 1 );
++	
++	for( i = 0; i < MAX_DMA_CHANS; i ++ )
++	{
++		Dma_DisableChan(i);
++		Dma_ClearIsrBit( i, DMA_INTT_TXR | DMA_INTT_BLOCK | DMA_INTT_SOURCE | DMA_INTT_DEST | DMA_INTT_ERR );
++		Dma_ClearTfrDone(i);
++		Dma_SetTxrType(i, DMA_TTFC_M2P_DMAC);
++		Dma_SetSrcWidth( i, DMA_TXR_32BITS );
++		Dma_SetSrcSize( i, DMA_BURST_8 );	
++		Dma_SetDstWidth( i, DMA_TXR_8BITS );
++		Dma_SetDstSize( i, DMA_BURST_8 );
++		Dma_SetSrcHsPol( i, DMA_HSPOL_ACTHIGH );  
++		Dma_SetDstHsPol( i, DMA_HSPOL_ACTHIGH );	
++		Dma_SetSrcIncDirection( i, DMA_DIR_INC );
++		Dma_SetDstIncDirection( i, DMA_DIR_UNCHG );
++		Dma_SetSrcHsMode( i, DMA_HSMODE_SOFTWARE );
++		Dma_SetDstHsMode( i, DMA_HSMODE_SOFTWARE );
++		Dma_SetMaxBurst( i, 0 );
++		Dma_SetFifoMode( i, 0 );
++		Dma_SetLinkEntry( i, 0 );
++		Dma_EnableGather( i, 0 );
++		Dma_EnableScatter( i, 0 );
++	}
++}
+diff --git a/sound/soc/dwc/dma.h b/sound/soc/dwc/dma.h
+new file mode 100644
+index 00000000..81fccc3f
+--- /dev/null
++++ b/sound/soc/dwc/dma.h
+@@ -0,0 +1,212 @@
++
++/*********************************
++ * 
++ * new driver added by xuww
++ * 
++ * ****************************/
++#ifndef DMA__H
++#define	DMA__H
++
++
++#if(1)
++enum
++{
++	DMA_INTT_TXR = 1,
++	DMA_INTT_BLOCK = 2,
++	DMA_INTT_SOURCE = 4,
++	DMA_INTT_DEST = 8,
++	DMA_INTT_ERR = 16
++};
++
++enum
++{
++	DMA_TXR_8BITS = 0,
++	DMA_TXR_16BITS = 1,
++	DMA_TXR_32BITS = 2,
++	DMA_TXR_64BITS = 3,
++	DMA_TXR_128BITS = 4,
++	DMA_TXR_256BITS = 5,
++	DMA_TXR_INVALID = 6
++};
++
++enum
++{
++	DMA_BURST_1 = 0,
++	DMA_BURST_4 = 1,
++	DMA_BURST_8 = 2,
++	DMA_BURST_16 = 3,
++	DMA_BURST_32 = 4,
++	DMA_BURST_64 = 5,
++	DMA_BURST_128 = 6,
++	DMA_BURST_256 = 7,
++	DMA_BURST_INVALID = 8
++};
++
++enum
++{
++	DMA_TTFC_M2M_DMAC,
++	DMA_TTFC_M2P_DMAC,
++	DMA_TTFC_P2M_DMAC,
++	DMA_TTFC_P2P_DMAC,
++	DMA_TTFC_P2M_PFR,
++	DMA_TTFC_P2P_PSRC,
++	DMA_TTFC_M2P_PFR,
++	DMA_TTFC_P2P_PDST,
++	DMA_TTFC_INVALID
++};
++
++enum
++{
++	DMA_DIR_INC,
++	DMA_DIR_DEC,
++	DMA_DIR_UNCHG,
++	DMA_DIR_INVALID
++};
++
++enum
++{
++	DMA_HSPOL_ACTHIGH,
++	DMA_HSPOL_ACTLOW
++};
++
++enum
++{
++	DMA_HSMODE_HARDWARE = 0,
++	DMA_HSMODE_SOFTWARE = 1
++};
++
++enum
++{
++	DMA_HSP_SDC,
++	DMA_HSP_AIFRX,
++	DMA_HSP_AIFTX,
++	DMA_HSP_TAE,
++	DMA_HSP_I2SRX,
++	DMA_HSP_I2STX,
++	DMA_HSP_SPI0RX,
++	DMA_HSP_SPI0TX,
++	DMA_HSP_SPI1RX,
++	DMA_HSP_SPI1TX,
++	DMA_HSP_UART0RX,
++	DMA_HSP_UART0TX,
++	DMA_HSP_UART1RX,
++	DMA_HSP_UART1TX,
++	DMA_HSP_SPI2RX,
++	DMA_HSP_SPI2TX,
++	DMA_HSP_INVALID
++};
++#endif
++#define		DMAC_REG_BASE		(0xfe600000)
++#define     REG_DMAC_SAR_OFFSET     (0x0)
++#define     REG_DMAC_DAR_OFFSET     (0x8)
++#define     REG_DMAC_LLP_OFFSET     (0x10)
++#define     REG_DMAC_CTX_OFFSET     (0x18)
++#define     REG_DMAC_SSTAT_OFFSET       (0x20)
++#define     REG_DMAC_DSTAT_OFFSET       (0x28)
++#define     REG_DMAC_SSTATAR_OFFSET (0x30)
++#define     REG_DMAC_DSTATAR_OFFSET (0x38)
++#define     REG_DMAC_CFG_OFFSET     (0x40)
++#define     REG_DMAC_SGR_OFFSET     (0x48)
++#define     REG_DMAC_DSR_OFFSET     (0x50)
++#define     REG_DMAC_SARi(n)            (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_SAR_OFFSET)
++#define     REG_DMAC_DARi(n)            (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_DAR_OFFSET)
++#define     REG_DMAC_LLPi(n)            (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_LLP_OFFSET)
++#define     REG_DMAC_CTXi(n)            (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_CTX_OFFSET)
++#define     REG_DMAC_SSTATi(n)          (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_SSTAT_OFFSET)
++#define     REG_DMAC_DSTATi(n)          (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_DSTAT_OFFSET)
++#define     REG_DMAC_SSTATARi(n)        (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_SSTATAR_OFFSET)
++#define     REG_DMAC_DSTATARi(n)        (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_DSTATAR_OFFSET)
++#define     REG_DMAC_CFGi(n)            (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_CFG_OFFSET)
++#define     REG_DMAC_SGRi(n)            (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_SGR_OFFSET)
++#define     REG_DMAC_DSRi(n)            (DMAC_REG_BASE + (n) * 0x58 + REG_DMAC_DSR_OFFSET)
++
++#define     REG_DMAC_INTRAWTFR          (DMAC_REG_BASE + 0x2c0)
++#define     REG_DMAC_INTRAWBLK          (DMAC_REG_BASE + 0x2c8)
++#define     REG_DMAC_INTRAWSRCTXR       (DMAC_REG_BASE + 0x2d0)
++#define     REG_DMAC_INTRAWDSTTXR       (DMAC_REG_BASE + 0x2d8)
++#define     REG_DMAC_INTRAWERR          (DMAC_REG_BASE + 0x2e0)
++#define     REG_DMAC_INTSTAT_TFR        (DMAC_REG_BASE + 0x2e8)
++#define     REG_DMAC_INTSTAT_BLK        (DMAC_REG_BASE + 0x2f0)
++#define     REG_DMAC_INTSTAT_SRCTXR     (DMAC_REG_BASE + 0x2f8)
++#define     REG_DMAC_INTSTAT_DSTTXR     (DMAC_REG_BASE + 0x300)
++#define     REG_DMAC_INTSTAT_ERR        (DMAC_REG_BASE + 0x308)
++#define     REG_DMAC_INTMSK_TFR         (DMAC_REG_BASE + 0x310)
++#define     REG_DMAC_INTMSK_BLK         (DMAC_REG_BASE + 0x318)
++#define     REG_DMAC_INTMSK_SRCTXR      (DMAC_REG_BASE + 0x320)
++#define     REG_DMAC_INTMSK_DSTTXR      (DMAC_REG_BASE + 0x328)
++#define     REG_DMAC_INTMSK_ERR         (DMAC_REG_BASE + 0x330)
++#define     REG_DMAC_INTCLR_TFR         (DMAC_REG_BASE + 0x338)
++#define     REG_DMAC_INTCLR_BLK         (DMAC_REG_BASE + 0x340)
++#define     REG_DMAC_INTCLR_SRCTXR      (DMAC_REG_BASE + 0x348)
++#define     REG_DMAC_INTCLR_DSTTXR      (DMAC_REG_BASE + 0x350)
++#define     REG_DMAC_INTCLR_ERR         (DMAC_REG_BASE + 0x358)
++#define     REG_DMAC_INT_STATUS_ALL     (DMAC_REG_BASE + 0x360)
++
++#define     REG_DMAC_REQSRC             (DMAC_REG_BASE + 0x368)
++#define     REG_DMAC_REQDST             (DMAC_REG_BASE + 0x370)
++#define     REG_DMAC_SGLREQSRC          (DMAC_REG_BASE + 0x378)
++#define     REG_DMAC_SGLREQDST          (DMAC_REG_BASE + 0x380)
++#define     REG_DMAC_LSTSRC             (DMAC_REG_BASE + 0x388)
++#define     REG_DMAC_LSTDST             (DMAC_REG_BASE + 0x390)
++#define     REG_DMAC_CFG_REG            (DMAC_REG_BASE + 0x398)
++#define     REG_DMAC_CHAN_EN            (DMAC_REG_BASE + 0x3a0)
++#define     REG_DMAC_IDREG              (DMAC_REG_BASE + 0x3a8)
++#define     REG_DMAC_TESTREG            (DMAC_REG_BASE + 0x3b0)
++#define     REG_DMAC_COMPARAMS_6        (DMAC_REG_BASE + 0x3c8)
++#define     REG_DMAC_COMPARAMS_5        (DMAC_REG_BASE + 0x3d0)
++#define     REG_DMAC_COMPARAMS_4        (DMAC_REG_BASE + 0x3d8)
++#define     REG_DMAC_COMPARAMS_3        (DMAC_REG_BASE + 0x3e0)
++#define     REG_DMAC_COMPARAMS_2        (DMAC_REG_BASE + 0x3e8)
++#define     REG_DMAC_COMPARAMS_1        (DMAC_REG_BASE + 0x3f0)
++#define     REG_DMAC_COMP_IDREG         (DMAC_REG_BASE + 0x3f8)
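++
++/* Channel control, interrupt management and transfer setup helpers. */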
++int 	Dma_GetChanStatus( int nChanID );
++void 	Dma_EnableChan( int nChanID );
++void 	Dma_DisableChan( int nChanID );
++
++void 	Dma_ClearIsrBit( int iChan, int nMask );
++void 	Dma_EnableIsrBit( int iChan, int nMask );
++void 	Dma_DisableIsrBit( int iChan, int nMask );
++unsigned int Dma_GetIsrChan(unsigned int nMask);
++int 	Dma_QueryIsrStatus(void);
++
++void 	Dma_ClearTfrDone(int iChan);
++void 	Dma_SetTxrSize( int iChan, int nBytes );
++void 	Dma_SetSrcWidth( int iChan, int nWidth );
++void	Dma_SetDstWidth( int iChan, int nWidth );
++void 	Dma_SetSrcSize( int iChan, int nSize );		// burst size
++void	Dma_SetDstSize( int iChan, int nSize );
++void 	Dma_EnableSrcBlkChain(int iChan);
++void	Dma_EnableDstBlkChain(int iChan);
++
++void 	Dma_SetTxrType( int iChan, int nMode );
++void	Dma_SetSrcIncDirection( int iChan, int nDir );
++void	Dma_SetDstIncDirection( int iChan, int nDir );
++
++void 	Dma_EnableGather( int iChan, int bEnable );
++void 	Dma_EnableScatter( int iChan, int bEnable);
++
++void 	Dma_SetSrcHsMode( int iChan, int nMode );
++void 	Dma_SetDstHsMode( int iChan, int nMode );
++
++void	Dma_SetFifoMode( int iChan, int nMode );
++void 	Dma_SetFlowCtrl( int iChan, int ctrl );
++void 	Dma_SetSrcAutoload(int iChan, int bEnable );
++void	Dma_SetDstAutoload(int iChan, int bEnable );
++void	Dma_SetMaxBurst( int iChan, int nSize );
++
++void 	Dma_SetSrcHsPol( int iChan, int nPol );
++void 	Dma_SetDstHsPol( int iChan, int nPol );
++
++void 	Dma_SetLinkEntry( int iChan, unsigned int nAddr );
++void 	Dma_SetSrcAddress( int iChan, unsigned int nAddr );
++void 	Dma_SetDstAddress( int iChan, unsigned int nAddr );
++void	Dma_SetSrcPer( int iChan, unsigned int nPer );
++void 	Dma_SetDstPer( int iChan, unsigned int nPer );
++
++void 	Dma_StartSrctfr( int iChan );
++void 	Dma_StartDsttfr( int iChan );
++
++void Dma_Init(void);
++
++#endif
++
+diff --git a/sound/soc/dwc/fh.c b/sound/soc/dwc/fh.c
+new file mode 100644
+index 00000000..31b8639c
+--- /dev/null
++++ b/sound/soc/dwc/fh.c
+@@ -0,0 +1,240 @@
++/*
++ * ASoC driver for Stretch s6105 IP camera platform
++ *
++ * Author:      Daniel Gloeckner, <dg@emlix.com>
++ * Copyright:   (C) 2009 emlix GmbH <info@emlix.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/timer.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/i2c.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/soc.h>
++
++
++
++#include "fullhan-pcm.h"
++#include "fh_i2s.h"
++
++#define S6105_CAM_CODEC_CLOCK 12288000
++
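++/*
++ * Configure both DAIs with the codec as bit/frame clock master and hand
++ * the codec its 12.288 MHz sysclk.
++ */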
++static int s6105_hw_params(struct snd_pcm_substream *substream,
++			   struct snd_pcm_hw_params *params)
++{
++	struct snd_soc_pcm_runtime *rtd = substream->private_data;
++	struct snd_soc_dai *codec_dai = rtd->codec_dai;
++	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++	int ret = 0;
++
++	/* set codec DAI configuration */
++	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
++					     SND_SOC_DAIFMT_CBM_CFM);
++	if (ret < 0)
++		return ret;
++	/* set cpu DAI configuration */
++	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBM_CFM |
++					   SND_SOC_DAIFMT_NB_NF);
++	if (ret < 0)
++		return ret;
++
++	/* set the codec system clock */
++	ret = snd_soc_dai_set_sysclk(codec_dai, 0, S6105_CAM_CODEC_CLOCK,
++					    SND_SOC_CLOCK_OUT);
++	if (ret < 0)
++		return ret;
++	return 0;
++}
++
++static struct snd_soc_ops s6105_ops = {
++	.hw_params = s6105_hw_params,
++};
++
++/* s6105 machine dapm widgets */
++static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
++	SND_SOC_DAPM_LINE("Audio Out Differential", NULL),
++	SND_SOC_DAPM_LINE("Audio Out Stereo", NULL),
++	SND_SOC_DAPM_LINE("Audio In", NULL),
++};
++
++/* s6105 machine audio_map: connections to the codec pins */
++static const struct snd_soc_dapm_route audio_map[] = {
++	/* Audio Out connected to HPLOUT, HPLCOM, HPROUT */
++	{"Audio Out Differential", NULL, "HPLOUT"},
++	{"Audio Out Differential", NULL, "HPLCOM"},
++	{"Audio Out Stereo", NULL, "HPLOUT"},
++	{"Audio Out Stereo", NULL, "HPROUT"},
++
++	/* Audio In connected to LINE1L, LINE1R */
++	{"LINE1L", NULL, "Audio In"},
++	{"LINE1R", NULL, "Audio In"},
++};
++
++static int output_type_info(struct snd_kcontrol *kcontrol,
++			    struct snd_ctl_elem_info *uinfo)
++{
++	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
++	uinfo->count = 1;
++	uinfo->value.enumerated.items = 2;
++	if (uinfo->value.enumerated.item) {
++		uinfo->value.enumerated.item = 1;
++		strcpy(uinfo->value.enumerated.name, "HPLOUT/HPROUT");
++	} else {
++		strcpy(uinfo->value.enumerated.name, "HPLOUT/HPLCOM");
++	}
++	return 0;
++}
++
++static int output_type_get(struct snd_kcontrol *kcontrol,
++			   struct snd_ctl_elem_value *ucontrol)
++{
++	ucontrol->value.enumerated.item[0] = kcontrol->private_value;
++	return 0;
++}
++
++static int output_type_put(struct snd_kcontrol *kcontrol,
++			   struct snd_ctl_elem_value *ucontrol)
++{
++	struct snd_soc_codec *codec = kcontrol->private_data;
++	struct snd_soc_dapm_context *dapm = &codec->dapm;
++	unsigned int val = (ucontrol->value.enumerated.item[0] != 0);
++	char *differential = "Audio Out Differential";
++	char *stereo = "Audio Out Stereo";
++
++	if (kcontrol->private_value == val)
++		return 0;
++	kcontrol->private_value = val;
++	snd_soc_dapm_disable_pin(dapm, val ? differential : stereo);
++	snd_soc_dapm_sync(dapm);
++	snd_soc_dapm_enable_pin(dapm, val ? stereo : differential);
++	snd_soc_dapm_sync(dapm);
++
++	return 1;
++}
++
++static const struct snd_kcontrol_new audio_out_mux = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "Master Output Mux",
++	.index = 0,
++	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++	.info = output_type_info,
++	.get = output_type_get,
++	.put = output_type_put,
++	.private_value = 1 /* default to stereo */
++};
++
++/* Logic for an aic3x as connected on the s6105 ip camera ref design */
++static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd)
++{
++	struct snd_soc_codec *codec = rtd->codec;
++	struct snd_soc_dapm_context *dapm = &codec->dapm;
++
++	/* Add s6105 specific widgets */
++	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
++				  ARRAY_SIZE(aic3x_dapm_widgets));
++
++	/* Set up s6105 specific audio path audio_map */
++	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
++
++	/* not present */
++	snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
++	snd_soc_dapm_nc_pin(dapm, "LINE2L");
++	snd_soc_dapm_nc_pin(dapm, "LINE2R");
++
++	/* not connected */
++	snd_soc_dapm_nc_pin(dapm, "MIC3L"); /* LINE2L on this chip */
++	snd_soc_dapm_nc_pin(dapm, "MIC3R"); /* LINE2R on this chip */
++	snd_soc_dapm_nc_pin(dapm, "LLOUT");
++	snd_soc_dapm_nc_pin(dapm, "RLOUT");
++	snd_soc_dapm_nc_pin(dapm, "HPRCOM");
++
++	/* always connected */
++	snd_soc_dapm_enable_pin(dapm, "Audio In");
++
++	/* must correspond to audio_out_mux.private_value initializer */
++	snd_soc_dapm_disable_pin(dapm, "Audio Out Differential");
++	snd_soc_dapm_sync(dapm);
++	snd_soc_dapm_enable_pin(dapm, "Audio Out Stereo");
++
++	snd_soc_dapm_sync(dapm);
++
++	snd_ctl_add(codec->card->snd_card, snd_ctl_new1(&audio_out_mux, codec));
++
++	return 0;
++}
++
++/* s6105 digital audio interface glue - connects codec <--> CPU */
++static struct snd_soc_dai_link s6105_dai = {
++	.name = "TLV320AIC31",
++	.stream_name = "AIC31",
++	.cpu_dai_name = "s6000-i2s.0",
++	.codec_dai_name = "fh-acodec-hifi",
++	.platform_name = "fh-pcm-audio",
++	.codec_name = "fh-acodec",
++	.init = s6105_aic3x_init,
++	.ops = &s6105_ops,
++};
++
++/* s6105 audio machine driver */
++static struct snd_soc_card snd_soc_card_s6105 = {
++	.name = "Stretch IP Camera",
++	.dai_link = &s6105_dai,
++	.num_links = 1,
++};
++
++static struct s6000_snd_platform_data __initdata s6105_snd_data = {
++	.wide		= 0,
++	.channel_in	= 0,
++	.channel_out	= 1,
++	.lines_in	= 1,
++	.lines_out	= 1,
++	.same_rate	= 1,
++};
++
++static struct platform_device *s6105_snd_device;
++
++/* temporary i2c device creation until this can be moved into the machine
++ * support file.
++*/
++static struct i2c_board_info i2c_device[] = {
++	{ I2C_BOARD_INFO("tlv320aic33", 0x18), }
++};
++
++static int __init s6105_init(void)
++{
++	int ret;
++	i2c_register_board_info(0, i2c_device, ARRAY_SIZE(i2c_device));
++
++	s6105_snd_device = platform_device_alloc("soc-audio", -1);
++	if (!s6105_snd_device)
++		return -ENOMEM;
++
++	platform_set_drvdata(s6105_snd_device, &snd_soc_card_s6105);
++	platform_device_add_data(s6105_snd_device, &s6105_snd_data,
++				 sizeof(s6105_snd_data));
++
++	ret = platform_device_add(s6105_snd_device);
++	if (ret)
++		platform_device_put(s6105_snd_device);
++
++	return ret;
++}
++
++static void __exit s6105_exit(void)
++{
++	platform_device_unregister(s6105_snd_device);
++}
++
++module_init(s6105_init);
++module_exit(s6105_exit);
++
++MODULE_AUTHOR("Daniel Gloeckner");
++MODULE_DESCRIPTION("Stretch s6105 IP camera ASoC driver");
++MODULE_LICENSE("GPL");
+diff --git a/sound/soc/dwc/fh_i2s.c b/sound/soc/dwc/fh_i2s.c
+new file mode 100644
+index 00000000..8658b13a
+--- /dev/null
++++ b/sound/soc/dwc/fh_i2s.c
+@@ -0,0 +1,1072 @@
++/*
++ * ALSA SoC I2S (McBSP) Audio Layer for TI DAVINCI processor
++ *
++ * Author:      Vladimir Barinov, <vbarinov@embeddedalley.com>
++ * Copyright:   (C) 2007 MontaVista Software, Inc., <source@mvista.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/clk.h>
++#include <linux/i2c.h>
++#include <linux/irqreturn.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/initval.h>
++#include <sound/soc.h>
++#include <linux/kernel.h>
++//#include <mach/asp.h>
++
++#include "fullhan-pcm.h"
++#include "fh_i2s.h"
++
++#define I2S_FIFO_LEN_RX  40
++#define I2S_FIFO_LEN_TX  40
++extern void i2s_irq_enquen(int type, u8 *buff, u8 len, u8 reset);
++/*
++ * NOTE:  terminology here is confusing.
++ *
++ *  - This driver supports the "Audio Serial Port" (ASP),
++ *    found on dm6446, dm355, and other DaVinci chips.
++ *
++ *  - But it labels it a "Multi-channel Buffered Serial Port"
++ *    (McBSP) as on older chips like the dm642 ... which was
++ *    backward-compatible, possibly explaining that confusion.
++ *
++ *  - OMAP chips have a controller called McBSP, which is
++ *    incompatible with the DaVinci flavor of McBSP.
++ *
++ *  - Newer DaVinci chips have a controller called McASP,
++ *    incompatible with ASP and with either McBSP.
++ *
++ * In short:  this uses ASP to implement I2S, not McBSP.
++ * And it won't be the only DaVinci implementation of I2S.
++ */
++#define DAVINCI_MCBSP_DRR_REG	0x00
++#define DAVINCI_MCBSP_DXR_REG	0x04
++#define DAVINCI_MCBSP_SPCR_REG	0x08
++#define DAVINCI_MCBSP_RCR_REG	0x0c
++#define DAVINCI_MCBSP_XCR_REG	0x10
++#define DAVINCI_MCBSP_SRGR_REG	0x14
++#define DAVINCI_MCBSP_PCR_REG	0x24
++
++#define DAVINCI_MCBSP_SPCR_RRST		(1 << 0)
++#define DAVINCI_MCBSP_SPCR_RINTM(v)	((v) << 4)
++#define DAVINCI_MCBSP_SPCR_XRST		(1 << 16)
++#define DAVINCI_MCBSP_SPCR_XINTM(v)	((v) << 20)
++#define DAVINCI_MCBSP_SPCR_GRST		(1 << 22)
++#define DAVINCI_MCBSP_SPCR_FRST		(1 << 23)
++#define DAVINCI_MCBSP_SPCR_FREE		(1 << 25)
++
++#define DAVINCI_MCBSP_RCR_RWDLEN1(v)	((v) << 5)
++#define DAVINCI_MCBSP_RCR_RFRLEN1(v)	((v) << 8)
++#define DAVINCI_MCBSP_RCR_RDATDLY(v)	((v) << 16)
++#define DAVINCI_MCBSP_RCR_RFIG		(1 << 18)
++#define DAVINCI_MCBSP_RCR_RWDLEN2(v)	((v) << 21)
++#define DAVINCI_MCBSP_RCR_RFRLEN2(v)	((v) << 24)
++#define DAVINCI_MCBSP_RCR_RPHASE	BIT(31)
++
++#define DAVINCI_MCBSP_XCR_XWDLEN1(v)	((v) << 5)
++#define DAVINCI_MCBSP_XCR_XFRLEN1(v)	((v) << 8)
++#define DAVINCI_MCBSP_XCR_XDATDLY(v)	((v) << 16)
++#define DAVINCI_MCBSP_XCR_XFIG		(1 << 18)
++#define DAVINCI_MCBSP_XCR_XWDLEN2(v)	((v) << 21)
++#define DAVINCI_MCBSP_XCR_XFRLEN2(v)	((v) << 24)
++#define DAVINCI_MCBSP_XCR_XPHASE	BIT(31)
++
++#define DAVINCI_MCBSP_SRGR_FWID(v)	((v) << 8)
++#define DAVINCI_MCBSP_SRGR_FPER(v)	((v) << 16)
++#define DAVINCI_MCBSP_SRGR_FSGM		(1 << 28)
++#define DAVINCI_MCBSP_SRGR_CLKSM	BIT(29)
++
++#define DAVINCI_MCBSP_PCR_CLKRP		(1 << 0)
++#define DAVINCI_MCBSP_PCR_CLKXP		(1 << 1)
++#define DAVINCI_MCBSP_PCR_FSRP		(1 << 2)
++#define DAVINCI_MCBSP_PCR_FSXP		(1 << 3)
++#define DAVINCI_MCBSP_PCR_SCLKME	(1 << 7)
++#define DAVINCI_MCBSP_PCR_CLKRM		(1 << 8)
++#define DAVINCI_MCBSP_PCR_CLKXM		(1 << 9)
++#define DAVINCI_MCBSP_PCR_FSRM		(1 << 10)
++#define DAVINCI_MCBSP_PCR_FSXM		(1 << 11)
++
++enum {
++	DAVINCI_MCBSP_WORD_8 = 0,
++	DAVINCI_MCBSP_WORD_12,
++	DAVINCI_MCBSP_WORD_16,
++	DAVINCI_MCBSP_WORD_20,
++	DAVINCI_MCBSP_WORD_24,
++	DAVINCI_MCBSP_WORD_32,
++};
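++
++/*
++ * FIFO servicing is deferred from the I2S interrupt handler to the work
++ * items below; each my_data instance carries the work struct and the
++ * mapped register base for one stream direction.
++ */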
++struct my_data {
++	struct work_struct my_work;
++	int value;
++	u8 buff;
++	u8 len;
++	void __iomem *base;
++};
++struct my_data *rx_md, *tx_md;
++struct work_struct rx_work_queue, tx_work_queue;
++int g_i2s_base;
++/* Allocate the per-direction work container and attach its work struct. */
++struct my_data *init_data(struct my_data *md, struct work_struct work_queue)
++{
++	md = kmalloc(sizeof(struct my_data), GFP_KERNEL);
++	if (!md)
++		return NULL;
++	md->my_work = work_queue;
++	return md;
++}
++
++
++static const unsigned char data_type[SNDRV_PCM_FORMAT_S32_LE + 1] = {
++	[SNDRV_PCM_FORMAT_S8]     = 1,
++	[SNDRV_PCM_FORMAT_S16_LE] = 2,
++	[SNDRV_PCM_FORMAT_S32_LE] = 4,
++};
++
++static const unsigned char asp_word_length[SNDRV_PCM_FORMAT_S32_LE + 1] = {
++	[SNDRV_PCM_FORMAT_S8]     = DAVINCI_MCBSP_WORD_8,
++	[SNDRV_PCM_FORMAT_S16_LE] = DAVINCI_MCBSP_WORD_16,
++	[SNDRV_PCM_FORMAT_S32_LE] = DAVINCI_MCBSP_WORD_32,
++};
++
++static const unsigned char double_fmt[SNDRV_PCM_FORMAT_S32_LE + 1] = {
++	[SNDRV_PCM_FORMAT_S8]     = SNDRV_PCM_FORMAT_S16_LE,
++	[SNDRV_PCM_FORMAT_S16_LE] = SNDRV_PCM_FORMAT_S32_LE,
++};
++enum dma_event_q {
++	EVENTQ_0 = 0,
++	EVENTQ_1 = 1,
++	EVENTQ_2 = 2,
++	EVENTQ_3 = 3,
++	EVENTQ_DEFAULT = -1
++};
++struct davinci_pcm_dma_params {
++	int channel; /* sync dma channel ID */
++	unsigned short acnt;
++	dma_addr_t dma_addr; /* device physical address for DMA */
++	unsigned sram_size;
++	enum dma_event_q asp_chan_q; /* event queue number for ASP channel */
++	enum dma_event_q ram_chan_q; /* event queue number for RAM channel */
++	unsigned char data_type; /* xfer data type */
++	unsigned char convert_mono_stereo;
++	unsigned int fifo_level;
++	int (*trigger)(struct snd_pcm_substream *substream, int cmd, int after);
++};
++struct s6000_i2s_dev {
++	dma_addr_t sifbase;
++	u8 __iomem *scbbase;
++	unsigned int wide;
++	unsigned int channel_in;
++	unsigned int channel_out;
++	unsigned int lines_in;
++	unsigned int lines_out;
++	struct s6000_pcm_dma_params dma_params;
++	int irq;
++	void __iomem *base;
++	struct clk *clk;
++	struct device *dev;
++};
++struct davinci_mcbsp_dev {
++	struct device *dev;
++	struct davinci_pcm_dma_params dma_params;
++	void __iomem *base;
++#define MOD_DSP_A	0
++#define MOD_DSP_B	1
++	int mode;
++	u32 pcr;
++	struct clk *clk;
++	/*
++	 * Combining both channels into 1 element will at least double the
++	 * amount of time between servicing the dma channel, increase
++	 * efficiency, and reduce the chance of overrun/underrun. But,
++	 * it will result in the left & right channels being swapped.
++	 *
++	 * If relabeling the left and right channels is not possible,
++	 * you may want to let the codec know to swap them back.
++	 *
++	 * It may allow 10x the amount of time to service dma requests,
++	 * if the codec is master and is using an unnecessarily fast bit clock
++	 * (ie. tlvaic23b), independent of the sample rate. So, having an
++	 * entire frame at once means it can be serviced at the sample rate
++	 * instead of the bit clock rate.
++	 *
++	 * In the now unlikely case that an underrun still
++	 * occurs, both the left and right samples will be repeated
++	 * so that no pops are heard, and the left and right channels
++	 * won't end up being swapped because of the underrun.
++	 */
++	unsigned enable_channel_combine :1;
++
++	unsigned int fmt;
++	int clk_div;
++	int clk_input_pin;
++	bool i2s_accurate_sck;
++};
++struct i2c_adapter *codec_i2c_adapter;
++void set_i2c_codec_adapter(struct i2c_adapter *adapter)
++{
++	codec_i2c_adapter = adapter;
++}
++EXPORT_SYMBOL(set_i2c_codec_adapter);
++
++int i2c_write_codec(u8 addr, u8 data)
++{
++	struct i2c_msg msgs[1];
++	u8 send[2];
++
++	send[0] = addr;
++	send[1] = data;
++	msgs[0].addr = 0x1a;
++	msgs[0].flags = 0;
++	msgs[0].len = 2;
++	msgs[0].buf = send;
++	return i2c_transfer(codec_i2c_adapter, msgs, 1);
++}
++
++static inline void davinci_mcbsp_write_reg(struct davinci_mcbsp_dev *dev,
++		int reg, u32 val) {
++	__raw_writel(val, dev->base + reg);
++}
++
++static inline u32 davinci_mcbsp_read_reg(struct davinci_mcbsp_dev *dev, int reg) {
++	return __raw_readl(dev->base + reg);
++}
++
++static void toggle_clock(struct davinci_mcbsp_dev *dev, int playback) {
++	u32 m = playback ? DAVINCI_MCBSP_PCR_CLKXP : DAVINCI_MCBSP_PCR_CLKRP;
++	/* The clock needs to toggle to complete reset.
++	 * So, fake it by toggling the clk polarity.
++	 */
++	davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr ^ m);
++	davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr);
++}
++
++static void davinci_mcbsp_start(struct davinci_mcbsp_dev *dev,
++		struct snd_pcm_substream *substream) {
++//	struct snd_soc_pcm_runtime *rtd = substream->private_data;
++//	struct snd_soc_platform *platform = rtd->platform;
++//	int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
++//	u32 spcr;
++//	u32 mask = playback ? DAVINCI_MCBSP_SPCR_XRST : DAVINCI_MCBSP_SPCR_RRST;
++//	spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
++//	if (spcr & mask) {
++//		/* start off disabled */
++//		davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG,
++//				spcr & ~mask);
++//		toggle_clock(dev, playback);
++//	}
++//	if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM |
++//			DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM)) {
++//		/* Start the sample generator */
++//		spcr |= DAVINCI_MCBSP_SPCR_GRST;
++//		davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
++//	}
++//
++//	if (playback) {
++//		/* Stop the DMA to avoid data loss */
++//		/* while the transmitter is out of reset to handle XSYNCERR */
++//		if (platform->driver->ops->trigger) {
++//			int ret = platform->driver->ops->trigger(substream,
++//				SNDRV_PCM_TRIGGER_STOP);
++//			if (ret < 0)
++//				printk(KERN_DEBUG "Playback DMA stop failed\n");
++//		}
++//
++//		/* Enable the transmitter */
++//		spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
++//		spcr |= DAVINCI_MCBSP_SPCR_XRST;
++
++//		davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
++//
++//		/* wait for any unexpected frame sync error to occur */
++//		udelay(100);
++//
++//		/* Disable the transmitter to clear any outstanding XSYNCERR */
++//		spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
++//		spcr &= ~DAVINCI_MCBSP_SPCR_XRST;
++//		davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
++//		toggle_clock(dev, playback);
++//
++//		/* Restart the DMA */
++//		if (platform->driver->ops->trigger) {
++//			int ret = platform->driver->ops->trigger(substream,
++//				SNDRV_PCM_TRIGGER_START);
++//			if (ret < 0)
++//				printk(KERN_DEBUG "Playback DMA start failed\n");
++//		}
++//	}
++//
++//	/* Enable transmitter or receiver */
++//	spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
++//	spcr |= mask;
++//
++//	if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM)) {
++//		/* Start frame sync */
++//		spcr |= DAVINCI_MCBSP_SPCR_FRST;
++//	}
++//	davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
++}
++
++static void davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback) {
++
++//	u32 spcr;
++//
++//	/* Reset transmitter/receiver and sample rate/frame sync generators */
++//	spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
++//	spcr &= ~(DAVINCI_MCBSP_SPCR_GRST | DAVINCI_MCBSP_SPCR_FRST);
++//	spcr &= playback ? ~DAVINCI_MCBSP_SPCR_XRST : ~DAVINCI_MCBSP_SPCR_RRST;
++//	davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
++//	toggle_clock(dev, playback);
++}
++
++#define DEFAULT_BITPERSAMPLE	16
++
++static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
++		unsigned int fmt) {
++//	struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
++//	unsigned int pcr;
++//	unsigned int srgr;
++//	/* Attention srgr is updated by hw_params! */
++//	srgr = DAVINCI_MCBSP_SRGR_FSGM |
++//		DAVINCI_MCBSP_SRGR_FPER(DEFAULT_BITPERSAMPLE * 2 - 1) |
++//		DAVINCI_MCBSP_SRGR_FWID(DEFAULT_BITPERSAMPLE - 1);
++//
++//	dev->fmt = fmt;
++//	/* set master/slave audio interface */
++//	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
++//	case SND_SOC_DAIFMT_CBS_CFS:
++//		/* cpu is master */
++//		pcr = DAVINCI_MCBSP_PCR_FSXM |
++//			DAVINCI_MCBSP_PCR_FSRM |
++//			DAVINCI_MCBSP_PCR_CLKXM |
++//			DAVINCI_MCBSP_PCR_CLKRM;
++//		break;
++//	case SND_SOC_DAIFMT_CBM_CFS:
++//		pcr = DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_FSXM;
++//		/*
++//		 * Selection of the clock input pin that is the
++//		 * input for the Sample Rate Generator.
++//		 * McBSP FSR and FSX are driven by the Sample Rate
++//		 * Generator.
++//		 */
++//		switch (dev->clk_input_pin) {
++//		case MCBSP_CLKS:
++//			pcr |= DAVINCI_MCBSP_PCR_CLKXM |
++//				DAVINCI_MCBSP_PCR_CLKRM;
++//			break;
++//		case MCBSP_CLKR:
++//			pcr |= DAVINCI_MCBSP_PCR_SCLKME;
++//			break;
++//		default:
++//			dev_err(dev->dev, "bad clk_input_pin\n");
++//			return -EINVAL;
++//		}
++//
++//		break;
++//	case SND_SOC_DAIFMT_CBM_CFM:
++//		/* codec is master */
++//		pcr = 0;
++//		break;
++//	default:
++//		printk(KERN_ERR "%s:bad master\n", __func__);
++//		return -EINVAL;
++//	}
++//
++//	/* interface format */
++//	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
++//	case SND_SOC_DAIFMT_I2S:
++//		/* Davinci doesn't support TRUE I2S, but some codecs will have
++//		 * the left and right channels contiguous. This allows
++//		 * dsp_a mode to be used with an inverted normal frame clk.
++//		 * If your codec is master and does not have contiguous
++//		 * channels, then you will have sound on only one channel.
++//		 * Try using a different mode, or codec as slave.
++//		 *
++//		 * The TLV320AIC33 is an example of a codec where this works.
++//		 * It has a variable bit clock frequency allowing it to have
++//		 * valid data on every bit clock.
++//		 *
++//		 * The TLV320AIC23 is an example of a codec where this does not
++//		 * work. It has a fixed bit clock frequency with progressively
++//		 * more empty bit clock slots between channels as the sample
++//		 * rate is lowered.
++//		 */
++//		fmt ^= SND_SOC_DAIFMT_NB_IF;
++//	case SND_SOC_DAIFMT_DSP_A:
++//		dev->mode = MOD_DSP_A;
++//		break;
++//	case SND_SOC_DAIFMT_DSP_B:
++//		dev->mode = MOD_DSP_B;
++//		break;
++//	default:
++//		printk(KERN_ERR "%s:bad format\n", __func__);
++//		return -EINVAL;
++//	}
++//
++//	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
++//	case SND_SOC_DAIFMT_NB_NF:
++//		/* CLKRP Receive clock polarity,
++//		 *	1 - sampled on rising edge of CLKR
++//		 *	valid on rising edge
++//		 * CLKXP Transmit clock polarity,
++//		 *	1 - clocked on falling edge of CLKX
++//		 *	valid on rising edge
++//		 * FSRP  Receive frame sync pol, 0 - active high
++//		 * FSXP  Transmit frame sync pol, 0 - active high
++//		 */
++//		pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP);
++//		break;
++//	case SND_SOC_DAIFMT_IB_IF:
++//		/* CLKRP Receive clock polarity,
++//		 *	0 - sampled on falling edge of CLKR
++//		 *	valid on falling edge
++//		 * CLKXP Transmit clock polarity,
++//		 *	0 - clocked on rising edge of CLKX
++//		 *	valid on falling edge
++//		 * FSRP  Receive frame sync pol, 1 - active low
++//		 * FSXP  Transmit frame sync pol, 1 - active low
++//		 */
++//		pcr |= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP);
++//		break;
++//	case SND_SOC_DAIFMT_NB_IF:
++//		/* CLKRP Receive clock polarity,
++//		 *	1 - sampled on rising edge of CLKR
++//		 *	valid on rising edge
++//		 * CLKXP Transmit clock polarity,
++//		 *	1 - clocked on falling edge of CLKX
++//		 *	valid on rising edge
++//		 * FSRP  Receive frame sync pol, 1 - active low
++//		 * FSXP  Transmit frame sync pol, 1 - active low
++//		 */
++//		pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP |
++//			DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP);
++//		break;
++//	case SND_SOC_DAIFMT_IB_NF:
++//		/* CLKRP Receive clock polarity,
++//		 *	0 - sampled on falling edge of CLKR
++//		 *	valid on falling edge
++//		 * CLKXP Transmit clock polarity,
++//		 *	0 - clocked on rising edge of CLKX
++//		 *	valid on falling edge
++//		 * FSRP  Receive frame sync pol, 0 - active high
++//		 * FSXP  Transmit frame sync pol, 0 - active high
++//		 */
++//		break;
++//	default:
++//		return -EINVAL;
++//	}
++//	davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
++//	dev->pcr = pcr;
++//	davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, pcr);
++	return 0;
++}
++
++static int davinci_i2s_dai_set_clkdiv(struct snd_soc_dai *cpu_dai, int div_id,
++		int div) {
++
++//	struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
++//
++//	if (div_id != DAVINCI_MCBSP_CLKGDV)
++//		return -ENODEV;
++//
++//	dev->clk_div = div;
++	return 0;
++}
++
++static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
++		struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) {
++
++
++	struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
++	struct davinci_pcm_dma_params *dma_params = &dev->dma_params;
++	struct snd_interval *i = NULL;
++	int mcbsp_word_length, master;
++	unsigned int rcr, xcr, srgr, clk_div, freq, framesize;
++	u32 spcr;
++	snd_pcm_format_t fmt;
++	unsigned element_cnt = 1;
++
++	/* general line settings */
++#if 0
++	spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
++	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
++		spcr |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
++		davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
++	} else {
++		spcr |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
++		davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
++	}
++
++	master = dev->fmt & SND_SOC_DAIFMT_MASTER_MASK;
++	fmt = params_format(params);
++	mcbsp_word_length = asp_word_length[fmt];
++
++	switch (master) {
++		case SND_SOC_DAIFMT_CBS_CFS:
++		freq = clk_get_rate(dev->clk);
++		srgr = DAVINCI_MCBSP_SRGR_FSGM |
++		DAVINCI_MCBSP_SRGR_CLKSM;
++		srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length *
++				8 - 1);
++		if (dev->i2s_accurate_sck) {
++			clk_div = 256;
++			do {
++				framesize = (freq / (--clk_div)) /
++				params->rate_num *
++				params->rate_den;
++			} while (((framesize < 33) || (framesize > 4095)) &&
++					(clk_div));
++			clk_div--;
++			srgr |= DAVINCI_MCBSP_SRGR_FPER(framesize - 1);
++		} else {
++			/* symmetric waveforms */
++			clk_div = freq / (mcbsp_word_length * 16) /
++			params->rate_num * params->rate_den;
++			srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length *
++					16 - 1);
++		}
++		clk_div &= 0xFF;
++		srgr |= clk_div;
++		break;
++		case SND_SOC_DAIFMT_CBM_CFS:
++		srgr = DAVINCI_MCBSP_SRGR_FSGM;
++		clk_div = dev->clk_div - 1;
++		srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1);
++		srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length * 16 - 1);
++		clk_div &= 0xFF;
++		srgr |= clk_div;
++		break;
++		case SND_SOC_DAIFMT_CBM_CFM:
++		/* Clock and frame sync given from external sources */
++		i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
++		srgr = DAVINCI_MCBSP_SRGR_FSGM;
++		srgr |= DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1);
++		pr_debug("%s - %d  FWID set: re-read srgr = %X\n",
++				__func__, __LINE__, snd_interval_value(i) - 1);
++
++		i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_FRAME_BITS);
++		srgr |= DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1);
++		break;
++		default:
++		return -EINVAL;
++	}
++	davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
++
++	rcr = DAVINCI_MCBSP_RCR_RFIG;
++	xcr = DAVINCI_MCBSP_XCR_XFIG;
++	if (dev->mode == MOD_DSP_B) {
++		rcr |= DAVINCI_MCBSP_RCR_RDATDLY(0);
++		xcr |= DAVINCI_MCBSP_XCR_XDATDLY(0);
++	} else {
++		rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1);
++		xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1);
++	}
++	/* Determine xfer data type */
++	fmt = params_format(params);
++	if ((fmt > SNDRV_PCM_FORMAT_S32_LE) || !data_type[fmt]) {
++		printk(KERN_WARNING "davinci-i2s: unsupported PCM format\n");
++		return -EINVAL;
++	}
++
++	if (params_channels(params) == 2) {
++		element_cnt = 2;
++		if (double_fmt[fmt] && dev->enable_channel_combine) {
++			element_cnt = 1;
++			fmt = double_fmt[fmt];
++		}
++		switch (master) {
++			case SND_SOC_DAIFMT_CBS_CFS:
++			case SND_SOC_DAIFMT_CBS_CFM:
++			rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(0);
++			xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(0);
++			rcr |= DAVINCI_MCBSP_RCR_RPHASE;
++			xcr |= DAVINCI_MCBSP_XCR_XPHASE;
++			break;
++			case SND_SOC_DAIFMT_CBM_CFM:
++			case SND_SOC_DAIFMT_CBM_CFS:
++			rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(element_cnt - 1);
++			xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(element_cnt - 1);
++			break;
++			default:
++			return -EINVAL;
++		}
++	}
++//	dma_params->acnt = dma_params->data_type = data_type[fmt];
++//	dma_params->fifo_level = 0;
++	mcbsp_word_length = asp_word_length[fmt];
++
++	switch (master) {
++		case SND_SOC_DAIFMT_CBS_CFS:
++		case SND_SOC_DAIFMT_CBS_CFM:
++		rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(0);
++		xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(0);
++		break;
++		case SND_SOC_DAIFMT_CBM_CFM:
++		case SND_SOC_DAIFMT_CBM_CFS:
++		rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(element_cnt - 1);
++		xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(element_cnt - 1);
++		break;
++		default:
++		return -EINVAL;
++	}
++
++	rcr |= DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) |
++	DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length);
++	xcr |= DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) |
++	DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length);
++
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++	davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr);
++	else
++	davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr);
++
++	pr_debug("%s - %d  srgr=%X\n", __func__, __LINE__, srgr);
++	pr_debug("%s - %d  xcr=%X\n", __func__, __LINE__, xcr);
++	pr_debug("%s - %d  rcr=%X\n", __func__, __LINE__, rcr);
++#endif
++
++
++	return 0;
++}
++
++static int davinci_i2s_prepare(struct snd_pcm_substream *substream,
++		struct snd_soc_dai *dai) {
++
++//	struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
++//	int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
++//	davinci_mcbsp_stop(dev, playback);
++	return 0;
++}
++static struct {
++	spinlock_t lock;
++	void __iomem *regs;
++	struct clk *clk;
++	unsigned long in_use;
++	unsigned long next_heartbeat;
++	struct timer_list timer;
++	int expect_close;
++} dw_i2s;
++#define	I2S_IOCTL_BASE	'W'
++#define	I2S_GETSUPPORT	_IOR(I2S_IOCTL_BASE, 3, int)
++typedef unsigned short UINT16;
++typedef unsigned int uint32;
++//#define BASEADDR_I2S_REG                    dw_i2s.regs
++#define	OFFSET_I2S_IER                      0x0000
++#define	OFFSET_I2S_IRER                     0x0004
++#define	OFFSET_I2S_ITER                     0x0008
++#define	OFFSET_I2S_CER                      0x000c
++#define	OFFSET_I2S_CCR                      0x0010
++#define	OFFSET_I2S_RXFFR                    0x0014
++#define	OFFSET_I2S_TXFFR                    0x0018
++#define	OFFSET_I2S_LRBR0                    0x0020
++#define	OFFSET_I2S_LRBR1                    0x0060
++#define	OFFSET_I2S_LRBR2                    0x00A0
++#define	OFFSET_I2S_LRBR3                    0x00E0
++#define	OFFSET_I2S_LTHR0                    0x0020
++#define	OFFSET_I2S_LTHR1                    0x0060
++#define	OFFSET_I2S_LTHR2                    0x00A0
++#define OFFSET_I2S_LTHR3                    0x00E0
++#define	OFFSET_I2S_RRBR0                    0x0024
++#define	OFFSET_I2S_RRBR1                    0x0064
++#define	OFFSET_I2S_RRBR2                    0x00A4
++#define	OFFSET_I2S_RRBR3                    0x00E4
++#define	OFFSET_I2S_RTHR0                    0x0024
++#define	OFFSET_I2S_RTHR1                    0x0064
++#define	OFFSET_I2S_RTHR2                    0x00A4
++#define	OFFSET_I2S_RTHR3                    0x00E4
++#define	OFFSET_I2S_RER0                     0x0028
++#define	OFFSET_I2S_RER1                     0x0068
++#define OFFSET_I2S_RER2                     0x00A8
++#define OFFSET_I2S_RER3                     0x00E8
++#define	OFFSET_I2S_TER0                     0x002C
++#define	OFFSET_I2S_TER1                     0x006C
++#define	OFFSET_I2S_TER2                     0x00AC
++#define	OFFSET_I2S_TER3                     0x00EC
++#define	OFFSET_I2S_RCR0                     0x0030
++#define	OFFSET_I2S_RCR1                     0x0070
++#define	OFFSET_I2S_RCR2                     0x00B0
++#define	OFFSET_I2S_RCR3                     0x00F0
++#define	OFFSET_I2S_TCR0                     0x0034
++#define	OFFSET_I2S_TCR1                     0x0074
++#define	OFFSET_I2S_TCR2                     0x00B4
++#define	OFFSET_I2S_TCR3                     0x00F4
++#define	OFFSET_I2S_ISR0                     0x0038
++#define	OFFSET_I2S_ISR1                     0x0078
++#define	OFFSET_I2S_ISR2                     0x00B8
++#define	OFFSET_I2S_ISR3                     0x00F8
++#define	OFFSET_I2S_IMR0                     0x003C
++#define	OFFSET_I2S_IMR1                     0x007C
++#define	OFFSET_I2S_IMR2                     0x00BC
++#define	OFFSET_I2S_IMR3                     0x00FC
++#define	OFFSET_I2S_ROR0                     0x0040
++#define OFFSET_I2S_ROR1                     0x0080
++#define	OFFSET_I2S_ROR2                     0x00C0
++#define	OFFSET_I2S_ROR3                     0x0100
++#define	OFFSET_I2S_TOR0                     0x0044
++#define	OFFSET_I2S_TOR1                     0x0084
++#define	OFFSET_I2S_TOR2                     0x00C4
++#define	OFFSET_I2S_TOR3                     0x0104
++#define	OFFSET_I2S_RFCR0                    0x0048
++#define	OFFSET_I2S_RFCR1                    0x0088
++#define	OFFSET_I2S_RFCR2                    0x00C8
++#define	OFFSET_I2S_RFCR3                    0x0108
++#define OFFSET_I2S_TFCR0                    0x004C
++#define OFFSET_I2S_TFCR1                    0x008C
++#define	OFFSET_I2S_TFCR2                    0x00CC
++#define	OFFSET_I2S_TFCR3                    0x010C
++#define	OFFSET_I2S_RFF0                     0x0050
++#define	OFFSET_I2S_RFF1                     0x0090
++#define	OFFSET_I2S_RFF2                     0x00D4
++#define	OFFSET_I2S_RFF3                     0x0110
++#define	OFFSET_I2S_TFF0                     0x0054
++#define OFFSET_I2S_TFF1                     0x0094
++#define OFFSET_I2S_TFF2                     0x00D4
++#define	OFFSET_I2S_TFF3                     0x0114
++#define	OFFSET_I2S_RXDMA                    0x01C0
++#define	OFFSET_I2S_RRXDMA                   0x01C4
++#define	OFFSET_I2S_TXDMA                    0x01C8
++#define	OFFSET_I2S_RTXDMA                   0x01CC
++#define	OFFSET_I2S_COMP_PARAM_2             0x01f0
++#define	OFFSET_I2S_COMP_PARAM_1             0x01f4
++#define	OFFSET_I2S_COMP_VERSION             0x01f8
++#define	OFFSET_I2S_COMP_TYPE                0x01fc
++#define RESOLUTION12
++#define write_reg(addr, reg)	((*((volatile uint32 *)(addr))) = (uint32)(reg))
++#define read_reg(addr)		(*((volatile uint32 *)(addr)))
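++
++/*
++ * Enable one direction of the DesignWare I2S block: program the channel
++ * and FIFO-threshold registers, flush the FIFO, unmask the data
++ * interrupt (bit 4 for TX-empty, bit 0 for RX-available) while leaving
++ * the overrun bits (1 and 5) masked, and enable the channel.
++ */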
++static int dw_i2s_action(void *base, int channel)
++{
++	int temp;
++	unsigned int i2s_base = (unsigned int)base;	/* 32-bit SoC: MMIO cookie fits an int */
++
++	temp = read_reg(OFFSET_I2S_IMR0 + i2s_base);
++	if (SNDRV_PCM_STREAM_PLAYBACK == channel) {
++		write_reg(OFFSET_I2S_TCR0 + i2s_base, 0x4);
++		write_reg(OFFSET_I2S_TFCR0 + i2s_base, 0x10);
++		write_reg(OFFSET_I2S_ITER + i2s_base, 0x01);
++		write_reg(OFFSET_I2S_TXFFR + i2s_base, 1);
++		temp &= ~(1 << 4);	/* unmask TX FIFO empty */
++		temp |= (1 << 1);
++		temp |= (1 << 5);
++		write_reg(OFFSET_I2S_TER0 + i2s_base, 1);
++	} else {
++		write_reg(OFFSET_I2S_IRER + i2s_base, 0x01);
++		write_reg(OFFSET_I2S_RCR0 + i2s_base, 0x4);
++		write_reg(OFFSET_I2S_RFCR0 + i2s_base, I2S_FIFO_LEN_RX);
++		write_reg(OFFSET_I2S_RXFFR + i2s_base, 1);
++		temp &= ~(1 << 0);	/* unmask RX data available */
++		temp |= (1 << 1);
++		temp |= (1 << 5);
++		write_reg(OFFSET_I2S_RER0 + i2s_base, 1);
++	}
++	write_reg(OFFSET_I2S_IMR0 + i2s_base, temp);	/* interrupt mask */
++	return 0;
++}
++
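++/*
++ * One-time codec setup over I2C. Per the inline comment, register 0x0
++ * selects an 8 kHz sample rate; the remaining writes appear to be
++ * board-specific defaults for this codec.
++ */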
++static void codec_config(void)
++{
++		i2c_write_codec(0x0, 0x44);//set 8K sample
++		i2c_write_codec(0x9, 0x2);
++		i2c_write_codec(0x4, 0x10);
++		i2c_write_codec(0x1, 0x3c);
++		i2c_write_codec(0x5, 0x5);
++		i2c_write_codec(0x7, 0xe6);
++		i2c_write_codec(0x2, 0x14);
++		i2c_write_codec(0x8, 0x38);
++		i2c_write_codec(0xf, 0x1b);
++		i2c_write_codec(0x10, 0x1b);
++}
++
++static int s6000_i2s_start_channel(struct s6000_i2s_dev *dev, int channel)
++{
++	dw_i2s_action(dev->base, channel);
++	return 0;
++}
++
++static void s6000_i2s_start(struct snd_pcm_substream *substream)
++{
++	struct snd_soc_pcm_runtime *rtd = substream->private_data;
++	struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
++
++	s6000_i2s_start_channel(dev, substream->stream);
++}
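++
++/*
++ * Counterpart of dw_i2s_action(): disable the channel and mask its data
++ * interrupt so no further FIFO work is scheduled.
++ */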
++static int s6000_i2s_stop_channel(struct s6000_i2s_dev *dev, int channel)
++{
++	int temp;
++
++	temp = read_reg(OFFSET_I2S_IMR0 + dev->base);
++	if (SNDRV_PCM_STREAM_PLAYBACK == channel) {
++		write_reg(OFFSET_I2S_TER0 + dev->base, 0);
++		temp |= (1 << 4);
++		temp |= (1 << 1);
++		temp |= (1 << 5);
++		write_reg(OFFSET_I2S_IMR0 + dev->base, temp);	/* interrupt mask */
++	} else {
++		write_reg(OFFSET_I2S_RER0 + dev->base, 0);
++		temp |= (1 << 0);
++		temp |= (1 << 1);
++		temp |= (1 << 5);
++		write_reg(OFFSET_I2S_IMR0 + dev->base, temp);	/* interrupt mask */
++	}
++	return 0;
++}
++
++static void s6000_i2s_stop(struct snd_pcm_substream *substream)
++{
++	struct snd_soc_pcm_runtime *rtd = substream->private_data;
++	struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
++
++	s6000_i2s_stop_channel(dev, substream->stream);
++}
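++
++/* PCM trigger: start/resume enables the stream's channel, stop/suspend disables it. */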
++static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
++		int after)
++{
++	switch (cmd) {
++	case SNDRV_PCM_TRIGGER_START:
++	case SNDRV_PCM_TRIGGER_RESUME:
++	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++		s6000_i2s_start(substream);
++		break;
++	case SNDRV_PCM_TRIGGER_STOP:
++	case SNDRV_PCM_TRIGGER_SUSPEND:
++	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++		/* if (!after) */
++		s6000_i2s_stop(substream);
++		break;
++	}
++	return 0;
++}
++
++static int davinci_i2s_startup(struct snd_pcm_substream *substream,
++		struct snd_soc_dai *dai) {
++//	struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
++//
++//	snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
++	return 0;
++}
++
++static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
++		struct snd_soc_dai *dai) {
++//	struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
++//	int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
++//	davinci_mcbsp_stop(dev, playback);
++}
++
++#define DAVINCI_I2S_RATES	SNDRV_PCM_RATE_8000_96000
++
++static struct snd_soc_dai_ops davinci_i2s_dai_ops = {
++	.startup	= davinci_i2s_startup,
++	.shutdown	= davinci_i2s_shutdown,
++	.prepare	= davinci_i2s_prepare,
++	.trigger	= davinci_i2s_trigger,
++	.hw_params	= davinci_i2s_hw_params,
++	.set_fmt	= davinci_i2s_set_dai_fmt,
++	.set_clkdiv	= davinci_i2s_dai_set_clkdiv,
++};
++int s6000_i2s_dai_probe(struct snd_soc_dai *dai)
++{
++	struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
++	struct s6000_snd_platform_data *pdata = dai->dev->platform_data;
++
++	dai->capture_dma_data = &dev->dma_params;
++	dai->playback_dma_data = &dev->dma_params;
++	dev->dma_params.trigger = davinci_i2s_trigger;
++//	dev->wide = pdata->wide;
++//	dev->channel_in = pdata->channel_in;
++//	dev->channel_out = pdata->channel_out;
++//	dev->lines_in = pdata->lines_in;
++//	dev->lines_out = pdata->lines_out;
++	dev->dma_params.sif_in = 0xf0901c0;
++	dev->dma_params.sif_out = 0xf0901c8;
++	return 0;
++}
++static struct snd_soc_dai_driver davinci_i2s_dai = {
++	.probe = s6000_i2s_dai_probe,
++	.playback = {
++		.channels_min	= 2,
++		.channels_max	= 2,
++		.rates		= DAVINCI_I2S_RATES,
++		.formats	= SNDRV_PCM_FMTBIT_S16_LE,
++	},
++	.capture = {
++		.channels_min	= 2,
++		.channels_max	= 2,
++		.rates		= DAVINCI_I2S_RATES,
++		.formats	= SNDRV_PCM_FMTBIT_S16_LE,
++	},
++	.ops = &davinci_i2s_dai_ops,
++};
++/* work queue functions */
++
++
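++/*
++ * Drain up to I2S_FIFO_LEN_RX sample pairs from the RX FIFO (only the
++ * right-channel word is kept), pass them to the PCM layer through
++ * i2s_irq_enquen(), then re-enable the RX interrupt masked by the IRQ
++ * handler.
++ */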
++void capture_work_func(struct work_struct *work)
++{
++	uint32 count_data, i, audio_data, temp;
++	uint32 buff[I2S_FIFO_LEN_RX];
++
++	for (count_data = 0; count_data < I2S_FIFO_LEN_RX; count_data++) {
++		audio_data = read_reg(OFFSET_I2S_RRBR0 + g_i2s_base);
++		i = read_reg(OFFSET_I2S_LRBR0 + g_i2s_base);	/* drain the left sample */
++		buff[count_data] = audio_data;
++	}
++	i2s_irq_enquen(SNDRV_PCM_STREAM_CAPTURE, (u8 *)buff, count_data << 2, 0);
++	temp = read_reg(OFFSET_I2S_IMR0 + g_i2s_base);
++	temp &= ~(1 << 0);
++	write_reg(OFFSET_I2S_IMR0 + g_i2s_base, temp);
++}
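++
++/*
++ * Fetch one FIFO's worth of playback data from the PCM layer and write
++ * the same word to both channel holding registers, then re-enable the
++ * TX-empty interrupt.
++ */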
++void playback_work_func(struct work_struct *work)
++{
++	uint32 count_data, temp;
++	uint32 buff[I2S_FIFO_LEN_TX];
++
++	i2s_irq_enquen(SNDRV_PCM_STREAM_PLAYBACK, (u8 *)buff, I2S_FIFO_LEN_TX << 2, 0);
++	for (count_data = 0; count_data < I2S_FIFO_LEN_TX; count_data++) {
++		write_reg(OFFSET_I2S_RTHR0 + g_i2s_base, buff[count_data]);
++		write_reg(OFFSET_I2S_LTHR0 + g_i2s_base, buff[count_data]);
++	}
++	temp = read_reg(OFFSET_I2S_IMR0 + g_i2s_base);
++	temp &= ~(1 << 4);
++	write_reg(OFFSET_I2S_IMR0 + g_i2s_base, temp);
++}
++
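++/*
++ * Interrupt top half: on TX-empty (ISR bit 4) or RX-available (ISR bit
++ * 0), mask the corresponding interrupt and defer the FIFO work to the
++ * work items initialised in probe.
++ */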
++static irqreturn_t davinci_i2s_irq(int irq, void *data)
++{
++	uint32 irq_data, temp;
++	struct s6000_i2s_dev *fdev = data;
++
++	irq_data = read_reg(OFFSET_I2S_ISR0 + fdev->base);
++	temp = read_reg(OFFSET_I2S_IMR0 + fdev->base);
++	if ((irq_data & 0x10) && !(temp & (1 << 4))) {
++		temp |= (1 << 4);
++		write_reg(OFFSET_I2S_IMR0 + fdev->base, temp);
++		schedule_work(&tx_md->my_work);
++	}
++	if ((irq_data & 0x01) && !(temp & (1 << 0))) {
++		temp |= (1 << 0);
++		write_reg(OFFSET_I2S_IMR0 + fdev->base, temp);
++		schedule_work(&rx_md->my_work);
++	}
++	return IRQ_HANDLED;
++}
++
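++/* Enable the I2S block, program the clock configuration and start the clock generator. */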
++static void i2s_config(void)
++{
++	write_reg(OFFSET_I2S_IER + g_i2s_base, 0x01);	/* I2S enable */
++	write_reg(OFFSET_I2S_CCR + g_i2s_base, 0x8);
++	write_reg(OFFSET_I2S_CER + g_i2s_base, 0x01);
++}
++static int davinci_i2s_probe(struct platform_device *pdev)
++{
++	struct snd_platform_data *pdata = pdev->dev.platform_data;
++	struct s6000_i2s_dev *dev;
++	struct resource *mem, *ioarea;
++	int ret;
++
++	if (!pdata)
++		return -EINVAL;
++
++	rx_md = init_data(rx_md, rx_work_queue);
++	tx_md = init_data(tx_md, tx_work_queue);
++	if (!rx_md || !tx_md)
++		return -ENOMEM;
++	INIT_WORK(&rx_md->my_work, capture_work_func);
++	INIT_WORK(&tx_md->my_work, playback_work_func);
++
++	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!mem) {
++		dev_err(&pdev->dev, "no mem resource?\n");
++		return -ENODEV;
++	}
++
++	ioarea = request_mem_region(mem->start, resource_size(mem), pdev->name);
++	if (!ioarea) {
++		dev_err(&pdev->dev, "I2S region already claimed\n");
++		return -EBUSY;
++	}
++
++	dev = kzalloc(sizeof(struct s6000_i2s_dev), GFP_KERNEL);
++	if (!dev) {
++		ret = -ENOMEM;
++		goto err_release_region;
++	}
++
++	dev->irq = platform_get_irq(pdev, 0);
++	ret = request_irq(dev->irq, davinci_i2s_irq, IRQF_DISABLED,
++			pdev->name, dev);
++	if (ret) {
++		dev_err(&pdev->dev, "request irq failed\n");
++		goto err_free_mem;
++	}
++
++	dev->base = ioremap(mem->start, resource_size(mem));
++	if (!dev->base) {
++		dev_err(&pdev->dev, "ioremap failed\n");
++		ret = -ENOMEM;
++		goto err_free_irq;
++	}
++	g_i2s_base = (int)dev->base;
++	rx_md->base = dev->base;
++	tx_md->base = rx_md->base;
++
++	dev->dev = &pdev->dev;
++	dev_set_drvdata(&pdev->dev, dev);
++
++	ret = snd_soc_register_dai(&pdev->dev, &davinci_i2s_dai);
++	if (ret != 0)
++		goto err_iounmap;
++
++	codec_config();
++	i2s_config();
++
++	return 0;
++
++err_iounmap:
++	iounmap(dev->base);
++err_free_irq:
++	free_irq(dev->irq, dev);
++err_free_mem:
++	kfree(dev);
++err_release_region:
++	release_mem_region(mem->start, resource_size(mem));
++	return ret;
++}
++
++static int davinci_i2s_remove(struct platform_device *pdev)
++{
++	struct s6000_i2s_dev *dev = dev_get_drvdata(&pdev->dev);
++	struct resource *mem;
++
++	snd_soc_unregister_dai(&pdev->dev);
++//	clk_disable(dev->clk);
++//	clk_put(dev->clk);
++//	dev->clk = NULL;
++	free_irq(dev->irq, dev);
++	iounmap(dev->base);
++	kfree(dev);
++	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	release_mem_region(mem->start, resource_size(mem));
++
++	return 0;
++}
++
++static struct platform_driver davinci_mcbsp_driver = {
++	.probe = davinci_i2s_probe,
++	.remove = davinci_i2s_remove,
++	.driver = {
++		.name = "s6000-i2s",
++		.owner = THIS_MODULE,
++	},
++};
++
++static int __init davinci_i2s_init(void)
++{
++	return platform_driver_register(&davinci_mcbsp_driver);
++}
++module_init(davinci_i2s_init);
++
++static void __exit davinci_i2s_exit(void)
++{
++	platform_driver_unregister(&davinci_mcbsp_driver);
++}
++module_exit(davinci_i2s_exit);
++
++MODULE_AUTHOR("Vladimir Barinov");
++MODULE_DESCRIPTION("TI DAVINCI I2S (McBSP) SoC Interface");
++MODULE_LICENSE("GPL");
+diff --git a/sound/soc/dwc/fh_i2s.h b/sound/soc/dwc/fh_i2s.h
+new file mode 100644
+index 00000000..86aa1921
+--- /dev/null
++++ b/sound/soc/dwc/fh_i2s.h
+@@ -0,0 +1,23 @@
++/*
++ * ALSA SoC I2S Audio Layer for the Stretch s6000 family
++ *
++ * Author:      Daniel Gloeckner, <dg@emlix.com>
++ * Copyright:   (C) 2009 emlix GmbH <info@emlix.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _S6000_I2S_H
++#define _S6000_I2S_H
++
++struct s6000_snd_platform_data {
++	int lines_in;
++	int lines_out;
++	int channel_in;
++	int channel_out;
++	int wide;
++	int same_rate;
++};
++#endif
+diff --git a/sound/soc/dwc/fh_i2s_dai.c b/sound/soc/dwc/fh_i2s_dai.c
+new file mode 100644
+index 00000000..28f8ea67
+--- /dev/null
++++ b/sound/soc/dwc/fh_i2s_dai.c
+@@ -0,0 +1,1003 @@
++/*
++ * ALSA SoC McASP Audio Layer for TI DAVINCI processor
++ *
++ * Multi-channel Audio Serial Port Driver
++ *
++ * Author: Nirmal Pandey <n-pandey@ti.com>,
++ *         Suresh Rajashekara <suresh.r@ti.com>
++ *         Steve Chen <schen@.mvista.com>
++ *
++ * Copyright:   (C) 2009 MontaVista Software, Inc., <source@mvista.com>
++ * Copyright:   (C) 2009  Texas Instruments, India
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/clk.h>
++
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/initval.h>
++#include <sound/soc.h>
++
++#include "fullhan-pcm.h"
++#include "fh_i2s_dai.h"
++
++/*
++ * McASP register definitions
++ */
++#define DAVINCI_MCASP_PID_REG		0x00
++#define DAVINCI_MCASP_PWREMUMGT_REG	0x04
++
++#define DAVINCI_MCASP_PFUNC_REG		0x10
++#define DAVINCI_MCASP_PDIR_REG		0x14
++#define DAVINCI_MCASP_PDOUT_REG		0x18
++#define DAVINCI_MCASP_PDSET_REG		0x1c
++
++#define DAVINCI_MCASP_PDCLR_REG		0x20
++
++#define DAVINCI_MCASP_TLGC_REG		0x30
++#define DAVINCI_MCASP_TLMR_REG		0x34
++
++#define DAVINCI_MCASP_GBLCTL_REG	0x44
++#define DAVINCI_MCASP_AMUTE_REG		0x48
++#define DAVINCI_MCASP_LBCTL_REG		0x4c
++
++#define DAVINCI_MCASP_TXDITCTL_REG	0x50
++
++#define DAVINCI_MCASP_GBLCTLR_REG	0x60
++#define DAVINCI_MCASP_RXMASK_REG	0x64
++#define DAVINCI_MCASP_RXFMT_REG		0x68
++#define DAVINCI_MCASP_RXFMCTL_REG	0x6c
++
++#define DAVINCI_MCASP_ACLKRCTL_REG	0x70
++#define DAVINCI_MCASP_AHCLKRCTL_REG	0x74
++#define DAVINCI_MCASP_RXTDM_REG		0x78
++#define DAVINCI_MCASP_EVTCTLR_REG	0x7c
++
++#define DAVINCI_MCASP_RXSTAT_REG	0x80
++#define DAVINCI_MCASP_RXTDMSLOT_REG	0x84
++#define DAVINCI_MCASP_RXCLKCHK_REG	0x88
++#define DAVINCI_MCASP_REVTCTL_REG	0x8c
++
++#define DAVINCI_MCASP_GBLCTLX_REG	0xa0
++#define DAVINCI_MCASP_TXMASK_REG	0xa4
++#define DAVINCI_MCASP_TXFMT_REG		0xa8
++#define DAVINCI_MCASP_TXFMCTL_REG	0xac
++
++#define DAVINCI_MCASP_ACLKXCTL_REG	0xb0
++#define DAVINCI_MCASP_AHCLKXCTL_REG	0xb4
++#define DAVINCI_MCASP_TXTDM_REG		0xb8
++#define DAVINCI_MCASP_EVTCTLX_REG	0xbc
++
++#define DAVINCI_MCASP_TXSTAT_REG	0xc0
++#define DAVINCI_MCASP_TXTDMSLOT_REG	0xc4
++#define DAVINCI_MCASP_TXCLKCHK_REG	0xc8
++#define DAVINCI_MCASP_XEVTCTL_REG	0xcc
++
++/* Left(even TDM Slot) Channel Status Register File */
++#define DAVINCI_MCASP_DITCSRA_REG	0x100
++/* Right(odd TDM slot) Channel Status Register File */
++#define DAVINCI_MCASP_DITCSRB_REG	0x118
++/* Left(even TDM slot) User Data Register File */
++#define DAVINCI_MCASP_DITUDRA_REG	0x130
++/* Right(odd TDM Slot) User Data Register File */
++#define DAVINCI_MCASP_DITUDRB_REG	0x148
++
++/* Serializer n Control Register */
++#define DAVINCI_MCASP_XRSRCTL_BASE_REG	0x180
++#define DAVINCI_MCASP_XRSRCTL_REG(n)	(DAVINCI_MCASP_XRSRCTL_BASE_REG + \
++						(n << 2))
++
++/* Transmit Buffer for Serializer n */
++#define DAVINCI_MCASP_TXBUF_REG		0x200
++/* Receive Buffer for Serializer n */
++#define DAVINCI_MCASP_RXBUF_REG		0x280
++
++/* McASP FIFO Registers */
++#define DAVINCI_MCASP_WFIFOCTL		(0x1010)
++#define DAVINCI_MCASP_WFIFOSTS		(0x1014)
++#define DAVINCI_MCASP_RFIFOCTL		(0x1018)
++#define DAVINCI_MCASP_RFIFOSTS		(0x101C)
++
++/*
++ * DAVINCI_MCASP_PWREMUMGT_REG - Power Down and Emulation Management
++ *     Register Bits
++ */
++#define MCASP_FREE	BIT(0)
++#define MCASP_SOFT	BIT(1)
++
++/*
++ * DAVINCI_MCASP_PFUNC_REG - Pin Function / GPIO Enable Register Bits
++ */
++#define AXR(n)		(1<<n)
++#define PFUNC_AMUTE	BIT(25)
++#define ACLKX		BIT(26)
++#define AHCLKX		BIT(27)
++#define AFSX		BIT(28)
++#define ACLKR		BIT(29)
++#define AHCLKR		BIT(30)
++#define AFSR		BIT(31)
++
++/*
++ * DAVINCI_MCASP_PDIR_REG - Pin Direction Register Bits
++ */
++#define AXR(n)		(1<<n)
++#define PDIR_AMUTE	BIT(25)
++#define ACLKX		BIT(26)
++#define AHCLKX		BIT(27)
++#define AFSX		BIT(28)
++#define ACLKR		BIT(29)
++#define AHCLKR		BIT(30)
++#define AFSR		BIT(31)
++
++/*
++ * DAVINCI_MCASP_TXDITCTL_REG - Transmit DIT Control Register Bits
++ */
++#define DITEN	BIT(0)	/* Transmit DIT mode enable/disable */
++#define VA	BIT(2)
++#define VB	BIT(3)
++
++/*
++ * DAVINCI_MCASP_TXFMT_REG - Transmit Bitstream Format Register Bits
++ */
++#define TXROT(val)	(val)
++#define TXSEL		BIT(3)
++#define TXSSZ(val)	(val<<4)
++#define TXPBIT(val)	(val<<8)
++#define TXPAD(val)	(val<<13)
++#define TXORD		BIT(15)
++#define FSXDLY(val)	(val<<16)
++
++/*
++ * DAVINCI_MCASP_RXFMT_REG - Receive Bitstream Format Register Bits
++ */
++#define RXROT(val)	(val)
++#define RXSEL		BIT(3)
++#define RXSSZ(val)	(val<<4)
++#define RXPBIT(val)	(val<<8)
++#define RXPAD(val)	(val<<13)
++#define RXORD		BIT(15)
++#define FSRDLY(val)	(val<<16)
++
++/*
++ * DAVINCI_MCASP_TXFMCTL_REG -  Transmit Frame Control Register Bits
++ */
++#define FSXPOL		BIT(0)
++#define AFSXE		BIT(1)
++#define FSXDUR		BIT(4)
++#define FSXMOD(val)	(val<<7)
++
++/*
++ * DAVINCI_MCASP_RXFMCTL_REG - Receive Frame Control Register Bits
++ */
++#define FSRPOL		BIT(0)
++#define AFSRE		BIT(1)
++#define FSRDUR		BIT(4)
++#define FSRMOD(val)	(val<<7)
++
++/*
++ * DAVINCI_MCASP_ACLKXCTL_REG - Transmit Clock Control Register Bits
++ */
++#define ACLKXDIV(val)	(val)
++#define ACLKXE		BIT(5)
++#define TX_ASYNC	BIT(6)
++#define ACLKXPOL	BIT(7)
++
++/*
++ * DAVINCI_MCASP_ACLKRCTL_REG Receive Clock Control Register Bits
++ */
++#define ACLKRDIV(val)	(val)
++#define ACLKRE		BIT(5)
++#define RX_ASYNC	BIT(6)
++#define ACLKRPOL	BIT(7)
++
++/*
++ * DAVINCI_MCASP_AHCLKXCTL_REG - High Frequency Transmit Clock Control
++ *     Register Bits
++ */
++#define AHCLKXDIV(val)	(val)
++#define AHCLKXPOL	BIT(14)
++#define AHCLKXE		BIT(15)
++
++/*
++ * DAVINCI_MCASP_AHCLKRCTL_REG - High Frequency Receive Clock Control
++ *     Register Bits
++ */
++#define AHCLKRDIV(val)	(val)
++#define AHCLKRPOL	BIT(14)
++#define AHCLKRE		BIT(15)
++
++/*
++ * DAVINCI_MCASP_XRSRCTL_BASE_REG -  Serializer Control Register Bits
++ */
++#define MODE(val)	(val)
++#define DISMOD(val)	((val) << 2)
++#define TXSTATE		BIT(4)
++#define RXSTATE		BIT(5)
++
++/*
++ * DAVINCI_MCASP_LBCTL_REG - Loop Back Control Register Bits
++ */
++#define LBEN		BIT(0)
++#define LBORD		BIT(1)
++#define LBGENMODE(val)	(val<<2)
++
++/*
++ * DAVINCI_MCASP_TXTDMSLOT_REG - Transmit TDM Slot Register configuration
++ */
++#define TXTDMS(n)	(1<<n)
++
++/*
++ * DAVINCI_MCASP_RXTDMSLOT_REG - Receive TDM Slot Register configuration
++ */
++#define RXTDMS(n)	(1<<n)
++
++/*
++ * DAVINCI_MCASP_GBLCTL_REG -  Global Control Register Bits
++ */
++#define RXCLKRST	BIT(0)	/* Receiver Clock Divider Reset */
++#define RXHCLKRST	BIT(1)	/* Receiver High Frequency Clock Divider */
++#define RXSERCLR	BIT(2)	/* Receiver Serializer Clear */
++#define RXSMRST		BIT(3)	/* Receiver State Machine Reset */
++#define RXFSRST		BIT(4)	/* Frame Sync Generator Reset */
++#define TXCLKRST	BIT(8)	/* Transmitter Clock Divider Reset */
++#define TXHCLKRST	BIT(9)	/* Transmitter High Frequency Clock Divider*/
++#define TXSERCLR	BIT(10)	/* Transmit Serializer Clear */
++#define TXSMRST		BIT(11)	/* Transmitter State Machine Reset */
++#define TXFSRST		BIT(12)	/* Frame Sync Generator Reset */
++
++/*
++ * DAVINCI_MCASP_AMUTE_REG -  Mute Control Register Bits
++ */
++#define MUTENA(val)	(val)
++#define MUTEINPOL	BIT(2)
++#define MUTEINENA	BIT(3)
++#define MUTEIN		BIT(4)
++#define MUTER		BIT(5)
++#define MUTEX		BIT(6)
++#define MUTEFSR		BIT(7)
++#define MUTEFSX		BIT(8)
++#define MUTEBADCLKR	BIT(9)
++#define MUTEBADCLKX	BIT(10)
++#define MUTERXDMAERR	BIT(11)
++#define MUTETXDMAERR	BIT(12)
++
++/*
++ * DAVINCI_MCASP_REVTCTL_REG - Receiver DMA Event Control Register bits
++ */
++#define RXDATADMADIS	BIT(0)
++
++/*
++ * DAVINCI_MCASP_XEVTCTL_REG - Transmitter DMA Event Control Register bits
++ */
++#define TXDATADMADIS	BIT(0)
++
++/*
++ * DAVINCI_MCASP_W[R]FIFOCTL - Write/Read FIFO Control Register bits
++ */
++#define FIFO_ENABLE	BIT(16)
++#define NUMEVT_MASK	(0xFF << 8)
++#define NUMDMA_MASK	(0xFF)
++
++#define DAVINCI_MCASP_NUM_SERIALIZER	16
++
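++/*
++ * Plain register accessors built on __raw_readl()/__raw_writel().  The
++ * read-modify-write helpers are not atomic; callers are expected to
++ * provide their own serialization where it matters.
++ */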
++static inline void mcasp_set_bits(void __iomem *reg, u32 val)
++{
++	__raw_writel(__raw_readl(reg) | val, reg);
++}
++
++static inline void mcasp_clr_bits(void __iomem *reg, u32 val)
++{
++	__raw_writel((__raw_readl(reg) & ~(val)), reg);
++}
++
++static inline void mcasp_mod_bits(void __iomem *reg, u32 val, u32 mask)
++{
++	__raw_writel((__raw_readl(reg) & ~mask) | val, reg);
++}
++
++static inline void mcasp_set_reg(void __iomem *reg, u32 val)
++{
++	__raw_writel(val, reg);
++}
++
++static inline u32 mcasp_get_reg(void __iomem *reg)
++{
++	return (unsigned int)__raw_readl(reg);
++}
++
++static inline void mcasp_set_ctl_reg(void __iomem *regs, u32 val)
++{
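++	/* NOTE: the GBLCTL write-and-verify sequence from the DaVinci
++	 * driver appears deliberately stubbed out in this port; the
++	 * original code is kept below for reference. */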
++//	int i = 0;
++//
++//	mcasp_set_bits(regs, val);
++//
++//	/* programming GBLCTL needs to read back from GBLCTL and verify */
++//	/* loop count is to avoid the lock-up */
++//	for (i = 0; i < 1000; i++) {
++//		if ((mcasp_get_reg(regs) & val) == val)
++//			break;
++//	}
++//
++//	if (i == 1000 && ((mcasp_get_reg(regs) & val) != val))
++//		printk(KERN_ERR "GBLCTL write error\n");
++}
++
++static void mcasp_start_rx(struct davinci_audio_dev *dev)
++{
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXCLKRST);
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSERCLR);
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_RXBUF_REG, 0);
++//
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_RXBUF_REG, 0);
++//
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
++}
++
++static void mcasp_start_tx(struct davinci_audio_dev *dev)
++{
++//	u8 offset = 0, i;
++//	u32 cnt;
++//
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXSERCLR);
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
++//
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
++//	mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
++//	for (i = 0; i < dev->num_serializer; i++) {
++//		if (dev->serial_dir[i] == TX_MODE) {
++//			offset = i;
++//			break;
++//		}
++//	}
++//
++//	/* wait for TX ready */
++//	cnt = 0;
++//	while (!(mcasp_get_reg(dev->base + DAVINCI_MCASP_XRSRCTL_REG(offset)) &
++//		 TXSTATE) && (cnt < 100000))
++//		cnt++;
++//
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
++}
++
++static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
++{
++//	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
++//		if (dev->txnumevt)	/* enable FIFO */
++//			mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
++//								FIFO_ENABLE);
++//		mcasp_start_tx(dev);
++//	} else {
++//		if (dev->rxnumevt)	/* enable FIFO */
++//			mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
++//								FIFO_ENABLE);
++//		mcasp_start_rx(dev);
++//	}
++}
++
++static void mcasp_stop_rx(struct davinci_audio_dev *dev)
++{
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, 0);
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
++}
++
++static void mcasp_stop_tx(struct davinci_audio_dev *dev)
++{
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, 0);
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
++}
++
++static void davinci_mcasp_stop(struct davinci_audio_dev *dev, int stream)
++{
++//	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
++//		if (dev->txnumevt)	/* disable FIFO */
++//			mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
++//								FIFO_ENABLE);
++//		mcasp_stop_tx(dev);
++//	} else {
++//		if (dev->rxnumevt)	/* disable FIFO */
++//			mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
++//								FIFO_ENABLE);
++//		mcasp_stop_rx(dev);
++//	}
++}
++
++static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
++					 unsigned int fmt)
++{
++//	struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
++//	void __iomem *base = dev->base;
++//
++//	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
++//	case SND_SOC_DAIFMT_CBS_CFS:
++//		/* codec is clock and frame slave */
++//		mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
++//		mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
++//
++//		mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
++//		mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
++//
++//		mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
++//				ACLKX | AHCLKX | AFSX);
++//		break;
++//	case SND_SOC_DAIFMT_CBM_CFS:
++//		/* codec is clock master and frame slave */
++//		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
++//		mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
++//
++//		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
++//		mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
++//
++//		mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG,
++//				ACLKX | ACLKR);
++//		mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
++//				AFSX | AFSR);
++//		break;
++//	case SND_SOC_DAIFMT_CBM_CFM:
++//		/* codec is clock and frame master */
++//		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
++//		mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
++//
++//		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
++//		mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
++//
++//		mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG,
++//				ACLKX | AHCLKX | AFSX | ACLKR | AHCLKR | AFSR);
++//		break;
++//
++//	default:
++//		return -EINVAL;
++//	}
++//
++//	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
++//	case SND_SOC_DAIFMT_IB_NF:
++//		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
++//		mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
++//
++//		mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
++//		mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
++//		break;
++//
++//	case SND_SOC_DAIFMT_NB_IF:
++//		mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
++//		mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
++//
++//		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
++//		mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
++//		break;
++//
++//	case SND_SOC_DAIFMT_IB_IF:
++//		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
++//		mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
++//
++//		mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
++//		mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
++//		break;
++//
++//	case SND_SOC_DAIFMT_NB_NF:
++//		mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
++//		mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
++//
++//		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
++//		mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
++//		break;
++//
++//	default:
++//		return -EINVAL;
++//	}
++
++	return 0;
++}
++
++static int davinci_config_channel_size(struct davinci_audio_dev *dev,
++				       int channel_size)
++{
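++	/* TXROT/RXROT rotate right in 4-bit units: rotate = (32 - word
++	 * length) / 4, which right-aligns the sample within the 32-bit
++	 * slot (e.g. 16-bit words use rotate = 4 below) */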
++//	u32 fmt = 0;
++//	u32 mask, rotate;
++//
++//	switch (channel_size) {
++//	case DAVINCI_AUDIO_WORD_8:
++//		fmt = 0x03;
++//		rotate = 6;
++//		mask = 0x000000ff;
++//		break;
++//
++//	case DAVINCI_AUDIO_WORD_12:
++//		fmt = 0x05;
++//		rotate = 5;
++//		mask = 0x00000fff;
++//		break;
++//
++//	case DAVINCI_AUDIO_WORD_16:
++//		fmt = 0x07;
++//		rotate = 4;
++//		mask = 0x0000ffff;
++//		break;
++//
++//	case DAVINCI_AUDIO_WORD_20:
++//		fmt = 0x09;
++//		rotate = 3;
++//		mask = 0x000fffff;
++//		break;
++//
++//	case DAVINCI_AUDIO_WORD_24:
++//		fmt = 0x0B;
++//		rotate = 2;
++//		mask = 0x00ffffff;
++//		break;
++//
++//	case DAVINCI_AUDIO_WORD_28:
++//		fmt = 0x0D;
++//		rotate = 1;
++//		mask = 0x0fffffff;
++//		break;
++//
++//	case DAVINCI_AUDIO_WORD_32:
++//		fmt = 0x0F;
++//		rotate = 0;
++//		mask = 0xffffffff;
++//		break;
++//
++//	default:
++//		return -EINVAL;
++//	}
++//
++//	mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
++//					RXSSZ(fmt), RXSSZ(0x0F));
++//	mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
++//					TXSSZ(fmt), TXSSZ(0x0F));
++//	mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXROT(rotate),
++//							TXROT(7));
++//	mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(rotate),
++//							RXROT(7));
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, mask);
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, mask);
++//
++//	return 0;
++}
++
++static void davinci_hw_common_param(struct davinci_audio_dev *dev, int stream)
++{
++//	int i;
++//	u8 tx_ser = 0;
++//	u8 rx_ser = 0;
++//
++//	/* Default configuration */
++//	mcasp_set_bits(dev->base + DAVINCI_MCASP_PWREMUMGT_REG, MCASP_SOFT);
++//
++//	/* All PINS as McASP */
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_PFUNC_REG, 0x00000000);
++//
++//	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
++//		mcasp_set_reg(dev->base + DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
++//		mcasp_clr_bits(dev->base + DAVINCI_MCASP_XEVTCTL_REG,
++//				TXDATADMADIS);
++//	} else {
++//		mcasp_set_reg(dev->base + DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
++//		mcasp_clr_bits(dev->base + DAVINCI_MCASP_REVTCTL_REG,
++//				RXDATADMADIS);
++//	}
++//
++//	for (i = 0; i < dev->num_serializer; i++) {
++//		mcasp_set_bits(dev->base + DAVINCI_MCASP_XRSRCTL_REG(i),
++//					dev->serial_dir[i]);
++//		if (dev->serial_dir[i] == TX_MODE) {
++//			mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
++//					AXR(i));
++//			tx_ser++;
++//		} else if (dev->serial_dir[i] == RX_MODE) {
++//			mcasp_clr_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
++//					AXR(i));
++//			rx_ser++;
++//		}
++//	}
++//
++//	if (dev->txnumevt && stream == SNDRV_PCM_STREAM_PLAYBACK) {
++//		if (dev->txnumevt * tx_ser > 64)
++//			dev->txnumevt = 1;
++//
++//		mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, tx_ser,
++//								NUMDMA_MASK);
++//		mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
++//				((dev->txnumevt * tx_ser) << 8), NUMEVT_MASK);
++//	}
++//
++//	if (dev->rxnumevt && stream == SNDRV_PCM_STREAM_CAPTURE) {
++//		if (dev->rxnumevt * rx_ser > 64)
++//			dev->rxnumevt = 1;
++//
++//		mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, rx_ser,
++//								NUMDMA_MASK);
++//		mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
++//				((dev->rxnumevt * rx_ser) << 8), NUMEVT_MASK);
++//	}
++}
++
++static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
++{
++//	int i, active_slots;
++//	u32 mask = 0;
++//
++//	active_slots = (dev->tdm_slots > 31) ? 32 : dev->tdm_slots;
++//	for (i = 0; i < active_slots; i++)
++//		mask |= (1 << i);
++//
++//	mcasp_clr_bits(dev->base + DAVINCI_MCASP_ACLKXCTL_REG, TX_ASYNC);
++//
++//	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
++//		/* bit stream is MSB first  with no delay */
++//		/* DSP_B mode */
++//		mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG,
++//				AHCLKXE);
++//		mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask);
++//		mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD);
++//
++//		if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
++//			mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
++//					FSXMOD(dev->tdm_slots), FSXMOD(0x1FF));
++//		else
++//			printk(KERN_ERR "playback tdm slot %d not supported\n",
++//				dev->tdm_slots);
++//
++//		mcasp_clr_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
++//	} else {
++//		/* bit stream is MSB first with no delay */
++//		/* DSP_B mode */
++//		mcasp_set_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXORD);
++//		mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKRCTL_REG,
++//				AHCLKRE);
++//		mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask);
++//
++//		if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
++//			mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG,
++//					FSRMOD(dev->tdm_slots), FSRMOD(0x1FF));
++//		else
++//			printk(KERN_ERR "capture tdm slot %d not supported\n",
++//				dev->tdm_slots);
++//
++//		mcasp_clr_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
++//	}
++}
++
++/* S/PDIF */
++static void davinci_hw_dit_param(struct davinci_audio_dev *dev)
++{
++//	/* Set the PDIR for Serialiser as output */
++//	mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG, AFSX);
++//
++//	/* TXMASK for 24 bits */
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, 0x00FFFFFF);
++//
++//	/* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
++//	   and LSB first */
++//	mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
++//						TXROT(6) | TXSSZ(15));
++//
++//	/* Set TX frame synch : DIT Mode, 1 bit width, internal, rising edge */
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
++//						AFSXE | FSXMOD(0x180));
++//
++//	/* Set the TX tdm : for all the slots */
++//	mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, 0xFFFFFFFF);
++//
++//	/* Set the TX clock controls : div = 1 and internal */
++//	mcasp_set_bits(dev->base + DAVINCI_MCASP_ACLKXCTL_REG,
++//						ACLKXE | TX_ASYNC);
++//
++//	mcasp_clr_bits(dev->base + DAVINCI_MCASP_XEVTCTL_REG, TXDATADMADIS);
++//
++//	/* Only 44100 and 48000 are valid, both have the same setting */
++//	mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXDIV(3));
++//
++//	/* Enable the DIT */
++//	mcasp_set_bits(dev->base + DAVINCI_MCASP_TXDITCTL_REG, DITEN);
++}
++
++static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
++					struct snd_pcm_hw_params *params,
++					struct snd_soc_dai *cpu_dai)
++{
++//	struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
++//	struct davinci_pcm_dma_params *dma_params =
++//					&dev->dma_params[substream->stream];
++//	int word_length;
++//	u8 fifo_level;
++//
++//	davinci_hw_common_param(dev, substream->stream);
++//	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++//		fifo_level = dev->txnumevt;
++//	else
++//		fifo_level = dev->rxnumevt;
++//
++//	if (dev->op_mode == DAVINCI_MCASP_DIT_MODE)
++//		davinci_hw_dit_param(dev);
++//	else
++//		davinci_hw_param(dev, substream->stream);
++//
++//	switch (params_format(params)) {
++//	case SNDRV_PCM_FORMAT_S8:
++//		dma_params->data_type = 1;
++//		word_length = DAVINCI_AUDIO_WORD_8;
++//		break;
++//
++//	case SNDRV_PCM_FORMAT_S16_LE:
++//		dma_params->data_type = 2;
++//		word_length = DAVINCI_AUDIO_WORD_16;
++//		break;
++//
++//	case SNDRV_PCM_FORMAT_S32_LE:
++//		dma_params->data_type = 4;
++//		word_length = DAVINCI_AUDIO_WORD_32;
++//		break;
++//
++//	default:
++//		printk(KERN_WARNING "davinci-mcasp: unsupported PCM format");
++//		return -EINVAL;
++//	}
++//
++//	if (dev->version == MCASP_VERSION_2 && !fifo_level)
++//		dma_params->acnt = 4;
++//	else
++//		dma_params->acnt = dma_params->data_type;
++//
++//	dma_params->fifo_level = fifo_level;
++//	davinci_config_channel_size(dev, word_length);
++
++	return 0;
++}
++
++static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
++				     int cmd, struct snd_soc_dai *cpu_dai)
++{
++	struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
++	int ret = 0;
++
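++	/* dev->clk_active keeps the clk_enable()/clk_disable() calls
++	 * balanced across repeated RESUME/SUSPEND triggers */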
++	switch (cmd) {
++	case SNDRV_PCM_TRIGGER_RESUME:
++	case SNDRV_PCM_TRIGGER_START:
++	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++		if (!dev->clk_active) {
++			clk_enable(dev->clk);
++			dev->clk_active = 1;
++		}
++		davinci_mcasp_start(dev, substream->stream);
++		break;
++
++	case SNDRV_PCM_TRIGGER_SUSPEND:
++		davinci_mcasp_stop(dev, substream->stream);
++		if (dev->clk_active) {
++			clk_disable(dev->clk);
++			dev->clk_active = 0;
++		}
++
++		break;
++
++	case SNDRV_PCM_TRIGGER_STOP:
++	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++		davinci_mcasp_stop(dev, substream->stream);
++		break;
++
++	default:
++		ret = -EINVAL;
++	}
++
++	return ret;
++}
++
++static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
++				 struct snd_soc_dai *dai)
++{
++	struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(dai);
++
++	//snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
++	return 0;
++}
++
++static struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
++	.startup	= davinci_mcasp_startup,
++	.trigger	= davinci_mcasp_trigger,
++	.hw_params	= davinci_mcasp_hw_params,
++	.set_fmt	= davinci_mcasp_set_dai_fmt,
++};
++
++static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
++	{
++		.name		= "davinci-mcasp.0",
++		.playback	= {
++			.channels_min	= 2,
++			.channels_max	= 2,
++			.rates		= DAVINCI_MCASP_RATES,
++			.formats	= SNDRV_PCM_FMTBIT_S8 |
++					  SNDRV_PCM_FMTBIT_S16_LE |
++					  SNDRV_PCM_FMTBIT_S32_LE,
++		},
++		.capture	= {
++			.channels_min	= 2,
++			.channels_max	= 2,
++			.rates		= DAVINCI_MCASP_RATES,
++			.formats	= SNDRV_PCM_FMTBIT_S8 |
++					  SNDRV_PCM_FMTBIT_S16_LE |
++					  SNDRV_PCM_FMTBIT_S32_LE,
++		},
++		.ops		= &davinci_mcasp_dai_ops,
++	},
++	{
++		.name		= "davinci-mcasp.1",
++		.playback	= {
++			.channels_min	= 1,
++			.channels_max	= 384,
++			.rates		= DAVINCI_MCASP_RATES,
++			.formats	= SNDRV_PCM_FMTBIT_S16_LE,
++		},
++		.ops		= &davinci_mcasp_dai_ops,
++	},
++};
++
++static int davinci_mcasp_probe(struct platform_device *pdev)
++{
++	struct davinci_pcm_dma_params *dma_data;
++	struct resource *mem, *ioarea, *res;
++	struct snd_platform_data *pdata;
++	struct davinci_audio_dev *dev;
++	int ret = 0;
++
++	dev = kzalloc(sizeof(struct davinci_audio_dev), GFP_KERNEL);
++	if (!dev)
++		return -ENOMEM;
++
++	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!mem) {
++		dev_err(&pdev->dev, "no mem resource?\n");
++		ret = -ENODEV;
++		goto err_release_data;
++	}
++
++	ioarea = request_mem_region(mem->start,
++			resource_size(mem), pdev->name);
++	if (!ioarea) {
++		dev_err(&pdev->dev, "Audio region already claimed\n");
++		ret = -EBUSY;
++		goto err_release_data;
++	}
++
++	pdata = pdev->dev.platform_data;
++	dev->clk = clk_get(&pdev->dev, NULL);
++	if (IS_ERR(dev->clk)) {
++		ret = -ENODEV;
++		goto err_release_region;
++	}
++
++	clk_enable(dev->clk);
++	dev->clk_active = 1;
++
++	dev->base = ioremap(mem->start, resource_size(mem));
++	if (!dev->base) {
++		dev_err(&pdev->dev, "ioremap failed\n");
++		ret = -ENOMEM;
++		goto err_release_clk;
++	}
++
++//	dev->op_mode = pdata->op_mode;
++//	dev->tdm_slots = pdata->tdm_slots;
++//	dev->num_serializer = pdata->num_serializer;
++//	dev->serial_dir = pdata->serial_dir;
++//	dev->codec_fmt = pdata->codec_fmt;
++//	dev->version = pdata->version;
++//	dev->txnumevt = pdata->txnumevt;
++//	dev->rxnumevt = pdata->rxnumevt;
++
++//	dma_data = &dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
++//	dma_data->asp_chan_q = pdata->asp_chan_q;
++//	dma_data->ram_chan_q = pdata->ram_chan_q;
++//	dma_data->sram_size = pdata->sram_size_playback;
++//	dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
++//							mem->start);
++
++	/* first TX, then RX */
++//	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
++//	if (!res) {
++//		dev_err(&pdev->dev, "no DMA resource\n");
++//		ret = -ENODEV;
++//		goto err_iounmap;
++//	}
++
++//	dma_data->channel = res->start;
++//
++//	dma_data = &dev->dma_params[SNDRV_PCM_STREAM_CAPTURE];
++//	dma_data->asp_chan_q = pdata->asp_chan_q;
++//	dma_data->ram_chan_q = pdata->ram_chan_q;
++//	dma_data->sram_size = pdata->sram_size_capture;
++//	dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
++//							mem->start);
++
++//	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
++//	if (!res) {
++//		dev_err(&pdev->dev, "no DMA resource\n");
++//		ret = -ENODEV;
++//		goto err_iounmap;
++//	}
++
++//	dma_data->channel = res->start;
++	dev_set_drvdata(&pdev->dev, dev);
++	ret = snd_soc_register_dai(&pdev->dev, &davinci_mcasp_dai[0]);
++	if (ret != 0)
++		goto err_iounmap;
++
++	return 0;
++
++err_iounmap:
++	iounmap(dev->base);
++err_release_clk:
++	clk_disable(dev->clk);
++	clk_put(dev->clk);
++err_release_region:
++	release_mem_region(mem->start, resource_size(mem));
++err_release_data:
++	kfree(dev);
++
++	return ret;
++}
++
++static int davinci_mcasp_remove(struct platform_device *pdev)
++{
++	struct davinci_audio_dev *dev = dev_get_drvdata(&pdev->dev);
++	struct resource *mem;
++
++	snd_soc_unregister_dai(&pdev->dev);
++	clk_disable(dev->clk);
++	clk_put(dev->clk);
++	dev->clk = NULL;
++
++	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	release_mem_region(mem->start, resource_size(mem));
++
++	kfree(dev);
++
++	return 0;
++}
++
++static struct platform_driver davinci_mcasp_driver = {
++	.probe		= davinci_mcasp_probe,
++	.remove		= davinci_mcasp_remove,
++	.driver		= {
++		.name	= "davinci-mcasp",
++		.owner	= THIS_MODULE,
++	},
++};
++
++static int __init davinci_mcasp_init(void)
++{
++	return platform_driver_register(&davinci_mcasp_driver);
++}
++module_init(davinci_mcasp_init);
++
++static void __exit davinci_mcasp_exit(void)
++{
++	platform_driver_unregister(&davinci_mcasp_driver);
++}
++module_exit(davinci_mcasp_exit);
++
++MODULE_AUTHOR("Steve Chen");
++MODULE_DESCRIPTION("TI DAVINCI McASP SoC Interface");
++MODULE_LICENSE("GPL");
++
+diff --git a/sound/soc/dwc/fh_i2s_dai.h b/sound/soc/dwc/fh_i2s_dai.h
+new file mode 100644
+index 00000000..5b2f207b
+--- /dev/null
++++ b/sound/soc/dwc/fh_i2s_dai.h
+@@ -0,0 +1,59 @@
++/*
++ * ALSA SoC McASP Audio Layer for TI DAVINCI processor
++ *
++ * MCASP related definitions
++ *
++ * Author: Nirmal Pandey <n-pandey@ti.com>,
++ *         Suresh Rajashekara <suresh.r@ti.com>
++ *         Steve Chen <schen@mvista.com>
++ *
++ * Copyright:   (C) 2009 MontaVista Software, Inc., <source@mvista.com>
++ * Copyright:   (C) 2009  Texas Instruments, India
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef DAVINCI_MCASP_H
++#define DAVINCI_MCASP_H
++
++#include <linux/io.h>
++//#include <mach/asp.h>
++#include "fullhan-pcm.h"
++
++#define DAVINCI_MCASP_RATES	SNDRV_PCM_RATE_8000_96000
++#define DAVINCI_MCASP_I2S_DAI	0
++#define DAVINCI_MCASP_DIT_DAI	1
++
++enum {
++	DAVINCI_AUDIO_WORD_8 = 0,
++	DAVINCI_AUDIO_WORD_12,
++	DAVINCI_AUDIO_WORD_16,
++	DAVINCI_AUDIO_WORD_20,
++	DAVINCI_AUDIO_WORD_24,
++	DAVINCI_AUDIO_WORD_32,
++	DAVINCI_AUDIO_WORD_28,  /* This is only valid for McASP */
++};
++
++struct davinci_audio_dev {
++	//struct davinci_pcm_dma_params dma_params[2];
++	void __iomem *base;
++	int sample_rate;
++	struct clk *clk;
++	unsigned int codec_fmt;
++	u8 clk_active;
++
++	/* McASP specific data */
++	int	tdm_slots;
++	u8	op_mode;
++	u8	num_serializer;
++	u8	*serial_dir;
++	u8	version;
++
++	/* McASP FIFO related */
++	u8	txnumevt;
++	u8	rxnumevt;
++};
++
++#endif	/* DAVINCI_MCASP_H */
+diff --git a/sound/soc/dwc/fullhan-pcm.c b/sound/soc/dwc/fullhan-pcm.c
+new file mode 100644
+index 00000000..bd57e863
+--- /dev/null
++++ b/sound/soc/dwc/fullhan-pcm.c
+@@ -0,0 +1,555 @@
++/*
++ * ALSA PCM interface for the Stretch s6000 family
++ *
++ * Author:      Daniel Gloeckner, <dg@emlix.com>
++ * Copyright:   (C) 2009 emlix GmbH <info@emlix.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++#include <linux/kthread.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++#include <linux/delay.h>
++#include <linux/completion.h>
++#include <asm/dma.h>
++//#include <variant/dmac.h>
++#include "dma.h"
++#include "fullhan-pcm.h"
++
++#define S6_PCM_PREALLOCATE_SIZE (96 * 1024)
++#define S6_PCM_PREALLOCATE_MAX  (2048 * 1024)
++
++struct snd_pcm_substream *capture_substream, *play_substream;
++
++static struct snd_pcm_hardware s6000_pcm_hardware = {
++	.info			= (SNDRV_PCM_INFO_INTERLEAVED |
++				   SNDRV_PCM_INFO_BLOCK_TRANSFER |
++				   SNDRV_PCM_INFO_MMAP |
++				   SNDRV_PCM_INFO_MMAP_VALID |
++				   SNDRV_PCM_INFO_PAUSE |
++				   SNDRV_PCM_INFO_JOINT_DUPLEX),
++	.formats		= (SNDRV_PCM_FMTBIT_S16_LE |
++				   SNDRV_PCM_FMTBIT_S32_LE),
++	.rates			= (SNDRV_PCM_RATE_CONTINUOUS |
++				   SNDRV_PCM_RATE_5512 |
++				   SNDRV_PCM_RATE_8000_192000),
++	.rate_min		= 0,
++	.rate_max		= 1562500,
++	.channels_min		= 2,
++	.channels_max		= 8,
++	.buffer_bytes_max	= 0x7ffffff0,
++	.period_bytes_min	= 16,
++	.period_bytes_max	= 0xfffff0,
++	.periods_min		= 2,
++	.periods_max		= 1024,	/* no limit */
++	.fifo_size		= 0,
++};
++
++struct s6000_runtime_data {
++	spinlock_t lock;
++	int period; /* current DMA period */
++	int pos;
++};
++
++void test_dma_copy(unsigned int src, unsigned int dst)
++{
++	/* one-shot memory-to-memory copy on DMA channel 0 */
++	Dma_SetTxrType(0, DMA_TTFC_M2M_DMAC);
++	Dma_SetSrcWidth(0, 2);			/* width code 2: 32-bit transfers */
++	Dma_SetSrcSize(0, 0);			/* minimum burst size */
++	Dma_SetDstWidth(0, 2);
++	Dma_SetDstSize(0, 0);
++	Dma_SetSrcAddress(0, src);
++	Dma_SetDstAddress(0, dst);
++	Dma_SetSrcIncDirection(0, DMA_DIR_INC);
++	Dma_SetDstIncDirection(0, DMA_DIR_INC);
++	Dma_EnableIsrBit(0, DMA_INTT_BLOCK);	/* interrupt on block completion */
++
++	Dma_SetTxrSize(0, 4);			/* transfer size in source-width units */
++
++	Dma_EnableChan(0);
++}
++
++static void copy_finish(struct snd_pcm_substream *substream)
++{
++	snd_pcm_period_elapsed(substream);
++}
++
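++/*
++ * i2s_irq_enquen - move audio bytes between the ALSA ring buffer and the
++ * I2S hardware buffer from the interrupt path.  Playback copies 'len'
++ * bytes out of runtime->dma_area at the current position; capture copies
++ * into it.  Once a full period has been transferred, copy_finish()
++ * signals snd_pcm_period_elapsed().  A non-zero 'reset' only clears the
++ * per-direction byte counter (done on trigger start).
++ */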
++void i2s_irq_enquen(int type, u8 *buff, u8 len, u8 reset)
++{
++	struct snd_pcm_substream *substream;
++	struct snd_pcm_runtime *runtime;
++	struct s6000_runtime_data *prtd;
++	static int rx_count, tx_count;
++
++	if (SNDRV_PCM_STREAM_PLAYBACK == type) {
++		if (reset) {
++			tx_count = 0;
++			return;
++		}
++		if (!play_substream)
++			return;
++		substream = play_substream;
++		runtime = substream->runtime;
++		prtd = runtime->private_data;
++		memcpy(buff, runtime->dma_area + prtd->pos, len);
++		tx_count += len;
++		if (tx_count >= snd_pcm_lib_period_bytes(substream)) {
++			tx_count = 0;
++			copy_finish(substream);
++		}
++	} else {
++		if (reset) {
++			rx_count = 0;
++			return;
++		}
++		if (!capture_substream)
++			return;
++		substream = capture_substream;
++		runtime = substream->runtime;
++		prtd = runtime->private_data;
++		memcpy(runtime->dma_area + prtd->pos, buff, len);
++		rx_count += len;
++		if (rx_count >= snd_pcm_lib_period_bytes(substream)) {
++			rx_count = 0;
++			copy_finish(substream);
++		}
++	}
++	prtd->pos += len;
++	if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream))
++		prtd->pos = 0;
++}
++EXPORT_SYMBOL(i2s_irq_enquen);
++
++
++static irqreturn_t s6000_pcm_irq(int irq, void *data)
++{
++//	struct snd_pcm *pcm = data;
++//	struct snd_soc_pcm_runtime *runtime = pcm->private_data;
++//	struct s6000_runtime_data *prtd;
++//	unsigned int has_xrun;
++//	int i, ret = IRQ_NONE;
++//
++//	for (i = 0; i < 2; ++i) {
++//		struct snd_pcm_substream *substream = pcm->streams[i].substream;
++//		struct s6000_pcm_dma_params *params = snd_soc_dai_get_dma_data(
++//				runtime->cpu_dai, substream);
++//		u32 channel;
++//		unsigned int pending;
++//
++//		if (i == SNDRV_PCM_STREAM_PLAYBACK)
++//			channel = params->dma_out;
++//		else
++//			channel = params->dma_in;
++//
++//		has_xrun = params->check_xrun(runtime->cpu_dai);
++//
++//		if (!channel)
++//			continue;
++//
++//		if (unlikely(has_xrun & (1 << i)) && substream->runtime
++//				&& snd_pcm_running(substream)) {
++//			dev_dbg(pcm->dev, "xrun\n");
++//			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
++//			ret = IRQ_HANDLED;
++//		}
++//
++//		pending = s6dmac_int_sources(DMA_MASK_DMAC(channel),
++//				DMA_INDEX_CHNL(channel));
++//
++//		if (pending & 1) {
++//			ret = IRQ_HANDLED;
++//			if (likely(substream->runtime && snd_pcm_running(substream))) {
++//				snd_pcm_period_elapsed(substream);
++//				dev_dbg(pcm->dev, "period elapsed %x %x\n",
++//						s6dmac_cur_src(DMA_MASK_DMAC(channel),
++//								DMA_INDEX_CHNL(channel)),
++//						s6dmac_cur_dst(DMA_MASK_DMAC(channel),
++//								DMA_INDEX_CHNL(channel)));
++//				prtd = substream->runtime->private_data;
++//				spin_lock(&prtd->lock);
++//				s6000_pcm_enqueue_dma(substream);
++//				spin_unlock(&prtd->lock);
++//			}
++//		}
++//
++//		if (unlikely(pending & ~7)) {
++//			if (pending & (1 << 3))
++//				printk(KERN_WARNING
++//				       "s6000-pcm: DMA %x Underflow\n",
++//				       channel);
++//			if (pending & (1 << 4))
++//				printk(KERN_WARNING
++//				       "s6000-pcm: DMA %x Overflow\n",
++//				       channel);
++//			if (pending & 0x1e0)
++//				printk(KERN_WARNING
++//				       "s6000-pcm: DMA %x Master Error "
++//				       "(mask %x)\n",
++//				       channel, pending >> 5);
++//		}
++//	}
++//
++//	return ret;
++
++	/* handler logic is stubbed out above; report nothing handled */
++	return IRQ_NONE;
++}
++
++static int s6000_pcm_start(struct snd_pcm_substream *substream)
++{
++	struct s6000_runtime_data *prtd = substream->runtime->private_data;
++	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
++	struct s6000_pcm_dma_params *par;
++	unsigned long flags;
++	int srcinc;
++	u32 dma;
++
++	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
++
++//	spin_lock_irqsave(&prtd->lock, flags);
++
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++		srcinc = 1;
++		dma = par->dma_out;
++	} else {
++		srcinc = 0;
++		dma = par->dma_in;
++	}
++
++//	s6dmac_enable_chan(DMA_MASK_DMAC(dma), DMA_INDEX_CHNL(dma),
++//			   1 /* priority 1 (0 is max) */,
++//			   0 /* peripheral requests w/o xfer length mode */,
++//			   srcinc /* source address increment */,
++//			   srcinc^1 /* destination address increment */,
++//			   0 /* chunksize 0 (skip impossible on this dma) */,
++//			   0 /* source skip after chunk (impossible) */,
++//			   0 /* destination skip after chunk (impossible) */,
++//			   4 /* 16 byte burst size */,
++//			   -1 /* don't conserve bandwidth */,
++//			   0 /* low watermark irq descriptor threshold */,
++//			   0 /* disable hardware timestamps */,
++//			   1 /* enable channel */);
++//			prtd->period = 0;
++//	s6000_pcm_enqueue_dma(substream);
++
++//	spin_unlock_irqrestore(&prtd->lock, flags);
++
++	return 0;
++}
++
++static int s6000_pcm_stop(struct snd_pcm_substream *substream)
++{
++	struct s6000_runtime_data *prtd = substream->runtime->private_data;
++	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
++	unsigned long flags;
++//	u32 channel;
++
++	capture_substream = play_substream = NULL;
++
++//	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
++//
++//	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++//		channel = par->dma_out;
++//	else
++//		channel = par->dma_in;
++//
++//	s6dmac_set_terminal_count(DMA_MASK_DMAC(channel),
++//				  DMA_INDEX_CHNL(channel), 0);
++
++	return 0;
++}
++
++static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
++	struct s6000_pcm_dma_params *par;
++	int ret;
++
++	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
++
++	ret = par->trigger(substream, cmd, 0);
++	if (ret < 0)
++		return ret;
++
++	switch (cmd) {
++	case SNDRV_PCM_TRIGGER_START:
++	case SNDRV_PCM_TRIGGER_RESUME:
++	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++		i2s_irq_enquen(substream->stream, NULL, 0, 1);
++		ret = s6000_pcm_start(substream);
++		break;
++	case SNDRV_PCM_TRIGGER_STOP:
++	case SNDRV_PCM_TRIGGER_SUSPEND:
++	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++		ret = s6000_pcm_stop(substream);
++		break;
++	default:
++		ret = -EINVAL;
++	}
++	if (ret < 0)
++		return ret;
++
++	return par->trigger(substream, cmd, 1);
++}
++
++static int s6000_pcm_prepare(struct snd_pcm_substream *substream)
++{
++	return 0;
++}
++
++
++static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream)
++{
++	struct snd_pcm_runtime *runtime = substream->runtime;
++	struct s6000_runtime_data *prtd = runtime->private_data;
++	unsigned int offset;
++
++	offset = bytes_to_frames(runtime, prtd->pos);
++	if (unlikely(offset >= runtime->buffer_size))
++		offset = 0;
++	return offset;
++}
++
++static int s6000_pcm_open(struct snd_pcm_substream *substream)
++{
++	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
++	struct s6000_pcm_dma_params *par;
++	struct snd_pcm_runtime *runtime = substream->runtime;
++	struct s6000_runtime_data *prtd;
++	int ret;
++
++	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
++	snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware);
++	ret = snd_pcm_hw_constraint_step(runtime, 0,
++					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16);
++	if (ret < 0)
++		return ret;
++	ret = snd_pcm_hw_constraint_step(runtime, 0,
++					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
++	if (ret < 0)
++		return ret;
++	ret = snd_pcm_hw_constraint_integer(runtime,
++					    SNDRV_PCM_HW_PARAM_PERIODS);
++	if (ret < 0)
++		return ret;
++
++	//	if (par->same_rate) {
++	//		printk("s6000 pcm open 5.0\n");
++	//		int rate;
++	//		spin_lock(&par->lock); /* needed? */
++	//		rate = par->rate;
++	//		spin_unlock(&par->lock);
++	//		printk("s6000 pcm open 5.1\n");
++	//		if (rate != -1) {
++	//			ret = snd_pcm_hw_constraint_minmax(runtime,
++	//							SNDRV_PCM_HW_PARAM_RATE,
++	//							rate, rate);
++	//			printk("s6000 pcm open 5.2\n");
++	//			if (ret < 0)
++	//				return ret;
++	//		}
++	//	}
++	prtd = kzalloc(sizeof(struct s6000_runtime_data), GFP_KERNEL);
++	if (prtd == NULL)
++		return -ENOMEM;
++
++	spin_lock_init(&prtd->lock);
++	prtd->period = 0;
++	prtd->pos = 0;
++	runtime->private_data = prtd;
++
++	/* remember whether this is the playback or the capture stream */
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++		play_substream = substream;
++	else
++		capture_substream = substream;
++
++	//irq_emulation();
++	return 0;
++}
++
++static int s6000_pcm_close(struct snd_pcm_substream *substream)
++{
++	struct s6000_runtime_data *prtd = substream->runtime->private_data;
++
++	/* don't take prtd->lock around the free: unlocking a lock that
++	 * lives inside just-freed memory is a use-after-free */
++	kfree(prtd);
++	return 0;
++}
++
++static int s6000_pcm_hw_params(struct snd_pcm_substream *substream,
++			       struct snd_pcm_hw_params *hw_params)
++{
++	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
++	struct s6000_pcm_dma_params *par;
++	int ret;
++
++	ret = snd_pcm_lib_malloc_pages(substream,
++				       params_buffer_bytes(hw_params));
++	if (ret < 0) {
++		printk(KERN_WARNING "s6000-pcm: allocation of memory failed\n");
++		return ret;
++	}
++	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
++
++	if (par->same_rate) {
++		spin_lock(&par->lock);
++		if (par->rate == -1 ||
++		    !(par->in_use & ~(1 << substream->stream))) {
++			par->rate = params_rate(hw_params);
++			par->in_use |= 1 << substream->stream;
++		} else if (params_rate(hw_params) != par->rate) {
++			snd_pcm_lib_free_pages(substream);
++			par->in_use &= ~(1 << substream->stream);
++			ret = -EBUSY;
++		}
++		spin_unlock(&par->lock);
++	}
++	return ret;
++}
++
++static int s6000_pcm_hw_free(struct snd_pcm_substream *substream)
++{
++	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
++	struct s6000_pcm_dma_params *par =
++		snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
++
++	spin_lock(&par->lock);
++	par->in_use &= ~(1 << substream->stream);
++	if (!par->in_use)
++		par->rate = -1;
++	spin_unlock(&par->lock);
++
++	return snd_pcm_lib_free_pages(substream);
++}
++
++int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
++		    struct vm_area_struct *vma)
++{
++	struct snd_pcm_runtime *runtime = substream->runtime;
++
++	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
++				     runtime->dma_area, runtime->dma_addr,
++				     runtime->dma_bytes);
++}
++
++int s6000_pcm_copy(struct snd_pcm_substream *substream, int channel,
++		   snd_pcm_uframes_t pos, void __user *buf,
++		   snd_pcm_uframes_t count)
++{
++	return 0;
++}
++static struct snd_pcm_ops s6000_pcm_ops = {
++	.copy		= s6000_pcm_copy,
++	.open		= s6000_pcm_open,
++	.close		= s6000_pcm_close,
++	.ioctl		= snd_pcm_lib_ioctl,
++	.hw_params	= s6000_pcm_hw_params,
++	.hw_free	= s6000_pcm_hw_free,
++	.trigger	= s6000_pcm_trigger,
++	.prepare	= s6000_pcm_prepare,
++	.pointer	= s6000_pcm_pointer,
++//	.mmap		= pxa2xx_pcm_mmap,
++};
++
++static void s6000_pcm_free(struct snd_pcm *pcm)
++{
++//	struct snd_soc_pcm_runtime *runtime = pcm->private_data;
++//	struct s6000_pcm_dma_params *params =
++//		snd_soc_dai_get_dma_data(runtime->cpu_dai, pcm->streams[0].substream);
++//
++//	free_irq(params->irq, pcm);
++//	snd_pcm_lib_preallocate_free_for_all(pcm);
++}
++
++static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
++
++static int davinci_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
++					      size_t size)
++{
++	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
++	struct snd_dma_buffer *buf = &substream->dma_buffer;
++
++	buf->dev.type = SNDRV_DMA_TYPE_DEV;
++	buf->dev.dev = pcm->card->dev;
++	buf->private_data = NULL;
++	buf->area = dma_alloc_writecombine(pcm->card->dev, size, &buf->addr,
++					   GFP_KERNEL);
++	pr_debug("davinci_pcm: preallocate_dma_buffer: area=%p, addr=%p, size=%zu\n",
++		 (void *)buf->area, (void *)buf->addr, size);
++
++	if (!buf->area)
++		return -ENOMEM;
++
++	buf->bytes = size;
++	return 0;
++}
++
++static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
++
++static int s6000_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
++			 struct snd_pcm *pcm)
++{
++	struct snd_soc_pcm_runtime *runtime = pcm->private_data;
++	struct s6000_pcm_dma_params *params;
++	int res;
++	int ret;
++
++#if 0
++	if (dai->driver->playback.channels_min) {
++		ret = davinci_pcm_preallocate_dma_buffer(pcm,
++				SNDRV_PCM_STREAM_PLAYBACK,
++				pcm_hardware_playback.buffer_bytes_max);
++		if (ret)
++			return ret;
++	}
++
++	if (dai->driver->capture.channels_min) {
++		ret = davinci_pcm_preallocate_dma_buffer(pcm,
++				SNDRV_PCM_STREAM_CAPTURE,
++				pcm_hardware_capture.buffer_bytes_max);
++		if (ret)
++			return ret;
++	}
++#endif
++	params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
++					  pcm->streams[0].substream);
++	if (!card->dev->dma_mask)
++		card->dev->dma_mask = &s6000_pcm_dmamask;
++	if (!card->dev->coherent_dma_mask)
++		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++	res = snd_pcm_lib_preallocate_pages_for_all(pcm,
++			SNDRV_DMA_TYPE_DEV, card->dev,
++			S6_PCM_PREALLOCATE_SIZE,
++			S6_PCM_PREALLOCATE_MAX);
++	if (res)
++		printk(KERN_WARNING "s6000-pcm: preallocation failed\n");
++
++	return 0;
++}
++
++static struct snd_soc_platform_driver s6000_soc_platform = {
++	.ops		= &s6000_pcm_ops,
++	.pcm_new	= s6000_pcm_new,
++	.pcm_free	= s6000_pcm_free,
++};
++
++static int __devinit s6000_soc_platform_probe(struct platform_device *pdev)
++{
++	int err;
++
++	/* the handler ignores dev_id, so the platform device is passed
++	 * instead of the previously uninitialized pointer */
++	err = request_irq(9, s6000_pcm_irq, 0, "dw_dmac", pdev);
++	if (err)
++		printk(KERN_ERR "pcm: dma interrupt request failed\n");
++
++	return snd_soc_register_platform(&pdev->dev, &s6000_soc_platform);
++}
++
++static int __devexit s6000_soc_platform_remove(struct platform_device *pdev)
++{
++	snd_soc_unregister_platform(&pdev->dev);
++	return 0;
++}
++
++static struct platform_driver s6000_pcm_driver = {
++	.driver = {
++		.name	= "fh-pcm-audio",
++		.owner	= THIS_MODULE,
++	},
++	.probe	= s6000_soc_platform_probe,
++	.remove	= __devexit_p(s6000_soc_platform_remove),
++};
++
++static int __init snd_s6000_pcm_init(void)
++{
++	return platform_driver_register(&s6000_pcm_driver);
++}
++module_init(snd_s6000_pcm_init);
++
++static void __exit snd_s6000_pcm_exit(void)
++{
++	platform_driver_unregister(&s6000_pcm_driver);
++}
++module_exit(snd_s6000_pcm_exit);
++
++MODULE_AUTHOR("Daniel Gloeckner");
++MODULE_DESCRIPTION("Stretch s6000 family PCM DMA module");
++MODULE_LICENSE("GPL");
+diff --git a/sound/soc/dwc/fullhan-pcm.h b/sound/soc/dwc/fullhan-pcm.h
+new file mode 100644
+index 00000000..09d9b883
+--- /dev/null
++++ b/sound/soc/dwc/fullhan-pcm.h
+@@ -0,0 +1,33 @@
++/*
++ * ALSA PCM interface for the Stretch s6000 family
++ *
++ * Author:      Daniel Gloeckner, <dg@emlix.com>
++ * Copyright:   (C) 2009 emlix GmbH <info@emlix.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _S6000_PCM_H
++#define _S6000_PCM_H
++
++struct snd_soc_dai;
++struct snd_pcm_substream;
++
++struct s6000_pcm_dma_params {
++	unsigned int (*check_xrun)(struct snd_soc_dai *cpu_dai);
++	int (*trigger)(struct snd_pcm_substream *substream, int cmd, int after);
++	dma_addr_t sif_in;
++	dma_addr_t sif_out;
++	u32 dma_in;
++	u32 dma_out;
++	int irq;
++	int same_rate;
++
++	spinlock_t lock;
++	int in_use;
++	int rate;
++};
++
++#endif
+diff --git a/tools/perf/Makefile b/tools/perf/Makefile
+index c1683661..d5d628fb 100644
+--- a/tools/perf/Makefile
++++ b/tools/perf/Makefile
+@@ -96,7 +96,7 @@ ifndef PERF_DEBUG
+ endif
+ 
+ CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+-EXTLIBS = -lpthread -lrt -lelf -lm
+EXTLIBS = -lpthread -lrt -lelf -lm -lebl -lz -ldl
+ ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+ ALL_LDFLAGS = $(LDFLAGS)
+ STRIP ?= strip
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index 3b9d0b80..4e93e8ce 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -18,25 +18,14 @@
+  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+  *
+  */
+-
++#include "util.h"
+ #include <sys/utsname.h>
+-#include <sys/types.h>
+-#include <sys/stat.h>
+-#include <fcntl.h>
+-#include <errno.h>
+-#include <stdio.h>
+-#include <unistd.h>
+ #include <getopt.h>
+-#include <stdlib.h>
+-#include <string.h>
+-#include <stdarg.h>
+-#include <ctype.h>
+ #include <dwarf-regs.h>
+ 
+ #include <linux/bitops.h>
+ #include "event.h"
+ #include "debug.h"
+-#include "util.h"
+ #include "symbol.h"
+ #include "probe-finder.h"
+ 
diff --git a/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0010-overlayfs.v11.patch b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0010-overlayfs.v11.patch
new file mode 100644
index 00000000..1dccf7b1
--- /dev/null
+++ b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0010-overlayfs.v11.patch
@@ -0,0 +1,3176 @@
+--- /dev/null
++++ b/Documentation/filesystems/overlayfs.txt
+@@ -0,0 +1,199 @@
++Written by: Neil Brown <neilb@suse.de>
++
++Overlay Filesystem
++==================
++
++This document describes a prototype for a new approach to providing
++overlay-filesystem functionality in Linux (sometimes referred to as
++union-filesystems).  An overlay-filesystem tries to present a
++filesystem which is the result of overlaying one filesystem on top
++of the other.
++
++The result will inevitably fail to look exactly like a normal
++filesystem for various technical reasons.  The expectation is that
++many use cases will be able to ignore these differences.
++
++This approach is 'hybrid' because the objects that appear in the
++filesystem do not all appear to belong to that filesystem.  In many
++cases an object accessed in the union will be indistinguishable
++from accessing the corresponding object from the original filesystem.
++This is most obvious from the 'st_dev' field returned by stat(2).
++
++While directories will report an st_dev from the overlay-filesystem,
++all non-directory objects will report an st_dev from the lower or
++upper filesystem that is providing the object.  Similarly st_ino will
++only be unique when combined with st_dev, and both of these can change
++over the lifetime of a non-directory object.  Many applications and
++tools ignore these values and will not be affected.
++
++Upper and Lower
++---------------
++
++An overlay filesystem combines two filesystems - an 'upper' filesystem
++and a 'lower' filesystem.  When a name exists in both filesystems, the
++object in the 'upper' filesystem is visible while the object in the
++'lower' filesystem is either hidden or, in the case of directories,
++merged with the 'upper' object.
++
++It would be more correct to refer to an upper and lower 'directory
++tree' rather than 'filesystem' as it is quite possible for both
++directory trees to be in the same filesystem and there is no
++requirement that the root of a filesystem be given for either upper or
++lower.
++
++The lower filesystem can be any filesystem supported by Linux and does
++not need to be writable.  The lower filesystem can even be another
++overlayfs.  The upper filesystem will normally be writable and if it
++is it must support the creation of trusted.* extended attributes, and
++must provide valid d_type in readdir responses, at least for symbolic
++links - so NFS is not suitable.
++
++A read-only overlay of two read-only filesystems may use any
++filesystem type.
++
++Directories
++-----------
++
++Overlaying mainly involves directories.  If a given name appears in both
++upper and lower filesystems and refers to a non-directory in either,
++then the lower object is hidden - the name refers only to the upper
++object.
++
++Where both upper and lower objects are directories, a merged directory
++is formed.
++
++At mount time, the two directories given as mount options are combined
++into a merged directory:
++
++  mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper /overlay
++
++Then whenever a lookup is requested in such a merged directory, the
++lookup is performed in each actual directory and the combined result
++is cached in the dentry belonging to the overlay filesystem.  If both
++actual lookups find directories, both are stored and a merged
++directory is created, otherwise only one is stored: the upper if it
++exists, else the lower.
++
++Only the lists of names from directories are merged.  Other content
++such as metadata and extended attributes are reported for the upper
++directory only.  These attributes of the lower directory are hidden.
++
++whiteouts and opaque directories
++--------------------------------
++
++In order to support rm and rmdir without changing the lower
++filesystem, an overlay filesystem needs to record in the upper filesystem
++that files have been removed.  This is done using whiteouts and opaque
++directories (non-directories are always opaque).
++
++The overlay filesystem uses extended attributes with a
++"trusted.overlay."  prefix to record these details.
++
++A whiteout is created as a symbolic link with target
++"(overlay-whiteout)" and with xattr "trusted.overlay.whiteout" set to "y".
++When a whiteout is found in the upper level of a merged directory, any
++matching name in the lower level is ignored, and the whiteout itself
++is also hidden.
++
++A directory is made opaque by setting the xattr "trusted.overlay.opaque"
++to "y".  Where the upper filesystem contains an opaque directory, any
++directory in the lower filesystem with the same name is ignored.
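++
++As an illustration (all paths hypothetical), removing a file "foo" that
++exists only in the lower layer leaves a whiteout behind in the upper
++directory, which might look like this:
++
++  # rm /overlay/foo
++  # ls -l /upper/foo
++  lrwxrwxrwx 1 root root ... /upper/foo -> (overlay-whiteout)
++  # getfattr -n trusted.overlay.whiteout /upper/foo
++  trusted.overlay.whiteout="y"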
++
++readdir
++-------
++
++When a 'readdir' request is made on a merged directory, the upper and
++lower directories are each read and the name lists merged in the
++obvious way (upper is read first, then lower - entries that already
++exist are not re-added).  This merged name list is cached in the
++'struct file' and so remains as long as the file is kept open.  If the
++directory is opened and read by two processes at the same time, they
++will each have separate caches.  A seekdir to the start of the
++directory (offset 0) followed by a readdir will cause the cache to be
++discarded and rebuilt.
++
++This means that changes to the merged directory do not appear while a
++directory is being read.  This is unlikely to be noticed by many
++programs.
++
++seek offsets are assigned sequentially when the directories are read.
++Thus if
++  - read part of a directory
++  - remember an offset, and close the directory
++  - re-open the directory some time later
++  - seek to the remembered offset
++
++there may be little correlation between the old and new locations in
++the list of filenames, particularly if anything has changed in the
++directory.
++
++Readdir on directories that are not merged is simply handled by the
++underlying directory (upper or lower).
++
++
++Non-directories
++---------------
++
++Objects that are not directories (files, symlinks, device-special
++files etc.) are presented either from the upper or lower filesystem as
++appropriate.  When a file in the lower filesystem is accessed in a way
++the requires write-access, such as opening for write access, changing
++some metadata etc., the file is first copied from the lower filesystem
++to the upper filesystem (copy_up).  Note that creating a hard-link
++also requires copy_up, though of course creation of a symlink does
++not.
++
++The copy_up may turn out to be unnecessary, for example if the file is
++opened for read-write but the data is not modified.
++
++The copy_up process first makes sure that the containing directory
++exists in the upper filesystem - creating it and any parents as
++necessary.  It then creates the object with the same metadata (owner,
++mode, mtime, symlink-target etc.) and then if the object is a file, the
++data is copied from the lower to the upper filesystem.  Finally any
++extended attributes are copied up.
++
++Once the copy_up is complete, the overlay filesystem simply
++provides direct access to the newly created file in the upper
++filesystem - future operations on the file are barely noticed by the
++overlay filesystem (though an operation on the name of the file such as
++rename or unlink will of course be noticed and handled).
++
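++As an illustration (paths hypothetical), a metadata-only change is enough
++to trigger copy_up of a file that so far exists only in the lower layer:
++
++  # chmod 0600 /overlay/lower-only-file
++  # ls /upper
++  lower-only-file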
++
++Non-standard behavior
++---------------------
++
++The copy_up operation essentially creates a new, identical file and
++moves it over to the old name.  The new file may be on a different
++filesystem, so both st_dev and st_ino of the file may change.
++
++Any open files referring to this inode will access the old data and
++metadata.  Similarly any file locks obtained before copy_up will not
++apply to the copied up file.
++
++If a file is opened with O_RDONLY, then fchmod(2), fchown(2), futimesat(2)
++and fsetxattr(2) will fail with EROFS.
++
++If a file with multiple hard links is copied up, then this will
++"break" the link.  Changes will not be propagated to other names
++referring to the same inode.
++
++Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory
++object in overlayfs will not contain valid absolute paths, only
++relative paths leading up to the filesystem's root.  This will be
++fixed in the future.
++
++Some operations are not atomic, for example a crash during copy_up or
++rename will leave the filesystem in an inconsistent state.  This will
++be addressed in the future.
++
++Changes to underlying filesystems
++---------------------------------
++
++Offline changes, when the overlay is not mounted, are allowed to either
++the upper or the lower trees.
++
++Changes to the underlying filesystems while part of a mounted overlay
++filesystem are not allowed.  If the underlying filesystem is changed,
++the behavior of the overlay is undefined, though it will not result in
++a crash or deadlock.
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4727,6 +4727,13 @@ F:	drivers/scsi/osd/
+ F:	include/scsi/osd_*
+ F:	fs/exofs/
+ 
++OVERLAYFS FILESYSTEM
++M:	Miklos Szeredi <miklos@szeredi.hu>
++L:	linux-fsdevel@vger.kernel.org
++S:	Supported
++F:	fs/overlayfs/*
++F:	Documentation/filesystems/overlayfs.txt
++
+ P54 WIRELESS DRIVER
+ M:	Christian Lamparter <chunkeey@googlemail.com>
+ L:	linux-wireless@vger.kernel.org
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -63,6 +63,7 @@ source "fs/quota/Kconfig"
+ 
+ source "fs/autofs4/Kconfig"
+ source "fs/fuse/Kconfig"
++source "fs/overlayfs/Kconfig"
+ 
+ config CUSE
+ 	tristate "Character device in Userspace support"
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -105,6 +105,7 @@ obj-$(CONFIG_QNX4FS_FS)		+= qnx4/
+ obj-$(CONFIG_AUTOFS4_FS)	+= autofs4/
+ obj-$(CONFIG_ADFS_FS)		+= adfs/
+ obj-$(CONFIG_FUSE_FS)		+= fuse/
++obj-$(CONFIG_OVERLAYFS_FS)	+= overlayfs/
+ obj-$(CONFIG_UDF_FS)		+= udf/
+ obj-$(CONFIG_SUN_OPENPROMFS)	+= openpromfs/
+ obj-$(CONFIG_OMFS_FS)		+= omfs/
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -544,6 +544,13 @@ static struct dentry *ecryptfs_mount(str
+ 	s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+ 	s->s_blocksize = path.dentry->d_sb->s_blocksize;
+ 	s->s_magic = ECRYPTFS_SUPER_MAGIC;
++	s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
++
++	rc = -EINVAL;
++	if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++		printk(KERN_ERR "eCryptfs: maximum fs stacking depth exceeded\n");
++		goto out_free;
++	}
+ 
+ 	inode = ecryptfs_get_inode(path.dentry->d_inode, s);
+ 	rc = PTR_ERR(inode);
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1492,6 +1492,23 @@ void drop_collected_mounts(struct vfsmou
+ 	release_mounts(&umount_list);
+ }
+ 
++struct vfsmount *clone_private_mount(struct path *path)
++{
++	struct vfsmount *mnt;
++
++	if (IS_MNT_UNBINDABLE(path->mnt))
++		return ERR_PTR(-EINVAL);
++
++	down_read(&namespace_sem);
++	mnt = clone_mnt(path->mnt, path->dentry, CL_PRIVATE);
++	up_read(&namespace_sem);
++	if (!mnt)
++		return ERR_PTR(-ENOMEM);
++
++	return mnt;
++}
++EXPORT_SYMBOL_GPL(clone_private_mount);
++
+ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
+ 		   struct vfsmount *root)
+ {
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -666,8 +666,7 @@ static inline int __get_file_write_acces
+ 	return error;
+ }
+ 
+-static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
+-					struct file *f,
++static struct file *__dentry_open(struct path *path, struct file *f,
+ 					int (*open)(struct inode *, struct file *),
+ 					const struct cred *cred)
+ {
+@@ -675,15 +674,16 @@ static struct file *__dentry_open(struct
+ 	struct inode *inode;
+ 	int error;
+ 
++	path_get(path);
+ 	f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
+ 				FMODE_PREAD | FMODE_PWRITE;
+ 
+ 	if (unlikely(f->f_flags & O_PATH))
+ 		f->f_mode = FMODE_PATH;
+ 
+-	inode = dentry->d_inode;
++	inode = path->dentry->d_inode;
+ 	if (f->f_mode & FMODE_WRITE) {
+-		error = __get_file_write_access(inode, mnt);
++		error = __get_file_write_access(inode, path->mnt);
+ 		if (error)
+ 			goto cleanup_file;
+ 		if (!special_file(inode->i_mode))
+@@ -691,8 +691,7 @@ static struct file *__dentry_open(struct
+ 	}
+ 
+ 	f->f_mapping = inode->i_mapping;
+-	f->f_path.dentry = dentry;
+-	f->f_path.mnt = mnt;
++	f->f_path = *path;
+ 	f->f_pos = 0;
+ 	file_sb_list_add(f, inode->i_sb);
+ 
+@@ -745,7 +744,7 @@ cleanup_all:
+ 			 * here, so just reset the state.
+ 			 */
+ 			file_reset_write(f);
+-			mnt_drop_write(mnt);
++			mnt_drop_write(path->mnt);
+ 		}
+ 	}
+ 	file_sb_list_del(f);
+@@ -753,8 +752,7 @@ cleanup_all:
+ 	f->f_path.mnt = NULL;
+ cleanup_file:
+ 	put_filp(f);
+-	dput(dentry);
+-	mntput(mnt);
++	path_put(path);
+ 	return ERR_PTR(error);
+ }
+ 
+@@ -780,14 +778,14 @@ cleanup_file:
+ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+ 		int (*open)(struct inode *, struct file *))
+ {
++	struct path path = { .dentry = dentry, .mnt = nd->path.mnt };
+ 	const struct cred *cred = current_cred();
+ 
+ 	if (IS_ERR(nd->intent.open.file))
+ 		goto out;
+ 	if (IS_ERR(dentry))
+ 		goto out_err;
+-	nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
+-					     nd->intent.open.file,
++	nd->intent.open.file = __dentry_open(&path, nd->intent.open.file,
+ 					     open, cred);
+ out:
+ 	return nd->intent.open.file;
+@@ -816,10 +814,17 @@ struct file *nameidata_to_filp(struct na
+ 
+ 	/* Has the filesystem initialised the file for us? */
+ 	if (filp->f_path.dentry == NULL) {
+-		path_get(&nd->path);
+-		filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
+-				     NULL, cred);
++		struct inode *inode = nd->path.dentry->d_inode;
++
++		if (inode->i_op->open) {
++			int flags = filp->f_flags;
++			put_filp(filp);
++			filp = inode->i_op->open(nd->path.dentry, flags, cred);
++		} else {
++			filp = __dentry_open(&nd->path, filp, NULL, cred);
++		}
+ 	}
++
+ 	return filp;
+ }
+ 
+@@ -830,26 +835,45 @@ struct file *nameidata_to_filp(struct na
+ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
+ 			 const struct cred *cred)
+ {
+-	int error;
+-	struct file *f;
+-
+-	validate_creds(cred);
++	struct path path = { .dentry = dentry, .mnt = mnt };
++	struct file *ret;
+ 
+ 	/* We must always pass in a valid mount pointer. */
+ 	BUG_ON(!mnt);
+ 
+-	error = -ENFILE;
++	ret = vfs_open(&path, flags, cred);
++	path_put(&path);
++
++	return ret;
++}
++EXPORT_SYMBOL(dentry_open);
++
++/**
++ * vfs_open - open the file at the given path
++ * @path: path to open
++ * @flags: open flags
++ * @cred: credentials to use
++ *
++ * Open the file.  If successful, the returned file will have acquired
++ * its own reference on @path.
++ */
++struct file *vfs_open(struct path *path, int flags, const struct cred *cred)
++{
++	struct file *f;
++	struct inode *inode = path->dentry->d_inode;
++
++	validate_creds(cred);
++
++	if (inode->i_op->open)
++		return inode->i_op->open(path->dentry, flags, cred);
+ 	f = get_empty_filp();
+-	if (f == NULL) {
+-		dput(dentry);
+-		mntput(mnt);
+-		return ERR_PTR(error);
+-	}
++	if (f == NULL)
++		return ERR_PTR(-ENFILE);
+ 
+ 	f->f_flags = flags;
+-	return __dentry_open(dentry, mnt, f, NULL, cred);
++	return __dentry_open(path, f, NULL, cred);
+ }
+-EXPORT_SYMBOL(dentry_open);
++EXPORT_SYMBOL(vfs_open);
+ 
+ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+ {
+--- /dev/null
++++ b/fs/overlayfs/Kconfig
+@@ -0,0 +1,4 @@
++config OVERLAYFS_FS
++	tristate "Overlay filesystem support"
++	help
++	  Add support for overlay filesystem.
+--- /dev/null
++++ b/fs/overlayfs/Makefile
+@@ -0,0 +1,7 @@
++#
++# Makefile for the overlay filesystem.
++#
++
++obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o
++
++overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o
+--- /dev/null
++++ b/fs/overlayfs/copy_up.c
+@@ -0,0 +1,383 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/file.h>
++#include <linux/splice.h>
++#include <linux/xattr.h>
++#include <linux/security.h>
++#include <linux/uaccess.h>
++#include "overlayfs.h"
++
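++/* Copy file data in 1 MiB chunks so that large copy-ups stay killable */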
++#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)
++
++static int ovl_copy_up_xattr(struct dentry *old, struct dentry *new)
++{
++	ssize_t list_size, size;
++	char *buf, *name, *value;
++	int error;
++
++	if (!old->d_inode->i_op->getxattr ||
++	    !new->d_inode->i_op->getxattr)
++		return 0;
++
++	list_size = vfs_listxattr(old, NULL, 0);
++	if (list_size <= 0) {
++		if (list_size == -EOPNOTSUPP)
++			return 0;
++		return list_size;
++	}
++
++	buf = kzalloc(list_size, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	error = -ENOMEM;
++	value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
++	if (!value)
++		goto out;
++
++	list_size = vfs_listxattr(old, buf, list_size);
++	if (list_size <= 0) {
++		error = list_size;
++		goto out_free_value;
++	}
++
++	for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
++		size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
++		if (size <= 0) {
++			error = size;
++			goto out_free_value;
++		}
++		error = vfs_setxattr(new, name, value, size, 0);
++		if (error)
++			goto out_free_value;
++	}
++
++out_free_value:
++	kfree(value);
++out:
++	kfree(buf);
++	return error;
++}
++
++static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
++{
++	struct file *old_file;
++	struct file *new_file;
++	int error = 0;
++
++	if (len == 0)
++		return 0;
++
++	old_file = vfs_open(old, O_RDONLY, current_cred());
++	if (IS_ERR(old_file))
++		return PTR_ERR(old_file);
++
++	new_file = vfs_open(new, O_WRONLY, current_cred());
++	if (IS_ERR(new_file)) {
++		error = PTR_ERR(new_file);
++		goto out_fput;
++	}
++
++	/* FIXME: copy up sparse files efficiently */
++	while (len) {
++		loff_t offset = new_file->f_pos;
++		size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
++		long bytes;
++
++		if (len < this_len)
++			this_len = len;
++
++		if (signal_pending_state(TASK_KILLABLE, current)) {
++			error = -EINTR;
++			break;
++		}
++
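++		/*
++		 * Both files advance in lockstep from offset zero, so
++		 * new_file->f_pos doubles as the read offset for old_file.
++		 */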
++		bytes = do_splice_direct(old_file, &offset, new_file, this_len,
++				 SPLICE_F_MOVE);
++		if (bytes <= 0) {
++			error = bytes;
++			break;
++		}
++
++		len -= bytes;
++	}
++
++	fput(new_file);
++out_fput:
++	fput(old_file);
++	return error;
++}
++
++static char *ovl_read_symlink(struct dentry *realdentry)
++{
++	int res;
++	char *buf;
++	struct inode *inode = realdentry->d_inode;
++	mm_segment_t old_fs;
++
++	res = -EINVAL;
++	if (!inode->i_op->readlink)
++		goto err;
++
++	res = -ENOMEM;
++	buf = (char *) __get_free_page(GFP_KERNEL);
++	if (!buf)
++		goto err;
++
++	old_fs = get_fs();
++	set_fs(get_ds());
++	/* The cast to a user pointer is valid due to the set_fs() */
++	res = inode->i_op->readlink(realdentry,
++				    (char __user *)buf, PAGE_SIZE - 1);
++	set_fs(old_fs);
++	if (res < 0) {
++		free_page((unsigned long) buf);
++		goto err;
++	}
++	buf[res] = '\0';
++
++	return buf;
++
++err:
++	return ERR_PTR(res);
++}
++
++static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
++{
++	struct iattr attr = {
++		.ia_valid = ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
++		.ia_atime = stat->atime,
++		.ia_mtime = stat->mtime,
++	};
++
++	return notify_change(upperdentry, &attr);
++}
++
++static int ovl_set_mode(struct dentry *upperdentry, umode_t mode)
++{
++	struct iattr attr = {
++		.ia_valid = ATTR_MODE,
++		.ia_mode = mode,
++	};
++
++	return notify_change(upperdentry, &attr);
++}
++
++static int ovl_copy_up_locked(struct dentry *upperdir, struct dentry *dentry,
++			      struct path *lowerpath, struct kstat *stat,
++			      const char *link)
++{
++	int err;
++	struct path newpath;
++	umode_t mode = stat->mode;
++
++	/* Can't properly set mode on creation because of the umask */
++	stat->mode &= S_IFMT;
++
++	ovl_path_upper(dentry, &newpath);
++	WARN_ON(newpath.dentry);
++	newpath.dentry = ovl_upper_create(upperdir, dentry, stat, link);
++	if (IS_ERR(newpath.dentry))
++		return PTR_ERR(newpath.dentry);
++
++	if (S_ISREG(stat->mode)) {
++		err = ovl_copy_up_data(lowerpath, &newpath, stat->size);
++		if (err)
++			goto err_remove;
++	}
++
++	err = ovl_copy_up_xattr(lowerpath->dentry, newpath.dentry);
++	if (err)
++		goto err_remove;
++
++	mutex_lock(&newpath.dentry->d_inode->i_mutex);
++	if (!S_ISLNK(stat->mode))
++		err = ovl_set_mode(newpath.dentry, mode);
++	if (!err)
++		err = ovl_set_timestamps(newpath.dentry, stat);
++	mutex_unlock(&newpath.dentry->d_inode->i_mutex);
++	if (err)
++		goto err_remove;
++
++	ovl_dentry_update(dentry, newpath.dentry);
++
++	/*
++	 * Easiest way to get rid of the lower dentry reference is to
++	 * drop this dentry.  This is neither needed nor possible for
++	 * directories.
++	 */
++	if (!S_ISDIR(stat->mode))
++		d_drop(dentry);
++
++	return 0;
++
++err_remove:
++	if (S_ISDIR(stat->mode))
++		vfs_rmdir(upperdir->d_inode, newpath.dentry);
++	else
++		vfs_unlink(upperdir->d_inode, newpath.dentry);
++
++	dput(newpath.dentry);
++
++	return err;
++}
++
++/*
++ * Copy up a single dentry
++ *
++ * Directory renames are only allowed on "pure upper" directories
++ * (already created on the upper filesystem, never copied up).
++ * Directories which are on the lower layer or are merged may not be
++ * renamed.  For these -EXDEV is returned and userspace has to deal
++ * with it.  This means that, when copying up a directory, we can rely
++ * on it and its ancestors being stable.
++ *
++ * Non-directory renames start with a copy up of the source if
++ * necessary.  The actual rename will only proceed once the copy up
++ * has succeeded.  Copy up uses the upper parent's i_mutex for
++ * exclusion.  Since rename can change d_parent it is possible that
++ * the copy up will lock the old parent.  At that point the file will
++ * have already been copied up anyway.
++ */
++static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
++			   struct path *lowerpath, struct kstat *stat)
++{
++	int err;
++	struct kstat pstat;
++	struct path parentpath;
++	struct dentry *upperdir;
++	const struct cred *old_cred;
++	struct cred *override_cred;
++	char *link = NULL;
++
++	ovl_path_upper(parent, &parentpath);
++	upperdir = parentpath.dentry;
++
++	err = vfs_getattr(parentpath.mnt, parentpath.dentry, &pstat);
++	if (err)
++		return err;
++
++	if (S_ISLNK(stat->mode)) {
++		link = ovl_read_symlink(lowerpath->dentry);
++		if (IS_ERR(link))
++			return PTR_ERR(link);
++	}
++
++	err = -ENOMEM;
++	override_cred = prepare_creds();
++	if (!override_cred)
++		goto out_free_link;
++
++	override_cred->fsuid = stat->uid;
++	override_cred->fsgid = stat->gid;
++	/*
++	 * CAP_SYS_ADMIN for copying up extended attributes
++	 * CAP_DAC_OVERRIDE for create
++	 * CAP_FOWNER for chmod, timestamp update
++	 * CAP_FSETID for chmod
++	 * CAP_MKNOD for mknod
++	 */
++	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++	cap_raise(override_cred->cap_effective, CAP_FOWNER);
++	cap_raise(override_cred->cap_effective, CAP_FSETID);
++	cap_raise(override_cred->cap_effective, CAP_MKNOD);
++	old_cred = override_creds(override_cred);
++
++	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++	if (ovl_path_type(dentry) != OVL_PATH_LOWER) {
++		err = 0;
++	} else {
++		err = ovl_copy_up_locked(upperdir, dentry, lowerpath,
++					 stat, link);
++		if (!err) {
++			/* Restore timestamps on parent (best effort) */
++			ovl_set_timestamps(upperdir, &pstat);
++		}
++	}
++
++	mutex_unlock(&upperdir->d_inode->i_mutex);
++
++	revert_creds(old_cred);
++	put_cred(override_cred);
++
++out_free_link:
++	if (link)
++		free_page((unsigned long) link);
++
++	return err;
++}
++
++int ovl_copy_up(struct dentry *dentry)
++{
++	int err;
++
++	err = 0;
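++	/*
++	 * Copy up ancestors first: each pass copies up the topmost
++	 * dentry on the path that is still on the lower layer only.
++	 */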
++	while (!err) {
++		struct dentry *next;
++		struct dentry *parent;
++		struct path lowerpath;
++		struct kstat stat;
++		enum ovl_path_type type = ovl_path_type(dentry);
++
++		if (type != OVL_PATH_LOWER)
++			break;
++
++		next = dget(dentry);
++		/* find the topmost dentry not yet copied up */
++		for (;;) {
++			parent = dget_parent(next);
++
++			type = ovl_path_type(parent);
++			if (type != OVL_PATH_LOWER)
++				break;
++
++			dput(next);
++			next = parent;
++		}
++
++		ovl_path_lower(next, &lowerpath);
++		err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat);
++		if (!err)
++			err = ovl_copy_up_one(parent, next, &lowerpath, &stat);
++
++		dput(parent);
++		dput(next);
++	}
++
++	return err;
++}
++
++/*
++ * Optimization: copy up only "size" bytes instead of copying up the
++ * whole file and truncating it afterwards.
++ */
++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size)
++{
++	int err;
++	struct kstat stat;
++	struct path lowerpath;
++	struct dentry *parent = dget_parent(dentry);
++
++	err = ovl_copy_up(parent);
++	if (err)
++		goto out_dput_parent;
++
++	ovl_path_lower(dentry, &lowerpath);
++	err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat);
++	if (err)
++		goto out_dput_parent;
++
++	if (size < stat.size)
++		stat.size = size;
++
++	err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
++
++out_dput_parent:
++	dput(parent);
++	return err;
++}
+--- /dev/null
++++ b/fs/overlayfs/dir.c
+@@ -0,0 +1,596 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/xattr.h>
++#include <linux/security.h>
++#include "overlayfs.h"
++
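++/*
++ * A whiteout is a symlink to "(overlay-whiteout)" marked with the
++ * "trusted.overlay.whiteout" xattr.  It hides any lower layer entry
++ * of the same name.
++ */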
++static const char *ovl_whiteout_symlink = "(overlay-whiteout)";
++
++static int ovl_whiteout(struct dentry *upperdir, struct dentry *dentry)
++{
++	int err;
++	struct dentry *newdentry;
++	const struct cred *old_cred;
++	struct cred *override_cred;
++
++	/* FIXME: recheck lower dentry to see if whiteout is really needed */
++
++	err = -ENOMEM;
++	override_cred = prepare_creds();
++	if (!override_cred)
++		goto out;
++
++	/*
++	 * CAP_SYS_ADMIN for setxattr
++	 * CAP_DAC_OVERRIDE for symlink creation
++	 * CAP_FOWNER for unlink in sticky directory
++	 */
++	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++	cap_raise(override_cred->cap_effective, CAP_FOWNER);
++	override_cred->fsuid = 0;
++	override_cred->fsgid = 0;
++	old_cred = override_creds(override_cred);
++
++	newdentry = lookup_one_len(dentry->d_name.name, upperdir,
++				   dentry->d_name.len);
++	err = PTR_ERR(newdentry);
++	if (IS_ERR(newdentry))
++		goto out_put_cred;
++
++	/*
++	 * The entry has just been removed within the same locked
++	 * region, so the lookup must be negative.
++	 */
++	WARN_ON(newdentry->d_inode);
++
++	err = vfs_symlink(upperdir->d_inode, newdentry, ovl_whiteout_symlink);
++	if (err)
++		goto out_dput;
++
++	ovl_dentry_version_inc(dentry->d_parent);
++
++	err = vfs_setxattr(newdentry, ovl_whiteout_xattr, "y", 1, 0);
++	if (err)
++		vfs_unlink(upperdir->d_inode, newdentry);
++
++out_dput:
++	dput(newdentry);
++out_put_cred:
++	revert_creds(old_cred);
++	put_cred(override_cred);
++out:
++	if (err) {
++		/*
++		 * There's no way to recover from failure to whiteout.
++		 * What should we do?  Log a big fat error and... ?
++		 */
++		printk(KERN_ERR "overlayfs: ERROR - failed to whiteout '%s'\n",
++		       dentry->d_name.name);
++	}
++
++	return err;
++}
++
++static struct dentry *ovl_lookup_create(struct dentry *upperdir,
++					struct dentry *template)
++{
++	int err;
++	struct dentry *newdentry;
++	struct qstr *name = &template->d_name;
++
++	newdentry = lookup_one_len(name->name, upperdir, name->len);
++	if (IS_ERR(newdentry))
++		return newdentry;
++
++	if (newdentry->d_inode) {
++		const struct cred *old_cred;
++		struct cred *override_cred;
++
++		/* No need to check whiteout if lower parent is non-existent */
++		err = -EEXIST;
++		if (!ovl_dentry_lower(template->d_parent))
++			goto out_dput;
++
++		if (!S_ISLNK(newdentry->d_inode->i_mode))
++			goto out_dput;
++
++		err = -ENOMEM;
++		override_cred = prepare_creds();
++		if (!override_cred)
++			goto out_dput;
++
++		/*
++		 * CAP_SYS_ADMIN for getxattr
++		 * CAP_FOWNER for unlink in sticky directory
++		 */
++		cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++		cap_raise(override_cred->cap_effective, CAP_FOWNER);
++		old_cred = override_creds(override_cred);
++
++		err = -EEXIST;
++		if (ovl_is_whiteout(newdentry))
++			err = vfs_unlink(upperdir->d_inode, newdentry);
++
++		revert_creds(old_cred);
++		put_cred(override_cred);
++		if (err)
++			goto out_dput;
++
++		dput(newdentry);
++		newdentry = lookup_one_len(name->name, upperdir, name->len);
++		if (IS_ERR(newdentry)) {
++			ovl_whiteout(upperdir, template);
++			return newdentry;
++		}
++
++		/*
++		 * The whiteout has just been successfully removed and
++		 * the parent i_mutex is still held, so there's no way
++		 * the lookup could return positive.
++		 */
++		WARN_ON(newdentry->d_inode);
++	}
++
++	return newdentry;
++
++out_dput:
++	dput(newdentry);
++	return ERR_PTR(err);
++}
++
++struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
++				struct kstat *stat, const char *link)
++{
++	int err;
++	struct dentry *newdentry;
++	struct inode *dir = upperdir->d_inode;
++
++	newdentry = ovl_lookup_create(upperdir, dentry);
++	if (IS_ERR(newdentry))
++		goto out;
++
++	switch (stat->mode & S_IFMT) {
++	case S_IFREG:
++		err = vfs_create(dir, newdentry, stat->mode, NULL);
++		break;
++
++	case S_IFDIR:
++		err = vfs_mkdir(dir, newdentry, stat->mode);
++		break;
++
++	case S_IFCHR:
++	case S_IFBLK:
++	case S_IFIFO:
++	case S_IFSOCK:
++		err = vfs_mknod(dir, newdentry, stat->mode, stat->rdev);
++		break;
++
++	case S_IFLNK:
++		err = vfs_symlink(dir, newdentry, link);
++		break;
++
++	default:
++		err = -EPERM;
++	}
++	if (err) {
++		if (ovl_dentry_is_opaque(dentry))
++			ovl_whiteout(upperdir, dentry);
++		dput(newdentry);
++		newdentry = ERR_PTR(err);
++	} else if (WARN_ON(!newdentry->d_inode)) {
++		/*
++		 * Not quite sure whether a non-instantiated dentry is
++		 * legal here or not.  The VFS doesn't seem to care, so
++		 * check and warn.
++		 */
++		dput(newdentry);
++		newdentry = ERR_PTR(-ENOENT);
++	}
++
++out:
++	return newdentry;
++}
++
++static int ovl_set_opaque(struct dentry *upperdentry)
++{
++	int err;
++	const struct cred *old_cred;
++	struct cred *override_cred;
++
++	override_cred = prepare_creds();
++	if (!override_cred)
++		return -ENOMEM;
++
++	/* CAP_SYS_ADMIN for setxattr of "trusted" namespace */
++	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++	old_cred = override_creds(override_cred);
++	err = vfs_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
++	revert_creds(old_cred);
++	put_cred(override_cred);
++
++	return err;
++}
++
++static int ovl_remove_opaque(struct dentry *upperdentry)
++{
++	int err;
++	const struct cred *old_cred;
++	struct cred *override_cred;
++
++	override_cred = prepare_creds();
++	if (!override_cred)
++		return -ENOMEM;
++
++	/* CAP_SYS_ADMIN for removexattr of "trusted" namespace */
++	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++	old_cred = override_creds(override_cred);
++	err = vfs_removexattr(upperdentry, ovl_opaque_xattr);
++	revert_creds(old_cred);
++	put_cred(override_cred);
++
++	return err;
++}
++
++static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
++			 struct kstat *stat)
++{
++	int err;
++	enum ovl_path_type type;
++	struct path realpath;
++
++	type = ovl_path_real(dentry, &realpath);
++	err = vfs_getattr(realpath.mnt, realpath.dentry, stat);
++	if (err)
++		return err;
++
++	stat->dev = dentry->d_sb->s_dev;
++	stat->ino = dentry->d_inode->i_ino;
++
++	/*
++	 * It's probably not worth it to count subdirs to get the
++	 * correct link count.  nlink=1 seems to pacify 'find' and
++	 * other utilities.
++	 */
++	if (type == OVL_PATH_MERGE)
++		stat->nlink = 1;
++
++	return 0;
++}
++
++static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
++			     const char *link)
++{
++	int err;
++	struct dentry *newdentry;
++	struct dentry *upperdir;
++	struct inode *inode;
++	struct kstat stat = {
++		.mode = mode,
++		.rdev = rdev,
++	};
++
++	err = -ENOMEM;
++	inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata);
++	if (!inode)
++		goto out;
++
++	err = ovl_copy_up(dentry->d_parent);
++	if (err)
++		goto out_iput;
++
++	upperdir = ovl_dentry_upper(dentry->d_parent);
++	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++
++	newdentry = ovl_upper_create(upperdir, dentry, &stat, link);
++	err = PTR_ERR(newdentry);
++	if (IS_ERR(newdentry))
++		goto out_unlock;
++
++	ovl_dentry_version_inc(dentry->d_parent);
++	if (ovl_dentry_is_opaque(dentry) && S_ISDIR(mode)) {
++		err = ovl_set_opaque(newdentry);
++		if (err) {
++			vfs_rmdir(upperdir->d_inode, newdentry);
++			ovl_whiteout(upperdir, dentry);
++			goto out_dput;
++		}
++	}
++	ovl_dentry_update(dentry, newdentry);
++	d_instantiate(dentry, inode);
++	inode = NULL;
++	newdentry = NULL;
++	err = 0;
++
++out_dput:
++	dput(newdentry);
++out_unlock:
++	mutex_unlock(&upperdir->d_inode->i_mutex);
++out_iput:
++	iput(inode);
++out:
++	return err;
++}
++
++static int ovl_create(struct inode *dir, struct dentry *dentry, int mode,
++			struct nameidata *nd)
++{
++	return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL);
++}
++
++static int ovl_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++{
++	return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL);
++}
++
++static int ovl_mknod(struct inode *dir, struct dentry *dentry, int mode,
++		       dev_t rdev)
++{
++	return ovl_create_object(dentry, mode, rdev, NULL);
++}
++
++static int ovl_symlink(struct inode *dir, struct dentry *dentry,
++			 const char *link)
++{
++	return ovl_create_object(dentry, S_IFLNK, 0, link);
++}
++
++static int ovl_do_remove(struct dentry *dentry, bool is_dir)
++{
++	int err;
++	enum ovl_path_type type;
++	struct path realpath;
++	struct dentry *upperdir;
++
++	err = ovl_copy_up(dentry->d_parent);
++	if (err)
++		return err;
++
++	upperdir = ovl_dentry_upper(dentry->d_parent);
++	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++	type = ovl_path_real(dentry, &realpath);
++	if (type != OVL_PATH_LOWER) {
++		err = -ESTALE;
++		if (realpath.dentry->d_parent != upperdir)
++			goto out_d_drop;
++
++		/* FIXME: create whiteout up front and rename to target */
++
++		if (is_dir)
++			err = vfs_rmdir(upperdir->d_inode, realpath.dentry);
++		else
++			err = vfs_unlink(upperdir->d_inode, realpath.dentry);
++		if (err)
++			goto out_d_drop;
++
++		ovl_dentry_version_inc(dentry->d_parent);
++	}
++
++	if (type != OVL_PATH_UPPER || ovl_dentry_is_opaque(dentry))
++		err = ovl_whiteout(upperdir, dentry);
++
++	/*
++	 * Keeping this dentry hashed would mean having to release
++	 * upperpath/lowerpath, which could only be done if we are the
++	 * sole user of this dentry.  Too tricky...  Just unhash for
++	 * now.
++	 */
++out_d_drop:
++	d_drop(dentry);
++	mutex_unlock(&upperdir->d_inode->i_mutex);
++
++	return err;
++}
++
++static int ovl_unlink(struct inode *dir, struct dentry *dentry)
++{
++	return ovl_do_remove(dentry, false);
++}
++
++static int ovl_rmdir(struct inode *dir, struct dentry *dentry)
++{
++	int err;
++	enum ovl_path_type type;
++
++	type = ovl_path_type(dentry);
++	if (type != OVL_PATH_UPPER) {
++		err = ovl_check_empty_and_clear(dentry, type);
++		if (err)
++			return err;
++	}
++
++	return ovl_do_remove(dentry, true);
++}
++
++static int ovl_link(struct dentry *old, struct inode *newdir,
++		    struct dentry *new)
++{
++	int err;
++	struct dentry *olddentry;
++	struct dentry *newdentry;
++	struct dentry *upperdir;
++
++	err = ovl_copy_up(old);
++	if (err)
++		goto out;
++
++	err = ovl_copy_up(new->d_parent);
++	if (err)
++		goto out;
++
++	upperdir = ovl_dentry_upper(new->d_parent);
++	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++	newdentry = ovl_lookup_create(upperdir, new);
++	err = PTR_ERR(newdentry);
++	if (IS_ERR(newdentry))
++		goto out_unlock;
++
++	olddentry = ovl_dentry_upper(old);
++	err = vfs_link(olddentry, upperdir->d_inode, newdentry);
++	if (!err) {
++		if (WARN_ON(!newdentry->d_inode)) {
++			dput(newdentry);
++			err = -ENOENT;
++			goto out_unlock;
++		}
++
++		ovl_dentry_version_inc(new->d_parent);
++		ovl_dentry_update(new, newdentry);
++
++		ihold(old->d_inode);
++		d_instantiate(new, old->d_inode);
++	} else {
++		if (ovl_dentry_is_opaque(new))
++			ovl_whiteout(upperdir, new);
++		dput(newdentry);
++	}
++out_unlock:
++	mutex_unlock(&upperdir->d_inode->i_mutex);
++out:
++	return err;
++}
++
++static int ovl_rename(struct inode *olddir, struct dentry *old,
++			struct inode *newdir, struct dentry *new)
++{
++	int err;
++	enum ovl_path_type old_type;
++	enum ovl_path_type new_type;
++	struct dentry *old_upperdir;
++	struct dentry *new_upperdir;
++	struct dentry *olddentry;
++	struct dentry *newdentry;
++	struct dentry *trap;
++	bool old_opaque;
++	bool new_opaque;
++	bool new_create = false;
++	bool is_dir = S_ISDIR(old->d_inode->i_mode);
++
++	/* Don't copy up directory trees */
++	old_type = ovl_path_type(old);
++	if (old_type != OVL_PATH_UPPER && is_dir)
++		return -EXDEV;
++
++	if (new->d_inode) {
++		new_type = ovl_path_type(new);
++
++		if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
++			if (ovl_dentry_lower(old)->d_inode ==
++			    ovl_dentry_lower(new)->d_inode)
++				return 0;
++		}
++		if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
++			if (ovl_dentry_upper(old)->d_inode ==
++			    ovl_dentry_upper(new)->d_inode)
++				return 0;
++		}
++
++		if (new_type != OVL_PATH_UPPER &&
++		    S_ISDIR(new->d_inode->i_mode)) {
++			err = ovl_check_empty_and_clear(new, new_type);
++			if (err)
++				return err;
++		}
++	} else {
++		new_type = OVL_PATH_UPPER;
++	}
++
++	err = ovl_copy_up(old);
++	if (err)
++		return err;
++
++	err = ovl_copy_up(new->d_parent);
++	if (err)
++		return err;
++
++	old_upperdir = ovl_dentry_upper(old->d_parent);
++	new_upperdir = ovl_dentry_upper(new->d_parent);
++
++	trap = lock_rename(new_upperdir, old_upperdir);
++
++	olddentry = ovl_dentry_upper(old);
++	newdentry = ovl_dentry_upper(new);
++	if (newdentry) {
++		dget(newdentry);
++	} else {
++		new_create = true;
++		newdentry = ovl_lookup_create(new_upperdir, new);
++		err = PTR_ERR(newdentry);
++		if (IS_ERR(newdentry))
++			goto out_unlock;
++	}
++
++	err = -ESTALE;
++	if (olddentry->d_parent != old_upperdir)
++		goto out_dput;
++	if (newdentry->d_parent != new_upperdir)
++		goto out_dput;
++	if (olddentry == trap)
++		goto out_dput;
++	if (newdentry == trap)
++		goto out_dput;
++
++	old_opaque = ovl_dentry_is_opaque(old);
++	new_opaque = ovl_dentry_is_opaque(new) || new_type != OVL_PATH_UPPER;
++
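++	/*
++	 * If a directory is moved over a name that covers lower entries,
++	 * it must be made opaque before the rename so that the lower
++	 * entries cannot show through at the new location.
++	 */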
++	if (is_dir && !old_opaque && new_opaque) {
++		err = ovl_set_opaque(olddentry);
++		if (err)
++			goto out_dput;
++	}
++
++	err = vfs_rename(old_upperdir->d_inode, olddentry,
++			 new_upperdir->d_inode, newdentry);
++
++	if (err) {
++		if (new_create && ovl_dentry_is_opaque(new))
++			ovl_whiteout(new_upperdir, new);
++		if (is_dir && !old_opaque && new_opaque)
++			ovl_remove_opaque(olddentry);
++		goto out_dput;
++	}
++
++	if (old_type != OVL_PATH_UPPER || old_opaque)
++		err = ovl_whiteout(old_upperdir, old);
++	if (is_dir && old_opaque && !new_opaque)
++		ovl_remove_opaque(olddentry);
++
++	if (old_opaque != new_opaque)
++		ovl_dentry_set_opaque(old, new_opaque);
++
++	ovl_dentry_version_inc(old->d_parent);
++	ovl_dentry_version_inc(new->d_parent);
++
++out_dput:
++	dput(newdentry);
++out_unlock:
++	unlock_rename(new_upperdir, old_upperdir);
++	return err;
++}
++
++const struct inode_operations ovl_dir_inode_operations = {
++	.lookup		= ovl_lookup,
++	.mkdir		= ovl_mkdir,
++	.symlink	= ovl_symlink,
++	.unlink		= ovl_unlink,
++	.rmdir		= ovl_rmdir,
++	.rename		= ovl_rename,
++	.link		= ovl_link,
++	.setattr	= ovl_setattr,
++	.create		= ovl_create,
++	.mknod		= ovl_mknod,
++	.permission	= ovl_permission,
++	.getattr	= ovl_dir_getattr,
++	.setxattr	= ovl_setxattr,
++	.getxattr	= ovl_getxattr,
++	.listxattr	= ovl_listxattr,
++	.removexattr	= ovl_removexattr,
++};
+--- /dev/null
++++ b/fs/overlayfs/inode.c
+@@ -0,0 +1,384 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/xattr.h>
++#include "overlayfs.h"
++
++int ovl_setattr(struct dentry *dentry, struct iattr *attr)
++{
++	struct dentry *upperdentry;
++	int err;
++
++	if ((attr->ia_valid & ATTR_SIZE) && !ovl_dentry_upper(dentry))
++		err = ovl_copy_up_truncate(dentry, attr->ia_size);
++	else
++		err = ovl_copy_up(dentry);
++	if (err)
++		return err;
++
++	upperdentry = ovl_dentry_upper(dentry);
++
++	if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++		attr->ia_valid &= ~ATTR_MODE;
++
++	mutex_lock(&upperdentry->d_inode->i_mutex);
++	err = notify_change(upperdentry, attr);
++	mutex_unlock(&upperdentry->d_inode->i_mutex);
++
++	return err;
++}
++
++static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry,
++			 struct kstat *stat)
++{
++	struct path realpath;
++
++	ovl_path_real(dentry, &realpath);
++	return vfs_getattr(realpath.mnt, realpath.dentry, stat);
++}
++
++int ovl_permission(struct inode *inode, int mask, unsigned int flags)
++{
++	struct ovl_entry *oe;
++	struct dentry *alias = NULL;
++	struct inode *realinode;
++	struct dentry *realdentry;
++	bool is_upper;
++	int err;
++
++	if (S_ISDIR(inode->i_mode)) {
++		oe = inode->i_private;
++	} else if (flags & IPERM_FLAG_RCU) {
++		return -ECHILD;
++	} else {
++		/*
++		 * For non-directories find an alias and get the info
++		 * from there.
++		 */
++		spin_lock(&inode->i_lock);
++		if (WARN_ON(list_empty(&inode->i_dentry))) {
++			spin_unlock(&inode->i_lock);
++			return -ENOENT;
++		}
++		alias = list_entry(inode->i_dentry.next, struct dentry, d_alias);
++		dget(alias);
++		spin_unlock(&inode->i_lock);
++		oe = alias->d_fsdata;
++	}
++
++	realdentry = ovl_entry_real(oe, &is_upper);
++
++	/* Careful in RCU walk mode */
++	realinode = ACCESS_ONCE(realdentry->d_inode);
++	if (!realinode) {
++		WARN_ON(!(flags & IPERM_FLAG_RCU));
++		err = -ENOENT;
++		goto out_dput;
++	}
++
++	if (mask & MAY_WRITE) {
++		umode_t mode = realinode->i_mode;
++
++		/*
++		 * Writes will always be redirected to upper layer, so
++		 * ignore lower layer being read-only.
++		 *
++		 * If the overlay itself is read-only then proceed
++		 * with the permission check, don't return EROFS.
++		 * This will only happen if this is the lower layer of
++		 * another overlayfs.
++		 *
++		 * If upper fs becomes read-only after the overlay was
++		 * constructed return EROFS to prevent modification of
++		 * upper layer.
++		 */
++		err = -EROFS;
++		if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) &&
++		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
++			goto out_dput;
++
++		/*
++		 * Nobody gets write access to an immutable file.
++		 */
++		err = -EACCES;
++		if (IS_IMMUTABLE(realinode))
++			goto out_dput;
++	}
++
++	if (realinode->i_op->permission)
++		err = realinode->i_op->permission(realinode, mask, flags);
++	else
++		err = generic_permission(realinode, mask, flags,
++					 realinode->i_op->check_acl);
++out_dput:
++	dput(alias);
++	return err;
++}
++
++struct ovl_link_data {
++	struct dentry *realdentry;
++	void *cookie;
++};
++
++static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++	void *ret;
++	struct dentry *realdentry;
++	struct inode *realinode;
++
++	realdentry = ovl_dentry_real(dentry);
++	realinode = realdentry->d_inode;
++
++	if (WARN_ON(!realinode->i_op->follow_link))
++		return ERR_PTR(-EPERM);
++
++	ret = realinode->i_op->follow_link(realdentry, nd);
++	if (IS_ERR(ret))
++		return ret;
++
++	if (realinode->i_op->put_link) {
++		struct ovl_link_data *data;
++
++		data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL);
++		if (!data) {
++			realinode->i_op->put_link(realdentry, nd, ret);
++			return ERR_PTR(-ENOMEM);
++		}
++		data->realdentry = realdentry;
++		data->cookie = ret;
++
++		return data;
++	} else {
++		return NULL;
++	}
++}
++
++static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
++{
++	struct inode *realinode;
++	struct ovl_link_data *data = c;
++
++	if (!data)
++		return;
++
++	realinode = data->realdentry->d_inode;
++	realinode->i_op->put_link(data->realdentry, nd, data->cookie);
++	kfree(data);
++}
++
++static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
++{
++	struct path realpath;
++	struct inode *realinode;
++
++	ovl_path_real(dentry, &realpath);
++	realinode = realpath.dentry->d_inode;
++
++	if (!realinode->i_op->readlink)
++		return -EINVAL;
++
++	touch_atime(realpath.mnt, realpath.dentry);
++
++	return realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
++}
++
++static bool ovl_is_private_xattr(const char *name)
++{
++	/* "trusted.overlay." is 16 characters */
++	return strncmp(name, "trusted.overlay.", 16) == 0;
++}
++
++int ovl_setxattr(struct dentry *dentry, const char *name,
++		 const void *value, size_t size, int flags)
++{
++	int err;
++	struct dentry *upperdentry;
++
++	if (ovl_is_private_xattr(name))
++		return -EPERM;
++
++	err = ovl_copy_up(dentry);
++	if (err)
++		return err;
++
++	upperdentry = ovl_dentry_upper(dentry);
++	return vfs_setxattr(upperdentry, name, value, size, flags);
++}
++
++ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
++		     void *value, size_t size)
++{
++	if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
++	    ovl_is_private_xattr(name))
++		return -ENODATA;
++
++	return vfs_getxattr(ovl_dentry_real(dentry), name, value, size);
++}
++
++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
++{
++	ssize_t res;
++	int off;
++
++	res = vfs_listxattr(ovl_dentry_real(dentry), list, size);
++	if (res <= 0 || size == 0)
++		return res;
++
++	if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE)
++		return res;
++
++	/* filter out private xattrs */
++	for (off = 0; off < res;) {
++		char *s = list + off;
++		size_t slen = strlen(s) + 1;
++
++		BUG_ON(off + slen > res);
++
++		if (ovl_is_private_xattr(s)) {
++			res -= slen;
++			memmove(s, s + slen, res - off);
++		} else {
++			off += slen;
++		}
++	}
++
++	return res;
++}
++
++int ovl_removexattr(struct dentry *dentry, const char *name)
++{
++	int err;
++	struct path realpath;
++	enum ovl_path_type type;
++
++	if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
++	    ovl_is_private_xattr(name))
++		return -ENODATA;
++
++	type = ovl_path_real(dentry, &realpath);
++	if (type == OVL_PATH_LOWER) {
++		err = vfs_getxattr(realpath.dentry, name, NULL, 0);
++		if (err < 0)
++			return err;
++
++		err = ovl_copy_up(dentry);
++		if (err)
++			return err;
++
++		ovl_path_upper(dentry, &realpath);
++	}
++
++	return vfs_removexattr(realpath.dentry, name);
++}
++
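++/* Opening a lower file for write (or with O_TRUNC) requires a copy up */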
++static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
++				  struct dentry *realdentry)
++{
++	if (type != OVL_PATH_LOWER)
++		return false;
++
++	if (special_file(realdentry->d_inode->i_mode))
++		return false;
++
++	if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC))
++		return false;
++
++	return true;
++}
++
++static struct file *ovl_open(struct dentry *dentry, int flags,
++			     const struct cred *cred)
++{
++	int err;
++	struct path realpath;
++	enum ovl_path_type type;
++
++	type = ovl_path_real(dentry, &realpath);
++	if (ovl_open_need_copy_up(flags, type, realpath.dentry)) {
++		if (flags & O_TRUNC)
++			err = ovl_copy_up_truncate(dentry, 0);
++		else
++			err = ovl_copy_up(dentry);
++		if (err)
++			return ERR_PTR(err);
++
++		ovl_path_upper(dentry, &realpath);
++	}
++
++	return vfs_open(&realpath, flags, cred);
++}
++
++static const struct inode_operations ovl_file_inode_operations = {
++	.setattr	= ovl_setattr,
++	.permission	= ovl_permission,
++	.getattr	= ovl_getattr,
++	.setxattr	= ovl_setxattr,
++	.getxattr	= ovl_getxattr,
++	.listxattr	= ovl_listxattr,
++	.removexattr	= ovl_removexattr,
++	.open		= ovl_open,
++};
++
++static const struct inode_operations ovl_symlink_inode_operations = {
++	.setattr	= ovl_setattr,
++	.follow_link	= ovl_follow_link,
++	.put_link	= ovl_put_link,
++	.readlink	= ovl_readlink,
++	.getattr	= ovl_getattr,
++	.setxattr	= ovl_setxattr,
++	.getxattr	= ovl_getxattr,
++	.listxattr	= ovl_listxattr,
++	.removexattr	= ovl_removexattr,
++};
++
++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
++			    struct ovl_entry *oe)
++{
++	struct inode *inode;
++
++	inode = new_inode(sb);
++	if (!inode)
++		return NULL;
++
++	mode &= S_IFMT;
++
++	inode->i_ino = get_next_ino();
++	inode->i_mode = mode;
++	inode->i_flags |= S_NOATIME | S_NOCMTIME;
++
++	switch (mode) {
++	case S_IFDIR:
++		inode->i_private = oe;
++		inode->i_op = &ovl_dir_inode_operations;
++		inode->i_fop = &ovl_dir_operations;
++		break;
++
++	case S_IFLNK:
++		inode->i_op = &ovl_symlink_inode_operations;
++		break;
++
++	case S_IFREG:
++	case S_IFSOCK:
++	case S_IFBLK:
++	case S_IFCHR:
++	case S_IFIFO:
++		inode->i_op = &ovl_file_inode_operations;
++		break;
++
++	default:
++		WARN(1, "illegal file type: %i\n", mode);
++		inode = NULL;
++	}
++
++	return inode;
++}
+--- /dev/null
++++ b/fs/overlayfs/overlayfs.h
+@@ -0,0 +1,63 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++struct ovl_entry;
++
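++/*
++ * OVL_PATH_UPPER: operations go to the upper layer only
++ * OVL_PATH_MERGE: a directory existing on both layers, presented merged
++ * OVL_PATH_LOWER: the object has not been copied up yet
++ */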
++enum ovl_path_type {
++	OVL_PATH_UPPER,
++	OVL_PATH_MERGE,
++	OVL_PATH_LOWER,
++};
++
++extern const char *ovl_opaque_xattr;
++extern const char *ovl_whiteout_xattr;
++extern const struct dentry_operations ovl_dentry_operations;
++
++enum ovl_path_type ovl_path_type(struct dentry *dentry);
++u64 ovl_dentry_version_get(struct dentry *dentry);
++void ovl_dentry_version_inc(struct dentry *dentry);
++void ovl_path_upper(struct dentry *dentry, struct path *path);
++void ovl_path_lower(struct dentry *dentry, struct path *path);
++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
++struct dentry *ovl_dentry_upper(struct dentry *dentry);
++struct dentry *ovl_dentry_lower(struct dentry *dentry);
++struct dentry *ovl_dentry_real(struct dentry *dentry);
++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper);
++bool ovl_dentry_is_opaque(struct dentry *dentry);
++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
++bool ovl_is_whiteout(struct dentry *dentry);
++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
++			  struct nameidata *nd);
++
++struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
++				struct kstat *stat, const char *link);
++
++/* readdir.c */
++extern const struct file_operations ovl_dir_operations;
++int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type);
++
++/* inode.c */
++int ovl_setattr(struct dentry *dentry, struct iattr *attr);
++int ovl_permission(struct inode *inode, int mask, unsigned int flags);
++int ovl_setxattr(struct dentry *dentry, const char *name,
++		 const void *value, size_t size, int flags);
++ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
++		     void *value, size_t size);
++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
++int ovl_removexattr(struct dentry *dentry, const char *name);
++
++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
++			    struct ovl_entry *oe);
++/* dir.c */
++extern const struct inode_operations ovl_dir_inode_operations;
++
++/* copy_up.c */
++int ovl_copy_up(struct dentry *dentry);
++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size);
+--- /dev/null
++++ b/fs/overlayfs/readdir.c
+@@ -0,0 +1,558 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/namei.h>
++#include <linux/file.h>
++#include <linux/xattr.h>
++#include <linux/rbtree.h>
++#include <linux/security.h>
++#include "overlayfs.h"
++
++struct ovl_cache_entry {
++	const char *name;
++	unsigned int len;
++	unsigned int type;
++	u64 ino;
++	bool is_whiteout;
++	struct list_head l_node;
++	struct rb_node node;
++};
++
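++/*
++ * State for one merged readdir pass: upper entries are indexed in an
++ * rbtree so that lower entries of the same name are merged into the
++ * existing entry instead of being duplicated.
++ */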
++struct ovl_readdir_data {
++	struct rb_root *root;
++	struct list_head *list;
++	struct list_head *middle;
++	struct dentry *dir;
++	int count;
++	int err;
++};
++
++struct ovl_dir_file {
++	bool is_real;
++	bool is_cached;
++	struct list_head cursor;
++	u64 cache_version;
++	struct list_head cache;
++	struct file *realfile;
++};
++
++static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
++{
++	return container_of(n, struct ovl_cache_entry, node);
++}
++
++static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
++						    const char *name, int len)
++{
++	struct rb_node *node = root->rb_node;
++	int cmp;
++
++	while (node) {
++		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
++
++		cmp = strncmp(name, p->name, len);
++		if (cmp > 0)
++			node = p->node.rb_right;
++		else if (cmp < 0 || len < p->len)
++			node = p->node.rb_left;
++		else
++			return p;
++	}
++
++	return NULL;
++}
++
++static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
++						   u64 ino, unsigned int d_type)
++{
++	struct ovl_cache_entry *p;
++
++	p = kmalloc(sizeof(*p) + len + 1, GFP_KERNEL);
++	if (p) {
++		char *name_copy = (char *) (p + 1);
++		memcpy(name_copy, name, len);
++		name_copy[len] = '\0';
++		p->name = name_copy;
++		p->len = len;
++		p->type = d_type;
++		p->ino = ino;
++		p->is_whiteout = false;
++	}
++
++	return p;
++}
++
++static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
++				  const char *name, int len, u64 ino,
++				  unsigned int d_type)
++{
++	struct rb_node **newp = &rdd->root->rb_node;
++	struct rb_node *parent = NULL;
++	struct ovl_cache_entry *p;
++
++	while (*newp) {
++		int cmp;
++		struct ovl_cache_entry *tmp;
++
++		parent = *newp;
++		tmp = ovl_cache_entry_from_node(*newp);
++		cmp = strncmp(name, tmp->name, len);
++		if (cmp > 0)
++			newp = &tmp->node.rb_right;
++		else if (cmp < 0 || len < tmp->len)
++			newp = &tmp->node.rb_left;
++		else
++			return 0;
++	}
++
++	p = ovl_cache_entry_new(name, len, ino, d_type);
++	if (p == NULL)
++		return -ENOMEM;
++
++	list_add_tail(&p->l_node, rdd->list);
++	rb_link_node(&p->node, parent, newp);
++	rb_insert_color(&p->node, rdd->root);
++
++	return 0;
++}
++
++static int ovl_fill_lower(void *buf, const char *name, int namelen,
++			    loff_t offset, u64 ino, unsigned int d_type)
++{
++	struct ovl_readdir_data *rdd = buf;
++	struct ovl_cache_entry *p;
++
++	rdd->count++;
++	p = ovl_cache_entry_find(rdd->root, name, namelen);
++	if (p) {
++		list_move_tail(&p->l_node, rdd->middle);
++	} else {
++		p = ovl_cache_entry_new(name, namelen, ino, d_type);
++		if (p == NULL)
++			rdd->err = -ENOMEM;
++		else
++			list_add_tail(&p->l_node, rdd->middle);
++	}
++
++	return rdd->err;
++}
++
++static void ovl_cache_free(struct list_head *list)
++{
++	struct ovl_cache_entry *p;
++	struct ovl_cache_entry *n;
++
++	list_for_each_entry_safe(p, n, list, l_node)
++		kfree(p);
++
++	INIT_LIST_HEAD(list);
++}
++
++static int ovl_fill_upper(void *buf, const char *name, int namelen,
++			  loff_t offset, u64 ino, unsigned int d_type)
++{
++	struct ovl_readdir_data *rdd = buf;
++
++	rdd->count++;
++	return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
++}
++
++static inline int ovl_dir_read(struct path *realpath,
++			       struct ovl_readdir_data *rdd, filldir_t filler)
++{
++	struct file *realfile;
++	int err;
++
++	realfile = vfs_open(realpath, O_RDONLY | O_DIRECTORY, current_cred());
++	if (IS_ERR(realfile))
++		return PTR_ERR(realfile);
++
++	do {
++		rdd->count = 0;
++		rdd->err = 0;
++		err = vfs_readdir(realfile, filler, rdd);
++		if (err >= 0)
++			err = rdd->err;
++	} while (!err && rdd->count);
++	fput(realfile);
++
++	return 0;
++}
++
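++/*
++ * Invalidate the cached directory contents if the directory changed
++ * (tracked by the version counter), and switch to the merged view if
++ * the directory was copied up since it was opened.
++ */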
++static void ovl_dir_reset(struct file *file)
++{
++	struct ovl_dir_file *od = file->private_data;
++	enum ovl_path_type type = ovl_path_type(file->f_path.dentry);
++
++	if (ovl_dentry_version_get(file->f_path.dentry) != od->cache_version) {
++		list_del_init(&od->cursor);
++		ovl_cache_free(&od->cache);
++		od->is_cached = false;
++	}
++	WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
++	if (od->is_real && type == OVL_PATH_MERGE) {
++		fput(od->realfile);
++		od->realfile = NULL;
++		od->is_real = false;
++	}
++}
++
++static int ovl_dir_mark_whiteouts(struct ovl_readdir_data *rdd)
++{
++	struct ovl_cache_entry *p;
++	struct dentry *dentry;
++	const struct cred *old_cred;
++	struct cred *override_cred;
++
++	override_cred = prepare_creds();
++	if (!override_cred) {
++		ovl_cache_free(rdd->list);
++		return -ENOMEM;
++	}
++
++	/*
++	 * CAP_SYS_ADMIN for getxattr
++	 * CAP_DAC_OVERRIDE for lookup
++	 */
++	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++	old_cred = override_creds(override_cred);
++
++	mutex_lock(&rdd->dir->d_inode->i_mutex);
++	list_for_each_entry(p, rdd->list, l_node) {
++		if (p->type != DT_LNK)
++			continue;
++
++		dentry = lookup_one_len(p->name, rdd->dir, p->len);
++		if (IS_ERR(dentry))
++			continue;
++
++		p->is_whiteout = ovl_is_whiteout(dentry);
++		dput(dentry);
++	}
++	mutex_unlock(&rdd->dir->d_inode->i_mutex);
++
++	revert_creds(old_cred);
++	put_cred(override_cred);
++
++	return 0;
++}
++
++static inline int ovl_dir_read_merged(struct path *upperpath,
++				      struct path *lowerpath,
++				      struct ovl_readdir_data *rdd)
++{
++	int err;
++	struct rb_root root = RB_ROOT;
++	struct list_head middle;
++
++	rdd->root = &root;
++	if (upperpath->dentry) {
++		rdd->dir = upperpath->dentry;
++		err = ovl_dir_read(upperpath, rdd, ovl_fill_upper);
++		if (err)
++			goto out;
++
++		err = ovl_dir_mark_whiteouts(rdd);
++		if (err)
++			goto out;
++	}
++	/*
++	 * Insert lowerpath entries before upperpath ones; this allows
++	 * offsets to remain reasonably constant.
++	 */
++	list_add(&middle, rdd->list);
++	rdd->middle = &middle;
++	err = ovl_dir_read(lowerpath, rdd, ovl_fill_lower);
++	list_del(&middle);
++out:
++	rdd->root = NULL;
++
++	return err;
++}
++
++static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
++{
++	struct list_head *l;
++	loff_t off;
++
++	l = od->cache.next;
++	for (off = 0; off < pos; off++) {
++		if (l == &od->cache)
++			break;
++		l = l->next;
++	}
++	list_move_tail(&od->cursor, l);
++}
++
++static int ovl_readdir(struct file *file, void *buf, filldir_t filler)
++{
++	struct ovl_dir_file *od = file->private_data;
++	int res;
++
++	if (!file->f_pos)
++		ovl_dir_reset(file);
++
++	if (od->is_real) {
++		res = vfs_readdir(od->realfile, filler, buf);
++		file->f_pos = od->realfile->f_pos;
++
++		return res;
++	}
++
++	if (!od->is_cached) {
++		struct path lowerpath;
++		struct path upperpath;
++		struct ovl_readdir_data rdd = { .list = &od->cache };
++
++		ovl_path_lower(file->f_path.dentry, &lowerpath);
++		ovl_path_upper(file->f_path.dentry, &upperpath);
++
++		res = ovl_dir_read_merged(&upperpath, &lowerpath, &rdd);
++		if (res) {
++			ovl_cache_free(rdd.list);
++			return res;
++		}
++
++		od->cache_version = ovl_dentry_version_get(file->f_path.dentry);
++		od->is_cached = true;
++
++		ovl_seek_cursor(od, file->f_pos);
++	}
++
++	while (od->cursor.next != &od->cache) {
++		int over;
++		loff_t off;
++		struct ovl_cache_entry *p;
++
++		p = list_entry(od->cursor.next, struct ovl_cache_entry, l_node);
++		off = file->f_pos;
++		if (!p->is_whiteout) {
++			over = filler(buf, p->name, p->len, off, p->ino, p->type);
++			if (over)
++				break;
++		}
++		file->f_pos++;
++		list_move(&od->cursor, &p->l_node);
++	}
++
++	return 0;
++}
++
++static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++	loff_t res;
++	struct ovl_dir_file *od = file->private_data;
++
++	mutex_lock(&file->f_dentry->d_inode->i_mutex);
++	if (!file->f_pos)
++		ovl_dir_reset(file);
++
++	if (od->is_real) {
++		res = vfs_llseek(od->realfile, offset, origin);
++		file->f_pos = od->realfile->f_pos;
++	} else {
++		res = -EINVAL;
++
++		switch (origin) {
++		case SEEK_CUR:
++			offset += file->f_pos;
++			break;
++		case SEEK_SET:
++			break;
++		default:
++			goto out_unlock;
++		}
++		if (offset < 0)
++			goto out_unlock;
++
++		if (offset != file->f_pos) {
++			file->f_pos = offset;
++			if (od->is_cached)
++				ovl_seek_cursor(od, offset);
++		}
++		res = offset;
++	}
++out_unlock:
++	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
++
++	return res;
++}
++
++static int ovl_dir_fsync(struct file *file, int datasync)
++{
++	struct ovl_dir_file *od = file->private_data;
++
++	/* May need to reopen the directory if it got copied up */
++	if (!od->realfile) {
++		struct path upperpath;
++
++		ovl_path_upper(file->f_path.dentry, &upperpath);
++		od->realfile = vfs_open(&upperpath, O_RDONLY, current_cred());
++		if (IS_ERR(od->realfile))
++			return PTR_ERR(od->realfile);
++	}
++
++	return vfs_fsync(od->realfile, datasync);
++}
++
++static int ovl_dir_release(struct inode *inode, struct file *file)
++{
++	struct ovl_dir_file *od = file->private_data;
++
++	list_del(&od->cursor);
++	ovl_cache_free(&od->cache);
++	if (od->realfile)
++		fput(od->realfile);
++	kfree(od);
++
++	return 0;
++}
++
++static int ovl_dir_open(struct inode *inode, struct file *file)
++{
++	struct path realpath;
++	struct file *realfile;
++	struct ovl_dir_file *od;
++	enum ovl_path_type type;
++
++	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
++	if (!od)
++		return -ENOMEM;
++
++	type = ovl_path_real(file->f_path.dentry, &realpath);
++	realfile = vfs_open(&realpath, file->f_flags, current_cred());
++	if (IS_ERR(realfile)) {
++		kfree(od);
++		return PTR_ERR(realfile);
++	}
++	INIT_LIST_HEAD(&od->cache);
++	INIT_LIST_HEAD(&od->cursor);
++	od->is_cached = false;
++	od->realfile = realfile;
++	od->is_real = (type != OVL_PATH_MERGE);
++	file->private_data = od;
++
++	return 0;
++}
++
++const struct file_operations ovl_dir_operations = {
++	.read		= generic_read_dir,
++	.open		= ovl_dir_open,
++	.readdir	= ovl_readdir,
++	.llseek		= ovl_dir_llseek,
++	.fsync		= ovl_dir_fsync,
++	.release	= ovl_dir_release,
++};
++
++static int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
++{
++	int err;
++	struct path lowerpath;
++	struct path upperpath;
++	struct ovl_cache_entry *p;
++	struct ovl_readdir_data rdd = { .list = list };
++
++	ovl_path_upper(dentry, &upperpath);
++	ovl_path_lower(dentry, &lowerpath);
++
++	err = ovl_dir_read_merged(&upperpath, &lowerpath, &rdd);
++	if (err)
++		return err;
++
++	err = 0;
++
++	list_for_each_entry(p, list, l_node) {
++		if (p->is_whiteout)
++			continue;
++
++		if (p->name[0] == '.') {
++			if (p->len == 1)
++				continue;
++			if (p->len == 2 && p->name[1] == '.')
++				continue;
++		}
++		err = -ENOTEMPTY;
++		break;
++	}
++
++	return err;
++}
++
++static int ovl_remove_whiteouts(struct dentry *dir, struct list_head *list)
++{
++	struct path upperpath;
++	struct dentry *upperdir;
++	struct ovl_cache_entry *p;
++	const struct cred *old_cred;
++	struct cred *override_cred;
++	int err;
++
++	ovl_path_upper(dir, &upperpath);
++	upperdir = upperpath.dentry;
++
++	override_cred = prepare_creds();
++	if (!override_cred)
++		return -ENOMEM;
++
++	/*
++	 * CAP_DAC_OVERRIDE for lookup and unlink
++	 * CAP_SYS_ADMIN for setxattr of "trusted" namespace
++	 * CAP_FOWNER for unlink in sticky directory
++	 */
++	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++	cap_raise(override_cred->cap_effective, CAP_FOWNER);
++	old_cred = override_creds(override_cred);
++
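++	/*
++	 * Mark the directory opaque first: once the whiteouts are gone,
++	 * the lower entries they were covering must not become visible
++	 * again.
++	 */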
++	err = vfs_setxattr(upperdir, ovl_opaque_xattr, "y", 1, 0);
++	if (err)
++		goto out_revert_creds;
++
++	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++	list_for_each_entry(p, list, l_node) {
++		struct dentry *dentry;
++		int ret;
++
++		if (!p->is_whiteout)
++			continue;
++
++		dentry = lookup_one_len(p->name, upperdir, p->len);
++		if (IS_ERR(dentry)) {
++			printk(KERN_WARNING "overlayfs: failed to lookup whiteout %.*s: %li\n", p->len, p->name, PTR_ERR(dentry));
++			continue;
++		}
++		ret = vfs_unlink(upperdir->d_inode, dentry);
++		dput(dentry);
++		if (ret)
++			printk(KERN_WARNING "overlayfs: failed to unlink whiteout %.*s: %i\n", p->len, p->name, ret);
++	}
++	mutex_unlock(&upperdir->d_inode->i_mutex);
++
++out_revert_creds:
++	revert_creds(old_cred);
++	put_cred(override_cred);
++
++	return err;
++}
++
++int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type)
++{
++	int err;
++	LIST_HEAD(list);
++
++	err = ovl_check_empty_dir(dentry, &list);
++	if (!err && type == OVL_PATH_MERGE)
++		err = ovl_remove_whiteouts(dentry, &list);
++
++	ovl_cache_free(&list);
++
++	return err;
++}
+--- /dev/null
++++ b/fs/overlayfs/super.c
+@@ -0,0 +1,656 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/xattr.h>
++#include <linux/security.h>
++#include <linux/mount.h>
++#include <linux/slab.h>
++#include <linux/parser.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include "overlayfs.h"
++
++MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
++MODULE_DESCRIPTION("Overlay filesystem");
++MODULE_LICENSE("GPL");
++
++struct ovl_config {
++	char *lowerdir;
++	char *upperdir;
++};
++
++/* private information held for overlayfs's superblock */
++struct ovl_fs {
++	struct vfsmount *upper_mnt;
++	struct vfsmount *lower_mnt;
++	/* pathnames of lower and upper dirs, for show_options */
++	struct ovl_config config;
++};
++
++/* private information held for every overlayfs dentry */
++struct ovl_entry {
++	/*
++	 * Keep "double reference" on upper dentries, so that
++	 * d_delete() doesn't think it's OK to reset d_inode to NULL.
++	 */
++	struct dentry *__upperdentry;
++	struct dentry *lowerdentry;
++	union {
++		struct {
++			u64 version;
++			bool opaque;
++		};
++		struct rcu_head rcu;
++	};
++};
++
++const char *ovl_whiteout_xattr = "trusted.overlay.whiteout";
++const char *ovl_opaque_xattr = "trusted.overlay.opaque";
++
++enum ovl_path_type ovl_path_type(struct dentry *dentry)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++
++	if (oe->__upperdentry) {
++		if (oe->lowerdentry && S_ISDIR(dentry->d_inode->i_mode))
++			return OVL_PATH_MERGE;
++		else
++			return OVL_PATH_UPPER;
++	} else {
++		return OVL_PATH_LOWER;
++	}
++}
++
++static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
++{
++	struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry);
++	smp_read_barrier_depends();
++	return upperdentry;
++}
++
++void ovl_path_upper(struct dentry *dentry, struct path *path)
++{
++	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
++	struct ovl_entry *oe = dentry->d_fsdata;
++
++	path->mnt = ofs->upper_mnt;
++	path->dentry = ovl_upperdentry_dereference(oe);
++}
++
++void ovl_path_lower(struct dentry *dentry, struct path *path)
++{
++	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
++	struct ovl_entry *oe = dentry->d_fsdata;
++
++	path->mnt = ofs->lower_mnt;
++	path->dentry = oe->lowerdentry;
++}
++
++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
++{
++	enum ovl_path_type type = ovl_path_type(dentry);
++
++	if (type == OVL_PATH_LOWER)
++		ovl_path_lower(dentry, path);
++	else
++		ovl_path_upper(dentry, path);
++
++	return type;
++}
++
++struct dentry *ovl_dentry_upper(struct dentry *dentry)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++
++	return ovl_upperdentry_dereference(oe);
++}
++
++struct dentry *ovl_dentry_lower(struct dentry *dentry)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++
++	return oe->lowerdentry;
++}
++
++struct dentry *ovl_dentry_real(struct dentry *dentry)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++	struct dentry *realdentry;
++
++	realdentry = ovl_upperdentry_dereference(oe);
++	if (!realdentry)
++		realdentry = oe->lowerdentry;
++
++	return realdentry;
++}
++
++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
++{
++	struct dentry *realdentry;
++
++	realdentry = ovl_upperdentry_dereference(oe);
++	if (realdentry) {
++		*is_upper = true;
++	} else {
++		realdentry = oe->lowerdentry;
++		*is_upper = false;
++	}
++	return realdentry;
++}
++
++bool ovl_dentry_is_opaque(struct dentry *dentry)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++	return oe->opaque;
++}
++
++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++	oe->opaque = opaque;
++}
++
++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++
++	WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex));
++	WARN_ON(oe->__upperdentry);
++	BUG_ON(!upperdentry->d_inode);
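++	/* Pairs with smp_read_barrier_depends() in ovl_upperdentry_dereference() */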
++	smp_wmb();
++	oe->__upperdentry = dget(upperdentry);
++}
++
++void ovl_dentry_version_inc(struct dentry *dentry)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++
++	WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
++	oe->version++;
++}
++
++u64 ovl_dentry_version_get(struct dentry *dentry)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++
++	WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
++	return oe->version;
++}
++
++bool ovl_is_whiteout(struct dentry *dentry)
++{
++	int res;
++	char val;
++
++	if (!dentry)
++		return false;
++	if (!dentry->d_inode)
++		return false;
++	if (!S_ISLNK(dentry->d_inode->i_mode))
++		return false;
++
++	res = vfs_getxattr(dentry, ovl_whiteout_xattr, &val, 1);
++	if (res == 1 && val == 'y')
++		return true;
++
++	return false;
++}
++
++static bool ovl_is_opaquedir(struct dentry *dentry)
++{
++	int res;
++	char val;
++
++	if (!S_ISDIR(dentry->d_inode->i_mode))
++		return false;
++
++	res = vfs_getxattr(dentry, ovl_opaque_xattr, &val, 1);
++	if (res == 1 && val == 'y')
++		return true;
++
++	return false;
++}
++
++static void ovl_entry_free(struct rcu_head *head)
++{
++	struct ovl_entry *oe = container_of(head, struct ovl_entry, rcu);
++	kfree(oe);
++}
++
++static void ovl_dentry_release(struct dentry *dentry)
++{
++	struct ovl_entry *oe = dentry->d_fsdata;
++
++	if (oe) {
++		dput(oe->__upperdentry);
++		dput(oe->lowerdentry);
++		call_rcu(&oe->rcu, ovl_entry_free);
++	}
++}
++
++const struct dentry_operations ovl_dentry_operations = {
++	.d_release = ovl_dentry_release,
++};
++
++static struct ovl_entry *ovl_alloc_entry(void)
++{
++	return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
++}
++
++static inline struct dentry *ovl_lookup_real(struct dentry *dir, struct qstr *name)
++{
++	struct dentry *dentry;
++
++	mutex_lock(&dir->d_inode->i_mutex);
++	dentry = lookup_one_len(name->name, dir, name->len);
++	mutex_unlock(&dir->d_inode->i_mutex);
++
++	if (IS_ERR(dentry)) {
++		if (PTR_ERR(dentry) == -ENOENT)
++			dentry = NULL;
++	} else if (!dentry->d_inode) {
++		dput(dentry);
++		dentry = NULL;
++	}
++	return dentry;
++}
++
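++/*
++ * Look the name up in the upper layer first.  A whiteout there, or an
++ * opaque directory, prevents the lower layer from showing through;
++ * otherwise the lower layer is also consulted.  When both entries
++ * exist they are merged only if both are directories, else the upper
++ * entry wins.
++ */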
++static int ovl_do_lookup(struct dentry *dentry)
++{
++	struct ovl_entry *oe;
++	struct dentry *upperdir;
++	struct dentry *lowerdir;
++	struct dentry *upperdentry = NULL;
++	struct dentry *lowerdentry = NULL;
++	struct inode *inode = NULL;
++	int err;
++
++	err = -ENOMEM;
++	oe = ovl_alloc_entry();
++	if (!oe)
++		goto out;
++
++	upperdir = ovl_dentry_upper(dentry->d_parent);
++	lowerdir = ovl_dentry_lower(dentry->d_parent);
++
++	if (upperdir) {
++		upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
++		err = PTR_ERR(upperdentry);
++		if (IS_ERR(upperdentry))
++			goto out_put_dir;
++
++		if (lowerdir && upperdentry &&
++		    (S_ISLNK(upperdentry->d_inode->i_mode) ||
++		     S_ISDIR(upperdentry->d_inode->i_mode))) {
++			const struct cred *old_cred;
++			struct cred *override_cred;
++
++			err = -ENOMEM;
++			override_cred = prepare_creds();
++			if (!override_cred)
++				goto out_dput_upper;
++
++			/* CAP_SYS_ADMIN needed for getxattr */
++			cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++			old_cred = override_creds(override_cred);
++
++			if (ovl_is_opaquedir(upperdentry)) {
++				oe->opaque = true;
++			} else if (ovl_is_whiteout(upperdentry)) {
++				dput(upperdentry);
++				upperdentry = NULL;
++				oe->opaque = true;
++			}
++			revert_creds(old_cred);
++			put_cred(override_cred);
++		}
++	}
++	if (lowerdir && !oe->opaque) {
++		lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
++		err = PTR_ERR(lowerdentry);
++		if (IS_ERR(lowerdentry))
++			goto out_dput_upper;
++	}
++
++	if (lowerdentry && upperdentry &&
++	    (!S_ISDIR(upperdentry->d_inode->i_mode) ||
++	     !S_ISDIR(lowerdentry->d_inode->i_mode))) {
++		dput(lowerdentry);
++		lowerdentry = NULL;
++		oe->opaque = true;
++	}
++
++	if (lowerdentry || upperdentry) {
++		struct dentry *realdentry;
++
++		realdentry = upperdentry ? upperdentry : lowerdentry;
++		err = -ENOMEM;
++		inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode, oe);
++		if (!inode)
++			goto out_dput;
++	}
++
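++	/*
++	 * Transfer the lookup references to the new entry; they are
++	 * dropped again in ovl_dentry_release().
++	 */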
++	if (upperdentry)
++		oe->__upperdentry = upperdentry;
++
++	if (lowerdentry)
++		oe->lowerdentry = lowerdentry;
++
++	dentry->d_fsdata = oe;
++	dentry->d_op = &ovl_dentry_operations;
++	d_add(dentry, inode);
++
++	return 0;
++
++out_dput:
++	dput(lowerdentry);
++out_dput_upper:
++	dput(upperdentry);
++out_put_dir:
++	kfree(oe);
++out:
++	return err;
++}
++
++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
++			  struct nameidata *nd)
++{
++	int err = ovl_do_lookup(dentry);
++
++	if (err)
++		return ERR_PTR(err);
++
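++	/*
++	 * ovl_do_lookup() has already attached an inode with d_add();
++	 * returning NULL tells the VFS to use the passed-in dentry.
++	 */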
++	return NULL;
++}
++
++static void ovl_put_super(struct super_block *sb)
++{
++	struct ovl_fs *ufs = sb->s_fs_info;
++
++	if (!(sb->s_flags & MS_RDONLY))
++		mnt_drop_write(ufs->upper_mnt);
++
++	mntput(ufs->upper_mnt);
++	mntput(ufs->lower_mnt);
++
++	kfree(ufs->config.lowerdir);
++	kfree(ufs->config.upperdir);
++	kfree(ufs);
++}
++
++static int ovl_remount_fs(struct super_block *sb, int *flagsp, char *data)
++{
++	int flags = *flagsp;
++	struct ovl_fs *ufs = sb->s_fs_info;
++
++	/* When remounting rw or ro, we need to adjust the write access to the
++	 * upper fs.
++	 */
++	if (((flags ^ sb->s_flags) & MS_RDONLY) == 0)
++		/* No change to readonly status */
++		return 0;
++
++	if (flags & MS_RDONLY) {
++		mnt_drop_write(ufs->upper_mnt);
++		return 0;
++	} else
++		return mnt_want_write(ufs->upper_mnt);
++}
++
++/**
++ * ovl_statfs
++ * @dentry: The overlayfs dentry whose super block is queried
++ * @buf: The struct kstatfs to fill in with stats
++ *
++ * Get the filesystem statistics.  As writes always target the upper
++ * layer filesystem, pass the statfs call on to that filesystem.
++ */
++static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct dentry *root_dentry = dentry->d_sb->s_root;
++	struct path path;
++	ovl_path_upper(root_dentry, &path);
++
++	if (!path.dentry->d_sb->s_op->statfs)
++		return -ENOSYS;
++	return path.dentry->d_sb->s_op->statfs(path.dentry, buf);
++}
++
++/**
++ * ovl_show_options
++ *
++ * Prints the mount options for a given superblock.
++ * Returns zero; does not fail.
++ */
++static int ovl_show_options(struct seq_file *m, struct vfsmount *mnt)
++{
++	struct super_block *sb = mnt->mnt_sb;
++	struct ovl_fs *ufs = sb->s_fs_info;
++
++	seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
++	seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
++	return 0;
++}
++
++static const struct super_operations ovl_super_operations = {
++	.put_super	= ovl_put_super,
++	.remount_fs	= ovl_remount_fs,
++	.statfs		= ovl_statfs,
++	.show_options	= ovl_show_options,
++};
++
++enum {
++	Opt_lowerdir,
++	Opt_upperdir,
++	Opt_err,
++};
++
++static const match_table_t ovl_tokens = {
++	{Opt_lowerdir,			"lowerdir=%s"},
++	{Opt_upperdir,			"upperdir=%s"},
++	{Opt_err,			NULL}
++};
++
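++/*
++ * Example mount (paths are illustrative):
++ *
++ *   mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper /mnt
++ *
++ * Both options are required; ovl_fill_super() rejects a mount that is
++ * missing either one.
++ */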
++static int ovl_parse_opt(char *opt, struct ovl_config *config)
++{
++	char *p;
++
++	config->upperdir = NULL;
++	config->lowerdir = NULL;
++
++	while ((p = strsep(&opt, ",")) != NULL) {
++		int token;
++		substring_t args[MAX_OPT_ARGS];
++
++		if (!*p)
++			continue;
++
++		token = match_token(p, ovl_tokens, args);
++		switch (token) {
++		case Opt_upperdir:
++			kfree(config->upperdir);
++			config->upperdir = match_strdup(&args[0]);
++			if (!config->upperdir)
++				return -ENOMEM;
++			break;
++
++		case Opt_lowerdir:
++			kfree(config->lowerdir);
++			config->lowerdir = match_strdup(&args[0]);
++			if (!config->lowerdir)
++				return -ENOMEM;
++			break;
++
++		default:
++			return -EINVAL;
++		}
++	}
++	return 0;
++}
++
++static int ovl_fill_super(struct super_block *sb, void *data, int silent)
++{
++	struct path lowerpath;
++	struct path upperpath;
++	struct inode *root_inode;
++	struct dentry *root_dentry;
++	struct ovl_entry *oe;
++	struct ovl_fs *ufs;
++	int err;
++
++	err = -ENOMEM;
++	ufs = kmalloc(sizeof(struct ovl_fs), GFP_KERNEL);
++	if (!ufs)
++		goto out;
++
++	err = ovl_parse_opt((char *) data, &ufs->config);
++	if (err)
++		goto out_free_config;
++
++	err = -EINVAL;
++	if (!ufs->config.upperdir || !ufs->config.lowerdir) {
++		printk(KERN_ERR "overlayfs: missing upperdir or lowerdir\n");
++		goto out_free_config;
++	}
++
++	oe = ovl_alloc_entry();
++	if (oe == NULL)
++		goto out_free_config;
++
++	root_inode = ovl_new_inode(sb, S_IFDIR, oe);
++	if (!root_inode)
++		goto out_free_oe;
++
++	err = kern_path(ufs->config.upperdir, LOOKUP_FOLLOW, &upperpath);
++	if (err)
++		goto out_put_root;
++
++	err = kern_path(ufs->config.lowerdir, LOOKUP_FOLLOW, &lowerpath);
++	if (err)
++		goto out_put_upperpath;
++
++	err = -ENOTDIR;
++	if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
++	    !S_ISDIR(lowerpath.dentry->d_inode->i_mode))
++		goto out_put_lowerpath;
++
++	sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
++				lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
++
++	err = -EINVAL;
++	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++		printk(KERN_ERR "overlayfs: maximum fs stacking depth exceeded\n");
++		goto out_put_lowerpath;
++	}
++
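++	/*
++	 * Clone the upper and lower mounts privately, so that later
++	 * changes to the mounts in the caller's namespace cannot affect
++	 * the overlay's view of the layers.
++	 */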
++	ufs->upper_mnt = clone_private_mount(&upperpath);
++	err = PTR_ERR(ufs->upper_mnt);
++	if (IS_ERR(ufs->upper_mnt)) {
++		printk(KERN_ERR "overlayfs: failed to clone upperpath\n");
++		goto out_put_lowerpath;
++	}
++
++	ufs->lower_mnt = clone_private_mount(&lowerpath);
++	err = PTR_ERR(ufs->lower_mnt);
++	if (IS_ERR(ufs->lower_mnt)) {
++		printk(KERN_ERR "overlayfs: failed to clone lowerpath\n");
++		goto out_put_upper_mnt;
++	}
++
++	/*
++	 * Make lower_mnt R/O.  That way fchmod/fchown on lower file
++	 * will fail instead of modifying lower fs.
++	 */
++	ufs->lower_mnt->mnt_flags |= MNT_READONLY;
++
++	/* If the upper fs is r/o, we mark overlayfs r/o too */
++	if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
++		sb->s_flags |= MS_RDONLY;
++
++	if (!(sb->s_flags & MS_RDONLY)) {
++		err = mnt_want_write(ufs->upper_mnt);
++		if (err)
++			goto out_put_lower_mnt;
++	}
++
++	err = -ENOMEM;
++	root_dentry = d_alloc_root(root_inode);
++	if (!root_dentry)
++		goto out_drop_write;
++
++	mntput(upperpath.mnt);
++	mntput(lowerpath.mnt);
++
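++	/*
++	 * The dentry references obtained by kern_path() are transferred
++	 * to the root entry; only the extra mount references were
++	 * dropped above.
++	 */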
++	oe->__upperdentry = upperpath.dentry;
++	oe->lowerdentry = lowerpath.dentry;
++
++	root_dentry->d_fsdata = oe;
++	root_dentry->d_op = &ovl_dentry_operations;
++
++	sb->s_op = &ovl_super_operations;
++	sb->s_root = root_dentry;
++	sb->s_fs_info = ufs;
++
++	return 0;
++
++out_drop_write:
++	if (!(sb->s_flags & MS_RDONLY))
++		mnt_drop_write(ufs->upper_mnt);
++out_put_lower_mnt:
++	mntput(ufs->lower_mnt);
++out_put_upper_mnt:
++	mntput(ufs->upper_mnt);
++out_put_lowerpath:
++	path_put(&lowerpath);
++out_put_upperpath:
++	path_put(&upperpath);
++out_put_root:
++	iput(root_inode);
++out_free_oe:
++	kfree(oe);
++out_free_config:
++	kfree(ufs->config.lowerdir);
++	kfree(ufs->config.upperdir);
++out_free_ufs:
++	kfree(ufs);
++out:
++	return err;
++}
++
++static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags,
++				const char *dev_name, void *raw_data)
++{
++	return mount_nodev(fs_type, flags, raw_data, ovl_fill_super);
++}
++
++static struct file_system_type ovl_fs_type = {
++	.owner		= THIS_MODULE,
++	.name		= "overlayfs",
++	.mount		= ovl_mount,
++	.kill_sb	= kill_anon_super,
++};
++
++static int __init ovl_init(void)
++{
++	return register_filesystem(&ovl_fs_type);
++}
++
++static void __exit ovl_exit(void)
++{
++	unregister_filesystem(&ovl_fs_type);
++}
++
++module_init(ovl_init);
++module_exit(ovl_exit);
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1300,6 +1300,7 @@ long do_splice_direct(struct file *in, l
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL(do_splice_direct);
+ 
+ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
+ 			       struct pipe_inode_info *opipe,
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -480,6 +480,12 @@ struct iattr {
+  */
+ #include <linux/quota.h>
+ 
++/*
++ * Maximum number of layers of fs stack.  Needs to be limited to
++ * prevent kernel stack overflow
++ */
++#define FILESYSTEM_MAX_STACK_DEPTH 2
++
+ /** 
+  * enum positive_aop_returns - aop return codes with specific semantics
+  *
+@@ -1438,6 +1444,11 @@ struct super_block {
+ 	 * Saved pool identifier for cleancache (-1 means none)
+ 	 */
+ 	int cleancache_poolid;
++
++	/*
++	 * Indicates how deep in a filesystem stack this SB is
++	 */
++	int s_stack_depth;
+ };
+ 
+ extern struct timespec current_fs_time(struct super_block *sb);
+@@ -1603,6 +1614,7 @@ struct inode_operations {
+ 	void (*truncate_range)(struct inode *, loff_t, loff_t);
+ 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
+ 		      u64 len);
++	struct file *(*open)(struct dentry *, int flags, const struct cred *);
+ } ____cacheline_aligned;
+ 
+ struct seq_file;
+@@ -1998,6 +2010,7 @@ extern long do_sys_open(int dfd, const c
+ extern struct file *filp_open(const char *, int, int);
+ extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+ 				   const char *, int);
++extern struct file *vfs_open(struct path *, int flags, const struct cred *);
+ extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
+ 				 const struct cred *);
+ extern int filp_close(struct file *, fl_owner_t id);
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -100,6 +100,9 @@ extern void mnt_pin(struct vfsmount *mnt
+ extern void mnt_unpin(struct vfsmount *mnt);
+ extern int __mnt_is_readonly(struct vfsmount *mnt);
+ 
++struct path;
++extern struct vfsmount *clone_private_mount(struct path *path);
++
+ extern struct vfsmount *do_kern_mount(const char *fstype, int flags,
+ 				      const char *name, void *data);
+ 
diff --git a/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0011-fix-yylloc-for-modern-computers.patch b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0011-fix-yylloc-for-modern-computers.patch
new file mode 100644
index 00000000..d7088f90
--- /dev/null
+++ b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0011-fix-yylloc-for-modern-computers.patch
@@ -0,0 +1,11 @@
+--- a/scripts/dtc/dtc-lexer.lex.c_shipped	2015-08-07 22:08:04.000000000 +0300
++++ b/scripts/dtc/dtc-lexer.lex.c_shipped	2021-11-01 22:15:12.347053553 +0300
+@@ -637,7 +637,7 @@
+ #include "srcpos.h"
+ #include "dtc-parser.tab.h"
+ 
+-YYLTYPE yylloc;
++extern YYLTYPE yylloc;
+ 
+ /* CAUTION: this will stop working if we ever use yyless() or yyunput() */
+ #define	YY_USER_ACTION \
diff --git a/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0012-uimage-makefile.patch b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0012-uimage-makefile.patch
new file mode 100644
index 00000000..428b5a73
--- /dev/null
+++ b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0012-uimage-makefile.patch
@@ -0,0 +1,62 @@
+--- linux-3.0.101/arch/arm/boot/Makefile	2013-10-22 11:58:59.000000000 +0400
++++ linux-3.0.101/arch/arm/boot/Makefile	2021-12-15 10:51:15.956535987 +0300
+@@ -59,15 +59,19 @@
+ 
+ endif
+ 
+-quiet_cmd_uimage = UIMAGE  $@
+-      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \
+-		   -C none -a $(LOADADDR) -e $(STARTADDR) \
+-		   -n 'Linux-$(KERNELRELEASE)' -d $< $@
++#quiet_cmd_uimage = UIMAGE  $@
++#      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \
++#		   -C none -a $(LOADADDR) -e $(STARTADDR) \
++#		   -n 'Linux-$(KERNELRELEASE)' -d $< $@
+ 
+-ifeq ($(CONFIG_ZBOOT_ROM),y)
+-$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
++ifneq ($(LOADADDR),)
++  UIMAGE_LOADADDR=$(LOADADDR)
+ else
+-$(obj)/uImage: LOADADDR=$(ZRELADDR)
++  ifeq ($(CONFIG_ZBOOT_ROM),y)
++    UIMAGE_LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
++  else
++    UIMAGE_LOADADDR=$(ZRELADDR)
++  endif
+ endif
+ 
+ $(obj)/uImage: STARTADDR=$(LOADADDR)
+--- linux-3.0.101/scripts/Makefile.lib	2013-10-22 11:58:59.000000000 +0400
++++ linux-3.0.101/scripts/Makefile.lib	2021-12-15 10:53:24.551357718 +0300
+@@ -262,6 +262,30 @@
+ 	lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
+ 	(rm -f $@ ; false)
+ 
++# U-Boot mkimage
++# ---------------------------------------------------------------------------
++
++MKIMAGE := $(srctree)/scripts/mkuboot.sh
++
++# SRCARCH just happens to match slightly more than ARCH (on sparc), so reduces
++# the number of overrides in arch makefiles
++UIMAGE_ARCH ?= $(SRCARCH)
++UIMAGE_COMPRESSION ?= $(if $(2),$(2),none)
++UIMAGE_OPTS-y ?=
++UIMAGE_TYPE ?= kernel
++UIMAGE_LOADADDR ?= arch_must_set_this
++UIMAGE_ENTRYADDR ?= $(UIMAGE_LOADADDR)
++UIMAGE_NAME ?= 'Linux-$(KERNELRELEASE)'
++UIMAGE_IN ?= $<
++UIMAGE_OUT ?= $@
++
++quiet_cmd_uimage = UIMAGE  $(UIMAGE_OUT)
++      cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(UIMAGE_ARCH) -O linux \
++			-C $(UIMAGE_COMPRESSION) $(UIMAGE_OPTS-y) \
++			-T $(UIMAGE_TYPE) \
++			-a $(UIMAGE_LOADADDR) -e $(UIMAGE_ENTRYADDR) \
++			-n $(UIMAGE_NAME) -d $(UIMAGE_IN) $(UIMAGE_OUT)
++
+ # XZ
+ # ---------------------------------------------------------------------------
+ # Use xzkern to compress the kernel image and xzmisc to compress other things.
diff --git a/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0013-fix-openpty.patch b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0013-fix-openpty.patch
new file mode 100644
index 00000000..e00b7157
--- /dev/null
+++ b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0013-fix-openpty.patch
@@ -0,0 +1,144 @@
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -721,7 +721,18 @@ err_file:
+ 	return retval;
+ }
+ 
+-static struct file_operations ptmx_fops;
++static const struct file_operations ptmx_fops =
++{
++	.llseek		= no_llseek,
++	.read		= tty_read,
++	.write		= tty_write,
++	.poll		= tty_poll,
++	.unlocked_ioctl	= tty_ioctl,
++	.compat_ioctl	= tty_compat_ioctl,
++	.open		= ptmx_open,
++	.release	= tty_release,
++	.fasync		= tty_fasync,
++};
+ 
+ static void __init unix98_pty_init(void)
+ {
+@@ -775,9 +786,6 @@ static void __init unix98_pty_init(void)
+ 	register_sysctl_table(pty_root_table);
+ 
+ 	/* Now create the /dev/ptmx special device */
+-	tty_default_fops(&ptmx_fops);
+-	ptmx_fops.open = ptmx_open;
+-
+ 	cdev_init(&ptmx_cdev, &ptmx_fops);
+ 	if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
+ 	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
+
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -137,21 +137,10 @@ EXPORT_SYMBOL(tty_mutex);
+ /* Spinlock to protect the tty->tty_files list */
+ DEFINE_SPINLOCK(tty_files_lock);
+ 
+-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
+-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
+ ssize_t redirected_tty_write(struct file *, const char __user *,
+ 							size_t, loff_t *);
+-static unsigned int tty_poll(struct file *, poll_table *);
+ static int tty_open(struct inode *, struct file *);
+-long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+-#ifdef CONFIG_COMPAT
+-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+-				unsigned long arg);
+-#else
+-#define tty_compat_ioctl NULL
+-#endif
+ static int __tty_fasync(int fd, struct file *filp, int on);
+-static int tty_fasync(int fd, struct file *filp, int on);
+ static void release_tty(struct tty_struct *tty, int idx);
+ static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
+ static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
+@@ -962,7 +951,7 @@ static void tty_update_time(struct timespec *time)
+  *	read calls may be outstanding in parallel.
+  */
+ 
+-static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
++ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+ 			loff_t *ppos)
+ {
+ 	int i;
+@@ -1141,7 +1130,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
+  *	write method will not be invoked in parallel for each device.
+  */
+ 
+-static ssize_t tty_write(struct file *file, const char __user *buf,
++ssize_t tty_write(struct file *file, const char __user *buf,
+ 						size_t count, loff_t *ppos)
+ {
+ 	struct inode *inode = file->f_path.dentry->d_inode;
+@@ -2002,7 +1991,7 @@ got_driver:
+  *	may be re-entered freely by other callers.
+  */
+ 
+-static unsigned int tty_poll(struct file *filp, poll_table *wait)
++unsigned int tty_poll(struct file *filp, poll_table *wait)
+ {
+ 	struct tty_struct *tty = file_tty(filp);
+ 	struct tty_ldisc *ld;
+@@ -2059,7 +2048,7 @@ out:
+ 	return retval;
+ }
+ 
+-static int tty_fasync(int fd, struct file *filp, int on)
++int tty_fasync(int fd, struct file *filp, int on)
+ {
+ 	int retval;
+ 	tty_lock();
+@@ -3246,11 +3235,6 @@ struct tty_struct *get_current_tty(void)
+ }
+ EXPORT_SYMBOL_GPL(get_current_tty);
+ 
+-void tty_default_fops(struct file_operations *fops)
+-{
+-	*fops = tty_fops;
+-}
+-
+ /*
+  * Initialize the console device. This is called *early*, so
+  * we can't necessarily depend on lots of kernel help here.
+
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -13,6 +13,7 @@
+ #include <linux/tty_driver.h>
+ #include <linux/tty_ldisc.h>
+ #include <linux/mutex.h>
++#include <linux/poll.h>
+ 
+ #include <asm/system.h>
+ 
+@@ -470,7 +471,6 @@ extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
+ extern dev_t tty_devnum(struct tty_struct *tty);
+ extern void proc_clear_tty(struct task_struct *p);
+ extern struct tty_struct *get_current_tty(void);
+-extern void tty_default_fops(struct file_operations *fops);
+ extern struct tty_struct *alloc_tty_struct(void);
+ extern int tty_alloc_file(struct file *file);
+ extern void tty_add_file(struct tty_struct *tty, struct file *file);
+@@ -482,6 +482,19 @@ extern void deinitialize_tty_struct(struct tty_struct *tty);
+ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
+ 								int first_ok);
+ extern int tty_release(struct inode *inode, struct file *filp);
++extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
++extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
++extern unsigned int tty_poll(struct file *, poll_table *);
++
++#ifdef CONFIG_COMPAT
++extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
++				unsigned long arg);
++#else
++#define tty_compat_ioctl NULL
++#endif
++extern int tty_fasync(int fd, struct file *filp, int on);
++
++
+ extern int tty_init_termios(struct tty_struct *tty);
+ 
+ extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
diff --git a/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0015-jl1101-phy-support.patch b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0015-jl1101-phy-support.patch
new file mode 100644
index 00000000..58b46866
--- /dev/null
+++ b/br-ext-chip-fullhan/board/fh8833v100/kernel/patches/0015-jl1101-phy-support.patch
@@ -0,0 +1,28 @@
+--- a/drivers/net/fh_gmac/fh_gmac_phyt.c
++++ b/drivers/net/fh_gmac/fh_gmac_phyt.c
+@@ -83,6 +83,7 @@
+ 	if (pGmac->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ 		switch (pGmac->phydev->phy_id) {
+ 		case FH_GMAC_PHY_RTL8201:
++		case FH_GMAC_PHY_JL1101:
+ 			fh_mdio_write(bus, phyid,
+ 					gmac_phyt_rtl8201_page_select, 7);
+ 			fh_mdio_write(bus, phyid,
+@@ -116,6 +117,7 @@
+ 	} else if (pGmac->phy_interface == PHY_INTERFACE_MODE_MII) {
+ 		switch (pGmac->phydev->phy_id) {
+ 		case FH_GMAC_PHY_RTL8201:
++		case FH_GMAC_PHY_JL1101:
+ 			fh_mdio_write(bus, phyid,
+ 					gmac_phyt_rtl8201_page_select, 7);
+ 			fh_mdio_write(bus, phyid,
+--- a/drivers/net/fh_gmac/fh_gmac_phyt.h
++++ b/drivers/net/fh_gmac/fh_gmac_phyt.h
+@@ -11,6 +11,7 @@
+ #define FH_GMAC_PHY_IP101G	0x02430C54
+ #define FH_GMAC_PHY_RTL8201	0x001CC816
+ #define FH_GMAC_PHY_TI83848	0xFFFFFFFF
++#define FH_GMAC_PHY_JL1101	0x937c4023
+ 
+ enum
+ {
diff --git a/br-ext-chip-fullhan/board/fh8852v100/kernel/patches/13_boardconfig.patch b/br-ext-chip-fullhan/board/fh8852v100/kernel/patches/13_boardconfig.patch
index ab94ca98..1c2c3571 100644
--- a/br-ext-chip-fullhan/board/fh8852v100/kernel/patches/13_boardconfig.patch
+++ b/br-ext-chip-fullhan/board/fh8852v100/kernel/patches/13_boardconfig.patch
@@ -4,7 +4,7 @@
  include/config/auto.conf: ;
  endif # $(dot-config)
  
-+export PROJECT_NAME = $(shell grep -e '^CONFIG_MACH_FH.*' .config|sed 's/CONFIG_MACH_\(.*\)=y/\1/'|awk '{print tolower($$0)}')
++export PROJECT_NAME = $(shell grep -e '^CONFIG_MACH_FH.*' .config|head -1|sed 's/CONFIG_MACH_\(.*\)=y/\1/'|awk '{print tolower($$0)}')
 +
 +PHONY += boardconfig
 +boardconfig: